Column summary: text (string, 12-1.05M chars), repo_name (string, 5-86 chars), path (string, 4-191 chars), language (1 class), license (15 classes), size (int32, 12-1.05M), keyword (list, 1-23 items), text_hash (string, 64 chars).

| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
""" Class that contains client access to the JobStateUpdate handler. """
from DIRAC.Core.Base.Client import Client, createClient
@createClient("WorkloadManagement/JobStateUpdate")
class JobStateUpdateClient(Client):
"""JobStateUpdateClient sets url for the JobStateUpdateHandler."""
def __init__(self, url=None, **kwargs):
"""
Sets URL for JobStateUpdate handler
:param self: self reference
:param url: url of the JobStateUpdateHandler
:param kwargs: forwarded to the Base Client class
"""
super(JobStateUpdateClient, self).__init__(**kwargs)
if not url:
self.serverURL = "WorkloadManagement/JobStateUpdate"
else:
self.serverURL = url
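# Illustrative usage sketch (not part of the original module): in a configured
# DIRAC environment the client is used like any other DIRAC RPC client. The
# `setJobStatus` call and its arguments are assumptions based on the
# JobStateUpdate service interface, and `jobID` is a hypothetical placeholder.
#
#   from DIRAC.WorkloadManagementSystem.Client.JobStateUpdateClient import JobStateUpdateClient
#
#   client = JobStateUpdateClient()
#   result = client.setJobStatus(jobID, "Running", "Application started", "JobWrapper")
#   if not result["OK"]:
#       print(result["Message"])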
| DIRACGrid/DIRAC | src/DIRAC/WorkloadManagementSystem/Client/JobStateUpdateClient.py | Python | gpl-3.0 | 747 | ["DIRAC"] | 2cdc4449cc823d5ad34939dea2c629f4f60e0297951cdcc7bc41698900d8084b |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A powerful dynamic attention wrapper object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import math
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as layers_base
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
__all__ = [
"AttentionMechanism",
"AttentionWrapper",
"AttentionWrapperState",
"LuongAttention",
"BahdanauAttention",
"hardmax",
"safe_cumprod",
"monotonic_attention",
"BahdanauMonotonicAttention",
"LuongMonotonicAttention",
]
_zero_state_tensors = rnn_cell_impl._zero_state_tensors # pylint: disable=protected-access
class AttentionMechanism(object):
pass
def _prepare_memory(memory, memory_sequence_length, check_inner_dims_defined):
"""Convert to tensor and possibly mask `memory`.
Args:
memory: `Tensor`, shaped `[batch_size, max_time, ...]`.
memory_sequence_length: `int32` `Tensor`, shaped `[batch_size]`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
Returns:
A (possibly masked), checked, new `memory`.
Raises:
ValueError: If `check_inner_dims_defined` is `True` and not
`memory.shape[2:].is_fully_defined()`.
"""
memory = nest.map_structure(
lambda m: ops.convert_to_tensor(m, name="memory"), memory)
if memory_sequence_length is not None:
memory_sequence_length = ops.convert_to_tensor(
memory_sequence_length, name="memory_sequence_length")
if check_inner_dims_defined:
def _check_dims(m):
if not m.get_shape()[2:].is_fully_defined():
raise ValueError("Expected memory %s to have fully defined inner dims, "
"but saw shape: %s" % (m.name, m.get_shape()))
nest.map_structure(_check_dims, memory)
if memory_sequence_length is None:
seq_len_mask = None
else:
seq_len_mask = array_ops.sequence_mask(
memory_sequence_length,
maxlen=array_ops.shape(nest.flatten(memory)[0])[1],
dtype=nest.flatten(memory)[0].dtype)
seq_len_batch_size = (
memory_sequence_length.shape[0].value
or array_ops.shape(memory_sequence_length)[0])
def _maybe_mask(m, seq_len_mask):
rank = m.get_shape().ndims
rank = rank if rank is not None else array_ops.rank(m)
extra_ones = array_ops.ones(rank - 2, dtype=dtypes.int32)
m_batch_size = m.shape[0].value or array_ops.shape(m)[0]
if memory_sequence_length is not None:
message = ("memory_sequence_length and memory tensor batch sizes do not "
"match.")
with ops.control_dependencies([
check_ops.assert_equal(
seq_len_batch_size, m_batch_size, message=message)]):
seq_len_mask = array_ops.reshape(
seq_len_mask,
array_ops.concat((array_ops.shape(seq_len_mask), extra_ones), 0))
return m * seq_len_mask
else:
return m
return nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask), memory)
def _maybe_mask_score(score, memory_sequence_length, score_mask_value):
if memory_sequence_length is None:
return score
message = ("All values in memory_sequence_length must greater than zero.")
with ops.control_dependencies(
[check_ops.assert_positive(memory_sequence_length, message=message)]):
score_mask = array_ops.sequence_mask(
memory_sequence_length, maxlen=array_ops.shape(score)[1])
score_mask_values = score_mask_value * array_ops.ones_like(score)
return array_ops.where(score_mask, score, score_mask_values)
class _BaseAttentionMechanism(AttentionMechanism):
"""A base AttentionMechanism class providing common functionality.
Common functionality includes:
1. Storing the query and memory layers.
2. Preprocessing and storing the memory.
"""
def __init__(self,
query_layer,
memory,
probability_fn,
memory_sequence_length=None,
memory_layer=None,
check_inner_dims_defined=True,
score_mask_value=None,
name=None):
"""Construct base AttentionMechanism class.
Args:
query_layer: Callable. Instance of `tf.layers.Layer`. The layer's depth
must match the depth of `memory_layer`. If `query_layer` is not
provided, the shape of `query` must match that of `memory_layer`.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
probability_fn: A `callable`. Converts the score and previous alignments
to probabilities. Its signature should be:
`probabilities = probability_fn(score, previous_alignments)`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
memory_layer: Instance of `tf.layers.Layer` (may be None). The layer's
depth must match the depth of `query_layer`.
If `memory_layer` is not provided, the shape of `memory` must match
that of `query_layer`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
name: Name to use when creating ops.
"""
if (query_layer is not None
and not isinstance(query_layer, layers_base.Layer)):
raise TypeError(
"query_layer is not a Layer: %s" % type(query_layer).__name__)
if (memory_layer is not None
and not isinstance(memory_layer, layers_base.Layer)):
raise TypeError(
"memory_layer is not a Layer: %s" % type(memory_layer).__name__)
self._query_layer = query_layer
self._memory_layer = memory_layer
self.dtype = memory_layer.dtype
if not callable(probability_fn):
raise TypeError("probability_fn must be callable, saw type: %s" %
type(probability_fn).__name__)
if score_mask_value is None:
score_mask_value = dtypes.as_dtype(self._memory_layer.dtype).as_numpy_dtype(-np.inf)
self._probability_fn = lambda score, prev: ( # pylint:disable=g-long-lambda
probability_fn(
_maybe_mask_score(score, memory_sequence_length, score_mask_value),
prev))
with ops.name_scope(
name, "BaseAttentionMechanismInit", nest.flatten(memory)):
self._values = _prepare_memory(
memory, memory_sequence_length,
check_inner_dims_defined=check_inner_dims_defined)
self._keys = (
self.memory_layer(self._values) if self.memory_layer # pylint: disable=not-callable
else self._values)
self._batch_size = (
self._keys.shape[0].value or array_ops.shape(self._keys)[0])
self._alignments_size = (self._keys.shape[1].value or
array_ops.shape(self._keys)[1])
@property
def memory_layer(self):
return self._memory_layer
@property
def query_layer(self):
return self._query_layer
@property
def values(self):
return self._values
@property
def keys(self):
return self._keys
@property
def batch_size(self):
return self._batch_size
@property
def alignments_size(self):
return self._alignments_size
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return a tensor of all zeros.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return _zero_state_tensors(max_time, batch_size, dtype)
def _luong_score(query, keys, scale):
"""Implements Luong-style (multiplicative) scoring function.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
"Effective Approaches to Attention-based Neural Machine Translation."
EMNLP 2015. https://arxiv.org/abs/1508.04025
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, call this function with `scale=True`.
Args:
query: Tensor, shape `[batch_size, num_units]` to compare to keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
scale: Whether to apply a scale to the score function.
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
Raises:
ValueError: If `keys` and `query` depths do not match.
"""
depth = query.get_shape()[-1]
key_units = keys.get_shape()[-1]
if depth != key_units:
raise ValueError(
"Incompatible or unknown inner dimensions between query and keys. "
"Query (%s) has units: %s. Keys (%s) have units: %s. "
"Perhaps you need to set num_units to the keys' dimension (%s)?"
% (query, depth, keys, key_units, key_units))
dtype = query.dtype
# Reshape from [batch_size, depth] to [batch_size, 1, depth]
# for matmul.
query = array_ops.expand_dims(query, 1)
# Inner product along the query units dimension.
# matmul shapes: query is [batch_size, 1, depth] and
# keys is [batch_size, max_time, depth].
# the inner product is asked to **transpose keys' inner shape** to get a
# batched matmul on:
# [batch_size, 1, depth] . [batch_size, depth, max_time]
# resulting in an output shape of:
# [batch_size, 1, max_time].
# we then squeeze out the center singleton dimension.
score = math_ops.matmul(query, keys, transpose_b=True)
score = array_ops.squeeze(score, [1])
if scale:
# Scalar used in weight scaling
g = variable_scope.get_variable(
"attention_g", dtype=dtype, initializer=1.)
score = g * score
return score
class LuongAttention(_BaseAttentionMechanism):
"""Implements Luong-style (multiplicative) attention scoring.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
"Effective Approaches to Attention-based Neural Machine Translation."
EMNLP 2015. https://arxiv.org/abs/1508.04025
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, construct the object with parameter
`scale=True`.
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
scale=False,
probability_fn=None,
score_mask_value=None,
dtype=None,
name="LuongAttention"):
"""Construct the AttentionMechanism mechanism.
Args:
num_units: The depth of the attention mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length: (optional) Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is @{tf.nn.softmax}. Other options include
@{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
dtype: The data type for the memory layer of the attention mechanism.
name: Name to use when creating ops.
"""
# For LuongAttention, we only transform the memory layer; thus
# num_units **must** match the expected query depth.
if probability_fn is None:
probability_fn = nn_ops.softmax
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = lambda score, _: probability_fn(score)
super(LuongAttention, self).__init__(
query_layer=None,
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._scale = scale
self._name = name
def __call__(self, query, previous_alignments):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
previous_alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "luong_attention", [query]):
score = _luong_score(query, self._keys, self._scale)
alignments = self._probability_fn(score, previous_alignments)
return alignments
def _bahdanau_score(processed_query, keys, normalize):
"""Implements Bahdanau-style (additive) scoring function.
This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, set `normalize=True`.
Args:
processed_query: Tensor, shape `[batch_size, num_units]` to compare to keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
normalize: Whether to normalize the score function.
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
"""
dtype = processed_query.dtype
# Get the number of hidden units from the trailing dimension of keys
num_units = keys.shape[2].value or array_ops.shape(keys)[2]
# Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting.
processed_query = array_ops.expand_dims(processed_query, 1)
v = variable_scope.get_variable(
"attention_v", [num_units], dtype=dtype)
if normalize:
# Scalar used in weight normalization
g = variable_scope.get_variable(
"attention_g", dtype=dtype,
initializer=math.sqrt((1. / num_units)))
# Bias added prior to the nonlinearity
b = variable_scope.get_variable(
"attention_b", [num_units], dtype=dtype,
initializer=init_ops.zeros_initializer())
# normed_v = g * v / ||v||
normed_v = g * v * math_ops.rsqrt(
math_ops.reduce_sum(math_ops.square(v)))
return math_ops.reduce_sum(
normed_v * math_ops.tanh(keys + processed_query + b), [2])
else:
return math_ops.reduce_sum(v * math_ops.tanh(keys + processed_query), [2])
class BahdanauAttention(_BaseAttentionMechanism):
"""Implements Bahdanau-style (additive) attention.
This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, construct the object with parameter
`normalize=True`.
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
normalize=False,
probability_fn=None,
score_mask_value=None,
dtype=None,
name="BahdanauAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is @{tf.nn.softmax}. Other options include
@{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
if probability_fn is None:
probability_fn = nn_ops.softmax
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = lambda score, _: probability_fn(score)
super(BahdanauAttention, self).__init__(
query_layer=layers_core.Dense(
num_units, name="query_layer", use_bias=False, dtype=dtype),
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._normalize = normalize
self._name = name
def __call__(self, query, previous_alignments):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
previous_alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "bahdanau_attention", [query]):
processed_query = self.query_layer(query) if self.query_layer else query
score = _bahdanau_score(processed_query, self._keys, self._normalize)
alignments = self._probability_fn(score, previous_alignments)
return alignments
def safe_cumprod(x, *args, **kwargs):
"""Computes cumprod of x in logspace using cumsum to avoid underflow.
The cumprod function and its gradient can result in numerical instabilities
when its argument has very small and/or zero values. As long as the argument
is all positive, we can instead compute the cumulative product as
exp(cumsum(log(x))). This function can be called identically to tf.cumprod.
Args:
x: Tensor to take the cumulative product of.
*args: Passed on to cumsum; these are identical to those in cumprod.
**kwargs: Passed on to cumsum; these are identical to those in cumprod.
Returns:
Cumulative product of x.
"""
with ops.name_scope(None, "SafeCumprod", [x]):
x = ops.convert_to_tensor(x, name="x")
tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
return math_ops.exp(math_ops.cumsum(
math_ops.log(clip_ops.clip_by_value(x, tiny, 1)), *args, **kwargs))
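# Illustrative sketch (not part of the original module): for strictly positive
# inputs safe_cumprod agrees with tf.cumprod, since exp(cumsum(log(x))) equals
# cumprod(x), while the log-space form avoids underflow in the gradient when
# entries are very small. The numeric values below are a hypothetical example.
#
#   x = ops.convert_to_tensor([0.5, 0.4, 0.3])
#   safe_cumprod(x)          # ~[0.5, 0.2, 0.06]
#   math_ops.cumprod(x)      # same values, but less stable for tiny entries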
def monotonic_attention(p_choose_i, previous_attention, mode):
"""Compute monotonic attention distribution from choosing probabilities.
Monotonic attention implies that the input sequence is processed in an
explicitly left-to-right manner when generating the output sequence. In
addition, once an input sequence element is attended to at a given output
timestep, elements occurring before it cannot be attended to at subsequent
output timesteps. This function generates attention distributions according
to these assumptions. For more information, see ``Online and Linear-Time
Attention by Enforcing Monotonic Alignments''.
Args:
p_choose_i: Probability of choosing input sequence/memory element i. Should
be of shape (batch_size, input_sequence_length), and should all be in the
range [0, 1].
previous_attention: The attention distribution from the previous output
timestep. Should be of shape (batch_size, input_sequence_length). For
the first output timestep, previous_attention[n] should be [1, 0, 0, ...,
0] for all n in [0, ... batch_size - 1].
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'.
* 'recursive' uses tf.scan to recursively compute the distribution.
This is slowest but is exact, general, and does not suffer from
numerical instabilities.
* 'parallel' uses parallelized cumulative-sum and cumulative-product
operations to compute a closed-form solution to the recurrence
relation defining the attention distribution. This makes it more
efficient than 'recursive', but it requires numerical checks which
make the distribution non-exact. This can be a problem in particular
when input_sequence_length is long and/or p_choose_i has entries very
close to 0 or 1.
* 'hard' requires that the probabilities in p_choose_i are all either 0
or 1, and subsequently uses a more efficient and exact solution.
Returns:
A tensor of shape (batch_size, input_sequence_length) representing the
attention distributions for each sequence in the batch.
Raises:
ValueError: mode is not one of 'recursive', 'parallel', 'hard'.
"""
# Force things to be tensors
p_choose_i = ops.convert_to_tensor(p_choose_i, name="p_choose_i")
previous_attention = ops.convert_to_tensor(
previous_attention, name="previous_attention")
if mode == "recursive":
# Use .shape[0].value when it's not None, or fall back on symbolic shape
batch_size = p_choose_i.shape[0].value or array_ops.shape(p_choose_i)[0]
# Compute [1, 1 - p_choose_i[0], 1 - p_choose_i[1], ..., 1 - p_choose_i[-2]]
shifted_1mp_choose_i = array_ops.concat(
[array_ops.ones((batch_size, 1)), 1 - p_choose_i[:, :-1]], 1)
# Compute attention distribution recursively as
# q[i] = (1 - p_choose_i[i])*q[i - 1] + previous_attention[i]
# attention[i] = p_choose_i[i]*q[i]
attention = p_choose_i*array_ops.transpose(functional_ops.scan(
# Need to use reshape to remind TF of the shape between loop iterations
lambda x, yz: array_ops.reshape(yz[0]*x + yz[1], (batch_size,)),
# Loop variables yz[0] and yz[1]
[array_ops.transpose(shifted_1mp_choose_i),
array_ops.transpose(previous_attention)],
# Initial value of x is just zeros
array_ops.zeros((batch_size,))))
elif mode == "parallel":
# safe_cumprod computes cumprod in logspace with numeric checks
cumprod_1mp_choose_i = safe_cumprod(1 - p_choose_i, axis=1, exclusive=True)
# Compute recurrence relation solution
attention = p_choose_i*cumprod_1mp_choose_i*math_ops.cumsum(
previous_attention /
# Clip cumprod_1mp to avoid divide-by-zero
clip_ops.clip_by_value(cumprod_1mp_choose_i, 1e-10, 1.), axis=1)
elif mode == "hard":
# Remove any probabilities before the index chosen last time step
p_choose_i *= math_ops.cumsum(previous_attention, axis=1)
# Now, use exclusive cumprod to remove probabilities after the first
# chosen index, like so:
# p_choose_i = [0, 0, 0, 1, 1, 0, 1, 1]
# cumprod(1 - p_choose_i, exclusive=True) = [1, 1, 1, 1, 0, 0, 0, 0]
# Product of above: [0, 0, 0, 1, 0, 0, 0, 0]
attention = p_choose_i*math_ops.cumprod(
1 - p_choose_i, axis=1, exclusive=True)
else:
raise ValueError("mode must be 'recursive', 'parallel', or 'hard'.")
return attention
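# Illustrative sketch (not part of the original module): with mode='hard' and a
# binary p_choose_i, attention lands on the first memory entry whose choosing
# probability is 1 at or after the previously attended index. The values below
# are a hypothetical example.
#
#   p_choose_i         = ops.convert_to_tensor([[0., 0., 1., 1.]])
#   previous_attention = ops.convert_to_tensor([[1., 0., 0., 0.]])
#   monotonic_attention(p_choose_i, previous_attention, mode="hard")
#   # -> [[0., 0., 1., 0.]]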
def _monotonic_probability_fn(score, previous_alignments, sigmoid_noise, mode,
seed=None):
"""Attention probability function for monotonic attention.
Takes in unnormalized attention scores, adds pre-sigmoid noise to encourage
the model to make discrete attention decisions, passes them through a sigmoid
to obtain "choosing" probabilities, and then calls monotonic_attention to
obtain the attention distribution. For more information, see
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
Args:
score: Unnormalized attention scores, shape `[batch_size, alignments_size]`
previous_alignments: Previous attention distribution, shape
`[batch_size, alignments_size]`
sigmoid_noise: Standard deviation of pre-sigmoid noise. Setting this larger
than 0 will encourage the model to produce large attention scores,
effectively making the choosing probabilities discrete and the resulting
attention distribution one-hot. It should be set to 0 at test-time, and
when hard attention is not desired.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
seed: (optional) Random seed for pre-sigmoid noise.
Returns:
A `[batch_size, alignments_size]`-shape tensor corresponding to the
resulting attention distribution.
"""
# Optionally add pre-sigmoid noise to the scores
if sigmoid_noise > 0:
noise = random_ops.random_normal(array_ops.shape(score), dtype=score.dtype,
seed=seed)
score += sigmoid_noise*noise
# Compute "choosing" probabilities from the attention scores
if mode == "hard":
# When mode is hard, use a hard sigmoid
p_choose_i = math_ops.cast(score > 0, score.dtype)
else:
p_choose_i = math_ops.sigmoid(score)
# Convert from choosing probabilities to attention distribution
return monotonic_attention(p_choose_i, previous_alignments, mode)
class _BaseMonotonicAttentionMechanism(_BaseAttentionMechanism):
"""Base attention mechanism for monotonic attention.
Simply overrides the initial_alignments function to provide a dirac
distribution, which is needed in order for the monotonic attention
distributions to have the correct behavior.
"""
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the monotonic attentions.
Initializes to dirac distributions, i.e. [1, 0, 0, ...memory length..., 0]
for all entries in the batch.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return array_ops.one_hot(
array_ops.zeros((batch_size,), dtype=dtypes.int32), max_time,
dtype=dtype)
class BahdanauMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Bahadanau-style energy function.
This type of attention encorces a monotonic constraint on the attention
distributions; that is once the model attends to a given point in the memory
it can't attend to any prior points at subsequence output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
construct its attention distributions. Since the attention scores are passed
through a sigmoid, a learnable scalar bias parameter is applied after the
score function and before the sigmoid. Otherwise, it is equivalent to
BahdanauAttention. This approach is proposed in
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
normalize=False,
score_mask_value=None,
sigmoid_noise=0.,
sigmoid_noise_seed=None,
score_bias_init=0.,
mode="parallel",
dtype=None,
name="BahdanauMonotonicAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's recommended to
initialize this to a negative value when the length of the memory is
large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
# Set up the monotonic probability fn with supplied parameters
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn, sigmoid_noise=sigmoid_noise, mode=mode,
seed=sigmoid_noise_seed)
super(BahdanauMonotonicAttention, self).__init__(
query_layer=layers_core.Dense(
num_units, name="query_layer", use_bias=False, dtype=dtype),
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._normalize = normalize
self._name = name
self._score_bias_init = score_bias_init
def __call__(self, query, previous_alignments):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
previous_alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(
None, "bahdanau_monotonic_attention", [query]):
processed_query = self.query_layer(query) if self.query_layer else query
score = _bahdanau_score(processed_query, self._keys, self._normalize)
score_bias = variable_scope.get_variable(
"attention_score_bias", dtype=processed_query.dtype,
initializer=self._score_bias_init)
score += score_bias
alignments = self._probability_fn(score, previous_alignments)
return alignments
class LuongMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Luong-style energy function.
This type of attention enforces a monotonic constraint on the attention
distributions; that is, once the model attends to a given point in the memory
it can't attend to any prior points at subsequent output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
construct its attention distributions. Otherwise, it is equivalent to
LuongAttention. This approach is proposed in
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
scale=False,
score_mask_value=None,
sigmoid_noise=0.,
sigmoid_noise_seed=None,
score_bias_init=0.,
mode="parallel",
dtype=None,
name="LuongMonotonicAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's recommended to
initialize this to a negative value when the length of the memory is
large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
# Set up the monotonic probability fn with supplied parameters
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn, sigmoid_noise=sigmoid_noise, mode=mode,
seed=sigmoid_noise_seed)
super(LuongMonotonicAttention, self).__init__(
query_layer=layers_core.Dense(
num_units, name="query_layer", use_bias=False, dtype=dtype),
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._scale = scale
self._score_bias_init = score_bias_init
self._name = name
def __call__(self, query, previous_alignments):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
previous_alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "luong_monotonic_attention",
[query]):
score = _luong_score(query, self._keys, self._scale)
score_bias = variable_scope.get_variable(
"attention_score_bias", dtype=query.dtype,
initializer=self._score_bias_init)
score += score_bias
alignments = self._probability_fn(score, previous_alignments)
return alignments
class AttentionWrapperState(
collections.namedtuple("AttentionWrapperState",
("cell_state", "attention", "time", "alignments",
"alignment_history"))):
"""`namedtuple` storing the state of a `AttentionWrapper`.
Contains:
- `cell_state`: The state of the wrapped `RNNCell` at the previous time
step.
- `attention`: The attention emitted at the previous time step.
- `time`: int32 scalar containing the current time step.
- `alignments`: A single or tuple of `Tensor`(s) containing the alignments
emitted at the previous time step for each attention mechanism.
- `alignment_history`: (if enabled) a single or tuple of `TensorArray`(s)
containing alignment matrices from all time steps for each attention
mechanism. Call `stack()` on each to convert to a `Tensor`.
"""
def clone(self, **kwargs):
"""Clone this object, overriding components provided by kwargs.
Example:
```python
initial_state = attention_wrapper.zero_state(dtype=..., batch_size=...)
initial_state = initial_state.clone(cell_state=encoder_state)
```
Args:
**kwargs: Any properties of the state object to replace in the returned
`AttentionWrapperState`.
Returns:
A new `AttentionWrapperState` whose properties are the same as
this one, except any overridden properties as provided in `kwargs`.
"""
return super(AttentionWrapperState, self)._replace(**kwargs)
def hardmax(logits, name=None):
"""Returns batched one-hot vectors.
The depth index containing the `1` is that of the maximum logit value.
Args:
logits: A batch tensor of logit values.
name: Name to use when creating ops.
Returns:
A batched one-hot tensor.
"""
with ops.name_scope(name, "Hardmax", [logits]):
logits = ops.convert_to_tensor(logits, name="logits")
if logits.get_shape()[-1].value is not None:
depth = logits.get_shape()[-1].value
else:
depth = array_ops.shape(logits)[-1]
return array_ops.one_hot(
math_ops.argmax(logits, -1), depth, dtype=logits.dtype)
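# Illustrative sketch (not part of the original module): hardmax turns each row
# of logits into a one-hot vector at the argmax position.
#
#   hardmax(ops.convert_to_tensor([[1., 3., 2.]]))   # -> [[0., 1., 0.]]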
def _compute_attention(attention_mechanism, cell_output, previous_alignments,
attention_layer):
"""Computes the attention and alignments for a given attention_mechanism."""
alignments = attention_mechanism(
cell_output, previous_alignments=previous_alignments)
# Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]
expanded_alignments = array_ops.expand_dims(alignments, 1)
# Context is the inner product of alignments and values along the
# memory time dimension.
# alignments shape is
# [batch_size, 1, memory_time]
# attention_mechanism.values shape is
# [batch_size, memory_time, memory_size]
# the batched matmul is over memory_time, so the output shape is
# [batch_size, 1, memory_size].
# we then squeeze out the singleton dim.
context = math_ops.matmul(expanded_alignments, attention_mechanism.values)
context = array_ops.squeeze(context, [1])
if attention_layer is not None:
attention = attention_layer(array_ops.concat([cell_output, context], 1))
else:
attention = context
return attention, alignments
class AttentionWrapper(rnn_cell_impl.RNNCell):
"""Wraps another `RNNCell` with attention.
"""
def __init__(self,
cell,
attention_mechanism,
attention_layer_size=None,
alignment_history=False,
cell_input_fn=None,
output_attention=True,
initial_cell_state=None,
name=None):
"""Construct the `AttentionWrapper`.
**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
@{tf.contrib.seq2seq.tile_batch} (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
attention_mechanism = MyFavoriteAttentionMechanism(
num_units=attention_depth,
memory=tiled_encoder_outputs,
memory_sequence_length=tiled_sequence_length)
attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
decoder_initial_state = attention_cell.zero_state(
dtype, batch_size=true_batch_size * beam_width)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tiled_encoder_final_state)
```
Args:
cell: An instance of `RNNCell`.
attention_mechanism: A list of `AttentionMechanism` instances or a single
instance.
attention_layer_size: A list of Python integers or a single Python
integer, the depth of the attention (output) layer(s). If None
(default), use the context as attention at each time step. Otherwise,
feed the context and cell output into the attention layer to generate
attention at each time step. If attention_mechanism is a list,
attention_layer_size must be a list of the same length.
alignment_history: Python boolean, whether to store alignment history
from all time steps in the final output state (currently stored as a
time major `TensorArray` on which you must call `stack()`).
cell_input_fn: (optional) A `callable`. The default is:
`lambda inputs, attention: array_ops.concat([inputs, attention], -1)`.
output_attention: Python bool. If `True` (default), the output at each
time step is the attention value. This is the behavior of Luong-style
attention mechanisms. If `False`, the output at each time step is
the output of `cell`. This is the behavior of Bahdanau-style
attention mechanisms. In both cases, the `attention` tensor is
propagated to the next time step via the state and is used there.
This flag only controls whether the attention mechanism is propagated
up to the next cell in an RNN stack or to the top RNN output.
initial_cell_state: The initial state value to use for the cell when
the user calls `zero_state()`. Note that if this value is provided
now, and the user uses a `batch_size` argument of `zero_state` which
does not match the batch size of `initial_cell_state`, proper
behavior is not guaranteed.
name: Name to use when creating ops.
Raises:
TypeError: `attention_layer_size` is not None and (`attention_mechanism`
is a list but `attention_layer_size` is not; or vice versa).
ValueError: if `attention_layer_size` is not None, `attention_mechanism`
is a list, and its length does not match that of `attention_layer_size`.
"""
super(AttentionWrapper, self).__init__(name=name)
if not rnn_cell_impl._like_rnncell(cell): # pylint: disable=protected-access
raise TypeError(
"cell must be an RNNCell, saw type: %s" % type(cell).__name__)
if isinstance(attention_mechanism, (list, tuple)):
self._is_multi = True
attention_mechanisms = attention_mechanism
for attention_mechanism in attention_mechanisms:
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must contain only instances of "
"AttentionMechanism, saw type: %s"
% type(attention_mechanism).__name__)
else:
self._is_multi = False
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must be an AttentionMechanism or list of "
"multiple AttentionMechanism instances, saw type: %s"
% type(attention_mechanism).__name__)
attention_mechanisms = (attention_mechanism,)
if cell_input_fn is None:
cell_input_fn = (
lambda inputs, attention: array_ops.concat([inputs, attention], -1))
else:
if not callable(cell_input_fn):
raise TypeError(
"cell_input_fn must be callable, saw type: %s"
% type(cell_input_fn).__name__)
if attention_layer_size is not None:
attention_layer_sizes = tuple(
attention_layer_size
if isinstance(attention_layer_size, (list, tuple))
else (attention_layer_size,))
if len(attention_layer_sizes) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer_size must contain exactly one "
"integer per attention_mechanism, saw: %d vs %d"
% (len(attention_layer_sizes), len(attention_mechanisms)))
self._attention_layers = tuple(
layers_core.Dense(
attention_layer_size, name="attention_layer", use_bias=False,
dtype=attention_mechanisms[i].dtype)
for i, attention_layer_size in enumerate(attention_layer_sizes))
self._attention_layer_size = sum(attention_layer_sizes)
else:
self._attention_layers = None
self._attention_layer_size = sum(
attention_mechanism.values.get_shape()[-1].value
for attention_mechanism in attention_mechanisms)
self._cell = cell
self._attention_mechanisms = attention_mechanisms
self._cell_input_fn = cell_input_fn
self._output_attention = output_attention
self._alignment_history = alignment_history
with ops.name_scope(name, "AttentionWrapperInit"):
if initial_cell_state is None:
self._initial_cell_state = None
else:
final_state_tensor = nest.flatten(initial_cell_state)[-1]
state_batch_size = (
final_state_tensor.shape[0].value
or array_ops.shape(final_state_tensor)[0])
error_message = (
"When constructing AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and initial_cell_state. Are you using "
"the BeamSearchDecoder? You may need to tile your initial state "
"via the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with ops.control_dependencies(
self._batch_size_checks(state_batch_size, error_message)):
self._initial_cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="check_initial_cell_state"),
initial_cell_state)
def _batch_size_checks(self, batch_size, error_message):
return [check_ops.assert_equal(batch_size,
attention_mechanism.batch_size,
message=error_message)
for attention_mechanism in self._attention_mechanisms]
def _item_or_tuple(self, seq):
"""Returns `seq` as tuple or the singular element.
Which is returned is determined by how the AttentionMechanism(s) were passed
to the constructor.
Args:
seq: A non-empty sequence of items or generator.
Returns:
Either the values in the sequence as a tuple if AttentionMechanism(s)
were passed to the constructor as a sequence or the singular element.
"""
t = tuple(seq)
if self._is_multi:
return t
else:
return t[0]
@property
def output_size(self):
if self._output_attention:
return self._attention_layer_size
else:
return self._cell.output_size
@property
def state_size(self):
"""The `state_size` property of `AttentionWrapper`.
Returns:
An `AttentionWrapperState` tuple containing shapes used by this object.
"""
return AttentionWrapperState(
cell_state=self._cell.state_size,
time=tensor_shape.TensorShape([]),
attention=self._attention_layer_size,
alignments=self._item_or_tuple(
a.alignments_size for a in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
() for _ in self._attention_mechanisms)) # sometimes a TensorArray
def zero_state(self, batch_size, dtype):
"""Return an initial (zero) state tuple for this `AttentionWrapper`.
**NOTE** Please see the initializer documentation for details of how
to call `zero_state` if using an `AttentionWrapper` with a
`BeamSearchDecoder`.
Args:
batch_size: `0D` integer tensor: the batch size.
dtype: The internal state data type.
Returns:
An `AttentionWrapperState` tuple containing zeroed out tensors and,
possibly, empty `TensorArray` objects.
Raises:
ValueError: (or, possibly at runtime, InvalidArgument), if
`batch_size` does not match the output size of the encoder passed
to the wrapper object at initialization time.
"""
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._initial_cell_state is not None:
cell_state = self._initial_cell_state
else:
cell_state = self._cell.zero_state(batch_size, dtype)
error_message = (
"When calling zero_state of AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and the requested batch size. Are you using "
"the BeamSearchDecoder? If so, make sure your encoder output has "
"been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
"the batch_size= argument passed to zero_state is "
"batch_size * beam_width.")
with ops.control_dependencies(
self._batch_size_checks(batch_size, error_message)):
cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="checked_cell_state"),
cell_state)
return AttentionWrapperState(
cell_state=cell_state,
time=array_ops.zeros([], dtype=dtypes.int32),
attention=_zero_state_tensors(self._attention_layer_size, batch_size,
dtype),
alignments=self._item_or_tuple(
attention_mechanism.initial_alignments(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
tensor_array_ops.TensorArray(dtype=dtype, size=0,
dynamic_size=True)
if self._alignment_history else ()
for _ in self._attention_mechanisms))
def call(self, inputs, state):
"""Perform a step of attention-wrapped RNN.
- Step 1: Mix the `inputs` and previous step's `attention` output via
`cell_input_fn`.
- Step 2: Call the wrapped `cell` with this input and its previous state.
- Step 3: Score the cell's output with `attention_mechanism`.
- Step 4: Calculate the alignments by passing the score through the
`normalizer`.
- Step 5: Calculate the context vector as the inner product between the
alignments and the attention_mechanism's values (memory).
- Step 6: Calculate the attention output by concatenating the cell output
and context through the attention layer (a linear layer with
`attention_layer_size` outputs).
Args:
inputs: (Possibly nested tuple of) Tensor, the input at this time step.
state: An instance of `AttentionWrapperState` containing
tensors from the previous time step.
Returns:
A tuple `(attention_or_cell_output, next_state)`, where:
- `attention_or_cell_output` depending on `output_attention`.
- `next_state` is an instance of `AttentionWrapperState`
containing the state calculated at this time step.
Raises:
TypeError: If `state` is not an instance of `AttentionWrapperState`.
"""
if not isinstance(state, AttentionWrapperState):
raise TypeError("Expected state to be instance of AttentionWrapperState. "
"Received type %s instead." % type(state))
# Step 1: Calculate the true inputs to the cell based on the
# previous attention value.
cell_inputs = self._cell_input_fn(inputs, state.attention)
cell_state = state.cell_state
cell_output, next_cell_state = self._cell(cell_inputs, cell_state)
cell_batch_size = (
cell_output.shape[0].value or array_ops.shape(cell_output)[0])
error_message = (
"When applying AttentionWrapper %s: " % self.name +
"Non-matching batch sizes between the memory "
"(encoder output) and the query (decoder output). Are you using "
"the BeamSearchDecoder? You may need to tile your memory input via "
"the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with ops.control_dependencies(
self._batch_size_checks(cell_batch_size, error_message)):
cell_output = array_ops.identity(
cell_output, name="checked_cell_output")
if self._is_multi:
previous_alignments = state.alignments
previous_alignment_history = state.alignment_history
else:
previous_alignments = [state.alignments]
previous_alignment_history = [state.alignment_history]
all_alignments = []
all_attentions = []
all_histories = []
for i, attention_mechanism in enumerate(self._attention_mechanisms):
attention, alignments = _compute_attention(
attention_mechanism, cell_output, previous_alignments[i],
self._attention_layers[i] if self._attention_layers else None)
alignment_history = previous_alignment_history[i].write(
state.time, alignments) if self._alignment_history else ()
all_alignments.append(alignments)
all_histories.append(alignment_history)
all_attentions.append(attention)
attention = array_ops.concat(all_attentions, 1)
next_state = AttentionWrapperState(
time=state.time + 1,
cell_state=next_cell_state,
attention=attention,
alignments=self._item_or_tuple(all_alignments),
alignment_history=self._item_or_tuple(all_histories))
if self._output_attention:
return attention, next_state
else:
return cell_output, next_state
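# Illustrative usage sketch (not part of the original module): wiring a Luong
# attention mechanism around an LSTM cell. `encoder_outputs`, `source_lengths`,
# `num_units` and `batch_size` are assumed placeholders provided by the caller.
#
#   attention_mechanism = LuongAttention(
#       num_units=num_units,
#       memory=encoder_outputs,
#       memory_sequence_length=source_lengths)
#   attention_cell = AttentionWrapper(
#       rnn_cell_impl.BasicLSTMCell(num_units),
#       attention_mechanism,
#       attention_layer_size=num_units)
#   initial_state = attention_cell.zero_state(batch_size, dtypes.float32)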
| horance-liu/tensorflow | tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py | Python | apache-2.0 | 57,798 | ["DIRAC"] | 2de2a74051e2fad4328045e451e5ec2d7961795c6181893cbc57049a07e6d2de |
import tempfile
import cgi
from .base import Cleaver
from .compat import urlencode, parse_qsl
from .backend import CleaverBackend
from .identity import CleaverIdentityProvider
class SplitMiddleware(object):
def __init__(self, app, identity, backend, environ_key='cleaver',
allow_override=False, count_humans_only=False,
human_callback_token='__cleaver_human_verification__'):
"""
Makes a Cleaver instance available every request under
``environ['cleaver']``.
:param identity: any implementation of
``identity.CleaverIdentityProvider`` or
a callable that emulates
``identity.CleaverIdentityProvider.get_identity``.
:param backend: any implementation of
``cleaver.backend.CleaverBackend``
:param environ_key: location where the Cleaver instance will be keyed in
the WSGI environ
:param allow_override: when True, specific variants can be overridden via
the request query string, e.g.,
http://mypythonapp.com?cleaver:button_size=small
Especially useful for tests and QA.
:param count_humans_only: when False, every request (including those
originating from bots and web crawlers) is
treated as a unique visit (defaults to False).
:param human_callback_token: when ``count_humans_only`` is True, this
token in the URL will trigger a simple
verification process for humans.
"""
self.app = app
if not isinstance(identity, CleaverIdentityProvider) and \
not callable(identity):
raise RuntimeError(
'%s must be callable or implement '
'cleaver.identity.CleaverIdentityProvider' % identity
)
if not isinstance(backend, CleaverBackend):
raise RuntimeError(
'%s must implement cleaver.backend.CleaverBackend' % backend
)
self._identity = identity
self._backend = backend
self.environ_key = environ_key
self.allow_override = allow_override
self.count_humans_only = count_humans_only
self.human_callback_token = human_callback_token
def __call__(self, environ, start_response):
cleaver = Cleaver(
environ,
self._identity,
self._backend,
count_humans_only=self.count_humans_only
)
environ[self.environ_key] = cleaver
if self.allow_override:
self._handle_variant_overrides(environ)
#
# If human verification is required and this request represents
# a valid AJAX callback (which bots aren't generally capable of), then
# mark the visitor as human.
#
if self.count_humans_only and \
environ.get('REQUEST_METHOD', '') == 'POST' and \
self.human_callback_token in environ.get('PATH_INFO', ''):
fp, length = SplitMiddleware._copy_body_to_tempfile(environ)
environ.setdefault('CONTENT_LENGTH', length)
fs = cgi.FieldStorage(
fp=fp,
environ=environ,
keep_blank_values=True
)
try:
try:
x = int(fs.getlist('x')[0])
except (IndexError, ValueError):
x = 0
try:
y = int(fs.getlist('y')[0])
except (IndexError, ValueError):
y = 0
try:
z = int(fs.getlist('z')[0])
except (IndexError, ValueError):
z = 0
# The AJAX call will include three POST arguments, X, Y, and Z
#
# Part of the "not a robot test" is validating that X + Y = Z
# (most web crawlers won't perform complicated Javascript
# execution like math and HTTP callbacks, because it's just too
# expensive at scale)
if x and y and z and x + y == z:
# Mark the visitor as a human
self._backend.mark_human(cleaver.identity)
# If the visitor has been assigned any experiment variants,
# tally their participation.
for e in self._backend.all_experiments():
variant = self._backend.get_variant(
cleaver.identity,
e.name
)
if variant:
self._backend.mark_participant(e.name, variant)
start_response(
'204 No Content',
[('Content-Type', 'text/plain')]
)
return []
except (KeyError, ValueError):
pass
start_response(
'401 Unauthorized',
[('Content-Type', 'text/plain')]
)
return []
return self.app(environ, start_response)
def _handle_variant_overrides(self, environ):
# Parse the QUERY_STRING into a dictionary, and make an editable copy
parsed = dict(parse_qsl(environ.get('QUERY_STRING', '')))
qs = parsed.copy()
# For each key that starts with cleaver: ...
for k in parsed:
if k.startswith('cleaver:'):
# Store the key -> value in ``environ['cleaver.override']``
# and remove it from the editable ``qs`` copy.
environ.setdefault('cleaver.override', {})[
k.split('cleaver:')[1]
] = qs.pop(k)
# If any overridden variables were changed, re-encode QUERY_STRING so
# that the next WSGI layer doesn't see the parsed ``cleaver:``
# arguments.
if 'cleaver.override' in environ:
environ['QUERY_STRING'] = urlencode(qs)
@classmethod
def _copy_body_to_tempfile(cls, environ):
"""
Copy wsgi.input to a tempfile so it can be reused.
"""
try:
length = int(environ.get('CONTENT_LENGTH', 0))
except ValueError:
length = 0
try:
fileobj = tempfile.SpooledTemporaryFile(1024*1024)
except AttributeError: # pragma: nocover
fileobj = tempfile.TemporaryFile() # py25 fallback
if length:
remaining = length
while remaining > 0:
data = environ['wsgi.input'].read(min(remaining, 65536))
if not data:
raise IOError(
"Client disconnected (%s more bytes were expected)"
% remaining
)
fileobj.write(data)
remaining -= len(data)
fileobj.seek(0)
environ['wsgi.input'] = fileobj
return fileobj, length
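# Illustrative usage sketch (not part of the original module): wrapping a WSGI
# app with SplitMiddleware. The backend and identity-provider classes below are
# assumed from Cleaver's documented extras and may differ per installation.
#
#   from cleaver.backend.db import SQLAlchemyBackend
#   from cleaver.identity.cookie import CookieIdentityProvider
#
#   app = SplitMiddleware(
#       app,
#       CookieIdentityProvider('some_secret_string'),
#       SQLAlchemyBackend('sqlite:///experiments.db')
#   )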
| ryanpetrello/cleaver | cleaver/middleware.py | Python | bsd-3-clause | 7,263 | ["VisIt"] | 7287a028167272df0a800db226bd671af3ae78a9cc2c42fdc85a8a5eff231702 |
"""
Module for plotting and analysis of reaction/diffusion-related results
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
try:
basestring
except NameError:
basestring = str
from future import standard_library
standard_library.install_aliases()
from netpyne import __gui__
if __gui__:
import matplotlib.pyplot as plt
from matplotlib_scalebar.scalebar import ScaleBar
from .utils import exception, _showFigure, _saveFigData
# -------------------------------------------------------------------------------------------------------------------
## Plot RxD concentration
# -------------------------------------------------------------------------------------------------------------------
@exception
def plotRxDConcentration(speciesLabel, regionLabel, plane='xy', figSize=(5,10), clim=None, fontSize=10, scalebar=False, title=True, showFig=True, saveFig=True, **kwargs):
"""
Function to plot reaction-diffusion concentrations
Parameters
----------
speciesLabel : str
Label of the RxD species whose concentration is plotted
**Default:** *required*
regionLabel : str
Label of the RxD region over which the concentration is shown
**Default:** *required*
plane : str
Plane to project onto; the concentration is averaged over the remaining axis
**Default:** ``'xy'``
**Options:** ``'xy'``, ``'xz'``
figSize : tuple
Figure size as (width, height) in inches
**Default:** ``(5, 10)``
clim : list
Color axis limits as [min, max]; if ``None`` they are set automatically
**Default:** ``None``
fontSize : int
Font size used throughout the figure
**Default:** ``10``
scalebar : bool
Whether to hide the axes and add a scale bar instead
**Default:** ``False``
title : bool
Whether to add a title to the figure
**Default:** ``True``
showFig : bool
Whether to display the figure
**Default:** ``True``
saveFig : bool or str
Whether to save the figure; a string is used as the file name, and ``'movie'`` includes the current simulation time in the name
**Default:** ``True``
"""
from .. import sim
print('Plotting RxD concentration ...')
# set font size
plt.rcParams.update({'font.size': fontSize})
species = sim.net.rxd['species'][speciesLabel]['hObj']
region = sim.net.rxd['regions'][regionLabel]['hObj']
plane2mean = {'xz': 1, 'xy': 2}
extent = []
extent.append(sim.net.rxd['regions'][regionLabel][plane[0] + 'lo'])
extent.append(sim.net.rxd['regions'][regionLabel][plane[0] + 'hi'])
extent.append(sim.net.rxd['regions'][regionLabel][plane[1] + 'lo'])
extent.append(sim.net.rxd['regions'][regionLabel][plane[1] + 'hi'])
vmin = None
vmax = None
if clim is not None:
vmin = clim[0]
vmax = clim[1]
fig = plt.figure(figsize=figSize)
plt.imshow(species[region].states3d[:].mean(plane2mean[plane]).T, interpolation='nearest', origin='upper', extent=extent, vmin=vmin, vmax=vmax)
import numpy as np
print(' min:', np.min(species[region].states3d[:].mean(plane2mean[plane]).T))
print(' max:', np.max(species[region].states3d[:].mean(plane2mean[plane]).T))
ax = plt.gca()
if scalebar:
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
sb = ScaleBar(1e-6)
sb.location = 'lower left'
ax.add_artist(sb)
cb = plt.colorbar(label='[' + species.name + '] (mM)')
plt.xlabel(plane[0] + ' location (um)')
plt.ylabel(plane[1] + ' location (um)')
if saveFig == 'movie':
from neuron import h
cb.ax.set_title('Time = ' + str(round(h.t, 1)), fontsize=fontSize)
if title:
plt.title('RxD: ' + species.name + ' concentration')
plt.tight_layout()
# show fig
if showFig: _showFigure()
# save figure
if saveFig:
if isinstance(saveFig, basestring):
if saveFig == 'movie':
filename = sim.cfg.filename + '_rxd_concentration_movie_' + str(round(h.t, 1)) + '.png'
else:
filename = saveFig
else:
filename = sim.cfg.filename + '_rxd_concentration.png'
plt.savefig(filename)
return fig, {'data': species[region].states3d[:].mean(plane2mean[plane])}
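# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module).  It assumes a
# netParams/simConfig pair with an RxD specification has already been built
# and simulated; the species and region labels ('ca', 'cyt') and the output
# file name are placeholders.
#
#   from netpyne import sim
#   sim.createSimulateAnalyze(netParams=netParams, simConfig=simConfig)
#   fig, data = plotRxDConcentration('ca', 'cyt', plane='xy', clim=[0, 1e-3],
#                                    saveFig='ca_concentration.png',
#                                    showFig=False)
# ---------------------------------------------------------------------------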
|
Neurosim-lab/netpyne
|
netpyne/analysis/rxd.py
|
Python
|
mit
| 4,498
|
[
"NEURON"
] |
38a8e1a08cf13fb27dc88efa58f6ec12854fc889ee099fa6b41531ff489f3fef
|
# -*- coding: utf-8 -*-
"""
NEST v2 implementation of the PyNN API.
:copyright: Copyright 2006-2016 by the PyNN team, see AUTHORS.
:license: CeCILL, see LICENSE for details.
"""
import numpy
import nest
import logging
from itertools import repeat
try:
xrange
except NameError: # Python 3
xrange = range
from pyNN import common, errors
from pyNN.space import Space
from . import simulator
from pyNN.random import RandomDistribution
from .standardmodels.synapses import StaticSynapse
from .conversion import make_sli_compatible
logger = logging.getLogger("PyNN")
def listify(obj):
if isinstance(obj, numpy.ndarray):
return obj.astype(float).tolist()
elif numpy.isscalar(obj):
return float(obj) # NEST chokes on numpy's float types
else:
return obj
class Projection(common.Projection):
__doc__ = common.Projection.__doc__
_simulator = simulator
_static_synapse_class = StaticSynapse
def __init__(self, presynaptic_population, postsynaptic_population,
connector, synapse_type=None, source=None, receptor_type=None,
space=Space(), label=None):
common.Projection.__init__(self, presynaptic_population, postsynaptic_population,
connector, synapse_type, source, receptor_type,
space, label)
self.nest_synapse_model = self.synapse_type._get_nest_synapse_model()
self.nest_synapse_label = Projection._nProj
self.synapse_type._set_tau_minus(self.post.local_cells)
self._sources = []
self._connections = None
# This is used to keep track of common synapse properties (to my
# knowledge they only become apparent once connections are created
# within nest --obreitwi, 13-02-14)
self._common_synapse_properties = {}
self._common_synapse_property_names = None
# Create connections
connector.connect(self)
self._set_tsodyks_params()
def __getitem__(self, i):
"""Return the `i`th connection on the local MPI node."""
if isinstance(i, int):
if i < len(self):
return simulator.Connection(self, i)
else:
raise IndexError("%d > %d" % (i, len(self) - 1))
elif isinstance(i, slice):
if i.stop < len(self):
return [simulator.Connection(self, j) for j in range(i.start, i.stop, i.step or 1)]
else:
raise IndexError("%d > %d" % (i.stop, len(self) - 1))
def __len__(self):
"""Return the number of connections on the local MPI node."""
local_nodes = nest.GetNodes([0], local_only=True)[0]
local_connections = nest.GetConnections(target=local_nodes,
synapse_model=self.nest_synapse_model,
synapse_label=self.nest_synapse_label)
return len(local_connections)
@property
def nest_connections(self):
if self._connections is None:
self._sources = numpy.unique(self._sources)
if self._sources.size > 0:
self._connections = nest.GetConnections(self._sources.tolist(),
synapse_model=self.nest_synapse_model,
synapse_label=self.nest_synapse_label)
else:
self._connections = []
return self._connections
@property
def connections(self):
"""
Returns an iterator over local connections in this projection, as `Connection` objects.
"""
return (simulator.Connection(self, i) for i in range(len(self)))
def _set_tsodyks_params(self):
if 'tsodyks' in self.nest_synapse_model:
# there should be a better way to do this.
# In particular, if the synaptic time constant is changed after
# creating the Projection, tau_psc ought to be changed as well.
assert self.receptor_type in ('excitatory', 'inhibitory'), "only basic synapse types support Tsodyks-Markram connections"
logger.debug("setting tau_psc")
if len(self.nest_connections) > 0:
targets = nest.GetStatus(self.nest_connections, 'target')
if self.receptor_type == 'inhibitory':
param_name = self.post.local_cells[0].celltype.translations['tau_syn_I']['translated_name']
if self.receptor_type == 'excitatory':
param_name = self.post.local_cells[0].celltype.translations['tau_syn_E']['translated_name']
tau_syn = nest.GetStatus(targets, param_name)
nest.SetStatus(self.nest_connections, 'tau_psc', tau_syn)
def _connect(self, rule_params, syn_params):
"""
Create connections by calling nest.Connect on the presynaptic and postsynaptic population
with the parameters provided by params.
"""
syn_params.update({'synapse_label': self.nest_synapse_label})
nest.Connect(self.pre.all_cells.astype(int).tolist(),
self.post.all_cells.astype(int).tolist(),
rule_params, syn_params)
self._sources = [cid[0] for cid in nest.GetConnections(synapse_model=self.nest_synapse_model,
synapse_label=self.nest_synapse_label)]
def _convergent_connect(self, presynaptic_indices, postsynaptic_index,
**connection_parameters):
"""
Connect a neuron to one or more other neurons with a static connection.
`sources` -- a 1D array of pre-synaptic cell IDs
`target` -- the ID of the post-synaptic cell.
TO UPDATE
"""
#logger.debug("Connecting to index %s from %s with %s" % (postsynaptic_index, presynaptic_indices, connection_parameters))
presynaptic_cells = self.pre.all_cells[presynaptic_indices]
postsynaptic_cell = self.post[postsynaptic_index]
assert presynaptic_cells.size == presynaptic_indices.size
assert len(presynaptic_cells) > 0, presynaptic_cells
weights = connection_parameters.pop('weight')
if self.receptor_type == 'inhibitory' and self.post.conductance_based:
weights *= -1 # NEST wants negative values for inhibitory weights, even if these are conductances
if hasattr(self.post, "celltype") and hasattr(self.post.celltype, "receptor_scale"): # this is a bit of a hack
weights *= self.post.celltype.receptor_scale # needed for the Izhikevich model
delays = connection_parameters.pop('delay')
# Create connections, with weights and delays
# Setting other connection parameters is done afterwards
if postsynaptic_cell.celltype.standard_receptor_type:
try:
if not numpy.isscalar(weights):
weights = numpy.array([weights])
if not numpy.isscalar(delays):
delays = numpy.array([delays])
syn_dict = {'model': self.nest_synapse_model,
'weight': weights, 'delay': delays,
'synapse_label': self.nest_synapse_label}
nest.Connect(presynaptic_cells.astype(int).tolist(),
[int(postsynaptic_cell)],
'all_to_all',
syn_dict)
except nest.NESTError as e:
errmsg = "%s. presynaptic_cells=%s, postsynaptic_cell=%s, weights=%s, delays=%s, synapse model='%s'" % (
e, presynaptic_cells, postsynaptic_cell,
weights, delays, self.nest_synapse_model)
raise errors.ConnectionError(errmsg)
else:
receptor_type = postsynaptic_cell.celltype.get_receptor_type(self.receptor_type)
if numpy.isscalar(weights):
weights = repeat(weights)
if numpy.isscalar(delays):
delays = repeat(delays)
for pre, w, d in zip(presynaptic_cells, weights, delays):
nest.Connect([pre], [postsynaptic_cell],
'one_to_one',
{'weight': w, 'delay': d, 'receptor_type': receptor_type,
'model': self.nest_synapse_model,
'synapse_label': self.nest_synapse_label})
# Book-keeping
self._connections = None # reset the caching of the connection list, since this will have to be recalculated
self._sources.extend(presynaptic_cells)
# Clean the connection parameters
connection_parameters.pop('tau_minus', None) # TODO: set tau_minus on the post-synaptic cells
connection_parameters.pop('dendritic_delay_fraction', None)
connection_parameters.pop('w_min_always_zero_in_NEST', None)
# We need to distinguish between common synapse parameters and local ones
# We just get the parameters of the first connection (is there an easier way?)
if self._common_synapse_property_names is None:
self._identify_common_synapse_properties()
# Set connection parameters other than weight and delay
if connection_parameters:
#logger.debug(connection_parameters)
sort_indices = numpy.argsort(presynaptic_cells)
connections = nest.GetConnections(source=numpy.unique(presynaptic_cells.astype(int)).tolist(),
target=[int(postsynaptic_cell)],
synapse_model=self.nest_synapse_model,
synapse_label=self.nest_synapse_label)
for name, value in connection_parameters.items():
value = make_sli_compatible(value)
if name not in self._common_synapse_property_names:
#logger.debug("Setting %s=%s for connections %s" % (name, value, connections))
if isinstance(value, numpy.ndarray):
# the str() is to work around a bug handling unicode names in SetStatus in NEST 2.4.1 when using Python 2
nest.SetStatus(connections, str(name), value[sort_indices].tolist())
else:
nest.SetStatus(connections, str(name), value)
else:
self._set_common_synapse_property(name, value)
def _identify_common_synapse_properties(self):
"""
Use a sample connection to distinguish between local and common
synapse properties.
"""
sample_connection = nest.GetConnections(source=[int(self._sources[0])],
synapse_model=self.nest_synapse_model,
synapse_label=self.nest_synapse_label)[:1]
local_parameters = nest.GetStatus(sample_connection)[0].keys()
all_parameters = nest.GetDefaults(self.nest_synapse_model).keys()
self._common_synapse_property_names = [name for name in all_parameters
if name not in local_parameters]
def _set_attributes(self, parameter_space):
parameter_space.evaluate(mask=(slice(None), self.post._mask_local)) # only columns for connections that exist on this machine
sources = numpy.unique(self._sources).tolist()
if self._common_synapse_property_names is None:
self._identify_common_synapse_properties()
for postsynaptic_cell, connection_parameters in zip(self.post.local_cells,
parameter_space.columns()):
connections = nest.GetConnections(source=sources,
target=[postsynaptic_cell],
synapse_model=self.nest_synapse_model,
synapse_label=self.nest_synapse_label)
if connections:
source_mask = self.pre.id_to_index([x[0] for x in connections])
for name, value in connection_parameters.items():
if name == "weight" and self.receptor_type == 'inhibitory' and self.post.conductance_based:
value *= -1 # NEST uses negative values for inhibitory weights, even if these are conductances
value = make_sli_compatible(value)
if name not in self._common_synapse_property_names:
if len(source_mask) > 1:
nest.SetStatus(connections, name, value[source_mask])
elif isinstance(value, numpy.ndarray): # OneToOneConnector
nest.SetStatus(connections, name, value[source_mask])
else:
nest.SetStatus(connections, name, value)
else:
self._set_common_synapse_property(name, value)
def _set_common_synapse_property(self, name, value):
"""
Sets the common synapse property while making sure its value stays
unique (i.e. it can only be set once).
"""
if name in self._common_synapse_properties:
unequal = self._common_synapse_properties[name] != value
# handle both scalars and numpy ndarray
if isinstance(unequal, numpy.ndarray):
raise_error = unequal.any()
else:
raise_error = unequal
if raise_error:
raise ValueError("{} cannot be heterogeneous "
"within a single Projection. Warning: "
"Projection was only partially initialized."
" Please call sim.nest.reset() to reset "
"your network and start over!".format(name))
self._common_synapse_properties[name] = value
nest.SetDefaults(self.nest_synapse_model, name, value)
#def saveConnections(self, file, gather=True, compatible_output=True):
# """
# Save connections to file in a format suitable for reading in with a
# FromFileConnector.
# """
# import operator
#
# if isinstance(file, basestring):
# file = recording.files.StandardTextFile(file, mode='w')
#
# lines = nest.GetStatus(self.nest_connections, ('source', 'target', 'weight', 'delay'))
# if gather == True and simulator.state.num_processes > 1:
# all_lines = { simulator.state.mpi_rank: lines }
# all_lines = recording.gather_dict(all_lines)
# if simulator.state.mpi_rank == 0:
# lines = reduce(operator.add, all_lines.values())
# elif simulator.state.num_processes > 1:
# file.rename('%s.%d' % (file.name, simulator.state.mpi_rank))
# logger.debug("--- Projection[%s].__saveConnections__() ---" % self.label)
#
# if gather == False or simulator.state.mpi_rank == 0:
# lines = numpy.array(lines, dtype=float)
# lines[:,2] *= 0.001
# if compatible_output:
# lines[:,0] = self.pre.id_to_index(lines[:,0])
# lines[:,1] = self.post.id_to_index(lines[:,1])
# file.write(lines, {'pre' : self.pre.label, 'post' : self.post.label})
# file.close()
def _get_attributes_as_list(self, names):
nest_names = []
for name in names:
if name == 'presynaptic_index':
nest_names.append('source')
elif name == 'postsynaptic_index':
nest_names.append('target')
else:
nest_names.append(name)
values = nest.GetStatus(self.nest_connections, nest_names)
if 'weight' in names: # other attributes could also have scale factors - need to use translation mechanisms
values = numpy.array(values) # ought to preserve int type for source, target
scale_factors = numpy.ones(len(names))
scale_factors[names.index('weight')] = 0.001
if self.receptor_type == 'inhibitory' and self.post.conductance_based:
scale_factors[names.index('weight')] *= -1 # NEST uses negative values for inhibitory weights, even if these are conductances
values *= scale_factors
values = values.tolist()
if 'presynaptic_index' in names:
values = numpy.array(values)
values[:, names.index('presynaptic_index')] -= self.pre.first_id
values = values.tolist()
if 'postsynaptic_index' in names:
values = numpy.array(values)
values[:, names.index('postsynaptic_index')] -= self.post.first_id
values = values.tolist()
for i in xrange(len(values)):
values[i] = tuple(values[i])
return values
def _get_attributes_as_arrays(self, names, multiple_synapses='sum'):
multi_synapse_operation = Projection.MULTI_SYNAPSE_OPERATIONS[multiple_synapses]
all_values = []
for attribute_name in names:
if attribute_name[-1] == "s": # weights --> weight, delays --> delay
attribute_name = attribute_name[:-1]
value_arr = numpy.nan * numpy.ones((self.pre.size, self.post.size))
connection_attributes = nest.GetStatus(self.nest_connections,
('source', 'target', attribute_name))
for conn in connection_attributes:
# (offset is always 0,0 for connections created with connect())
src, tgt, value = conn
addr = self.pre.id_to_index(src), self.post.id_to_index(tgt)
if numpy.isnan(value_arr[addr]):
value_arr[addr] = value
else:
value_arr[addr] = multi_synapse_operation(value_arr[addr], value)
if attribute_name == 'weight':
value_arr *= 0.001
if self.receptor_type == 'inhibitory' and self.post.conductance_based:
value_arr *= -1 # NEST uses negative values for inhibitory weights, even if these are conductances
all_values.append(value_arr)
return all_values
def _set_initial_value_array(self, variable, value):
local_value = value.evaluate(simplify=True)
nest.SetStatus(self.nest_connections, variable, local_value)
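# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): Projection is normally
# created through the public pyNN API rather than instantiated directly.
# A minimal example, assuming a working NEST installation:
#
#   import pyNN.nest as sim
#   sim.setup(timestep=0.1)
#   pre = sim.Population(10, sim.SpikeSourcePoisson(rate=20.0))
#   post = sim.Population(10, sim.IF_cond_exp())
#   prj = sim.Projection(pre, post, sim.AllToAllConnector(),
#                        synapse_type=sim.StaticSynapse(weight=0.01, delay=0.5),
#                        receptor_type='excitatory')
#   weights = prj.get('weight', format='array')
#   sim.end()
# ---------------------------------------------------------------------------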
|
anupkdas-nus/global_synapses
|
pyNN-dispackgaes/nest/projections.py
|
Python
|
gpl-3.0
| 18,779
|
[
"NEURON"
] |
773e6aaad191475dd82b85a3ef9e9ba05f609aa0f725e4ea2d60fd08b599c26a
|
import vtk
import numpy
from vtk.util.numpy_support import numpy_to_vtk, vtk_to_numpy
import sys
sys.dont_write_bytecode = True
def assignElements2Dlin(mesh, elements):
numRows, numCols = elements.shape
# TODO can we do this faster?
for i in range(numRows):
v0, v1, v2, v3, v4, v5 = elements[i, :]
tri1 = vtk.vtkTriangle()
tri1.GetPointIds().SetId(0, v0)
tri1.GetPointIds().SetId(1, v3)
tri1.GetPointIds().SetId(2, v4)
tri2 = vtk.vtkTriangle()
tri2.GetPointIds().SetId(0, v3)
tri2.GetPointIds().SetId(1, v1)
tri2.GetPointIds().SetId(2, v5)
tri3 = vtk.vtkTriangle()
tri3.GetPointIds().SetId(0, v3)
tri3.GetPointIds().SetId(1, v5)
tri3.GetPointIds().SetId(2, v4)
tri4 = vtk.vtkTriangle()
tri4.GetPointIds().SetId(0, v4)
tri4.GetPointIds().SetId(1, v5)
tri4.GetPointIds().SetId(2, v2)
mesh.InsertNextCell(tri1.GetCellType(), tri1.GetPointIds())
mesh.InsertNextCell(tri2.GetCellType(), tri2.GetPointIds())
mesh.InsertNextCell(tri3.GetCellType(), tri3.GetPointIds())
mesh.InsertNextCell(tri4.GetCellType(), tri4.GetPointIds())
return mesh
def assignElements3Dlin(mesh, elements):
numRows, numCols = elements.shape
tet = vtk.vtkTetra()
# TODO can we do this faster?
for i in range(numRows):
v0, v1, v2, v3, v4, v5, v6, v7, v8, v9 = elements[i, :]
'''
tet.GetPointIds().SetId(0, v0)
tet.GetPointIds().SetId(1, v4)
tet.GetPointIds().SetId(2, v6)
tet.GetPointIds().SetId(3, v7)
mesh.InsertNextCell(tet.GetCellType(), tet.GetPointIds())
tet.GetPointIds().SetId(0, v6)
tet.GetPointIds().SetId(1, v4)
tet.GetPointIds().SetId(2, v5)
tet.GetPointIds().SetId(3, v7)
mesh.InsertNextCell(tet.GetCellType(), tet.GetPointIds())
tet.GetPointIds().SetId(0, v4)
tet.GetPointIds().SetId(1, v1)
tet.GetPointIds().SetId(2, v5)
tet.GetPointIds().SetId(3, v8)
mesh.InsertNextCell(tet.GetCellType(), tet.GetPointIds())
tet.GetPointIds().SetId(0, v6)
tet.GetPointIds().SetId(1, v5)
tet.GetPointIds().SetId(2, v2)
tet.GetPointIds().SetId(3, v7)
mesh.InsertNextCell(tet.GetCellType(), tet.GetPointIds())
tet.GetPointIds().SetId(0, v4)
tet.GetPointIds().SetId(1, v8)
tet.GetPointIds().SetId(2, v5)
tet.GetPointIds().SetId(3, v7)
mesh.InsertNextCell(tet.GetCellType(), tet.GetPointIds())
tet.GetPointIds().SetId(0, v2)
tet.GetPointIds().SetId(1, v7)
tet.GetPointIds().SetId(2, v5)
tet.GetPointIds().SetId(3, v9)
mesh.InsertNextCell(tet.GetCellType(), tet.GetPointIds())
tet.GetPointIds().SetId(0, v7)
tet.GetPointIds().SetId(1, v5)
tet.GetPointIds().SetId(2, v9)
tet.GetPointIds().SetId(3, v8)
mesh.InsertNextCell(tet.GetCellType(), tet.GetPointIds())
tet.GetPointIds().SetId(0, v7)
tet.GetPointIds().SetId(1, v8)
tet.GetPointIds().SetId(2, v9)
tet.GetPointIds().SetId(3, v3)
mesh.InsertNextCell(tet.GetCellType(), tet.GetPointIds())
'''
tet.GetPointIds().SetId(0, v0)
tet.GetPointIds().SetId(1, v4)
tet.GetPointIds().SetId(2, v6)
tet.GetPointIds().SetId(3, v7)
mesh.InsertNextCell(tet.GetCellType(), tet.GetPointIds())
tet.GetPointIds().SetId(0, v4)
tet.GetPointIds().SetId(1, v1)
tet.GetPointIds().SetId(2, v5)
tet.GetPointIds().SetId(3, v8)
mesh.InsertNextCell(tet.GetCellType(), tet.GetPointIds())
tet.GetPointIds().SetId(0, v6)
tet.GetPointIds().SetId(1, v5)
tet.GetPointIds().SetId(2, v2)
tet.GetPointIds().SetId(3, v9)
mesh.InsertNextCell(tet.GetCellType(), tet.GetPointIds())
tet.GetPointIds().SetId(0, v7)
tet.GetPointIds().SetId(1, v8)
tet.GetPointIds().SetId(2, v9)
tet.GetPointIds().SetId(3, v3)
mesh.InsertNextCell(tet.GetCellType(), tet.GetPointIds())
tet.GetPointIds().SetId(0, v6)
tet.GetPointIds().SetId(1, v9)
tet.GetPointIds().SetId(2, v7)
tet.GetPointIds().SetId(3, v5)
mesh.InsertNextCell(tet.GetCellType(), tet.GetPointIds())
tet.GetPointIds().SetId(0, v7)
tet.GetPointIds().SetId(1, v5)
tet.GetPointIds().SetId(2, v9)
tet.GetPointIds().SetId(3, v8)
mesh.InsertNextCell(tet.GetCellType(), tet.GetPointIds())
tet.GetPointIds().SetId(0, v6)
tet.GetPointIds().SetId(1, v4)
tet.GetPointIds().SetId(2, v5)
tet.GetPointIds().SetId(3, v7)
mesh.InsertNextCell(tet.GetCellType(), tet.GetPointIds())
tet.GetPointIds().SetId(0, v4)
tet.GetPointIds().SetId(1, v5)
tet.GetPointIds().SetId(2, v7)
tet.GetPointIds().SetId(3, v8)
mesh.InsertNextCell(tet.GetCellType(), tet.GetPointIds())
return mesh
def numpy2vtkDataArray(npa, mystr):
# print npa[1][0]
size0, size1 = npa.shape
data = vtk.vtkDoubleArray()
data.SetNumberOfComponents(3)
data.SetName(mystr)
for i in range(size0):
x = npa[i, 0]
y = npa[i, 1]
z = npa[i, 2]
data.InsertNextTuple3(x, y, z)
return data
def numpy2vtkDataArray1(npa, mystr):
# print npa[1][0]
size0 = npa.shape[0]
data = vtk.vtkDoubleArray()
data.SetNumberOfComponents(1)
data.SetName(mystr)
for i in range(size0):
data.InsertNextValue(npa[i])
return data
def numpy2vtkDataArray9(npa, mystr):
# print npa[1][0]
size0, size1 = npa.shape
data = vtk.vtkDoubleArray()
data.SetNumberOfComponents(9)
data.SetName(mystr)
for i in range(size0):
data.InsertNextTuple9(npa[i, 0], npa[i, 1], npa[i, 2], npa[i, 3], npa[i, 4], npa[i, 5], npa[i, 6], npa[i, 7], npa[i, 8])
return data
def numpy2vtkDataArrayInt(npa):
# print npa[1][0]
size0, size1 = npa.shape
data = vtk.vtkIdTypeArray()
data.SetNumberOfComponents(4)
# data.SetName("CELLS")
for i in range(size0):
n0 = int(npa[i, 0])
n1 = int(npa[i, 1])
n2 = int(npa[i, 2])
n3 = int(npa[i, 3])
data.InsertNextTuple4(n0, n1, n2, n3)
return data
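# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original module): assemble a
# vtkUnstructuredGrid from a single 6-node quadratic triangle using
# assignElements2Dlin.  The node coordinates and connectivity are made up;
# corner nodes come first, followed by the midside nodes.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    nodes = numpy.array([[0.0, 0.0, 0.0],   # corner v0
                         [1.0, 0.0, 0.0],   # corner v1
                         [0.0, 1.0, 0.0],   # corner v2
                         [0.5, 0.0, 0.0],   # midside v3 (between v0 and v1)
                         [0.0, 0.5, 0.0],   # midside v4 (between v0 and v2)
                         [0.5, 0.5, 0.0]])  # midside v5 (between v1 and v2)
    elements = numpy.array([[0, 1, 2, 3, 4, 5]])
    points = vtk.vtkPoints()
    for node in nodes:
        points.InsertNextPoint(float(node[0]), float(node[1]), float(node[2]))
    mesh = vtk.vtkUnstructuredGrid()
    mesh.SetPoints(points)
    mesh = assignElements2Dlin(mesh, elements)
    # each quadratic triangle is split into four linear triangles
    print(mesh.GetNumberOfCells())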
|
beauof/FSIViewer
|
organiseData.py
|
Python
|
lgpl-3.0
| 6,548
|
[
"VTK"
] |
8946166b052361570fa1e7a9172e933993fb872d5d7945a8aac6b10e5a49b8df
|
from random import random
from ase.io import write
from ase.optimize import BFGS
from ase.calculators.emt import EMT
from ase.ga.data import DataConnection
from ase.ga.population import Population
from ase.ga.standard_comparators import InteratomicDistanceComparator
from ase.ga.cutandsplicepairing import CutAndSplicePairing
from ase.ga.utilities import closest_distances_generator
from ase.ga.utilities import get_all_atom_types
from ase.ga.offspring_creator import OperationSelector
from ase.ga.standardmutations import MirrorMutation
from ase.ga.standardmutations import RattleMutation
from ase.ga.standardmutations import PermutationMutation
# Change the following three parameters to suit your needs
population_size = 5
mutation_probability = 0.3
n_to_test = 5
# Initialize the different components of the GA
da = DataConnection('gadb.db')
atom_numbers_to_optimize = da.get_atom_numbers_to_optimize()
n_to_optimize = len(atom_numbers_to_optimize)
slab = da.get_slab()
all_atom_types = get_all_atom_types(slab, atom_numbers_to_optimize)
blmin = closest_distances_generator(all_atom_types,
ratio_of_covalent_radii=0.7)
comp = InteratomicDistanceComparator(n_top=n_to_optimize,
pair_cor_cum_diff=0.015,
pair_cor_max=0.7,
dE=0.02,
mic=False)
pairing = CutAndSplicePairing(slab, n_to_optimize, blmin)
mutations = OperationSelector([1., 1., 1.],
[MirrorMutation(blmin, n_to_optimize),
RattleMutation(blmin, n_to_optimize),
PermutationMutation(n_to_optimize)])
# Relax all unrelaxed structures (e.g. the starting population)
while da.get_number_of_unrelaxed_candidates() > 0:
a = da.get_an_unrelaxed_candidate()
a.set_calculator(EMT())
print('Relaxing starting candidate {0}'.format(a.info['confid']))
dyn = BFGS(a, trajectory=None, logfile=None)
dyn.run(fmax=0.05, steps=100)
a.set_raw_score(-a.get_potential_energy())
da.add_relaxed_step(a)
# create the population
population = Population(data_connection=da,
population_size=population_size,
comparator=comp)
# test n_to_test new candidates
for i in range(n_to_test):
print('Now starting configuration number {0}'.format(i))
a1, a2 = population.get_two_candidates()
a3, desc = pairing.get_new_individual([a1, a2])
if a3 is None:
continue
da.add_unrelaxed_candidate(a3, description=desc)
# Check if we want to do a mutation
if random() < mutation_probability:
a3_mut, desc = mutations.get_new_individual([a3])
if a3_mut is not None:
da.add_unrelaxed_step(a3_mut, desc)
a3 = a3_mut
# Relax the new candidate
a3.set_calculator(EMT())
dyn = BFGS(a3, trajectory=None, logfile=None)
dyn.run(fmax=0.05, steps=100)
a3.set_raw_score(-a3.get_potential_energy())
da.add_relaxed_step(a3)
population.update()
write('all_candidates.traj', da.get_all_relaxed_candidates())
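# ---------------------------------------------------------------------------
# Note (not part of the original script): 'gadb.db' must already contain a
# starting population.  It is typically prepared beforehand along these lines
# (a sketch; slab, atom_numbers_to_optimize and starting_population are
# placeholders for objects built elsewhere):
#
#   from ase.ga.data import PrepareDB
#   db = PrepareDB(db_file_name='gadb.db',
#                  simulation_cell=slab,
#                  stoichiometry=atom_numbers_to_optimize)
#   for candidate in starting_population:
#       db.add_unrelaxed_candidate(candidate)
# ---------------------------------------------------------------------------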
|
askhl/ase
|
ase/test/ga/basic_example_main_run.py
|
Python
|
gpl-2.0
| 3,181
|
[
"ASE"
] |
b7b21e09f7c967e5e0c3ca9d8764f18ef469d92e15f49f9d5b4f14545da5b90e
|
#!/usr/bin/env python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Run on a single gbs file to get a periodic table printout or on two to compare gbs contents."""
from __future__ import print_function
import os
import sys
import subprocess
qcdb_module = os.path.normpath(os.path.dirname(os.path.abspath(__file__)) + '../../../../../driver')
sys.path.append(qcdb_module)
import qcdb
from qcdb.libmintsbasissetparser import Gaussian94BasisSetParser
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def bas_sanitize(fl):
if fl[-4:] == '.gbs':
fl = fl[:-4]
return fl.lower().replace('+', 'p').replace('*', 's').replace('(', '_').replace(')', '_').replace(',', '_')
parser = Gaussian94BasisSetParser()
elements = qcdb.periodictable._temp_symbol
os.system("echo '#differing basis sets' > basisdunningdiffer.txt")
with open(sys.argv[1], 'r') as basfile:
bascontents = basfile.readlines()
bname = bas_sanitize(sys.argv[1])
isdiff = False
if len(sys.argv) > 2:
isdiff = True
with open(sys.argv[2], 'r') as reffile:
refcontents = reffile.readlines()
rname = bas_sanitize(os.path.basename(sys.argv[2]))
if isdiff:
if bname != rname:
print('%s / %s' % (bname, rname), end='')
else:
print('%-40s' % (bname), end='')
else:
print('%-40s' % (bname), end='')
anychange = False
forbiddenchange = False
postKr = False
for el in elements:
if el.upper() == "RB":
postKr = True
shells, msg, ecp_shells, ecp_msg, ecp_ncore = parser.parse(el, bascontents)
if isdiff:
rshells, rmsg, recp_shells, recp_msg, recp_ncore = parser.parse(el, refcontents)
if not shells and not rshells:
print('%s' % ('' if postKr else ' '), end='')
continue
if shells and not rshells:
print(bcolors.OKBLUE + '{:3}'.format(el.upper()) + bcolors.ENDC, end='')
anychange = True
if not shells and rshells:
print(bcolors.FAIL + '{:3}'.format(el.upper()) + bcolors.ENDC, end='')
anychange = True
forbiddenchange = True
if shells and rshells:
mol = qcdb.Molecule("""\n{}\n""".format(el))
mol.update_geometry()
mol.set_basis_all_atoms(bname, role='BASIS')
bdict = {bname: ''.join(bascontents)}
rdict = {bname: ''.join(refcontents)}
bs, msg, ecp = qcdb.BasisSet.construct(parser, mol, 'BASIS', None, bdict, False)
rbs, rmsg, recp = qcdb.BasisSet.construct(parser, mol, 'BASIS', None, rdict, False)
if bs == rbs:
print('{:3}'.format(el.lower()), end='')
else:
print(bcolors.WARNING + '{:3}'.format(el.upper()) + bcolors.ENDC, end='')
anychange = True
tbs = bs.print_detail(out='tmpB.txt')
rtbs = rbs.print_detail(out='tmpR.txt')
try:
outdiff = subprocess.check_output("diff -bwy -W 180 tmpB.txt tmpR.txt >> basisdunningdiffer.txt", shell=True)
#outdiff = subprocess.check_output("diff -bw --context=1 tmpB.txt tmpR.txt >> basisdunningdiffer.txt", shell=True)
except subprocess.CalledProcessError:
pass
else:
if not shells:
print('%s' % ('' if postKr else ' '), end='')
else:
print('{:3}'.format(el.lower()), end='')
print('')
if anychange and not forbiddenchange:
os.system("echo 'mv {} ../' >> basisdunningfiles.txt".format(sys.argv[1]))
|
jH0ward/psi4
|
psi4/share/psi4/basis/primitives/diff_gbs.py
|
Python
|
lgpl-3.0
| 4,565
|
[
"Psi4"
] |
6a43893b963d53cd96bf8b034c8a15068676536cf941c07a55aff4cf6cf9c9e3
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
from flask import Flask, render_template, request
from google.cloud import ndb
import redis
app = Flask(__name__)
ds_client = ndb.Client()
HOUR = 3600
REDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')
REDIS_PORT = os.environ.get('REDIS_PORT', '6379')
REDIS = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)
class Visit(ndb.Model):
'Visit entity registers visitor IP address & timestamp'
visitor = ndb.StringProperty()
timestamp = ndb.DateTimeProperty(auto_now_add=True)
def store_visit(remote_addr, user_agent):
'create new Visit entity in Datastore'
with ds_client.context():
Visit(visitor='{}: {}'.format(remote_addr, user_agent)).put()
def fetch_visits(limit):
'get most recent visits'
with ds_client.context():
return Visit.query().order(-Visit.timestamp).fetch(limit)
@app.route('/')
def root():
'main application (GET) handler'
# check for (hour-)cached visits
ip_addr, usr_agt = request.remote_addr, request.user_agent
visitor = '{}: {}'.format(ip_addr, usr_agt)
rsp = REDIS.get('visits')
visits = pickle.loads(rsp) if rsp else None
# register visit & run DB query if cache empty or new visitor
if not visits or visits[0].visitor != visitor:
store_visit(ip_addr, usr_agt)
visits = list(fetch_visits(10))
REDIS.set('visits', pickle.dumps(visits), ex=HOUR)
return render_template('index.html', visits=visits)
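# ---------------------------------------------------------------------------
# Illustrative helper (not part of the original sample): the cache-aside
# pattern used in root(), shown in isolation.  It assumes a reachable Redis
# instance; the key name 'visits' matches the one used above.
# ---------------------------------------------------------------------------
def cached_visits(limit=10):
    'return visits from Redis if cached, else query Datastore and cache them'
    raw = REDIS.get('visits')
    if raw:
        return pickle.loads(raw)
    visits = list(fetch_visits(limit))
    REDIS.set('visits', pickle.dumps(visits), ex=HOUR)
    return visits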
|
googlecodelabs/migrate-python2-appengine
|
mod13b-memorystore/main.py
|
Python
|
apache-2.0
| 2,028
|
[
"VisIt"
] |
5712e161ef4ff920a1c5f73c24b7e8a2f5a97e71b09899c1931b7fb87a975c62
|
#!/bin/env python
""" Show request given its ID, a jobID or a transformation and a task """
__RCSID__ = "$Id: $"
import datetime
def convertDate( date ):
try:
value = datetime.datetime.strptime( date, '%Y-%m-%d' )
return value
except:
pass
try:
value = datetime.datetime.utcnow() - datetime.timedelta( hours = int( 24 * float( date ) ) )
except:
gLogger.fatal( "Invalid date", date )
value = None
return value
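# Examples of accepted values for --Since/--Until (illustrative):
#   convertDate( '2016-01-01' )  ->  datetime( 2016, 1, 1, 0, 0 )
#   convertDate( '2' )           ->  now minus 48 hours
#   convertDate( '0.5' )         ->  now minus 12 hours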
from DIRAC.Core.Base import Script
Script.registerSwitch( '', 'Job=', ' = JobID' )
Script.registerSwitch( '', 'Transformation=', ' = transID' )
Script.registerSwitch( '', 'Tasks=', ' Associated to --Transformation, list of taskIDs' )
Script.registerSwitch( '', 'Verbose', ' Print more information' )
Script.registerSwitch( '', 'Terse', ' Only print request status' )
Script.registerSwitch( '', 'Full', ' Print full request' )
Script.registerSwitch( '', 'Status=', ' Select all requests in a given status' )
Script.registerSwitch( '', 'Since=', ' Associated to --Status, start date yyyy-mm-dd or number of days ago (default = 1 day ago)' )
Script.registerSwitch( '', 'Until=', ' Associated to --Status, end date yyyy-mm-dd (default = now)' )
Script.registerSwitch( '', 'All', ' (if --Status Failed) all requests, otherwise exclude irrecoverable failures' )
Script.registerSwitch( '', 'Reset', ' Reset Failed files to Waiting if any' )
Script.setUsageMessage( '\n'.join( [ __doc__,
'Usage:',
' %s [option|cfgfile] requestID/requestName(if unique)' % Script.scriptName,
'Arguments:',
' requestID: a request ID' ] ) )
# # execution
if __name__ == "__main__":
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
import DIRAC
from DIRAC import gLogger
jobs = []
requestID = 0
transID = None
taskIDs = None
tasks = None
requests = []
full = False
verbose = False
status = None
until = None
since = None
terse = False
allR = False
reset = False
for switch in Script.getUnprocessedSwitches():
if switch[0] == 'Job':
try:
jobs = [int( job ) for job in switch[1].split( ',' )]
except:
gLogger.fatal( "Invalid jobID", switch[1] )
elif switch[0] == 'Transformation':
try:
transID = int( switch[1] )
except:
gLogger.fatal( 'Invalid transID', switch[1] )
elif switch[0] == 'Tasks':
try:
taskIDs = [int( task ) for task in switch[1].split( ',' )]
except:
gLogger.fatal( 'Invalid tasks', switch[1] )
elif switch[0] == 'Full':
full = True
elif switch[0] == 'Verbose':
verbose = True
elif switch[0] == 'Terse':
terse = True
elif switch[0] == 'All':
allR = True
elif switch[0] == 'Reset':
reset = True
elif switch[0] == 'Status':
status = switch[1].capitalize()
elif switch[0] == 'Since':
since = convertDate( switch[1] )
elif switch[0] == 'Until':
until = convertDate( switch[1] )
if reset:
status = 'Failed'
if terse:
verbose = True
if status:
if not until:
until = datetime.datetime.utcnow()
if not since:
since = until - datetime.timedelta( hours = 24 )
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.Client.ReqClient import printRequest, recoverableRequest
reqClient = ReqClient()
if transID:
if not taskIDs:
gLogger.fatal( "If Transformation is set, a list of Tasks should also be set" )
Script.showHelp()
DIRAC.exit( 2 )
# In principle, the task name is unique, so the request name should be unique as well
# If ever this would not work anymore, we would need to use the transformationClient
# to fetch the ExternalID
requests = ['%08d_%08d' % ( transID, task ) for task in taskIDs]
allR = True
elif not jobs:
args = Script.getPositionalArgs()
if len( args ) == 1:
allR = True
requests = [reqID for reqID in args[0].split( ',' ) if reqID]
else:
res = reqClient.getRequestIDsForJobs( jobs )
if not res['OK']:
gLogger.fatal( "Error getting request for jobs", res['Message'] )
DIRAC.exit( 2 )
if res['Value']['Failed']:
gLogger.error( "No request found for jobs %s" % str( res['Value']['Failed'].keys() ) )
requests = sorted( res['Value']['Successful'].values() )
if requests:
allR = True
if status and not requests:
allR = allR or status != 'Failed'
res = reqClient.getRequestIDsList( [status], limit = 999999999, since = since, until = until )
if not res['OK']:
gLogger.error( "Error getting requests:", res['Message'] )
DIRAC.exit( 2 )
requests = [reqID for reqID, _st, updTime in res['Value'] if updTime > since and updTime <= until and reqID]
gLogger.always( 'Obtained %d requests %s between %s and %s' % ( len( requests ), status, since, until ) )
if not requests:
gLogger.always( 'No request selected....' )
Script.showHelp()
DIRAC.exit( 2 )
okRequests = []
warningPrinted = False
for reqID in requests:
# We allow reqID to be the requestName if it is unique
try:
requestID = int( reqID )
except ValueError:
requestID = reqClient.getRequestIDForName( reqID )
if not requestID['OK']:
gLogger.always( requestID['Message'] )
continue
requestID = requestID['Value']
request = reqClient.peekRequest( requestID )
if not request["OK"]:
gLogger.error( request["Message"] )
DIRAC.exit( -1 )
request = request["Value"]
if not request:
gLogger.error( "no such request %s" % requestID )
continue
if status and request.Status != status:
if not warningPrinted:
gLogger.always( "Some requests are not in status %s" % status )
warningPrinted = True
continue
if allR or recoverableRequest( request ):
okRequests.append( str( requestID ) )
if reset:
gLogger.always( '============ Request %s =============' % requestID )
ret = reqClient.resetFailedRequest( requestID, allR = allR )
if not ret['OK']:
gLogger.error( "Error resetting request %s" % requestID, ret['Message'] )
else:
if len( requests ) > 1:
gLogger.always( '\n===================================' )
dbStatus = reqClient.getRequestStatus( requestID ).get( 'Value', 'Unknown' )
printRequest( request, status = dbStatus, full = full, verbose = verbose, terse = terse )
if status and okRequests:
from DIRAC.Core.Utilities.List import breakListIntoChunks
gLogger.always( '\nList of %d selected requests:' % len( okRequests ) )
for reqs in breakListIntoChunks( okRequests, 100 ):
gLogger.always( ','.join( reqs ) )
|
coberger/DIRAC
|
RequestManagementSystem/scripts/dirac-rms-show-request.py
|
Python
|
gpl-3.0
| 6,894
|
[
"DIRAC"
] |
1adea7a50eeaf547775b838f277ecea86abe4af16a79e00a8f785c2dbf4e9c75
|
"""
BNL NSLS-2 Computer Vision Module
Author: William Watson
Date: 2016-7-14
This Module contains numerous functions designed to
help in the data manipulation of images on the Beam-lines within NSLS-2.
It Uses the numpy and cv2 libraries to construct a simple module to
make image processing and computer vision easier, faster, and more compact.
"""
# DEPENDENCIES
import cv2
import numpy as np
import sys
import time
from matplotlib import pyplot as plt
import matplotlib
#from epics import caget, caput
# VERSION
__version__ = "0.5.4.1"
__opencv__ = cv2.__version__
__npversion__ = np.version.version
__sysver__ = sys.version
__matplotlibver__ = matplotlib.__version__
# GLOBAL VARS
colorMap_flag = {"autumn":0, "bone":1, "jet":2, "winter":3, "rainbow":4, "ocean":5, "summer":6, "spring":7, "cool":8, "hsv":9, "pink":10, "hot":11}
border_flag = {"constant":0, "reflect":2, "reflect101":4, "replicate":1, "default":4, "wrap":3}
EPICSTYPE = {1 : np.uint16 , 0 : np.uint8, 2:np.uint32, 3: np.uint64}
EPICSCOLOR = {0: "gray", 1: "bayer", 2: "RBG1"}
#######################################################################################
# EXPERIMENTAL SECTION - ALL NEW METHODS GO HERE FIRST FOR TESTING
#FIX FIX FIX
# METHODS KNOWN TO BE BROKEN / Need Further Looking!!!!!!!!!
def backgroundSubtract(img, flag=0):
"""
EXPERIMENTAL
Background Subtraction Methods
Params:
* img - Image
* flag - OPTIONAL - algorithm select; <0 - MOG2, 0 - MOG, >0 - GMG; def: 0 (currently only MOG is applied)
Returns:
* Background Subtracted Mask
"""
fgbg = cv2.BackgroundSubtractorMOG()
fgmask = fgbg.apply(img)
return fgmask
# Is this really enhancing? Need to figure out...
def enhance(img, window=30):
"""
EXPERIMENTAL
Enhances an Img
Params:
* img - Image
* window - OPTIONAL - Window Size used for High Pass Filter
Returns:
* Enhanced Img
"""
hp = highPassFilter(img, window=window)
tmp = grayscale(img) + laplacian(img)
return tmp
def watershed(img):
"""
EXPERIMENTAL
Image Segmenting - Watershed Algorithm
Params:
* img - image
Returns:
* Segmented img
"""
tmp = img.copy()
gray = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
kernel = np.ones((3,3), np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
sure_bg = cv2.dilate(opening, kernel, iterations=3)
dist_transform = cv2.distanceTransform(opening, cv2.cv.CV_DIST_L2, 5)
ret, sure_fg = cv2.threshold(dist_transform, 0.7*dist_transform.max(), 255, 0)
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
ret, markers = cv2.connectedComponents(sure_fg) #IS THIS REALLY NOT IMPLEMENTED IN PYTHON?
markers = markers+1
markers[unknown==255] = 0
markers = cv2.watershed(tmp, markers)
tmp[markers == -1] = [255,0,0]
return tmp
def drawHarrisSubPixel(img, blockSize=2, ksize=3, k=0.04, color1=(0,0,255), color2=(0,255,0)):
"""
EXPERIMENTAL
Harris Corner Detection with SubPixel Accuracy
Params:
* img - image
* blockSize - OPTIONAL - size of neighborhood considered for corner detection
* ksize - OPTIONAL - Aperture parameter of Sobel derivative used
* k - OPTIONAL - Harris detector free parameter in equation
* color1 - OPTIONAL - def is (0,0,255)
* color1 - OPTIONAL - def is (0,255,0)
Returns:
* Image with corners marked.
"""
tmp = img.copy()
gray = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
dst = cv2.cornerHarris(gray, blockSize, ksize, k)
dst = cv2.dilate(dst, None)
ret, dst = cv2.threshold(dst, 0.01*dst.max(), 255, 0)
dst = np.uint8(dst)
ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
corners = cv2.cornerSubPix(gray, np.float32(centroids), (5,5), (-1,-1), criteria)
res = np.hstack((centroids, corners))
res = np.int0(res)
tmp[res[:,1], res[:,0]] = color1
tmp[res[:,3], res[:,2]] = color2
return tmp
def depthImg(imgL, imgR, ndisparities=16, blockSize=16):
"""
EXPERIMENTAL
Returns a depth map of an img
Params:
* imgL - first image in pair
* imgR - second image in pair
* ndisparities - OPTIONAL - def: 16
* blockSize - OPTIONAL - def:16
Returns:
* Depth map of an image
"""
stereo = cv2.StereoBM(cv2.STEREO_BM_BASIC_PRESET, ndisparities=ndisparities, SADWindowSize=blockSize)
disparity = stereo.compute(imgL, imgR)
return disparity
def sharpen(img, ker = (9,9), sigX=10.0):
"""
EXPERIMENTAL
Returns a sharpened image using the unsharp mask algorithm
Params:
* img - image
* ker - OPTIONAL - kernel size tuple; def:(9,9)
* sigX - OPTIONAL - Gaussian kernel standard deviation in X dir; def:10.0
Returns:
* sharpened img
"""
gaus = cv2.GaussianBlur(img, ker, sigX)
unsharp = cv2.addWeighted(img, 1.5, gaus, -0.5, 0, img)
return unsharp
"""
###EXPERIMETNAL###
def fetchImgEXP(SYS, DEV):
\"""
EXPERIMENTAL FETCH IMG METHOD
\"""
SYSDEV = str(SYS) + "{" + str(DEV) + "}"
data = caget(SYSDEV + "image1:ArrayData")
rows = caget(SYSDEV + "image1:ArraySize1_RBV")
cols = caget(SYSDEV + "image1:ArraySize0_RBV")
dtype = caget(SYSDEV + "cam1:DataType_RBV")
color = caget(SYSDEV + "cam1:ColorMode_RBV")
count = 0
img = []
row = []
dtype = EPICSTYPE[caget(SYSDEV + "cam1:DataType_RBV")]
#print dtype
color = caget(SYSDEV + "cam1:ColorMode_RBV")
#print color
for i in range(rows):
for j in range(cols):
row.append(data[count])
count = count + 1
r = np.array(row, dtype)
img.append(r)
row = []
npra = np.array(img, dtype)
#display(npra)
save(npra, "fetchImg.jpg")
save(npra, "fetchImg.tiff") # Might need to change file type
save(npra, "fetchImg.jp2") # Might need to change file type
save(npra, "fetchImg.png") # Might need to change file type
img = load("fetchImg.jpg") #, getColorFlag(color))
return img
"""
################################################################################
def coherenceFilter(img, sigma=11, str_sigma=11, blend=0.5, iter_n=4):
"""
Applies a Coherence-enhancing filter onto an img
Params:
* img- image
* sigma - OPTIONAL - def: 11
* str_sigma - OPTIONAL - def: 11
* blend - OPTIONAL - def: 0.5
* iter_n - OPTIONAL - number of iterations; def: 4
Returns:
* Filtered Img
"""
h, w = img.shape[:2]
tmp = img.copy()
for i in xrange(iter_n):
gray = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3)
eigen = eigen.reshape(h, w, 3, 2) # [[e1, e2], v1, v2]
x, y = eigen[:,:,1,0], eigen[:,:,1,1]
gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma)
gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma)
gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma)
gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy
m = gvv < 0
ero = cv2.erode(tmp, None)
dil = cv2.dilate(tmp, None)
img1 = ero
img1[m] = dil[m]
tmp = np.uint8(tmp*(1.0 - blend) + img1*blend)
return tmp
def gaborFilter(img, ksize=31):
"""
Gabor Filter
Uses the Gabor Filter Convolutions to get Fractalius-like image effect
Params:
* img - image
Returns:
* Gabor Filtered Image Effect
"""
filters = []
#ksize = 31
for theta in np.arange(0, np.pi, np.pi / 16):
kern = cv2.getGaborKernel((ksize, ksize), 4.0, theta, 10.0, 0.5, 0, ktype=cv2.CV_32F)
kern /= 1.5*kern.sum()
filters.append(kern)
accum = np.zeros_like(img)
for ker in filters:
fimg = cv2.filter2D(img, cv2.CV_8UC3, ker)
np.maximum(accum, fimg, accum)
return accum
def floodFill(img, seedPoint, val=(255,255,255), lo=25, hi=25, fixedRng=False, connectivity=8):
"""
Flood Fill Algorithm
Params:
* img - image
* seedPoint - startPoint
* val -OPTIONAL - New Value; 255,255,255
* lo - OPTIONAL - Max lower brightness/color diff; def: 25
* hi - OPTIONAL - Max upper brightness/color diff; def: 25
* fixedRng - OPTIONAL - True = diff measured against the seed point (fixed range); False = diff measured against neighboring pixels (floating range); def: False
* connectivity - OPTIONAL - 4 or 8 bit neightborrhood, def:8
Returns:
* Flood Filles Img
"""
flooded = img.copy()
h, w = img.shape[:2]
mask = np.zeros((h+2,w+2), np.uint8)
flags = connectivity
if fixedRng:
flags |= cv2.FLOODFILL_FIXED_RANGE
cv2.floodFill(flooded, mask, seedPoint, val, (lo,)*3, (hi,)*3, flags)
return flooded
def imfill(img, threshVal = 220):
"""
Imfill - Creates a mask for an image by removing holes, Isolates shape of object
Params:
* img - image
* threshVal - OPTIONAL - Threshold Value; def: 220
Returns:
* Image with Holes Filled
"""
tmp = grayscale(img)
ret, thresh = cv2.threshold(tmp, threshVal, 255, cv2.THRESH_BINARY_INV)
flood = thresh.copy()
h, w = thresh.shape[:2]
mask = np.zeros((h+2,w+2), np.uint8)
cv2.floodFill(flood, mask, (0,0), 255)
invert = cv2.bitwise_not(flood)
output = thresh | invert
return output
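# ---------------------------------------------------------------------------
# Illustrative helper (not part of the original API): run imfill on an image
# read from disk and display the filled mask next to the original.  The file
# path is supplied by the caller.
# ---------------------------------------------------------------------------
def imfillDemo(path, threshVal=220):
    """
    Usage sketch for imfill
    Params:
        * path - path to an image file on disk
        * threshVal - OPTIONAL - threshold passed through to imfill; def: 220
    """
    img = cv2.imread(path)
    filled = imfill(img, threshVal=threshVal)
    matplotlibDisplayMulti([grayscale(img), filled], ["original", "holes filled"])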
def fourierCV(img):
"""
Performs a Fourier Transform using OpenCV Methods
Params:
* img - Image
Returns:
* Magnitude Spectrum of Image
"""
gray = grayscale(img)
dft = cv2.dft(np.float32(gray), flags = cv2.DFT_COMPLEX_OUTPUT)
dft_shift = np.fft.fftshift(dft)
magnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0], dft_shift[:,:,1]))
return magnitude_spectrum
def lowPassFilter(img, window=30):
"""
Performs a Low Pass Filter Operation on the Image
Params:
* img - image
* window - OPTIONAL - window size used for masking in spectrum; def: 30 - results in 60x60 window
Returns:
* Low Pass Filtered Image
"""
gray = grayscale(img)
dft = cv2.dft(np.float32(gray), flags = cv2.DFT_COMPLEX_OUTPUT)
dft_shift = np.fft.fftshift(dft)
rows, cols = gray.shape
crow, ccol = rows/2, cols/2
mask = np.zeros((rows, cols, 2), np.uint8)
mask[crow-window:crow+window, ccol-window:ccol+window] = 1
fshift = dft_shift*mask
f_ishift = np.fft.ifftshift(fshift)
img_back = cv2.idft(f_ishift)
img_back = cv2.magnitude(img_back[:,:,0], img_back[:,:,1])
return img_back
def fourier(img):
"""
Performs a Fourier Transform using CV
This is a wrapper fourier function for generic fourier.
More specific fncs are provided as well.
Params:
* img - Image
Returns:
* Magnitude Spectrum of Image
"""
return fourierCV(img)
def fourierNP(img):
"""
Performs a Fourier Transform using Numpy Methods
Params:
* img - Image
Returns:
* Magnitude Spectrum of Image
"""
gray = grayscale(img)
f = np.fft.fft2(gray)
fshift = np.fft.fftshift(f)
magnitude_spectrum = 20*np.log(np.abs(fshift)) # RETURN THIS
return magnitude_spectrum
def highPassFilter(img, window=30):
"""
Performs a High Pass Filter Operation on the Image
Params:
* img - image
* window - OPTIONAL - window size used for masking in spectrum; def: 30 - results in 60x60 window
Returns:
* High Pass Filtered Image
"""
gray = grayscale(img)
f = np.fft.fft2(gray)
fshift = np.fft.fftshift(f)
rows, cols = gray.shape
crow, ccol = rows/2, cols/2
fshift[crow-window:crow+window, ccol-window:ccol+window] = 0
f_ishift = np.fft.ifftshift(fshift)
img_back = np.fft.ifft2(f_ishift)
img_back = np.abs(img_back)
return img_back
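# ---------------------------------------------------------------------------
# Illustrative helper (not part of the original API): compare the Fourier
# magnitude spectrum with the low-pass and high-pass filtered versions of an
# image read from disk.  It reuses fourier, lowPassFilter and highPassFilter
# defined above and matplotlibDisplayMulti defined below.
# ---------------------------------------------------------------------------
def frequencyFilterDemo(path, window=30):
    """
    Usage sketch for the frequency-domain filters
    Params:
        * path - path to an image file on disk
        * window - OPTIONAL - half-size of the square mask in the spectrum; def: 30
    """
    img = cv2.imread(path)
    spectrum = fourier(img)
    low = lowPassFilter(img, window=window)
    high = highPassFilter(img, window=window)
    matplotlibDisplayMulti([spectrum, low, high],
                           ["magnitude spectrum", "low pass", "high pass"])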
def matplotlibDisplay(img, title="Image", colorFlag = 'gray'):
"""
Displays an image using MatPlotLib
Useful for displaying all images and Magnitude Spectrums
Params:
* img - image
* title - OPTIONAL - name of Image
* colorFlag - OPTIONAL - color flag for imshow; def: 'gray'
"""
plt.imshow(img, colorFlag)
plt.title(title)
plt.xticks([])
plt.yticks([])
plt.show()
def matplotlibDisplayMulti(imgs, titles=None, colorFlag='gray'):
"""
Displays multiple images in a matplotlib window
Params:
* img - image
* title - OPTIONAL - name of Image
* colorFlag - OPTIONAL - color flag for imshow; def: 'gray'
"""
if titles is None:
titles = []
for i in range(len(imgs)):
titles.append("IMAGE " + str(i))
for i in range(len(imgs)):
plt.subplot(1, len(imgs), 1+i)
plt.imshow(imgs[i], colorFlag)
plt.title(titles[i])
plt.xticks([])
plt.yticks([])
plt.show()
def version():
"""
Prints the version codes for cvlib, OpenCV, and Numpy. For Refrence
"""
print "Cvlib Version: " + str(__version__)
print "OpenCV Version: " + str(__opencv__)
print "Numpy Version: " + str(__npversion__)
print "Matplotlib Ver: " + str(__matplotlibver__)
print "Python Version: " + str(__sysver__)
def transpose(matrix):
"""
Transposes a matrix
Params:
* matrix - matrix
Returns:
* transpose of matrix
"""
return cv2.transpose(matrix)
def dictToLst(dictionary):
"""
Returns a dictionary into two lists of [[key][values]]
Params:
* dictionary - Dictionary to be split
Returns:
* [keys, values]
"""
keys = []
values = []
for key, value in dictionary.iteritems():
keys.append(key)
values.append(value)
return [keys, values]
def lstToDict(key, value):
"""
Turns two lists into a dictionary
Params:
* key - List of Keys
* value - List of Values
Returns:
* Dictionary of Key/Values
"""
return dict(zip(key, value))
def meanVal(img):
"""
Returns the Mean Color (Regular Img) / Mean Intensity (Grayscale)
Params:
* img - image
Returns:
* Mean Color / Mean Image
"""
if img is None:
    print "ERROR: MeanValue: Sent in None-Type Object"
    return -1
mean = cv2.mean(img)
if len(img.shape) == 3:
return (mean[0], mean[1], mean[2])
elif len(img.shape) == 2:
return (mean[0])
else:
return mean
def flip(img, code=0):
"""
Flips a 2D array around vertical, horizontal, or both axes
Params:
* img - image
* code - OPTIONAL - flip code
flip code == 0 -> Vertical
flip code > 0 -> Horizontal
flip code < 0 -> Both axes (vertical and horizontal)
Returns:
* flipped image
"""
return cv2.flip(img, flipCode=code)
def mask(img, invert=False):
"""
Finds the mask for an image
Params:
* img - image
* invert - OPTIONAL - def False; if def, returns mask, if True, returns bitwise not of mask
Returns:
* mask
"""
imgray = grayscale(img) #.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(imgray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
if not invert:
return mask
else:
return mask_inv
def copy(img):
"""
Copy an image, removes all refrences to original image
Params:
* img- image to be copied
Returns:
* copied img
"""
return img.copy()
def filterCnts(cnts, threshold = 5):
"""
Removes contours that contain less points than threshold, EXCLUSIVE
Params:
* cnts - list of contours
* threshold - OPTIONAL - Removes all contours less than it,
i.e only allows for contours greater than it;
def: 5
Returns:
* new list of contours filtered
"""
c = []
for item in cnts:
if threshold < len(item):
c.append(item)
return c
def cntInfo(img, cnt):
"""
Returns pertinent information regarding a contour
Params:
* img - image
* cnt - contour
Returns:
* dictionary of cnt info
"""
pts = extremePoints(cnt)
roi = crop(img, pts["L"][0], pts["T"][1], pts["R"][0], pts["B"][1])
m = minMaxLoc(roi)
m["minLoc"] = (m["minLoc"][0] + pts["L"][0], m["minLoc"][1] + pts["T"][1])
m["maxLoc"] = (m["maxLoc"][0] + pts["L"][0], m["maxLoc"][1] + pts["T"][1])
cross = abs(pts["L"][0] - pts["R"][0])
height = abs(pts["T"][1] - pts["B"][1])
cent = centroid(cnt)
angle = orientation(cnt)
areaVal = area(cnt)
per = perimeter(cnt)
ar = aspectRatio(cnt)
ext = extent(cnt)
sold = solidity(cnt)
eqD = equivalentDiameter(cnt)
me = meanVal(grayscale(roi))
su = sumPixel(grayscale(roi))
d = {"sum intensity":su, "mean intensity":me, "area":areaVal, "perimeter":per, "aspect ratio":ar, "extent":ext,"solidity":sold, "equivalent diameter":eqD, "width": cross, "height" : height, "centroid" : cent, "extrema" : pts, "min":m["minLoc"], "max":m["maxLoc"], "orientation" : angle}
return d
def sumPixel(img):
"""
Sums all of the pixels in an image
Best for Grayscale
Params:
* image
Returns:
* sum of pixel values
"""
ret = cv2.sumElems(img)
if len(img.shape) == 2:
return ret[0]
else:
return (ret[0], ret[1], ret[2])
def poi(img, cnt):
"""
Returns only points of interest for a contour
Params:
* img - image
* cnt- contour
Returns:
* points of interest (max/min, extremas, center)
"""
m = cntInfo(img, cnt)
d = {"max":m["max"],"B":m["extrema"]["B"],"T":m["extrema"]["T"],"R":m["extrema"]["R"],"L":m["extrema"]["L"],"min":m["min"],"centroid":m["centroid"]}
return d
def scalarInfo(img, cnt):
"""
Returns only scalar information for a contour
Params:
* img- image
* cnt- contour
Returns:
* scalar contour information
"""
m = cntInfo(img, cnt)
d = {"perimeter":m["perimeter"], "oreientation":m["orientation"], "solidity":m["solidity"],"height":m["height"], "extent":m["extent"], "aspect ratio":m["aspect ratio"], "area":m["area"], "sum intensity":m["sum intensity"], "width":m["width"], "equivalent diameter": m["equivalent diameter"], "mean intensity": m["mean intensity"]}
return d
def printCntInfo(img, cnt):
"""
Prints the contour information line by line via key: value format
Params:
* image - image
* cnt - contour
Returns:
* dicitonary of Contour Info
"""
m = cntInfo(img, cnt)
lst = dictToLst(m)
for i in range(len(lst[0])):
print str(lst[0][i]) + ": " + str(lst[1][i])
return m
def printDic(m):
"""
Prints a Dictionary in Key: Value format
Params:
* m - Dictionary to print
"""
lst = dictToLst(m)
for i in range(len(lst[0])):
print str(lst[0][i]) + ": " + str(lst[1][i])
def plotPOI(img, cnt, radius = 3, color=(100,100,255)):
"""
Plots the points of interest of a cnt
Params:
* img - image
* cnt - contour
* radius - OPTIONAL - radius of point; def: 3
* color - OPTIONAL - color of pt; def: (100, 100, 255)
"""
m = poi(img, cnt)
for key, value in m.iteritems():
plotPoint(img, value, radius = radius, color = color)
def cntInfoMult(img, cnts):
"""
Returns a list of all cnt info for a list of contours
Here, cnts[0] -> cntProp[0]; i.e. all contour indexes map to info index
Params:
* img - image containing contours
* cnts - list of contours
Returns:
* contour information in list format
"""
cntProp = []
for item in cnts:
cntProp.append(cntInfo(img, item))
return cntProp
def pixelPoints(img, cnt):
"""
Finds all the points that comprises an object (Pixel Points)
Params:
* img - image
* cnt - contour
Returns:
* pixelpoints
"""
m = np.zeros(grayscale(img).shape, np.uint8)
cv2.drawContours(m, [cnt], 0, 255, -1)
pixelpoints = cv2.findNonZero(m)
return pixelpoints
def fillContour(img, cnt, color = (255,255,0)):
"""
Fill a Contour - Fills a contour with color onto img
Params:
* img - image
* cnt - contour
* color - OPTIONAL - color of fill, def: (255,255,0)
"""
cv2.drawContours(img, [cnt], 0, color, -1)
def minMaxLoc(img):
"""
Returns the Max Val, Min Val, and Locations
Params:
* img - image
Returns:
    * (minVal, maxVal, minLoc, maxLoc) in dictionary format
"""
maskVar = mask(img)
pt = cv2.minMaxLoc(grayscale(img), maskVar)
d = {"minVal": pt[0], "maxVal":pt[1], "minLoc":pt[2], "maxLoc":pt[3]}
return d
def findMax(img):
"""
Returns the Max value and Location
Params:
* img - image
Returns:
* (MaxVal, MaxLoc) in dictionary format
"""
d = minMaxLoc(img)
return {"maxVal":d["maxVal"], "maxLoc":d["maxLoc"]}
def findMin(img):
"""
Returns the Min value and Location
Params:
* img - image
Returns:
* (MinVal, MinLoc) in dictionary format
"""
d = minMaxLoc(img)
return {"minVal":d["minVal"], "minLoc":d["minLoc"]}
def add(img1, img2):
"""
Image Addition - add two images
Params:
* img1 - image 1
* img2 - image 2 or a scalar quantity
Returns:
* Addition of img1 and img2
"""
return cv2.add(img1, img2)
def addWeight(img1, wt1, img2, wt2, gamma=0):
"""
Image Blending - Added Weighting
Params:
* img1 - first image
* wt1 - weight of first image
* img2 - second img
* wt2 - weight of second image
* gamma - OPTIONAL - def is 0
Returns:
* weighted addition of imgs
"""
    dst = cv2.addWeighted(img1, wt1, img2, wt2, gamma)
return dst
def bitAnd(img1, img2=None, maskVar = None):
"""
Bitwise Ops - And
Params:
* img1 - first input array/scalar
* img2 - OPTIONAL - second input array/scalar; def is usually img1
* maskVar - OPTIONAL - mask of img; def of None
Returns:
* Bitwise And of imgs
"""
if img2 is None:
img2 = img1
return cv2.bitwise_and(img1, img2, mask=maskVar)
def bitOr(img1, img2=None, mask = None):
"""
Bitwise Ops - Or
Params:
* img1 - first input array/scalar
* img2 - OPTIONAL - second input array/scalar; def is usually img1
* mask - OPTIONAL - mask of img; def is None
Returns:
* Bitwise Or of imgs
"""
if img2 is None:
img2 = img1
return cv2.bitwise_or(img1, img2, mask=mask)
def bitXor(img1, img2=None, mask = None):
"""
Bitwise Ops - Exclusive Or
Params:
* img1 - first input array/scalar
* img2 - OPTIONAL - second input array/scalar; def is usually img1
* mask - OPTIONAL - mask of img; def is None
Returns:
* Bitwise Exclusive Or of imgs
"""
if img2 is None:
img2 = img1
return cv2.bitwise_xor(img1, img2, mask=mask)
def bitNot(img):
"""
Bitwise Ops - Not
Params:
* img - input img
Returns:
* Bitwise Not of imgs
"""
return cv2.bitwise_not(img)
def shape(img):
"""
Image Dimensions
Params:
* img - Image
Returns:
* (x, y, c) - x pixels, y pixels, channels
"""
if len(img.shape) == 3:
y, x, c = img.shape
return (x, y, c)
else:
y, x = img.shape
return (x, y)
def crop(img, x0, y0, x1, y1):
"""
Image Crop: Returns a Crop Img
Can be Used for ROI
Params:
* img - image
* x0 - X Position (start)
* y0 - Y Position (start)
* x1 - X End Position
* y1 - Y End Position
Returns:
* imgROI - from [y:y+h, x:x+w]
"""
crop = img[y0:y1, x0:x1]
return crop
def cropPt(img, start, end):
"""
Image Cropping/ROI for PTS or Tuples as coordinates
Params:
* img - image
* start - (x, y) tuple for start pt
* end - (x, y) tuple for end pt
Returns:
* Cropped Image ROI in rectangle bounded by start:end
"""
x0, y0 = start
x1, y1 = end
return crop(img, x0, y0, x1, y1)
def roi(img, x, y, w, h):
"""
Image ROI: Returns a Region of Interest
Can be Used for Cropping
Params:
* img - image
* x - X Position (start)
* y - Y Position(start)
* w - Width - How much region spans after X
* h - Height - How much region spans after Y
Returns:
* imgROI - from [x:x+w, y:y+h]
"""
roi = img[y:y+h, x:x+w]
return roi
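# Illustrative sketch (hypothetical coordinates): crop() and roi() address the same
# region in two different ways; for a large enough img both calls return the same 100x50 patch.
def _example_crop_vs_roi(img):
    a = crop(img, 10, 20, 110, 70)    # start corner and end corner
    b = roi(img, 10, 20, 100, 50)     # start corner plus width/height
    return (a == b).all()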
def cropCnt(img, cnt):
"""
Crops a contour based on extrema
Params:
* img - Image
* cnt - contour
Returns:
* cropped image to contour
"""
pts = extremePoints(cnt)
    roi = crop(img, pts["L"][0], pts["T"][1], pts["R"][0], pts["B"][1])
return roi
def hsv(img):
"""
Returns a hsv version of img
Params:
* img - image
Returns:
    * HSV image if original is BGR
"""
if img is None:
print "Img is None"
sys.exit()
if len(img.shape) > 2:
return cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
return None
def grayscale(img):
"""
Returns a grayscale version of img
Params:
* img - image
Returns:
* Grayscale image
"""
if img is None:
print "Img is None"
sys.exit()
if len(img.shape) > 2:
return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
return img
def cvtColor(img, flag):
"""
Wrapper Function for cv2.cvtColor()
Used to change Color-space of Image
WARNING: No error checking is performed
Params:
* img - image
* flag - color flag, use colorFlags() to see list of flags available, cv2.FLAG
Returns:
* Image in new colorspace
"""
return cv2.cvtColor(img, flag)
def applyColorMap(img, flag):
"""
Apply a Color map to an img
Params:
* img - image
* flag - string flag or corresponding int found in getColorMapFlags()
Returns:
* img in color map
"""
if isinstance(flag, basestring):
flag = flag.lower()
gray = grayscale(img)
res = cv2.applyColorMap(gray, colorMap_flag[flag])
return res
else:
gray = grayscale(img)
return cv2.applyColorMap(gray, flag)
def applyJET(img):
"""
Apply a JET Color Map to img
Params:
* img - image
Returns:
* JET img
"""
return applyColorMap(img, "jet")
def applyHSV(img):
"""
Apply a HSV Color Map to img
Params:
* img - image
Returns:
* HSV img
"""
return applyColorMap(img, "hsv")
def getColorMapFlags():
"""
Returns the flags and corresponding int values
Returns:
    * dictionary of values to int codes
"""
return colorMap_flag
def bgr2hsv(BGR):
"""
    Finds the HSV value from a BGR value, useful for tracking
Params:
* BGR - BGR value i.e. [X, Y, X]
Returns:
* HSV - HSV value corresponding to it.
"""
color = np.uint8([[BGR]])
hsv_color = cv2.cvtColor(color, cv2.COLOR_BGR2HSV)
return hsv_color
def colorFlags(filterStr=""):
"""
Returns the list of acceptable flags for color conversion
used for change colormap fnc
Params:
* filter - OPTIONAL - filters flags to specified string
Returns:
* List of Color Flags
"""
filterStr = filterStr.upper()
flags = [i for i in dir(cv2) if i.startswith('COLOR_') and filterStr in i]
return flags
def trackObject(img, lower, upper):
"""
Isolates a Color within an Image for tracking
Useful for tracking certain colors
Use within while loop for video
Params:
* img - Color Image
* lower - Lower Threshold of Color, [X, Y, Z] format
* upper - Upper Threshold of Color, [X, Y, Z] format
Returns:
* Isolated img
"""
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_col = np.array(lower)
upper_col = np.array(upper)
mask = cv2.inRange(hsv, lower_col, upper_col)
res = cv2.bitwise_and(img, img, mask=mask)
return res
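# Usage sketch: isolating blue regions with trackObject(). Assumes img is a BGR color
# image; the HSV bounds below are an assumed rough range for blue and should be tuned
# for your camera and lighting.
def _example_trackBlue(img):
    lower_blue = [110, 50, 50]
    upper_blue = [130, 255, 255]
    return trackObject(img, lower_blue, upper_blue)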
def inRangeThresh(img, lower, upper):
"""
Thresholds an image between two color ranges, or scalars for grayscale
Params:
* img - Image to threshold
* lower - lower bound, either [X, Y, Z] for color or scalar for gray
* upper - upper bound, either [X, Y, Z] for color or scalar for gray
    Returns:
* In Range Thresholded Img
"""
if len(img.shape) == 2 and isinstance(lower, (int, long)) and isinstance(upper, (int, long)):
mask = cv2.inRange(img, np.array(lower), np.array(upper))
res = cv2.bitwise_and(img, img, mask=mask)
return res
elif len(img.shape) == 3 and isinstance(lower, (list, tuple)) and isinstance(upper, (list, tuple)):
lower_col = np.array(lower)
upper_col = np.array(upper)
mask = cv2.inRange(img, lower_col, upper_col)
res = cv2.bitwise_and(img, img, mask=mask)
return res
else:
print "ERROR: InRangeThresh: Invalid format for lower/upper"
sys.exit()
def display(img, name="IMAGE", wait=0):
"""
Displays an Image onto the screen and waits for user to close
Params:
* img - image to display
* name - OPTIONAL - string name of window, def is IMAGE
* wait - OPTIONAL - time in ms for screen to wait, def:0 - INDEFINITE
"""
cv2.namedWindow(name, cv2.WINDOW_NORMAL)
cv2.imshow(name, img)
cv2.waitKey(wait) & 0xFF
cv2.destroyAllWindows()
def centroid(cnt):
"""
Returns the centroid to a given contour
Params:
* cnt - contour
Returns:
* (cx, cy) -> Pixel Position of the Centroid
"""
M = cv2.moments(cnt)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
return (cx, cy)
def centroidFloat(cnt):
"""
Returns the centroid as a float for exactness
Params:
* cnt - contour
Returns:
* (cx, cy) -> centroid coord, as a float for exactness
"""
M = cv2.moments(cnt)
cx = M['m10']/M['m00']
cy = M['m01']/M['m00']
return (cx, cy)
def plotCentroid(img, cnt, radius = 3, color=(255, 255, 0)):
"""
Plots a centroid onto an image from a given contour
Params:
* cnt - contour
* img - image to modify
* color - OPTIONAL - specify color of centroid cross
Returns:
* Centroid Pixel Coord
"""
cx, cy = centroid(cnt)
drawCircle(img, (cx, cy), radius = radius, color = color)
return (cx, cy)
def sort(contours):
"""
Sorts a list of contours by number of points, in descending order
Params:
* contours - list of contours to sort
Returns:
* Sorted list of contour objects such that the largest contour is first
"""
return sorted(contours, reverse=True, key=len)
def sortList(lst, reverse=False, key=None):
"""
Sorts a given list
Params:
* lst - List to be sorted
* reverse - OPTIONAL - TRUE=List Reversed, FALSE = List Normal; def: False
* key - OPTIONAL - function used as comparision; def: None
Returns:
* Sorted List
"""
return sorted(lst, key=key, reverse=reverse)
def findContours(img, thresh=127, val=255):
"""
Returns the contours to a given image, sorted by size of contour
Params:
* img - Image to find contours in
    * thresh - OPTIONAL - threshold value used
* val - OPTIONAL - value to set items in threshold
Returns:
* Sorted List of Contours found in img
"""
gray = grayscale(img)
ret, binary = cv2.threshold(gray, thresh, val, cv2.THRESH_BINARY_INV)
bina, contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
errorCheckContour(contours)
return sort(contours)
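# Usage sketch: find all contours, outline the largest one and mark its centroid.
# Assumes the image contains at least one object darker than the threshold; drawContour()
# and plotCentroid() are defined later in this module.
def _example_largestContour(img):
    cnts = findContours(img)
    out = img.copy()
    drawContour(out, cnts[0])
    plotCentroid(out, cnts[0])
    return out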
def findCntHierarchy(img, thresh=127, val=255):
"""
Returns the contours to a given image, sorted by size of contour
Params:
* img - Image to find contours in
    * thresh - OPTIONAL - threshold value used
* val - OPTIONAL - value to set items in threshold
Returns:
* Hierarchy of Contours found in img
"""
gray = grayscale(img)
ret, binary = cv2.threshold(gray, thresh, val, cv2.THRESH_BINARY_INV)
    bina, contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
return hierarchy
def errorCheckContour(contours):
"""
Error checks the contours in images and stops program if something is wrong
Params:
* contours - list of contours to error check
"""
if contours is None:
print "ERROR: FindContours: Missing objects: No contours found, please check image...\n"
sys.exit() # maybe return false?
def contourApprox(cnt, epsilon = 0.005):
"""
    Approximates a contour to a given epsilon
Params:
* cnt - contour to approximate
* epsilon - OPTIONAL- percentage of arc length allowed as max distance from contour to approx
Returns:
* approximated contour
"""
epsilon = epsilon*cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
return approx
def aspectRatio(cnt):
"""
Returns the Aspect Ratio (Ratio of width to height of bounding rectangle)
Params:
* cnt - contour
Returns:
* Aspect Ratio of cnt
"""
x, y, w, h = cv2.boundingRect(cnt)
return float(w) / h
def extent(cnt):
"""
Returns the Extent (Ratio of contour area to bounding rectangle area)
Params:
* cnt - contour
Returns:
* Extent of cnt
"""
area = cv2.contourArea(cnt)
x, y, w, h = cv2.boundingRect(cnt)
rect_area = w*h
return float(area)/rect_area
def boundingRect(cnt):
"""
Calculates the up-right bounding rectangle of a point set / cnt
Params:
* cnt - Contour
Returns:
* Bounding Rectangle
"""
x, y, w, h = cv2.boundingRect(cnt)
return {"x":x, "y": y, "w": w, "h": h}
def boundingRectPoints(cnt):
"""
Returns the points to a rectangle bounding cnt
Params:
* cnt - Contour
Returns:
    * points to bounding rectangle
"""
x, y, w, h = cv2.boundingRect(cnt)
first = (x, y)
end = (x+w, y+h)
return {"top-left": first, "bottom-right":end}
def minEncloseCircle(cnt):
"""
Minimum Enclosing Circle
Params:
* cnt - Contour
Returns:
* Min Enclose Circle info
"""
(x, y), radius = cv2.minEnclosingCircle(cnt)
center = (int(x), int(y))
radius = int(radius)
return {"center" : center, "radius": radius}
def solidity(cnt):
"""
Returns the Solidity (Ratio of area to convex hull area)
Params:
* cnt - contour
Returns:
* Solidity of cnt
"""
area = cv2.contourArea(cnt)
hull = cv2.convexHull(cnt)
hull_area = cv2.contourArea(hull)
return float(area) / hull_area
def area(cnt):
"""
Returns area of cnt
Params:
* cnt - contour
Returns:
* Area of cnt
"""
return cv2.contourArea(cnt)
def moments(cnt):
"""
Returns the moments of a cnt
Params:
* cnt - contour
Returns:
* Dictionary of moments for a cnt
"""
return cv2.moments(cnt)
def huMoments(cnt):
"""
Returns the Hu Moments from moments
Params:
* cnt - Contour
Returns:
* Hu Moments
"""
return cv2.HuMoments(moments(cnt))
def perimeter(cnt):
"""
Returns the Contour Perimeter
Params:
* cnt - contour
Returns:
* Perimeter of cnt
"""
return cv2.arcLength(cnt, True)
def equivalentDiameter(cnt):
"""
Returns the Equivalent Diameter (Circle whose area is same as cnt area)
Params:
* cnt = contour
Returns:
* Equivalent Diameter
"""
return np.sqrt(4 * (cv2.contourArea(cnt)) / np.pi)
def fitEllipse(cnt):
"""
    Finds the nearest fitting ellipse to a contour
Params:
* cnt - contour
Returns:
    * (center, Major, Minor, angle) - in dictionary format
"""
(x,y), (MA, ma), angle = cv2.fitEllipse(cnt)
return {"center":(x,y), "major":MA, "minor":ma, "angle":angle}
def orientation(cnt):
"""
Returns the Orientation of the Object
Params:
* cnt - Contour
Returns:
* Angle at which object is directed
"""
(x,y), (MA, ma), angle = cv2.fitEllipse(cnt)
return angle
def axis(cnt):
"""
Returns the Major and Minor axis lengths of the nearest fitting ellipse for the contour
Params:
* cnt - contour
Returns:
* (Major Axis, Minor Axis) - lengths of Major and Minor Axis ellipse for a given contour
"""
(x,y), (MA, ma), angle = cv2.fitEllipse(cnt)
return {"major":MA, "minor":ma}
def drawContour(img, cnt, color=(0, 255, 0), thickness=2):
"""
Draws a contour onto an image
Params:
* img - image to draw on
* cnt - contour to draw
* color - OPTIONAL - color of cnt, default is (0, 255, 0)
* thickness - OPTIONAL - thickness of cnt line, default is 2
"""
cv2.drawContours(img, [cnt], 0, color, thickness)
def drawContours(img, cnt, color=(0, 255, 0), thickness=2):
"""
Draws all contours from a list
Params:
* img - image to draw on
* cnt - contours to draw
* color - OPTIONAL - color of cnt, default is (0, 255, 0)
* thickness - OPTIONAL - thickness of cnt line, default is 2
"""
cv2.drawContours(img, cnt, -1, color, thickness)
def extremePointsTup(cnt):
"""
Returns the Extreme Points of a contour
Params:
* cnt - contour
Returns:
* (LeftMost, RightMost, TopMost, BottomMost)
"""
leftmost = tuple(cnt[cnt[:,:,0].argmin()][0])
rightmost = tuple(cnt[cnt[:,:,0].argmax()][0])
topmost = tuple(cnt[cnt[:,:,1].argmin()][0])
bottommost = tuple(cnt[cnt[:,:,1].argmax()][0])
return (leftmost, rightmost, topmost, bottommost)
def extremePoints(cnt):
"""
Extreme Points Dictionary
Params:
* cnt - contour
Returns:
* Dictionary of extrema, such that L = Left, R = Right, T = Top, B = Bottom
"""
pts = extremePointsTup(cnt)
d = {"L":pts[0], "R":pts[1], "T":pts[2], "B":pts[3]}
return d
def cropCnt(img, cnt):
"""
Crops A Contour to a ROI by using extreme points to map values
Params:
* img - image
* cnt - contour
Returns:
* Crop ROI with Extrema as base points
"""
d = extremePoints(cnt)
roi = cropPt(img, (d["L"][0], d["T"][1]), (d["R"][0], d["B"][1]))
return roi
def plotPoint(img, point, radius = 3, color = (0, 0, 255)):
"""
Plots a single point onto an image
Params:
* img - image
* point - pixel tuple to plot at (x, y)
* radius - OPTIONAL - radius of point to be plotted
* color - OPTIONAL - color of pixel to be plotted
"""
drawCircle(img, point, radius = radius, color=color)
def plotPoints(img, points, radius = 3, color= (0, 0, 255)):
"""
Plots a list of points onto an img
Params:
* img - image
* points - list of points to plot
* radius - OPTIONAL - radius of points to be plotted
* color - OPTIONAL - color of points to plot, default is (0, 0, 255)
"""
for pt in points:
drawCircle(img, pt, radius = radius, color = color)
def drawCircle(img, center, radius = 3, color = (0,0,255), fill = -1):
"""
Draws a circle at a point with given radius onto img.
Params:
* img - image
* center - (x,y) center of circle
* radius - OPTIONAL - radius of circle, default is 3
* color - OPTIONAL - default is (0, 0, 255) - RED
* fill - OPTIONAL - default is -1, change for outline thickness
"""
cv2.circle(img, center, radius, color, fill)
def drawLine(img, start, end, color = (0,0,255), thickness = 3):
"""
Draws a line btw 2 points onto img.
Params:
* img - image
* start - start point
* end - end point
* color - OPTIONAL - default is (0, 0, 255) - RED
* thickness - OPTIONAL - default is 3, change for thickness
"""
cv2.line(img, start, end, color, thickness)
def drawRectangle(img, top_left, bottom_right, color = (0,0,255), thickness = 3):
"""
Draws a rectangle btw 2 points (Top Left and Bottom Right) onto img.
Params:
* img - image
* top_left - top left point of rectangle
* bottom_right - bottom right point of rectangle
* color - OPTIONAL - default is (0, 0, 255) - RED
* thickness - OPTIONAL - default is 3, change for thickness
"""
cv2.rectangle(img, top_left, bottom_right, color, thickness)
def drawEllipse(img, center, axes, angle, startAngle=0, endAngle=360, color = (0,0,255), fill = -1):
"""
Draws an ellipse onto img.
Params:
* img - image
* center - center of ellipse (x,y)
    * axes - (Major Axis Len, Minor Axis Len)
    * angle - angle of rotation
* startAngle - OPTIONAL - def is 0
* endAngle - OPTIONAL - def is 360
* color - OPTIONAL - default is (0, 0, 255) - RED
* fill - OPTIONAL - default is -1, change for thickness
"""
cv2.ellipse(img, center, axes, angle, startAngle, endAngle, color, fill)
def drawFitEllipse(img, cnt):
"""
Draws a nearest fitting ellipse to a contour
Params:
* img - image to draw on
* cnt - contour
"""
    box = cv2.fitEllipse(cnt)
    cv2.ellipse(img, box, (0, 255, 0), 2)
def imgProp(img):
"""
Returns properties of an image in a dictionary format
Params:
* img - image
Returns:
* dictionary of properties "shape, rows, columns, channels, size, dtype"
"""
d = {}
d["shape"] = img.shape
d["rows"] = img.shape[0]
d["columns"] = img.shape[1]
    if len(img.shape) == 3:
d["channels"] = img.shape[2]
d["size"] = img.size
d["dtype"] = img.dtype
return d
def size(img):
"""
Returns the Number of pixels in an image
Params:
* img - image
Returns:
* size of image in pixels
"""
return img.size
def pointPolygonTest(cnt, point, distance = True):
"""
Point Polygon Test - Finds the Shortest Distance between a point in the image and a contour
Params:
* cnt - contour
* point - point coordinates
* distance - OPTIONAL - True if signed distance, False for Inside/Outside/On cnt
Returns:
* Relation of point to cnt
"""
return cv2.pointPolygonTest(cnt, point, distance)
def matchShapesImages(img1, img2):
"""
Takes two images and compares them.
Params:
* img1 - first image
* img2 - second image
Returns:
* metric showing similarity, lower the result, better the match
"""
cnt1 = findContours(img1)[0]
cnt2 = findContours(img2)[0]
ret = cv2.matchShapes(cnt1, cnt2, 1, 0.0)
return ret
def matchShapes(cnt1, cnt2):
"""
Takes two contours and compares them.
Params:
* cnt1 - first contour
* cnt2 - second contour
Returns:
* metric showing similarity, lower the result, better the match
"""
ret = cv2.matchShapes(cnt1, cnt2, 1, 0.0)
return ret
def binaryThreshold(img, threshVal = 127, maxVal = 255, invert=True):
"""
Returns a binary threshold of the image
Params:
* img - image to threshold
* threshVal - OPTIONAL - threshold value to classify pixels Default 127
* maxVal - OPTIONAL - value to be given if pixel over threshVal. Default 255
* invert - OPTIONAL - True means inverts binary threshold (white to black, vice versa), false otherwise. Default True
Returns:
* threshold image
"""
gray = grayscale(img)
if invert:
        ret, thresh = cv2.threshold(gray, threshVal, maxVal, cv2.THRESH_BINARY_INV)
else:
        ret, thresh = cv2.threshold(gray, threshVal, maxVal, cv2.THRESH_BINARY)
return thresh
def adaptiveMeanThreshold(img):
"""
Returns an img after Adaptive Mean Thresholding
Params:
* img - image
Returns:
* Image that has undergone Adaptive Mean Thresholding
"""
gray = grayscale(img)
gray = cv2.medianBlur(gray, 5)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
return thresh
def adaptiveGaussianThreshold(img):
"""
Returns an img after Adaptive Gaussian Thresholding
Params:
* img - image
Returns:
* Image that has undergone Adaptive Gaussian Thresholding
"""
gray = grayscale(img)
gray = cv2.medianBlur(gray, 5)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
return thresh
def otsu(img, invert=False):
"""
Returns an img after Otsu Binarization
Params:
* img - image
* invert - OPTIONAL - uses inverse binary if True, def: False
Returns:
* An Otsu Binarization of the img
"""
gray = grayscale(img)
blur = cv2.GaussianBlur(gray, (5,5), 0)
if invert:
ret, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
return thresh
else:
ret, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
return thresh
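# Usage sketch: compare the three thresholding helpers on the same image.
# displayImgs() (defined later in this module) blocks until a key is pressed.
def _example_thresholds(img):
    imgs = [binaryThreshold(img), adaptiveMeanThreshold(img), otsu(img)]
    displayImgs(imgs, titles=["binary", "adaptive mean", "otsu"])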
def rotate(img, deg, center=None):
"""
Rotates the image by a given degree
Params:
* img - image
* deg - degrees to rotate
* center - OPTIONAL - coordinate to rotate around, def is center of img
Returns:
* rotated img
"""
gray = grayscale(img)
rows, cols = gray.shape
if center is None:
M = cv2.getRotationMatrix2D((cols/2, rows/2), deg, 1)
else:
M = cv2.getRotationMatrix2D(center, deg, 1)
dst = cv2.warpAffine(gray, M, (cols, rows))
return dst
def edgeDetect(img, minVal=100, maxVal=200):
"""
Canny Edge Detection - Returns an img with only edges
Params:
* img - image
* minVal - OPTIONAL - Minimum Threshold
* maxVal - OPTIONAL - Maximum Threshold
Returns:
* Image with only Edges
"""
gray = grayscale(img)
edges = cv2.Canny(gray, minVal, maxVal, True)
return edges
def eqHist(img):
"""
Histogram Equalization - Improves the contrast of an image via global contrast
Params:
* img - image
Returns:
* Equalized Image
"""
gray = grayscale(img)
equ = cv2.equalizeHist(gray)
return equ
def adaptiveEqHist(img, clipLimit=2.0, tileGridSize=(8,8)):
"""
CLAHE (Contrast Limited Adaptive Histogram Equalization) - Improves contrast via Adaptive Histogram Equalization
Params:
* img - image
* clipLimit - OPTIONAL - contrast limit to clip pixels before hist eq, def is 2.0
* tileGridSize - (row, col) blocks that are equalized - def is (8,8)
Returns:
* Equalized Image
"""
gray = grayscale(img)
clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
cl1 = clahe.apply(gray)
return cl1
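# Usage sketch: global histogram equalization versus CLAHE on the same image,
# shown side by side for a quick visual comparison.
def _example_contrast(img):
    displayImgs([grayscale(img), eqHist(img), adaptiveEqHist(img)],
                titles=["original", "global eq", "CLAHE"])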
def load(filename, flag=None):
"""
Loads an image from a file name and returns it in color mode (default).
Safety check if img fails to load.
Params:
* filename - filename of image
* flag - OPTIONAL - open mode: 1 = COLOR, 0 = GRAYSCALE, -1 = UNCHANGED
Returns:
    * Loaded Img if successful
"""
if flag is None:
img = cv2.imread(filename)
    elif flag == 1 or flag == 0 or flag == -1:
img = cv2.imread(filename, flag)
else:
print "ERROR: Load: Incorrect flag parameter: " + str(flag) + "\n"
sys.exit()
if img is None:
print "ERROR: Load: Image not found/supported at: " + str(filename) + "\n"
sys.exit()
else:
return img
"""
def fetchImg(SYS, DEV):
\"""
Loads an image from an EPICS PV Value
Params:
* SYS - System String
* DEV - Device String
Returns:
* Loaded Image
\"""
SYSDEV = str(SYS) + "{" + str(DEV) + "}"
data = caget(SYSDEV + "image1:ArrayData")
rows = caget(SYSDEV + "image1:ArraySize1_RBV")
cols = caget(SYSDEV + "image1:ArraySize0_RBV")
dtype = caget(SYSDEV + "cam1:DataType_RBV")
color = caget(SYSDEV + "cam1:ColorMode_RBV")
count = 0
img = []
row = []
dtype = EPICSTYPE[caget(SYSDEV + "cam1:DataType_RBV")]
color = caget(SYSDEV + "cam1:ColorMode_RBV")
for i in range(rows):
for j in range(cols):
row.append(data[count])
count = count + 1
r = np.array(row, dtype)
img.append(r)
row = []
npra = np.array(img, dtype)
save(npra, "fetchImg.jpg") # Might need to change file type
img = load("fetchImg.jpg") # Might need to change file type
return npra
def epicscaget(PV):
\"""
Retrieves and Returns the value of the named PV
Params:
* PV - PV name to get
Returns:
* Value of PV
\"""
return caget(PV)
def epicscaput(PV, value):
\"""
Sets the Value of the Named PV with Value
Params:
* PV - PV to set
* value - Value to set PV to
\"""
caput(PV, value)
def epicscainfo(PV):
\"""
Returns a string of info about a PV
Params:
* PV - The PV Value to retrieve info from
Returns:
* String of info about PV
\"""
return cainfo(PV, False)
"""
def getColorFlag(color):
"""
Retrieves the color flags associated with img with Epics
Params:
* color - COLOR CODE
Returns:
* CV Color Code
"""
if color == 0: # MONO
return 0
elif color == 1: # BAYER
return -1
elif color == 2: # AS IS RBG
return 1
def backProjection(roi, target):
"""
Backprojection - Finds objects of interest in an image
Params:
* roi - Region of Interest Img
* target - target img
Returns:
* Img of Result
"""
hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
hsvt = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)
roihist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
cv2.normalize(roihist, roihist, 0, 255, cv2.NORM_MINMAX)
dst = cv2.calcBackProject([hsvt], [0, 1], roihist, [0, 180, 0, 256], 1)
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
cv2.filter2D(dst, -1, disc, dst)
ret, thresh = cv2.threshold(dst, 50, 255, 0)
thresh = cv2.merge((thresh, thresh, thresh))
res = cv2.bitwise_and(target, thresh)
return res
def templateMatchSingle(img, template):
"""
Template Matching - Single Object, will find the points to draw a rectangle over given template
Params:
* img - image
* template - template image to match
Returns:
* (top left, bottom right) - points of a rectangle
"""
img = grayscale(img)
template = grayscale(template)
w, h = template.shape[::-1]
res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
return top_left, bottom_right
def drawMatch(img, template, color=(255,255,0), thickness=2):
"""
Draws a rectangle over a matching pattern in img from template
Params:
* img - image
* template - pattern to search for
* color - OPTIONAL - color; def: (255,255,0)
* thickness - OPTIONAL - thickness of rectangle; def: 2
Returns:
* img with rectangle over object
"""
tmp = img.copy()
tl, br = templateMatchSingle(tmp, template)
cv2.rectangle(tmp, tl, br, color, thickness)
return tmp
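# Usage sketch: locate a template inside a scene and show the match.
# "scene.jpg" and "logo.jpg" are hypothetical file names used only for illustration.
def _example_templateMatch():
    scene = load("scene.jpg")
    logo = load("logo.jpg")
    display(drawMatch(scene, logo))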
def save(img, filename=None):
"""
Saves an image to a file
Params:
* img - image to save
* filename - OPTIONAL - file to save to, def is TYEARMONTHDAY.jpg
"""
if filename is None:
date = time.strftime("%Y%m%d")
filename = "T" + str(date) + ".jpg"
cv2.imwrite(filename, img)
else:
cv2.imwrite(filename, img)
def getSupportedFileFormats():
"""
Returns Supported File Formats for Reading Images
Returns:
* Dictionary of Supported File Formats and Extensions
"""
return {"Bitmap":["*.bmp", "*.dib"], "JPEG": ["*.jpeg", "*.jpg", "*.jpe"], "JPEG 2000": ["*.jp2"],"Portable Network Graphics" : ["*.png"], "WebP": ["*.webp"], "Portable Image Formats":["*.pbm", "*.pgm", "*.ppm"], "Sun Rasters":["*.sr", "*.ras"], "TIFF Files": ["*.tiff","*.tif"] }
def saveImgs(img, filename=None):
"""
Saves a list of images to a file
Params:
* img - images to save
* filename - OPTIONAL - file names to save to, def is TYEARMONTHDAYITEM#.jpg
"""
if filename is None:
date = time.strftime("%Y%m%d")
filename = "T" + str(date)
jpg = ".jpg"
count = 0
for item in img:
name = filename + str(count) + jpg
cv2.imwrite(name, item)
count += 1
else:
for i in range(0, len(img)):
cv2.imwrite(filename[i], img[i])
def templateMatchMulti(img, template):
"""
Template Matching - Multiple Objects
Params:
* img - image
* template - image to search for
Returns:
* lst of points in [(tl, br), (tl+w, br+h)] format for rectangles
"""
gray = grayscale(img)
temp = grayscale(template)
w, h = temp.shape[::-1]
res = cv2.matchTemplate(gray, temp, cv2.TM_CCOEFF_NORMED)
threshold = 0.8
loc = np.where(res >= threshold)
pts = []
for pt in zip(*loc[::-1]):
rect = [pt, (pt[0] + w, pt[1] + h)]
pts.append(rect)
return pts
def drawMatchMulti(img, template, color = (0,0,255), thickness = 2):
"""
Draws a rectangle over each instance of an object it finds
Params:
* img - image
* template - template to search for
* color - OPTIONAL - def (0,0,255)
* thickness - OPTIONAL - def 2
"""
tmp = img.copy()
gray = grayscale(img)
temp = grayscale(template)
w, h = temp.shape[::-1]
res = cv2.matchTemplate(gray, temp, cv2.TM_CCOEFF_NORMED)
threshold = 0.8
loc = np.where(res >= threshold)
for pt in zip(*loc[::-1]):
cv2.rectangle(tmp, pt, (pt[0] + w, pt[1] + h), color, thickness)
return tmp
def resize(img, width, height):
"""
Resize an Image
Params:
* img - image
* width - new width
* height - new height
Returns:
* resized image
"""
tmp = img.copy()
res = cv2.resize(tmp, (width, height), interpolation = cv2.INTER_CUBIC)
return res
def translate(img, shift):
"""
Translates a image by shift (x, y)
Params:
* img - image
* shift - (x, y) tuple that has the shift direction in the x and y directions
Returns:
* Translated image
"""
gray = grayscale(img)
tmp = img.copy()
rows, cols = gray.shape
M = np.float32([[1, 0, shift[0]], [0, 1, shift[1]]]) # Translation Matrix
dst = cv2.warpAffine(tmp, M, (cols, rows))
return dst
def affineTransform(img, pts, newPts):
"""
Affine Transformation - translates a list of points to a new set of points while keeping
all parallel lines in original img parallel in output
Params:
* img - image
* pts - points of reference in original image
* newPts - points to be translated to
Returns:
* Shifted img under an Affine Transformation
"""
tmp = img.copy()
    if len(img.shape) == 3:
rows, cols, ch = img.shape
else:
rows, cols = img.shape
pts1 = np.float32(pts)
pts2 = np.float32(newPts)
M = cv2.getAffineTransform(pts1, pts2)
dst = cv2.warpAffine(tmp, M, (cols, rows))
return dst
def perspectiveTransform(img, pts, newPts, size=None):
"""
Perspective Transformation - Translates pts to newPts while keeping all lines straight
Params:
* img - image
* pts - 4 points on input image
* newPts - 4 points to be shifted to
* size - OPTIONAL - new size of output image (x,y), def is max newPts
Returns:
Shifted img under Perspective Transformation
"""
args = len(img.shape)
tmp = img.copy()
    if args == 3:
rows, cols, ch = img.shape
else:
rows, cols = img.shape
pts1 = np.float32(pts)
pts2 = np.float32(newPts)
if size is None:
        xy = zip(*newPts)
pt = map(max, xy)
size =(pt[0], pt[1])
M = cv2.getPerspectiveTransform(pts1, pts2)
dst = cv2.warpPerspective(tmp, M, size)
return dst
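# Usage sketch: warp a skewed quadrilateral into a 300x300 square.
# The four source corners are made-up example coordinates.
def _example_perspective(img):
    src = [(56, 65), (368, 52), (28, 387), (389, 390)]
    dst = [(0, 0), (300, 0), (0, 300), (300, 300)]
    return perspectiveTransform(img, src, dst, size=(300, 300))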
def displayImgs(imgs, titles = None, wait=0):
"""
Displays a list of images
    Params:
* imgs - list of images
* titles - OPTIONAL - list of titles of images
    * wait - OPTIONAL - wait time in ms for screen; def: 0 - INDEFINITE
"""
if len(imgs) > 100:
print "WARNING: DisplayImgs: List is of length " + str(len(imgs))
print "Please reduce list size to avoid improper display"
return
if titles is None:
count = 1
for i in imgs:
cv2.namedWindow("IMAGE" + str(count), cv2.WINDOW_NORMAL)
cv2.imshow("IMAGE" + str(count), i)
count += 1
else:
count = 0
for i in imgs:
cv2.namedWindow(titles[count], cv2.WINDOW_NORMAL)
cv2.imshow(titles[count], i)
count += 1
cv2.waitKey(wait) & 0xFF
cv2.destroyAllWindows()
def filter2D(img, kernel = (5,5)):
"""
2D Convolution - Image Filtering
Params:
* img - image to be filtered
* kernel - OPTIONAL - size of average filtering kernel, def is (5,5)
Returns:
* Filtered Image
"""
tmp = img.copy()
k = np.ones((kernel[0], kernel[1]), np.float32) / (kernel[0]*kernel[1])
dst = cv2.filter2D(tmp, -1, k)
return dst
def blur(img, kernel = (5,5)):
"""
Image Blur
Params:
* img - image
* kernel - OPTIONAL - size of kernel, def is (5,5)
Returns:
* Blurred Image
"""
tmp = img.copy()
blur = cv2.blur(tmp, kernel)
return blur
def gaussianBlur(img, kernel = (5,5)):
"""
Gaussian Filtering/Blur
Params:
* img - image
* kernel - OPTIONAL - kernel size, def is (5,5)
Returns:
* Gaussian Blurred Image
"""
tmp = img.copy()
blur = cv2.GaussianBlur(tmp, kernel, 0)
return blur
def medianBlur(img):
"""
Median Filtering
Params:
* img - Image
Returns:
* Median Filtered Img
"""
tmp = img.copy()
return cv2.medianBlur(tmp, 5)
def bilateralFilter(img, d=9, sigmaColor=75, sigmaSpace=75):
"""
Bilateral Filtering
Params:
* img - Image
* d - OPTIONAL - diameter of each pixel neighborhood. def: 9
* sigmaColor - OPTIONAL - Filter sigma in color space. def: 75
* sigmaSpace - OPTIONAL - Filter sigma in coord space. def: 75
Returns:
* Bilateral Filtered Img
"""
tmp = img.copy()
return cv2.bilateralFilter(tmp, d, sigmaColor, sigmaSpace)
def adaptiveBilateralFilter(img, ksize=(5,5), sigmaSpace=None):
"""
Adaptive Bilateral Filtering
Params:
* img - image
* ksize - OPTIONAL - Kernal Size, def: (5,5)
* sigmaSpace - OPTIONAL - Filter sigma in coord space. def: None
"""
    if sigmaSpace is None:
        # cv2.adaptiveBilateralFilter requires sigmaSpace; a moderate default is assumed here
        sigmaSpace = 10.0
    return cv2.adaptiveBilateralFilter(img, ksize, sigmaSpace)
def erode(img, kernel = (5,5), iterations = 1):
"""
Erosion - Discard boundary pixels depending on kernel size
Params:
* img - image
* kernel - OPTIONAL - kernel size in tuple, def is (5,5)
* iterations - OPTIONAL - number of iterations for erosion, def is 1
Returns:
* eroded img
"""
tmp = grayscale(img)
k = np.ones(kernel, np.uint8)
erosion = cv2.erode(tmp, k, iterations= iterations)
return erosion
def dilate(img, kernel = (5,5), iterations = 1):
"""
Dilation - Increases white region, size of foreground increases
Params:
* img - image
* kernel - OPTIONAL - kernel size in tuple, def is (5,5)
* iterations - OPTIONAL - number of iterations for dilate, def is 1
Returns:
* dilated img
"""
tmp = grayscale(img)
k = np.ones(kernel, np.uint8)
dilation = cv2.dilate(tmp, k, iterations = iterations)
return dilation
def opening(img, kernel = (5,5)):
"""
Opening - Erosion followed by dilation. Useful for removing noise
Params:
* img - image
* kernel - OPTIONAL - kernel size in tuple, def is (5,5)
Returns:
* opened img
"""
tmp = grayscale(img)
k = np.ones(kernel, np.uint8)
return cv2.morphologyEx(tmp, cv2.MORPH_OPEN, k)
def closing(img, kernel = (5,5)):
"""
Closing - Dilation followed by Erosion.
Useful for closing small holes inside foreground objects,
or small black points on object
Params:
* img - image
* kernel - OPTIONAL - kernel size in tuple, def is (5,5)
Returns:
* closed img
"""
tmp = grayscale(img)
k = np.ones(kernel, np.uint8)
return cv2.morphologyEx(tmp, cv2.MORPH_CLOSE, k)
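# Usage sketch: a common binary clean-up pipeline, opening with a small kernel to
# remove speckle noise, then closing with a larger kernel to fill small holes.
# Kernel sizes are assumed starting values and may need tuning.
def _example_cleanMask(img):
    return closing(opening(img, kernel=(3, 3)), kernel=(7, 7))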
def gradient(img, kernel = (5,5)):
"""
Morphological Gradient - Difference btw Dilation and Erosion of img.
Usually results in outline of object
Params:
* img - image
* kernel - OPTIONAL - kernel size in tuple, def is (5,5)
Returns:
* Gradient Img
"""
tmp = grayscale(img)
k = np.ones(kernel, np.uint8)
return cv2.morphologyEx(tmp, cv2.MORPH_GRADIENT, k)
def tophat(img, kernel = (5,5)):
"""
Top Hat - Difference btw input img and Opening of img
Params:
* img - image
* kernel - OPTIONAL - kernel size in tuple, def is (5,5)
Returns:
* TopHat Img
"""
tmp = grayscale(img)
k = np.ones(kernel, np.uint8)
return cv2.morphologyEx(tmp, cv2.MORPH_TOPHAT, k)
def blackhat(img, kernel = (5,5)):
"""
Black Hat - Difference btw closing of img and input img
Params:
* img - image
* kernel - OPTIONAL - kernel size in tuple, def is (5,5)
Returns:
* BlackHat Img
"""
tmp = grayscale(img)
k = np.ones(kernel, np.uint8)
return cv2.morphologyEx(tmp, cv2.MORPH_BLACKHAT, k)
def hitAndMiss(img, kernel = (5,5)):
    """
    Hit And Miss Morphological Transform
    Params:
    * img - Image
    * kernel - OPTIONAL - kernel size in tuple, def is (5,5)
    Returns:
    * HitAndMiss Img
    """
    tmp = grayscale(img)
    k = np.ones(kernel, np.uint8)
    return cv2.morphologyEx(tmp, cv2.MORPH_HITMISS, k)
def getRectangularKernel(size = (5,5)):
"""
Get a rectangular kernel
Params:
* size - tuple of size of requested kernel, def is (5,5)
Returns:
* desired kernel
"""
return cv2.getStructuringElement(cv2.MORPH_RECT, size)
def getEllipticalKernel(size = (5,5)):
"""
Get a elliptical kernel
Params:
* size - tuple of size of requested kernel, def is (5,5)
Returns:
* desired kernel
"""
return cv2.getStructuringElement(cv2.MORPH_ELLIPSE, size)
def getCrossKernel(size = (5,5)):
"""
Get a cross-shaped kernel
Params:
* size - tuple of size of requested kernel, def is (5,5)
Returns:
* desired kernel
"""
return cv2.getStructuringElement(cv2.MORPH_CROSS, size)
def laplacian(img):
"""
Laplacian Image Gradient
Params:
* img -image
Returns:
* Laplacian Image
"""
gray = grayscale(img)
return cv2.Laplacian(gray, cv2.CV_64F)
def sobelx(img, ksize=5):
"""
Sobel X Image Gradient
Params:
* img -image
* ksize - OPTIONAL - Kernel Size, def is 5
Returns:
* Sobel X Image
"""
gray = grayscale(img)
return cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize= ksize)
def sobely(img, ksize=5):
"""
Sobel Y Image Gradient
Params:
* img -image
* ksize - OPTIONAL - Kernel Size, def is 5
Returns:
* Sobel Y Image
"""
gray = grayscale(img)
return cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize= ksize)
def sobelxy(img, ksize=5):
"""
Sobel XY Image Gradient
Params:
* img -image
* ksize - OPTIONAL - Kernel Size, def is 5
Returns:
* Sobel XY Image
"""
gray = grayscale(img)
return cv2.Sobel(gray, cv2.CV_64F, 1, 1, ksize= ksize)
def drawHoughLines(img, rho=1, theta=np.pi/180, color=(0,0,255), threshold=200, thickness=2):
"""
Draws the Found Lines onto the Image
NOTE: Play around with params to get what you need, each img has different req params
Params:
* img - image
* rho- OPTIONAL - radius, measured in pixels, def is 1
* theta - OPTIONAL - angle, measured in radians, def is np.pi/180
* color - OPTIONAL - def (0,0,255)
* threshold - OPTIONAL - Minimum Length of Line, def 200
* thickness - OPTIONAL - def 2
Returns:
* Image with lines plotted
"""
tmp = img.copy()
gray = grayscale(img)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
lines = cv2.HoughLines(edges, rho, theta, threshold)
if lines is None:
print "No lines found, please adjust params...\n"
return None
for rho, theta in lines[0]:
a = np.cos(theta)
b = np.sin(theta)
x0=a*rho
y0=b*rho
x1 = int(x0 + img.shape[1]*(-b))
y1 = int(y0 + img.shape[1]*(a))
x2 = int(x0 - img.shape[1]*(-b))
y2 = int(y0 - img.shape[1]*(a))
cv2.line(tmp, (x1, y1), (x2,y2), color, thickness)
return tmp
def houghLines(img, rho=1, theta=np.pi/180, threshold=200):
"""
Hough Lines Detection Method
NOTE: Play around with params to get what you need, each img has different req params
Params:
* img - image
* rho- OPTIONAL - radius, measured in pixels, def is 1
* theta - OPTIONAL - angle, measured in radians, def is np.pi/180
* threshold - OPTIONAL - Minimum Length of Line, def 200
Returns:
* List of points in [(start, end)] format
"""
tmp = img.copy()
gray = grayscale(img)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
lines = cv2.HoughLines(edges, rho, theta, threshold)
if lines is None:
print "No lines found, please adjust params...\n"
return None
pts = []
for rho, theta in lines[0]:
a = np.cos(theta)
b = np.sin(theta)
x0=a*rho
y0=b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
pts.append([(x1,y1), (x2,y2)])
return pts
def drawHoughLinesProb(img, minLineLength=100, maxLineGap = 10, color=(0,255,0), thickness=2):
"""
Draws the result of Probabilistic Hough Line Transform onto img
NOTE: Play around with params to get what you need, each img has different req params
Params:
* img - image
* minLineLength - OPTIONAL - Min length of line, any lines shorter rejected, def is 100
* maxLineGap - OPTIONAL - Max allowed gap btw line segments to treat as single line, def is 10
* color - OPTIONAL - color of line, def is (0,255,0)
* thickness - OPTIONAL - def is 2
Returns:
* img with lines draw onto it
"""
gray = grayscale(img)
tmp = img.copy()
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    lines = cv2.HoughLinesP(edges, 1, np.pi/180, 100, minLineLength=minLineLength, maxLineGap=maxLineGap)
if lines is None:
print "No lines found, please adjust params...\n"
return None
for x1, y1, x2, y2 in lines[0]:
cv2.line(tmp, (x1,y1), (x2,y2), color, thickness)
return tmp
def houghLinesProb(img, minLineLength=100, maxLineGap = 10):
"""
Returns the result of Probabilistic Hough Line Transform from img
NOTE: Play around with params to get what you need, each img has different req params
Params:
* img - image
* minLineLength - OPTIONAL - Min length of line, any lines shorter rejected, def is 100
* maxLineGap - OPTIONAL - Max allowed gap btw line segments to treat as single line, def is 10
Returns:
* List of lines in [(start, end)] format
"""
gray = grayscale(img)
tmp = img.copy()
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    lines = cv2.HoughLinesP(edges, 1, np.pi/180, 100, minLineLength=minLineLength, maxLineGap=maxLineGap)
if lines is None:
print "No lines found, please adjust params...\n"
return None
pts = []
for x1, y1, x2, y2 in lines[0]:
pts.append([(x1,y1), (x2,y2)])
return pts
def drawHoughCircles(img, minDist=20, param1=50, param2=30, minRadius=0, maxRadius=0 ,colorCircle=(0,255,0), colorCenter=(0,0,255), centerRadius=2 , thickness=2):
"""
Hough Circle Transform - Finds Circles in an image and draws it
NOTE: Play around with params to get what you need, each img has different req params
Params:
* img - image
* minDist - OPTIONAL - Minimum Distance btw centers of detected circles, def is 20
* param1 - OPTIONAL - Higher Threshold of two sent to Canny edge detector, def is 50
* param2 - OPTIONAL - Accumulator Threshold for the circle centers at detection stage, def is 30
* minRadius - OPTIONAL - Minimum Circle Radius, def is 0
* maxRadius - OPTIONAL - Maximum Circle Radius, def is 0
* colorCircle - OPTIONAL - color of circle, def is (0,255,0)
* colorCenter - OPTIONAL - color of circle's center, def is (0,0,255)
* centerRadius - OPTIONAL - radius of center
* thickness - OPTIONAL - thickness of circles
Returns:
* Image with circles and their centers drawn
"""
tmp = grayscale(img)
tmp = cv2.medianBlur(tmp, 5)
cimg = cv2.cvtColor(tmp, cv2.COLOR_GRAY2BGR)
circles = cv2.HoughCircles(tmp, cv2.HOUGH_GRADIENT, 1, minDist, param1=param1, param2=param2, minRadius=minRadius, maxRadius=maxRadius)
if circles is None:
print "No circles found, please adjust params...\n"
return None
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
cv2.circle(cimg, (i[0],i[1]),i[2], colorCircle, thickness)
cv2.circle(cimg, (i[0],i[1]), centerRadius, colorCenter, thickness)
return cimg
def houghCircles(img, minDist=20, param1=50, param2=30, minRadius=0, maxRadius=0):
"""
Hough Circle Transform - Finds Circles in an image and draws it
NOTE: Play around with params to get what you need, each img has different req params
Params:
* img - image
* minDist - OPTIONAL - Minimum Distance btw centers of detected circles, def is 20
* param1 - OPTIONAL - Higher Threshold of two sent to Canny edge detector, def is 50
* param2 - OPTIONAL - Accumulator Threshold for the circle centers at detection stage, def is 30
* minRadius - OPTIONAL - Minimum Circle Radius, def is 0
* maxRadius - OPTIONAL - Maximum Circle Radius, def is 0
Returns:
* List of circles in (x, y, radius) format
"""
tmp = grayscale(img)
tmp = cv2.medianBlur(tmp, 5)
cimg = cv2.cvtColor(tmp, cv2.COLOR_GRAY2BGR)
circles = cv2.HoughCircles(tmp, cv2.HOUGH_GRADIENT, 1, minDist, param1=param1, param2=param2, minRadius=minRadius, maxRadius=maxRadius)
if circles is None:
print "No circles found, please adjust params...\n"
return None
circles = np.uint16(np.around(circles))
return circles
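# Usage sketch: report the centre and radius of every detected circle.
# The parameter values are assumed starting points and usually need per-image tuning.
def _example_circles(img):
    circles = houghCircles(img, minDist=30, param1=60, param2=40)
    if circles is None:
        return
    for x, y, r in circles[0, :]:
        print "circle at (%d, %d) radius %d" % (x, y, r)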
def orb(img):
"""
ORB Corner Detection - Detects Corners
Params:
* img - image
Returns:
* key points
"""
gray = grayscale(img)
orb = cv2.ORB()
kp = orb.detect(gray, None)
    kp, des = orb.compute(gray, kp)
return kp
def drawOrb(img, color = (0,255,0)):
"""
ORB Corner Detection
Params:
* img - image
    * color - OPTIONAL - color, def is (0,255,0)
    Returns:
    * img with key points drawn on
"""
gray = grayscale(img)
orb = cv2.ORB()
kp = orb.detect(gray, None)
    kp, des = orb.compute(gray, kp)
img2 = cv2.drawKeypoints(gray, kp, color=color, flags=0)
return img2
def harrisCorner(img, blockSize=2, ksize=3, k=0.04, color=(0,0,255)):
"""
Harris Corner Detection
Params:
* img - image
* blockSize - OPTIONAL - size of neighborhood considered for corner detection
* ksize - OPTIONAL - Aperture parameter of Sobel derivative used
* k - OPTIONAL - Harris detector free parameter in equation
* color - OPTIONAL - def is (0,0,255)
Returns:
* Image with corners marked.
"""
tmp = img.copy()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
dst = cv2.cornerHarris(gray,blockSize, ksize, k)
dst = cv2.dilate(dst, None)
tmp[dst>0.01*dst.max()] = color
return tmp
def harrisSubPixel(img, blockSize=2, ksize=3, k=0.04):
"""
Harris Corner Detection with SubPixel Accuracy
Params:
* img - image
* blockSize - OPTIONAL - size of neighborhood considered for corner detection
* ksize - OPTIONAL - Aperture parameter of Sobel derivative used
* k - OPTIONAL - Harris detector free parameter in equation
Returns:
* Corners.
"""
tmp = img.copy()
gray = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
dst = cv2.cornerHarris(gray, blockSize, ksize, k)
dst = cv2.dilate(dst, None)
ret, dst = cv2.threshold(dst, 0.01*dst.max(), 255, 0)
dst = np.uint8(dst)
ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
corners = cv2.cornerSubPix(gray, np.float32(centroids), (5,5), (-1,-1), criteria)
return corners
def goodFeaturesToTrack(img, numCorners=25, quality=0.01, minDist=10, radius=3, color=(0,0,255)):
"""
Good Features to Track Corner Detection Shi Tomasi
Params:
* img - image
* numCorners - OPTIONAL - Number of Corners, def is 25
* quality - OPTIONAL - min quality of corner, btw 0-1, def is 0.01
* minDist - OPTIONAL - Min Euclidean Dist btw Corners Detected
* radius - OPTIONAL - radius, def is 3
* color - OPTIONAL - def is (0,0,255)
Returns:
* img with corners marked
"""
tmp = img.copy()
gray = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(gray, numCorners, quality, minDist)
corners = np.int0(corners)
for i in corners:
x,y = i.ravel()
cv2.circle(tmp, (x,y), radius, color, -1)
return tmp
def goodFeaturesToTrackPts(img, numCorners=25, quality=0.01, minDist=10, radius=3, color=(0,0,255)):
"""
Good Features to Track Corner Detection Shi Tomasi
Params:
* img - image
* numCorners - OPTIONAL - Number of Corners, def is 25
* quality - OPTIONAL - min quality of corner, btw 0-1, def is 0.01
* minDist - OPTIONAL - Min Euclidean Dist btw Corners Detected
* radius - OPTIONAL - radius, def is 3
* color - OPTIONAL - def is (0,0,255)
Returns:
* corners detected
"""
tmp = img.copy()
gray = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(gray, numCorners, quality, minDist)
corners = np.int0(corners)
return corners
def fast(img, nonmaxSuppression = True):
"""
FAST Algorithm for Corner Detection - Returns Key Points
Params:
* img - image
* nonmaxSuppression - OPTIONAL - Suppresses number of points if True, def is True
Returns:
* Key Points
"""
gray = grayscale(img)
fast = cv2.FastFeatureDetector()
if not nonmaxSuppression:
fast.setBool('nonmaxSuppression', 0)
    kp = fast.detect(gray, None)
return kp
def drawFast(img, nonmaxSuppression=True, color=(255,0,0)):
"""
FAST Algorithm for Corner Detection - Returns Key Points
Params:
* img - image
* nonmaxSuppression - OPTIONAL - Suppresses number of points if True, def is True
* color - OPTIONAL - def: (255,0,0)
Returns:
* Key Points drawn onto img
"""
tmp = img.copy()
kp = fast(tmp, nonmaxSuppression=nonmaxSuppression)
tmp = cv2.drawKeypoints(tmp, kp, color=color)
return tmp
def drawKeyPoints(img, kp, color=(255,0,0)):
"""
Draw Key Points
Params:
* img - image
    * kp - Key Points List
* color - OPTIONAL - def: (255,0,0)
Returns:
* img with keypoints
"""
tmp = img.copy()
tmp = cv2.drawKeypoints(tmp, kp, color = color)
return tmp
def denoise(img, h=10, hForColor=None, templateWindowSize=7, searchWindowSize=21):
"""
Image De-noising - Both Colored and Grayscale imgs
Params:
* img - image
* h - OPTIONAL - filter strength; def: 10
* hForColor - OPTIONAL - used if img is color; same as h; def: h
* templateWindowSize - OPTIONAL - odd num; def: 7
* searchWindowSize - OPTIONAL - odd num; def: 21
Returns:
* De-noised img
"""
if hForColor is None:
hForColor=h
tmp = img.copy()
if len(img.shape) != 3:
dst = cv2.fastNlMeansDenoising(tmp, None, h, templateWindowSize, searchWindowSize)
else:
dst = cv2.fastNlMeansDenoisingColored(img, None, h, hForColor, templateWindowSize, searchWindowSize)
return dst
def BFMatch(img1, img2):
"""
Brute-Force Matcher - Matches
Params:
* img1 - Query Image
* img2 - Train Image
Returns:
* Matches
"""
gray1 = grayscale(img1)
gray2 = grayscale(img2)
orb = cv2.ORB()
    kp1, des1 = orb.detectAndCompute(gray1, None)
    kp2, des2 = orb.detectAndCompute(gray2, None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key = lambda x:x.distance)
return matches
def drawBFMatch(img1, img2, numMatches=None):
"""
Brute-Force Matcher - Draw Img
Params:
* img1 - Query Image
* img2 - Train Image
* numMatches - OPTIONAL - Number of Matches to Display; def: All
Returns:
* Img with matches drawn
"""
    # BFMatch() does not return keypoints, so they are recomputed here for drawMatches()
    gray1 = grayscale(img1)
    gray2 = grayscale(img2)
    orb = cv2.ORB()
    kp1, des1 = orb.detectAndCompute(gray1, None)
    kp2, des2 = orb.detectAndCompute(gray2, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(bf.match(des1, des2), key=lambda x: x.distance)
    if numMatches is None:
        numMatches = len(matches)
    img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:numMatches], flags=2)
    return img3
def slope(start, end):
"""
Slope Calculator from two points
Params:
* start - start point (x,y)
* end - end point (x,y)
Returns:
* slope of line connecting two pts, None if undefined
"""
x1 = start[0]
y1 = start[1]
x2 = end[0]
y2 = end[1]
top = float(y2 - y1)
bot = float(x2 - x1)
if bot == 0:
return None
else:
return top / bot
def distance(pt1, pt2):
"""
Distance Between Two Points
Params:
* pt1 - first point, (x,y)
* pt2 - second point, (x,y)
Returns:
* Distance Between Two Points
"""
x1, y1 = pt1
x2, y2 = pt2
x = x2 - x1
y = y2 - y1
s = x**2 + y**2
return np.sqrt(s)
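# Usage sketch: distance() and slope() applied to the same pair of points.
def _example_geometry():
    p1, p2 = (0, 0), (3, 4)
    print "distance:", distance(p1, p2)   # 5.0
    print "slope:", slope(p1, p2)         # 1.333...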
def eventList(filterStr=""):
"""
Returns a List of all Events handled by OpenCV
Params:
* filter - OPTIONAL - filter list results
Returns:
* list of events
"""
filterStr = filterStr.upper()
events = [i for i in dir(cv2) if 'EVENT' in i and filterStr in i]
return events
def addBorder(img, flag=0, top=10, bottom=10, left=10, right=10, color = (255,0,0)):
"""
Adds a border to the image
Params:
* img - image to add border
* flag - OPTIONAL - border type flag, def is CONSTANT (0)
* top - OPTIONAL - top pixel width
* bottom - OPTIONAL - bottom pixel width
* right - OPTIONAL - right pixel width
* left - OPTIONAL - left pixel width
* color - OPTIONAL - only used if flag == cv2.BORDER_CONSTANT
Returns:
* img with specified border
"""
    if flag == 0:
        borderImg = cv2.copyMakeBorder(img, top, bottom, left, right, flag, value=color)
    else:
        borderImg = cv2.copyMakeBorder(img, top, bottom, left, right, flag)
    return borderImg
def getBorderFlags():
"""
Returns border flags and int values
Returns:
* Border values
"""
return border_flag
def integral(img, sqSum = False, tilted = False):
"""
Integral of an Img
Params:
* img - image
* sqSum - OPTIONAL - if True, returns integral for squared pixel values
* tilted - OPTIONAL - if True, returns integral for img rotated by 45 deg
Returns:
* integral
* sq pixel integral
* tilted integral
"""
if sqSum is False and tilted is False:
return cv2.integral(img)
elif sqSum is True and tilted is False:
return cv2.integral2(img)
elif sqSum is True and tilted is True:
return cv2.integral3(img)
elif sqSum is False and tilted is True:
su, sqsu, tilt = cv2.integral3(img)
return su, tilt
else:
return cv2.integral(img)
#### END OF IMAGE METHODS ####
#### START OF VIDEO METHODS - EXPERIMENTAL ####
def captureDisplay(title="Frame"):
"""
Capture and display an image from Camera
Params:
* title - OPTIONAL - name of display pop up, def: 'Frame'
"""
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
cv2.namedWindow(title, cv2.WINDOW_NORMAL)
cv2.imshow(title, frame)
cap.release()
cv2.waitKey(0)
cv2.destroyAllWindows()
def capture():
"""
Captures and returns an image from Camera
Returns:
* Captured Img
"""
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
cap.release()
cv2.destroyAllWindows()
return frame
def objectTrackVid(lower, upper):
"""
Object Tracking Video
Params:
* lower - Lower Bound
* upper - Upper Bound
"""
cap = cv2.VideoCapture(0)
print "Please hit ESC key when done to ensure camera closure..."
while(1):
_, frame = cap.read()
res = trackObject(frame, lower, upper)
cv2.imshow('Frame', frame)
cv2.imshow('Result', res)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
# End of File
|
nextBillyonair/compVision
|
XPD/cvlibNoEpics.py
|
Python
|
mit
| 82,776
|
[
"Gaussian"
] |
a39e497f2e7bb5669589970325c64376cf0b3460ef101838022134f69148067b
|
""" FileReport module defines the FileReport class, to report file status to the transformation DB
"""
import copy
from DIRAC import S_OK
from DIRAC.Core.Utilities import DEncode
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.RequestManagementSystem.Client.Operation import Operation
__RCSID__ = "$Id$"
class FileReport( object ):
""" A stateful object for reporting to TransformationDB
"""
def __init__( self, server = 'Transformation/TransformationManager' ):
""" c'tor
self.transClient is a TransformationClient object
"""
self.transClient = TransformationClient()
self.transClient.setServer( server )
self.statusDict = {}
self.transformation = None
self.force = False
def setFileStatus( self, transformation, lfn, status, sendFlag = False ):
""" Set file status in the context of the given transformation
"""
if not self.transformation:
self.transformation = transformation
if isinstance( lfn, ( list, dict, tuple ) ):
self.statusDict.update( dict.fromkeys( lfn, status ) )
else:
self.statusDict[lfn] = status
if sendFlag:
return self.commit()
return S_OK()
def setCommonStatus( self, status ):
""" Set common status for all files in the internal cache
"""
for lfn in self.statusDict:
self.statusDict[lfn] = status
return S_OK()
def getFiles( self ):
""" Get the statuses of the files already accumulated in the FileReport object
"""
return copy.deepcopy( self.statusDict )
def commit( self ):
""" Commit pending file status update records
"""
if not self.statusDict:
return S_OK( {} )
result = self.transClient.setFileStatusForTransformation( self.transformation, self.statusDict, force = self.force )
if result['OK']:
self.statusDict = {}
return result
def generateForwardDISET( self ):
""" Commit the accumulated records and generate request eventually
"""
result = self.commit()
commitOp = None
if not result['OK']:
# Generate Request
commitOp = Operation()
commitOp.Type = 'SetFileStatus'
commitOp.Arguments = DEncode.encode( {'transformation':self.transformation, 'statusDict':self.statusDict, 'force':self.force} )
return S_OK( commitOp )
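def _exampleUsage():
  """ Illustrative sketch only, not part of the original module: accumulate one
      file status and commit it. The transformation ID and LFN are made-up values.
  """
  fileReport = FileReport()
  fileReport.setFileStatus( 1234, '/lhcb/data/example.file', 'Processed' )
  return fileReport.commit()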
|
andresailer/DIRAC
|
TransformationSystem/Client/FileReport.py
|
Python
|
gpl-3.0
| 2,385
|
[
"DIRAC"
] |
7002c9812a9cc8e9db4ba96eaf5aadd3534084084c391dbdabef83d6e251e98b
|
import os
import itertools
import math
import re
import numpy as np
import scipy.linalg as spla
import hail as hl
import hail.expr.aggregators as agg
from hail.expr import construct_expr, construct_variable
from hail.expr.expressions import (expr_float64, matrix_table_source, expr_ndarray,
check_entry_indexed, expr_tuple, expr_array, expr_int32, expr_int64)
from hail.ir import (BlockMatrixWrite, BlockMatrixMap2, ApplyBinaryPrimOp, F64,
BlockMatrixBroadcast, ValueToBlockMatrix, BlockMatrixRead,
BlockMatrixMap, ApplyUnaryPrimOp, BlockMatrixDot, BlockMatrixCollect,
tensor_shape_to_matrix_shape, BlockMatrixAgg, BlockMatrixRandom,
BlockMatrixToValueApply, BlockMatrixToTable, BlockMatrixFilter,
TableFromBlockMatrixNativeReader, TableRead, BlockMatrixSlice,
BlockMatrixSparsify, BlockMatrixDensify, RectangleSparsifier,
RowIntervalSparsifier, BandSparsifier, PerBlockSparsifier)
from hail.ir.blockmatrix_reader import (BlockMatrixNativeReader,
BlockMatrixBinaryReader, BlockMatrixPersistReader)
from hail.ir.blockmatrix_writer import (BlockMatrixBinaryWriter,
BlockMatrixNativeWriter, BlockMatrixRectanglesWriter, BlockMatrixPersistWriter)
from hail.ir import ExportType
from hail.table import Table
from hail.typecheck import (typecheck, typecheck_method, nullable, oneof,
sliceof, sequenceof, lazy, enumeration, numeric, tupleof, func_spec,
sized_tupleof)
from hail.utils import (new_temp_file, new_local_temp_file, local_path_uri,
storage_level, with_local_temp_file)
from hail.utils.java import Env
block_matrix_type = lazy()
class BlockMatrix(object):
"""Hail's block-distributed matrix of :py:data:`.tfloat64` elements.
.. include:: ../_templates/experimental.rst
A block matrix is a distributed analogue of a two-dimensional
`NumPy ndarray
<https://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html>`__ with
shape ``(n_rows, n_cols)`` and NumPy dtype ``float64``.
Import the class with:
>>> from hail.linalg import BlockMatrix
Under the hood, block matrices are partitioned like a checkerboard into
square blocks with side length a common block size. Blocks in the final row
or column of blocks may be truncated, so block size need not evenly divide
the matrix dimensions. Block size defaults to the value given by
:meth:`default_block_size`.
**Operations and broadcasting**
The core operations are consistent with NumPy: ``+``, ``-``, ``*``, and
``/`` for element-wise addition, subtraction, multiplication, and division;
``@`` for matrix multiplication; ``T`` for transpose; and ``**`` for
element-wise exponentiation to a scalar power.
For element-wise binary operations, each operand may be a block matrix, an
ndarray, or a scalar (:obj:`int` or :obj:`float`). For matrix
multiplication, each operand may be a block matrix or an ndarray. If either
operand is a block matrix, the result is a block matrix. Binary operations
between block matrices require that both operands have the same block size.
To interoperate with block matrices, ndarray operands must be one or two
dimensional with dtype convertible to ``float64``. One-dimensional ndarrays
of shape ``(n)`` are promoted to two-dimensional ndarrays of shape ``(1,
n)``, i.e. a single row.
Block matrices support broadcasting of ``+``, ``-``, ``*``, and ``/``
between matrices of different shapes, consistent with the NumPy
`broadcasting rules
<https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`__.
There is one exception: block matrices do not currently support element-wise
"outer product" of a single row and a single column, although the same
effect can be achieved for ``*`` by using ``@``.
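A minimal broadcasting sketch (the shapes and values below are illustrative):
>>> import numpy as np
>>> a = BlockMatrix.from_numpy(np.array([[1.0, 2.0], [3.0, 4.0]]))
>>> row = np.array([[10.0, 20.0]])
>>> (a + row).to_numpy()  # doctest: +SKIP_OUTPUT_CHECK
array([[11., 22.],
[13., 24.]])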
Warning
-------
For binary operations, if the first operand is an ndarray and the
second operand is a block matrix, the result will be an ndarray of block
matrices. To achieve the desired behavior for ``+`` and ``*``, place the
block matrix operand first; for ``-``, ``/``, and ``@``, first convert
the ndarray to a block matrix using :meth:`.from_numpy`.
Warning
-------
Block matrix multiplication requires special care due to each block
of each operand being a dependency of multiple blocks in the product.
The :math:`(i, j)`-block in the product ``a @ b`` is computed by summing
the products of corresponding blocks in block row :math:`i` of ``a`` and
block column :math:`j` of ``b``. So overall, in addition to this
multiplication and addition, the evaluation of ``a @ b`` realizes each
block of ``a`` as many times as the number of block columns of ``b``
and realizes each block of ``b`` as many times as the number of
block rows of ``a``.
This becomes a performance and resilience issue whenever ``a`` or ``b``
is defined in terms of pending transformations (such as linear
algebra operations). For example, evaluating ``a @ (c @ d)`` will
effectively evaluate ``c @ d`` as many times as the number of block rows
in ``a``.
To limit re-computation, write or cache transformed block matrix
operands before feeding them into matrix multiplication:
>>> c = BlockMatrix.read('c.bm') # doctest: +SKIP
>>> d = BlockMatrix.read('d.bm') # doctest: +SKIP
>>> (c @ d).write('cd.bm') # doctest: +SKIP
>>> a = BlockMatrix.read('a.bm') # doctest: +SKIP
>>> e = a @ BlockMatrix.read('cd.bm') # doctest: +SKIP
**Indexing and slicing**
Block matrices also support NumPy-style 2-dimensional
`indexing and slicing <https://docs.scipy.org/doc/numpy/user/basics.indexing.html>`__,
with two differences.
First, slices ``start:stop:step`` must be non-empty with positive ``step``.
Second, even if only one index is a slice, the resulting block matrix is still
2-dimensional.
For example, for a block matrix ``bm`` with 10 rows and 10 columns:
- ``bm[0, 0]`` is the element in row 0 and column 0 of ``bm``.
- ``bm[0:1, 0]`` is a block matrix with 1 row, 1 column,
and element ``bm[0, 0]``.
- ``bm[2, :]`` is a block matrix with 1 row, 10 columns,
and elements from row 2 of ``bm``.
- ``bm[:3, -1]`` is a block matrix with 3 rows, 1 column,
and the first 3 elements of the last column of ``bm``.
- ``bm[::2, ::2]`` is a block matrix with 5 rows, 5 columns,
and all evenly-indexed elements of ``bm``.
Use :meth:`filter`, :meth:`filter_rows`, and :meth:`filter_cols` to
subset to non-slice subsets of rows and columns, e.g. to rows ``[0, 2, 5]``.
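A minimal slicing sketch (shapes and values illustrative):
>>> import numpy as np
>>> bm = BlockMatrix.from_numpy(np.arange(16, dtype=np.float64).reshape(4, 4))
>>> bm[1, 2]  # doctest: +SKIP_OUTPUT_CHECK
6.0
>>> bm[1:3, :2].to_numpy()  # doctest: +SKIP_OUTPUT_CHECK
array([[4., 5.],
[8., 9.]])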
**Block-sparse representation**
By default, block matrices compute and store all blocks explicitly.
However, some applications involve block matrices in which:
- some blocks consist entirely of zeroes.
- some blocks are not of interest.
For example, statistical geneticists often want to compute and manipulate a
banded correlation matrix capturing "linkage disequilibrium" between nearby
variants along the genome. In this case, working with the full correlation
matrix for tens of millions of variants would be prohibitively expensive,
and in any case, entries far from the diagonal are either not of interest or
ought to be zeroed out before downstream linear algebra.
To enable such computations, block matrices do not require that all blocks
be realized explicitly. Implicit (dropped) blocks behave as blocks of
zeroes, so we refer to a block matrix in which at least one block is
implicitly zero as a **block-sparse matrix**. Otherwise, we say the matrix
is block-dense. The property :meth:`is_sparse` encodes this state.
Dropped blocks are not stored in memory or on :meth:`write`. In fact,
blocks that are dropped prior to an action like :meth:`export` or
:meth:`to_numpy` are never computed in the first place, nor are any blocks
of upstream operands on which only dropped blocks depend! In addition,
linear algebra is accelerated by avoiding, for example, explicit addition of
or multiplication by blocks of zeroes.
Block-sparse matrices may be created with
:meth:`sparsify_band`,
:meth:`sparsify_rectangles`,
:meth:`sparsify_row_intervals`,
and :meth:`sparsify_triangle`.
The following methods naturally propagate block-sparsity:
- Addition and subtraction "union" realized blocks.
- Element-wise multiplication "intersects" realized blocks.
- Transpose "transposes" realized blocks.
- :meth:`abs` and :meth:`sqrt` preserve the realized blocks.
- :meth:`sum` along an axis realizes those blocks for which at least one
block summand is realized.
- Matrix slicing, and more generally :meth:`filter`, :meth:`filter_rows`,
and :meth:`filter_cols`.
The following methods always result in a block-dense matrix:
- :meth:`fill`
- Addition or subtraction of a scalar or broadcasted vector.
- Matrix multiplication, ``@``.
The following methods fail if any operand is block-sparse, but can be forced
by first applying :meth:`densify`.
- Element-wise division between two block matrices.
- Multiplication by a scalar or broadcasted vector which includes an
infinite or ``nan`` value.
- Division by a scalar or broadcasted vector which includes a zero, infinite
or ``nan`` value.
- Division of a scalar or broadcasted vector by a block matrix.
- Element-wise exponentiation by a negative exponent.
- Natural logarithm, :meth:`log`.
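A minimal block-sparsity sketch (block size and band chosen for illustration):
>>> import numpy as np
>>> nd = np.arange(16, dtype=np.float64).reshape(4, 4)
>>> sparse_bm = BlockMatrix.from_numpy(nd, block_size=2).sparsify_band(lower=0, upper=0, blocks_only=True)
>>> sparse_bm.is_sparse  # doctest: +SKIP
True
>>> sparse_bm.densify().is_sparse  # doctest: +SKIP
False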
"""
def __init__(self, bmir):
self._bmir = bmir
@classmethod
@typecheck_method(path=str)
def read(cls, path):
"""Reads a block matrix.
Parameters
----------
path: :class:`str`
Path to input file.
Returns
-------
:class:`.BlockMatrix`
"""
return cls(BlockMatrixRead(BlockMatrixNativeReader(path)))
@classmethod
@typecheck_method(uri=str,
n_rows=int,
n_cols=int,
block_size=nullable(int))
def fromfile(cls, uri, n_rows, n_cols, block_size=None):
"""Creates a block matrix from a binary file.
Examples
--------
>>> import numpy as np
>>> a = np.random.rand(10, 20)
>>> a.tofile('/local/file') # doctest: +SKIP
To create a block matrix of the same dimensions:
>>> bm = BlockMatrix.fromfile('file:///local/file', 10, 20) # doctest: +SKIP
Notes
-----
This method, analogous to `numpy.fromfile
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.fromfile.html>`__,
reads a binary file of float64 values in row-major order, such as that
produced by `numpy.tofile
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.tofile.html>`__
or :meth:`BlockMatrix.tofile`.
Binary files produced and consumed by :meth:`.tofile` and
:meth:`.fromfile` are not platform independent, so should only be used
for inter-operating with NumPy, not storage. Use
:meth:`BlockMatrix.write` and :meth:`BlockMatrix.read` to save and load
block matrices, since these methods write and read blocks in parallel
and are platform independent.
A NumPy ndarray must have type float64 for the output of
:func:`numpy.tofile` to be a valid binary input to :meth:`.fromfile`.
This is not checked.
The number of entries must be less than :math:`2^{31}`.
Parameters
----------
uri: :class:`str`, optional
URI of binary input file.
n_rows: :obj:`int`
Number of rows.
n_cols: :obj:`int`
Number of columns.
block_size: :obj:`int`, optional
Block size. Default given by :meth:`default_block_size`.
See Also
--------
:meth:`.from_numpy`
"""
if not block_size:
block_size = BlockMatrix.default_block_size()
return cls(BlockMatrixRead(BlockMatrixBinaryReader(uri, [n_rows, n_cols], block_size)))
@classmethod
@typecheck_method(ndarray=np.ndarray,
block_size=nullable(int))
def from_numpy(cls, ndarray, block_size=None):
"""Distributes a `NumPy ndarray
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html>`__
as a block matrix.
Examples
--------
>>> import numpy as np
>>> a = np.random.rand(10, 20)
>>> bm = BlockMatrix.from_numpy(a)
Notes
-----
The ndarray must have two dimensions, each of non-zero size.
The number of entries must be less than :math:`2^{31}`.
Parameters
----------
ndarray: :class:`numpy.ndarray`
ndarray with two dimensions, each of non-zero size.
block_size: :obj:`int`, optional
Block size. Default given by :meth:`default_block_size`.
Returns
-------
:class:`.BlockMatrix`
"""
if not block_size:
block_size = BlockMatrix.default_block_size()
if any(i == 0 for i in ndarray.shape):
raise ValueError(f'from_numpy: ndarray dimensions must be non-zero, found shape {ndarray.shape}')
nd = _ndarray_as_2d(ndarray)
nd = _ndarray_as_float64(nd)
n_rows, n_cols = nd.shape
path = new_local_temp_file()
uri = local_path_uri(path)
nd.tofile(path)
return cls.fromfile(uri, n_rows, n_cols, block_size)
@classmethod
@typecheck_method(entry_expr=expr_float64,
mean_impute=bool,
center=bool,
normalize=bool,
axis=nullable(enumeration('rows', 'cols')),
block_size=nullable(int))
def from_entry_expr(cls, entry_expr, mean_impute=False, center=False, normalize=False, axis='rows', block_size=None):
"""Creates a block matrix using a matrix table entry expression.
Examples
--------
>>> mt = hl.balding_nichols_model(3, 25, 50)
>>> bm = BlockMatrix.from_entry_expr(mt.GT.n_alt_alleles())
Notes
-----
This convenience method writes the block matrix to a temporary file on
persistent disk and then reads the file. If you want to store the
resulting block matrix, use :meth:`write_from_entry_expr` directly to
avoid writing the result twice. See :meth:`write_from_entry_expr` for
further documentation.
Warning
-------
If the rows of the matrix table have been filtered to a small fraction,
then repartition with :meth:`.MatrixTable.repartition` before this method to improve
performance.
If you encounter a Hadoop write/replication error, increase the
number of persistent workers or the disk size per persistent worker,
or use :meth:`write_from_entry_expr` to write to external storage.
This method opens ``n_cols / block_size`` files concurrently per task.
To not blow out memory when the number of columns is very large,
limit the Hadoop write buffer size; e.g. on GCP, set this property on
cluster startup (the default is 64MB):
``--properties 'core:fs.gs.io.buffersize.write=1048576'``.
Parameters
----------
entry_expr: :class:`.Float64Expression`
Entry expression for numeric matrix entries.
mean_impute: :obj:`bool`
If true, set missing values to the row mean before centering or
normalizing. If false, missing values will raise an error.
center: :obj:`bool`
If true, subtract the row mean.
normalize: :obj:`bool`
If true and ``center=False``, divide by the row magnitude.
If true and ``center=True``, divide the centered value by the
centered row magnitude.
axis: :class:`str`
One of "rows" or "cols": axis by which to normalize or center.
block_size: :obj:`int`, optional
Block size. Default given by :meth:`.BlockMatrix.default_block_size`.
"""
path = new_temp_file()
cls.write_from_entry_expr(entry_expr, path, overwrite=False, mean_impute=mean_impute,
center=center, normalize=normalize, axis=axis, block_size=block_size)
return cls.read(path)
@classmethod
@typecheck_method(n_rows=int,
n_cols=int,
block_size=nullable(int),
seed=nullable(int),
gaussian=bool)
def random(cls, n_rows, n_cols, block_size=None, seed=None, gaussian=True) -> 'BlockMatrix':
"""Creates a block matrix with standard normal or uniform random entries.
Examples
--------
Create a block matrix with 10 rows, 20 columns, and standard normal entries:
>>> bm = BlockMatrix.random(10, 20)
Parameters
----------
n_rows: :obj:`int`
Number of rows.
n_cols: :obj:`int`
Number of columns.
block_size: :obj:`int`, optional
Block size. Default given by :meth:`default_block_size`.
seed: :obj:`int`
Random seed.
gaussian: :obj:`bool`
If ``True``, entries are drawn from the standard
normal distribution. If ``False``, entries are drawn from
the uniform distribution on [0,1].
Returns
-------
:class:`.BlockMatrix`
"""
if not block_size:
block_size = BlockMatrix.default_block_size()
seed = seed if seed is not None else Env.next_seed()
rand = BlockMatrixRandom(seed, gaussian, [n_rows, n_cols], block_size)
return BlockMatrix(rand)
@classmethod
@typecheck_method(n_rows=int,
n_cols=int,
value=numeric,
block_size=nullable(int))
def fill(cls, n_rows, n_cols, value, block_size=None):
"""Creates a block matrix with all elements the same value.
Examples
--------
Create a block matrix with 10 rows, 20 columns, and all elements equal to ``1.0``:
>>> bm = BlockMatrix.fill(10, 20, 1.0)
Parameters
----------
n_rows: :obj:`int`
Number of rows.
n_cols: :obj:`int`
Number of columns.
value: :obj:`float`
Value of all elements.
block_size: :obj:`int`, optional
Block size. Default given by :meth:`default_block_size`.
Returns
-------
:class:`.BlockMatrix`
"""
if not block_size:
block_size = BlockMatrix.default_block_size()
bmir = BlockMatrixBroadcast(_to_bmir(value, block_size),
[], [n_rows, n_cols],
block_size)
return BlockMatrix(bmir)
@classmethod
@typecheck_method(n_rows=int,
n_cols=int,
data=sequenceof(float),
block_size=nullable(int))
def _create(cls, n_rows, n_cols, data, block_size=None):
"""Private method for creating small test matrices."""
if block_size is None:
block_size = BlockMatrix.default_block_size()
return BlockMatrix(ValueToBlockMatrix(hl.literal(data)._ir, [n_rows, n_cols], block_size))
@classmethod
@typecheck_method(ndarray_expression=expr_ndarray(), block_size=int)
def from_ndarray(cls, ndarray_expression, block_size=4096):
"""Create a BlockMatrix from an ndarray"""
if ndarray_expression.dtype.element_type != hl.tfloat64:
raise ValueError("BlockMatrix.from_ndarray expects an ndarray of type float64")
shape = hl.eval(ndarray_expression.shape)
if shape is None:
raise ValueError("Cannot make a BlockMatrix from a missing NDArray")
return BlockMatrix(ValueToBlockMatrix(ndarray_expression._ir, shape, block_size))
@staticmethod
def default_block_size():
"""Default block side length."""
# This should match BlockMatrix.defaultBlockSize in the Scala backend.
return 4096 # 32 * 1024 bytes
@property
def element_type(self):
"""The type of the elements"""
return self._bmir.typ.element_type
@property
def n_rows(self):
"""Number of rows.
Returns
-------
:obj:`int`
"""
return self.shape[0]
@property
def n_cols(self):
"""Number of columns.
Returns
-------
:obj:`int`
"""
return self.shape[1]
@property
def _n_block_rows(self):
return (self.n_rows + self.block_size - 1) // self.block_size
@property
def _n_block_cols(self):
return (self.n_cols + self.block_size - 1) // self.block_size
@property
def shape(self):
"""Shape of matrix.
Returns
-------
(:obj:`int`, :obj:`int`)
Number of rows and number of columns.
"""
return tensor_shape_to_matrix_shape(self._bmir)
@property
def block_size(self):
"""Block size.
Returns
-------
:obj:`int`
"""
return self._bmir.typ.block_size
@property
def _last_col_block_width(self):
remainder = self.n_cols % self.block_size
return remainder if remainder != 0 else self.block_size
@property
def _last_row_block_height(self):
remainder = self.n_rows % self.block_size
return remainder if remainder != 0 else self.block_size
@typecheck_method(path=str,
overwrite=bool,
force_row_major=bool,
stage_locally=bool)
def write(self, path, overwrite=False, force_row_major=False, stage_locally=False):
"""Writes the block matrix.
.. include:: ../_templates/write_warning.rst
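Examples
--------
A minimal round-trip sketch (the path is illustrative):
>>> bm = BlockMatrix.random(10, 20)
>>> bm.write('output/example.bm', overwrite=True)  # doctest: +SKIP
>>> bm2 = BlockMatrix.read('output/example.bm')  # doctest: +SKIP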
Parameters
----------
path: :class:`str`
Path for output file.
overwrite : :obj:`bool`
If ``True``, overwrite an existing file at the destination.
force_row_major: :obj:`bool`
If ``True``, transform blocks in column-major format
to row-major format before writing.
If ``False``, write blocks in their current format.
stage_locally: :obj:`bool`
If ``True``, major output will be written to temporary local storage
before being copied to ``output``.
"""
writer = BlockMatrixNativeWriter(path, overwrite, force_row_major, stage_locally)
Env.backend().execute(BlockMatrixWrite(self._bmir, writer))
@typecheck_method(path=str,
overwrite=bool,
force_row_major=bool,
stage_locally=bool)
def checkpoint(self, path, overwrite=False, force_row_major=False, stage_locally=False):
"""Checkpoint the block matrix.
.. include:: ../_templates/write_warning.rst
Parameters
----------
path: :class:`str`
Path for output file.
overwrite : :obj:`bool`
If ``True``, overwrite an existing file at the destination.
force_row_major: :obj:`bool`
If ``True``, transform blocks in column-major format
to row-major format before checkpointing.
If ``False``, checkpoint blocks in their current format.
stage_locally: :obj:`bool`
If ``True``, major output will be written to temporary local storage
before being copied to ``output``.
"""
self.write(path, overwrite, force_row_major, stage_locally)
return BlockMatrix.read(path)
@staticmethod
@typecheck(entry_expr=expr_float64,
path=str,
overwrite=bool,
mean_impute=bool,
center=bool,
normalize=bool,
axis=nullable(enumeration('rows', 'cols')),
block_size=nullable(int))
def write_from_entry_expr(entry_expr, path, overwrite=False, mean_impute=False,
center=False, normalize=False, axis='rows', block_size=None):
"""Writes a block matrix from a matrix table entry expression.
Examples
--------
>>> mt = hl.balding_nichols_model(3, 25, 50)
>>> BlockMatrix.write_from_entry_expr(mt.GT.n_alt_alleles(),
... 'output/model.bm')
Notes
-----
The resulting file can be loaded with :meth:`BlockMatrix.read`.
Blocks are stored row-major.
If a pipelined transformation significantly downsamples the rows of the
underlying matrix table, then repartitioning the matrix table ahead of
this method will greatly improve its performance.
By default, this method will fail if any values are missing (to be clear,
special float values like ``nan`` are not missing values).
- Set `mean_impute` to replace missing values with the row mean before
possibly centering or normalizing. If all values are missing, the row
mean is ``nan``.
- Set `center` to shift each row to have mean zero before possibly
normalizing.
- Set `normalize` to normalize each row to have unit length.
To standardize each row, regarded as an empirical distribution, to have
mean 0 and variance 1, set `center` and `normalize` and then multiply
the result by ``sqrt(n_cols)``.
Warning
-------
If the rows of the matrix table have been filtered to a small fraction,
then repartition with :meth:`.MatrixTable.repartition` before this method to improve
performance.
This method opens ``n_cols / block_size`` files concurrently per task.
To not blow out memory when the number of columns is very large,
limit the Hadoop write buffer size; e.g. on GCP, set this property on
cluster startup (the default is 64MB):
``--properties 'core:fs.gs.io.buffersize.write=1048576'``.
Parameters
----------
entry_expr: :class:`.Float64Expression`
Entry expression for numeric matrix entries.
path: :class:`str`
Path for output.
overwrite : :obj:`bool`
If ``True``, overwrite an existing file at the destination.
mean_impute: :obj:`bool`
If true, set missing values to the row mean before centering or
normalizing. If false, missing values will raise an error.
center: :obj:`bool`
If true, subtract the row mean.
normalize: :obj:`bool`
If true and ``center=False``, divide by the row magnitude.
If true and ``center=True``, divide the centered value by the
centered row magnitude.
axis: :class:`str`
One of "rows" or "cols": axis by which to normalize or center.
block_size: :obj:`int`, optional
Block size. Default given by :meth:`.BlockMatrix.default_block_size`.
"""
if not block_size:
block_size = BlockMatrix.default_block_size()
check_entry_indexed('BlockMatrix.write_from_entry_expr', entry_expr)
mt = matrix_table_source('BlockMatrix.write_from_entry_expr', entry_expr)
if not (mean_impute or center or normalize):
if entry_expr in mt._fields_inverse:
field = mt._fields_inverse[entry_expr]
mt.select_entries(field)._write_block_matrix(path, overwrite, field, block_size)
else:
field = Env.get_uid()
mt.select_entries(**{field: entry_expr})._write_block_matrix(path, overwrite, field, block_size)
else:
mt = mt.select_entries(__x=entry_expr).unfilter_entries()
compute = {
'__count': agg.count_where(hl.is_defined(mt['__x'])),
'__sum': agg.sum(mt['__x']),
'__sum_sq': agg.sum(mt['__x'] * mt['__x'])
}
if axis == 'rows':
n_elements = mt.count_cols()
mt = mt.select_rows(**compute)
else:
n_elements = mt.count_rows()
mt = mt.select_cols(**compute)
compute = {
'__mean': mt['__sum'] / mt['__count'],
'__centered_length': hl.sqrt(mt['__sum_sq']
- (mt['__sum'] ** 2) / mt['__count']),
'__length': hl.sqrt(mt['__sum_sq']
+ (n_elements - mt['__count'])
* ((mt['__sum'] / mt['__count']) ** 2))
}
if axis == 'rows':
mt = mt.select_rows(**compute)
else:
mt = mt.select_cols(**compute)
expr = mt['__x']
if normalize:
if center:
expr = (expr - mt['__mean']) / mt['__centered_length']
if mean_impute:
expr = hl.or_else(expr, 0.0)
else:
if mean_impute:
expr = hl.or_else(expr, mt['__mean'])
expr = expr / mt['__length']
else:
if center:
expr = expr - mt['__mean']
if mean_impute:
expr = hl.or_else(expr, 0.0)
else:
if mean_impute:
expr = hl.or_else(expr, mt['__mean'])
field = Env.get_uid()
mt.select_entries(**{field: expr})._write_block_matrix(path, overwrite, field, block_size)
@staticmethod
def _check_indices(indices, size):
if len(indices) == 0:
raise ValueError('index list must be non-empty')
elif not all(x < y for x, y in zip(indices, indices[1:])):
raise ValueError('index list must be strictly increasing')
elif indices[0] < 0:
raise ValueError(f'index list values must be in range [0, {size}), found {indices[0]}')
elif indices[-1] >= size:
raise ValueError(f'index list values must be in range [0, {size}), found {indices[-1]}')
@typecheck_method(rows_to_keep=sequenceof(int))
def filter_rows(self, rows_to_keep):
"""Filters matrix rows.
Parameters
----------
rows_to_keep: :obj:`list` of :obj:`int`
Indices of rows to keep. Must be non-empty and increasing.
Returns
-------
:class:`.BlockMatrix`
"""
BlockMatrix._check_indices(rows_to_keep, self.n_rows)
return BlockMatrix(BlockMatrixFilter(self._bmir, [rows_to_keep, []]))
@typecheck_method(cols_to_keep=sequenceof(int))
def filter_cols(self, cols_to_keep):
"""Filters matrix columns.
Parameters
----------
cols_to_keep: :obj:`list` of :obj:`int`
Indices of columns to keep. Must be non-empty and increasing.
Returns
-------
:class:`.BlockMatrix`
"""
BlockMatrix._check_indices(cols_to_keep, self.n_cols)
return BlockMatrix(BlockMatrixFilter(self._bmir, [[], cols_to_keep]))
@typecheck_method(rows_to_keep=sequenceof(int),
cols_to_keep=sequenceof(int))
def filter(self, rows_to_keep, cols_to_keep):
"""Filters matrix rows and columns.
Notes
-----
This method has the same effect as :meth:`BlockMatrix.filter_cols`
followed by :meth:`BlockMatrix.filter_rows` (or vice versa), but
filters the block matrix in a single pass which may be more efficient.
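Examples
--------
A minimal sketch, keeping rows ``[0, 2]`` and columns ``[1, 3]`` (values illustrative):
>>> import numpy as np
>>> bm = BlockMatrix.from_numpy(np.arange(16, dtype=np.float64).reshape(4, 4))
>>> bm.filter([0, 2], [1, 3]).to_numpy()  # doctest: +SKIP_OUTPUT_CHECK
array([[ 1.,  3.],
[ 9., 11.]])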
Parameters
----------
rows_to_keep: :obj:`list` of :obj:`int`
Indices of rows to keep. Must be non-empty and increasing.
cols_to_keep: :obj:`list` of :obj:`int`
Indices of columns to keep. Must be non-empty and increasing.
Returns
-------
:class:`.BlockMatrix`
"""
BlockMatrix._check_indices(rows_to_keep, self.n_rows)
BlockMatrix._check_indices(cols_to_keep, self.n_cols)
return BlockMatrix(BlockMatrixFilter(self._bmir, [rows_to_keep, cols_to_keep]))
@staticmethod
def _pos_index(i, size, name, allow_size=False):
if 0 <= i < size or (i == size and allow_size):
return i
elif 0 <= i + size < size:
return i + size
else:
raise ValueError(f'invalid {name} {i} for axis of size {size}')
@staticmethod
def _range_to_keep(idx, size):
if isinstance(idx, int):
pos_idx = BlockMatrix._pos_index(idx, size, 'index')
return slice(pos_idx, pos_idx + 1, 1)
assert isinstance(idx, slice)
if idx.step and idx.step <= 0:
raise ValueError(f'slice step must be positive, found {idx.step}')
start = 0 if idx.start is None else BlockMatrix._pos_index(idx.start, size, 'start index')
stop = size if idx.stop is None else BlockMatrix._pos_index(idx.stop, size, 'stop index', allow_size=True)
step = 1 if idx.step is None else idx.step
if start < stop:
return slice(start, stop, step)
else:
raise ValueError(f'slice {start}:{stop}:{step} is empty')
@typecheck_method(indices=tupleof(oneof(int, sliceof(nullable(int), nullable(int), nullable(int)))))
def __getitem__(self, indices):
if len(indices) != 2:
raise ValueError(f'tuple of indices or slices must have length two, found {len(indices)}')
row_idx, col_idx = indices
if isinstance(row_idx, int) and isinstance(col_idx, int):
i = BlockMatrix._pos_index(row_idx, self.n_rows, 'row index')
j = BlockMatrix._pos_index(col_idx, self.n_cols, 'col index')
return Env.backend().execute(BlockMatrixToValueApply(self._bmir,
{'name': 'GetElement', 'index': [i, j]}))
rows_to_keep = BlockMatrix._range_to_keep(row_idx, self.n_rows)
cols_to_keep = BlockMatrix._range_to_keep(col_idx, self.n_cols)
return BlockMatrix(BlockMatrixSlice(self._bmir, [rows_to_keep, cols_to_keep]))
@typecheck_method(lower=int, upper=int, blocks_only=bool)
def sparsify_band(self, lower=0, upper=0, blocks_only=False):
r"""Filter to a diagonal band.
Examples
--------
Consider the following block matrix:
>>> import numpy as np
>>> nd = np.array([[ 1.0, 2.0, 3.0, 4.0],
... [ 5.0, 6.0, 7.0, 8.0],
... [ 9.0, 10.0, 11.0, 12.0],
... [13.0, 14.0, 15.0, 16.0]])
>>> bm = BlockMatrix.from_numpy(nd, block_size=2)
Filter to a band from one below the diagonal to
two above the diagonal and collect to NumPy:
>>> bm.sparsify_band(lower=-1, upper=2).to_numpy() # doctest: +SKIP_OUTPUT_CHECK
array([[ 1., 2., 3., 0.],
[ 5., 6., 7., 8.],
[ 0., 10., 11., 12.],
[ 0., 0., 15., 16.]])
Set all blocks fully outside the diagonal to zero
and collect to NumPy:
>>> bm.sparsify_band(lower=0, upper=0, blocks_only=True).to_numpy() # doctest: +SKIP_OUTPUT_CHECK
array([[ 1., 2., 0., 0.],
[ 5., 6., 0., 0.],
[ 0., 0., 11., 12.],
[ 0., 0., 15., 16.]])
Notes
-----
This method creates a block-sparse matrix by zeroing out all blocks
which are disjoint from a diagonal band. By default,
all elements outside the band but inside blocks that overlap the
band are set to zero as well.
The band is defined in terms of inclusive `lower` and `upper` indices
relative to the diagonal. For example, the indices -1, 0, and 1
correspond to the sub-diagonal, diagonal, and super-diagonal,
respectively. The diagonal band contains the elements at positions
:math:`(i, j)` such that
.. math::
\mathrm{lower} \leq j - i \leq \mathrm{upper}.
`lower` must be less than or equal to `upper`, but their values may
exceed the dimensions of the matrix, the band need not include the
diagonal, and the matrix need not be square.
Parameters
----------
lower: :obj:`int`
Index of lowest band relative to the diagonal.
upper: :obj:`int`
Index of highest band relative to the diagonal.
blocks_only: :obj:`bool`
If ``False``, set all elements outside the band to zero.
If ``True``, only set all blocks outside the band to blocks
of zeros; this is more efficient.
Returns
-------
:class:`.BlockMatrix`
Sparse block matrix.
"""
if lower > upper:
raise ValueError(f'sparsify_band: lower={lower} is greater than upper={upper}')
bounds = hl.literal((lower, upper), hl.ttuple(hl.tint64, hl.tint64))
return BlockMatrix(BlockMatrixSparsify(self._bmir, bounds._ir, BandSparsifier(blocks_only)))
@typecheck_method(lower=bool, blocks_only=bool)
def sparsify_triangle(self, lower=False, blocks_only=False):
"""Filter to the upper or lower triangle.
Examples
--------
Consider the following block matrix:
>>> import numpy as np
>>> nd = np.array([[ 1.0, 2.0, 3.0, 4.0],
... [ 5.0, 6.0, 7.0, 8.0],
... [ 9.0, 10.0, 11.0, 12.0],
... [13.0, 14.0, 15.0, 16.0]])
>>> bm = BlockMatrix.from_numpy(nd, block_size=2)
Filter to the upper triangle and collect to NumPy:
>>> bm.sparsify_triangle().to_numpy() # doctest: +SKIP_OUTPUT_CHECK
array([[ 1., 2., 3., 4.],
[ 0., 6., 7., 8.],
[ 0., 0., 11., 12.],
[ 0., 0., 0., 16.]])
Set all blocks fully outside the upper triangle to zero
and collect to NumPy:
>>> bm.sparsify_triangle(blocks_only=True).to_numpy() # doctest: +SKIP_OUTPUT_CHECK
array([[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 0., 0., 11., 12.],
[ 0., 0., 15., 16.]])
Notes
-----
This method creates a block-sparse matrix by zeroing out all blocks
which are disjoint from the (non-strict) upper or lower triangle. By
default, all elements outside the triangle but inside blocks that
overlap the triangle are set to zero as well.
Parameters
----------
lower: :obj:`bool`
If ``False``, keep the upper triangle.
If ``True``, keep the lower triangle.
blocks_only: :obj:`bool`
If ``False``, set all elements outside the triangle to zero.
If ``True``, only set all blocks outside the triangle to
blocks of zeros; this is more efficient.
Returns
-------
:class:`.BlockMatrix`
Sparse block matrix.
"""
if lower:
lower_band = 1 - self.n_rows
upper_band = 0
else:
lower_band = 0
upper_band = self.n_cols - 1
return self.sparsify_band(lower_band, upper_band, blocks_only)
@typecheck_method(intervals=expr_tuple([expr_array(expr_int64), expr_array(expr_int64)]),
blocks_only=bool)
def _sparsify_row_intervals_expr(self, intervals, blocks_only=False):
return BlockMatrix(
BlockMatrixSparsify(self._bmir, intervals._ir,
RowIntervalSparsifier(blocks_only)))
@typecheck_method(indices=expr_array(expr_int32))
def _sparsify_blocks(self, indices):
return BlockMatrix(
BlockMatrixSparsify(self._bmir, indices._ir,
PerBlockSparsifier()))
@typecheck_method(starts=oneof(sequenceof(int), np.ndarray),
stops=oneof(sequenceof(int), np.ndarray),
blocks_only=bool)
def sparsify_row_intervals(self, starts, stops, blocks_only=False):
"""Creates a block-sparse matrix by filtering to an interval for each row.
Examples
--------
Consider the following block matrix:
>>> import numpy as np
>>> nd = np.array([[ 1.0, 2.0, 3.0, 4.0],
... [ 5.0, 6.0, 7.0, 8.0],
... [ 9.0, 10.0, 11.0, 12.0],
... [13.0, 14.0, 15.0, 16.0]])
>>> bm = BlockMatrix.from_numpy(nd, block_size=2)
Set all elements outside the given row intervals to zero
and collect to NumPy:
>>> (bm.sparsify_row_intervals(starts=[1, 0, 2, 2],
... stops= [2, 0, 3, 4])
... .to_numpy()) # doctest: +SKIP_OUTPUT_CHECK
array([[ 0., 2., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 11., 0.],
[ 0., 0., 15., 16.]])
Set all blocks fully outside the given row intervals to
blocks of zeros and collect to NumPy:
>>> (bm.sparsify_row_intervals(starts=[1, 0, 2, 2],
... stops= [2, 0, 3, 4],
... blocks_only=True)
... .to_numpy()) # doctest: +SKIP_OUTPUT_CHECK
array([[ 1., 2., 0., 0.],
[ 5., 6., 0., 0.],
[ 0., 0., 11., 12.],
[ 0., 0., 15., 16.]])
Notes
-----
This method creates a block-sparse matrix by zeroing out all blocks
which are disjoint from all row intervals. By default, all elements
outside the row intervals but inside blocks that overlap the row
intervals are set to zero as well.
`starts` and `stops` must both have length equal to the number of
rows. The interval for row ``i`` is ``[starts[i], stops[i])``. In
particular, ``0 <= starts[i] <= stops[i] <= n_cols`` is required
for all ``i``.
This method requires the number of rows to be less than :math:`2^{31}`.
Parameters
----------
starts: :obj:`list` of :obj:`int`, or :class:`numpy.ndarray` of :obj:`int`
Start indices for each row (inclusive).
stops: :obj:`list` of :obj:`int`, or :class:`numpy.ndarray` of :obj:`int`
Stop indices for each row (exclusive).
blocks_only: :obj:`bool`
If ``False``, set all elements outside row intervals to zero.
If ``True``, only set all blocks outside row intervals to blocks
of zeros; this is more efficient.
Returns
-------
:class:`.BlockMatrix`
Sparse block matrix.
"""
if isinstance(starts, np.ndarray):
if not (starts.dtype == np.int32 or starts.dtype == np.int64):
raise ValueError("sparsify_row_intervals: starts ndarray must have dtype 'int32' or 'int64'")
starts = [int(s) for s in starts]
if isinstance(stops, np.ndarray):
if not (stops.dtype == np.int32 or stops.dtype == np.int64):
raise ValueError("sparsify_row_intervals: stops ndarray must have dtype 'int32' or 'int64'")
stops = [int(s) for s in stops]
n_rows = self.n_rows
n_cols = self.n_cols
if n_rows >= (1 << 31):
raise ValueError(f'n_rows must be less than 2^31, found {n_rows}')
if len(starts) != n_rows or len(stops) != n_rows:
raise ValueError(f'starts and stops must both have length {n_rows} (the number of rows)')
if any([start < 0 for start in starts]):
raise ValueError('all start values must be non-negative')
if any([stop > self.n_cols for stop in stops]):
raise ValueError(f'all stop values must be less than or equal to {n_cols} (the number of columns)')
if any([starts[i] > stops[i] for i in range(0, n_rows)]):
raise ValueError('every start value must be less than or equal to the corresponding stop value')
return self._sparsify_row_intervals_expr((starts, stops), blocks_only)
@typecheck_method(uri=str)
def tofile(self, uri):
"""Collects and writes data to a binary file.
Examples
--------
>>> import numpy as np
>>> bm = BlockMatrix.random(10, 20)
>>> bm.tofile('file:///local/file') # doctest: +SKIP
To create a :class:`numpy.ndarray` of the same dimensions:
>>> a = np.fromfile('/local/file').reshape((10, 20)) # doctest: +SKIP
Notes
-----
This method, analogous to `numpy.tofile
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.tofile.html>`__,
produces a binary file of float64 values in row-major order, which can
be read by functions such as `numpy.fromfile
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.fromfile.html>`__
(if a local file) and :meth:`BlockMatrix.fromfile`.
Binary files produced and consumed by :meth:`.tofile` and
:meth:`.fromfile` are not platform independent, so should only be used
for inter-operating with NumPy, not storage. Use
:meth:`BlockMatrix.write` and :meth:`BlockMatrix.read` to save and load
block matrices, since these methods write and read blocks in parallel
and are platform independent.
The number of entries must be less than :math:`2^{31}`.
Parameters
----------
uri: :class:`str`, optional
URI of binary output file.
See Also
--------
:meth:`.to_numpy`
"""
_check_entries_size(self.n_rows, self.n_cols)
writer = BlockMatrixBinaryWriter(uri)
Env.backend().execute(BlockMatrixWrite(self._bmir, writer))
@typecheck_method(_force_blocking=bool)
def to_numpy(self, _force_blocking=False):
"""Collects the block matrix into a `NumPy ndarray
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html>`__.
Examples
--------
>>> bm = BlockMatrix.random(10, 20)
>>> a = bm.to_numpy()
Notes
-----
The resulting ndarray will have the same shape as the block matrix.
Returns
-------
:class:`numpy.ndarray`
"""
if self.n_rows * self.n_cols > 1 << 31 or _force_blocking:
path = new_temp_file()
self.export_blocks(path, binary=True)
return BlockMatrix.rectangles_to_numpy(path, binary=True)
with with_local_temp_file() as path:
uri = local_path_uri(path)
self.tofile(uri)
return np.fromfile(path).reshape((self.n_rows, self.n_cols))
def to_ndarray(self):
"""Collects a BlockMatrix into a local hail ndarray expression on driver. This should not
be done for large matrices.
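A minimal sketch (shape illustrative):
>>> bm = BlockMatrix.random(4, 4)
>>> nd_expr = bm.to_ndarray()
>>> hl.eval(nd_expr).shape  # doctest: +SKIP
(4, 4)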
Returns
-------
:class:`.NDArrayExpression`
"""
ir = BlockMatrixCollect(self._bmir)
return construct_expr(ir, hl.tndarray(hl.tfloat64, 2))
@property
def is_sparse(self):
"""Returns ``True`` if block-sparse.
Notes
-----
A block matrix is block-sparse if at least one of its blocks is dropped,
i.e. implicitly a block of zeros.
Returns
-------
:obj:`bool`
"""
return Env.backend()._to_java_blockmatrix_ir(self._bmir).typ().isSparse()
@property
def T(self):
"""Matrix transpose.
Returns
-------
:class:`.BlockMatrix`
"""
if self.n_rows == 1 and self.n_cols == 1:
return self
if self.n_rows == 1:
index_expr = [0]
elif self.n_cols == 1:
index_expr = [1]
else:
index_expr = [1, 0]
return BlockMatrix(BlockMatrixBroadcast(self._bmir, index_expr, [self.n_cols, self.n_rows], self.block_size))
def densify(self):
"""Restore all dropped blocks as explicit blocks of zeros.
Returns
-------
:class:`.BlockMatrix`
"""
return BlockMatrix(BlockMatrixDensify(self._bmir))
def cache(self):
"""Persist this block matrix in memory.
Notes
-----
This method is an alias for :meth:`persist("MEMORY_ONLY") <hail.linalg.BlockMatrix.persist>`.
Returns
-------
:class:`.BlockMatrix`
Cached block matrix.
"""
return self.persist('MEMORY_ONLY')
@typecheck_method(storage_level=storage_level)
def persist(self, storage_level='MEMORY_AND_DISK'):
"""Persists this block matrix in memory or on disk.
Notes
-----
The :meth:`.BlockMatrix.persist` and :meth:`.BlockMatrix.cache`
methods store the current block matrix on disk or in memory temporarily
to avoid redundant computation and improve the performance of Hail
pipelines. This method is not a substitution for
:meth:`.BlockMatrix.write`, which stores a permanent file.
Most users should use the "MEMORY_AND_DISK" storage level. See the `Spark
documentation
<http://spark.apache.org/docs/latest/programming-guide.html#rdd-persistence>`__
for a more in-depth discussion of persisting data.
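Examples
--------
A minimal sketch of persisting an intermediate result (shapes illustrative):
>>> bm = BlockMatrix.random(10, 10).persist('MEMORY_AND_DISK')  # doctest: +SKIP
>>> product = (bm @ bm.T).to_numpy()  # doctest: +SKIP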
Parameters
----------
storage_level : str
Storage level. One of: NONE, DISK_ONLY,
DISK_ONLY_2, MEMORY_ONLY, MEMORY_ONLY_2, MEMORY_ONLY_SER,
MEMORY_ONLY_SER_2, MEMORY_AND_DISK, MEMORY_AND_DISK_2,
MEMORY_AND_DISK_SER, MEMORY_AND_DISK_SER_2, OFF_HEAP
Returns
-------
:class:`.BlockMatrix`
Persisted block matrix.
"""
id = Env.get_uid()
Env.backend().execute(BlockMatrixWrite(self._bmir, BlockMatrixPersistWriter(id, storage_level)))
return BlockMatrix(BlockMatrixRead(BlockMatrixPersistReader(id, self._bmir)))
def unpersist(self):
"""Unpersists this block matrix from memory/disk.
Notes
-----
This function will have no effect on a block matrix that was not previously
persisted.
Returns
-------
:class:`.BlockMatrix`
Unpersisted block matrix.
"""
if isinstance(self._bmir, BlockMatrixRead) and isinstance(self._bmir.reader, BlockMatrixPersistReader):
Env.backend().unpersist_block_matrix(self._bmir.reader.id)
return self._bmir.reader.unpersisted()
return self
def __pos__(self):
return self
def __neg__(self):
"""Negation: -a.
Returns
-------
:class:`.BlockMatrix`
"""
return self._apply_map(lambda x: construct_expr(ApplyUnaryPrimOp('-', x._ir), hl.tfloat64), needs_dense=False)
@staticmethod
def _binary_op(op):
return lambda l, r: construct_expr(ApplyBinaryPrimOp(op, l._ir, r._ir), hl.tfloat64)
@typecheck_method(f=func_spec(1, expr_float64), needs_dense=bool)
def _apply_map(self, f, needs_dense):
uid = Env.get_uid()
bmir = self._bmir
if needs_dense:
bmir = BlockMatrixDensify(bmir)
return BlockMatrix(BlockMatrixMap(bmir, uid, f(construct_variable(uid, hl.tfloat64))._ir, needs_dense))
@typecheck_method(f=func_spec(2, expr_float64),
other=oneof(numeric, np.ndarray, block_matrix_type),
sparsity_strategy=str,
reverse=bool)
def _apply_map2(self, f, other, sparsity_strategy, reverse=False):
if not isinstance(other, BlockMatrix):
other = BlockMatrix(_to_bmir(other, self.block_size))
self_shape, other_shape = list(self.shape), list(other.shape)
result_shape = _shape_after_broadcast(self_shape, other_shape)
self_bmir = self._bmir if self_shape == result_shape else _broadcast_to_shape(self._bmir, result_shape)
other_bmir = other._bmir if other_shape == result_shape else _broadcast_to_shape(other._bmir, result_shape)
if reverse:
left, right = other_bmir, self_bmir
else:
left, right = self_bmir, other_bmir
lv = Env.get_uid()
rv = Env.get_uid()
f_ir = f(construct_variable(lv, hl.tfloat64), construct_variable(rv, hl.tfloat64))._ir
return BlockMatrix(BlockMatrixMap2(left, right, lv, rv, f_ir, sparsity_strategy))
@typecheck_method(b=oneof(numeric, np.ndarray, block_matrix_type))
def __add__(self, b):
"""Addition: a + b.
Parameters
----------
b: :obj:`int` or :obj:`float` or :class:`numpy.ndarray` or :class:`BlockMatrix`
Returns
-------
:class:`.BlockMatrix`
"""
if isinstance(b, (int, float)):
return self._map_dense(lambda entry: entry + b)
return self._apply_map2(BlockMatrix._binary_op('+'), b, sparsity_strategy="Union")
@typecheck_method(b=oneof(numeric, np.ndarray, block_matrix_type))
def __sub__(self, b):
"""Subtraction: a - b.
Parameters
----------
b: :obj:`int` or :obj:`float` or :class:`numpy.ndarray` or :class:`BlockMatrix`
Returns
-------
:class:`.BlockMatrix`
"""
if isinstance(b, (int, float)):
return self._map_dense(lambda entry: entry - b)
return self._apply_map2(BlockMatrix._binary_op('-'), b, sparsity_strategy="Union")
@typecheck_method(b=oneof(numeric, np.ndarray, block_matrix_type))
def __mul__(self, b):
"""Element-wise multiplication: a * b.
Parameters
----------
b: :obj:`int` or :obj:`float` or :class:`numpy.ndarray` or :class:`BlockMatrix`
Returns
-------
:class:`.BlockMatrix`
"""
if isinstance(b, (int, float)):
# sparse since multiplying by zero is zero
return self._map_sparse(lambda entry: entry * b)
return self._apply_map2(BlockMatrix._binary_op('*'), b, sparsity_strategy="Intersection")
@typecheck_method(b=oneof(numeric, np.ndarray, block_matrix_type))
def __truediv__(self, b):
"""Element-wise division: a / b.
Parameters
----------
b: :obj:`int` or :obj:`float` or :class:`numpy.ndarray` or :class:`BlockMatrix`
Returns
-------
:class:`.BlockMatrix`
"""
if isinstance(b, (int, float)):
# sparse since dividing by zero is zero
return self._map_sparse(lambda entry: entry / b)
return self._apply_map2(BlockMatrix._binary_op('/'), b, sparsity_strategy="NeedsDense")
@typecheck_method(b=numeric)
def __radd__(self, b):
return self._apply_map2(BlockMatrix._binary_op('+'), b, sparsity_strategy="Union", reverse=True)
@typecheck_method(b=numeric)
def __rsub__(self, b):
return self._apply_map2(BlockMatrix._binary_op('-'), b, sparsity_strategy="Union", reverse=True)
@typecheck_method(b=numeric)
def __rmul__(self, b):
return self._apply_map2(BlockMatrix._binary_op('*'), b, sparsity_strategy="Intersection", reverse=True)
@typecheck_method(b=numeric)
def __rtruediv__(self, b):
return self._apply_map2(BlockMatrix._binary_op('/'), b, sparsity_strategy="NeedsDense", reverse=True)
@typecheck_method(block_row_range=sized_tupleof(int, int), block_col_range=sized_tupleof(int, int))
def _select_blocks(self, block_row_range, block_col_range):
start_brow, stop_brow = block_row_range
start_bcol, stop_bcol = block_col_range
start_row = start_brow * self.block_size
stop_row = (stop_brow - 1) * self.block_size + (self._last_row_block_height if stop_brow == self._n_block_rows else self.block_size)
start_col = start_bcol * self.block_size
stop_col = (stop_bcol - 1) * self.block_size + (self._last_col_block_width if stop_bcol == self._n_block_cols else self.block_size)
return self[start_row:stop_row, start_col:stop_col]
@typecheck_method(b=oneof(np.ndarray, block_matrix_type))
def __matmul__(self, b):
"""Matrix multiplication: a @ b.
Parameters
----------
b: :class:`numpy.ndarray` or :class:`BlockMatrix`
Returns
-------
:class:`.BlockMatrix`
"""
if isinstance(b, np.ndarray):
b = BlockMatrix(_to_bmir(b, self.block_size))
if self.n_cols != b.n_rows:
raise ValueError(f'incompatible shapes for matrix multiplication: {self.shape} and {b.shape}')
return BlockMatrix(BlockMatrixDot(self._bmir, b._bmir))
@typecheck_method(b=oneof(np.ndarray, block_matrix_type), splits=int, path_prefix=nullable(str))
def tree_matmul(self, b, *, splits, path_prefix=None):
"""Matrix multiplication in situations with large inner dimension.
This function splits a single matrix multiplication into `splits` smaller matrix multiplications,
does the smaller multiplications, checkpoints them with paths prefixed by `path_prefix`, and adds them
together. This is useful in cases when the multiplication of two large matrices results in a much smaller matrix.
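A minimal sketch (shapes and number of splits illustrative):
>>> a = BlockMatrix.random(100, 10000)  # doctest: +SKIP
>>> b = BlockMatrix.random(10000, 100)  # doctest: +SKIP
>>> c = a.tree_matmul(b, splits=4)  # doctest: +SKIP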
Parameters
----------
b: :class:`numpy.ndarray` or :class:`BlockMatrix`
splits: :obj:`int` (keyword only argument)
The number of smaller multiplications to do.
path_prefix: :class:`str` (keyword only argument)
The prefix of the path to write the block matrices to. If unspecified, writes to a tmpdir.
Returns
-------
:class:`.BlockMatrix`
"""
if isinstance(b, np.ndarray):
b = BlockMatrix(_to_bmir(b, self.block_size))
if self.n_cols != b.n_rows:
raise ValueError(f'incompatible shapes for matrix multiplication: {self.shape} and {b.shape}')
if path_prefix is None:
path_prefix = new_temp_file("tree_matmul_tmp")
if splits != 1:
inner_brange_size = int(math.ceil(self._n_block_cols / splits))
split_points = list(range(0, self._n_block_cols, inner_brange_size)) + [self._n_block_cols]
inner_ranges = list(zip(split_points[:-1], split_points[1:]))
blocks_to_multiply = [(self._select_blocks((0, self._n_block_rows), (start, stop)),
b._select_blocks((start, stop), (0, b._n_block_cols))) for start, stop in inner_ranges]
intermediate_multiply_exprs = [b1 @ b2 for b1, b2 in blocks_to_multiply]
hl.experimental.write_block_matrices(intermediate_multiply_exprs, path_prefix)
read_intermediates = [BlockMatrix.read(f"{path_prefix}_{i}") for i in range(0, len(intermediate_multiply_exprs))]
return sum(read_intermediates)
return BlockMatrix(BlockMatrixDot(self._bmir, b._bmir))
@typecheck_method(x=numeric)
def __pow__(self, x):
"""Element-wise exponentiation: a ** x.
Parameters
----------
x: :obj:`int` or :obj:`float`
Exponent.
Returns
-------
:class:`.BlockMatrix`
"""
return self._apply_map(lambda i: i ** x, needs_dense=False)
def _map_dense(self, func):
return self._apply_map(func, True)
def _map_sparse(self, func):
return self._apply_map(func, False)
def sqrt(self):
"""Element-wise square root.
Returns
-------
:class:`.BlockMatrix`
"""
return self._apply_map(hl.sqrt, needs_dense=False)
def ceil(self):
"""Element-wise ceiling.
Returns
-------
:class:`.BlockMatrix`
"""
return self._apply_map(hl.ceil, needs_dense=False)
def floor(self):
"""Element-wise floor.
Returns
-------
:class:`.BlockMatrix`
"""
return self._apply_map(hl.floor, needs_dense=False)
def abs(self):
"""Element-wise absolute value.
Returns
-------
:class:`.BlockMatrix`
"""
return self._apply_map(hl.abs, needs_dense=False)
def log(self):
"""Element-wise natural logarithm.
Returns
-------
:class:`.BlockMatrix`
"""
return self._apply_map(lambda x: hl.log(x), needs_dense=True)
def diagonal(self):
"""Extracts diagonal elements as a row vector.
Returns
-------
:class:`.BlockMatrix`
"""
diag_bmir = BlockMatrixBroadcast(self._bmir,
[0, 0],
[1, min(self.n_rows, self.n_cols)],
self.block_size)
return BlockMatrix(diag_bmir)
@typecheck_method(axis=nullable(int))
def sum(self, axis=None):
"""Sums array elements over one or both axes.
Examples
--------
>>> import numpy as np
>>> nd = np.array([[ 1.0, 2.0, 3.0],
... [ 4.0, 5.0, 6.0]])
>>> bm = BlockMatrix.from_numpy(nd)
>>> bm.sum()
21.0
>>> bm.sum(axis=0).to_numpy()
array([[5., 7., 9.]])
>>> bm.sum(axis=1).to_numpy()
array([[ 6.],
[15.]])
Parameters
----------
axis: :obj:`int`, optional
Axis over which to sum.
By default, sum all elements.
If ``0``, sum over rows.
If ``1``, sum over columns.
Returns
-------
:obj:`float` or :class:`BlockMatrix`
If None, returns a float.
If ``0``, returns a block matrix with a single row.
If ``1``, returns a block matrix with a single column.
"""
if axis is None:
bmir = BlockMatrixAgg(self._bmir, [0, 1])
return BlockMatrix(bmir)[0, 0]
elif axis == 0 or axis == 1:
out_index_expr = [axis]
bmir = BlockMatrixAgg(self._bmir, out_index_expr)
return BlockMatrix(bmir)
else:
raise ValueError(f'axis must be None, 0, or 1: found {axis}')
def entries(self, keyed=True):
"""Returns a table with the indices and value of each block matrix entry.
Examples
--------
>>> import numpy as np
>>> block_matrix = BlockMatrix.from_numpy(np.array([[5, 7], [2, 8]]), 2)
>>> entries_table = block_matrix.entries()
>>> entries_table.show()
+-------+-------+----------+
| i | j | entry |
+-------+-------+----------+
| int64 | int64 | float64 |
+-------+-------+----------+
| 0 | 0 | 5.00e+00 |
| 0 | 1 | 7.00e+00 |
| 1 | 0 | 2.00e+00 |
| 1 | 1 | 8.00e+00 |
+-------+-------+----------+
Notes
-----
The resulting table may be filtered, aggregated, and queried, but should only be
directly exported to disk if the block matrix is very small.
For block-sparse matrices, only realized blocks are included. To force inclusion
of zeroes in dropped blocks, apply :meth:`densify` first.
The resulting table has the following fields:
- **i** (:py:data:`.tint64`, key field) -- Row index.
- **j** (:py:data:`.tint64`, key field) -- Column index.
- **entry** (:py:data:`.tfloat64`) -- Value of entry.
Returns
-------
:class:`.Table`
Table with a row for each entry.
"""
t = Table(BlockMatrixToTable(self._bmir))
if keyed:
t = t.key_by('i', 'j')
return t
@typecheck_method(n_partitions=nullable(int), maximum_cache_memory_in_bytes=nullable(int))
def to_table_row_major(self, n_partitions=None, maximum_cache_memory_in_bytes=None):
"""Returns a table where each row represents a row in the block matrix.
The resulting table has the following fields:
- **row_idx** (:py:data:`.tint64`, key field) -- Row index
- **entries** (:py:class:`.tarray` of :py:data:`.tfloat64`) -- Entries for the row
Examples
--------
>>> import numpy as np
>>> block_matrix = BlockMatrix.from_numpy(np.array([[1, 2], [3, 4], [5, 6]]), 2)
>>> t = block_matrix.to_table_row_major()
>>> t.show()
+---------+---------------------+
| row_idx | entries |
+---------+---------------------+
| int64 | array<float64> |
+---------+---------------------+
| 0 | [1.00e+00,2.00e+00] |
| 1 | [3.00e+00,4.00e+00] |
| 2 | [5.00e+00,6.00e+00] |
+---------+---------------------+
Parameters
----------
n_partitions : int or None
Number of partitions of the table.
maximum_cache_memory_in_bytes : int or None
The amount of memory to reserve, per partition, to cache rows of the
matrix in memory. This value must be at least large enough to hold
one row of the matrix in memory. If this value is exactly the size of
one row, then a partition makes a network request for every row of
every block. Larger values reduce the number of network requests. If
memory permits, setting this value to the size of one output
partition permits one network request per block per partition.
Notes
-----
Does not support block-sparse matrices.
Returns
-------
:class:`.Table`
Table where each row corresponds to a row in the block matrix.
"""
path = new_temp_file()
if maximum_cache_memory_in_bytes and maximum_cache_memory_in_bytes > (1 << 31) - 1:
raise ValueError(
f'maximum_cache_memory_in_bytes must be less than 2^31 -1, was: {maximum_cache_memory_in_bytes}')
self.write(path, overwrite=True, force_row_major=True)
reader = TableFromBlockMatrixNativeReader(path, n_partitions, maximum_cache_memory_in_bytes)
return Table(TableRead(reader))
@typecheck_method(n_partitions=nullable(int), maximum_cache_memory_in_bytes=nullable(int))
def to_matrix_table_row_major(self, n_partitions=None, maximum_cache_memory_in_bytes=None):
"""Returns a matrix table with row key of `row_idx` and col key `col_idx`, whose
entries are structs of a single field `element`.
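Examples
--------
A minimal sketch (values illustrative):
>>> import numpy as np
>>> bm = BlockMatrix.from_numpy(np.array([[1.0, 2.0], [3.0, 4.0]]))
>>> mt = bm.to_matrix_table_row_major()  # doctest: +SKIP
>>> mt.count()  # doctest: +SKIP
(2, 2)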
Parameters
----------
n_partitions : int or None
Number of partitions of the matrix table.
maximum_cache_memory_in_bytes : int or None
The amount of memory to reserve, per partition, to cache rows of the
matrix in memory. This value must be at least large enough to hold
one row of the matrix in memory. If this value is exactly the size of
one row, then a partition makes a network request for every row of
every block. Larger values reduce the number of network requests. If
memory permits, setting this value to the size of one output
partition permits one network request per block per partition.
Notes
-----
Does not support block-sparse matrices.
Returns
-------
:class:`.MatrixTable`
Matrix table where each entry corresponds to an entry in the block matrix.
"""
t = self.to_table_row_major(n_partitions, maximum_cache_memory_in_bytes)
t = t.transmute(entries=t.entries.map(lambda i: hl.struct(element=i)))
t = t.annotate_globals(cols=hl.range(self.n_cols).map(lambda i: hl.struct(col_idx=hl.int64(i))))
return t._unlocalize_entries('entries', 'cols', ['col_idx'])
@staticmethod
@typecheck(path_in=str,
path_out=str,
delimiter=str,
header=nullable(str),
add_index=bool,
parallel=nullable(ExportType.checker),
partition_size=nullable(int),
entries=enumeration('full', 'lower', 'strict_lower', 'upper', 'strict_upper'))
def export(path_in, path_out, delimiter='\t', header=None, add_index=False, parallel=None,
partition_size=None, entries='full'):
"""Exports a stored block matrix as a delimited text file.
Examples
--------
Consider the following matrix.
>>> import numpy as np
>>> nd = np.array([[1.0, 0.8, 0.7],
... [0.8, 1.0 ,0.3],
... [0.7, 0.3, 1.0]])
>>> BlockMatrix.from_numpy(nd).write('output/example.bm', overwrite=True, force_row_major=True)
Export the full matrix as a file with tab-separated values:
>>> BlockMatrix.export('output/example.bm', 'output/example.tsv')
Export the upper-triangle of the matrix as a block gzipped file of
comma-separated values.
>>> BlockMatrix.export(path_in='output/example.bm',
... path_out='output/example.csv.bgz',
... delimiter=',',
... entries='upper')
Export the full matrix with row indices in parallel as a folder of
gzipped files, each with a header line for columns ``idx``, ``A``,
``B``, and ``C``.
>>> BlockMatrix.export(path_in='output/example.bm',
... path_out='output/example.gz',
... header='\t'.join(['idx', 'A', 'B', 'C']),
... add_index=True,
... parallel='header_per_shard',
... partition_size=2)
This produces two compressed files which uncompress to:
.. code-block:: text
idx A B C
0 1.0 0.8 0.7
1 0.8 1.0 0.3
.. code-block:: text
idx A B C
2 0.7 0.3 1.0
Warning
-------
The block matrix must be stored in row-major format, as results
from :meth:`.BlockMatrix.write` with ``force_row_major=True`` and from
:meth:`.BlockMatrix.write_from_entry_expr`. Otherwise,
:meth:`export` will fail.
Notes
-----
The five options for `entries` are illustrated below.
Full:
.. code-block:: text
1.0 0.8 0.7
0.8 1.0 0.3
0.7 0.3 1.0
Lower triangle:
.. code-block:: text
1.0
0.8 1.0
0.7 0.3 1.0
Strict lower triangle:
.. code-block:: text
0.8
0.7 0.3
Upper triangle:
.. code-block:: text
1.0 0.8 0.7
1.0 0.3
1.0
Strict upper triangle:
.. code-block:: text
0.8 0.7
0.3
The number of columns must be less than :math:`2^{31}`.
The number of partitions (file shards) exported equals the ceiling
of ``n_rows / partition_size``. By default, there is one partition
per row of blocks in the block matrix. The number of partitions
should be at least the number of cores for efficient parallelism.
Setting the partition size to an exact (rather than approximate)
divisor or multiple of the block size reduces superfluous shuffling
of data.
If `parallel` is ``None``, these file shards are then serially
concatenated by one core into one file, a slow process. See
other options below.
It is highly recommended to export large files with a ``.bgz`` extension,
which will use a block gzipped compression codec. These files can be
read natively with Python's ``gzip.open`` and R's ``read.table``.
Parameters
----------
path_in: :class:`str`
Path to input block matrix, stored row-major on disk.
path_out: :class:`str`
Path for export.
Use extension ``.gz`` for gzip or ``.bgz`` for block gzip.
delimiter: :class:`str`
Column delimiter.
header: :class:`str`, optional
If provided, `header` is prepended before the first row of data.
add_index: :obj:`bool`
If ``True``, add an initial column with the absolute row index.
parallel: :class:`str`, optional
If ``'header_per_shard'``, create a folder with one file per
partition, each with a header if provided.
If ``'separate_header'``, create a folder with one file per
partition without a header; write the header, if provided, in
a separate file.
If ``None``, serially concatenate the header and all partitions
into one file; export will be slower.
If `header` is ``None`` then ``'header_per_shard'`` and
``'separate_header'`` are equivalent.
partition_size: :obj:`int`, optional
Number of rows to group per partition for export.
Default given by block size of the block matrix.
entries: :class:`str`
Describes which entries to export. One of:
``'full'``, ``'lower'``, ``'strict_lower'``, ``'upper'``, ``'strict_upper'``.
"""
export_type = ExportType.default(parallel)
Env.spark_backend('BlockMatrix.export')._jbackend.pyExportBlockMatrix(
path_in, path_out, delimiter, header, add_index, export_type, partition_size, entries)
@typecheck_method(rectangles=sequenceof(sequenceof(int)))
def sparsify_rectangles(self, rectangles):
"""Filter to blocks overlapping the union of rectangular regions.
Examples
--------
Consider the following block matrix:
>>> import numpy as np
>>> nd = np.array([[ 1.0, 2.0, 3.0, 4.0],
... [ 5.0, 6.0, 7.0, 8.0],
... [ 9.0, 10.0, 11.0, 12.0],
... [13.0, 14.0, 15.0, 16.0]])
>>> bm = BlockMatrix.from_numpy(nd, block_size=2)
Filter to blocks covering three rectangles and collect to NumPy:
>>> bm.sparsify_rectangles([[0, 1, 0, 1], [0, 3, 0, 2], [1, 2, 0, 4]]).to_numpy() # doctest: +SKIP_OUTPUT_CHECK
array([[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 0., 0.],
[13., 14., 0., 0.]])
Notes
-----
This method creates a block-sparse matrix by zeroing out (dropping)
all blocks which are disjoint from the union of a set of rectangular
regions. Partially overlapping blocks are *not* modified.
Each rectangle is encoded as a list of length four of
the form ``[row_start, row_stop, col_start, col_stop]``,
where starts are inclusive and stops are exclusive.
These must satisfy ``0 <= row_start <= row_stop <= n_rows`` and
``0 <= col_start <= col_stop <= n_cols``.
For example ``[0, 2, 1, 3]`` corresponds to the row-index range
``[0, 2)`` and column-index range ``[1, 3)``, i.e. the elements at
positions ``(0, 1)``, ``(0, 2)``, ``(1, 1)``, and ``(1, 2)``.
The number of rectangles must be less than :math:`2^{29}`.
Parameters
----------
rectangles: :obj:`list` of :obj:`list` of :obj:`int`
List of rectangles of the form
``[row_start, row_stop, col_start, col_stop]``.
Returns
-------
:class:`.BlockMatrix`
Sparse block matrix.
"""
n_rectangles = len(rectangles)
if n_rectangles >= (1 << 29):
raise ValueError(f'number of rectangles must be less than 2^29, found {n_rectangles}')
n_rows = self.n_rows
n_cols = self.n_cols
for r in rectangles:
if len(r) != 4:
raise ValueError(f'rectangle {r} does not have length 4')
if not (0 <= r[0] <= r[1] <= n_rows and 0 <= r[2] <= r[3] <= n_cols):
raise ValueError(f'rectangle {r} does not satisfy '
f'0 <= r[0] <= r[1] <= n_rows and 0 <= r[2] <= r[3] <= n_cols')
rectangles = hl.literal(list(itertools.chain(*rectangles)), hl.tarray(hl.tint64))
return BlockMatrix(
BlockMatrixSparsify(self._bmir, rectangles._ir, RectangleSparsifier))
@typecheck_method(path_out=str,
rectangles=sequenceof(sequenceof(int)),
delimiter=str,
binary=bool)
def export_rectangles(self, path_out, rectangles, delimiter='\t', binary=False):
"""Export rectangular regions from a block matrix to delimited text or binary files.
Examples
--------
Consider the following block matrix:
>>> import numpy as np
>>> nd = np.array([[ 1.0, 2.0, 3.0, 4.0],
... [ 5.0, 6.0, 7.0, 8.0],
... [ 9.0, 10.0, 11.0, 12.0],
... [13.0, 14.0, 15.0, 16.0]])
Filter to the three rectangles and export as TSV files.
>>> rectangles = [[0, 1, 0, 1], [0, 3, 0, 2], [1, 2, 0, 4]]
>>>
>>> (BlockMatrix.from_numpy(nd)
... .export_rectangles('output/example.bm', rectangles))
This produces three files in the example folder.
The first file is ``rect-0_0-1-0-1``:
.. code-block:: text
1.0
The second file is ``rect-1_0-3-0-2``:
.. code-block:: text
1.0 2.0
5.0 6.0
9.0 10.0
The third file is ``rect-2_1-2-0-4``:
.. code-block:: text
5.0 6.0 7.0 8.0
Notes
-----
This method exports rectangular regions of a stored block matrix
to delimited text or binary files, in parallel by region.
Each rectangle is encoded as a list of length four of
the form ``[row_start, row_stop, col_start, col_stop]``,
where starts are inclusive and stops are exclusive.
These must satisfy ``0 <= row_start <= row_stop <= n_rows`` and
``0 <= col_start <= col_stop <= n_cols``.
For example ``[0, 2, 1, 3]`` corresponds to the row-index range
``[0, 2)`` and column-index range ``[1, 3)``, i.e. the elements at
positions ``(0, 1)``, ``(0, 2)``, ``(1, 1)``, and ``(1, 2)``.
Each file name encodes the index of the rectangle in `rectangles`
and the bounds as formatted in the example.
The block matrix can be sparse provided all blocks overlapping
the rectangles are present, i.e. this method does not currently
support implicit zeros.
If `binary` is true, each element is exported as 8 bytes, in row
major order with no delimiting, new lines, or shape information. Such
files can instantiate, for example, NumPy ndarrays using
`fromfile <https://docs.scipy.org/doc/numpy/reference/generated/numpy.fromfile.html>`__
and
`reshape <https://docs.scipy.org/doc/numpy/reference/generated/numpy.reshape.html>`__.
Note however that these binary files are not platform independent; in
particular, no byte-order or data-type information is saved.
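        As a sketch only (hypothetical file path, and assuming the example above
        had been run with ``binary=True``), the third rectangle could be loaded
        locally as:
        >>> import numpy as np
        >>> a = np.fromfile('output/example.bm/rect-2_1-2-0-4').reshape((1, 4))  # doctest: +SKIP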
The number of rectangles must be less than :math:`2^{29}`.
Parameters
----------
path_out: :class:`str`
Path for folder of exported files.
rectangles: :obj:`list` of :obj:`list` of :obj:`int`
List of rectangles of the form
``[row_start, row_stop, col_start, col_stop]``.
delimiter: :class:`str`
Column delimiter.
binary: :obj:`bool`
If true, export elements as raw bytes in row major order.
"""
n_rectangles = len(rectangles)
if n_rectangles == 0:
raise ValueError('no rectangles provided')
if n_rectangles >= (1 << 29):
raise ValueError(f'number of rectangles must be less than 2^29, found {n_rectangles}')
for r in rectangles:
if len(r) != 4:
raise ValueError(f'rectangle {r} does not have length 4')
if not (0 <= r[0] <= r[1] <= self.n_rows and 0 <= r[2] <= r[3] <= self.n_cols):
raise ValueError(f'rectangle {r} does not satisfy '
f'0 <= r[0] <= r[1] <= n_rows and 0 <= r[2] <= r[3] <= n_cols')
writer = BlockMatrixRectanglesWriter(path_out, rectangles, delimiter, binary)
Env.backend().execute(BlockMatrixWrite(self._bmir, writer))
@typecheck_method(path_out=str, delimiter=str, binary=bool)
def export_blocks(self, path_out, delimiter='\t', binary=False):
"""Export each block of the block matrix as its own delimited text or binary file.
        This is a special case of :meth:`.export_rectangles`.
Examples
--------
Consider the following block matrix:
>>> import numpy as np
>>> nd = np.array([[ 1.0, 2.0, 3.0],
... [ 4.0, 5.0, 6.0],
... [ 7.0, 8.0, 9.0]])
>>> BlockMatrix.from_numpy(nd, block_size=2).export_blocks('output/example')
This produces four files in the example folder.
The first file is ``rect-0_0-2-0-2``:
.. code-block:: text
1.0 2.0
4.0 5.0
The second file is ``rect-1_0-2-2-3``:
.. code-block:: text
3.0
6.0
The third file is ``rect-2_2-3-0-2``:
.. code-block:: text
7.0 8.0
        And the fourth file is ``rect-3_2-3-2-3``:
.. code-block:: text
9.0
Notes
-----
This method does not have any matrix size limitations.
If exporting to binary files, note that they are not platform independent. No byte-order
or data-type information is saved.
See Also
--------
:meth:`.rectangles_to_numpy`
Parameters
----------
path_out: :class:`str`
Path for folder of exported files.
delimiter: :class:`str`
Column delimiter.
binary: :obj:`bool`
If true, export elements as raw bytes in row major order.
"""
def rows_in_block(block_row):
if block_row == self._n_block_rows - 1:
return self.n_rows - block_row * self.block_size
return self.block_size
def cols_in_block(block_col):
if block_col == self._n_block_cols - 1:
return self.n_cols - block_col * self.block_size
return self.block_size
def bounds(block_row, block_col):
start_row = block_row * self.block_size
start_col = block_col * self.block_size
end_row = start_row + rows_in_block(block_row)
end_col = start_col + cols_in_block(block_col)
return [start_row, end_row, start_col, end_col]
block_indices = itertools.product(range(self._n_block_rows), range(self._n_block_cols))
rectangles = [bounds(block_row, block_col) for (block_row, block_col) in block_indices]
self.export_rectangles(path_out, rectangles, delimiter, binary)
@staticmethod
@typecheck(path=str, binary=bool)
def rectangles_to_numpy(path, binary=False):
"""Instantiates a NumPy ndarray from files of rectangles written out using
:meth:`.export_rectangles` or :meth:`.export_blocks`. For any given
dimension, the ndarray will have length equal to the upper bound of that dimension
across the union of the rectangles. Entries not covered by any rectangle will be initialized to 0.
Examples
--------
Consider the following:
>>> import numpy as np
>>> nd = np.array([[ 1.0, 2.0, 3.0],
... [ 4.0, 5.0, 6.0],
... [ 7.0, 8.0, 9.0]])
>>> BlockMatrix.from_numpy(nd).export_rectangles('output/example', [[0, 3, 0, 1], [1, 2, 0, 2]])
>>> BlockMatrix.rectangles_to_numpy('output/example')
This would produce the following NumPy ndarray:
.. code-block:: text
1.0 0.0
4.0 5.0
7.0 0.0
Notes
-----
If exporting to binary files, note that they are not platform independent. No byte-order
or data-type information is saved.
See Also
--------
:meth:`.export_rectangles`
:meth:`.export_blocks`
Parameters
----------
path: :class:`str`
Path to directory where rectangles were written.
binary: :obj:`bool`
If true, reads the files as binary, otherwise as text delimited.
Returns
-------
:class:`numpy.ndarray`
"""
def parse_rects(fname):
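            # File names encode rect-<idx>_<row_start>-<row_stop>-<col_start>-<col_stop>,
            # so exactly five integers are expected per name.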
rect_idx_and_bounds = [int(i) for i in re.findall(r'\d+', fname)]
if len(rect_idx_and_bounds) != 5:
raise ValueError(f'Invalid rectangle file name: {fname}')
return rect_idx_and_bounds
rect_files = [file['path'] for file in hl.utils.hadoop_ls(path) if not re.match(r'.*\.crc', file['path'])]
rects = [parse_rects(os.path.basename(file_path)) for file_path in rect_files]
n_rows = max(rects, key=lambda r: r[2])[2]
n_cols = max(rects, key=lambda r: r[4])[4]
nd = np.zeros(shape=(n_rows, n_cols))
with with_local_temp_file() as f:
uri = local_path_uri(f)
for rect, file_path in zip(rects, rect_files):
hl.utils.hadoop_copy(file_path, uri)
if binary:
rect_data = np.reshape(np.fromfile(f), (rect[2] - rect[1], rect[4] - rect[3]))
else:
rect_data = np.loadtxt(f, ndmin=2)
nd[rect[1]:rect[2], rect[3]:rect[4]] = rect_data
return nd
@typecheck_method(compute_uv=bool,
complexity_bound=int)
def svd(self, compute_uv=True, complexity_bound=8192):
r"""Computes the reduced singular value decomposition.
Examples
--------
>>> x = BlockMatrix.from_numpy(np.array([[-2.0, 0.0, 3.0],
... [-1.0, 2.0, 4.0]]))
>>> x.svd()
(array([[-0.60219551, -0.79834865],
[-0.79834865, 0.60219551]]),
array([5.61784832, 1.56197958]),
array([[ 0.35649586, -0.28421866, -0.89001711],
[ 0.6366932 , 0.77106707, 0.00879404]]))
Notes
-----
This method leverages distributed matrix multiplication to compute
reduced `singular value decomposition
<https://en.wikipedia.org/wiki/Singular-value_decomposition>`__ (SVD)
for matrices that would otherwise be too large to work with locally,
provided that at least one dimension is less than or equal to 46300.
Let :math:`X` be an :math:`n \times m` matrix and let
:math:`r = \min(n, m)`. In particular, :math:`X` can have at most
:math:`r` non-zero singular values. The reduced SVD of :math:`X`
has the form
.. math::
X = U \Sigma V^T
where
- :math:`U` is an :math:`n \times r` matrix whose columns are
(orthonormal) left singular vectors,
- :math:`\Sigma` is an :math:`r \times r` diagonal matrix of non-negative
singular values in descending order,
- :math:`V^T` is an :math:`r \times m` matrix whose rows are
(orthonormal) right singular vectors.
If the singular values in :math:`\Sigma` are distinct, then the
decomposition is unique up to multiplication of corresponding left and
right singular vectors by -1. The computational complexity of SVD is
roughly :math:`nmr`.
We now describe the implementation in more detail.
If :math:`\sqrt[3]{nmr}` is less than or equal to `complexity_bound`,
then :math:`X` is localized to an ndarray on which
:func:`scipy.linalg.svd` is called. In this case, all components are
returned as ndarrays.
If :math:`\sqrt[3]{nmr}` is greater than `complexity_bound`, then the
reduced SVD is computed via the smaller gramian matrix of :math:`X`. For
:math:`n > m`, the three stages are:
1. Compute (and localize) the gramian matrix :math:`X^T X`,
2. Compute the eigenvalues and right singular vectors via the
symmetric eigendecomposition :math:`X^T X = V S V^T` with
:func:`numpy.linalg.eigh` or :func:`scipy.linalg.eigh`,
        3. Compute the singular values as :math:`\Sigma = S^\frac{1}{2}` and
           the left singular vectors as the block matrix
:math:`U = X V \Sigma^{-1}`.
In this case, since block matrix multiplication is lazy, it is efficient
to subsequently slice :math:`U` (e.g. based on the singular values), or
discard :math:`U` entirely.
If :math:`n \leq m`, the three stages instead use the gramian
:math:`X X^T = U S U^T` and return :math:`V^T` as the
block matrix :math:`\Sigma^{-1} U^T X`.
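        As a purely illustrative local sketch of the same three stages (plain
        NumPy on a small tall matrix, not the distributed implementation):
        >>> import numpy as np
        >>> X = np.random.rand(100, 5)                       # n > m
        >>> e, V = np.linalg.eigh(X.T @ X)                   # stages 1-2: gramian eigendecomposition
        >>> s = np.flip(np.sqrt(np.maximum(e, 0)), axis=0)   # descending singular values
        >>> V = np.fliplr(V)
        >>> U = X @ V / s                                    # stage 3: left singular vectors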
Warning
-------
Computing reduced SVD via the gramian presents an added wrinkle when
:math:`X` is not full rank, as the block-matrix-side null-basis is not
computable by the formula in the third stage. Furthermore, due to finite
precision, the zero eigenvalues of :math:`X^T X` or :math:`X X^T` will
only be approximately zero.
        If the rank is not known ahead of time, examining the relative sizes of the
trailing singular values should reveal where the spectrum switches from
non-zero to "zero" eigenvalues. With 64-bit floating point, zero
eigenvalues are typically about 1e-16 times the largest eigenvalue.
The corresponding singular vectors should be sliced away **before** an
action which realizes the block-matrix-side singular vectors.
:meth:`svd` sets the singular values corresponding to negative
eigenvalues to exactly ``0.0``.
Warning
-------
The first and third stages invoke distributed matrix multiplication with
parallelism bounded by the number of resulting blocks, whereas the
second stage is executed on the leader (master) node. For matrices of
large minimum dimension, it may be preferable to run these stages
separately.
The performance of the second stage depends critically on the number of
leader (master) cores and the NumPy / SciPy configuration, viewable with
``np.show_config()``. For Intel machines, we recommend installing the
`MKL <https://anaconda.org/anaconda/mkl>`__ package for Anaconda.
Consequently, the optimal value of `complexity_bound` is highly
configuration-dependent.
Parameters
----------
compute_uv: :obj:`bool`
If False, only compute the singular values (or eigenvalues).
complexity_bound: :obj:`int`
Maximum value of :math:`\sqrt[3]{nmr}` for which
:func:`scipy.linalg.svd` is used.
Returns
-------
u: :class:`numpy.ndarray` or :class:`BlockMatrix`
Left singular vectors :math:`U`, as a block matrix if :math:`n > m` and
:math:`\sqrt[3]{nmr}` exceeds `complexity_bound`.
Only returned if `compute_uv` is True.
s: :class:`numpy.ndarray`
Singular values from :math:`\Sigma` in descending order.
vt: :class:`numpy.ndarray` or :class:`BlockMatrix`
            Right singular vectors :math:`V^T`, as a block matrix if :math:`n \leq m` and
:math:`\sqrt[3]{nmr}` exceeds `complexity_bound`.
Only returned if `compute_uv` is True.
"""
n, m = self.shape
if n * m * min(n, m) <= complexity_bound ** 3:
return _svd(self.to_numpy(), full_matrices=False, compute_uv=compute_uv, overwrite_a=True)
else:
return self._svd_gramian(compute_uv)
@typecheck_method(compute_uv=bool)
def _svd_gramian(self, compute_uv):
x = self
n, m = x.shape
min_dim = min(n, m)
if min_dim > 46300: # limit due to localizing through Java array
raise ValueError(f'svd: dimensions {n} and {m} both exceed 46300')
left_gramian = n <= m
a = ((x @ x.T if left_gramian else x.T @ x)
.sparsify_triangle(lower=True, blocks_only=True)
.to_numpy())
if compute_uv:
e, w = _eigh(a)
for i in range(np.searchsorted(e, 0.0)):
e[i] = 0
# flip singular values to descending order
s = np.flip(np.sqrt(e), axis=0)
w = np.fliplr(w)
if left_gramian:
u = w
vt = BlockMatrix.from_numpy((w / s).T) @ x
else:
u = x @ (w / s)
vt = w.T
return u, s, vt
else:
e = np.linalg.eigvalsh(a)
for i in range(np.searchsorted(e, 0.0)):
e[i] = 0
return np.flip(np.sqrt(e), axis=0)
block_matrix_type.set(BlockMatrix)
def _is_scalar(x):
return isinstance(x, float) or isinstance(x, int)
def _shape_after_broadcast(left, right):
"""
Follows numpy's strategy of broadcasting through right-align shapes and
compare corresponding dimensions. See:
https://docs.scipy.org/doc/numpy-1.15.0/user/basics.broadcasting.html#general-broadcasting-rules
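    A small illustrative doctest (direct call to this private helper, shown
    here only as an example):
    >>> _shape_after_broadcast([3, 1], [4])
    [3, 4]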
"""
def join_dim(l_size, r_size):
if not (l_size == r_size or l_size == 1 or r_size == 1):
raise ValueError(f'Incompatible shapes for broadcasting: {left}, {right}')
return max(l_size, r_size)
def pad(arr, n):
return [1 for _ in range(n)] + arr
diff_len = len(left) - len(right)
if diff_len < 0:
left = pad(left, -diff_len)
elif diff_len > 0:
right = pad(right, diff_len)
return [join_dim(lx, rx) for lx, rx in zip(left, right)]
@typecheck(x=oneof(numeric, np.ndarray), block_size=int)
def _to_bmir(x, block_size):
if _is_scalar(x):
return ValueToBlockMatrix(F64(x), [1, 1], block_size)
else:
data = list(_ndarray_as_float64(x).flat)
return ValueToBlockMatrix(hl.literal(data)._ir, list(_ndarray_as_2d(x).shape), block_size)
def _broadcast_to_shape(bmir, result_shape):
in_index_expr = _broadcast_index_expr(bmir.typ.shape, bmir.typ.is_row_vector)
return BlockMatrixBroadcast(bmir, in_index_expr, result_shape, bmir.typ.block_size)
def _broadcast_index_expr(bmir_shape, is_row_vector):
if len(bmir_shape) == 0:
return []
elif len(bmir_shape) == 1:
return [1] if is_row_vector else [0]
else:
        raise ValueError(f'Cannot broadcast shape: {bmir_shape}')
def _ndarray_as_2d(nd):
if nd.ndim == 1:
nd = nd.reshape(1, nd.shape[0])
elif nd.ndim > 2:
raise ValueError(f'ndarray must have one or two axes, found shape {nd.shape}')
return nd
def _ndarray_as_float64(nd):
if nd.dtype != np.float64:
try:
nd = nd.astype(np.float64)
except ValueError as e:
raise TypeError(f"ndarray elements of dtype {nd.dtype} cannot be converted to type 'float64'") from e
return nd
def _jarray_from_ndarray(nd):
if nd.size >= (1 << 31):
raise ValueError(f'size of ndarray must be less than 2^31, found {nd.size}')
nd = _ndarray_as_float64(nd)
with with_local_temp_file() as path:
uri = local_path_uri(path)
nd.tofile(path)
return Env.hail().utils.richUtils.RichArray.importFromDoubles(Env.spark_backend('_jarray_from_ndarray').fs._jfs, uri, nd.size)
def _ndarray_from_jarray(ja):
with with_local_temp_file() as path:
uri = local_path_uri(path)
Env.hail().utils.richUtils.RichArray.exportToDoubles(Env.spark_backend('_ndarray_from_jarray').fs._jfs, uri, ja)
return np.fromfile(path)
def _breeze_fromfile(uri, n_rows, n_cols):
_check_entries_size(n_rows, n_cols)
return Env.hail().utils.richUtils.RichDenseMatrixDouble.importFromDoubles(Env.spark_backend('_breeze_fromfile').fs._jfs, uri, n_rows, n_cols, True)
def _check_entries_size(n_rows, n_cols):
n_entries = n_rows * n_cols
if n_entries >= 1 << 31:
raise ValueError(f'number of entries must be less than 2^31, found {n_entries}')
def _breeze_from_ndarray(nd):
if any(i == 0 for i in nd.shape):
raise ValueError(f'from_numpy: ndarray dimensions must be non-zero, found shape {nd.shape}')
nd = _ndarray_as_2d(nd)
nd = _ndarray_as_float64(nd)
n_rows, n_cols = nd.shape
with with_local_temp_file() as path:
uri = local_path_uri(path)
nd.tofile(path)
return _breeze_fromfile(uri, n_rows, n_cols)
def _svd(a, full_matrices=True, compute_uv=True, overwrite_a=False, check_finite=True):
"""
SciPy supports two Lapack algorithms:
DC: https://software.intel.com/en-us/mkl-developer-reference-fortran-gesdd
GR: https://software.intel.com/en-us/mkl-developer-reference-fortran-gesvd
DC (gesdd) is faster but uses O(elements) memory; lwork may overflow int32
"""
try:
return spla.svd(a, full_matrices=full_matrices, compute_uv=compute_uv, overwrite_a=overwrite_a,
check_finite=check_finite, lapack_driver='gesdd')
except ValueError as e:
if 'Too large work array required' in str(e):
return spla.svd(a, full_matrices=full_matrices, compute_uv=compute_uv, overwrite_a=overwrite_a,
check_finite=check_finite, lapack_driver='gesvd')
else:
raise
def _eigh(a):
"""
Only the lower triangle is used. Returns eigenvalues, eigenvectors.
NumPy and SciPy apply different Lapack algorithms:
NumPy uses DC: https://software.intel.com/en-us/mkl-developer-reference-fortran-syevd
SciPy uses RRR: https://software.intel.com/en-us/mkl-developer-reference-fortran-syevr
DC (syevd) is faster but uses O(elements) memory; lwork overflows int32 for dim_a > 32766
"""
return np.linalg.eigh(a) if a.shape[0] <= 32766 else spla.eigh(a)
|
hail-is/hail
|
hail/python/hail/linalg/blockmatrix.py
|
Python
|
mit
| 99,722
|
[
"Gaussian"
] |
cf7d765e0ba54a5c3ca24c1c1a92901e2fc6d08d357ff2fa6de19ea478fcbeb5
|
"""MSAView - PyMol support.
Copyright (c) 2011 Joel Hedlund.
Contact: Joel Hedlund <yohell@ifm.liu.se>
MSAView is a modular, configurable and extensible package for analysing and
visualising multiple sequence alignments and sequence features.
This package provides support for viewing structures using custom
visualisation in PyMol.
If you have problems with this package, please contact the author.
Copyright
=========
The MIT License
Copyright (c) 2011 Joel Hedlund.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__version__ = "0.9.0"
import os
import subprocess
import tempfile
from msaview.action import (Action,
register_action)
from msaview.options import Option
from msaview.preset import (Setting,
TextSetting,
presets)
presets.add_to_preset_path(__file__)
def get_pymol_path():
for dir in os.environ["PATH"].split(os.pathsep):
path = os.path.join(dir, 'pymol')
if os.path.isfile(path) and os.access(path, os.X_OK):
return path
PYMOL_PATH = get_pymol_path()
PYMOL_AVAILABLE = PYMOL_PATH is not None
def fix_indent(text):
lines = []
for line in text.splitlines():
if not lines and not line.strip():
continue
if not lines:
indent = len(line) - len(line.lstrip())
lines.append(line[indent:])
while lines and not lines[-1].strip():
lines.pop(-1)
return '\n'.join(lines) + '\n'
class PymolScriptSetting(TextSetting):
def parse(self, element):
TextSetting.parse(self, element)
if self.value is None:
return
self.value = fix_indent(self.value)
def encode(self, element):
Setting.encode(self, element)
if self.value is not None:
element.text = '\n%s' % self.value
presets.register_type('pymolscript', PymolScriptSetting)
class ShowStructureInPymol(Action):
action_name = 'show-structure-in-pymol'
path = ['External viewer', 'Pymol', 'Show structure for %s']
tooltip = 'Show structure in the Pymol molecular viewer.'
fetch_cmd = fix_indent("""
get fetch_path
python
cmd._pymol.invocation.options.keep_thread_alive = True
if cmd.get('fetch_path') == '.':
cmd.set('fetch_path', %r)
python end
fetch %%s, async=0
""") % tempfile.gettempdir()
@classmethod
def applicable(cls, target, coord=None):
if not PYMOL_AVAILABLE:
return
if not coord or coord.sequence is None:
return
if target.msaview_classname != 'data.msa':
return
entry = target.sequence_information.get_entry('pdb-ids', coord.sequence)
if not (entry and entry.structures):
return
actions = []
for structure in sorted(entry.structures, key=lambda s: s.resolution or 1000000):
a = cls(target, coord)
a.params['structure'] = structure
a.path = list(cls.path)
label = repr(structure.id)
details = []
if structure.resolution is not None:
details.append(str(structure.resolution) + ' A')
if structure.method is not None:
details.append(structure.method)
if structure.chains is not None:
details.append(structure.chains)
if details:
label += ' (%s)' % ', '.join(details)
a.path[-1] %= label
actions.append(a)
return actions
def get_options(self):
return [Option(None, 'pymolscript', 'default', 'default', 'Pymol script', 'How the structure should be visualized in pymol (the preset name of a pymol_script)')]
def set_options(self, options):
Action.set_options(self, options)
self.params['pymolscript'] = presets.get_value('pymolscript:' + self.params['pymolscript'])
def run(self):
structure = self.params['structure']
startup_script = self.fetch_cmd % structure.id
startup_script += self.params['pymolscript']
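        # Workaround (assumption about older pymol builds): multi-line scripts piped
        # to stdin with '-p' can be mis-parsed, so each script line is instead passed
        # as its own '-d' command-line argument.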
_buggy_pymol_parser = True
if _buggy_pymol_parser:
argv = sum((['-d', s] for s in startup_script.splitlines()), [PYMOL_PATH])
p = subprocess.Popen(argv, stdin=subprocess.PIPE)
else:
p = subprocess.Popen([PYMOL_PATH, '-p'], stdin=subprocess.PIPE)
p.stdin.write(startup_script)
p.stdin.close()
register_action(ShowStructureInPymol)
|
yohell/msaview
|
msaview_plugin_pymol/__init__.py
|
Python
|
mit
| 5,546
|
[
"PyMOL"
] |
89fd1bcd318beaf97509b44f035fa32e11ed392c57543f5e6baedde083efff50
|
#!/usr/bin/env python
__author__ = "Mike McCann"
__copyright__ = "Copyright 2012, MBARI"
__license__ = "GPL"
__maintainer__ = "Mike McCann"
__email__ = "mccann at mbari.org"
__doc__ = '''
Script to read CSV formatted data that Monique makes from the ESP Drifter
platform and write it to netCDF files.
1. CTD
2. ISUS
Use the conventions for Trajectory feature type and write as much metadata as possible.
This script is meant to preserve the data identically as it is reported in the
.csv files. Use ESP positions from the MBARI Tracking database and interpolate
those as necessary to match the measurement data.
Mike McCann
MBARI 15 September 2012
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@author: __author__
@license: __license__
'''
import os
import sys
import csv
import time
import coards
import urllib2
import datetime
import numpy as np
from pupynere import netcdf_file
# Add grandparent dir to pythonpath so that we can see the CANON and toNetCDF modules
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../") )
from CANON.toNetCDF import BaseWriter
class ParserWriter(BaseWriter):
'''
Handle all information needed to parse LR Waveglider CSV files and produce
NetCDF for each of them
'''
#
# Object-level dictionaries of interpolated lat lon for easy lookup by 5-sec measurement data
#
gps_lat = {}
gps_lon = {}
def __init__(self, parentDir):
self.parentDir = parentDir
# For reading from malibu on the Western Flyer
##self.read_gps(url='http://192.168.111.177/trackingdb/position/ESP/between/20120910T000000/20120920T000000/data.csv')
self.read_gps()
def read_gps(self, url='http://odss.mbari.org/trackingdb/position/ESP/between/20120910T000000/20120920T000000/data.csv'):
'''
Read the GPS positions from the .csv response and save in an array for easy lookup for the measurement data
'''
es = []
la = []
lo = []
# Careful - trackingdb returns the records in reverse time order
for r in csv.DictReader(urllib2.urlopen(url)):
es.append(int(round(float(r['epochSeconds']))))
la.append(float(r['latitude']))
lo.append(float(r['longitude']))
print "Read in %d ESP GPS records" % len(es)
# Create lookup to get lat & lon given any epoch second, accurate to integer seconds
# Reverse the order for the numpy lat & lon arrays
esArray = np.arange(es[-1], es[0], 1, dtype='int32') # Reversed
laArray = np.interp(esArray, np.array(es)[::-1], np.array(la)[::-1])
loArray = np.interp(esArray, np.array(es)[::-1], np.array(lo)[::-1])
for esecs, lat, lon in zip(esArray, laArray, loArray):
#print esecs, lat, lon
self.gps_lat[esecs] = lat
self.gps_lon[esecs] = lon
##te = int(es[5]) # Test epoch seconds
##print 'te = ', te
##d = dict(e = te, lat = self.gps_lat[te], lon = self.gps_lon[te])
##print '{e}: self.gps_lat[{e}] = {lat}, self.gps_lon[{e}] = {lon}'.format(**d)
def write_ctd(self, inFile='ESP_ctd.csv', outFile='ESP_ctd.nc'):
'''
Read in records from one of the ESP drifter and write out as NetCDF. The records look like (time is local):
year,month,day,hour,minute,second,temp,sal,chl (calibrated),chl (ini)
2012, 9, 11, 15, 32, 38,15.24,33.34,0.68,2.54
2012, 9, 11, 15, 37, 39,15.29,33.25,0.66,2.44
'''
# Initialize lists for the data to be parsed and written
esec_list = []
lat_list = []
lon_list = []
dep_list = []
tem_list = []
sal_list = []
chl_cal_list = []
chl_ini_list = []
# Read data in from the input file
reader = csv.DictReader(open(os.path.join(self.parentDir, inFile)))
for r in reader:
localDT = datetime.datetime(int(r['year']), int(r['month']), int(r['day']),
int(r['hour']), int(r['minute']), int(r['second']))
##print str(localDT)
es = time.mktime(localDT.timetuple())
esec_list.append(es)
lat_list.append(self.gps_lat[es])
lon_list.append(self.gps_lon[es])
dep_list.append(10.0) # For September 2012 ESP deployment the nominal depth is 10m
tem_list.append(r['temp'])
sal_list.append(r['sal'])
chl_cal_list.append(r['chl (calibrated)'])
chl_ini_list.append(r['chl (ini)'])
# Create the NetCDF file
self.ncFile = netcdf_file(outFile, 'w')
self.outFile = outFile
# Trajectory dataset, time is the only netCDF dimension
self.ncFile.createDimension('time', len(esec_list))
self.time = self.ncFile.createVariable('time', 'float64', ('time',))
self.time.standard_name = 'time'
self.time.units = 'seconds since 1970-01-01'
self.time[:] = esec_list
# Record Variables - coordinates for trajectory - save in the instance and use for metadata generation
self.latitude = self.ncFile.createVariable('latitude', 'float64', ('time',))
self.latitude.long_name = 'LATITUDE'
self.latitude.standard_name = 'latitude'
self.latitude.units = 'degree_north'
self.latitude[:] = lat_list
self.longitude = self.ncFile.createVariable('longitude', 'float64', ('time',))
self.longitude.long_name = 'LONGITUDE'
self.longitude.standard_name = 'longitude'
self.longitude.units = 'degree_east'
self.longitude[:] = lon_list
self.depth = self.ncFile.createVariable('depth', 'float64', ('time',))
self.depth.long_name = 'DEPTH'
self.depth.standard_name = 'depth'
self.depth.units = 'm'
self.depth[:] = dep_list
# Record Variables - CTD Data
temp = self.ncFile.createVariable('TEMP', 'float64', ('time',))
temp.long_name = 'Sea Water Temperature in-situ ITS-90 or IPTS-68 scale'
temp.standard_name = 'sea_water_temperature'
temp.coordinates = 'time depth latitude longitude'
temp.units = 'Celsius'
temp[:] = tem_list
sal = self.ncFile.createVariable('PSAL', 'float64', ('time',))
sal.long_name = 'Sea Water Salinity in-situ PSS 1978 scale'
sal.standard_name = 'sea_water_salinity'
sal.coordinates = 'time depth latitude longitude'
sal[:] = sal_list
chlcal = self.ncFile.createVariable('chl', 'float64', ('time',))
chlcal.long_name = 'Chlorophyll'
chlcal.coordinates = 'time depth latitude longitude'
chlcal.units = '?'
chlcal[:] = chl_cal_list
chlini = self.ncFile.createVariable('chl_ini', 'float64', ('time',))
chlini.long_name = 'Raw Chlorophyll'
chlini.coordinates = 'time depth latitude longitude'
chlini.units = '?'
chlini[:] = chl_ini_list
self.add_global_metadata()
self.ncFile.close()
    # End write_ctd()
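    # A minimal read-back sketch (not part of the original workflow; assumes the
    # same pupynere package used above):
    # nc = netcdf_file('ESP_ctd.nc')
    # print nc.variables['TEMP'][:5], nc.variables['time'].units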
def write_isus(self, inFile='ESP_isus.csv', outFile='ESP_isus.nc'):
'''
Read in records from .csv file and write out as NetCDF. Merge with GPS data from MBARI Tracking.
This method builds the NetCDF variables dynamically using the Python 'exec' method.
'''
esec_list = []
lat_list = []
lon_list = []
dep_list = []
isus_vars = [ 'no3'
]
lastEs = 0
reader = csv.DictReader(open(os.path.join(self.parentDir, inFile)))
for r in reader:
localDT = datetime.datetime(int(r['year']), int(r['month']), int(r['day']),
int(r['hour']), int(r['minute']), int(r['second']))
##print str(localDT)
es = time.mktime(localDT.timetuple())
if es <= lastEs:
continue # Must have monotonically increasing time
esec_list.append(es)
lat_list.append(self.gps_lat[es])
lon_list.append(self.gps_lon[es])
dep_list.append(10.0) # For September 2012 ESP deployment the nominal depth is 10m
# This is kind of ridiculous for just one variable
for v in isus_vars:
ncVar = v.replace(' ', '_', 42)
try:
exec "%s_list.append(r['%s'])" % (ncVar, v, )
except NameError:
exec '%s_list = []' % ncVar
exec "%s_list.append(r['%s'])" % (ncVar, v, )
lastEs = es
# Create the NetCDF file
self.ncFile = netcdf_file(outFile, 'w')
self.outFile = outFile
# Trajectory dataset, time is the only netCDF dimension
self.ncFile.createDimension('time', len(esec_list))
self.time = self.ncFile.createVariable('time', 'float64', ('time',))
self.time.standard_name = 'time'
self.time.units = 'seconds since 1970-01-01'
self.time[:] = esec_list
# Record Variables - coordinates for trajectory - save in the instance and use for metadata generation
self.latitude = self.ncFile.createVariable('latitude', 'float64', ('time',))
self.latitude.long_name = 'LATITUDE'
self.latitude.standard_name = 'latitude'
self.latitude.units = 'degree_north'
self.latitude[:] = lat_list
self.longitude = self.ncFile.createVariable('longitude', 'float64', ('time',))
self.longitude.long_name = 'LONGITUDE'
self.longitude.standard_name = 'longitude'
self.longitude.units = 'degree_east'
self.longitude[:] = lon_list
self.depth = self.ncFile.createVariable('depth', 'float64', ('time',))
self.depth.long_name = 'DEPTH'
self.depth.standard_name = 'depth'
self.depth.units = 'm'
self.depth[:] = dep_list
# isus variables
for v in isus_vars:
ncVar = v.replace(' ', '_', 42)
# Only Latitude, Longitude, Depth, and Time variables are upper case to match other Glider data
if v == 'Latitude' or v == 'Longitude':
exec "self.%s = self.ncFile.createVariable('%s', 'float64', ('time',))" % (ncVar.lower(), ncVar.upper(), )
else:
exec "self.%s = self.ncFile.createVariable('%s', 'float64', ('time',))" % (ncVar.lower(), ncVar, )
exec "self.%s.coordinates = 'time depth latitude longitude'" % ncVar.lower()
exec "self.%s.long_name = '%s'" % (ncVar.lower(), v, )
exec "self.%s[:] = %s_list" % (ncVar.lower(), ncVar, )
self.add_global_metadata()
self.ncFile.close()
# End write_isus()
if __name__ == '__main__':
# Accept optional argument of data directory name, e.g. /mbari/Tracking/gliders, otherwise current dir is used
try:
dataDir = sys.argv[1]
except IndexError:
dataDir = '.'
pw = ParserWriter(parentDir=dataDir)
pw.write_ctd()
print "Wrote %s" % pw.outFile
pw.write_isus()
print "Wrote %s" % pw.outFile
|
josephmfaulkner/stoqs
|
stoqs/loaders/CANON/toNetCDF/espDriftToNetcdf.py
|
Python
|
gpl-3.0
| 11,311
|
[
"NetCDF"
] |
8ceb2b3783776956cf2d703346df76724c657ab1f93051a1d39fea25e8640afc
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.simpleapi import CreateSampleWorkspace, CloneWorkspace, GroupWorkspaces, AddSampleLogMultiple, CreateEmptyTableWorkspace
from testhelpers import run_algorithm
from mantid.api import AnalysisDataService, WorkspaceGroup
from scipy.constants import h, m_n, eV
import numpy as np
class CorrectTOFTest(unittest.TestCase):
def setUp(self):
# create sample workspace
self.xmin = 2123.33867005 + 4005.75
self.xmax = 2123.33867005 + 7995.75
self._input_ws = CreateSampleWorkspace(Function="User Defined", UserDefinedFunction="name=LinearBackground, \
A0=0.3;name=Gaussian, PeakCentre=8190, Height=5, Sigma=75", NumBanks=2,
BankPixelWidth=1, XMin=self.xmin, XMax=self.xmax, BinWidth=10.5,
BankDistanceFromSample=4.0, SourceDistanceFromSample=1.4, OutputWorkspace="ws")
lognames = "wavelength,TOF1"
logvalues = "6.0,2123.33867005"
AddSampleLogMultiple(self._input_ws, lognames, logvalues)
# create EPP table
self._table = CreateEmptyTableWorkspace(OutputWorkspace="epptable")
self._table.addColumn(type="double", name="PeakCentre")
table_row = {'PeakCentre': 8189.5}
for i in range(2):
self._table.addRow(table_row)
def tearDown(self):
for wsname in ['ws', 'epptable']:
if AnalysisDataService.doesExist(wsname):
run_algorithm("DeleteWorkspace", Workspace=wsname)
def testCorrection(self):
# tests that correction is done properly
OutputWorkspaceName = "outputws1"
alg_test = run_algorithm("CorrectTOF", InputWorkspace=self._input_ws, EPPTable=self._table, OutputWorkspace=OutputWorkspaceName)
self.assertTrue(alg_test.isExecuted())
wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
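        # de Broglie velocity [m/s] for the 6 Angstrom wavelength set in the sample
        # logs, then the elastic time of flight in microseconds over the 4 m
        # sample-to-detector distance (t = 1e6 * L / v).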
velocity = h/(m_n*6.0e-10)
t_el = 4.0e+6/velocity
t_corr = np.arange(self.xmin, self.xmax + 1.0, 10.5) + t_el - (8189.5 - 2123.33867005)
self.assertTrue(np.allclose(t_corr, wsoutput.readX(0))) #sdd = 4
self.assertTrue(np.allclose(t_corr + t_el, wsoutput.readX(1))) #sdd = 8
run_algorithm("DeleteWorkspace", Workspace=wsoutput)
def testGroup(self):
# tests whether the group of workspaces is accepted as an input
ws2 = CloneWorkspace(self._input_ws)
group = GroupWorkspaces([self._input_ws, ws2])
OutputWorkspaceName = "output_wsgroup"
alg_test = run_algorithm("CorrectTOF", InputWorkspace='group', EPPTable=self._table, OutputWorkspace=OutputWorkspaceName)
self.assertTrue(alg_test.isExecuted())
wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
self.assertTrue(isinstance(wsoutput, WorkspaceGroup))
self.assertEqual(2, wsoutput.getNumberOfEntries())
run_algorithm("DeleteWorkspace", Workspace=group)
run_algorithm("DeleteWorkspace", Workspace=wsoutput)
def testConvertUnits(self):
        # test whether CorrectTOF+ConvertUnits+ConvertToDistribution will give the same result as TOFTOFConvertTOFToDeltaE
OutputWorkspaceName = "outputws1"
alg_test = run_algorithm("CorrectTOF", InputWorkspace=self._input_ws, EPPTable=self._table, OutputWorkspace=OutputWorkspaceName)
self.assertTrue(alg_test.isExecuted())
wscorr = AnalysisDataService.retrieve(OutputWorkspaceName)
# convert units, convert to distribution
alg_cu = run_algorithm("ConvertUnits", InputWorkspace=wscorr, Target='DeltaE', EMode='Direct', EFixed=2.27, OutputWorkspace=OutputWorkspaceName+'_dE')
ws_dE = AnalysisDataService.retrieve(OutputWorkspaceName+'_dE')
alg_cd = run_algorithm("ConvertToDistribution", Workspace=ws_dE)
# create reference data for X axis
tof1 = 2123.33867005
dataX = self._input_ws.readX(0) - tof1
tel = 8189.5 - tof1
factor = m_n*1e+15/eV
newX = 0.5*factor*16.0*(1/tel**2 - 1/dataX**2)
# compare
# self.assertEqual(newX[0], ws_dE.readX(0)[0])
self.assertTrue(np.allclose(newX, ws_dE.readX(0), atol=0.01))
# create reference data for Y axis and compare to the output
tof = dataX[:-1] + 5.25
newY = self._input_ws.readY(0)*tof**3/(factor*10.5*16.0)
# compare
self.assertTrue(np.allclose(newY, ws_dE.readY(0), rtol=0.01))
run_algorithm("DeleteWorkspace", Workspace=ws_dE)
run_algorithm("DeleteWorkspace", Workspace=wscorr)
if __name__ == "__main__":
unittest.main()
|
mganeva/mantid
|
Framework/PythonInterface/test/python/plugins/algorithms/CorrectTOFTest.py
|
Python
|
gpl-3.0
| 5,020
|
[
"Gaussian"
] |
392a52d82e1c6d7620aa86cf49ab8429fe666752d9debb210ceeed865650bcba
|
from threading import Lock
class ResponsibleGenerator(object):
"""A generator that will help clean up when it is done being used."""
__slots__ = ['cleanup', 'gen']
def __init__(self, gen, cleanup):
self.cleanup = cleanup
self.gen = gen
def __del__(self):
self.cleanup()
def __iter__(self):
return self
def next(self):
return self.gen.next()
class ConcurrentStore(object):
def __init__(self, store):
self.store = store
# number of calls to visit still in progress
self.__visit_count = 0
# lock for locking down the indices
self.__lock = Lock()
# lists for keeping track of added and removed triples while
# we wait for the lock
self.__pending_removes = []
self.__pending_adds = []
def add(self, triple):
(s, p, o) = triple
if self.__visit_count == 0:
self.store.add((s, p, o))
else:
self.__pending_adds.append((s, p, o))
def remove(self, triple):
(s, p, o) = triple
if self.__visit_count == 0:
self.store.remove((s, p, o))
else:
self.__pending_removes.append((s, p, o))
def triples(self, triple):
(su, pr, ob) = triple
g = self.store.triples((su, pr, ob))
pending_removes = self.__pending_removes
self.__begin_read()
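        # Wrapping the generator ensures __end_read runs when the iterator is garbage
        # collected, so pending adds/removes are flushed even if the caller stops
        # iterating early.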
for s, p, o in ResponsibleGenerator(g, self.__end_read):
if not (s, p, o) in pending_removes:
yield s, p, o
for (s, p, o) in self.__pending_adds:
if (su is None or su == s) \
and (pr is None or pr == p) \
and (ob is None or ob == o):
yield s, p, o
def __len__(self):
return self.store.__len__()
def __begin_read(self):
lock = self.__lock
lock.acquire()
self.__visit_count = self.__visit_count + 1
lock.release()
def __end_read(self):
lock = self.__lock
lock.acquire()
self.__visit_count = self.__visit_count - 1
if self.__visit_count == 0:
pending_removes = self.__pending_removes
while pending_removes:
(s, p, o) = pending_removes.pop()
try:
self.store.remove((s, p, o))
except:
# TODO: change to try finally?
print s, p, o, "Not in store to remove"
pending_adds = self.__pending_adds
while pending_adds:
(s, p, o) = pending_adds.pop()
self.store.add((s, p, o))
lock.release()
|
gloaec/trifle
|
src/rdflib/plugins/stores/concurrent.py
|
Python
|
gpl-3.0
| 2,686
|
[
"VisIt"
] |
fedc78b7156b6ec561277e1971ce86ce3c278e856b86a6ff1cd6d9d0ae2c0537
|
from pyibex.thickset import *
from pyibex import Interval, IntervalVector
from vibes import vibes
def ex_circle():
t = ThickDisk(0,0,2)
X0 = IntervalVector(2, [-5,5])
# P = ThickPaving(X0, lambda x: opNot(t.test(x)), 0.05)
P = ThickPaving(X0, t, 0.05)
P.visit(ToVibes(1000, "Disk"))
t2 = ThickTranslateInPaving(P, IntervalVector([[0,0], [0,1]]))
X1 = IntervalVector([[4.6,4.7], [2.8,2.9]])
vibes.drawBox(X1[0][0], X1[0][1], X1[1][0], X1[1][1], 'y')
vibes.drawBox(X1[0][0]+1, X1[0][1]+1, X1[1][0]+1, X1[1][1]+1, 'g')
vibes.drawBox(X1[0][0]+1, X1[0][1]+1, X1[1][0]+1, X1[1][1]+1, 'g')
vibes.drawBox(5, 5, X0[1][0]+1, X0[1][1]+1, 'orange')
vibes.axisAuto()
P2 = ThickPaving(X0.inflate(10), t2, 0.1)
P2.visit(ToVibes(1000, "Disk translated"))
vibes.setFigurePos(500,10)
if __name__ == '__main__':
vibes.beginDrawing()
ex_circle()
vibes.endDrawing()
|
benEnsta/pyIbex
|
pyibex/thickset/examples/ex_translate.py
|
Python
|
lgpl-3.0
| 889
|
[
"VisIt"
] |
9699be0fef675a756eb7c88a82a4407ad0e65921eecab7e7a94b7cd31f7fa931
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime
from distutils.version import LooseVersion
import inspect
import sys
import unittest
from io import StringIO
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pyspark import StorageLevel
from pyspark.ml.linalg import SparseVector
from pyspark.sql.types import StructType
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import CachedDataFrame
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.utils import name_like_string
class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
return pdf, psdf
def test_dataframe(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf["a"] + 1, pdf["a"] + 1)
self.assert_eq(psdf.columns, pd.Index(["a", "b"]))
self.assert_eq(psdf[psdf["b"] > 2], pdf[pdf["b"] > 2])
self.assert_eq(-psdf[psdf["b"] > 2], -pdf[pdf["b"] > 2])
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b.mean(), pdf.b.mean())
self.assert_eq(psdf.b.var(), pdf.b.var())
self.assert_eq(psdf.b.std(), pdf.b.std())
pdf, psdf = self.df_pair
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assertEqual(psdf.a.notnull().rename("x").name, "x")
# check ps.DataFrame(ps.Series)
pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(pd.DataFrame(pser), ps.DataFrame(psser))
# check psdf[pd.Index]
pdf, psdf = self.df_pair
column_mask = pdf.columns.isin(["a", "b"])
index_cols = pdf.columns[column_mask]
self.assert_eq(psdf[index_cols], pdf[index_cols])
def _check_extension(self, psdf, pdf):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psdf, pdf, check_exact=False)
for dtype in psdf.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assert_eq(psdf, pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1, 2, None, 4], dtype="Int8"),
"b": pd.Series([1, None, None, 4], dtype="Int16"),
"c": pd.Series([1, 2, None, None], dtype="Int32"),
"d": pd.Series([None, 2, None, 4], dtype="Int64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_astype_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": [1, 2, None, 4],
"b": [1, None, None, 4],
"c": [1, 2, None, None],
"d": [None, 2, None, 4],
}
)
psdf = ps.from_pandas(pdf)
astype = {"a": "Int8", "b": "Int16", "c": "Int32", "d": "Int64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_extension_object_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series(["a", "b", None, "c"], dtype="string"),
"b": pd.Series([True, None, False, True], dtype="boolean"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_astype_extension_object_dtypes(self):
pdf = pd.DataFrame({"a": ["a", "b", None, "c"], "b": [True, None, False, True]})
psdf = ps.from_pandas(pdf)
astype = {"a": "string", "b": "boolean"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_extension_float_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, None, 4.0], dtype="Float32"),
"b": pd.Series([1.0, None, 3.0, 4.0], dtype="Float64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + 1, pdf + 1)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_astype_extension_float_dtypes(self):
pdf = pd.DataFrame({"a": [1.0, 2.0, None, 4.0], "b": [1.0, None, 3.0, 4.0]})
psdf = ps.from_pandas(pdf)
astype = {"a": "Float32", "b": "Float64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psser = ps.Series([4, 5, 6])
self.assertRaises(ValueError, lambda: psdf.insert(0, "y", psser))
self.assertRaisesRegex(
ValueError, "cannot insert b, already exists", lambda: psdf.insert(1, "b", 10)
)
self.assertRaisesRegex(
TypeError,
'"column" should be a scalar value or tuple that contains scalar values',
lambda: psdf.insert(0, list("abc"), psser),
)
self.assertRaisesRegex(
TypeError,
"loc must be int",
lambda: psdf.insert((1,), "b", 10),
)
self.assertRaises(ValueError, lambda: psdf.insert(0, "e", [7, 8, 9, 10]))
self.assertRaises(ValueError, lambda: psdf.insert(0, "f", ps.Series([7, 8])))
self.assertRaises(AssertionError, lambda: psdf.insert(100, "y", psser))
self.assertRaises(AssertionError, lambda: psdf.insert(1, "y", psser, allow_duplicates=True))
#
# DataFrame with MultiIndex as columns
#
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
self.assertRaisesRegex(
ValueError, "cannot insert d, already exists", lambda: psdf.insert(4, "d", 11)
)
self.assertRaisesRegex(
ValueError,
'"column" must have length equal to number of column levels.',
lambda: psdf.insert(4, ("e",), 11),
)
def test_inplace(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["a"] = pdf["a"] + 10
psdf["a"] = psdf["a"] + 10
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
def test_assign_list(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
psdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"):
psdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80]
def test_dataframe_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["x"], pdf["x"])
self.assert_eq(psdf["y.z"], pdf["y.z"])
self.assert_eq(psdf["x"]["b"], pdf["x"]["b"])
self.assert_eq(psdf["x"]["b"]["2"], pdf["x"]["b"]["2"])
self.assert_eq(psdf.x, pdf.x)
self.assert_eq(psdf.x.b, pdf.x.b)
self.assert_eq(psdf.x.b["2"], pdf.x.b["2"])
self.assertRaises(KeyError, lambda: psdf["z"])
self.assertRaises(AttributeError, lambda: psdf.z)
self.assert_eq(psdf[("x",)], pdf[("x",)])
self.assert_eq(psdf[("x", "a")], pdf[("x", "a")])
self.assert_eq(psdf[("x", "a", "1")], pdf[("x", "a", "1")])
def test_dataframe_column_level_name(self):
column = pd.Index(["A", "B", "C"], name="X")
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
def test_dataframe_multiindex_names_level(self):
columns = pd.MultiIndex.from_tuples(
[("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")],
names=["lvl_1", "lvl_2", "lv_3"],
)
pdf = pd.DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]],
columns=columns,
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
psdf1 = ps.from_pandas(pdf)
self.assert_eq(psdf1.columns.names, pdf.columns.names)
self.assertRaises(
AssertionError,
lambda: ps.DataFrame(psdf1._internal.copy(column_label_names=("level",))),
)
self.assert_eq(psdf["X"], pdf["X"])
self.assert_eq(psdf["X"].columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"].to_pandas().columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"]["A"], pdf["X"]["A"])
self.assert_eq(psdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf[("X", "A")], pdf[("X", "A")])
self.assert_eq(psdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A", "Z")], pdf[("X", "A", "Z")])
def test_itertuples(self):
pdf = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
pdf.itertuples(index=False, name="Animal"), psdf.itertuples(index=False, name="Animal")
):
self.assert_eq(ptuple, ktuple)
for ptuple, ktuple in zip(pdf.itertuples(name=None), psdf.itertuples(name=None)):
self.assert_eq(ptuple, ktuple)
pdf.index = pd.MultiIndex.from_arrays(
[[1, 2], ["black", "brown"]], names=("count", "color")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf.columns = pd.MultiIndex.from_arrays(
[["CA", "WA"], ["age", "children"]], names=("origin", "info")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
(pdf + 1).itertuples(name="num"), (psdf + 1).itertuples(name="num")
):
self.assert_eq(ptuple, ktuple)
# DataFrames with a large number of columns (>254)
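        # (exercises the path where itertuples may fall back from namedtuples to
        # plain tuples for very wide frames; the wrapper should mirror pandas either way)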
pdf = pd.DataFrame(np.random.random((1, 255)))
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="num"), psdf.itertuples(name="num")):
self.assert_eq(ptuple, ktuple)
def test_iterrows(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
for (pdf_k, pdf_v), (psdf_k, psdf_v) in zip(pdf.iterrows(), psdf.iterrows()):
self.assert_eq(pdf_k, psdf_k)
self.assert_eq(pdf_v, psdf_v)
def test_reset_index(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index().index, pdf.reset_index().index)
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.index.name = "a"
psdf.index.name = "a"
with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"):
psdf.reset_index()
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
# inplace
pser = pdf.a
psser = psdf.a
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf.columns = ["index", "b"]
psdf.columns = ["index", "b"]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
def test_reset_index_with_default_index_types(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
            # The 'distributed' default index is non-deterministic, so reset it before comparing.
self.assert_eq(psdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index())
def test_reset_index_with_multiindex_columns(self):
index = pd.MultiIndex.from_tuples(
[("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")],
names=["class", "name"],
)
columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")])
pdf = pd.DataFrame(
[(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")],
index=index,
columns=columns,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(level="class"), pdf.reset_index(level="class"))
self.assert_eq(
psdf.reset_index(level="class", col_level=1),
pdf.reset_index(level="class", col_level=1),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="species"),
pdf.reset_index(level="class", col_level=1, col_fill="species"),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="genus"),
pdf.reset_index(level="class", col_level=1, col_fill="genus"),
)
with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"):
psdf.reset_index(col_level=2)
pdf.index.names = [("x", "class"), ("y", "name")]
psdf.index.names = [("x", "class"), ("y", "name")]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."):
psdf.reset_index(col_level=1)
def test_index_to_frame_reset_index(self):
def check(psdf, pdf):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
pdf, psdf = self.df_pair
check(psdf.index.to_frame(), pdf.index.to_frame())
check(psdf.index.to_frame(index=False), pdf.index.to_frame(index=False))
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
            # The `name` argument was added in pandas 0.24.
check(psdf.index.to_frame(name="a"), pdf.index.to_frame(name="a"))
check(
psdf.index.to_frame(index=False, name="a"),
pdf.index.to_frame(index=False, name="a"),
)
check(psdf.index.to_frame(name=("x", "a")), pdf.index.to_frame(name=("x", "a")))
check(
psdf.index.to_frame(index=False, name=("x", "a")),
pdf.index.to_frame(index=False, name=("x", "a")),
)
def test_multiindex_column_access(self):
columns = pd.MultiIndex.from_tuples(
[
("a", "", "", "b"),
("c", "", "d", ""),
("e", "", "f", ""),
("e", "g", "", ""),
("", "", "", "h"),
("i", "", "", ""),
]
)
pdf = pd.DataFrame(
[
(1, "a", "x", 10, 100, 1000),
(2, "b", "y", 20, 200, 2000),
(3, "c", "z", 30, 300, 3000),
],
columns=columns,
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["a"], pdf["a"])
self.assert_eq(psdf["a"]["b"], pdf["a"]["b"])
self.assert_eq(psdf["c"], pdf["c"])
self.assert_eq(psdf["c"]["d"], pdf["c"]["d"])
self.assert_eq(psdf["e"], pdf["e"])
self.assert_eq(psdf["e"][""]["f"], pdf["e"][""]["f"])
self.assert_eq(psdf["e"]["g"], pdf["e"]["g"])
self.assert_eq(psdf[""], pdf[""])
self.assert_eq(psdf[""]["h"], pdf[""]["h"])
self.assert_eq(psdf["i"], pdf["i"])
self.assert_eq(psdf[["a", "e"]], pdf[["a", "e"]])
self.assert_eq(psdf[["e", "a"]], pdf[["e", "a"]])
self.assert_eq(psdf[("a",)], pdf[("a",)])
self.assert_eq(psdf[("e", "g")], pdf[("e", "g")])
# self.assert_eq(psdf[("i",)], pdf[("i",)])
self.assert_eq(psdf[("i", "")], pdf[("i", "")])
self.assertRaises(KeyError, lambda: psdf[("a", "b")])
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df.__repr__()
df["a"] = df["id"]
self.assertEqual(df.__repr__(), df.to_pandas().__repr__())
def test_repr_html_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df._repr_html_()
df["a"] = df["id"]
self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_())
def test_empty_dataframe(self):
pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_all_null_dataframe(self):
pdf = pd.DataFrame(
{
"a": [None, None, None, "a"],
"b": [None, None, None, 1],
"c": [None, None, None] + list(np.arange(1, 2).astype("i1")),
"d": [None, None, None, 1.0],
"e": [None, None, None, True],
"f": [None, None, None] + list(pd.date_range("20130101", periods=1)),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
pdf = pd.DataFrame(
{
"a": pd.Series([None, None, None], dtype="float64"),
"b": pd.Series([None, None, None], dtype="str"),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_nullable_object(self):
pdf = pd.DataFrame(
{
"a": list("abc") + [np.nan, None],
"b": list(range(1, 4)) + [np.nan, None],
"c": list(np.arange(3, 6).astype("i1")) + [np.nan, None],
"d": list(np.arange(4.0, 7.0, dtype="float64")) + [np.nan, None],
"e": [True, False, True, np.nan, None],
"f": list(pd.date_range("20130101", periods=3)) + [np.nan, None],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_assign(self):
pdf, psdf = self.df_pair
psdf["w"] = 1.0
pdf["w"] = 1.0
self.assert_eq(psdf, pdf)
psdf.w = 10.0
pdf.w = 10.0
self.assert_eq(psdf, pdf)
psdf[1] = 1.0
pdf[1] = 1.0
self.assert_eq(psdf, pdf)
psdf = psdf.assign(a=psdf["a"] * 2)
pdf = pdf.assign(a=pdf["a"] * 2)
self.assert_eq(psdf, pdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "w"), ("y", "v")])
pdf.columns = columns
psdf.columns = columns
psdf[("a", "c")] = "def"
pdf[("a", "c")] = "def"
self.assert_eq(psdf, pdf)
psdf = psdf.assign(Z="ZZ")
pdf = pdf.assign(Z="ZZ")
self.assert_eq(psdf, pdf)
psdf["x"] = "ghi"
pdf["x"] = "ghi"
self.assert_eq(psdf, pdf)
def test_head(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.head(2), pdf.head(2))
self.assert_eq(psdf.head(3), pdf.head(3))
self.assert_eq(psdf.head(0), pdf.head(0))
self.assert_eq(psdf.head(-3), pdf.head(-3))
self.assert_eq(psdf.head(-10), pdf.head(-10))
with option_context("compute.ordered_head", True):
self.assert_eq(psdf.head(), pdf.head())
def test_attributes(self):
psdf = self.psdf
self.assertIn("a", dir(psdf))
self.assertNotIn("foo", dir(psdf))
self.assertRaises(AttributeError, lambda: psdf.foo)
psdf = ps.DataFrame({"a b c": [1, 2, 3]})
self.assertNotIn("a b c", dir(psdf))
psdf = ps.DataFrame({"a": [1, 2], 5: [1, 2]})
self.assertIn("a", dir(psdf))
self.assertNotIn(5, dir(psdf))
def test_column_names(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.columns, pdf.columns)
self.assert_eq(psdf[["b", "a"]].columns, pdf[["b", "a"]].columns)
self.assert_eq(psdf["a"].name, pdf["a"].name)
self.assert_eq((psdf["a"] + 1).name, (pdf["a"] + 1).name)
self.assert_eq((psdf.a + psdf.b).name, (pdf.a + pdf.b).name)
self.assert_eq((psdf.a + psdf.b.rename("a")).name, (pdf.a + pdf.b.rename("a")).name)
self.assert_eq((psdf.a + psdf.b.rename()).name, (pdf.a + pdf.b.rename()).name)
self.assert_eq((psdf.a.rename() + psdf.b).name, (pdf.a.rename() + pdf.b).name)
self.assert_eq(
(psdf.a.rename() + psdf.b.rename()).name, (pdf.a.rename() + pdf.b.rename()).name
)
def test_rename_columns(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
psdf.columns = ["x", "y"]
pdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
columns = pdf.columns
columns.name = "lvl_1"
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1"])
self.assert_eq(psdf, pdf)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
psdf.columns = [1, 2, 3, 4]
# Multi-index columns
pdf = pd.DataFrame(
{("A", "0"): [1, 2, 2, 3], ("B", "1"): [1, 2, 3, 4]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
columns = pdf.columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
pdf.columns = ["x", "y"]
psdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
columns.names = ["lvl_1", "lvl_2"]
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1", "lvl_2"])
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
def test_rename_dataframe(self):
pdf1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
psdf1 = ps.from_pandas(pdf1)
self.assert_eq(
psdf1.rename(columns={"A": "a", "B": "b"}), pdf1.rename(columns={"A": "a", "B": "b"})
)
result_psdf = psdf1.rename(index={1: 10, 2: 20})
result_pdf = pdf1.rename(index={1: 10, 2: 20})
self.assert_eq(result_psdf, result_pdf)
# inplace
pser = result_pdf.A
psser = result_psdf.A
result_psdf.rename(index={10: 100, 20: 200}, inplace=True)
result_pdf.rename(index={10: 100, 20: 200}, inplace=True)
self.assert_eq(result_psdf, result_pdf)
self.assert_eq(psser, pser)
def str_lower(s) -> str:
return str.lower(s)
self.assert_eq(
psdf1.rename(str_lower, axis="columns"), pdf1.rename(str_lower, axis="columns")
)
def mul10(x) -> int:
return x * 10
self.assert_eq(psdf1.rename(mul10, axis="index"), pdf1.rename(mul10, axis="index"))
self.assert_eq(
psdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
pdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
)
idx = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")])
pdf2 = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf2.rename(columns=str_lower), pdf2.rename(columns=str_lower))
self.assert_eq(
psdf2.rename(columns=str_lower, level=0), pdf2.rename(columns=str_lower, level=0)
)
self.assert_eq(
psdf2.rename(columns=str_lower, level=1), pdf2.rename(columns=str_lower, level=1)
)
pdf3 = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list("ab"))
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(psdf3.rename(index=str_lower), pdf3.rename(index=str_lower))
self.assert_eq(
psdf3.rename(index=str_lower, level=0), pdf3.rename(index=str_lower, level=0)
)
self.assert_eq(
psdf3.rename(index=str_lower, level=1), pdf3.rename(index=str_lower, level=1)
)
pdf4 = pdf2 + 1
psdf4 = psdf2 + 1
self.assert_eq(psdf4.rename(columns=str_lower), pdf4.rename(columns=str_lower))
pdf5 = pdf3 + 1
psdf5 = psdf3 + 1
self.assert_eq(psdf5.rename(index=str_lower), pdf5.rename(index=str_lower))
msg = "Either `index` or `columns` should be provided."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename()
msg = "`mapper` or `index` or `columns` should be either dict-like or function type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename(mapper=[str_lower], axis=1)
msg = "Mapper dict should have the same value type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename({"A": "a", "B": 2}, axis=1)
msg = r"level should be an integer between \[0, column_labels_level\)"
with self.assertRaisesRegex(ValueError, msg):
psdf2.rename(columns=str_lower, level=2)
def test_rename_axis(self):
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis("index2", axis=axis).sort_index(),
psdf.rename_axis("index2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["index2"], axis=axis).sort_index(),
psdf.rename_axis(["index2"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis("cols2", axis=axis).sort_index(),
psdf.rename_axis("cols2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["cols2"], axis=axis).sort_index(),
psdf.rename_axis(["cols2"], axis=axis).sort_index(),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pdf2.rename_axis("index2", axis="index", inplace=True)
psdf2.rename_axis("index2", axis="index", inplace=True)
self.assert_eq(pdf2.sort_index(), psdf2.sort_index())
self.assertRaises(ValueError, lambda: psdf.rename_axis(["index2", "index3"], axis=0))
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols2", "cols3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.rename_axis(mapper=["index2"], index=["index3"]))
        # The index/columns parameters and dict-like/function mappers were introduced in pandas 0.24.0
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
self.assert_eq(
pdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
psdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index(),
psdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
else:
expected = pdf
expected.index.name = "index2"
expected.columns.name = "cols2"
result = psdf.rename_axis(
index={"index": "index2"}, columns={"cols": "cols2"}
).sort_index()
self.assert_eq(expected, result)
expected.index.name = "index"
expected.columns.name = "cols"
result = psdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index()
self.assert_eq(expected, result)
expected.index.name = "INDEX"
expected.columns.name = "COLS"
result = psdf.rename_axis(index=str.upper, columns=str.upper).sort_index()
self.assert_eq(expected, result)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
psdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
psdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
)
self.assertRaises(
ValueError, lambda: psdf.rename_axis(["index3", "index4", "index5"], axis=0)
)
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols3", "cols4", "cols5"], axis=1))
        # The index/columns parameters and dict-like/function mappers were introduced in pandas 0.24.0
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
self.assert_eq(
pdf.rename_axis(
index={"index1": "index3"}, columns={"cols1": "cols3"}
).sort_index(),
psdf.rename_axis(
index={"index1": "index3"}, columns={"cols1": "cols3"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(
index={"missing": "index3"}, columns={"missing": "cols3"}
).sort_index(),
psdf.rename_axis(
index={"missing": "index3"}, columns={"missing": "cols3"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
psdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
else:
expected = pdf
expected.index.names = ["index3", "index2"]
expected.columns.names = ["cols3", "cols2"]
result = psdf.rename_axis(
index={"index1": "index3"}, columns={"cols1": "cols3"}
).sort_index()
self.assert_eq(expected, result)
expected.index.names = ["index1", "index2"]
expected.columns.names = ["cols1", "cols2"]
result = psdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index()
self.assert_eq(expected, result)
expected.index.names = ["index3", "index4"]
expected.columns.names = ["cols3", "cols4"]
result = psdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index()
self.assert_eq(expected, result)
expected.index.names = ["INDEX1", "INDEX2"]
expected.columns.names = ["COLS1", "COLS2"]
result = psdf.rename_axis(index=str.upper, columns=str.upper).sort_index()
self.assert_eq(expected, result)
def test_dot(self):
psdf = self.psdf
with self.assertRaisesRegex(TypeError, "Unsupported type DataFrame"):
psdf.dot(psdf)
def test_dot_in_column_name(self):
self.assert_eq(
ps.DataFrame(ps.range(1)._internal.spark_frame.selectExpr("1L as `a.b`"))["a.b"],
ps.Series([1], name="a.b"),
)
def test_aggregate(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=["A", "B", "C"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(), # TODO?: fix column order
pdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(),
)
self.assert_eq(
psdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
pdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
)
self.assertRaises(KeyError, lambda: psdf.agg({"A": ["sum", "min"], "X": ["min", "max"]}))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
pdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
pdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.agg({"X": ["sum", "min"], "Y": ["min", "max"]}))
# non-string names
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=[10, 20, 30]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
pdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
)
self.assert_eq(
psdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
pdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", 10), ("X", 20), ("Y", 30)])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
pdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
pdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
)
pdf = pd.DataFrame(
[datetime(2019, 2, 2, 0, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0, 0)],
columns=["timestamp"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.timestamp.min(), pdf.timestamp.min())
self.assert_eq(psdf.timestamp.max(), pdf.timestamp.max())
self.assertRaises(ValueError, lambda: psdf.agg(("sum", "min")))
def test_droplevel(self):
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
pdf.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
psdf = ps.from_pandas(pdf)
self.assertRaises(ValueError, lambda: psdf.droplevel(["a", "b"]))
self.assertRaises(ValueError, lambda: psdf.droplevel([1, 1, 1, 1, 1]))
self.assertRaises(IndexError, lambda: psdf.droplevel(2))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a"}))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a": 1}))
self.assertRaises(ValueError, lambda: psdf.droplevel(["level_1", "level_2"], axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(2, axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1"}, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1": 1}, axis=1))
# droplevel is new in pandas 0.24.0
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
self.assert_eq(pdf.droplevel("a"), psdf.droplevel("a"))
self.assert_eq(pdf.droplevel(["a"]), psdf.droplevel(["a"]))
self.assert_eq(pdf.droplevel(("a",)), psdf.droplevel(("a",)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel("level_1", axis=1), psdf.droplevel("level_1", axis=1))
self.assert_eq(pdf.droplevel(["level_1"], axis=1), psdf.droplevel(["level_1"], axis=1))
self.assert_eq(
pdf.droplevel(("level_1",), axis=1), psdf.droplevel(("level_1",), axis=1)
)
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
self.assert_eq(pdf.droplevel(-1, axis=1), psdf.droplevel(-1, axis=1))
else:
expected = pdf.copy()
expected.index = expected.index.droplevel("a")
self.assert_eq(expected, psdf.droplevel("a"))
self.assert_eq(expected, psdf.droplevel(["a"]))
self.assert_eq(expected, psdf.droplevel(("a",)))
self.assert_eq(expected, psdf.droplevel(0))
expected = pdf.copy()
expected.index = expected.index.droplevel(-1)
self.assert_eq(expected, psdf.droplevel(-1))
expected = pdf.copy()
expected.columns = expected.columns.droplevel("level_1")
self.assert_eq(expected, psdf.droplevel("level_1", axis=1))
self.assert_eq(expected, psdf.droplevel(["level_1"], axis=1))
self.assert_eq(expected, psdf.droplevel(("level_1",), axis=1))
self.assert_eq(expected, psdf.droplevel(0, axis=1))
expected = pdf.copy()
expected.columns = expected.columns.droplevel(-1)
self.assert_eq(expected, psdf.droplevel(-1, axis=1))
# Tupled names
pdf.columns.names = [("level", 1), ("level", 2)]
pdf.index.names = [("a", 10), ("x", 20)]
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.droplevel("a"))
self.assertRaises(KeyError, lambda: psdf.droplevel(("a", 10)))
# droplevel is new in pandas 0.24.0
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
self.assert_eq(pdf.droplevel([("a", 10)]), psdf.droplevel([("a", 10)]))
self.assert_eq(
pdf.droplevel([("level", 1)], axis=1), psdf.droplevel([("level", 1)], axis=1)
)
else:
expected = pdf.copy()
expected.index = expected.index.droplevel([("a", 10)])
self.assert_eq(expected, psdf.droplevel([("a", 10)]))
expected = pdf.copy()
expected.columns = expected.columns.droplevel([("level", 1)])
self.assert_eq(expected, psdf.droplevel([("level", 1)], axis=1))
# non-string names
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis([10.0, 20.0])
)
pdf.columns = pd.MultiIndex.from_tuples([("c", "e"), ("d", "f")], names=[100.0, 200.0])
psdf = ps.from_pandas(pdf)
# droplevel is new in pandas 0.24.0
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
self.assert_eq(pdf.droplevel(10.0), psdf.droplevel(10.0))
self.assert_eq(pdf.droplevel([10.0]), psdf.droplevel([10.0]))
self.assert_eq(pdf.droplevel((10.0,)), psdf.droplevel((10.0,)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel(100.0, axis=1), psdf.droplevel(100.0, axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
else:
expected = pdf.copy()
expected.index = expected.index.droplevel(10.0)
self.assert_eq(expected, psdf.droplevel(10.0))
self.assert_eq(expected, psdf.droplevel([10.0]))
self.assert_eq(expected, psdf.droplevel((10.0,)))
self.assert_eq(expected, psdf.droplevel(0))
expected = pdf.copy()
expected.index = expected.index.droplevel(-1)
self.assert_eq(expected, psdf.droplevel(-1))
expected = pdf.copy()
expected.columns = expected.columns.droplevel(100.0)
self.assert_eq(expected, psdf.droplevel(100.0, axis=1))
self.assert_eq(expected, psdf.droplevel(0, axis=1))
def test_drop(self):
pdf = pd.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
        # Assert that at least one of the 'labels' or 'columns' parameters must be specified
expected_error_message = "Need to specify at least one of 'labels' or 'columns'"
with self.assertRaisesRegex(ValueError, expected_error_message):
psdf.drop()
#
# Drop columns
#
# Assert using a str for 'labels' works
self.assert_eq(psdf.drop("x", axis=1), pdf.drop("x", axis=1))
self.assert_eq((psdf + 1).drop("x", axis=1), (pdf + 1).drop("x", axis=1))
# Assert using a list for 'labels' works
self.assert_eq(psdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1))
self.assert_eq(psdf.drop(["x", "y", "z"], axis=1), pdf.drop(["x", "y", "z"], axis=1))
# Assert using 'columns' instead of 'labels' produces the same results
self.assert_eq(psdf.drop(columns="x"), pdf.drop(columns="x"))
self.assert_eq(psdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"]))
self.assert_eq(psdf.drop(columns=["x", "y", "z"]), pdf.drop(columns=["x", "y", "z"]))
self.assert_eq(psdf.drop(columns=[]), pdf.drop(columns=[]))
columns = pd.MultiIndex.from_tuples([(1, "x"), (1, "y"), (2, "z")])
pdf.columns = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(columns=1), pdf.drop(columns=1))
self.assert_eq(psdf.drop(columns=(1, "x")), pdf.drop(columns=(1, "x")))
self.assert_eq(psdf.drop(columns=[(1, "x"), 2]), pdf.drop(columns=[(1, "x"), 2]))
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
self.assertRaises(KeyError, lambda: psdf.drop(columns=3))
self.assertRaises(KeyError, lambda: psdf.drop(columns=(1, "z")))
pdf.index = pd.MultiIndex.from_tuples([("i", 0), ("j", 1)])
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
# non-string names
pdf = pd.DataFrame({10: [1, 2], 20: [3, 4], 30: [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(10, axis=1), pdf.drop(10, axis=1))
self.assert_eq(psdf.drop([20, 30], axis=1), pdf.drop([20, 30], axis=1))
#
# Drop rows
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
# Given labels (and axis = 0)
self.assert_eq(psdf.drop(labels="A", axis=0), pdf.drop(labels="A", axis=0))
self.assert_eq(psdf.drop(labels="A"), pdf.drop(labels="A"))
self.assert_eq((psdf + 1).drop(labels="A"), (pdf + 1).drop(labels="A"))
self.assert_eq(psdf.drop(labels=["A", "C"], axis=0), pdf.drop(labels=["A", "C"], axis=0))
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
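        # A small 'compute.isin_limit' exercises the join-based code path used instead of
        # Column.isin when the number of labels exceeds the limit.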
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
# Given index
self.assert_eq(psdf.drop(index="A"), pdf.drop(index="A"))
self.assert_eq(psdf.drop(index=["A", "C"]), pdf.drop(index=["A", "C"]))
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
self.assert_eq(psdf.drop(index=[]), pdf.drop(index=[]))
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
# Non-string names
pdf.index = [10, 20, 30]
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(labels=10, axis=0), pdf.drop(labels=10, axis=0))
self.assert_eq(psdf.drop(labels=[10, 30], axis=0), pdf.drop(labels=[10, 30], axis=0))
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
# MultiIndex
pdf.index = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: psdf.drop(labels=[("a", "x")]))
#
# Drop rows and columns
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(index="A", columns="X"), pdf.drop(index="A", columns="X"))
self.assert_eq(
psdf.drop(index=["A", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=[], columns=["X", "Z"]),
pdf.drop(index=[], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=[]),
pdf.drop(index=["A", "B", "C"], columns=[]),
)
self.assert_eq(
psdf.drop(index=[], columns=[]),
pdf.drop(index=[], columns=[]),
)
self.assertRaises(
ValueError,
lambda: psdf.drop(labels="A", axis=0, columns="X"),
)
def _test_dropna(self, pdf, axis):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(psdf.dropna(axis=axis, subset=["x"]), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(psdf.dropna(axis=axis, subset="x"), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"]), pdf.dropna(axis=axis, subset=["y", "z"])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"], how="all"),
pdf.dropna(axis=axis, subset=["y", "z"], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
pdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pser = pdf2[pdf2.columns[0]]
psser = psdf2[psdf2.columns[0]]
pdf2.dropna(inplace=True, axis=axis)
psdf2.dropna(inplace=True, axis=axis)
self.assert_eq(psdf2, pdf2)
self.assert_eq(psser, pser)
# multi-index
columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
if axis == 0:
pdf.columns = columns
else:
pdf.index = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "x")]), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=("a", "x")), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
)
def test_dropna_axis_index(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self._test_dropna(pdf, axis=0)
# empty
pdf = pd.DataFrame(index=np.random.rand(6))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(), pdf.dropna())
self.assert_eq(psdf.dropna(how="all"), pdf.dropna(how="all"))
self.assert_eq(psdf.dropna(thresh=0), pdf.dropna(thresh=0))
self.assert_eq(psdf.dropna(thresh=1), pdf.dropna(thresh=1))
with self.assertRaisesRegex(ValueError, "No axis named foo"):
psdf.dropna(axis="foo")
self.assertRaises(KeyError, lambda: psdf.dropna(subset="1"))
with self.assertRaisesRegex(ValueError, "invalid how option: 1"):
psdf.dropna(how=1)
with self.assertRaisesRegex(TypeError, "must specify how or thresh"):
psdf.dropna(how=None)
def test_dropna_axis_column(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=[str(r) for r in np.random.rand(6)],
).T
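        # Transposed so the NaN pattern lies along the columns, exercising dropna(axis=1).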
self._test_dropna(pdf, axis=1)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
ValueError, "The length of each subset must be the same as the index size."
):
psdf.dropna(subset=(["x", "y"]), axis=1)
# empty
pdf = pd.DataFrame({"x": [], "y": [], "z": []})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=1), pdf.dropna(axis=1))
self.assert_eq(psdf.dropna(axis=1, how="all"), pdf.dropna(axis=1, how="all"))
self.assert_eq(psdf.dropna(axis=1, thresh=0), pdf.dropna(axis=1, thresh=0))
self.assert_eq(psdf.dropna(axis=1, thresh=1), pdf.dropna(axis=1, thresh=1))
def test_dtype(self):
pdf = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
# multi-index columns
columns = pd.MultiIndex.from_tuples(zip(list("xxxyyz"), list("abcdef")))
pdf.columns = columns
psdf.columns = columns
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
def test_fillna(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({"x": -1, "y": -2, "z": -5}), pdf.fillna({"x": -1, "y": -2, "z": -5})
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
pdf = pdf.set_index(["x", "y"])
psdf = ps.from_pandas(pdf)
        # check multi-index
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
pser = pdf.z
psser = psdf.z
pdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
psdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
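        # A Series fill value maps its index labels to column names.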
s_nan = pd.Series([-1, -2, -5], index=["x", "y", "z"], dtype=int)
self.assert_eq(psdf.fillna(s_nan), pdf.fillna(s_nan))
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis=1)
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis="columns")
with self.assertRaisesRegex(ValueError, "limit parameter for value is not support now"):
psdf.fillna(-1, limit=1)
with self.assertRaisesRegex(TypeError, "Unsupported.*DataFrame"):
psdf.fillna(pd.DataFrame({"x": [-1], "y": [-1], "z": [-1]}))
with self.assertRaisesRegex(TypeError, "Unsupported.*int64"):
psdf.fillna({"x": np.int64(-6), "y": np.int64(-4), "z": -5})
with self.assertRaisesRegex(ValueError, "Expecting 'pad', 'ffill', 'backfill' or 'bfill'."):
psdf.fillna(method="xxx")
with self.assertRaisesRegex(
ValueError, "Must specify a fillna 'value' or 'method' parameter."
):
psdf.fillna()
# multi-index columns
pdf = pd.DataFrame(
{
("x", "a"): [np.nan, 2, 3, 4, np.nan, 6],
("x", "b"): [1, 2, np.nan, 4, np.nan, np.nan],
("y", "c"): [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
self.assert_eq(psdf.fillna({"x": -1}), pdf.fillna({"x": -1}))
if sys.version_info >= (3, 6):
            # Dict ordering is not guaranteed before Python 3.6, which makes this flaky in Python 3.5.
self.assert_eq(
psdf.fillna({"x": -1, ("x", "b"): -2}), pdf.fillna({"x": -1, ("x", "b"): -2})
)
self.assert_eq(
psdf.fillna({("x", "b"): -2, "x": -1}), pdf.fillna({("x", "b"): -2, "x": -1})
)
        # check multi-index
pdf = pdf.set_index([("x", "a"), ("x", "b")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
def test_isnull(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")}, index=np.random.rand(6)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.notnull(), pdf.notnull())
self.assert_eq(psdf.isnull(), pdf.isnull())
def test_to_datetime(self):
pdf = pd.DataFrame(
{"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}, index=np.random.rand(2)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pd.to_datetime(pdf), ps.to_datetime(psdf))
def test_nunique(self):
pdf = pd.DataFrame({"A": [1, 2, 3], "B": [np.nan, 3, np.nan]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
# Assert NaNs are dropped by default
self.assert_eq(psdf.nunique(), pdf.nunique())
# Assert including NaN values
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
# Assert approximate counts
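        # approx=True uses approx_count_distinct (HyperLogLog++), so the result may deviate
        # from the exact count; a smaller rsd tightens the estimate.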
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True),
pd.Series([103], index=["A"]),
)
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True, rsd=0.01),
pd.Series([100], index=["A"]),
)
        # Assert that axis=1 is not supported yet
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.nunique(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")], names=["1", "2"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.nunique(), pdf.nunique())
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
def test_sort_values(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
for ascending in [True, False]:
for na_position in ["first", "last"]:
self.assert_eq(
psdf.sort_values("a", ascending=ascending, na_position=na_position),
pdf.sort_values("a", ascending=ascending, na_position=na_position),
)
self.assert_eq(psdf.sort_values(["a", "b"]), pdf.sort_values(["a", "b"]))
self.assert_eq(
psdf.sort_values(["a", "b"], ascending=[False, True]),
pdf.sort_values(["a", "b"], ascending=[False, True]),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], ascending=[False]))
self.assert_eq(
psdf.sort_values(["a", "b"], na_position="first"),
pdf.sort_values(["a", "b"], na_position="first"),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], na_position="invalid"))
pserA = pdf.a
psserA = psdf.a
self.assert_eq(psdf.sort_values("b", inplace=True), pdf.sort_values("b", inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# multi-index columns
pdf = pd.DataFrame(
{("X", 10): [1, 2, 3, 4, 5, None, 7], ("X", 20): [7, 6, 5, 4, 3, 2, 1]},
index=np.random.rand(7),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(("X", 20)), pdf.sort_values(("X", 20)))
self.assert_eq(
psdf.sort_values([("X", 20), ("X", 10)]), pdf.sort_values([("X", 20), ("X", 10)])
)
self.assertRaisesRegex(
ValueError,
"For a multi-index, the label must be a tuple with elements",
lambda: psdf.sort_values(["X"]),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 3, 4, 5, None, 7], 20: [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(20), pdf.sort_values(20))
self.assert_eq(psdf.sort_values([20, 10]), pdf.sort_values([20, 10]))
def test_sort_index(self):
pdf = pd.DataFrame(
{"A": [2, 1, np.nan], "B": [np.nan, 0, np.nan]}, index=["b", "a", np.nan]
)
psdf = ps.from_pandas(pdf)
# Assert invalid parameters
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(axis=1))
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(kind="mergesort"))
self.assertRaises(ValueError, lambda: psdf.sort_index(na_position="invalid"))
# Assert default behavior without parameters
self.assert_eq(psdf.sort_index(), pdf.sort_index())
# Assert sorting descending
self.assert_eq(psdf.sort_index(ascending=False), pdf.sort_index(ascending=False))
# Assert sorting NA indices first
self.assert_eq(psdf.sort_index(na_position="first"), pdf.sort_index(na_position="first"))
# Assert sorting descending and NA indices first
self.assert_eq(
psdf.sort_index(ascending=False, na_position="first"),
pdf.sort_index(ascending=False, na_position="first"),
)
# Assert sorting inplace
pserA = pdf.A
psserA = psdf.A
self.assertEqual(psdf.sort_index(inplace=True), pdf.sort_index(inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# Assert multi-indices
pdf = pd.DataFrame(
{"A": range(4), "B": range(4)[::-1]}, index=[["b", "b", "a", "a"], [1, 0, 1, 0]]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_index(level=[1, 0]), pdf.sort_index(level=[1, 0]))
self.assert_eq(psdf.reset_index().sort_index(), pdf.reset_index().sort_index())
# Assert with multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_swaplevel(self):
# MultiIndex with two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
# MultiIndex with more than two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"], ["l", "m", "s", "xs"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(0, 2), psdf.swaplevel(0, 2))
self.assert_eq(pdf.swaplevel(1, 2), psdf.swaplevel(1, 2))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel(-1, -2), psdf.swaplevel(-1, -2))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
self.assert_eq(pdf.swaplevel("number", "size"), psdf.swaplevel("number", "size"))
self.assert_eq(pdf.swaplevel("color", "size"), psdf.swaplevel("color", "size"))
self.assert_eq(
pdf.swaplevel("color", "size", axis="index"),
psdf.swaplevel("color", "size", axis="index"),
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=0), psdf.swaplevel("color", "size", axis=0)
)
pdf = pd.DataFrame(
{
"x1": ["a", "b", "c", "d"],
"x2": ["a", "b", "c", "d"],
"x3": ["a", "b", "c", "d"],
"x4": ["a", "b", "c", "d"],
}
)
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf.columns = pidx
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(axis=1), psdf.swaplevel(axis=1))
self.assert_eq(pdf.swaplevel(0, 1, axis=1), psdf.swaplevel(0, 1, axis=1))
self.assert_eq(pdf.swaplevel(0, 2, axis=1), psdf.swaplevel(0, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 2, axis=1), psdf.swaplevel(1, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 1, axis=1), psdf.swaplevel(1, 1, axis=1))
self.assert_eq(pdf.swaplevel(-1, -2, axis=1), psdf.swaplevel(-1, -2, axis=1))
self.assert_eq(
pdf.swaplevel("number", "color", axis=1), psdf.swaplevel("number", "color", axis=1)
)
self.assert_eq(
pdf.swaplevel("number", "size", axis=1), psdf.swaplevel("number", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=1), psdf.swaplevel("color", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis="columns"),
psdf.swaplevel("color", "size", axis="columns"),
)
# Error conditions
self.assertRaises(AssertionError, lambda: ps.DataFrame([1, 2]).swaplevel())
self.assertRaises(IndexError, lambda: psdf.swaplevel(0, 9, axis=1))
self.assertRaises(KeyError, lambda: psdf.swaplevel("not_number", "color", axis=1))
self.assertRaises(ValueError, lambda: psdf.swaplevel(axis=2))
def test_swapaxes(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["x", "y", "z"], columns=["a", "b", "c"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.swapaxes(0, 1), pdf.swapaxes(0, 1))
self.assert_eq(psdf.swapaxes(1, 0), pdf.swapaxes(1, 0))
self.assert_eq(psdf.swapaxes("index", "columns"), pdf.swapaxes("index", "columns"))
self.assert_eq(psdf.swapaxes("columns", "index"), pdf.swapaxes("columns", "index"))
self.assert_eq((psdf + 1).swapaxes(0, 1), (pdf + 1).swapaxes(0, 1))
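        # copy=False is not supported; pandas-on-Spark always copies, hence the AssertionError.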
self.assertRaises(AssertionError, lambda: psdf.swapaxes(0, 1, copy=False))
self.assertRaises(ValueError, lambda: psdf.swapaxes(0, -1))
def test_nlargest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nlargest(n=5, columns="a"), pdf.nlargest(5, columns="a"))
self.assert_eq(psdf.nlargest(n=5, columns=["a", "b"]), pdf.nlargest(5, columns=["a", "b"]))
def test_nsmallest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nsmallest(n=5, columns="a"), pdf.nsmallest(5, columns="a"))
self.assert_eq(
psdf.nsmallest(n=5, columns=["a", "b"]), pdf.nsmallest(5, columns=["a", "b"])
)
def test_xs(self):
d = {
"num_legs": [4, 4, 2, 2],
"num_wings": [0, 0, 2, 2],
"class": ["mammal", "mammal", "mammal", "bird"],
"animal": ["cat", "dog", "bat", "penguin"],
"locomotion": ["walks", "walks", "flies", "walks"],
}
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "locomotion"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs("mammal"), pdf.xs("mammal"))
self.assert_eq(psdf.xs(("mammal",)), pdf.xs(("mammal",)))
self.assert_eq(psdf.xs(("mammal", "dog", "walks")), pdf.xs(("mammal", "dog", "walks")))
self.assert_eq(
ps.concat([psdf, psdf]).xs(("mammal", "dog", "walks")),
pd.concat([pdf, pdf]).xs(("mammal", "dog", "walks")),
)
self.assert_eq(psdf.xs("cat", level=1), pdf.xs("cat", level=1))
self.assert_eq(psdf.xs("flies", level=2), pdf.xs("flies", level=2))
self.assert_eq(psdf.xs("mammal", level=-3), pdf.xs("mammal", level=-3))
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.xs("num_wings", axis=1)
with self.assertRaises(KeyError):
psdf.xs(("mammal", "dog", "walk"))
msg = r"'Key length \(4\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
psdf.xs(("mammal", "dog", "walks", "foo"))
msg = "'key' should be a scalar value or tuple that contains scalar values"
with self.assertRaisesRegex(TypeError, msg):
psdf.xs(["mammal", "dog", "walks", "foo"])
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=-4))
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=3))
self.assertRaises(KeyError, lambda: psdf.xs(("dog", "walks"), level=1))
# non-string names
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "num_legs", "num_wings"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs(("mammal", "dog", 4)), pdf.xs(("mammal", "dog", 4)))
self.assert_eq(psdf.xs(2, level=2), pdf.xs(2, level=2))
self.assert_eq((psdf + "a").xs(("mammal", "dog", 4)), (pdf + "a").xs(("mammal", "dog", 4)))
self.assert_eq((psdf + "a").xs(2, level=2), (pdf + "a").xs(2, level=2))
def test_missing(self):
psdf = self.psdf
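        # Every function/property registered in _MissingPandasLikeDataFrame should raise
        # PandasNotImplementedError with a descriptive message.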
missing_functions = inspect.getmembers(_MissingPandasLikeDataFrame, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)()
missing_properties = inspect.getmembers(
_MissingPandasLikeDataFrame, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)
def test_to_numpy(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_numpy(), pdf.values)
def test_to_pandas(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.to_pandas(), pdf)
def test_isin(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.isin([4, "six"]), pdf.isin([4, "six"]))
        # pandas seems to have a bug when `np.array` is passed as the parameter,
        # so compare against the list-based result instead.
self.assert_eq(psdf.isin(np.array([4, "six"])), pdf.isin([4, "six"]))
self.assert_eq(
psdf.isin({"a": [2, 8], "c": ["three", "one"]}),
pdf.isin({"a": [2, 8], "c": ["three", "one"]}),
)
self.assert_eq(
psdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
pdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
)
msg = "'DataFrame' object has no attribute {'e'}"
with self.assertRaisesRegex(AttributeError, msg):
psdf.isin({"e": [5, 7], "a": [1, 6]})
msg = "DataFrame and Series are not supported"
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.isin(pdf)
msg = "Values should be iterable, Series, DataFrame or dict."
with self.assertRaisesRegex(TypeError, msg):
psdf.isin(1)
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, None, 9, 4, None, 4],
"c": [None, 5, None, 3, 2, 1],
},
)
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), pdf.isin([4, 3, 1, 1, None]))
else:
expected = pd.DataFrame(
{
"a": [True, False, True, True, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, True, False, True],
}
)
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), expected)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psdf.isin({"b": [4, 3, 1, 1, None]}), pdf.isin({"b": [4, 3, 1, 1, None]})
)
else:
expected = pd.DataFrame(
{
"a": [False, False, False, False, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, False, False, False],
}
)
self.assert_eq(psdf.isin({"b": [4, 3, 1, 1, None]}), expected)
def test_merge(self):
left_pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"value": [1, 2, 3, 5, 6, 7],
"x": list("abcdef"),
},
columns=["lkey", "value", "x"],
)
right_pdf = pd.DataFrame(
{
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [4, 5, 6, 7, 8, 9],
"y": list("efghij"),
},
columns=["rkey", "value", "y"],
)
right_ps = pd.Series(list("defghi"), name="x", index=[5, 6, 7, 8, 9, 10])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
right_psser = ps.from_pandas(right_ps)
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
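            # Sort rows and reset the index before comparing, since Spark does not
            # guarantee row ordering.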
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, on=("value",)))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
# MultiIndex
check(
lambda left, right: left.merge(
right, left_on=["lkey", "value"], right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.set_index(["lkey", "value"]).merge(
right, left_index=True, right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.merge(
right.set_index(["rkey", "value"]), left_on=["lkey", "value"], right_index=True
)
)
# TODO: when both left_index=True and right_index=True with multi-index
# check(lambda left, right: left.set_index(['lkey', 'value']).merge(
# right.set_index(['rkey', 'value']), left_index=True, right_index=True))
# join types
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, on="value", how=how))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey", how=how))
# suffix
check(
lambda left, right: left.merge(
right, left_on="lkey", right_on="rkey", suffixes=["_left", "_right"]
)
)
# Test Series on the right
        # pd.DataFrame.merge with a Series has been supported since pandas 0.24.0
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
check(lambda left, right: left.merge(right), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x"),
right_psser,
right_ps,
)
check(
lambda left, right: left.set_index("x").merge(right, left_index=True, right_on="x"),
right_psser,
right_ps,
)
# Test join types with Series
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, how=how), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x", how=how),
right_psser,
right_ps,
)
# suffix with Series
check(
lambda left, right: left.merge(
right,
suffixes=["_left", "_right"],
how="outer",
left_index=True,
right_index=True,
),
right_psser,
right_ps,
)
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([(10, "lkey"), (10, "value"), (20, "x")])
left_pdf.columns = left_columns
left_psdf.columns = left_columns
right_columns = pd.MultiIndex.from_tuples([(10, "rkey"), (10, "value"), (30, "y")])
right_pdf.columns = right_columns
right_psdf.columns = right_columns
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[(10, "value")]))
check(
lambda left, right: (left.set_index((10, "lkey")).merge(right.set_index((10, "rkey"))))
)
check(
lambda left, right: (
left.set_index((10, "lkey")).merge(
right.set_index((10, "rkey")), left_index=True, right_index=True
)
)
)
# TODO: when both left_index=True and right_index=True with multi-index columns
# check(lambda left, right: left.merge(right,
# left_on=[('a', 'lkey')], right_on=[('a', 'rkey')]))
# check(lambda left, right: (left.set_index(('a', 'lkey'))
# .merge(right, left_index=True, right_on=[('a', 'rkey')])))
# non-string names
left_pdf.columns = [10, 100, 1000]
left_psdf.columns = [10, 100, 1000]
right_pdf.columns = [20, 100, 2000]
right_psdf.columns = [20, 100, 2000]
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[100]))
check(lambda left, right: (left.set_index(10).merge(right.set_index(20))))
check(
lambda left, right: (
left.set_index(10).merge(right.set_index(20), left_index=True, right_index=True)
)
)
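# Illustrative sketch (not part of the test; hypothetical frames, assuming a running
# Spark session and `import pyspark.pandas as ps`): the parity asserted above is,
# up to row order,
#
#   pdf = pd.DataFrame({"k": ["a", "b"], "v": [1, 2]})
#   ps.from_pandas(pdf).merge(ps.from_pandas(pdf), on="k").to_pandas()
#   # == pdf.merge(pdf, on="k")  (after sorting rows and resetting the index)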
def test_merge_same_anchor(self):
pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [1, 1, 3, 5, 6, 7],
"x": list("abcdef"),
"y": list("efghij"),
},
columns=["lkey", "rkey", "value", "x", "y"],
)
psdf = ps.from_pandas(pdf)
left_pdf = pdf[["lkey", "value", "x"]]
right_pdf = pdf[["rkey", "value", "y"]]
left_psdf = psdf[["lkey", "value", "x"]]
right_psdf = psdf[["rkey", "value", "y"]]
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
def test_merge_retains_indices(self):
left_pdf = pd.DataFrame({"A": [0, 1]})
right_pdf = pd.DataFrame({"B": [1, 2]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_index=True),
left_pdf.merge(right_pdf, left_index=True, right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_index=True),
left_pdf.merge(right_pdf, left_on="A", right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_on="B"),
left_pdf.merge(right_pdf, left_index=True, right_on="B"),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_on="B"),
left_pdf.merge(right_pdf, left_on="A", right_on="B"),
)
def test_merge_how_parameter(self):
left_pdf = pd.DataFrame({"A": [1, 2]})
right_pdf = pd.DataFrame({"B": ["x", "y"]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True)
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True)
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="left")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="left")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="right")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="right")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="outer")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="outer")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
def test_merge_raises(self):
left = ps.DataFrame(
{"value": [1, 2, 3, 5, 6], "x": list("abcde")},
columns=["value", "x"],
index=["foo", "bar", "baz", "foo", "bar"],
)
right = ps.DataFrame(
{"value": [4, 5, 6, 7, 8], "y": list("fghij")},
columns=["value", "y"],
index=["baz", "foo", "bar", "baz", "foo"],
)
with self.assertRaisesRegex(ValueError, "No common columns to perform merge on"):
left[["x"]].merge(right[["y"]])
with self.assertRaisesRegex(ValueError, "not a combination of both"):
left.merge(right, on="value", left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_index=True)
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_on="y")
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_index=True)
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on="value", right_on=["value", "y"])
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on=["value", "x"], right_on="value")
with self.assertRaisesRegex(ValueError, "['inner', 'left', 'right', 'full', 'outer']"):
left.merge(right, left_index=True, right_index=True, how="foo")
with self.assertRaisesRegex(KeyError, "id"):
left.merge(right, on="id")
def test_append(self):
pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"))
psdf = ps.from_pandas(pdf)
other_pdf = pd.DataFrame([[3, 4], [5, 6]], columns=list("BC"), index=[2, 3])
other_psdf = ps.from_pandas(other_pdf)
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
self.assert_eq(psdf.append(psdf, ignore_index=True), pdf.append(pdf, ignore_index=True))
# Assert DataFrames with non-matching columns
self.assert_eq(psdf.append(other_psdf), pdf.append(other_pdf))
# Assert appending a Series fails
msg = "DataFrames.append() does not support appending Series to DataFrames"
with self.assertRaises(TypeError, msg=msg):
psdf.append(psdf["A"])
# Assert using the sort parameter raises an exception
msg = "The 'sort' parameter is currently not supported"
with self.assertRaises(NotImplementedError, msg=msg):
psdf.append(psdf, sort=True)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
psdf.append(other_psdf, verify_integrity=True),
pdf.append(other_pdf, verify_integrity=True),
)
msg = "Indices have overlapping values"
with self.assertRaises(ValueError, msg=msg):
psdf.append(psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
psdf.append(psdf, ignore_index=True, verify_integrity=True),
pdf.append(pdf, ignore_index=True, verify_integrity=True),
)
# Assert appending multi-index DataFrames
multi_index_pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[[2, 3], [4, 5]])
multi_index_psdf = ps.from_pandas(multi_index_pdf)
other_multi_index_pdf = pd.DataFrame(
[[5, 6], [7, 8]], columns=list("AB"), index=[[2, 3], [6, 7]]
)
other_multi_index_psdf = ps.from_pandas(other_multi_index_pdf)
self.assert_eq(
multi_index_psdf.append(multi_index_psdf), multi_index_pdf.append(multi_index_pdf)
)
# Assert DataFrames with non-matching columns
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf),
multi_index_pdf.append(other_multi_index_pdf),
)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf, verify_integrity=True),
multi_index_pdf.append(other_multi_index_pdf, verify_integrity=True),
)
with self.assertRaises(ValueError, msg=msg):
multi_index_psdf.append(multi_index_psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
multi_index_psdf.append(multi_index_psdf, ignore_index=True, verify_integrity=True),
multi_index_pdf.append(multi_index_pdf, ignore_index=True, verify_integrity=True),
)
# Assert trying to append DataFrames with different index levels
msg = "Both DataFrames have to have the same number of index levels"
with self.assertRaises(ValueError, msg=msg):
psdf.append(multi_index_psdf)
# Skip index level check when ignore_index=True
self.assert_eq(
psdf.append(multi_index_psdf, ignore_index=True),
pdf.append(multi_index_pdf, ignore_index=True),
)
columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
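# Illustrative sketch (hypothetical frame; assumes `ps` is pyspark.pandas):
#
#   psdf = ps.DataFrame([[1, 2]], columns=list("AB"))
#   psdf.append(psdf, ignore_index=True)        # duplicate rows allowed, index renumbered
#   psdf.append(psdf, verify_integrity=True)    # ValueError: indices have overlapping values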
def test_clip(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
# Assert list-like values are not accepted for 'lower' and 'upper'
msg = "List-like value are not supported for 'lower' and 'upper' at the moment"
with self.assertRaises(TypeError, msg=msg):
psdf.clip(lower=[1])
with self.assertRaises(TypeError, msg=msg):
psdf.clip(upper=[1])
# Assert no lower or upper
self.assert_eq(psdf.clip(), pdf.clip())
# Assert lower only
self.assert_eq(psdf.clip(1), pdf.clip(1))
# Assert upper only
self.assert_eq(psdf.clip(upper=3), pdf.clip(upper=3))
# Assert lower and upper
self.assert_eq(psdf.clip(1, 3), pdf.clip(1, 3))
pdf["clip"] = pdf.A.clip(lower=1, upper=3)
psdf["clip"] = psdf.A.clip(lower=1, upper=3)
self.assert_eq(psdf, pdf)
# Assert behavior on string values
str_psdf = ps.DataFrame({"A": ["a", "b", "c"]}, index=np.random.rand(3))
self.assert_eq(str_psdf.clip(1, 3), str_psdf)
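# Illustrative sketch of the clip behaviour covered above (hypothetical values):
#
#   ps.DataFrame({"A": [0, 2, 4]}).clip(1, 3)   # column A becomes [1, 2, 3]
#   # list-like bounds such as clip(lower=[1]) are rejected with TypeError, as asserted above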
def test_binary_operators(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf + psdf.copy(), pdf + pdf.copy())
self.assert_eq(psdf + psdf.loc[:, ["A", "B"]], pdf + pdf.loc[:, ["A", "B"]])
self.assert_eq(psdf.loc[:, ["A", "B"]] + psdf, pdf.loc[:, ["A", "B"]] + pdf)
self.assertRaisesRegex(
ValueError,
"it comes from a different dataframe",
lambda: ps.range(10).add(ps.range(10)),
)
self.assertRaisesRegex(
TypeError,
"add with a sequence is currently not supported",
lambda: ps.range(10).add(ps.range(10).id),
)
psdf_other = psdf.copy()
psdf_other.columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
self.assertRaisesRegex(
ValueError,
"cannot join with no overlapping index names",
lambda: psdf.add(psdf_other),
)
def test_binary_operator_add(self):
# Positive
pdf = pd.DataFrame({"a": ["x"], "b": ["y"], "c": [1], "d": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] + psdf["b"], pdf["a"] + pdf["b"])
self.assert_eq(psdf["c"] + psdf["d"], pdf["c"] + pdf["d"])
# Negative
ks_err_msg = "Addition can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + 1)
def test_binary_operator_sub(self):
# Positive
pdf = pd.DataFrame({"a": [2], "b": [1]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] - psdf["b"], pdf["a"] - pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Subtraction can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" - psdf["b"])
ks_err_msg = "Subtraction can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - 1)
psdf = ps.DataFrame({"a": ["x"], "b": ["y"]})
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
def test_binary_operator_truediv(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] / psdf["b"], pdf["a"] / pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "True division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" / psdf["b"])
ks_err_msg = "True division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] / psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 / psdf["a"])
def test_binary_operator_floordiv(self):
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Floor division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] // psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 // psdf["a"])
ks_err_msg = "Floor division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" // psdf["b"])
def test_binary_operator_mod(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] % psdf["b"], pdf["a"] % pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Modulo can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % "literal")
ks_err_msg = "Modulo can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] % psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 % psdf["a"])
def test_binary_operator_multiply(self):
# Positive
pdf = pd.DataFrame({"a": ["x", "y"], "b": [1, 2], "c": [3, 4]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["b"] * psdf["c"], pdf["b"] * pdf["c"])
self.assert_eq(psdf["c"] * psdf["b"], pdf["c"] * pdf["b"])
self.assert_eq(psdf["a"] * psdf["b"], pdf["a"] * pdf["b"])
self.assert_eq(psdf["b"] * psdf["a"], pdf["b"] * pdf["a"])
self.assert_eq(psdf["a"] * 2, pdf["a"] * 2)
self.assert_eq(psdf["b"] * 2, pdf["b"] * 2)
self.assert_eq(2 * psdf["a"], 2 * pdf["a"])
self.assert_eq(2 * psdf["b"], 2 * pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [2]})
ks_err_msg = "Multiplication can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * 0.1)
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 0.1 * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["a"])
def test_sample(self):
pdf = pd.DataFrame({"A": [0, 2, 4]})
psdf = ps.from_pandas(pdf)
# Make sure the tests run, but we can't check the result because they are non-deterministic.
psdf.sample(frac=0.1)
psdf.sample(frac=0.2, replace=True)
psdf.sample(frac=0.2, random_state=5)
psdf["A"].sample(frac=0.2)
psdf["A"].sample(frac=0.2, replace=True)
psdf["A"].sample(frac=0.2, random_state=5)
with self.assertRaises(ValueError):
psdf.sample()
with self.assertRaises(NotImplementedError):
psdf.sample(n=1)
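# Illustrative note: only fraction-based sampling is supported here, e.g.
#
#   psdf.sample(frac=0.5, random_state=1)   # OK; row count is only approximate and non-deterministic
#   psdf.sample(n=3)                        # NotImplementedError, as asserted above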
def test_add_prefix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
def test_add_suffix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
def test_join(self):
# check basic function
pdf1 = pd.DataFrame(
{"key": ["K0", "K1", "K2", "K3"], "A": ["A0", "A1", "A2", "A3"]}, columns=["key", "A"]
)
pdf2 = pd.DataFrame(
{"key": ["K0", "K1", "K2"], "B": ["B0", "B1", "B2"]}, columns=["key", "B"]
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# join with duplicated columns in Series
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
ks1 = ps.Series(["A1", "A5"], index=[1, 2], name="A")
psdf1.join(ks1, how="outer")
# join with duplicated columns in DataFrame
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
psdf1.join(psdf2, how="outer")
# check `on` parameter
join_pdf = pdf1.join(pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index("key").join(
pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index("key").join(
psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index columns
columns1 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "A")])
columns2 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "B")])
pdf1.columns = columns1
pdf2.columns = columns2
psdf1.columns = columns1
psdf2.columns = columns2
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# check `on` parameter
join_pdf = pdf1.join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index(("x", "key")).join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index(("x", "key")).join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index
midx1 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c"), ("z", "d")], names=["index1", "index2"]
)
midx2 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"]
)
pdf1.index = midx1
pdf2.index = midx2
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, on=["index1", "index2"], rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, on=["index1", "index2"], rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
with self.assertRaisesRegex(
ValueError, r'len\(left_on\) must equal the number of levels in the index of "right"'
):
psdf1.join(psdf2, on=["index1"], rsuffix="_right")
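# Illustrative sketch of the suffix handling exercised above (hypothetical frames):
#
#   left = ps.DataFrame({"key": ["K0"], "A": ["A0"]})
#   right = ps.DataFrame({"key": ["K0"], "B": ["B0"]})
#   left.join(right, lsuffix="_left", rsuffix="_right")   # columns: key_left, A, key_right, B
#   left.join(right)   # ValueError: columns overlap but no suffix specified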
def test_replace(self):
pdf = pd.DataFrame(
{
"name": ["Ironman", "Captain America", "Thor", "Hulk"],
"weapon": ["Mark-45", "Shield", "Mjolnir", "Smash"],
},
index=np.random.rand(4),
)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only for method='pad"
):
psdf.replace(method="bfill")
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only when limit=None"
):
psdf.replace(limit=10)
with self.assertRaisesRegex(
NotImplementedError, "replace currently doesn't supports regex"
):
psdf.replace(regex="")
with self.assertRaisesRegex(ValueError, "Length of to_replace and value must be same"):
psdf.replace(to_replace=["Ironman"], value=["Spiderman", "Doctor Strange"])
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace("Ironman", lambda x: "Spiderman")
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace(lambda x: "Ironman", "Spiderman")
self.assert_eq(psdf.replace("Ironman", "Spiderman"), pdf.replace("Ironman", "Spiderman"))
self.assert_eq(
psdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
pdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
)
self.assert_eq(
psdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
pdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
)
# inplace
pser = pdf.name
psser = psdf.name
pdf.replace("Ironman", "Spiderman", inplace=True)
psdf.replace("Ironman", "Spiderman", inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf = pd.DataFrame(
{"A": [0, 1, 2, 3, np.nan], "B": [5, 6, 7, 8, np.nan], "C": ["a", "b", "c", "d", None]},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
pdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
)
self.assert_eq(
psdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(psdf.replace({"C": ["a", None]}, "e"), pdf.replace({"C": ["a", None]}, "e"))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
pdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
)
self.assert_eq(
psdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("Y", "C"): ["a", None]}, "e"),
pdf.replace({("Y", "C"): ["a", None]}, "e"),
)
def test_update(self):
# check base function
def get_data(left_columns=None, right_columns=None):
left_pdf = pd.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", np.nan, np.nan]}, columns=["A", "B"]
)
right_pdf = pd.DataFrame(
{"B": ["x", np.nan, "y", np.nan], "C": ["100", "200", "300", "400"]},
columns=["B", "C"],
)
left_psdf = ps.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", None, None]}, columns=["A", "B"]
)
right_psdf = ps.DataFrame(
{"B": ["x", None, "y", None], "C": ["100", "200", "300", "400"]}, columns=["B", "C"]
)
if left_columns is not None:
left_pdf.columns = left_columns
left_psdf.columns = left_columns
if right_columns is not None:
right_pdf.columns = right_columns
right_psdf.columns = right_columns
return left_psdf, left_pdf, right_psdf, right_pdf
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
pser = left_pdf.B
psser = left_psdf.B
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
self.assert_eq(psser.sort_index(), pser.sort_index())
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
with self.assertRaises(NotImplementedError):
left_psdf.update(right_psdf, join="right")
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
right_columns = pd.MultiIndex.from_tuples([("X", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
right_columns = pd.MultiIndex.from_tuples([("Y", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
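# Illustrative sketch of the update() semantics checked above (hypothetical values):
#
#   left = ps.DataFrame({"B": ["100", None]})
#   right = ps.DataFrame({"B": ["x", "y"]})
#   left.update(right)   # modifies `left` in place: B -> ["x", "y"]
#   # with overwrite=False, only the missing (None) entry in `left` would be filled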
def test_pivot_table_dtypes(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# Skip the column-name comparison by resetting the index (compare dtypes only)
res_df = psdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
exp_df = pdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
self.assert_eq(res_df, exp_df)
# Results don't have the same column names
# Todo: self.assert_eq(psdf.pivot_table(columns="a", values="b").dtypes,
# pdf.pivot_table(columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes, pdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes)
def test_pivot_table(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [10, 20, 20, 40, 20, 40],
"c": [1, 2, 9, 4, 7, 4],
"d": [-1, -2, -3, -4, -5, -6],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# Checking if both DataFrames have the same results
self.assert_eq(
psdf.pivot_table(columns="a", values="b").sort_index(),
pdf.pivot_table(columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", fill_value=999
).sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b", fill_value=999).sort_index(),
almost=True,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "e"), ("z", "c"), ("w", "d")]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pivot_table(columns=("x", "a"), values=("x", "b")).sort_index(),
pdf.pivot_table(columns=[("x", "a")], values=[("x", "b")]).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b"), ("y", "e")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e"), ("w", "d")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e"), ("w", "d")],
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")],
columns=("x", "a"),
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
almost=True,
)
def test_pivot_table_and_index(self):
# https://github.com/databricks/koalas/issues/805
pdf = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
ptable = pdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
ktable = psdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
self.assert_eq(ktable, ptable)
self.assert_eq(ktable.index, ptable.index)
self.assert_eq(repr(ktable.index), repr(ptable.index))
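# Illustrative sketch of the pivot_table parity above (same data; call mirrors the test):
#
#   psdf.pivot_table(values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0)
#   # compared against the identical pandas call, both sorted with sort_index()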
def test_stack(self):
pdf_single_level_cols = pd.DataFrame(
[[0, 1], [2, 3]], index=["cat", "dog"], columns=["weight", "height"]
)
psdf_single_level_cols = ps.from_pandas(pdf_single_level_cols)
self.assert_eq(
psdf_single_level_cols.stack().sort_index(), pdf_single_level_cols.stack().sort_index()
)
multicol1 = pd.MultiIndex.from_tuples(
[("weight", "kg"), ("weight", "pounds")], names=["x", "y"]
)
pdf_multi_level_cols1 = pd.DataFrame(
[[1, 2], [2, 4]], index=["cat", "dog"], columns=multicol1
)
psdf_multi_level_cols1 = ps.from_pandas(pdf_multi_level_cols1)
self.assert_eq(
psdf_multi_level_cols1.stack().sort_index(), pdf_multi_level_cols1.stack().sort_index()
)
multicol2 = pd.MultiIndex.from_tuples([("weight", "kg"), ("height", "m")])
pdf_multi_level_cols2 = pd.DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=["cat", "dog"], columns=multicol2
)
psdf_multi_level_cols2 = ps.from_pandas(pdf_multi_level_cols2)
self.assert_eq(
psdf_multi_level_cols2.stack().sort_index(), pdf_multi_level_cols2.stack().sort_index()
)
pdf = pd.DataFrame(
{
("y", "c"): [True, True],
("x", "b"): [False, False],
("x", "c"): [True, False],
("y", "a"): [False, True],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.stack().sort_index(), pdf.stack().sort_index())
self.assert_eq(psdf[[]].stack().sort_index(), pdf[[]].stack().sort_index(), almost=True)
def test_unstack(self):
pdf = pd.DataFrame(
np.random.randn(3, 3),
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.unstack().sort_index(), pdf.unstack().sort_index(), almost=True)
self.assert_eq(
psdf.unstack().unstack().sort_index(), pdf.unstack().unstack().sort_index(), almost=True
)
def test_pivot_errors(self):
psdf = ps.range(10)
with self.assertRaisesRegex(ValueError, "columns should be set"):
psdf.pivot(index="id")
with self.assertRaisesRegex(ValueError, "values should be set"):
psdf.pivot(index="id", columns="id")
def test_pivot_table_errors(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.pivot_table(index=["c"], columns="a", values=5))
msg = "index should be a None or a list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index="c", columns="a", values="b")
msg = "pivot_table doesn't support aggfunc as dict and without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"})
msg = "columns should be one column name."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns=["a"], values=["b"], aggfunc={"b": "mean", "e": "sum"})
msg = "Columns in aggfunc must be the same as values."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", aggfunc={"b": "mean", "e": "sum"}
)
msg = "values can't be a list without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"])
msg = "Wrong columns A."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["c"], columns="A", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
)
msg = "values should be one column or list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values=(["b"], ["c"]))
msg = "aggfunc must be a dict mapping from column name to aggregate functions"
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values="b", aggfunc={"a": lambda x: sum(x)})
psdf = ps.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(
index=["C"], columns="A", values=["B", "E"], aggfunc={"B": "mean", "E": "sum"}
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index=["C"], columns="A", values="B", aggfunc={"B": "mean"})
def test_transpose(self):
# TODO: what about using a random index?
pdf1 = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}, columns=["col1", "col2"])
psdf1 = ps.from_pandas(pdf1)
pdf2 = pd.DataFrame(
data={"score": [9, 8], "kids": [0, 0], "age": [12, 22]},
columns=["score", "kids", "age"],
)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
with option_context("compute.max_rows", None):
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
pdf3 = pd.DataFrame(
{
("cg1", "a"): [1, 2, 3],
("cg1", "b"): [4, 5, 6],
("cg2", "c"): [7, 8, 9],
("cg3", "d"): [9, 9, 9],
},
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
with option_context("compute.max_rows", None):
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
def _test_cummin(self, pdf, psdf):
self.assert_eq(pdf.cummin(), psdf.cummin())
self.assert_eq(pdf.cummin(skipna=False), psdf.cummin(skipna=False))
self.assert_eq(pdf.cummin().sum(), psdf.cummin().sum())
def test_cummin(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def test_cummin_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def _test_cummax(self, pdf, psdf):
self.assert_eq(pdf.cummax(), psdf.cummax())
self.assert_eq(pdf.cummax(skipna=False), psdf.cummax(skipna=False))
self.assert_eq(pdf.cummax().sum(), psdf.cummax().sum())
def test_cummax(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def test_cummax_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def _test_cumsum(self, pdf, psdf):
self.assert_eq(pdf.cumsum(), psdf.cumsum())
self.assert_eq(pdf.cumsum(skipna=False), psdf.cumsum(skipna=False))
self.assert_eq(pdf.cumsum().sum(), psdf.cumsum().sum())
def test_cumsum(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def test_cumsum_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def _test_cumprod(self, pdf, psdf):
self.assert_eq(pdf.cumprod(), psdf.cumprod(), almost=True)
self.assert_eq(pdf.cumprod(skipna=False), psdf.cumprod(skipna=False), almost=True)
self.assert_eq(pdf.cumprod().sum(), psdf.cumprod().sum(), almost=True)
def test_cumprod(self):
pdf = pd.DataFrame(
[[2.0, 1.0, 1], [5, None, 2], [1.0, -1.0, -3], [2.0, 0, 4], [4.0, 9.0, 5]],
columns=list("ABC"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_cumprod_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.rand(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
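# Illustrative note on the _test_cum* helpers above: each one checks the default
# (skipna=True), skipna=False, and a follow-up reduction, e.g.
#
#   psdf.cumsum()               # NaN entries stay NaN but do not affect later sums
#   psdf.cumsum(skipna=False)   # a NaN poisons every later value in its column
#   psdf.cumsum().sum()         # reduction over the cumulative result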
def test_drop_duplicates(self):
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
# inplace is False
for keep in ["first", "last", False]:
with self.subTest(keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates("a", keep=keep).sort_index(),
psdf.drop_duplicates("a", keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
psdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
# inplace is False
for keep in ["first", "last", False]:
with self.subTest("multi-index columns", keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(("x", "a"), keep=keep).sort_index(),
psdf.drop_duplicates(("x", "a"), keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(),
psdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(),
)
# inplace is True
subset_list = [None, "a", ["a", "b"]]
for subset in subset_list:
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
pser = pdf.a
psser = psdf.a
pdf.drop_duplicates(subset=subset, inplace=True)
psdf.drop_duplicates(subset=subset, inplace=True)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser.sort_index(), pser.sort_index())
# multi-index columns, inplace is True
subset_list = [None, ("x", "a"), [("x", "a"), ("y", "b")]]
for subset in subset_list:
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
pser = pdf[("x", "a")]
psser = psdf[("x", "a")]
pdf.drop_duplicates(subset=subset, inplace=True)
psdf.drop_duplicates(subset=subset, inplace=True)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser.sort_index(), pser.sort_index())
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 2, 2, 3], 20: ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.drop_duplicates(10, keep=keep).sort_index(),
psdf.drop_duplicates(10, keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates([10, 20], keep=keep).sort_index(),
psdf.drop_duplicates([10, 20], keep=keep).sort_index(),
)
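# Illustrative note: `keep` controls which duplicate survives, mirroring pandas:
#
#   psdf.drop_duplicates("a", keep="first")   # keep the first occurrence (default)
#   psdf.drop_duplicates("a", keep="last")    # keep the last occurrence
#   psdf.drop_duplicates("a", keep=False)     # drop every duplicated row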
def test_reindex(self):
index = pd.Index(["A", "B", "C", "D", "E"])
columns = pd.Index(["numbers"])
pdf = pd.DataFrame([1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
columns2 = pd.Index(["numbers", "2", "3"], name="cols2")
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
columns = pd.Index(["numbers"], name="cols")
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
pdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(),
psdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(),
psdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(index=["A", "B"]).sort_index(), psdf.reindex(index=["A", "B"]).sort_index()
)
self.assert_eq(
pdf.reindex(index=["A", "B", "2", "3"]).sort_index(),
psdf.reindex(index=["A", "B", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(),
psdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(),
)
self.assert_eq(
pdf.reindex(columns=["numbers"]).sort_index(),
psdf.reindex(columns=["numbers"]).sort_index(),
)
self.assert_eq(
pdf.reindex(columns=["numbers"], copy=True).sort_index(),
psdf.reindex(columns=["numbers"], copy=True).sort_index(),
)
# Using float as fill_value to avoid int64/32 clash
self.assert_eq(
pdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(),
psdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(),
)
columns2 = pd.Index(["numbers", "2", "3"])
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
columns2 = pd.Index(["numbers", "2", "3"], name="cols2")
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
# Reindexing single Index on single Index
pindex2 = pd.Index(["A", "C", "D", "E", "0"], name="index2")
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
pindex2 = pd.DataFrame({"index2": ["A", "C", "D", "E", "0"]}).set_index("index2").index
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
# Reindexing MultiIndex on single Index
pindex = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("F", "G")], names=["name1", "name2"]
)
kindex = ps.from_pandas(pindex)
self.assert_eq(
pdf.reindex(index=pindex, fill_value=0.0).sort_index(),
psdf.reindex(index=kindex, fill_value=0.0).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["numbers", "2", "3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["numbers", "2", "3"], axis=2))
self.assertRaises(TypeError, lambda: psdf.reindex(columns="numbers"))
self.assertRaises(TypeError, lambda: psdf.reindex(index=["A", "B", "C"], axis=1))
self.assertRaises(TypeError, lambda: psdf.reindex(index=123))
# Reindexing MultiIndex on MultiIndex
pdf = pd.DataFrame({"numbers": [1.0, 2.0, None]}, index=pindex)
psdf = ps.from_pandas(pdf)
pindex2 = pd.MultiIndex.from_tuples(
[("A", "G"), ("C", "D"), ("I", "J")], names=["name1", "name2"]
)
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
pindex2 = (
pd.DataFrame({"index_level_1": ["A", "C", "I"], "index_level_2": ["G", "D", "J"]})
.set_index(["index_level_1", "index_level_2"])
.index
)
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", "numbers")], names=["cols1", "cols2"])
pdf.columns = columns
psdf.columns = columns
# Reindexing MultiIndex index on MultiIndex columns and MultiIndex index
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
index = pd.Index(["A", "B", "C", "D", "E"])
pdf = pd.DataFrame(data=[1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
pindex2 = pd.Index(["A", "C", "D", "E", "0"], name="index2")
kindex2 = ps.from_pandas(pindex2)
# Reindexing single Index on MultiIndex columns and single Index
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(
columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=fill_value
).sort_index(),
psdf.reindex(
columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=fill_value
).sort_index(),
)
columns2 = pd.MultiIndex.from_tuples(
[("X", "numbers"), ("Y", "2"), ("Y", "3")], names=["cols3", "cols4"]
)
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["X"]))
self.assertRaises(ValueError, lambda: psdf.reindex(columns=[("X",)]))
def test_reindex_like(self):
data = [[1.0, 2.0], [3.0, None], [None, 4.0]]
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame(data=data, index=index, columns=columns)
psdf = ps.from_pandas(pdf)
# Reindexing single Index on single Index
data2 = [[5.0, None], [6.0, 7.0], [8.0, None]]
index2 = pd.Index(["A", "C", "D"], name="index2")
columns2 = pd.Index(["numbers", "F"], name="cols2")
pdf2 = pd.DataFrame(data=data2, index=index2, columns=columns2)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
pdf2 = pd.DataFrame({"index_level_1": ["A", "C", "I"]})
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2.set_index(["index_level_1"])).sort_index(),
psdf.reindex_like(psdf2.set_index(["index_level_1"])).sort_index(),
)
# Reindexing MultiIndex on single Index
index2 = pd.MultiIndex.from_tuples(
[("A", "G"), ("C", "D"), ("I", "J")], names=["name3", "name4"]
)
pdf2 = pd.DataFrame(data=data2, index=index2)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex_like(index2))
self.assertRaises(AssertionError, lambda: psdf2.reindex_like(psdf))
# Reindexing MultiIndex on MultiIndex
columns2 = pd.MultiIndex.from_tuples(
[("numbers", "third"), ("values", "second")], names=["cols3", "cols4"]
)
pdf2.columns = columns2
psdf2.columns = columns2
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["name1", "name2"]
)
pdf = pd.DataFrame(data=data, index=index, columns=columns)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
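# Illustrative note (hypothetical frames): reindex_like conforms one frame to the
# row index and columns of another, inserting NaN where labels are missing:
#
#   psdf.reindex_like(other_psdf)   # same labels as other_psdf, NaN where absent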
def test_melt(self):
pdf = pd.DataFrame(
{"A": [1, 3, 5], "B": [2, 4, 6], "C": [7, 8, 9]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.melt().sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars="A").sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars="A").sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A", "B"]).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=["A", "B"]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=("A", "B")).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=("A", "B")).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A"], value_vars=["C"])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=["A"], value_vars=["C"]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A"], value_vars=["B"], var_name="myVarname", value_name="myValname")
.sort_values(["myVarname", "myValname"])
.reset_index(drop=True),
pdf.melt(
id_vars=["A"], value_vars=["B"], var_name="myVarname", value_name="myValname"
).sort_values(["myVarname", "myValname"]),
)
self.assert_eq(
psdf.melt(value_vars=("A", "B"))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(value_vars=("A", "B")).sort_values(["variable", "value"]),
)
self.assertRaises(KeyError, lambda: psdf.melt(id_vars="Z"))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars="Z"))
# multi-index columns
if LooseVersion("0.24") <= LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
# pandas >=0.24,<1.0 doesn't support mixed int/str columns in melt.
# see: https://github.com/pandas-dev/pandas/pull/29792
TEN = "10"
TWELVE = "20"
else:
TEN = 10.0
TWELVE = 20.0
columns = pd.MultiIndex.from_tuples([(TEN, "A"), (TEN, "B"), (TWELVE, "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.melt().sort_values(["variable_0", "variable_1", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable_0", "variable_1", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[(TEN, "A")])
.sort_values(["variable_0", "variable_1", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[(TEN, "A")])
.sort_values(["variable_0", "variable_1", "value"])
.rename(columns=name_like_string),
)
self.assert_eq(
psdf.melt(id_vars=[(TEN, "A")], value_vars=[(TWELVE, "C")])
.sort_values(["variable_0", "variable_1", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[(TEN, "A")], value_vars=[(TWELVE, "C")])
.sort_values(["variable_0", "variable_1", "value"])
.rename(columns=name_like_string),
)
self.assert_eq(
psdf.melt(
id_vars=[(TEN, "A")],
value_vars=[(TEN, "B")],
var_name=["myV1", "myV2"],
value_name="myValname",
)
.sort_values(["myV1", "myV2", "myValname"])
.reset_index(drop=True),
pdf.melt(
id_vars=[(TEN, "A")],
value_vars=[(TEN, "B")],
var_name=["myV1", "myV2"],
value_name="myValname",
)
.sort_values(["myV1", "myV2", "myValname"])
.rename(columns=name_like_string),
)
columns.names = ["v0", "v1"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.melt().sort_values(["v0", "v1", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["v0", "v1", "value"]),
)
self.assertRaises(ValueError, lambda: psdf.melt(id_vars=(TEN, "A")))
self.assertRaises(ValueError, lambda: psdf.melt(value_vars=(TEN, "A")))
self.assertRaises(KeyError, lambda: psdf.melt(id_vars=[TEN]))
self.assertRaises(KeyError, lambda: psdf.melt(id_vars=[(TWELVE, "A")]))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars=[TWELVE]))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars=[(TWELVE, "A")]))
# non-string names
pdf.columns = [10.0, 20.0, 30.0]
psdf.columns = [10.0, 20.0, 30.0]
self.assert_eq(
psdf.melt().sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=10.0).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=10.0).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[10.0, 20.0])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[10.0, 20.0]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=(10.0, 20.0))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=(10.0, 20.0)).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[10.0], value_vars=[30.0])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[10.0], value_vars=[30.0]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(value_vars=(10.0, 20.0))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(value_vars=(10.0, 20.0)).sort_values(["variable", "value"]),
)
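# Illustrative sketch of the melt calls above (hypothetical values):
#
#   psdf = ps.DataFrame({"A": [1, 3], "B": [2, 4]})
#   psdf.melt(id_vars="A", value_vars="B")
#   # -> long format with columns ["A", "variable", "value"]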
def test_all(self):
pdf = pd.DataFrame(
{
"col1": [False, False, False],
"col2": [True, False, False],
"col3": [0, 0, 1],
"col4": [0, 1, 2],
"col5": [False, False, None],
"col6": [True, False, None],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.all(), pdf.all())
columns = pd.MultiIndex.from_tuples(
[
("a", "col1"),
("a", "col2"),
("a", "col3"),
("b", "col4"),
("b", "col5"),
("c", "col6"),
]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.all(), pdf.all())
columns.names = ["X", "Y"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.all(), pdf.all())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.all(axis=1)
def test_any(self):
pdf = pd.DataFrame(
{
"col1": [False, False, False],
"col2": [True, False, False],
"col3": [0, 0, 1],
"col4": [0, 1, 2],
"col5": [False, False, None],
"col6": [True, False, None],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.any(), pdf.any())
columns = pd.MultiIndex.from_tuples(
[
("a", "col1"),
("a", "col2"),
("a", "col3"),
("b", "col4"),
("b", "col5"),
("c", "col6"),
]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.any(), pdf.any())
columns.names = ["X", "Y"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.any(), pdf.any())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.any(axis=1)
def test_rank(self):
pdf = pd.DataFrame(
data={"col1": [1, 2, 3, 1], "col2": [3, 4, 3, 1]},
columns=["col1", "col2"],
index=np.random.rand(4),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.rank().sort_index(), psdf.rank().sort_index())
self.assert_eq(pdf.rank().sum(), psdf.rank().sum())
self.assert_eq(
pdf.rank(ascending=False).sort_index(), psdf.rank(ascending=False).sort_index()
)
self.assert_eq(pdf.rank(method="min").sort_index(), psdf.rank(method="min").sort_index())
self.assert_eq(pdf.rank(method="max").sort_index(), psdf.rank(method="max").sort_index())
self.assert_eq(
pdf.rank(method="first").sort_index(), psdf.rank(method="first").sort_index()
)
self.assert_eq(
pdf.rank(method="dense").sort_index(), psdf.rank(method="dense").sort_index()
)
msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'"
with self.assertRaisesRegex(ValueError, msg):
psdf.rank(method="nothing")
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "col1"), ("y", "col2")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.rank().sort_index(), psdf.rank().sort_index())
def test_round(self):
pdf = pd.DataFrame(
{
"A": [0.028208, 0.038683, 0.877076],
"B": [0.992815, 0.645646, 0.149370],
"C": [0.173891, 0.577595, 0.491027],
},
columns=["A", "B", "C"],
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
pser = pd.Series([1, 0, 2], index=["A", "B", "C"])
psser = ps.Series([1, 0, 2], index=["A", "B", "C"])
self.assert_eq(pdf.round(2), psdf.round(2))
self.assert_eq(pdf.round({"A": 1, "C": 2}), psdf.round({"A": 1, "C": 2}))
self.assert_eq(pdf.round({"A": 1, "D": 2}), psdf.round({"A": 1, "D": 2}))
self.assert_eq(pdf.round(pser), psdf.round(psser))
msg = "decimals must be an integer, a dict-like or a Series"
with self.assertRaisesRegex(TypeError, msg):
psdf.round(1.5)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
pser = pd.Series([1, 0, 2], index=columns)
psser = ps.Series([1, 0, 2], index=columns)
self.assert_eq(pdf.round(2), psdf.round(2))
self.assert_eq(
pdf.round({("X", "A"): 1, ("Y", "C"): 2}), psdf.round({("X", "A"): 1, ("Y", "C"): 2})
)
self.assert_eq(pdf.round({("X", "A"): 1, "Y": 2}), psdf.round({("X", "A"): 1, "Y": 2}))
self.assert_eq(pdf.round(pser), psdf.round(psser))
# non-string names
pdf = pd.DataFrame(
{
10: [0.028208, 0.038683, 0.877076],
20: [0.992815, 0.645646, 0.149370],
30: [0.173891, 0.577595, 0.491027],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.round({10: 1, 30: 2}), psdf.round({10: 1, 30: 2}))
def test_shift(self):
pdf = pd.DataFrame(
{
"Col1": [10, 20, 15, 30, 45],
"Col2": [13, 23, 18, 33, 48],
"Col3": [17, 27, 22, 37, 52],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.shift(3), psdf.shift(3))
self.assert_eq(pdf.shift().shift(-1), psdf.shift().shift(-1))
self.assert_eq(pdf.shift().sum().astype(int), psdf.shift().sum())
# pandas 0.23 does not support the `fill_value` argument, so build the expected result by hand.
pdf1 = pd.DataFrame(
{"Col1": [0, 0, 0, 10, 20], "Col2": [0, 0, 0, 13, 23], "Col3": [0, 0, 0, 17, 27]},
index=pdf.index,
)
self.assert_eq(pdf1, psdf.shift(periods=3, fill_value=0))
msg = "should be an int"
with self.assertRaisesRegex(TypeError, msg):
psdf.shift(1.5)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "Col1"), ("x", "Col2"), ("y", "Col3")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.shift(3), psdf.shift(3))
self.assert_eq(pdf.shift().shift(-1), psdf.shift().shift(-1))
def test_diff(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.diff(), psdf.diff())
self.assert_eq(pdf.diff().diff(-1), psdf.diff().diff(-1))
self.assert_eq(pdf.diff().sum().astype(int), psdf.diff().sum())
msg = "should be an int"
with self.assertRaisesRegex(TypeError, msg):
psdf.diff(1.5)
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.diff(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "Col1"), ("x", "Col2"), ("y", "Col3")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.diff(), psdf.diff())
def test_duplicated(self):
pdf = pd.DataFrame(
{"a": [1, 1, 2, 3], "b": [1, 1, 1, 4], "c": [1, 1, 1, 5]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(keep="last").sort_index(),
psdf.duplicated(keep="last").sort_index(),
)
self.assert_eq(
pdf.duplicated(keep=False).sort_index(),
psdf.duplicated(keep=False).sort_index(),
)
self.assert_eq(
pdf.duplicated(subset="b").sort_index(),
psdf.duplicated(subset="b").sort_index(),
)
self.assert_eq(
pdf.duplicated(subset=["b"]).sort_index(),
psdf.duplicated(subset=["b"]).sort_index(),
)
with self.assertRaisesRegex(ValueError, "'keep' only supports 'first', 'last' and False"):
psdf.duplicated(keep="false")
with self.assertRaisesRegex(KeyError, "'d'"):
psdf.duplicated(subset=["d"])
pdf.index.name = "x"
psdf.index.name = "x"
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
# multi-index
self.assert_eq(
pdf.set_index("a", append=True).duplicated().sort_index(),
psdf.set_index("a", append=True).duplicated().sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).duplicated(keep=False).sort_index(),
psdf.set_index("a", append=True).duplicated(keep=False).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).duplicated(subset=["b"]).sort_index(),
psdf.set_index("a", append=True).duplicated(subset=["b"]).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(subset=("x", "b")).sort_index(),
psdf.duplicated(subset=("x", "b")).sort_index(),
)
self.assert_eq(
pdf.duplicated(subset=[("x", "b")]).sort_index(),
psdf.duplicated(subset=[("x", "b")]).sort_index(),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 1, 2, 3], 20: [1, 1, 1, 4], 30: [1, 1, 1, 5]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(subset=10).sort_index(),
psdf.duplicated(subset=10).sort_index(),
)
def test_ffill(self):
idx = np.random.rand(6)
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=idx,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.ffill(), pdf.ffill())
self.assert_eq(psdf.ffill(limit=1), pdf.ffill(limit=1))
pser = pdf.y
psser = psdf.y
psdf.ffill(inplace=True)
pdf.ffill(inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
self.assert_eq(psser[idx[2]], pser[idx[2]])
def test_bfill(self):
idx = np.random.rand(6)
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=idx,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.bfill(), pdf.bfill())
self.assert_eq(psdf.bfill(limit=1), pdf.bfill(limit=1))
pser = pdf.x
psser = psdf.x
psdf.bfill(inplace=True)
pdf.bfill(inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
self.assert_eq(psser[idx[0]], pser[idx[0]])
def test_filter(self):
pdf = pd.DataFrame(
{
"aa": ["aa", "bd", "bc", "ab", "ce"],
"ba": [1, 2, 3, 4, 5],
"cb": [1.0, 2.0, 3.0, 4.0, 5.0],
"db": [1.0, np.nan, 3.0, np.nan, 5.0],
}
)
pdf = pdf.set_index("aa")
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
with option_context("compute.isin_limit", 0):
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
self.assert_eq(
psdf.filter(items=["ba", "db"], axis=1).sort_index(),
pdf.filter(items=["ba", "db"], axis=1).sort_index(),
)
self.assert_eq(psdf.filter(like="b", axis="index"), pdf.filter(like="b", axis="index"))
self.assert_eq(psdf.filter(like="c", axis="columns"), pdf.filter(like="c", axis="columns"))
self.assert_eq(
psdf.filter(regex="b.*", axis="index"), pdf.filter(regex="b.*", axis="index")
)
self.assert_eq(
psdf.filter(regex="b.*", axis="columns"), pdf.filter(regex="b.*", axis="columns")
)
pdf = pdf.set_index("ba", append=True)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=[("aa", 1), ("bd", 2)], axis=0).sort_index(),
pdf.filter(items=[("aa", 1), ("bd", 2)], axis=0).sort_index(),
)
with self.assertRaisesRegex(TypeError, "Unsupported type list"):
psdf.filter(items=[["aa", 1], ("bd", 2)], axis=0)
with self.assertRaisesRegex(ValueError, "The item should not be empty."):
psdf.filter(items=[(), ("bd", 2)], axis=0)
self.assert_eq(psdf.filter(like="b", axis=0), pdf.filter(like="b", axis=0))
self.assert_eq(psdf.filter(regex="b.*", axis=0), pdf.filter(regex="b.*", axis=0))
with self.assertRaisesRegex(ValueError, "items should be a list-like object"):
psdf.filter(items="b")
with self.assertRaisesRegex(ValueError, "No axis named"):
psdf.filter(regex="b.*", axis=123)
with self.assertRaisesRegex(TypeError, "Must pass either `items`, `like`"):
psdf.filter()
with self.assertRaisesRegex(TypeError, "mutually exclusive"):
psdf.filter(regex="b.*", like="aaa")
# multi-index columns
pdf = pd.DataFrame(
{
("x", "aa"): ["aa", "ab", "bc", "bd", "ce"],
("x", "ba"): [1, 2, 3, 4, 5],
("y", "cb"): [1.0, 2.0, 3.0, 4.0, 5.0],
("z", "db"): [1.0, np.nan, 3.0, np.nan, 5.0],
}
)
pdf = pdf.set_index(("x", "aa"))
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
self.assert_eq(
psdf.filter(items=[("x", "ba"), ("z", "db")], axis=1).sort_index(),
pdf.filter(items=[("x", "ba"), ("z", "db")], axis=1).sort_index(),
)
self.assert_eq(psdf.filter(like="b", axis="index"), pdf.filter(like="b", axis="index"))
self.assert_eq(psdf.filter(like="c", axis="columns"), pdf.filter(like="c", axis="columns"))
self.assert_eq(
psdf.filter(regex="b.*", axis="index"), pdf.filter(regex="b.*", axis="index")
)
self.assert_eq(
psdf.filter(regex="b.*", axis="columns"), pdf.filter(regex="b.*", axis="columns")
)
def test_pipe(self):
psdf = ps.DataFrame(
{"category": ["A", "A", "B"], "col1": [1, 2, 3], "col2": [4, 5, 6]},
columns=["category", "col1", "col2"],
)
self.assertRaisesRegex(
ValueError,
"arg is both the pipe target and a keyword argument",
lambda: psdf.pipe((lambda x: x, "arg"), arg="1"),
)
def test_transform(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
self.assert_eq(
psdf.transform(lambda x, y: x + y, y=2).sort_index(),
pdf.transform(lambda x, y: x + y, y=2).sort_index(),
)
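# A note on intent (an assumption based on the option name): the frame above has 600 rows,
# so lowering compute.shortcut_limit to 500 should re-run the same checks through the
# non-shortcut, fully distributed code path.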
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
self.assert_eq(
psdf.transform(lambda x, y: x + y, y=1).sort_index(),
pdf.transform(lambda x, y: x + y, y=1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.transform(1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
def test_apply(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
pdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, b=1).sort_index(),
pdf.apply(lambda x, b: x + b, b=1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
pdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, b=1).sort_index(),
pdf.apply(lambda x, b: x + b, b=1).sort_index(),
)
# returning a Series
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
pdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
pdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.apply(1)
with self.assertRaisesRegex(TypeError, "The given function.*1 or 'column'; however"):
def f1(_) -> ps.DataFrame[int]:
pass
psdf.apply(f1, axis=0)
with self.assertRaisesRegex(TypeError, "The given function.*0 or 'index'; however"):
def f2(_) -> ps.Series[int]:
pass
psdf.apply(f2, axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
# returning a Series
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
def test_apply_with_type(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
def identify1(x) -> ps.DataFrame[int, int]:
return x
# Type hints set the default column names, and pandas API on Spark uses a default index.
# Here we ignore both of those differences.
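# For example (a sketch of what the checks below assert):
#   def f(x) -> ps.DataFrame[int, int]: return x
#   psdf.apply(f, axis=1).columns  ->  c0, c1 with a default index,
# while plain pandas keeps the original labels a and b.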
actual = psdf.apply(identify1, axis=1)
expected = pdf.apply(identify1, axis=1)
self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy()))
def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405
return x
actual = psdf.apply(identify2, axis=1)
expected = pdf.apply(identify2, axis=1)
self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy()))
def test_apply_batch(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf, a: pdf + a, args=(1,)).sort_index(),
(pdf + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf: pdf + 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf, b: pdf + b, b=1).sort_index(),
(pdf + 1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.pandas_on_spark.apply_batch(1)
with self.assertRaisesRegex(TypeError, "The given function.*frame as its type hints"):
def f2(_) -> ps.Series[int]:
pass
psdf.pandas_on_spark.apply_batch(f2)
with self.assertRaisesRegex(ValueError, "The given function should return a frame"):
psdf.pandas_on_spark.apply_batch(lambda pdf: 1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda x: x + 1).sort_index(), (pdf + 1).sort_index()
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda x: x + 1).sort_index(),
(pdf + 1).sort_index(),
)
def test_apply_batch_with_type(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
def identify1(x) -> ps.DataFrame[int, int]:
return x
# Type hints set the default column names, and pandas API on Spark uses a default index.
# Here we ignore both of those differences.
actual = psdf.pandas_on_spark.apply_batch(identify1)
expected = pdf
self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy()))
def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405
return x
actual = psdf.pandas_on_spark.apply_batch(identify2)
expected = pdf
self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy()))
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [[e] for e in [4, 5, 6, 3, 2, 1, 0, 0, 0]]},
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
def identify3(x) -> ps.DataFrame[float, [int, List[int]]]:
return x
actual = psdf.pandas_on_spark.apply_batch(identify3)
actual.columns = ["a", "b"]
self.assert_eq(actual, pdf)
# NumPy typing requires NumPy 1.21+ and Python 3.8+.
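# A hedged note: ntp.NDArray[int] in the return hint is expected to behave like the
# List[int] hint above, i.e. to produce an array column, which the equality check verifies.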
if sys.version_info >= (3, 8) and LooseVersion(np.__version__) >= LooseVersion("1.21"):
import numpy.typing as ntp
psdf = ps.from_pandas(pdf)
def identify4(x) -> ps.DataFrame[float, [int, ntp.NDArray[int]]]: # type: ignore
return x
actual = psdf.pandas_on_spark.apply_batch(identify4)
actual.columns = ["a", "b"]
self.assert_eq(actual, pdf)
def test_transform_batch(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.c + 1).sort_index(),
(pdf.c + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf + a, 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf.c + a, a=1).sort_index(),
(pdf.c + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf + 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.b + 1).sort_index(),
(pdf.b + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf + a, 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf.c + a, a=1).sort_index(),
(pdf.c + 1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.pandas_on_spark.transform_batch(1)
with self.assertRaisesRegex(ValueError, "The given function should return a frame"):
psdf.pandas_on_spark.transform_batch(lambda pdf: 1)
with self.assertRaisesRegex(
ValueError, "transform_batch cannot produce aggregated results"
):
psdf.pandas_on_spark.transform_batch(lambda pdf: pd.Series(1))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda x: x + 1).sort_index(),
(pdf + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda x: x + 1).sort_index(),
(pdf + 1).sort_index(),
)
def test_transform_batch_with_type(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
def identify1(x) -> ps.DataFrame[int, int]:
return x
# Type hints set the default column names, and pandas API on Spark uses a default index.
# Here we ignore both of those differences.
actual = psdf.pandas_on_spark.transform_batch(identify1)
expected = pdf
self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy()))
def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405
return x
actual = psdf.pandas_on_spark.transform_batch(identify2)
expected = pdf
self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy()))
def test_transform_batch_same_anchor(self):
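# The point of this test (per its name): transform_batch keeps the result anchored to the
# same underlying Spark frame, so it can be assigned straight back as a new column here.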
psdf = ps.range(10)
psdf["d"] = psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.id + 1)
self.assert_eq(
psdf,
pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]),
)
psdf = ps.range(10)
def plus_one(pdf) -> ps.Series[np.int64]:
return pdf.id + 1
psdf["d"] = psdf.pandas_on_spark.transform_batch(plus_one)
self.assert_eq(
psdf,
pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]),
)
psdf = ps.range(10)
def plus_one(ser) -> ps.Series[np.int64]:
return ser + 1
psdf["d"] = psdf.id.pandas_on_spark.transform_batch(plus_one)
self.assert_eq(
psdf,
pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]),
)
def test_empty_timestamp(self):
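# `t != t` never holds, so the selection below is empty; the test checks that the
# timestamp dtype survives the empty result.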
pdf = pd.DataFrame(
{
"t": [
datetime(2019, 1, 1, 0, 0, 0),
datetime(2019, 1, 2, 0, 0, 0),
datetime(2019, 1, 3, 0, 0, 0),
]
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf[psdf["t"] != psdf["t"]], pdf[pdf["t"] != pdf["t"]])
self.assert_eq(psdf[psdf["t"] != psdf["t"]].dtypes, pdf[pdf["t"] != pdf["t"]].dtypes)
def test_to_spark(self):
psdf = ps.from_pandas(self.pdf)
with self.assertRaisesRegex(ValueError, "'index_col' cannot be overlapped"):
psdf.to_spark(index_col="a")
with self.assertRaisesRegex(ValueError, "length of index columns.*1.*3"):
psdf.to_spark(index_col=["x", "y", "z"])
def test_keys(self):
pdf = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.keys(), pdf.keys())
def test_quantile(self):
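# Note: quantile in pandas-on-Spark is backed by an approximate percentile computation
# (hence the `accuracy` argument checked below), so exact agreement with pandas is only
# expected for small, simple frames like these.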
pdf, psdf = self.df_pair
self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75]))
self.assert_eq(psdf.loc[[]].quantile(0.5), pdf.loc[[]].quantile(0.5))
self.assert_eq(
psdf.loc[[]].quantile([0.25, 0.5, 0.75]), pdf.loc[[]].quantile([0.25, 0.5, 0.75])
)
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.quantile(0.5, axis=1)
with self.assertRaisesRegex(TypeError, "accuracy must be an integer; however"):
psdf.quantile(accuracy="a")
with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"):
psdf.quantile(q="a")
with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"):
psdf.quantile(q=["a"])
with self.assertRaisesRegex(
ValueError, r"percentiles should all be in the interval \[0, 1\]"
):
psdf.quantile(q=[1.1])
self.assert_eq(
psdf.quantile(0.5, numeric_only=False), pdf.quantile(0.5, numeric_only=False)
)
self.assert_eq(
psdf.quantile([0.25, 0.5, 0.75], numeric_only=False),
pdf.quantile([0.25, 0.5, 0.75], numeric_only=False),
)
# multi-index column
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75]))
pdf = pd.DataFrame({"x": ["a", "b", "c"]})
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75]))
else:
self.assert_eq(psdf.quantile(0.5), pd.Series(name=0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pd.DataFrame(index=[0.25, 0.5, 0.75]))
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.quantile(0.5, numeric_only=False)
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.quantile([0.25, 0.5, 0.75], numeric_only=False)
def test_pct_change(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0], "c": [300, 200, 400, 200]},
index=np.random.rand(4),
)
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.pct_change(2), pdf.pct_change(2), check_exact=False)
self.assert_eq(psdf.pct_change().sum(), pdf.pct_change().sum(), check_exact=False)
def test_where(self):
pdf, psdf = self.df_pair
# pandas requires the `axis` argument when `other` is a Series.
# `axis` is not yet fully supported in pandas-on-Spark.
self.assert_eq(
psdf.where(psdf > 2, psdf.a + 10, axis=0), pdf.where(pdf > 2, pdf.a + 10, axis=0)
)
with self.assertRaisesRegex(TypeError, "type of cond must be a DataFrame or Series"):
psdf.where(1)
def test_mask(self):
psdf = ps.from_pandas(self.pdf)
with self.assertRaisesRegex(TypeError, "type of cond must be a DataFrame or Series"):
psdf.mask(1)
def test_query(self):
pdf = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2), "C": range(10, 5, -1)})
psdf = ps.from_pandas(pdf)
exprs = ("A > B", "A < C", "C == B")
for expr in exprs:
self.assert_eq(psdf.query(expr), pdf.query(expr))
# test `inplace=True`
for expr in exprs:
dummy_psdf = psdf.copy()
dummy_pdf = pdf.copy()
pser = dummy_pdf.A
psser = dummy_psdf.A
dummy_pdf.query(expr, inplace=True)
dummy_psdf.query(expr, inplace=True)
self.assert_eq(dummy_psdf, dummy_pdf)
self.assert_eq(psser, pser)
# invalid values for `expr`
invalid_exprs = (1, 1.0, (exprs[0],), [exprs[0]])
for expr in invalid_exprs:
with self.assertRaisesRegex(
TypeError,
"expr must be a string to be evaluated, {} given".format(type(expr).__name__),
):
psdf.query(expr)
# invalid values for `inplace`
invalid_inplaces = (1, 0, "True", "False")
for inplace in invalid_inplaces:
with self.assertRaisesRegex(
TypeError,
'For argument "inplace" expected type bool, received type {}.'.format(
type(inplace).__name__
),
):
psdf.query("a < b", inplace=inplace)
# MultiIndex columns are not supported
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
psdf.columns = columns
with self.assertRaisesRegex(TypeError, "Doesn't support for MultiIndex columns"):
psdf.query("('A', 'Z') > ('B', 'X')")
def test_take(self):
pdf = pd.DataFrame(
{"A": range(0, 50000), "B": range(100000, 0, -2), "C": range(100000, 50000, -1)}
)
psdf = ps.from_pandas(pdf)
# axis=0 (default)
self.assert_eq(psdf.take([1, 2]).sort_index(), pdf.take([1, 2]).sort_index())
self.assert_eq(psdf.take([-1, -2]).sort_index(), pdf.take([-1, -2]).sort_index())
self.assert_eq(
psdf.take(range(100, 110)).sort_index(), pdf.take(range(100, 110)).sort_index()
)
self.assert_eq(
psdf.take(range(-110, -100)).sort_index(), pdf.take(range(-110, -100)).sort_index()
)
self.assert_eq(
psdf.take([10, 100, 1000, 10000]).sort_index(),
pdf.take([10, 100, 1000, 10000]).sort_index(),
)
self.assert_eq(
psdf.take([-10, -100, -1000, -10000]).sort_index(),
pdf.take([-10, -100, -1000, -10000]).sort_index(),
)
# axis=1
self.assert_eq(
psdf.take([1, 2], axis=1).sort_index(), pdf.take([1, 2], axis=1).sort_index()
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index()
)
self.assert_eq(
psdf.take(range(1, 3), axis=1).sort_index(),
pdf.take(range(1, 3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take(range(-1, -3), axis=1).sort_index(),
pdf.take(range(-1, -3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take([2, 1], axis=1).sort_index(),
pdf.take([2, 1], axis=1).sort_index(),
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(),
pdf.take([-1, -2], axis=1).sort_index(),
)
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
psdf.columns = columns
pdf.columns = columns
# MultiIndex columns with axis=0 (default)
self.assert_eq(psdf.take([1, 2]).sort_index(), pdf.take([1, 2]).sort_index())
self.assert_eq(psdf.take([-1, -2]).sort_index(), pdf.take([-1, -2]).sort_index())
self.assert_eq(
psdf.take(range(100, 110)).sort_index(), pdf.take(range(100, 110)).sort_index()
)
self.assert_eq(
psdf.take(range(-110, -100)).sort_index(), pdf.take(range(-110, -100)).sort_index()
)
self.assert_eq(
psdf.take([10, 100, 1000, 10000]).sort_index(),
pdf.take([10, 100, 1000, 10000]).sort_index(),
)
self.assert_eq(
psdf.take([-10, -100, -1000, -10000]).sort_index(),
pdf.take([-10, -100, -1000, -10000]).sort_index(),
)
# axis=1
self.assert_eq(
psdf.take([1, 2], axis=1).sort_index(), pdf.take([1, 2], axis=1).sort_index()
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index()
)
self.assert_eq(
psdf.take(range(1, 3), axis=1).sort_index(),
pdf.take(range(1, 3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take(range(-1, -3), axis=1).sort_index(),
pdf.take(range(-1, -3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take([2, 1], axis=1).sort_index(),
pdf.take([2, 1], axis=1).sort_index(),
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(),
pdf.take([-1, -2], axis=1).sort_index(),
)
# Checking the type of indices.
self.assertRaises(TypeError, lambda: psdf.take(1))
self.assertRaises(TypeError, lambda: psdf.take("1"))
self.assertRaises(TypeError, lambda: psdf.take({1, 2}))
self.assertRaises(TypeError, lambda: psdf.take({1: None, 2: None}))
def test_axes(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.axes, psdf.axes)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.axes, psdf.axes)
def test_udt(self):
sparse_values = {0: 0.1, 1: 1.1}
sparse_vector = SparseVector(len(sparse_values), sparse_values)
pdf = pd.DataFrame({"a": [sparse_vector], "b": [10]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_eval(self):
pdf = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2)})
psdf = ps.from_pandas(pdf)
# operation between columns (returns Series)
self.assert_eq(pdf.eval("A + B"), psdf.eval("A + B"))
self.assert_eq(pdf.eval("A + A"), psdf.eval("A + A"))
# assignment (returns DataFrame)
self.assert_eq(pdf.eval("C = A + B"), psdf.eval("C = A + B"))
self.assert_eq(pdf.eval("A = A + A"), psdf.eval("A = A + A"))
# operation between scalars (returns scalar)
self.assert_eq(pdf.eval("1 + 1"), psdf.eval("1 + 1"))
# complicated operations with assignment
self.assert_eq(
pdf.eval("B = A + B // (100 + 200) * (500 - B) - 10.5"),
psdf.eval("B = A + B // (100 + 200) * (500 - B) - 10.5"),
)
# inplace=True (only support for assignment)
pdf.eval("C = A + B", inplace=True)
psdf.eval("C = A + B", inplace=True)
self.assert_eq(pdf, psdf)
pser = pdf.A
psser = psdf.A
pdf.eval("A = B + C", inplace=True)
psdf.eval("A = B + C", inplace=True)
self.assert_eq(pdf, psdf)
self.assert_eq(pser, psser)
# multi-index columns are not supported
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b"), ("z", "c")])
psdf.columns = columns
self.assertRaises(TypeError, lambda: psdf.eval("x.a + y.b"))
@unittest.skipIf(not have_tabulate, tabulate_requirement_message)
def test_to_markdown(self):
pdf = pd.DataFrame(data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]})
psdf = ps.from_pandas(pdf)
# `to_markdown()` requires pandas >= 1.0.0, where it was first added.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
self.assertRaises(NotImplementedError, lambda: psdf.to_markdown())
else:
self.assert_eq(pdf.to_markdown(), psdf.to_markdown())
def test_cache(self):
pdf = pd.DataFrame(
[(0.2, 0.3), (0.0, 0.6), (0.6, 0.0), (0.2, 0.1)], columns=["dogs", "cats"]
)
psdf = ps.from_pandas(pdf)
with psdf.spark.cache() as cached_df:
self.assert_eq(isinstance(cached_df, CachedDataFrame), True)
self.assert_eq(
repr(cached_df.spark.storage_level), repr(StorageLevel(True, True, False, True))
)
def test_persist(self):
pdf = pd.DataFrame(
[(0.2, 0.3), (0.0, 0.6), (0.6, 0.0), (0.2, 0.1)], columns=["dogs", "cats"]
)
psdf = ps.from_pandas(pdf)
storage_levels = [
StorageLevel.DISK_ONLY,
StorageLevel.MEMORY_AND_DISK,
StorageLevel.MEMORY_ONLY,
StorageLevel.OFF_HEAP,
]
for storage_level in storage_levels:
with psdf.spark.persist(storage_level) as cached_df:
self.assert_eq(isinstance(cached_df, CachedDataFrame), True)
self.assert_eq(repr(cached_df.spark.storage_level), repr(storage_level))
self.assertRaises(TypeError, lambda: psdf.spark.persist("DISK_ONLY"))
def test_squeeze(self):
axises = [None, 0, 1, "rows", "index", "columns"]
# Multiple columns
pdf = pd.DataFrame([[1, 2], [3, 4]], columns=["a", "b"], index=["x", "y"])
psdf = ps.from_pandas(pdf)
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Multiple columns with MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X")])
pdf.columns = columns
psdf.columns = columns
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with single value
pdf = pd.DataFrame([[1]], columns=["a"], index=["x"])
psdf = ps.from_pandas(pdf)
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with single value with MultiIndex column
columns = pd.MultiIndex.from_tuples([("A", "Z")])
pdf.columns = columns
psdf.columns = columns
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with multiple values
pdf = pd.DataFrame([1, 2, 3, 4], columns=["a"])
psdf = ps.from_pandas(pdf)
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with multiple values with MultiIndex column
pdf.columns = columns
psdf.columns = columns
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
def test_rfloordiv(self):
pdf = pd.DataFrame(
{"angles": [0, 3, 4], "degrees": [360, 180, 360]},
index=["circle", "triangle", "rectangle"],
columns=["angles", "degrees"],
)
psdf = ps.from_pandas(pdf)
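# The hard-coded frame below covers pandas 0.24.x, where floor division by zero yielded
# the inf/0.0 values shown; newer pandas is compared against directly.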
if LooseVersion(pd.__version__) < LooseVersion("1.0.0") and LooseVersion(
pd.__version__
) >= LooseVersion("0.24.0"):
expected_result = pd.DataFrame(
{"angles": [np.inf, 3.0, 2.0], "degrees": [0.0, 0.0, 0.0]},
index=["circle", "triangle", "rectangle"],
columns=["angles", "degrees"],
)
else:
expected_result = pdf.rfloordiv(10)
self.assert_eq(psdf.rfloordiv(10), expected_result)
def test_truncate(self):
pdf1 = pd.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[-500, -20, -1, 0, 400, 550, 1000],
)
psdf1 = ps.from_pandas(pdf1)
pdf2 = pd.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[1000, 550, 400, 0, -1, -20, -500],
)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf1.truncate(), pdf1.truncate())
self.assert_eq(psdf1.truncate(before=-20), pdf1.truncate(before=-20))
self.assert_eq(psdf1.truncate(after=400), pdf1.truncate(after=400))
self.assert_eq(psdf1.truncate(copy=False), pdf1.truncate(copy=False))
self.assert_eq(psdf1.truncate(-20, 400, copy=False), pdf1.truncate(-20, 400, copy=False))
# The pandas bug exercised by these tests was fixed in pandas 1.1.0.
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
self.assert_eq(psdf2.truncate(0, 550), pdf2.truncate(0, 550))
self.assert_eq(psdf2.truncate(0, 550, copy=False), pdf2.truncate(0, 550, copy=False))
else:
expected_psdf = ps.DataFrame(
{"A": ["b", "c", "d"], "B": ["i", "j", "k"], "C": ["p", "q", "r"]},
index=[550, 400, 0],
)
self.assert_eq(psdf2.truncate(0, 550), expected_psdf)
self.assert_eq(psdf2.truncate(0, 550, copy=False), expected_psdf)
# axis = 1
self.assert_eq(psdf1.truncate(axis=1), pdf1.truncate(axis=1))
self.assert_eq(psdf1.truncate(before="B", axis=1), pdf1.truncate(before="B", axis=1))
self.assert_eq(psdf1.truncate(after="A", axis=1), pdf1.truncate(after="A", axis=1))
self.assert_eq(psdf1.truncate(copy=False, axis=1), pdf1.truncate(copy=False, axis=1))
self.assert_eq(psdf2.truncate("B", "C", axis=1), pdf2.truncate("B", "C", axis=1))
self.assert_eq(
psdf1.truncate("B", "C", copy=False, axis=1),
pdf1.truncate("B", "C", copy=False, axis=1),
)
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "Z")])
pdf1.columns = columns
psdf1.columns = columns
pdf2.columns = columns
psdf2.columns = columns
self.assert_eq(psdf1.truncate(), pdf1.truncate())
self.assert_eq(psdf1.truncate(before=-20), pdf1.truncate(before=-20))
self.assert_eq(psdf1.truncate(after=400), pdf1.truncate(after=400))
self.assert_eq(psdf1.truncate(copy=False), pdf1.truncate(copy=False))
self.assert_eq(psdf1.truncate(-20, 400, copy=False), pdf1.truncate(-20, 400, copy=False))
# The pandas bug exercised by these tests was fixed in pandas 1.1.0.
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
self.assert_eq(psdf2.truncate(0, 550), pdf2.truncate(0, 550))
self.assert_eq(psdf2.truncate(0, 550, copy=False), pdf2.truncate(0, 550, copy=False))
else:
expected_psdf.columns = columns
self.assert_eq(psdf2.truncate(0, 550), expected_psdf)
self.assert_eq(psdf2.truncate(0, 550, copy=False), expected_psdf)
# axis = 1
self.assert_eq(psdf1.truncate(axis=1), pdf1.truncate(axis=1))
self.assert_eq(psdf1.truncate(before="B", axis=1), pdf1.truncate(before="B", axis=1))
self.assert_eq(psdf1.truncate(after="A", axis=1), pdf1.truncate(after="A", axis=1))
self.assert_eq(psdf1.truncate(copy=False, axis=1), pdf1.truncate(copy=False, axis=1))
self.assert_eq(psdf2.truncate("B", "C", axis=1), pdf2.truncate("B", "C", axis=1))
self.assert_eq(
psdf1.truncate("B", "C", copy=False, axis=1),
pdf1.truncate("B", "C", copy=False, axis=1),
)
# Exceptions
psdf = ps.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[-500, 100, 400, 0, -1, 550, -20],
)
msg = "truncate requires a sorted index"
with self.assertRaisesRegex(ValueError, msg):
psdf.truncate()
psdf = ps.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[-500, -20, -1, 0, 400, 550, 1000],
)
msg = "Truncate: -20 must be after 400"
with self.assertRaisesRegex(ValueError, msg):
psdf.truncate(400, -20)
msg = "Truncate: B must be after C"
with self.assertRaisesRegex(ValueError, msg):
psdf.truncate("C", "B", axis=1)
def test_explode(self):
pdf = pd.DataFrame({"A": [[-1.0, np.nan], [0.0, np.inf], [1.0, -np.inf]], "B": 1})
pdf.index.name = "index"
pdf.columns.name = "columns"
psdf = ps.from_pandas(pdf)
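# DataFrame.explode was added in pandas 0.25, so for older pandas the expected frames
# are built by hand.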
if LooseVersion(pd.__version__) >= LooseVersion("0.25.0"):
expected_result1 = pdf.explode("A")
expected_result2 = pdf.explode("B")
else:
expected_result1 = pd.DataFrame(
{"A": [-1, np.nan, 0, np.inf, 1, -np.inf], "B": [1, 1, 1, 1, 1, 1]},
index=pd.Index([0, 0, 1, 1, 2, 2]),
)
expected_result1.index.name = "index"
expected_result1.columns.name = "columns"
expected_result2 = pdf
self.assert_eq(psdf.explode("A"), expected_result1, almost=True)
self.assert_eq(psdf.explode("B"), expected_result2)
self.assert_eq(psdf.explode("A").index.name, expected_result1.index.name)
self.assert_eq(psdf.explode("A").columns.name, expected_result1.columns.name)
self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"]))
# MultiIndex
midx = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"]
)
pdf.index = midx
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("0.25.0"):
expected_result1 = pdf.explode("A")
expected_result2 = pdf.explode("B")
else:
midx = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "a"), ("x", "b"), ("x", "b"), ("y", "c"), ("y", "c")],
names=["index1", "index2"],
)
expected_result1.index = midx
expected_result2 = pdf
self.assert_eq(psdf.explode("A"), expected_result1, almost=True)
self.assert_eq(psdf.explode("B"), expected_result2)
self.assert_eq(psdf.explode("A").index.names, expected_result1.index.names)
self.assert_eq(psdf.explode("A").columns.name, expected_result1.columns.name)
self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"]))
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X")], names=["column1", "column2"])
pdf.columns = columns
psdf.columns = columns
if LooseVersion(pd.__version__) >= LooseVersion("0.25.0"):
expected_result1 = pdf.explode(("A", "Z"))
expected_result2 = pdf.explode(("B", "X"))
expected_result3 = pdf.A.explode("Z")
else:
expected_result1.columns = columns
expected_result2 = pdf
expected_result3 = pd.DataFrame({"Z": [-1, np.nan, 0, np.inf, 1, -np.inf]}, index=midx)
expected_result3.index.name = "index"
expected_result3.columns.name = "column2"
self.assert_eq(psdf.explode(("A", "Z")), expected_result1, almost=True)
self.assert_eq(psdf.explode(("B", "X")), expected_result2)
self.assert_eq(psdf.explode(("A", "Z")).index.names, expected_result1.index.names)
self.assert_eq(psdf.explode(("A", "Z")).columns.names, expected_result1.columns.names)
self.assert_eq(psdf.A.explode("Z"), expected_result3, almost=True)
self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"]))
self.assertRaises(ValueError, lambda: psdf.explode("A"))
def test_spark_schema(self):
psdf = ps.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
columns=["a", "b", "c", "d", "e", "f"],
)
actual = psdf.spark.schema()
expected = (
StructType()
.add("a", "string", False)
.add("b", "long", False)
.add("c", "byte", False)
.add("d", "double", False)
.add("e", "boolean", False)
.add("f", "timestamp", False)
)
self.assertEqual(actual, expected)
actual = psdf.spark.schema("index")
expected = (
StructType()
.add("index", "long", False)
.add("a", "string", False)
.add("b", "long", False)
.add("c", "byte", False)
.add("d", "double", False)
.add("e", "boolean", False)
.add("f", "timestamp", False)
)
self.assertEqual(actual, expected)
def test_print_schema(self):
psdf = ps.DataFrame(
{"a": list("abc"), "b": list(range(1, 4)), "c": np.arange(3, 6).astype("i1")},
columns=["a", "b", "c"],
)
prev = sys.stdout
try:
out = StringIO()
sys.stdout = out
psdf.spark.print_schema()
actual = out.getvalue().strip()
self.assertTrue("a: string" in actual, actual)
self.assertTrue("b: long" in actual, actual)
self.assertTrue("c: byte" in actual, actual)
out = StringIO()
sys.stdout = out
psdf.spark.print_schema(index_col="index")
actual = out.getvalue().strip()
self.assertTrue("index: long" in actual, actual)
self.assertTrue("a: string" in actual, actual)
self.assertTrue("b: long" in actual, actual)
self.assertTrue("c: byte" in actual, actual)
finally:
sys.stdout = prev
def test_explain_hint(self):
psdf1 = ps.DataFrame(
{"lkey": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 5]},
columns=["lkey", "value"],
)
psdf2 = ps.DataFrame(
{"rkey": ["foo", "bar", "baz", "foo"], "value": [5, 6, 7, 8]},
columns=["rkey", "value"],
)
merged = psdf1.merge(psdf2.spark.hint("broadcast"), left_on="lkey", right_on="rkey")
prev = sys.stdout
try:
out = StringIO()
sys.stdout = out
merged.spark.explain()
actual = out.getvalue().strip()
self.assertTrue("Broadcast" in actual, actual)
finally:
sys.stdout = prev
def test_mad(self):
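# mad() is the mean absolute deviation from the column mean; the non-numeric column C
# is excluded from the result.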
pdf = pd.DataFrame(
{
"A": [1, 2, None, 4, np.nan],
"B": [-0.1, 0.2, -0.3, np.nan, 0.5],
"C": ["a", "b", "c", "d", "e"],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.mad(), pdf.mad())
self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1))
with self.assertRaises(ValueError):
psdf.mad(axis=2)
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("A", "Z")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.mad(), pdf.mad())
self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1))
pdf = pd.DataFrame({"A": [True, True, False, False], "B": [True, False, False, True]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.mad(), pdf.mad())
self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1))
def test_abs(self):
pdf = pd.DataFrame({"a": [-2, -1, 0, 1]})
psdf = ps.from_pandas(pdf)
self.assert_eq(abs(psdf), abs(pdf))
self.assert_eq(np.abs(psdf), np.abs(pdf))
def test_iteritems(self):
pdf = pd.DataFrame(
{"species": ["bear", "bear", "marsupial"], "population": [1864, 22000, 80000]},
index=["panda", "polar", "koala"],
columns=["species", "population"],
)
psdf = ps.from_pandas(pdf)
for (p_name, p_items), (k_name, k_items) in zip(pdf.iteritems(), psdf.iteritems()):
self.assert_eq(p_name, k_name)
self.assert_eq(p_items, k_items)
def test_tail(self):
pdf = pd.DataFrame({"x": range(1000)})
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.tail(), psdf.tail())
self.assert_eq(pdf.tail(10), psdf.tail(10))
self.assert_eq(pdf.tail(-990), psdf.tail(-990))
self.assert_eq(pdf.tail(0), psdf.tail(0))
self.assert_eq(pdf.tail(-1001), psdf.tail(-1001))
self.assert_eq(pdf.tail(1001), psdf.tail(1001))
self.assert_eq((pdf + 1).tail(), (psdf + 1).tail())
self.assert_eq((pdf + 1).tail(10), (psdf + 1).tail(10))
self.assert_eq((pdf + 1).tail(-990), (psdf + 1).tail(-990))
self.assert_eq((pdf + 1).tail(0), (psdf + 1).tail(0))
self.assert_eq((pdf + 1).tail(-1001), (psdf + 1).tail(-1001))
self.assert_eq((pdf + 1).tail(1001), (psdf + 1).tail(1001))
with self.assertRaisesRegex(TypeError, "bad operand type for unary -: 'str'"):
psdf.tail("10")
def test_last_valid_index(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, None], "b": [1.0, 2.0, 3.0, None], "c": [100, 200, 400, None]},
index=["Q", "W", "E", "R"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.last_valid_index(), psdf.last_valid_index())
self.assert_eq(pdf[[]].last_valid_index(), psdf[[]].last_valid_index())
# MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.last_valid_index(), psdf.last_valid_index())
# Empty DataFrame
pdf = pd.Series([]).to_frame()
psdf = ps.Series([]).to_frame()
self.assert_eq(pdf.last_valid_index(), psdf.last_valid_index())
def test_last(self):
index = pd.date_range("2018-04-09", periods=4, freq="2D")
pdf = pd.DataFrame([1, 2, 3, 4], index=index)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.last("1D"), psdf.last("1D"))
self.assert_eq(pdf.last(DateOffset(days=1)), psdf.last(DateOffset(days=1)))
with self.assertRaisesRegex(TypeError, "'last' only supports a DatetimeIndex"):
ps.DataFrame([1, 2, 3, 4]).last("1D")
def test_first(self):
index = pd.date_range("2018-04-09", periods=4, freq="2D")
pdf = pd.DataFrame([1, 2, 3, 4], index=index)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.first("1D"), psdf.first("1D"))
self.assert_eq(pdf.first(DateOffset(days=1)), psdf.first(DateOffset(days=1)))
with self.assertRaisesRegex(TypeError, "'first' only supports a DatetimeIndex"):
ps.DataFrame([1, 2, 3, 4]).first("1D")
def test_first_valid_index(self):
pdf = pd.DataFrame(
{"a": [None, 2, 3, 2], "b": [None, 2.0, 3.0, 1.0], "c": [None, 200, 400, 200]},
index=["Q", "W", "E", "R"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index())
self.assert_eq(pdf[[]].first_valid_index(), psdf[[]].first_valid_index())
# MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index())
# Empty DataFrame
pdf = pd.Series([]).to_frame()
psdf = ps.Series([]).to_frame()
self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index())
pdf = pd.DataFrame(
{"a": [None, 2, 3, 2], "b": [None, 2.0, 3.0, 1.0], "c": [None, 200, 400, 200]},
index=[
datetime(2021, 1, 1),
datetime(2021, 2, 1),
datetime(2021, 3, 1),
datetime(2021, 4, 1),
],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index())
def test_product(self):
pdf = pd.DataFrame(
{"A": [1, 2, 3, 4, 5], "B": [10, 20, 30, 40, 50], "C": ["a", "b", "c", "d", "e"]}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# Named columns
pdf.columns.name = "Koalas"
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# Named MultiIndex columns
pdf.columns.names = ["Hello", "Koalas"]
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# No numeric columns
pdf = pd.DataFrame({"key": ["a", "b", "c"], "val": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# No numeric named columns
pdf.columns.name = "Koalas"
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), almost=True)
# No numeric MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), almost=True)
# No numeric named MultiIndex columns
pdf.columns.names = ["Hello", "Koalas"]
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), almost=True)
# All NaN columns
pdf = pd.DataFrame(
{
"A": [np.nan, np.nan, np.nan, np.nan, np.nan],
"B": [10, 20, 30, 40, 50],
"C": ["a", "b", "c", "d", "e"],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False)
# All NaN named columns
pdf.columns.name = "Koalas"
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False)
# All NaN MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False)
# All NaN named MultiIndex columns
pdf.columns.names = ["Hello", "Koalas"]
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False)
def test_from_dict(self):
data = {"row_1": [3, 2, 1, 0], "row_2": [10, 20, 30, 40]}
pdf = pd.DataFrame.from_dict(data)
psdf = ps.DataFrame.from_dict(data)
self.assert_eq(pdf, psdf)
pdf = pd.DataFrame.from_dict(data, dtype="int8")
psdf = ps.DataFrame.from_dict(data, dtype="int8")
self.assert_eq(pdf, psdf)
pdf = pd.DataFrame.from_dict(data, orient="index", columns=["A", "B", "C", "D"])
psdf = ps.DataFrame.from_dict(data, orient="index", columns=["A", "B", "C", "D"])
self.assert_eq(pdf, psdf)
def test_pad(self):
pdf = pd.DataFrame(
{
"A": [None, 3, None, None],
"B": [2, 4, None, 3],
"C": [None, None, None, 1],
"D": [0, 1, 5, 4],
},
columns=["A", "B", "C", "D"],
)
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
self.assert_eq(pdf.pad(), psdf.pad())
# Test `inplace=True`
pdf.pad(inplace=True)
psdf.pad(inplace=True)
self.assert_eq(pdf, psdf)
else:
expected = ps.DataFrame(
{
"A": [None, 3, 3, 3],
"B": [2.0, 4.0, 4.0, 3.0],
"C": [None, None, None, 1],
"D": [0, 1, 5, 4],
},
columns=["A", "B", "C", "D"],
)
self.assert_eq(expected, psdf.pad())
# Test `inplace=True`
psdf.pad(inplace=True)
self.assert_eq(expected, psdf)
def test_backfill(self):
pdf = pd.DataFrame(
{
"A": [None, 3, None, None],
"B": [2, 4, None, 3],
"C": [None, None, None, 1],
"D": [0, 1, 5, 4],
},
columns=["A", "B", "C", "D"],
)
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
self.assert_eq(pdf.backfill(), psdf.backfill())
# Test `inplace=True`
pdf.backfill(inplace=True)
psdf.backfill(inplace=True)
self.assert_eq(pdf, psdf)
else:
expected = ps.DataFrame(
{
"A": [3.0, 3.0, None, None],
"B": [2.0, 4.0, 3.0, 3.0],
"C": [1.0, 1.0, 1.0, 1.0],
"D": [0, 1, 5, 4],
},
columns=["A", "B", "C", "D"],
)
self.assert_eq(expected, psdf.backfill())
# Test `inplace=True`
psdf.backfill(inplace=True)
self.assert_eq(expected, psdf)
def test_align(self):
pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])
psdf1 = ps.from_pandas(pdf1)
for join in ["outer", "inner", "left", "right"]:
for axis in [None, 0, 1]:
psdf_l, psdf_r = psdf1.align(psdf1[["b"]], join=join, axis=axis)
pdf_l, pdf_r = pdf1.align(pdf1[["b"]], join=join, axis=axis)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psdf_r, pdf_r)
psdf_l, psdf_r = psdf1[["a"]].align(psdf1[["b", "a"]], join=join, axis=axis)
pdf_l, pdf_r = pdf1[["a"]].align(pdf1[["b", "a"]], join=join, axis=axis)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psdf_r, pdf_r)
psdf_l, psdf_r = psdf1[["b", "a"]].align(psdf1[["a"]], join=join, axis=axis)
pdf_l, pdf_r = pdf1[["b", "a"]].align(pdf1[["a"]], join=join, axis=axis)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psdf_r, pdf_r)
psdf_l, psdf_r = psdf1.align(psdf1["b"], axis=0)
pdf_l, pdf_r = pdf1.align(pdf1["b"], axis=0)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psdf_r, pdf_r)
psdf_l, psser_b = psdf1[["a"]].align(psdf1["b"], axis=0)
pdf_l, pser_b = pdf1[["a"]].align(pdf1["b"], axis=0)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psser_b, pser_b)
self.assertRaises(ValueError, lambda: psdf1.align(psdf1, join="unknown"))
self.assertRaises(ValueError, lambda: psdf1.align(psdf1["b"]))
self.assertRaises(TypeError, lambda: psdf1.align(["b"]))
self.assertRaises(NotImplementedError, lambda: psdf1.align(psdf1["b"], axis=1))
pdf2 = pd.DataFrame({"a": [4, 5, 6], "d": ["d", "e", "f"]}, index=[10, 11, 12])
psdf2 = ps.from_pandas(pdf2)
for join in ["outer", "inner", "left", "right"]:
psdf_l, psdf_r = psdf1.align(psdf2, join=join, axis=1)
pdf_l, pdf_r = pdf1.align(pdf2, join=join, axis=1)
self.assert_eq(psdf_l.sort_index(), pdf_l.sort_index())
self.assert_eq(psdf_r.sort_index(), pdf_r.sort_index())
def test_between_time(self):
idx = pd.date_range("2018-04-09", periods=4, freq="1D20min")
pdf = pd.DataFrame({"A": [1, 2, 3, 4]}, index=idx)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
pdf.index.name = "ts"
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
# Column label is 'index'
pdf.columns = pd.Index(["index"])
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
# Both index name and column label are 'index'
pdf.index.name = "index"
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
# Index name is 'index', column label is ('X', 'A')
pdf.columns = pd.MultiIndex.from_arrays([["X"], ["A"]])
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
with self.assertRaisesRegex(
NotImplementedError, "between_time currently only works for axis=0"
):
psdf.between_time("0:15", "0:45", axis=1)
psdf = ps.DataFrame({"A": [1, 2, 3, 4]})
with self.assertRaisesRegex(TypeError, "Index must be DatetimeIndex"):
psdf.between_time("0:15", "0:45")
def test_at_time(self):
idx = pd.date_range("2018-04-09", periods=4, freq="1D20min")
pdf = pd.DataFrame({"A": [1, 2, 3, 4]}, index=idx)
psdf = ps.from_pandas(pdf)
psdf.at_time("0:20")
self.assert_eq(
pdf.at_time("0:20").sort_index(),
psdf.at_time("0:20").sort_index(),
)
# Index name is 'ts'
pdf.index.name = "ts"
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.at_time("0:20").sort_index(),
psdf.at_time("0:20").sort_index(),
)
# Index name is 'ts', column label is 'index'
pdf.columns = pd.Index(["index"])
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.at_time("0:40").sort_index(),
psdf.at_time("0:40").sort_index(),
)
# Both index name and column label are 'index'
pdf.index.name = "index"
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.at_time("0:40").sort_index(),
psdf.at_time("0:40").sort_index(),
)
# Index name is 'index', column label is ('X', 'A')
pdf.columns = pd.MultiIndex.from_arrays([["X"], ["A"]])
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.at_time("0:40").sort_index(),
psdf.at_time("0:40").sort_index(),
)
with self.assertRaisesRegex(NotImplementedError, "'asof' argument is not supported"):
psdf.at_time("0:15", asof=True)
with self.assertRaisesRegex(NotImplementedError, "at_time currently only works for axis=0"):
psdf.at_time("0:15", axis=1)
psdf = ps.DataFrame({"A": [1, 2, 3, 4]})
with self.assertRaisesRegex(TypeError, "Index must be DatetimeIndex"):
psdf.at_time("0:15")
def test_astype(self):
psdf = self.psdf
msg = "Only a column name can be used for the key in a dtype mappings argument."
with self.assertRaisesRegex(KeyError, msg):
psdf.astype({"c": float})
def test_describe(self):
psdf = self.psdf
msg = r"Percentiles should all be in the interval \[0, 1\]"
with self.assertRaisesRegex(ValueError, msg):
psdf.describe(percentiles=[1.1])
psdf = ps.DataFrame({"A": ["a", "b", "c"], "B": ["d", "e", "f"]})
msg = "Cannot describe a DataFrame without columns"
with self.assertRaisesRegex(ValueError, msg):
psdf.describe()
def test_getitem_with_none_key(self):
psdf = self.psdf
with self.assertRaisesRegex(KeyError, "none key"):
psdf[None]
def test_iter_dataframe(self):
pdf, psdf = self.df_pair
for value_psdf, value_pdf in zip(psdf, pdf):
self.assert_eq(value_psdf, value_pdf)
def test_combine_first(self):
pdf = pd.DataFrame(
{("X", "A"): [None, 0], ("X", "B"): [4, None], ("Y", "C"): [3, 3], ("Y", "B"): [1, 1]}
)
pdf1, pdf2 = pdf["X"], pdf["Y"]
psdf = ps.from_pandas(pdf)
psdf1, psdf2 = psdf["X"], psdf["Y"]
if LooseVersion(pd.__version__) >= LooseVersion("1.2.0"):
self.assert_eq(pdf1.combine_first(pdf2), psdf1.combine_first(psdf2))
else:
# pandas < 1.2.0 returns unexpected dtypes,
# please refer to https://github.com/pandas-dev/pandas/issues/28481 for details
expected_pdf = pd.DataFrame({"A": [None, 0], "B": [4.0, 1.0], "C": [3, 3]})
self.assert_eq(expected_pdf, psdf1.combine_first(psdf2))
if __name__ == "__main__":
from pyspark.pandas.tests.test_dataframe import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
taroplus/spark
|
python/pyspark/pandas/tests/test_dataframe.py
|
Python
|
apache-2.0
| 238,076
|
[
"Elk"
] |
819a8b5095eb83a3881ca94fbd203094eab1f8baed4672f809d09828f69fe978
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_stringgroup
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of StringGroup Avi RESTful Object
description:
- This module is used to configure StringGroup object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
description:
description:
- User defined description for the object.
kv:
description:
- Configure key value in the string group.
name:
description:
- Name of the string group.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Type of stringgroup.
- Enum options - SG_TYPE_STRING, SG_TYPE_KEYVAL.
- Default value when not specified in API or module is interpreted by Avi Controller as SG_TYPE_STRING.
required: true
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the string group.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create a string group configuration
avi_stringgroup:
controller: '{{ controller }}'
password: '{{ password }}'
username: '{{ username }}'
kv:
- key: text/html
- key: text/xml
- key: text/plain
- key: text/css
- key: text/javascript
- key: application/javascript
- key: application/x-javascript
- key: application/xml
- key: application/pdf
name: System-Compressible-Content-Types
tenant_ref: admin
type: SG_TYPE_STRING
"""
RETURN = '''
obj:
description: StringGroup (api/stringgroup) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
description=dict(type='str',),
kv=dict(type='list',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
type=dict(type='str', required=True),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'stringgroup',
set([]))
if __name__ == '__main__':
main()
|
simonwydooghe/ansible
|
lib/ansible/modules/network/avi/avi_stringgroup.py
|
Python
|
gpl-3.0
| 4,256
|
[
"VisIt"
] |
0a0a2a4363d6b6ca6534437a65690bfbfd06addabe16fbed9d09951e69e36634
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_tenant
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of Tenant Avi RESTful Object
description:
- This module is used to configure Tenant object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
config_settings:
description:
- Tenantconfiguration settings for tenant.
created_by:
description:
- Creator of this tenant.
description:
description:
- User defined description for the object.
local:
description:
- Boolean flag to set local.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
name:
description:
- Name of the object.
required: true
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create Tenant using Service Engines in provider mode
avi_tenant:
controller: ''
password: ''
username: ''
config_settings:
se_in_provider_context: false
tenant_access_to_provider_se: true
tenant_vrf: false
description: VCenter, Open Stack, AWS Virtual services
local: true
name: Demo
'''
RETURN = '''
obj:
description: Tenant (api/tenant) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
config_settings=dict(type='dict',),
created_by=dict(type='str',),
description=dict(type='str',),
local=dict(type='bool',),
name=dict(type='str', required=True),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'tenant',
set([]))
if __name__ == '__main__':
main()
|
andreaso/ansible
|
lib/ansible/modules/network/avi/avi_tenant.py
|
Python
|
gpl-3.0
| 3,806
|
[
"VisIt"
] |
ab154eb1599e41fa8c2c9f4ae69acd4e7e2a68d5f678d8fc07eb01fcea5d64a8
|
#!/usr/bin/python
# Howto, Code license, Credits, etc: http://code.google.com/B/BCI-Project-Triathlon/
noGL = False # Set noGL to True for disabling the use of OpenGL (to gain speed, or to avoid python-wx-opengl problems)
import pyfann
from pyfann import libfann
import string
import os
import sys
import random
import copy
import wx
import numpy
from array import array
import WXElements
try:
from wx import glcanvas
haveGLCanvas = True
except ImportError:
haveGLCanvas = False
noGL = True
print "Will start without OpenGL, because wx.glcanvas is not available."
try:
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
haveOpenGL = True
except ImportError:
haveOpenGL = False
noGL = True
print "Will start without OpenGL, because PyOpenGL is not available."
class AppSettings():
def __init__(self,
datafile,
desired_error = 0.0000000001,
iterations_between_reports = 1000):
self.datafile = datafile
self.desired_error = desired_error
self.iterations_between_reports = iterations_between_reports
f = open(datafile+".train", 'r')
firstline = f.readline()
        f.close()
l = string.split(firstline)
self.num_input = int(l[1])
self.num_output = int(l[2])
self.breeding = False
self.stage = 0
self.netsTried = 0
self.maxMutations = 18
self.populationSize = 12
self.trainingData = libfann.training_data()
self.trainingData.read_train_from_file(datafile+".train")
self.testData = libfann.training_data()
self.testData.read_train_from_file(datafile+".test")
self.flist = [libfann.LINEAR,libfann.SIGMOID,libfann.SIGMOID_STEPWISE,libfann.SIGMOID_SYMMETRIC,libfann.SIGMOID_SYMMETRIC_STEPWISE,
libfann.GAUSSIAN,libfann.GAUSSIAN_SYMMETRIC,libfann.ELLIOT,libfann.ELLIOT_SYMMETRIC,libfann.LINEAR_PIECE,
libfann.LINEAR_PIECE_SYMMETRIC,libfann.SIN_SYMMETRIC,libfann.COS_SYMMETRIC]
self.mutationlist = ["change_connection_rate",
"change_learning_rate",
"change_num_neurons_hidden",
"change_num_layers_hidden",
"change_max_iterations",
"change_training_algorithm",
"change_activation_function_hidden",
"change_activation_function_output",
"change_learning_momentum",
"change_activation_steepness_hidden",
"change_activation_steepness_output",
"change_training_param"]
self.trmutlist = ["change_connection_type",
"change_quickprop_decay",
"change_quickprop_mu",
"change_rprop_increase_factor",
"change_rprop_decrease_factor",
"change_rprop_delta_min",
"change_rprop_delta_max",
# "change_rprop_delta_zero"
]
class BreedingEventTimer(wx.Timer):
def __init__(self):
wx.Timer.__init__(self)
self.population = NeuralNetPopulation(maxSize=settings.populationSize)
self.childNN = ""
self.Start(20)
def Notify(self):
if settings.breeding:
self.evolve(1)
def evolve(self,steps):
if settings.breeding:
for i in range(steps):
newStage = 0
if settings.stage == 0:
names = ["Adam","Eve","Joe","Sue","Richard","Juan","Peter","Micheal","Olga","Sam","Olaf","Sasha","Eliza","Alan"]
for n in range(settings.populationSize):
newNet = NeuralNet(name = names[n%len(names)])
for each in range(50):
newNet.mutate()
newNet.train()
self.population.addIfBetter(newNet)
del newNet
newStage = 1
elif settings.stage == 1:
self.childNN = self.population.getAChild(settings.maxMutations)
neuralNetBreederApp.mainWindow.rightNet.setToNN(self.childNN)
newStage = 2
elif settings.stage == 2:
self.population.addIfBetter(self.childNN)
self.population.setBestUI()
newStage = 1
settings.stage = newStage
class NeuralNet():
def __init__(self,
name = "Eve",
generation = 1,
connection_rate = 0.5,
learning_rate = 0.5,
max_iterations = 50,
bornBefore = 0,
trainAlg = libfann.TRAIN_RPROP,
learning_momentum = 0.0,
neurons = [],
connectionType = "Sparse"):
settings.netsTried += 1
self.name = name
self.generation = generation
self.connection_rate = connection_rate
self.learning_rate = learning_rate
self.max_iterations = max_iterations
self.ann = ""
self.childrenHad = 0
self.bornBefore = bornBefore
self.trainAlg = trainAlg
self.learning_momentum = learning_momentum
self.mseHistory = []
self.testmseHistory = []
self.summedError = 1.0
self.neurons = copy.deepcopy(neurons)
if (self.neurons == []):
self.neurons = [[[settings.flist[random.randrange(len(settings.flist))],0.0001+(0.9999*random.random())],
[settings.flist[random.randrange(len(settings.flist))],0.0001+(0.9999*random.random())]] ,
[[settings.flist[random.randrange(len(settings.flist))],0.0001+(0.9999*random.random())]
for i in range(settings.num_output)]]
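        # Energy cost of this net: 0.001 per hidden layer plus 0.0001 per hidden neuron
        # (the output layer, neurons[-1], is excluded from the neuron count).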
self.foodcost = (0.001*(len(self.neurons)-1)) + (0.0001*sum(map(len,self.neurons[0:-1])))
self.connectionType = connectionType
if self.ann =="":
self.ann = libfann.neural_net()
def getChild(self, num_mutations):
self.childrenHad = self.childrenHad + 1
newANN = NeuralNet(name = (self.getNameStub(self.name) + "-" + str(self.generation + 1) + "-" +
str(self.childrenHad) + "-" + str(self.bornBefore + self.childrenHad)),
generation = self.generation + 1,
connection_rate = self.connection_rate,
learning_rate = self.learning_rate,
max_iterations = self.max_iterations,
bornBefore = self.bornBefore + self.childrenHad,
trainAlg = self.trainAlg,
learning_momentum = self.learning_momentum,
neurons = self.neurons,
connectionType = self.connectionType)
newANN.ann.set_quickprop_decay(self.ann.get_quickprop_decay())
newANN.ann.set_quickprop_mu(self.ann.get_quickprop_mu())
newANN.ann.set_rprop_increase_factor(self.ann.get_rprop_increase_factor())
newANN.ann.set_rprop_decrease_factor(self.ann.get_rprop_decrease_factor())
newANN.ann.set_rprop_delta_min(self.ann.get_rprop_delta_min())
newANN.ann.set_rprop_delta_max(self.ann.get_rprop_delta_max())
# newANN.ann.set_rprop_delta_zero(self.ann.get_rprop_delta_zero())
for each in range(random.randrange(num_mutations)):
newANN.mutate()
newANN.train()
return newANN
def mutate(self):
mutation = settings.mutationlist[random.randrange(len(settings.mutationlist))]
if mutation == "change_connection_rate":
self.connection_rate = self.connection_rate + (-0.1+(0.2*random.random()))
if self.connection_rate<0.001:
self.connection_rate = 0.001
elif self.connection_rate>1.0:
self.connection_rate = 1.0
elif mutation == "change_learning_rate":
self.learning_rate = self.learning_rate + (-0.1+(0.2*random.random()))
if self.learning_rate<0.00001:
self.learning_rate = 0.00001
elif self.learning_rate>0.99:
self.learning_rate = 0.99
elif mutation == "change_num_neurons_hidden":
layerIndex = random.randrange(len(self.neurons)-1)
if len(self.neurons[layerIndex]) <= 1:
self.neurons[layerIndex] = ([[settings.flist[random.randrange(len(settings.flist))],0.0001+(0.9999*random.random())]] +
self.neurons[layerIndex])
elif len(self.neurons[layerIndex]) >= 50:
del self.neurons[layerIndex][random.randrange(len(self.neurons[layerIndex]))]
else:
if random.random()>0.5:
self.neurons[layerIndex] = ([[settings.flist[random.randrange(len(settings.flist))],0.0001+(0.9999*random.random())]] +
self.neurons[layerIndex])
else:
del self.neurons[layerIndex][random.randrange(len(self.neurons[layerIndex]))]
elif mutation == "change_num_layers_hidden":
if len(self.neurons)==2:
self.neurons = [[[settings.flist[random.randrange(len(settings.flist))],0.0001+(0.9999*random.random())]
for each in range(1+random.randrange(10))]] + self.neurons
elif len(self.neurons)>=11:
del self.neurons[random.randrange(len(self.neurons)-1)]
elif random.random()>0.5:
del self.neurons[random.randrange(len(self.neurons)-1)]
else:
newLayerIndex = random.randrange(len(self.neurons)-1)
self.neurons = (self.neurons[:newLayerIndex] +
[[[settings.flist[random.randrange(len(settings.flist))],0.0001+(0.9999*random.random())]
for each in range(1+random.randrange(10))]] + self.neurons[newLayerIndex:])
elif mutation == "change_max_iterations":
self.max_iterations = int(float(self.max_iterations) * (0.5+(random.random())))
if self.max_iterations<10:
self.max_iterations = 10
elif self.max_iterations>50000:
self.max_iterations = 50000
elif mutation == "change_training_algorithm":
p = random.random()
if p < 0.25:
self.trainAlg = libfann.TRAIN_BATCH
elif p < 0.5:
self.trainAlg = libfann.TRAIN_RPROP
elif p < 0.75:
self.trainAlg = libfann.TRAIN_INCREMENTAL
else:
self.trainAlg = libfann.TRAIN_QUICKPROP
elif mutation == "change_activation_function_hidden":
layerIndex = random.randrange(len(self.neurons)-1)
neuronIndex = random.randrange(len(self.neurons[layerIndex]))
self.neurons[layerIndex][neuronIndex][0] = settings.flist[random.randrange(len(settings.flist))]
elif mutation == "change_activation_function_output":
layerIndex = -1
neuronIndex = random.randrange(len(self.neurons[layerIndex]))
self.neurons[layerIndex][neuronIndex][0] = settings.flist[random.randrange(len(settings.flist))]
elif mutation == "change_learning_momentum":
self.learning_momentum = self.learning_momentum + (-0.1+(0.2*random.random()))
if self.learning_momentum<0.0:
self.learning_momentum = 0.0
elif self.learning_momentum>0.99:
self.learning_momentum = 0.99
elif mutation == "change_activation_steepness_hidden":
layerIndex = random.randrange(len(self.neurons)-1)
neuronIndex = random.randrange(len(self.neurons[layerIndex]))
new = self.neurons[layerIndex][neuronIndex][1] + (-0.1+(0.2*random.random()))
if new <0.0001:
new = 0.001
elif new > 0.9999:
new = 0.9999
self.neurons[layerIndex][neuronIndex][1] = new
elif mutation == "change_activation_steepness_output":
layerIndex = -1
neuronIndex = random.randrange(len(self.neurons[layerIndex]))
new = self.neurons[layerIndex][neuronIndex][1] + (-0.1+(0.2*random.random()))
if new <0.0001:
new = 0.001
elif new > 0.9999:
new = 0.9999
self.neurons[layerIndex][neuronIndex][1] = new
elif mutation == "change_training_param":
trmutation = settings.trmutlist[random.randrange(len(settings.trmutlist))]
if trmutation == "change_connection_type":
if self.connectionType == 'Sparse':
self.connectionType = 'Shortcut'
elif self.connectionType == 'Shortcut':
self.connectionType = 'Sparse'
elif trmutation == "change_quickprop_decay":
new = self.ann.get_quickprop_decay()
new = new * (2.0*random.random())
if new < -0.3:
new = -0.3
elif new >= 0.0 :
new = -0.0000001
self.ann.set_quickprop_decay(new)
elif trmutation == "change_quickprop_mu":
new = self.ann.get_quickprop_mu()
new = new * (0.6+(0.8*random.random()))
if new <= 1.0:
new = 1.000001
elif new >= 3.0 :
new = 3.0
self.ann.set_quickprop_mu(new)
elif trmutation == "change_rprop_increase_factor":
new = self.ann.get_rprop_increase_factor()
new = new * (0.6+(0.8*random.random()))
if new <= 1.0:
new = 1.000001
elif new >= 3.0 :
new = 3.0
self.ann.set_rprop_increase_factor(new)
elif trmutation == "change_rprop_decrease_factor":
new = self.ann.get_rprop_decrease_factor()
new = new * (0.6+(0.8*random.random()))
if new <= 0.0:
new = 0.000001
elif new >= 1.0 :
new = 0.99999
self.ann.set_rprop_decrease_factor(new)
elif trmutation == "change_rprop_delta_min":
new = self.ann.get_rprop_delta_min()
new = new * (0.6+(0.8*random.random()))
if new <= 0.0:
new = 0.0
elif new >= 1.0 :
new = 0.99999
self.ann.set_rprop_delta_min(new)
elif trmutation == "change_rprop_delta_max":
new = self.ann.get_rprop_delta_max()
new = new * (0.6+(0.8*random.random()))
if new <= 1.0:
new = 1.0
elif new >= 200.0 :
new = 200.0
self.ann.set_rprop_delta_max(new)
# elif trmutation == "change_rprop_delta_zero":
# new = self.ann.get_rprop_delta_zero()
# new = new * (0.6+(0.8*random.random()))
# if new <= 0.0:
# new = 0.0001
# elif new >= 20.0 :
# new = 20.0
# self.ann.set_rprop_delta_zero(new)
self.foodcost = (0.001*(len(self.neurons)-1)) + (0.0001*sum(map(len,self.neurons[0:-1])))
def train(self):
self.ann.set_learning_momentum(self.learning_momentum)
self.ann.set_training_algorithm(self.trainAlg)
if self.connectionType == 'Sparse':
self.ann.create_sparse_array(self.connection_rate, [settings.num_input]+map(len,self.neurons))
elif self.connectionType == 'Shortcut':
self.ann.create_shortcut_array([settings.num_input]+map(len,self.neurons))
self.ann.set_learning_rate(self.learning_rate)
for layerIndex in range(len(self.neurons)):
for neuronIndex in range(len(self.neurons[layerIndex])):
funcSteep = self.neurons[layerIndex][neuronIndex]
self.ann.set_activation_function(funcSteep[0],layerIndex+1,neuronIndex)
self.ann.set_activation_steepness(funcSteep[1],layerIndex+1,neuronIndex)
itsLeft = self.max_iterations
while itsLeft > 0:
self.ann.train_on_data(settings.trainingData, 1, settings.iterations_between_reports, settings.desired_error)
itsLeft = itsLeft - 1
self.mseHistory.append(self.ann.get_MSE())
t = self.ann.test_data(settings.testData)
self.testmseHistory.append(t)
self.foodcost = 0.0000001*float(self.ann.get_total_connections())
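        # Overall fitness: training MSE (weight 0.9) plus test MSE (weight 1.1, so
        # generalisation counts slightly more) plus the connection-count penalty above.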
self.summedError = 0.9*self.mseHistory[-1] + 1.1*self.testmseHistory[-1] + self.foodcost
if str(self.summedError) == 'nan':
self.summedError = 9999999.0
neuralNetBreederApp.mainWindow.updateNumberOfNets()
def getNameStub(self,name):
result = name
if '-' in result:
result = result[0:name.index('-')]
return result
class NeuralNetPopulation():
def __init__(self,maxSize = 5):
self.maxSize = maxSize
self.subjects = []
self.lastSavedName = ""
def addIfBetter(self,newSubject):
if len(self.subjects)< self.maxSize:
self.subjects.append(newSubject)
subjectIndex = len(self.subjects)-1
neuralNetBreederApp.mainWindow.subjectPanels[subjectIndex].setToNN(self.subjects[subjectIndex])
else:
newTotalValue = newSubject.summedError
highestTotalIndex = 0
highestTotalValue = 0.0
for subjectIndex in range(len(self.subjects)):
if highestTotalValue < self.subjects[subjectIndex].summedError:
highestTotalValue = self.subjects[subjectIndex].summedError
highestTotalIndex = subjectIndex
if newTotalValue< highestTotalValue:
self.subjects[highestTotalIndex] = newSubject
neuralNetBreederApp.mainWindow.subjectPanels[highestTotalIndex].setToNN(self.subjects[highestTotalIndex])
def getAChild(self,maxMutations):
return (self.subjects[random.randrange(len(self.subjects))].getChild(maxMutations))
def setBestUI(self):
bestIndex = 0
bestTotalValue = 100.0
worstTotalValue = 0.0
for subjectIndex in range(len(self.subjects)):
if self.subjects[subjectIndex].summedError < bestTotalValue:
bestIndex = subjectIndex
bestTotalValue = self.subjects[subjectIndex].summedError
if self.subjects[subjectIndex].summedError > worstTotalValue:
worstTotalValue = self.subjects[subjectIndex].summedError
neuralNetBreederApp.mainWindow.leftNet.setToNN(self.subjects[bestIndex])
if self.subjects[bestIndex].name != self.lastSavedName:
self.lastSavedName = self.subjects[bestIndex].name
self.subjects[bestIndex].ann.save(settings.datafile+".net")
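        # Encode population quality in the window icon as a colour gradient from the
        # worst member's error (left) to the best member's error (right):
        # green = low error, red = high error.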
if worstTotalValue*0.5 >= 0.25:
fr = 255
fg = 0
fb = 0
elif worstTotalValue*0.5 >= 0.125:
fr = 255
fg = int((1.0-((worstTotalValue*0.5-0.125)*8.0))*255.0)
fb = 0
else:
fr = int(((worstTotalValue*0.5)*8.0)*255.0)
fg = 255
fb = 0
if bestTotalValue*0.5 >= 0.25:
tr = 255
tg = 0
tb = 0
elif bestTotalValue*0.5 >= 0.125:
tr = 255
tg = int((1.0-((bestTotalValue*0.5-0.125)*8.0))*255.0)
tb = 0
else:
tr = int(((bestTotalValue*0.5)*8.0)*255.0)
tg = 255
tb = 0
neuralNetBreederApp.setIcon(fr,fg,fb,tr,tg,tb)
class ErrorCanvas(WXElements.GLCanvasBase):
def InitGL(self):
self.history = []
light_diffuse = [1.0, 1.0, 1.0, 1.0]
light_position = [1.0, 1.0, 1.0, 0.0]
glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse)
glLightfv(GL_LIGHT0, GL_POSITION, light_position)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glEnable(GL_DEPTH_TEST)
glClearColor(0.0, 0.0, 0.0, 1.0)
glClearDepth(1.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(40.0, 1.0, 1.0, 30.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(0.0, 0.0, 10.0,
0.0, 0.0, 0.0,
0.0, 1.0, 0.0)
def OnDraw(self):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
glEnableClientState(GL_VERTEX_ARRAY)
if len(self.history)>=2:
wave_array = []
for historyIndex in range(len(self.history)):
wave_array.append([-1.0 + (2.0 * (float(historyIndex)/float(len(self.history)-1)) ),
-1.0 + (8.0 *self.history[historyIndex])])
wave_array.append([-1.0 + (2.0 * (float(historyIndex)/float(len(self.history)-1)) ),
-1.0 ])
if self.history[-1] >= 0.25:
glColor(1.0,0.0,0.0)
elif self.history[-1] >= 0.125:
glColor(1.0,1.0-((self.history[-1]-0.125)*8.0),0.0)
else:
glColor(((self.history[-1])*8.0),1.0,0.0)
glVertexPointerf(wave_array)
glDrawArrays(GL_QUAD_STRIP, 0, len(wave_array))
self.SwapBuffers()
def setHistory(self, history):
self.history = history
self.Refresh()
self.Update()
class NetPanel(wx.Panel):
def __init__(self, parent,panellabel):
wx.Panel.__init__(self, parent)
panelSizer = wx.FlexGridSizer(0,1,0,0)
panelSizer.AddGrowableCol(0)
panelText = wx.StaticText(self,label=panellabel)
panelSizer.Add(panelText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
nb = wx.Notebook(self)
page1 = wx.Panel(nb)
page1Sizer = wx.FlexGridSizer(0,1,0,0)
page1Sizer.AddGrowableCol(0)
self.nameText = wx.StaticText(page1,label=" ")
page1Sizer.Add(self.nameText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
self.layerSummaryText = wx.StaticText(page1,label="\n")
page1Sizer.Add(self.layerSummaryText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
self.connection_rateText = wx.StaticText(page1,label="")
page1Sizer.Add(self.connection_rateText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
page1.SetSizer(page1Sizer)
nb.AddPage(page1,"Neural net")
page2 = wx.Panel(nb)
page2Sizer = wx.FlexGridSizer(0,1,0,0)
page2Sizer.AddGrowableCol(0)
self.trainAlgText = wx.StaticText(page2,label="")
page2Sizer.Add(self.trainAlgText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
self.learning_rateText = wx.StaticText(page2,label="")
page2Sizer.Add(self.learning_rateText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
self.learning_momentumText = wx.StaticText(page2,label="")
page2Sizer.Add(self.learning_momentumText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
self.max_iterationsText = wx.StaticText(page2,label="")
page2Sizer.Add(self.max_iterationsText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
page2.SetSizer(page2Sizer)
nb.AddPage(page2,"Training")
page3 = wx.Panel(nb)
page3Sizer = wx.FlexGridSizer(0,1,0,0)
page3Sizer.AddGrowableCol(0)
page3Sizer.AddGrowableRow(0)
self.nn = ""
printButton = wx.Button(page3, label="Print to console")
printButton.Bind(wx.EVT_BUTTON, self.printDetails)
page3Sizer.Add(printButton, 0, wx.ALIGN_CENTER|wx.ALL, 4)
page3.SetSizer(page3Sizer)
nb.AddPage(page3,"Details")
panelSizer.Add(nb, 0, wx.EXPAND|wx.ALL, 4)
self.errorText = wx.StaticText(self,label="")
panelSizer.Add(self.errorText, 0, wx.ALIGN_LEFT|wx.ALL, )
self.errorCanvas = ''
if noGL:
self.errorCanvas = WXElements.NoGLVisualizationPanel(self)
else:
self.errorCanvas = ErrorCanvas(self)
panelSizer.AddGrowableRow(3)
panelSizer.Add(self.errorCanvas, 0, wx.EXPAND|wx.ALL, 4)
self.testerrorCanvas = ''
if noGL:
self.testerrorCanvas = WXElements.NoGLVisualizationPanel(self)
else:
self.testerrorCanvas = ErrorCanvas(self)
self.testerrorText = wx.StaticText(self,label="")
panelSizer.Add(self.testerrorText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
panelSizer.AddGrowableRow(5)
panelSizer.Add(self.testerrorCanvas, 0, wx.EXPAND|wx.ALL, 4)
self.foodText = wx.StaticText(self,label="")
panelSizer.Add(self.foodText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
self.qlText = wx.StaticText(self,label="")
panelSizer.Add(self.qlText, 0, wx.ALIGN_LEFT|wx.ALL, 4)
self.SetSizer(panelSizer)
self.SetAutoLayout(1)
def setToNN(self,neuralnet):
self.nn = neuralnet
self.nameText.SetLabel(" Name: "+neuralnet.name)
self.max_iterationsText.SetLabel(" Training Epochs: "+str(neuralnet.max_iterations))
self.learning_rateText.SetLabel(" Learning rate: "+str(neuralnet.learning_rate))
if (neuralnet.connectionType=='Sparse'):
self.connection_rateText.SetLabel(" "+str(neuralnet.ann.get_total_connections())+" Connections (no shortcuts)")
elif (neuralnet.connectionType=='Shortcut'):
self.connection_rateText.SetLabel(" "+str(neuralnet.ann.get_total_connections())+" Connections (including shortcuts)")
self.foodText.SetLabel(" Energy required: "+str(neuralnet.foodcost))
self.layerSummaryText.SetLabel(" "+str(1+len(neuralnet.neurons))+" Layers ("+
str(len(neuralnet.neurons)-1)+" hidden)\n "+
str(settings.num_input + sum(map(len,neuralnet.neurons)))+" Nodes total ("+
str(settings.num_input)+" in, "+
str(sum(map(len,neuralnet.neurons[0:-1])))+" hidden, "+
str(len(neuralnet.neurons[-1]))+" out)")
self.layerSummaryText.SetToolTip(wx.ToolTip("Nodes per layer: "+str([settings.num_input]+map(len,neuralnet.neurons))))
if neuralnet.trainAlg == 0:
self.trainAlgText.SetLabel(" Training algorithm: Backprop incremental")
self.trainAlgText.SetToolTip(wx.ToolTip("no special settings"))
elif neuralnet.trainAlg == 1:
self.trainAlgText.SetLabel(" Training algorithm: Backprop batch")
self.trainAlgText.SetToolTip(wx.ToolTip("no special settings"))
elif neuralnet.trainAlg == 2:
self.trainAlgText.SetLabel(" Training algorithm: iRPROP batch")
self.trainAlgText.SetToolTip(wx.ToolTip("increase factor: "+str(neuralnet.ann.get_rprop_increase_factor())+"\n"+
"decrease factor: "+str(neuralnet.ann.get_rprop_decrease_factor())+"\n"+
"delta min: "+str(neuralnet.ann.get_rprop_delta_min())+"\n"+
"delta max: "+str(neuralnet.ann.get_rprop_delta_max())))
elif neuralnet.trainAlg == 3:
self.trainAlgText.SetLabel(" Training algorithm: quickprop batch")
self.trainAlgText.SetToolTip(wx.ToolTip("decay: "+str(neuralnet.ann.get_quickprop_decay())+"\n"+
"mu: "+str(neuralnet.ann.get_quickprop_mu())))
self.learning_momentumText.SetLabel(" Learning momentum: "+str(neuralnet.learning_momentum))
self.errorText.SetLabel(" Mean Square Error: "+str(neuralnet.mseHistory[-1]))
self.testerrorText.SetLabel(" Test MSE: "+str(neuralnet.testmseHistory[-1]))
self.errorCanvas.setHistory(neuralnet.mseHistory)
self.testerrorCanvas.setHistory(neuralnet.testmseHistory)
self.qlText.SetLabel(" Total Quality: "+str(1.0-(2.0*neuralnet.summedError)))
def printDetails(self, event=None):
if self.nn != "":
print ("\nDetails about "+self.nameText.GetLabel()[7:]+":\n")
self.nn.ann.print_parameters()
self.nn.ann.print_connections()
else:
print ("\nYou have not started breeding yet.\n")
class GUIMain(wx.Frame):
def __init__(self):
wx.Frame.__init__(self,None,title="Triathlon Breeder",size=(300,600))
self.panel = wx.Panel(self, wx.ID_ANY)
MenuBar = wx.MenuBar()
self.FileMenu = wx.Menu()
item = self.FileMenu.Append(wx.ID_EXIT, text="Quit")
self.Bind(wx.EVT_MENU, self.OnQuit, item)
MenuBar.Append(self.FileMenu, "Menu")
self.SetMenuBar(MenuBar)
sizer = wx.FlexGridSizer(0,1,0,0)
sizer.AddGrowableCol(0)
self.netsTried = wx.StaticText(self.panel,label="Neural nets tried: 0")
sizer.Add(self.netsTried, 0, wx.EXPAND|wx.ALL, 2)
self.playButton = wx.Button(self.panel, label="Start breeding")
self.playButton.Bind(wx.EVT_BUTTON, self.OnPlay)
sizer.Add(self.playButton, 0, wx.EXPAND|wx.ALL, 2)
categoryNotebook = wx.Notebook(self.panel)
self.leftNet = NetPanel(categoryNotebook,"\n Best Translator\n (saved as "+settings.datafile+".net)\n")
self.rightNet = NetPanel(categoryNotebook,"\n New Translator\n (to be tested)\n")
subjectNBPanel = wx.Panel(categoryNotebook)
subjectNBSizer = wx.FlexGridSizer(0,1,0,0)
subjectNB = wx.Notebook(subjectNBPanel)
subjectNBSizer.AddGrowableCol(0)
subjectNBSizer.Add(wx.StaticText(subjectNBPanel,label=""), 0, wx.EXPAND|wx.ALL , 0)
subjectNBSizer.AddGrowableRow(1)
subjectNBSizer.Add(subjectNB, 0, wx.EXPAND)
subjectNBPanel.SetSizer(subjectNBSizer)
self.subjectPanels = []
for i in range(settings.populationSize):
self.subjectPanels.append(NetPanel(subjectNB," Population member"))
subjectNB.AddPage(self.subjectPanels[i],str(i+1))
categoryNotebook.AddPage(self.leftNet,"Best")
categoryNotebook.AddPage(subjectNBPanel,"Population")
categoryNotebook.AddPage(self.rightNet,"New")
sizer.AddGrowableRow(2)
sizer.Add(categoryNotebook, 0, wx.EXPAND|wx.ALL , 2)
self.panel.SetSizer(sizer)
self.panel.Layout()
def OnQuit(self, event=None):
self.Close()
def OnPlay(self, event=None):
if settings.breeding:
settings.breeding = False
self.playButton.SetLabel("Continue breeding")
else:
settings.breeding = True
self.playButton.SetLabel("Pause breeding")
def updateNumberOfNets(self):
self.netsTried.SetLabel("Neural nets tried: "+str(settings.netsTried))
class NeuralNetBreederApp(wx.App):
def __init__(self, redirect = False):
wx.App.__init__(self)
ib = wx.IconBundle()
bmp = self.make_grad_image(32,32, (0,0,0), (0,0,0))
icon = wx.EmptyIcon()
icon.CopyFromBitmap(bmp)
ib.AddIcon(icon)
self.mainWindow = GUIMain()
self.setIcon(0,0,0,0,0,0)
self.mainWindow.Show(True)
def setIcon(self,from_r,from_g,from_b,to_r,to_g,to_b):
ib = wx.IconBundle()
bmp = self.make_grad_image(32,32, (from_r,from_g,from_b), (to_r,to_g,to_b))
icon = wx.EmptyIcon()
icon.CopyFromBitmap(bmp)
ib.AddIcon(icon)
self.mainWindow.SetIcons(ib)
def make_grad_image(self, width, height, col_left, col_right):
array = numpy.zeros((height, width, 3), 'uint8')
alpha = numpy.linspace(0.0, 1.0, width)
color_gradient = numpy.outer(alpha, col_right) + \
numpy.outer((1.0-alpha), col_left)
array[:,:,:] = color_gradient
image = wx.EmptyImage(width, height)
image.SetData(array.tostring())
return wx.BitmapFromImage(image)
if __name__ == "__main__":
datafile = ""
if len(sys.argv)<2:
path = os.getcwd()
fileList = os.listdir(path)
profileList = []
for fileName in fileList:
if fileName[-5:] == "train":
profileList.append(fileName[:-6])
if len(profileList) > 0:
datafile = str(WXElements.selection("Select your Sample-set",profileList[0], profileList))
else:
print "Error: no profiles found"
else:
datafile = sys.argv[1]
if len(datafile)==0:
print ( "If you want to breed a neural net based on myProfile.train and myProfile.test,\n"+
"use: python Triathlon-Breeder.py myProfile")
else:
if os.path.exists(datafile+".train") and os.path.exists(datafile+".test"):
settings = AppSettings(datafile)
neuralNetBreederApp = NeuralNetBreederApp()
breedTimer = BreedingEventTimer()
neuralNetBreederApp.MainLoop()
else:
print "Error: no "+datafile+".train file\nor no "+datafile+".test file found."
|
Buggaboo/Triathlon
|
TriathlonBeta-orig/Triathlon-Breeder.py
|
Python
|
mit
| 34,534
|
[
"Gaussian"
] |
00f4273d43624aba4cfc6093f4b19e65acaffb736e1a60b5b374973a2a17e068
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example demonstrates how to handle policy violation errors.
To get ad groups, run get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import re
import suds
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
ad_group_ad_service = client.GetService('AdGroupAdService', 'v201502')
# Create text ad.
text_ad_operation = {
'operator': 'ADD',
'operand': {
'adGroupId': ad_group_id,
'ad': {
# The 'xsi_type' field allows you to specify the xsi:type of the
# object being created. It's only necessary when you must provide
# an explicit type that the client library can't infer.
'xsi_type': 'TextAd',
'headline': 'Mars Cruise!!!',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for everyone!',
'finalUrls': ['http://www.example.com'],
'displayUrl': 'www.example.com',
}
}
}
operations = [text_ad_operation]
# Validate the ad.
try:
# Enable "validate only" to check for errors.
client.validate_only = True
ad_group_ad_service.mutate(operations)
print 'Validation successful, no errors returned.'
except suds.WebFault, e:
for error in e.fault.detail.ApiExceptionFault.errors:
if error['ApiError.Type'] == 'PolicyViolationError':
operation_index = re.findall(r'operations\[(.*)\]\.',
error['fieldPath'])
if operation_index:
operation = operations[int(operation_index[0])]
print ('Ad with headline \'%s\' violated %s policy \'%s\'.' %
(operation['operand']['ad']['headline'],
'exemptable' if error['isExemptable'] else 'non-exemptable',
error['externalPolicyName']))
if error['isExemptable'].lower() == 'true':
# Add exemption request to the operation.
print ('Adding exemption request for policy name \'%s\' on text '
'\'%s\'.' %
(error['key']['policyName'], error['key']['violatingText']))
if 'exemptionRequests' not in operation:
operation['exemptionRequests'] = []
operation['exemptionRequests'].append({
'key': error['key']
})
else:
# Remove non-exemptable operation
print 'Removing the operation from the request.'
            operations.remove(operation)
else:
# Non-policy error returned, re-throw exception.
raise e
# Add these ads. Disable "validate only" so the ads will get created.
client.validate_only = False
if operations:
response = ad_group_ad_service.mutate(operations)
if response and response['value']:
ads = response['value']
print 'Added %s ad(s) to ad group %s.' % (len(ads), ad_group_id)
for ad in ads:
print (' Ad id is %s, type is %s and status is \'%s\'.' %
(ad['ad']['id'], ad['ad']['Ad.Type'], ad['status']))
else:
print 'No ads were added.'
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID)
|
wubr2000/googleads-python-lib
|
examples/adwords/v201502/error_handling/handle_policy_violation_error.py
|
Python
|
apache-2.0
| 4,149
|
[
"VisIt"
] |
5dbe05134131b08fb433a1ca65abd6b04270cea4bd4573290eca4393c107e0f5
|
"""
Created on 29/06/2015
@author: vgil
@brief: Performs some automatic analysis over the trajectories. The kind of analysis to be done is
specified on the command line (rmsf, sasa, radius of gyration, or a report analysis if it is a
PELE result).
"""
import os
import glob
import numpy
from optparse import OptionParser
from anmichelpers.tools.measure import ca_rmsf
mdtraj_accessible = True
try:
import mdtraj
except:
mdtraj_accessible = False
anmhelpers_accessible = True
try:
from anmichelpers.tools.tools import load_all_pdbs_ca
from anmichelpers.tools.measure import rmsf
except:
anmhelpers_accessible = False
VMD_MEASURE_SCRIPT = """# from http://www.ks.uiuc.edu/Research/vmd/mailing_list/vmd-l/7502.html
mol load pdb %s
set outfile [open %s w]
set nf [molinfo top get numframes]
set all [atomselect top "%s"]
for {set i 0} {$i<$nf} {incr i} {
$all frame $i
$all update
set sasa [measure %s $all]
puts $outfile "$sasa"
}
close $outfile
quit
"""
VMD_MEASURE_SASA_AND_RGYR_SCRIPT = """mol load pdb %s
set outfile_sasa [open %s w]
set outfile_rgyr [open %s w]
set nf [molinfo top get numframes]
set all [atomselect top "%s"]
for {set i 0} {$i<$nf} {incr i} {
$all frame $i
$all update
set sasa [measure sasa 1.4 $all]
set rgyr [measure rgyr $all]
puts $outfile_sasa "$sasa"
puts $outfile_rgyr "$rgyr"
}
close $outfile_sasa
close $outfile_rgyr
quit
"""
def execute_vmd_script(vmd_code):
open("tmp_vmd_script","w").write(vmd_code)
os.system("vmd -dispdev none -e tmp_vmd_script > vmd_out")
def get_vmd_array():
a = numpy.loadtxt("tmp_vmd_out")
os.system("rm tmp_vmd_script tmp_vmd_out vmd_out")
return a
def calculate_sasa_with_vmd(pdb_file, outfile, vmd_selection):
"""
    It executes a VMD script in order to calculate the trajectory's SASA (Angstrom^2).
'mdtraj' calculates it in nm^2.
"""
vmd_code = VMD_MEASURE_SCRIPT%(pdb_file, "tmp_vmd_out", vmd_selection, "sasa 1.4")
execute_vmd_script(vmd_code)
sasa = get_vmd_array()
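    # VMD reports SASA in Angstrom^2; divide by 100 to express it in nm^2
    # (1 nm^2 = 100 Angstrom^2), matching the units used by 'mdtraj'.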
sasa = sasa /100.
numpy.savetxt(outfile, sasa, "%.4f")
def calculate_rgyr_with_vmd(pdb_file, outfile, vmd_selection):
"""
http://www.ks.uiuc.edu/Research/vmd/vmd-1.9/ug/node135.html
"""
vmd_code = VMD_MEASURE_SCRIPT%(pdb_file, "tmp_vmd_out", vmd_selection, "rgyr")
execute_vmd_script(vmd_code)
rgyr = get_vmd_array()
numpy.savetxt(outfile, rgyr, "%.4f")
def calculate_sasa_and_rgyr_with_vmd(pdb_file, sasa_outfile, rgyr_outfile, vmd_selection):
vmd_code = VMD_MEASURE_SASA_AND_RGYR_SCRIPT%(pdb_file, "tmp_vmd_out_sasa", "tmp_vmd_out_rgyr", vmd_selection)
execute_vmd_script(vmd_code)
sasa = numpy.loadtxt("tmp_vmd_out_sasa")/100.
rgyr = numpy.loadtxt("tmp_vmd_out_rgyr")
os.system("rm tmp_vmd_script tmp_vmd_out_sasa tmp_vmd_out_rgyr vmd_out")
numpy.savetxt(sasa_outfile, sasa, "%.4f")
numpy.savetxt(rgyr_outfile, rgyr, "%.4f")
# def calculate_torsional_variation(pdb_file, outfile):
def analyze_trajectory(traj_path,
do_sasa, do_sasa_vmd,
do_rgyr, do_rgyr_vmd, vmd_selection,
do_rmsf,
report_pattern,
report_dir,
disp_logfile):
trajectory = None
data = None
if do_sasa:
print "Calculating SASA ..."
if trajectory is None:
trajectory = mdtraj.load(traj_path)
#Shrake, A; Rupley, JA. (1973) J Mol Biol 79 (2): 351--71.
sasa = mdtraj.shrake_rupley(trajectory, mode = 'residue').sum(axis=1)
numpy.savetxt(traj_path+'.sasa', sasa)
if do_sasa_vmd and do_rgyr_vmd:
calculate_sasa_and_rgyr_with_vmd(traj_path,
'%s.%s.sasa'%(traj_path, vmd_selection.replace(" ","_")),
'%s.%s.rgyr'%(traj_path, vmd_selection.replace(" ","_")),
vmd_selection)
if do_sasa_vmd and not do_rgyr_vmd:
calculate_sasa_with_vmd(traj_path,
'%s.%s.sasa'%(traj_path, vmd_selection.replace(" ","_")),
vmd_selection)
if do_rgyr_vmd and not do_sasa_vmd:
calculate_rgyr_with_vmd(traj_path,
'%s.%s.rgyr'%(traj_path, vmd_selection.replace(" ","_")),
vmd_selection)
if do_rgyr:
print "Calculating Radius of Gyration ..."
if trajectory is None:
trajectory = mdtraj.load(traj_path)
rgyr = mdtraj.compute_rg(trajectory)
numpy.savetxt(traj_path+'.rgyr', rgyr)
if trajectory is not None:
del trajectory
if do_rmsf:
print "Calculating RMSF ..."
# data, _ = load_all_pdbs_ca([{
# "source": traj_path,
# "base_selection":"name CA"
# }])
# rmsf_array = rmsf(data.structure_ensemble)
import prody
print "Loading structure..."
pdb = prody.proteins.pdbfile.parsePDB(traj_path, subset='ca')
print "Calculating ..."
rmsf_array = ca_rmsf(pdb)
numpy.savetxt(traj_path+'.rmsf', rmsf_array)
if data is not None:
del data
if report_pattern != "":
print "Extracting acceptance and energies from report files with pattern %s inside %s ..."%(report_pattern,
report_dir)
files = glob.glob(os.path.join(report_dir, report_pattern))
assert len(files)!=0, "No report file with pattern %s found inside %s"%(report_pattern,
report_dir)
all_accepted = []
all_total = []
all_energies = []
for report_file in files:
total, accepted, energies = process_report_file(report_file)
all_total.append(total)
all_accepted.append(accepted)
all_energies.append(list(energies))
total = numpy.sum(all_total)
accepted = numpy.sum(all_accepted)
acceptance = accepted / total
numpy.savetxt(traj_path+'.acc', [acceptance], fmt = "%.4f ")
energy_handler = open(traj_path+'.ener', "w")
for i in range(len(all_energies)):
for j in range(len(all_energies[i])):
energy_handler.write("%f\n"%all_energies[i][j])
energy_handler.write("###\n")
if disp_logfile != "":
handler = open(disp_logfile)
fractions = []
for line in handler:
if line[0:4] == "DBG:":
if "iterations performed" in line:
parts = line.split()
fractions.append(float(parts[1])/float(parts[3]))
numpy.savetxt(traj_path+'.frac', [numpy.mean(fractions)], fmt = "%.4f ")
handler.close()
def process_report_file(report_file):
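    # Parse a PELE report file: the header row names the columns; return the final
    # step count, the final number of accepted steps and the full energy column.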
handler = open(report_file)
lines = handler.readlines()
header = lines[0].split()
total_steps_index = header.index("Step")
accepted_steps_index = header.index("AcceptedSteps")
energy_index = header.index("Energy")
handler.close()
values = numpy.loadtxt(report_file,comments = "#").T
return values[total_steps_index][-1], values[accepted_steps_index][-1], values[energy_index]
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-i", dest="traj_path")
parser.add_option("--sasa", dest="do_sasa", action="store_true", default=False)
parser.add_option("--sasa-vmd", dest="do_sasa_vmd", action="store_true", default=False)
parser.add_option("--vmd-sel", dest="vmd_sel", default="all")
parser.add_option("--rgyr", dest="do_rgyr", action="store_true", default=False)
parser.add_option("--rgyr-vmd", dest="do_rgyr_vmd", action="store_true", default=False)
parser.add_option("--rmsf", dest="do_rmsf", action="store_true", default=False)
parser.add_option("--report", dest="report", default="")
parser.add_option("--report-dir", dest="report_dir", default="")
parser.add_option("--disp", dest="disp", default="")
(options, args) = parser.parse_args()
assert options.traj_path is not None, "It is mandatory to choose a valid trajectory file (-i option)"
if (options.do_sasa or options.do_rgyr) and not mdtraj_accessible:
print "It was not possible to load the 'mdtraj' module. Using the '--sasa' or '--rgyr' options is not possible."
exit()
if options.do_rmsf and not anmhelpers_accessible:
print "It was not possible to load the 'anmichelpers' module. Using the '--rmsf' option is not possible."
exit()
_, ext = os.path.splitext(options.traj_path)
if ext == ".txt":
print "Reading from ", options.traj_path
for line in open(options.traj_path).readlines():
parts = line.rstrip('\r\n').split()
print "Processing ", parts[0], " ..."
if len(parts) == 2:
analyze_trajectory(parts[0],
options.do_sasa,
options.do_sasa_vmd,
options.do_rgyr,
options.do_rgyr_vmd,
options.vmd_sel,
options.do_rmsf,
parts[1],
parts[2],
options.disp)
else:
analyze_trajectory(parts[0],
options.do_sasa,
options.do_sasa_vmd,
options.do_rgyr,
options.do_rgyr_vmd,
options.vmd_sel,
options.do_rmsf,
options.report,
options.report_dir,
options.disp)
else:
analyze_trajectory(options.traj_path,
options.do_sasa,
options.do_sasa_vmd,
options.do_rgyr,
options.do_rgyr_vmd,
options.vmd_sel,
options.do_rmsf,
options.report,
options.report_dir,
options.disp)
|
victor-gil-sepulveda/PhD-ANMPythonHelpers
|
trajectory_comparison/trajectory_full_analysis.py
|
Python
|
mit
| 10,635
|
[
"MDTraj",
"VMD"
] |
d2d76df4a054fb720919ea7b91a67349d3d279a5a6492c9b5d519d9223addfa2
|
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
"""
Gaussian Naive Bayes classification.
This checks that GaussianNB implements fit and predict and returns
correct values for a simple toy dataset.
"""
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
"""Test whether class priors are properly set. """
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
"""Test whether class priors are properly set. """
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
"""Test Multinomial Naive Bayes classification.
This checks that MultinomialNB implements fit and predict and returns
correct values for a simple toy dataset.
"""
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
"""Test picklability of discrete naive Bayes classifiers"""
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
"""Test input checks for the fit method"""
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
"""Test discrete NB classes' probability scores"""
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1]), 2)
assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
"""Test whether discrete NB classes fit a uniform prior
when fit_prior=False and class_prior=None"""
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
"""Test whether discrete NB classes use provided prior"""
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
"""Test whether discrete NB classes use provided prior
when using partial_fit"""
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([1, 0]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
"""coef_ and intercept_ should have shapes as in other linear models.
Non-regression test for issue #2127.
"""
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
"""Test for issue #4268.
Tests that the feature log prob value computed by BernoulliNB when
alpha=1.0 is equal to the expression given in Manning, Raghavan,
and Schuetze's "Introduction to Information Retrieval" book:
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
"""
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
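# A small illustrative check, not part of the original test suite: with
# Laplace smoothing (alpha=1) the estimate verified above is
# (count + alpha) / (class_count + 2 * alpha), e.g. a feature seen once in a
# class with two samples gives (1 + 1) / (2 + 2) = 0.5. The helper name and
# default arguments below are assumptions for illustration only.
def _manual_bernoulli_smoothing(count=1.0, class_count=2.0, alpha=1.0):
    # log P(feature present | class) under BernoulliNB-style smoothing
    return np.log((count + alpha) / (class_count + 2 * alpha))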
def test_bnb():
"""
Tests that BernoulliNB when alpha=1.0 gives the same values as
those given for the toy example in Manning, Raghavan, and
Schuetze's "Introduction to Information Retrieval" book:
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
"""
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([0, 1, 1, 0, 0, 1])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
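# A minimal worked-out sketch, not part of the original test suite, showing
# how the unnormalized scores above follow from the Bernoulli event model:
# prior * prod(p_i for present features) * prod(1 - p_i for absent features).
# The helper name is an assumption for illustration only.
def _manual_bernoulli_scores():
    class_prior = np.array([0.75, 0.25])
    feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
                             [1 / 3.0, 2 / 3.0, 2 / 3.0, 1 / 3.0, 1 / 3.0, 2 / 3.0]])
    x = np.array([0, 1, 1, 0, 0, 1])
    likelihood = np.prod(feature_prob ** x * (1 - feature_prob) ** (1 - x), axis=1)
    return class_prior * likelihood  # approx. [0.005184, 0.021948]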
|
mblondel/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
Python
|
bsd-3-clause
| 17,567
|
[
"Gaussian"
] |
edcc24c305473c079e2eb7e8709108ee3a89670925cb510d56fb43769004d30f
|
"""Dataset generation for simulation.
"""
from __future__ import division, absolute_import
import abc
from copy import deepcopy
import numpy as np
from scipy.stats import pearsonr
from bcn.bias import BiasLowRank, BiasUnconstrained
from bcn.redundant_signal import RedundantSignal
from bcn.missing import Missing
def estimate_partial_signal_characterists(mixed, correlation_threshold, true_pairs=None, true_directions=None, true_stds=None, true_correlations=None):
"""Estimate correlations, pairs, directions and strandard deviations from a corrupted signal.
Parameters
----------
mixed : numpy.ndarray, shape=(n_samples, n_features)
The bias corrupted low-rank matrix from which the bias is to be recovered.
correlation_threshold : float
The threshold to use when estimating pairs from a correlation matrix (the higher the fewer pairs).
true_pairs : dict, values=('space' : numpy.ndarray, elements=int, shape=(n, 2))
Sequence of true pairs given as tuples for both spaces in a dict.
true_directions : dict, values=('space' : numpy.ndarray, elements=int, len=n)
Sequence of true directions, e.g. -1, +1 for both spaces in a dict.
true_stds : dict, values=('space' : numpy.ndarray, elements=int, shape=(n, 2))
Sequence of true standard deviations of each pair for both spaces in a dict.
true_correlations : dict, values=('space' : numpy.ndarray, shape=(n_samples, n_samples) or shape=(n_features, n_features))
True correlation matrices for both spaces in a dict.
Returns
-------
estimates : dict
Dictionary of estimated signal characteristics.
"""
estimates = {'feature': {'mixed': mixed.T, 'shape': mixed.T.shape},
'sample': {'mixed': mixed, 'shape': mixed.shape}}
for space in ['feature', 'sample']:
if true_correlations is not None:
estimates[space]['estimated_correlations'] = true_correlations[space]
else:
estimates[space]['estimated_correlations'] = estimate_correlations(
estimates[space]['mixed'])
if true_pairs is not None:
estimates[space]['estimated_pairs'] = true_pairs[space]
else:
estimates[space]['estimated_pairs'] = estimate_pairs(
estimates[space]['estimated_correlations'], correlation_threshold)
if true_stds is not None:
estimates[space]['estimated_stds'] = true_stds[space]
else:
estimates[space]['estimated_stds'] = estimate_stds(
estimates[space]['mixed'], estimates[space]['estimated_pairs'])
if true_directions is not None:
estimates[space]['estimated_directions'] = true_directions[space]
else:
estimates[space]['estimated_directions'] = estimate_directions(
estimates[space]['estimated_correlations'], estimates[space]['estimated_pairs'])
return estimates
def transpose_view(X, space):
"""Transpose of input matrix if required.
Parameters
----------
X : numpy.ndarray, shape=(n_samples, n_features)
A matrix that may need to be transposed (view only).
space : str, values=('sample', 'feature')
The space the matrix should be for (determines if transpossed or not).
Returns
-------
X_transpose : numpy.ndarray, shape=(n_features, n_samples) or shape=(n_samples, n_features)
Possibly transposed inpute matrix X.
"""
if space == 'feature':
X_transpose = X.T
if space == 'sample':
X_transpose = X
return X_transpose
def opposite(space):
"""Convert to opposite dimension.
Parameters
----------
space : str, values=('feature', 'sample')
Dimension.
Returns
-------
return : str, values=('feature', 'sample')
Dimension.
"""
if space == 'feature':
return 'sample'
if space == 'sample':
return 'feature'
def estimate_pairs(correlations, threshold=0.8):
"""Estimate pairs from a correlation matrix.
Parameters
----------
correlations : numpy.ndarray, shape=(n_samples, n_samples)
A correlation matrix. Can contain numpy.nan values.
threshold : float
The threshold below which correlations are not considered as pairs.
Returns
-------
pairs : numpy.ndarray, shape=(<= n_samples, 2)
A sequence of pairs which contain the indices of samples that are strongly correlated (as determined by the threshold).
"""
correlations = np.nan_to_num(correlations)
correlations[np.absolute(correlations) < threshold] = 0
pairs = np.vstack(np.nonzero(np.tril(correlations, -1))).T
indices = np.arange(len(pairs))
np.random.shuffle(indices)
pairs = np.asarray(pairs)
pairs = pairs[indices]
return pairs
def estimate_correlations(mixed):
"""Estimate correlations from a `mixed` matrix.
Parameters
----------
mixed : numpy.ndarray, shape=(n_samples, n_features)
A matrix that requires bias removal. Can contain numpy.nan values.
Returns
-------
correlations : numpy.ndarray, shape=(n_samples, n_samples)
"""
correlations = np.zeros((mixed.shape[0], mixed.shape[0])) * np.nan
for i, a in enumerate(mixed):
bool_indices_a = np.isfinite(a)
for j, b in enumerate(mixed):
if i == j:
correlations[i, j] = 1
else:
bool_indices_b = np.isfinite(b)
bool_indices = np.logical_and(bool_indices_a, bool_indices_b)
if np.sum(bool_indices) < 3:
continue
else:
r = pearsonr(a[bool_indices], b[bool_indices])[0]
correlations[i, j] = r
return correlations
def estimate_directions(correlations, pairs):
"""Estimate directions from a correlation matrix for specific pairs.
Parameters
----------
correlations : numpy.ndarray, shape=(n_samples, n_samples)
A correlation matrix. Can contain nan values.
pairs : numpy.ndarray, shape=(< n_samples, 2)
A sequence of pairs which contain the indices of samples that are strongly correlated.
Returns
-------
directions : numpy.ndarray, shape=(< n_samples)
A sequence of -1 or +1 which indicates the direction of the correlation (e.g. anti or normal).
"""
directions = np.sign(correlations[pairs[:, 0], pairs[:, 1]])
return directions
def estimate_stds(mixed, pairs):
"""Estimate standard deviations from a mixed` matrix for specific pairs.
Parameters
----------
mixed : numpy.ndarray, shape=(n_samples, n_features)
A matrix that requires bias removal. Can contain numpy.nan values.
pairs : numpy.ndarray, shape=(< n_samples, 2)
A sequence of pairs which contain the indices of samples that are strongly correlated.
Returns
-------
stds : numpy.ndarray, shape=(< n_samples)
A sequence of estimated standard deviations.
"""
stds = []
for pair in pairs:
bool_indices_a = np.isfinite(mixed[pair[0]])
std_a = np.std(mixed[pair[0]][bool_indices_a])
# NOTE No need to check because there would be no pair if there were not 3 overlapping finite values for the pair (see estimate correlations).
if np.sum(bool_indices_a) < 3:
std_a = np.nan
bool_indices_b = np.isfinite(mixed[pair[1]])
std_b = np.std(mixed[pair[1]][bool_indices_b])
if np.sum(bool_indices_b) < 3:
std_b = np.nan
stds.append([std_a, std_b])
stds = np.vstack(stds)
return stds
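# A minimal usage sketch; the helper name and toy values below are assumptions
# added for illustration and are not part of the original module.
def _example_pair_estimation():
    toy = np.array([[1.0, 2.0, 3.0, 4.0],
                    [2.0, 4.0, 6.0, 8.0],   # strongly correlated with row 0
                    [4.0, 3.0, 2.0, 1.0]])  # anti-correlated with row 0
    correlations = estimate_correlations(toy)
    pairs = estimate_pairs(correlations, threshold=0.8)
    directions = estimate_directions(correlations, pairs)
    stds = estimate_stds(toy, pairs)
    return correlations, pairs, directions, stds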
def random_permutation(shape):
"""
Random permutation of a matrix in both feature and sample space.
Parameters
----------
shape = tuple of int
Shape of the matrix to be permuted.
Returns
-------
d : dict, elements=dict
Mapping from old indices to new indices.
inverse : dict, elements=dict
Mapping from new indices to old indices.
"""
a = np.arange(shape[0], dtype=int)
b = np.arange(shape[1], dtype=int)
new_a = np.random.permutation(shape[0])
new_b = np.random.permutation(shape[1])
d = {'feature': dict(zip(b, new_b)), 'sample': dict(zip(a, new_a))}
inverse = {'feature': dict(zip(new_b, b)), 'sample': dict(zip(new_a, a))}
return d, inverse
def shuffle_matrix(matrix, d_sample, d_feature=None):
"""
Shuffle a matrix in feature and sample space.
Parameters
----------
matrix : numpy.ndarray, shape=(n_samples, n_features) or shape=(n_features, n_samples)
Matrix to be shuffled.
d_sample : dict
How to shuffle.
d_feature : dict
How to shuffle.
Returns
-------
new_matrix : numpy.ndarray, shape=(n_samples, n_features) or shape=(n_features, n_samples)
Shuffled matrix.
"""
if d_feature is None:
d_feature = d_sample
x_indices = np.asarray([d_sample[i] for i in xrange(matrix.shape[0])])
y_indices = np.asarray([d_feature[i] for i in xrange(matrix.shape[1])])
new_matrix = matrix[x_indices]
new_matrix = new_matrix[:, y_indices]
return new_matrix
def shuffle_pairs(pairs, d):
"""
Shuffle pairs with a given mapping.
Parameters
----------
pairs : numpy.ndarray, shape=(n ,2)
Old pairs.
d : dict
Mapping for the shuffle.
"""
new_pairs = np.zeros_like(pairs, dtype=int)
for i in xrange(pairs.shape[0]):
for j in xrange(pairs.shape[1]):
new_pairs[i, j] = d[pairs[i, j]]
return new_pairs
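# A small round-trip sketch; the helper name and values are assumptions added
# for illustration only: shuffling with the forward maps from
# random_permutation and then with the inverse maps restores the input.
def _example_shuffle_roundtrip():
    matrix = np.arange(12).reshape(3, 4)
    forward, backward = random_permutation(matrix.shape)
    shuffled = shuffle_matrix(matrix, forward['sample'], forward['feature'])
    restored = shuffle_matrix(shuffled, backward['sample'], backward['feature'])
    assert np.array_equal(restored, matrix)
    return shuffled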
class DataSimulated(object):
def __init__(self, shape, rank, bias_model='gaussian', m_blocks_size=2, noise_amplitude=1.0, correlation_strength=1.0, missing_type='MAR', missing_fraction=0.1, image_source='../../tests/trump.png'):
"""Creates (simulates) and stores all the data of a bias recovery experiment.
Parameters
----------
shape : tuple of int
Shape of the mixed, signal, bias and missing matrix in the form of (n_samples, n_features).
rank : int
Rank of the low-rank decomposition.
bias_model : str
Bias model to be used.
m_blocks_size : int, default = 2
Size of each block (e.g. number of pairs). Factor to determine the number of blocks in the correlation matrix of features or samples that are varying together (with differences only in degree, direction and scale). Fewer blocks are better for bias recovery.
noise_amplitude : float, default = 1.0
Scale/amplitude of the bias (noise).
correlation_strength : float
Strength of all correlations in block matrix.
missing_type : {'MAR', 'NMAR', 'no-missing'}
The type of missing values, from none to censored.
missing_fraction : float
Percentage of missing values in missing matrix.
image_source : str
Path to the image used as bias.
"""
self.shape = shape
self.rank = rank
self.bias_model = bias_model
self.m_blocks_size = m_blocks_size
self.noise_amplitude = noise_amplitude
self.correlation_strength = correlation_strength
self.missing_type = missing_type
self.missing_fraction = missing_fraction
self.image_source = image_source
self.d = {'sample': {}, 'feature': {}}
# NOTE using the sample space to determine the m_blocks here.
m_blocks = self.shape[0] // self.m_blocks_size
# BiasUnconstrained(self.shape, bias_model='gaussian', noise_amplitude=1.0).generate()
bias_unshuffled = BiasLowRank(self.shape, self.rank, bias_model=self.bias_model,
noise_amplitude=self.noise_amplitude, image_source=self.image_source).generate()
self.map_forward_bias, self.map_backward_bias = random_permutation(
bias_unshuffled['X'].shape)
bias = shuffle_matrix(
bias_unshuffled['X'], self.map_forward_bias['sample'], self.map_forward_bias['feature'])
missing = Missing(self.shape, self.missing_type,
p_random=self.missing_fraction).generate()['X']
signal_unshuffled = RedundantSignal(
self.shape, 'random', m_blocks, self.correlation_strength).generate()
self.map_forward, self.map_backward = random_permutation(
signal_unshuffled['X'].shape)
signal = shuffle_matrix(
signal_unshuffled['X'], self.map_forward['sample'], self.map_forward['feature'])
mixed = signal + bias + missing
for space in ['sample', 'feature']:
self.d[space]['mixed'] = transpose_view(mixed, space)
self.d[space]['shape'] = self.d[space]['mixed'].shape
self.d[space]['signal_unshuffled'] = transpose_view(
signal_unshuffled['X'], space)
self.d[space]['signal'] = transpose_view(signal, space)
self.d[space]['true_missing'] = transpose_view(missing, space)
self.d[space]['true_bias_unshuffled'] = transpose_view(
bias_unshuffled['X'], space)
self.d[space]['true_bias'] = transpose_view(bias, space)
self.d[space]['true_correlations_unshuffled'] = signal_unshuffled[space]['correlation_matrix']
self.d[space]['true_correlations'] = shuffle_matrix(
signal_unshuffled[space]['correlation_matrix'], self.map_forward[space])
self.d[space]['true_pairs_unshuffled'] = signal_unshuffled[space]['pairs']
self.d[space]['true_pairs'] = shuffle_pairs(
signal_unshuffled[space]['pairs'], self.map_backward[space])
self.d[space]['true_stds'] = signal_unshuffled[space]['stds'][signal_unshuffled[space]['pairs']]
self.d[space]['true_directions'] = signal_unshuffled[space]['directions']
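# A hedged end-to-end sketch; the helper name, shape, rank and threshold are
# arbitrary assumptions chosen only to illustrate how DataSimulated feeds
# estimate_partial_signal_characterists, and are not part of the original module.
def _example_simulation_pipeline():
    data = DataSimulated(shape=(20, 30), rank=2, missing_type='no-missing')
    mixed = data.d['sample']['mixed']
    estimates = estimate_partial_signal_characterists(
        mixed, correlation_threshold=0.8,
        true_correlations={space: data.d[space]['true_correlations']
                           for space in ('sample', 'feature')})
    return estimates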
|
a378ec99/bcn
|
bcn/data.py
|
Python
|
mit
| 13,971
|
[
"Gaussian"
] |
83d5f9640dbcc61c03e8d754a4411195e158c2791e8b0a8e181655fdbfb7c9f1
|
from .config import *
from .compute_lst import compute_lst
def compute_lsrk(wf):
""" Computes the LSR in km/s
Computes the Local standard of rest kinematic using the time (MJD),
RA and DEC of the observation to compute along with the telescope location.
Requires pyslalib
Args:
wf (bl.Waterfall): Waterfall object for which to compute LSR
"""
ra = Angle(wf.header['src_raj'], unit='hourangle')
dec = Angle(wf.header['src_dej'], unit='degree')
mjdd = wf.header['tstart']
rarad = ra.to('radian').value
dcrad = dec.to('radian').value
last = compute_lst(wf)
tellat = np.deg2rad(wf.coords[0])
tellong = np.deg2rad(wf.coords[1])
# convert star position to vector
starvect = s.sla_dcs2c(rarad, dcrad)
# velocity component in ra,dec due to Earth rotation
Rgeo = s.sla_rverot(tellat, rarad, dcrad, last)
# get Barycentric and heliocentric velocity and position of the Earth.
evp = s.sla_evp(mjdd, 2000.0)
dvb = evp[0] # barycentric velocity vector, in AU/sec
dpb = evp[1] # barycentric position vector, in AU
dvh = evp[2] # heliocentric velocity vector, in AU/sec
dph = evp[3] # heliocentric position vector, in AU
# dot product of vector to object and heliocentric velocity
# convert AU/sec to km/sec
vcorhelio = -s.sla_dvdv(starvect, dvh) * 149.597870e6
vcorbary = -s.sla_dvdv(starvect, dvb) * 149.597870e6
# rvlsrd is velocity component in ra,dec direction due to the Sun's
# motion with respect to the "dynamical" local standard of rest
rvlsrd = s.sla_rvlsrd(rarad, dcrad)
# rvlsrk is velocity component in ra,dec direction due to
# the Sun's motion w.r.t. the "kinematic" local standard of rest
rvlsrk = s.sla_rvlsrk(rarad, dcrad)
# rvgalc is velocity component in ra,dec direction due to
# the rotation of the Galaxy.
rvgalc = s.sla_rvgalc(rarad, dcrad)
totalhelio = Rgeo + vcorhelio
totalbary = Rgeo + vcorbary
totallsrk = totalhelio + rvlsrk
totalgal = totalbary + rvlsrd + rvgalc
return totallsrk
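# A minimal usage sketch; the helper and file path below are hypothetical and
# not part of the original module.
def _example_compute_lsrk(filterbank_path):
    import blimpy as bl
    wf = bl.Waterfall(filterbank_path)  # e.g. a .fil/.h5 observation file
    return compute_lsrk(wf)             # LSR kinematic correction in km/s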
|
UCBerkeleySETI/blimpy
|
blimpy/ephemeris/compute_lsrk.py
|
Python
|
bsd-3-clause
| 2,089
|
[
"Galaxy"
] |
a5eb40418ee44bf09c57e9916b298b41ad31359ebfa98fe5877b322273cbc543
|
# This file is part of ts_wep.
#
# Developed for the LSST Telescope and Site Systems.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import numpy as np
import pandas as pd
from scipy.signal import correlate
import lsst.utils.tests
from lsst.afw import image as afwImage
from lsst.daf import butler as dafButler
from lsst.ts.wep.task.EstimateZernikesScienceSensorTask import (
EstimateZernikesScienceSensorTask,
EstimateZernikesScienceSensorTaskConfig,
)
from lsst.ts.wep.Utility import (
getModulePath,
runProgram,
DefocalType,
writePipetaskCmd,
writeCleanUpRepoCmd,
)
class TestEstimateZernikesScienceSensorTask(lsst.utils.tests.TestCase):
@classmethod
def setUpClass(cls):
"""
Run the pipeline only once since it takes a
couple minutes with the ISR.
"""
moduleDir = getModulePath()
testDataDir = os.path.join(moduleDir, "tests", "testData")
testPipelineConfigDir = os.path.join(testDataDir, "pipelineConfigs")
cls.repoDir = os.path.join(testDataDir, "gen3TestRepo")
cls.runName = "run1"
# Check that run doesn't already exist due to previous improper cleanup
butler = dafButler.Butler(cls.repoDir)
registry = butler.registry
collectionsList = list(registry.queryCollections())
if cls.runName in collectionsList:
cleanUpCmd = writeCleanUpRepoCmd(cls.repoDir, cls.runName)
runProgram(cleanUpCmd)
# Point to the collections for the reference catalogs,
# the raw images and the camera model in the calib directory
# that comes from `butler write-curated-calibrations`.
collections = "refcats,LSSTCam/calib,LSSTCam/raw/all"
instrument = "lsst.obs.lsst.LsstCam"
cls.cameraName = "LSSTCam"
pipelineYaml = os.path.join(testPipelineConfigDir, "testFamPipeline.yaml")
pipeCmd = writePipetaskCmd(
cls.repoDir, cls.runName, instrument, collections, pipelineYaml=pipelineYaml
)
pipeCmd += " -d 'exposure IN (4021123106001, 4021123106002)'"
runProgram(pipeCmd)
def setUp(self):
self.config = EstimateZernikesScienceSensorTaskConfig()
self.task = EstimateZernikesScienceSensorTask(config=self.config)
self.butler = dafButler.Butler(self.repoDir)
self.registry = self.butler.registry
self.dataIdExtra = {
"instrument": "LSSTCam",
"detector": 94,
"exposure": 4021123106001,
"visit": 4021123106001,
}
self.dataIdIntra = {
"instrument": "LSSTCam",
"detector": 94,
"exposure": 4021123106002,
"visit": 4021123106002,
}
def _generateTestExposures(self):
# Generate donut template
template = self.task.getTemplate("R22_S11", DefocalType.Extra)
correlatedImage = correlate(template, template)
maxIdx = np.argmax(correlatedImage)
maxLoc = np.unravel_index(maxIdx, np.shape(correlatedImage))
templateCenter = np.array(maxLoc) - self.task.donutTemplateSize / 2
# Make donut centered in exposure
initCutoutSize = (
self.task.donutTemplateSize + self.task.initialCutoutPadding * 2
)
centeredArr = np.zeros((initCutoutSize, initCutoutSize), dtype=np.float32)
centeredArr[
self.task.initialCutoutPadding : -self.task.initialCutoutPadding,
self.task.initialCutoutPadding : -self.task.initialCutoutPadding,
] += template
centeredImage = afwImage.ImageF(initCutoutSize, initCutoutSize)
centeredImage.array = centeredArr
centeredExp = afwImage.ExposureF(initCutoutSize, initCutoutSize)
centeredExp.setImage(centeredImage)
centerCoord = (
self.task.initialCutoutPadding + templateCenter[1],
self.task.initialCutoutPadding + templateCenter[0],
)
# Make new donut that needs to be shifted by 20 pixels
# from the edge of the exposure
offCenterArr = np.zeros((initCutoutSize, initCutoutSize), dtype=np.float32)
offCenterArr[
: self.task.donutTemplateSize - 20, : self.task.donutTemplateSize - 20
] = template[20:, 20:]
offCenterImage = afwImage.ImageF(initCutoutSize, initCutoutSize)
offCenterImage.array = offCenterArr
offCenterExp = afwImage.ExposureF(initCutoutSize, initCutoutSize)
offCenterExp.setImage(offCenterImage)
# Center coord value 20 pixels closer than template center
# due to stamp overrunning the edge of the exposure.
offCenterCoord = templateCenter - 20
return centeredExp, centerCoord, template, offCenterExp, offCenterCoord
def testValidateConfigs(self):
self.config.donutTemplateSize = 120
self.config.donutStampSize = 120
self.config.initialCutoutPadding = 290
self.task = EstimateZernikesScienceSensorTask(config=self.config)
self.assertEqual(self.task.donutTemplateSize, 120)
self.assertEqual(self.task.donutStampSize, 120)
self.assertEqual(self.task.initialCutoutPadding, 290)
def testAssignExtraIntraIdx(self):
focusZNegative = -1
focusZPositive = 1
focusZ0 = 0
extraIdx, intraIdx = self.task.assignExtraIntraIdx(
focusZNegative, focusZPositive
)
self.assertEqual(extraIdx, 1)
self.assertEqual(intraIdx, 0)
extraIdx, intraIdx = self.task.assignExtraIntraIdx(
focusZPositive, focusZNegative
)
self.assertEqual(extraIdx, 0)
self.assertEqual(intraIdx, 1)
with self.assertRaises(ValueError):
self.task.assignExtraIntraIdx(focusZPositive, focusZPositive)
with self.assertRaises(ValueError):
self.task.assignExtraIntraIdx(focusZPositive, focusZ0)
with self.assertRaises(ValueError):
self.task.assignExtraIntraIdx(focusZNegative, focusZNegative)
with self.assertRaises(ValueError):
self.task.assignExtraIntraIdx(focusZNegative, focusZ0)
with self.assertRaises(ValueError) as context:
self.task.assignExtraIntraIdx(focusZ0, focusZPositive)
self.assertEqual(
"Must have one extra-focal and one intra-focal image.",
str(context.exception),
)
def testTaskRun(self):
# Grab two exposures from the same detector at two different visits to
# get extra and intra
exposureExtra = self.butler.get(
"postISRCCD", dataId=self.dataIdExtra, collections=[self.runName]
)
exposureIntra = self.butler.get(
"postISRCCD", dataId=self.dataIdIntra, collections=[self.runName]
)
donutCatalogExtra = self.butler.get(
"donutCatalog", dataId=self.dataIdExtra, collections=[self.runName]
)
donutCatalogIntra = self.butler.get(
"donutCatalog", dataId=self.dataIdIntra, collections=[self.runName]
)
camera = self.butler.get(
"camera",
dataId={"instrument": "LSSTCam"},
collections="LSSTCam/calib/unbounded",
)
# Test return values when no sources in catalog
noSrcDonutCatalog = pd.DataFrame(columns=donutCatalogExtra.columns)
testOutNoSrc = self.task.run(
[exposureExtra, exposureIntra], [noSrcDonutCatalog] * 2, camera
)
np.testing.assert_array_equal(
testOutNoSrc.outputZernikesRaw, [np.ones(19) * np.nan] * 2
)
np.testing.assert_array_equal(
testOutNoSrc.outputZernikesAvg, [np.ones(19) * np.nan] * 2
)
self.assertEqual(len(testOutNoSrc.donutStampsExtra[0]), 0)
self.assertEqual(len(testOutNoSrc.donutStampsIntra[1]), 0)
# Test normal behavior
taskOut = self.task.run(
[exposureIntra, exposureExtra],
[donutCatalogExtra, donutCatalogIntra],
camera,
)
testExtraStamps = self.task.cutOutStamps(
exposureExtra, donutCatalogExtra, DefocalType.Extra, camera.getName()
)
testIntraStamps = self.task.cutOutStamps(
exposureIntra, donutCatalogIntra, DefocalType.Intra, camera.getName()
)
for donutStamp, cutOutStamp in zip(
taskOut.donutStampsExtra[0], testExtraStamps
):
self.assertMaskedImagesAlmostEqual(
donutStamp.stamp_im, cutOutStamp.stamp_im
)
for donutStamp, cutOutStamp in zip(
taskOut.donutStampsIntra[1], testIntraStamps
):
self.assertMaskedImagesAlmostEqual(
donutStamp.stamp_im, cutOutStamp.stamp_im
)
testCoeffsRaw = self.task.estimateZernikes(testExtraStamps, testIntraStamps)
testCoeffsAvg = self.task.combineZernikes.run(testCoeffsRaw)
np.testing.assert_array_equal(taskOut.outputZernikesRaw[0], testCoeffsRaw)
np.testing.assert_array_equal(
taskOut.outputZernikesAvg[0], testCoeffsAvg.combinedZernikes
)
@classmethod
def tearDownClass(cls):
cleanUpCmd = writeCleanUpRepoCmd(cls.repoDir, cls.runName)
runProgram(cleanUpCmd)
|
lsst-ts/ts_wep
|
tests/task/test_estimateZernikesScienceSensorTask.py
|
Python
|
gpl-3.0
| 10,126
|
[
"VisIt"
] |
59bd3159186e51b35afb58a4240a3bc53a728639d6b392b2b02ce9ad8b9c2976
|
"""
Message Queue Handler
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import json
import logging
import socket
from DIRAC.Resources.MessageQueue.MQCommunication import createProducer
class MessageQueueHandler(logging.Handler):
"""
MessageQueueHandler is a custom handler from logging.
It has no equivalent in the standard logging library because it is linked to DIRAC.
It is useful for sending log messages to a destination, just as StreamHandler sends them to a stream and FileHandler to a file.
Here, this handler sends log messages to a message queue server.
There is an assumption made that the formatter used is JsonFormatter
"""
def __init__(self, queue):
"""
Initialization of the MessageQueueHandler.
:param queue: queue identifier in the configuration.
example: "mardirac3.in2p3.fr::Queues::TestQueue"
"""
super(MessageQueueHandler, self).__init__()
self.producer = None
result = createProducer(queue)
if result['OK']:
self.producer = result['Value']
else:
# print because logging not available
print("ERROR initializing MessageQueueHandler: %s" % result)
self.hostname = socket.gethostname()
def emit(self, record):
"""
Add the record to the message queue.
:param record: log record object
"""
# add the hostname to the record
record.hostname = self.hostname
strRecord = self.format(record)
if self.producer is not None:
self.producer.put(json.loads(strRecord))
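# A minimal usage sketch; the helper name is an assumption and not part of
# DIRAC. The queue identifier is the example from the class docstring, and
# `json_formatter` is assumed to be an instance of DIRAC's JsonFormatter, as
# required by the assumption stated above.
def _example_attach_handler(json_formatter):
    handler = MessageQueueHandler("mardirac3.in2p3.fr::Queues::TestQueue")
    handler.setFormatter(json_formatter)
    logger = logging.getLogger("ExampleLogger")
    logger.addHandler(handler)
    logger.info("test message routed to the message queue")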
|
yujikato/DIRAC
|
src/DIRAC/FrameworkSystem/private/standardLogging/Handler/MessageQueueHandler.py
|
Python
|
gpl-3.0
| 1,589
|
[
"DIRAC"
] |
e17b62e4d70c3ccf2156307484137d171373bad0a05b49aeab619f96d6b47a84
|
#!/usr/bin/env python
"""
Runs a simulation under NVT conditions.
Outputs a portable state (.xml) file with positions and velocities,
to allow restarting and/or continuation.
.2019. joaor@stanford.edu
"""
from __future__ import print_function, division
import argparse
import logging
import math
import os
import random
import re
import sys
import numpy as np
import simtk.openmm.app as app
import simtk.openmm as mm
import simtk.unit as units
import _utils
import _restraints
# Format logger
logging.basicConfig(stream=sys.stdout,
level=logging.INFO,
format='[%(asctime)s] %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
##
# Parse user input and options
ap = argparse.ArgumentParser(description=__doc__)
# Mandatory
ap.add_argument('structure', help='Input coordinate file (.cif)')
# Options
ap.add_argument('--output', type=str, default=None,
help='Root name for output files. Default is input file name.')
ap.add_argument('--forcefield', type=str, default='amber14-all.xml',
help='Force field to build the system with (XML format).')
ap.add_argument('--solvent', type=str, default='amber14/tip3p.xml',
help='Solvent model to use in minimization (XML format).')
ap.add_argument('--xyz-frequency', dest='xyz_freq', type=int, default=None,
help='Frequency (number of steps) to write coordinates.')
ap.add_argument('--log-frequency', dest='log_freq', type=int, default=None,
help='Frequency (number of steps) to log run parameters.')
ap.add_argument('--platform', type=str, default=None,
choices=('OpenCL', 'CUDA', 'CPU', 'Reference'),
help='Platform to run calculations on.')
ap.add_argument('--state', type=str,
help='Checkpoint/XML file to read positions/velocities from.')
ap.add_argument('--seed', type=int, default=917,
help='Seed number for random number generator(s).')
ap.add_argument('--temperature', default=310, type=float,
help='Target temperature, in Kelvin. Default is 310.')
ap.add_argument('--runtime', default=5, type=float,
help='Simulation length in nanoseconds. Default 5.')
ap.add_argument('--continuation', action='store_true',
help='Reads elapsed simulation time from checkpoint/state files.')
ap.add_argument('--restraint-heavy-atom', action='store_true', default=False,
help='Apply position restraints to non-solvent heavy atoms')
ap.add_argument('--restraint-lipids', action='store_true', default=False,
help='Apply position restraints to lipid head groups')
ap.add_argument('--restraint-heavy-atom-k', default=500, type=int,
help='Force constant for heavy atom restraints. Default: 500')
ap.add_argument('--restraint-lipids-k', default=500, type=int,
help='Force constant for lipid restraints. Default: 500')
ap.add_argument('--hmr', action='store_true', default=False,
help='Use Hydrogen Mass Repartitioning.')
cmd = ap.parse_args()
logging.info('Started')
# Set random seed for reproducibility
random.seed(cmd.seed)
# Figure out platform
platform, plat_properties = _utils.get_platform(cmd.platform)
logging.info('Simulation Details:')
logging.info(f' random seed : {cmd.seed}')
logging.info(f' structure : {cmd.structure}')
logging.info(f' force field : {cmd.forcefield}')
logging.info(f' solvent model: {cmd.solvent}')
logging.info(f' temperature : {cmd.temperature} K')
logging.info(f' runtime : {cmd.runtime} ns')
logging.info(f' heavy-atom restraints : {cmd.restraint_heavy_atom}')
if cmd.restraint_heavy_atom:
logging.info(f' K = {cmd.restraint_heavy_atom_k} kJ/mol/nm^2')
logging.info(f' lipid restraints : {cmd.restraint_lipids}')
if cmd.restraint_lipids:
logging.info(f' K = {cmd.restraint_lipids_k} kJ/mol/nm^2')
logging.info(f' HMR : {cmd.hmr}')
# Make rootname for output files
basename = os.path.basename(cmd.structure)
fname, fext = os.path.splitext(basename)
if cmd.output is None:
rootname = fname + '_EqNVT'
else:
rootname = cmd.output
# Read in structure data and setup OpenMM system
structure = app.PDBxFile(cmd.structure)
# Remove dummy atoms (mass 0) just in case
model = app.Modeller(structure.topology, structure.positions)
dummy_idx = [a for a in model.topology.atoms() if a.element is None]
n_dummies = len(dummy_idx)
if n_dummies:
logging.info(f'Removing {n_dummies} dummy atoms from input')
model.delete(dummy_idx)
structure.topology = model.topology
structure.positions = model.positions
forcefield = app.ForceField(cmd.forcefield, cmd.solvent)
md_temp = cmd.temperature * units.kelvin
md_step = 2.0*units.femtosecond
md_fric = 1.0/units.picosecond
md_nbct = 1.0*units.nanometer
md_hamu = None
md_cstr = app.HBonds
if cmd.hmr: # adapt for HMR if necessary
md_step *= 2.5 # make 5 fs
md_hamu = 4*units.amu
md_cstr = app.AllBonds
# Build system & integrator
logging.info('Setting up system and integrator')
system = forcefield.createSystem(structure.topology, nonbondedMethod=app.PME,
nonbondedCutoff=md_nbct,
constraints=md_cstr,
hydrogenMass=md_hamu,
ewaldErrorTolerance=0.0005,
rigidWater=True)
# Setup integrator and temperature coupling
integrator = mm.LangevinIntegrator(md_temp, md_fric, md_step)
integrator.setRandomNumberSeed(cmd.seed)
integrator.setConstraintTolerance(0.00001)
# Restrain heavy atoms
if cmd.restraint_heavy_atom:
# force = _restraints.make_heavy_atom_restraints(structure,
# cmd.restraint_heavy_atom_k)
force = _restraints.make_heavy_atom_restraints_v2(system, structure,
cmd.restraint_heavy_atom_k)
system.addForce(force)
# Restrain lipid headgroups in Z
if cmd.restraint_lipids:
# force = _restraints.make_lipid_restraints(structure,
# cmd.restraint_lipids_k)
force = _restraints.make_lipid_restraints_v2(system, structure,
cmd.restraint_lipids_k)
system.addForce(force)
# Setup simulation
simulation = app.Simulation(structure.topology, system, integrator,
platform, plat_properties)
simulation.context.setPositions(structure.positions)
simulation.context.setVelocitiesToTemperature(md_temp)
# Load checkpoint/state file
if cmd.state:
if cmd.state.endswith('.xml'): # is XML state file
logging.info(f'Loading XML state file: {cmd.state}')
simulation.loadState(cmd.state)
logging.info(f' resetting simulation time')
simulation.context.setTime(0.0) # resets simulation time
cmd.runtime = cmd.runtime * units.nanosecond
elif cmd.state.endswith('.cpt'): # is binary checkpoint
logging.info(f'Loading binary checkpoint file: {cmd.state}')
simulation.loadCheckpoint(cmd.state)
if cmd.continuation:
# Adjust remaining running time
run_time = simulation.context.getState().getTime()
run_time_val = run_time.value_in_unit(units.nanosecond)
logging.info(f' {run_time_val:8.2f}/{cmd.runtime:8.2f} ns completed')
expected_t = cmd.runtime * units.nanosecond
cmd.runtime = (expected_t - run_time).in_units_of(units.nanosecond)
else: # restart from 0
simulation.context.setTime(0.0)
cmd.runtime = cmd.runtime * units.nanosecond
else:
raise Exception(f'State file format not recognized: {cmd.state}')
else:
cmd.runtime = cmd.runtime * units.nanosecond
# Assert we actually have to run something.
if cmd.runtime <= 0.00001 * units.nanosecond:
logging.info('Equilibration completed. Apparently. Maybe ask for more?')
logging.info('Finished')
sys.exit(0)
# Setup writer/logger frequencies
# Default: 0.01 ns
if cmd.hmr:
# Time step is 5 fs
xyz_freq = cmd.xyz_freq if cmd.xyz_freq is not None else 2000
log_freq = cmd.log_freq if cmd.log_freq is not None else 2000
else:
# Time step is 2 fs
xyz_freq = cmd.xyz_freq if cmd.xyz_freq is not None else 5000
log_freq = cmd.log_freq if cmd.log_freq is not None else 5000
# Calculate total simulation length in steps
n_steps = math.ceil(cmd.runtime / md_step.in_units_of(units.nanoseconds))
# n_steps is dimensionless (ns/ns)
# Setup Reporters
dcd_fname = _utils.make_fname(rootname + '.dcd')
cpt_fname = _utils.make_fname(rootname + '.cpt')
log_fname = _utils.make_fname(rootname + '.log')
dcd = app.DCDReporter(dcd_fname, xyz_freq)
cpt = app.CheckpointReporter(cpt_fname, xyz_freq)
state = app.StateDataReporter(log_fname, log_freq,
step=True,
time=True,
potentialEnergy=True,
kineticEnergy=True,
temperature=True,
progress=True,
remainingTime=True,
totalSteps=n_steps,
speed=True,
separator='\t')
simulation.reporters.append(dcd)
simulation.reporters.append(cpt)
simulation.reporters.append(state)
logging.info(f'Writing coordinates to \'{dcd_fname}\'')
logging.info(f'Writing checkpoint file to \'{cpt_fname}\'')
logging.info(f'Writing simulation log to \'{log_fname}\'')
# Run simulation
simulation.step(n_steps)
# Write state file (without restraining forces)
xml_fname = _utils.make_fname(rootname + '.xml')
logging.info(f'Writing state file to \'{xml_fname}\'')
system = simulation.system
n_rest_forces = sum([cmd.restraint_heavy_atom, cmd.restraint_lipids])
while n_rest_forces:
system.removeForce(system.getNumForces() - 1)
n_rest_forces -= 1
# Reinitialize context. Keep velocities, positions.
state = simulation.context.getState(getPositions=True, getVelocities=True)
vx, vy, vz = state.getPeriodicBoxVectors()
xyz, vel = state.getPositions(), state.getVelocities()
simulation.context.reinitialize(preserveState=False)
simulation.context.setPositions(xyz)
simulation.context.setVelocities(vel)
simulation.context.setPeriodicBoxVectors(vx, vy, vz)
simulation.saveState(xml_fname)
# Write last frame as mmCIF
cif_fname = _utils.make_fname(rootname + '.cif')
logging.info(f'Writing final structure to \'{cif_fname}\'')
with open(cif_fname, 'w') as handle:
app.PDBxFile.writeFile(structure.topology, xyz, handle, keepIds=True)
# Write system without dummy atoms
# Easier to redo system object
# and set positions/velocities manually.
model = app.Modeller(structure.topology, structure.positions)
dummy = [c for c in model.topology.chains() if c.id.startswith('DUM')]
model.delete(dummy) # delete entire chains
n_ini_atoms = model.topology.getNumAtoms()
logging.info('Writing system without dummy (restraint) atoms')
system = forcefield.createSystem(model.topology, nonbondedMethod=app.PME,
nonbondedCutoff=md_nbct,
constraints=md_cstr,
hydrogenMass=md_hamu,
ewaldErrorTolerance=0.0005,
rigidWater=True)
integrator = mm.LangevinIntegrator(md_temp, md_fric, md_step)
simulation = app.Simulation(model.topology, system, integrator)
simulation.context.setPositions(xyz[:n_ini_atoms])
simulation.context.setVelocities(vel[:n_ini_atoms])
simulation.context.setPeriodicBoxVectors(vx, vy, vz)
xml_fname = _utils.make_fname(rootname + '_noDUM' + '.xml')
logging.info(f'Writing dummy-less state to \'{xml_fname}\'')
simulation.saveState(xml_fname)
cpt_fname = _utils.make_fname(rootname + '_noDUM' + '.cpt')
logging.info(f'Writing dummy-less checkpoint to \'{cpt_fname}\'')
simulation.saveCheckpoint(cpt_fname)
# Write last frame as mmCIF
cif_fname = _utils.make_fname(rootname + '_noDUM' + '.cif')
logging.info(f'Writing dummy-less structure to \'{cif_fname}\'')
with open(cif_fname, 'w') as handle:
app.PDBxFile.writeFile(model.topology, xyz[:n_ini_atoms], handle, keepIds=True)
logging.info('Finished')
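# An illustrative invocation, shown as a comment because the script is driven
# from the command line; the file names are placeholders, not part of the
# original script:
#
#   python equilibrate_NVT.py system.cif --output run01_EqNVT \
#       --temperature 310 --runtime 5 --restraint-heavy-atom --hmr \
#       --state minimized.xml --platform CUDA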
|
csblab/md_scripts
|
openmm/amberff/equilibrate_NVT.py
|
Python
|
mit
| 12,419
|
[
"OpenMM"
] |
9ad0a52ddc8a98ded07448404a1164c356324345d0651417ba7e99353edbff65
|
from __future__ import with_statement
__author__ = 'Tom Schaul, tom@idsia.ch; Justin Bayer, bayerj@in.tum.de'
import gc
import pickle
import logging
import threading
import os
import operator
from itertools import count
from math import sqrt
from random import random, choice
from string import split
from scipy import where, array, exp, zeros, size, mat, median
# file extension for load/save protocol mapping
known_extensions = {
'mat': 'matlab',
'txt': 'ascii',
'svm': 'libsvm',
'pkl': 'pickle',
'nc' : 'netcdf' }
def abstractMethod():
""" This should be called when an abstract method is called that should have been
implemented by a subclass. It should not be called in situations where no implementation
(i.e. a 'pass' behavior) is acceptable. """
raise NotImplementedError('Method not implemented!')
def drawIndex(probs, tolerant=False):
""" Draws an index given an array of probabilities.
:key tolerant: if set to True, the array is normalized to sum to 1. """
if not sum(probs) < 1.00001 or not sum(probs) > 0.99999:
if tolerant:
probs /= sum(probs)
else:
print probs, 1 - sum(probs)
raise ValueError()
r = random()
s = 0
for i, p in enumerate(probs):
s += p
if s > r:
return i
return choice(range(len(probs)))
def drawGibbs(vals, temperature=1.):
""" Return the index of the sample drawn by a softmax (Gibbs). """
if temperature == 0:
# randomly pick one of the values with the max value.
m = max(vals)
best = []
for i, v in enumerate(vals):
if v == m:
best.append(i)
return choice(best)
else:
temp = vals / temperature
# make sure we keep the exponential bounded (between +20 and -20)
temp += 20 - max(temp)
if min(temp) < -20:
for i, v in enumerate(temp):
if v < -20:
temp[i] = -20
temp = exp(temp)
temp /= sum(temp)
return drawIndex(temp)
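# A small illustrative sketch; the helper name and values are assumptions, not
# part of the original module: temperature 0 degenerates into a (tie-breaking)
# argmax, while higher temperatures sample closer to uniform.
def _exampleDrawGibbs():
    vals = array([1.0, 2.0, 5.0])
    greedy = drawGibbs(vals, temperature=0.)    # always index 2
    sampled = drawGibbs(vals, temperature=10.)  # any index, biased towards 2
    return greedy, sampled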
def iterCombinations(tup):
""" all possible of integer tuples of the same dimension than tup, and each component being
positive and strictly inferior to the corresponding entry in tup. """
if len(tup) == 1:
for i in range(tup[0]):
yield (i,)
elif len(tup) > 1:
for prefix in iterCombinations(tup[:-1]):
for i in range(tup[-1]):
yield tuple(list(prefix) + [i])
def setAllArgs(obj, argdict):
""" set all those internal variables which have the same name than an entry in the
given object's dictionary.
This function can be useful for quick initializations. """
xmlstore = isinstance(obj, XMLBuildable)
for n in argdict.keys():
if hasattr(obj, n):
setattr(obj, n, argdict[n])
if xmlstore:
obj.argdict[n] = argdict[n]
else:
print 'Warning: parameter name', n, 'not found!'
if xmlstore:
if not hasattr(obj, '_unknown_argdict'):
obj._unknown_argdict = {}
obj._unknown_argdict[n] = argdict[n]
def linscale(d, lim):
""" utility function to linearly scale array d to the interval defined by lim """
return (d - d.min())*(lim[1] - lim[0]) + lim[0]
def percentError(out, true):
""" return percentage of mismatch between out and target values (lists and arrays accepted) """
arrout = array(out).flatten()
wrong = where(arrout != array(true).flatten())[0].size
return 100. * float(wrong) / float(arrout.size)
def formatFromExtension(fname):
"""Tries to infer a protocol from the file extension."""
_base, ext = os.path.splitext(fname)
if not ext:
return None
try:
format = known_extensions[ext.replace('.', '')]
except KeyError:
format = None
return format
class XMLBuildable(object):
""" subclasses of this can be losslessly stored in XML, and
automatically reconstructed on reading. For this they need to store
their construction arguments in the variable <argdict>. """
argdict = None
def setArgs(self, **argdict):
if not self.argdict:
self.argdict = {}
setAllArgs(self, argdict)
class Serializable(object):
"""Class that implements shortcuts to serialize an object.
Serialization is done by various formats. At the moment, only 'pickle' is
supported.
"""
def saveToFileLike(self, flo, format=None, **kwargs):
"""Save the object to a given file like object in the given format.
"""
format = 'pickle' if format is None else format
save = getattr(self, "save_%s" % format, None)
if save is None:
raise ValueError("Unknown format '%s'." % format)
save(flo, **kwargs)
@classmethod
def loadFromFileLike(cls, flo, format=None):
"""Load the object to a given file like object with the given protocol.
"""
format = 'pickle' if format is None else format
load = getattr(cls, "load_%s" % format, None)
if load is None:
raise ValueError("Unknown format '%s'." % format)
return load(flo)
def saveToFile(self, filename, format=None, **kwargs):
"""Save the object to file given by filename."""
if format is None:
# try to derive protocol from file extension
format = formatFromExtension(filename)
with file(filename, 'wb') as fp:
self.saveToFileLike(fp, format, **kwargs)
@classmethod
def loadFromFile(cls, filename, format=None):
"""Return an instance of the class that is saved in the file with the
given filename in the specified format."""
if format is None:
# try to derive protocol from file extension
format = formatFromExtension(filename)
with file(filename, 'rbU') as fp:
obj = cls.loadFromFileLike(fp, format)
obj.filename = filename
return obj
def save_pickle(self, flo, protocol=0):
pickle.dump(self, flo, protocol)
@classmethod
def load_pickle(cls, flo):
return pickle.load(flo)
class Named(XMLBuildable):
"""Class whose objects are guaranteed to have a unique name."""
_nameIds = count(0)
def getName(self):
logging.warning("Deprecated, use .name property instead.")
return self.name
def setName(self, newname):
logging.warning("Deprecated, use .name property instead.")
self.name = newname
def _getName(self):
"""Returns the name, which is generated if it has not been already."""
if self._name is None:
self._name = self._generateName()
return self._name
def _setName(self, newname):
"""Change name to newname. Uniqueness is not guaranteed anymore."""
self._name = newname
_name = None
name = property(_getName, _setName)
def _generateName(self):
"""Return a unique name for this object."""
return "%s-%i" % (self.__class__.__name__, self._nameIds.next())
def __repr__(self):
""" The default representation of a named object is its name. """
return "<%s '%s'>" % (self.__class__.__name__, self.name)
def fListToString(a_list, a_precision=3):
""" Returns a string representing a list of floats with a given precision """
s_list = ", ".join(("%g" % round(x, a_precision)).ljust(a_precision+3)
for x in a_list)
return "[%s]" % s_list
def tupleRemoveItem(tup, index):
""" remove the item at position index of the tuple and return a new tuple. """
l = list(tup)
return tuple(l[:index] + l[index + 1:])
def confidenceIntervalSize(stdev, nbsamples):
""" Determine the size of the confidence interval, given the standard deviation and the number of samples.
t-test-percentile: 97.5%, infinitely many degrees of freedom,
therefore on the two-sided interval: 95% """
# CHECKME: for better precision, maybe get the percentile dynamically, from the scipy library?
return 2 * 1.98 * stdev / sqrt(nbsamples)
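# An illustrative numeric check, not part of the original module: with
# stdev=2.0 and nbsamples=100 the half-width of the ~95% interval is
# 2 * 1.98 * 2.0 / sqrt(100) = 0.792.
def _exampleConfidenceIntervalSize():
    return confidenceIntervalSize(2.0, 100)  # 0.792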
def trace(func):
def inner(*args, **kwargs):
print "%s: %s, %s" % (func.__name__, args, kwargs)
return func(*args, **kwargs)
return inner
def threaded(callback=lambda * args, **kwargs: None, daemonic=False):
"""Decorate a function to run in its own thread and report the result
by calling callback with it."""
def innerDecorator(func):
def inner(*args, **kwargs):
target = lambda: callback(func(*args, **kwargs))
t = threading.Thread(target=target)
t.setDaemon(daemonic)
t.start()
return inner
return innerDecorator
def garbagecollect(func):
"""Decorate a function to invoke the garbage collector after each execution.
"""
def inner(*args, **kwargs):
result = func(*args, **kwargs)
gc.collect()
return result
return inner
def memoize(func):
"""Decorate a function to 'memoize' results by holding it in a cache that
maps call arguments to returns."""
cache = {}
def inner(*args, **kwargs):
# Dictionaries and lists are unhashable
args = tuple(args)
# Make a set for checking in the cache, since the order of
# .iteritems() is undefined
kwargs_set = frozenset(kwargs.iteritems())
if (args, kwargs_set) in cache:
result = cache[args, kwargs_set]
else:
result = func(*args, **kwargs)
cache[args, kwargs_set] = result
return result
return inner
def storeCallResults(obj, verbose=False):
"""Pseudo-decorate an object to store all evaluations of the function in the returned list."""
results = []
oldcall = obj.__class__.__call__
def newcall(*args, **kwargs):
result = oldcall(*args, **kwargs)
results.append(result)
if verbose:
print result
return result
obj.__class__.__call__ = newcall
return results
def multiEvaluate(repeat):
"""Decorate a function to evaluate repeatedly with the same arguments, and return the average result """
def decorator(func):
def inner(*args, **kwargs):
result = 0.
for dummy in range(repeat):
result += func(*args, **kwargs)
return result / repeat
return inner
return decorator
def _import(name):
"""Return module from a package.
These two are equivalent:
> from package import module as bar
> bar = _import('package.module')
"""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
try:
mod = getattr(mod, comp)
except AttributeError:
raise ImportError("No module named %s" % mod)
return mod
# tools for binary Gray code manipulation:
def int2gray(i):
""" Returns the value of an integer in Gray encoding."""
return i ^ (i >> 1)
def gray2int(g, size):
""" Transforms a Gray code back into an integer. """
res = 0
for i in reversed(range(size)):
gi = (g >> i) % 2
if i == size - 1:
bi = gi
else:
bi = bi ^ gi
res += bi * 2 ** i
return res
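# An illustrative round-trip, not part of the original module: int2gray(5)
# gives 0b111 == 7 and gray2int(7, 3) recovers 5; the same holds for all
# 3-bit integers.
def _exampleGrayRoundTrip():
    for i in range(8):
        assert gray2int(int2gray(i), 3) == i
    return int2gray(5)  # == 7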
def asBinary(i):
""" Produces a string from an integer's binary representation.
(preceding zeros removed). """
if i > 1:
if i % 2 == 1:
return asBinary(i >> 1) + '1'
else:
return asBinary(i >> 1) + '0'
else:
return str(i)
def one_to_n(val, maxval):
""" Returns a 1-in-n binary encoding of a non-negative integer. """
a = zeros(maxval, float)
a[val] = 1.
return a
def n_to_one(arr):
""" Returns the reverse of a 1-in-n binary encoding. """
return where(arr == 1)[0][0]
def canonicClassString(x):
""" the __class__ attribute changed from old-style to new-style classes... """
if isinstance(x, object):
return split(repr(x.__class__), "'")[1]
else:
return repr(x.__class__)
def decrementAny(tup):
""" the closest tuples to tup: decrementing by 1 along any dimension.
Never go into negatives though. """
res = []
for i, x in enumerate(tup):
if x > 0:
res.append(tuple(list(tup[:i]) + [x - 1] + list(tup[i + 1:])))
return res
def reachable(stepFunction, start, destinations, _alreadyseen=None):
""" Determines the subset of destinations that can be reached from a set of starting positions,
while using stepFunction (which produces a list of neighbor states) to navigate.
Uses breadth-first search.
Returns a dictionary with reachable destinations and their distances.
"""
if len(start) == 0 or len(destinations) == 0:
return {}
if _alreadyseen is None:
_alreadyseen = []
_alreadyseen.extend(start)
# dict with distances to destinations
res = {}
for s in start:
if s in destinations:
res[s] = 0
start.remove(s)
# do one step
new = set()
for s in start:
new.update(stepFunction(s))
new.difference_update(_alreadyseen)
ndestinations = list(destinations)
for s in list(new):
if s in destinations:
res[s] = 1
new.remove(s)
ndestinations.remove(s)
_alreadyseen.append(s)
# recursively do the rest
deeper = reachable(stepFunction, new, ndestinations, _alreadyseen)
# adjust distances
for k, val in deeper.items():
res[k] = val + 1
return res
def flood(stepFunction, fullSet, initSet, relevant=None):
""" Returns a list of elements of fullSet linked to some element of initSet
through the neighborhood-setFunction (which must be defined on all elements of fullSet).
:key relevant: (optional) list of relevant elements: stop once all relevant elements are found.
"""
if fullSet is None:
flooded = set(initSet)
else:
full = set(fullSet)
flooded = full.intersection(set(initSet))
if relevant is None:
relevant = full.copy()
if relevant:
relevant = set(relevant)
change = flooded.copy()
while len(change)>0:
new = set()
for m in change:
if fullSet is None:
new.update(stepFunction(m))
else:
new.update(full.intersection(stepFunction(m)))
change = new.difference(flooded)
flooded.update(change)
if relevant is not None and relevant.issubset(flooded):
break
return list(flooded)
def crossproduct(ss, row=None, level=0):
"""Returns the cross-product of the sets given in `ss`."""
if row is None:
row = []
if len(ss) > 1:
return reduce(operator.add,
[crossproduct(ss[1:], row + [i], level + 1) for i in ss[0]])
else:
return [row + [i] for i in ss[0]]
def permute(arr, permutation):
"""Return an array like arr but with elements permuted.
Only the first dimension is permuted, which makes it possible to permute
blocks of the input.
arr can be anything as long as it's indexable."""
return array([arr[i] for i in permutation])
def permuteToBlocks(arr, blockshape):
"""Permute an array so that it consists of linearized blocks.
Example: A two-dimensional array of the form
0 1 2 3
4 5 6 7
8 9 10 11
12 13 14 15
would be turned into an array like this with (2, 2) blocks:
0 1 4 5 2 3 6 7 8 9 12 13 10 11 14 15
"""
if len(blockshape) < 2:
raise ValueError("Need more than one dimension.")
elif len(blockshape) == 2:
blockheight, blockwidth = blockshape
return permuteToBlocks2d(arr, blockheight, blockwidth)
elif len(blockshape) == 3:
blockdepth, blockheight, blockwidth = blockshape
return permuteToBlocks3d(arr, blockdepth, blockheight, blockwidth)
else:
raise NotImplementedError("Only for dimensions 2 and 3.")
def permuteToBlocks3d(arr, blockdepth, blockheight, blockwidth):
depth, height, width = arr.shape
arr_ = arr.reshape(height * depth, width)
arr_ = permuteToBlocks2d(arr_, blockheight, blockwidth)
arr_.shape = depth, height * width
return permuteToBlocks2d(arr_, blockdepth, blockwidth * blockheight)
def permuteToBlocks2d(arr, blockheight, blockwidth):
_height, width = arr.shape
arr = arr.flatten()
new = zeros(size(arr))
for i in xrange(size(arr)):
blockx = (i % width) / blockwidth
blocky = i / width / blockheight
blockoffset = blocky * width / blockwidth + blockx
blockoffset *= blockwidth * blockheight
inblockx = i % blockwidth
inblocky = (i / width) % blockheight
j = blockoffset + inblocky * blockwidth + inblockx
new[j] = arr[i]
return new
def triu2flat(m):
""" Flattens an upper triangular matrix, returning a vector of the
non-zero elements. """
dim = m.shape[0]
res = zeros(dim * (dim + 1) / 2)
index = 0
for row in range(dim):
res[index:index + dim - row] = m[row, row:]
index += dim - row
return res
def flat2triu(a, dim):
""" Produces an upper triangular matrix of dimension dim from the elements of the given vector. """
res = zeros((dim, dim))
index = 0
for row in range(dim):
res[row, row:] = a[index:index + dim - row]
index += dim - row
return res
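# An illustrative round-trip, not part of the original module: flattening the
# upper triangle and rebuilding it are inverses for a square matrix.
def _exampleTriuRoundTrip():
    m = array([[1., 2., 3.],
               [0., 4., 5.],
               [0., 0., 6.]])
    flat = triu2flat(m)        # [1. 2. 3. 4. 5. 6.]
    return flat2triu(flat, 3)  # equals m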
def blockList2Matrix(l):
""" Converts a list of matrices into a corresponding big block-diagonal one. """
dims = [m.shape[0] for m in l]
s = sum(dims)
res = zeros((s, s))
index = 0
for i in range(len(l)):
d = dims[i]
m = l[i]
res[index:index + d, index:index + d] = m
index += d
return res
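# Illustrative usage sketch (not part of the original module): a 3x3
# block-diagonal matrix built from a 1x1 and a 2x2 block.
def _example_block_diagonal():
    from numpy import array
    a = array([[1.]])
    b = array([[2., 3.], [4., 5.]])
    return blockList2Matrix([a, b])  # zeros everywhere except the two diagonal blocks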
def blockCombine(l):
""" Produce a matrix from a list of lists of its components. """
l = [map(mat, row) for row in l]
hdims = [m.shape[1] for m in l[0]]
hs = sum(hdims)
vdims = [row[0].shape[0] for row in l]
vs = sum(vdims)
    res = zeros((vs, hs))
vindex = 0
for i, row in enumerate(l):
hindex = 0
for j, m in enumerate(row):
res[vindex:vindex + vdims[i], hindex:hindex + hdims[j]] = m
hindex += hdims[j]
vindex += vdims[i]
return res
def avgFoundAfter(decreasingTargetValues, listsOfActualValues, batchSize=1, useMedian=False):
""" Determine the average number of steps to reach a certain value (for the first time),
given a list of value sequences.
If a value is not always encountered, the length of the longest sequence is used.
Returns an array. """
from scipy import sum
numLists = len(listsOfActualValues)
longest = max(map(len, listsOfActualValues))
# gather a list of indices of first encounters
res = [[0] for _ in range(numLists)]
for tval in decreasingTargetValues:
for li, l in enumerate(listsOfActualValues):
lres = res[li]
found = False
for i in range(lres[-1], len(l)):
if l[i] <= tval:
lres.append(i)
found = True
break
if not found:
lres.append(longest)
tmp = array(res)
if useMedian:
resx = median(tmp, axis=0)[1:]
else:
resx = sum(tmp, axis=0)[1:] / float(numLists)
return resx * batchSize
class DivergenceError(Exception):
""" Raised when an algorithm diverges. """
def matchingDict(d, selection, require_existence=False):
""" Determines if the dictionary d conforms to the specified selection,
i.e. if a (key, x) is in the selection, then if key is in d as well it must be x
or contained in x (if x is a list). """
for k, v in selection.items():
if k in d:
if isinstance(v, list):
if d[k] not in v:
return False
else:
if d[k] != v:
return False
elif require_existence:
return False
return True
def subDict(d, allowedkeys, flip=False):
""" Returns a new dictionary with a subset of the entries of d
    that have one of the (dis-)allowed keys."""
res = {}
for k, v in d.items():
if (k in allowedkeys) ^ flip:
res[k] = v
return res
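# Illustrative usage sketch (not part of the original module): check a record
# against a selection and keep only some of its keys.
def _example_dict_helpers():
    d = {'algo': 'pgd', 'lr': 0.1, 'seed': 3}
    ok = matchingDict(d, {'algo': ['pgd', 'cma'], 'lr': 0.1})  # -> True
    return ok, subDict(d, ['algo', 'lr'])                      # -> {'algo': 'pgd', 'lr': 0.1}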
def dictCombinations(listdict):
""" Iterates over dictionaries that go through every possible combination
of key-value pairs as specified in the lists of values for each key in listdict."""
listdict = listdict.copy()
if len(listdict) == 0:
return [{}]
k, vs = listdict.popitem()
res = dictCombinations(listdict)
if isinstance(vs, list) or isinstance(vs, tuple):
res = [dict(d, **{k:v}) for d in res for v in sorted(set(vs))]
else:
res = [dict(d, **{k:vs}) for d in res]
return res
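# Illustrative usage sketch (not part of the original module): one dictionary per
# combination of the listed values; scalar values are kept as-is.
def _example_dict_combinations():
    return dictCombinations({'lr': [0.1, 0.01], 'momentum': 0.9})
    # -> two dicts, one per learning rate, each with momentum 0.9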
def r_argmax(v):
""" Acts like scipy argmax, but break ties randomly. """
if len(v) == 1:
return 0
maxbid = max(v)
maxbidders = [i for (i, b) in enumerate(v) if b==maxbid]
return choice(maxbidders)
def all_argmax(x):
""" Return the indices of all values that are equal to the maximum: no breaking ties. """
m = max(x)
return [i for i, v in enumerate(x) if v == m]
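# Illustrative usage sketch (not part of the original module): with a tie at the
# maximum, r_argmax picks one of the tied indices at random, all_argmax returns both.
def _example_argmax_helpers():
    v = [1, 3, 2, 3]
    return r_argmax(v) in (1, 3), all_argmax(v)  # -> (True, [1, 3])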
def dense_orth(dim):
""" Constructs a dense orthogonal matrix. """
from scipy import rand
from scipy.linalg import orth
return orth(rand(dim, dim))
def sparse_orth(d):
""" Constructs a sparse orthogonal matrix.
The method is described in:
Gi-Sang Cheon et al., Constructions for the sparsest orthogonal matrices,
Bull. Korean Math. Soc 36 (1999) No.1 pp.199-129
"""
from scipy.sparse import eye
from scipy import r_, pi, sin, cos
if d%2 == 0:
seq = r_[0:d:2,1:d-1:2]
else:
seq = r_[0:d-1:2,1:d:2]
Q = eye(d,d).tocsc()
for i in seq:
theta = random() * 2 * pi
flip = (random() - 0.5)>0;
Qi = eye(d,d).tocsc()
Qi[i,i] = cos(theta)
Qi[(i+1),i] = sin(theta)
if flip > 0:
Qi[i,(i+1)] = -sin(theta)
Qi[(i+1),(i+1)] = cos(theta)
else:
Qi[i,(i+1)] = sin(theta)
Qi[(i+1),(i+1)] = -cos(theta)
Q = Q*Qi;
return Q
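# Illustrative usage sketch (not part of the original module): verify that the
# generated sparse matrix is numerically orthogonal.
def _example_sparse_orth():
    from numpy import allclose, identity
    Q = sparse_orth(4)
    return allclose((Q.T * Q).todense(), identity(4))  # -> True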
def xhash(arr):
""" Hashing function for arrays. Use with care. """
import hashlib
return hashlib.sha1(arr).hexdigest()
def binArr2int(arr):
""" Convert a binary array into its (long) integer representation. """
from numpy import packbits
tmp2 = packbits(arr.astype(int))
return sum(val * 256 ** i for i, val in enumerate(tmp2[::-1]))
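# Illustrative usage sketch (not part of the original module): the 8-bit pattern
# 00001011 maps to 11; arrays are packed into whole bytes, padded on the right.
def _example_bin_arr_to_int():
    from numpy import array
    return binArr2int(array([0, 0, 0, 0, 1, 0, 1, 1]))  # -> 11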
def uniqueArrays(vs):
""" create a set of arrays """
resdic = {}
for v in vs:
resdic[xhash(v)] = v
return resdic.values()
def seedit(seed=0):
""" Fixed seed makes for repeatability, but there may be two different
random number generators involved. """
import random
import numpy
random.seed(seed)
numpy.random.seed(seed)
|
fxsjy/pybrain
|
pybrain/utilities.py
|
Python
|
bsd-3-clause
| 23,298
|
[
"NetCDF"
] |
40727179af4098573798c34752607b97b7f5a017261313ae46ef11e2a073663a
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import h5py
from plotstuff import params, colours
reb = params()
cols = colours()
from gatspy.periodic import LombScargle
import sys
import multiprocessing as mp
from multiprocessing import Pool
from GProtation import make_plot, lnprob, Gprob, neglnlike
import emcee
import time
import george
from george.kernels import ExpSquaredKernel, ExpSine2Kernel
import scipy.optimize as spo
def periodograms(id, x, y, yerr, path, plot=False, savepgram=False):
"""
    Takes the id of a star, measures its rotation period with a Lomb-Scargle
    periodogram, saves the result and returns the period (0 if no peak is found).
id: star id.
x, y, yerr: time, flux and error arrays.
path: path where you want to save the output.
"""
ps = np.linspace(2, 100, 1000)
model = LombScargle().fit(x, y, yerr)
pgram = model.periodogram(ps)
# find peaks
peaks = np.array([i for i in range(1, len(ps)-1) if pgram[i-1] <
pgram[i] and pgram[i+1] < pgram[i]])
if len(peaks):
period = ps[pgram==max(pgram[peaks])][0]
else: period = 0
if plot:
plt.clf()
plt.plot(ps, pgram)
plt.axvline(period, color="r")
plt.savefig("{0}/{1}_pgram".format(path, str(int(id)).zfill(4)))
if savepgram:
np.savetxt("{0}/{1}_pgram.txt".format(path, str(int(id)).zfill(4)),
np.transpose((ps, pgram)))
np.savetxt("{0}/{1}_pgram_result.txt".format(path, str(int(id)).zfill(4)),
np.ones(2).T*period)
return period
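# Illustrative sketch (not part of the original script; assumes a writable
# "results" directory exists): recover the period of a noisy sinusoid.
def _example_periodogram():
    x = np.linspace(0, 200, 2000)
    y = np.sin(2 * np.pi * x / 25.) + np.random.randn(len(x)) * .1
    yerr = np.ones_like(x) * .1
    return periodograms("0", x, y, yerr, "results")  # should be close to 25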
def recover_injections(id, x, y, yerr, path, burnin, run, nwalkers=32,
plot=True):
"""
Take x, y, yerr, calculate ACF period for initialisation and do MCMC.
npts: number of points per period.
id: star id.
x, y, yerr: time, flux and error arrays.
path: path where you want to save the output.
burnin: the number of burnin steps.
run: the number of steps to run for.
nwalkers: the number of walkers.
plot: if True then plots of posteriors and chains will be made.
"""
# initialise with pgram
try:
        p_init = np.genfromtxt("{0}/{1}_pgram_result.txt".format(path, id))[0]
except:
p_init = periodograms(id, x, y, yerr, path, plot=True)
if p_init < .5: # small periods raise an error with george.
p_init = 1.
# If using lnprob, plims = [pmin, pmax] for a uniform prior.
# If using Gprob, plims = [mu, sigma] for a Gaussian prior.
# plims = np.log([p_init - .5 * p_init, p_init + 2 * p_init])
plims = np.log([p_init - .5 * p_init, p_init + 2 * p_init])
# plims = np.log([p_init, p_init*.1]) # mean, sigma
print("Initial period and limits:", p_init, np.exp(plims))
# assign theta_init
theta_init = np.log([np.exp(-5), np.exp(7), np.exp(.6), np.exp(-16),
p_init])
print("\n", "log(theta_init) = ", theta_init)
print("theta_init = ", np.exp(theta_init), "\n")
# plot initialisation
t = np.exp(theta_init)
k = t[0] * ExpSquaredKernel(t[1]) * ExpSine2Kernel(t[2], t[3])
gp = george.GP(k)
gp.compute(x, yerr)
xs = np.linspace(x[0], x[-1], 1000)
mu, cov = gp.predict(y, xs)
plt.clf()
plt.errorbar(x, y, yerr=yerr, **reb)
plt.plot(xs, mu, color=cols.blue)
plt.savefig("{0}/{1}_init".format(path, id))
# set up MCMC
ndim, nwalkers = len(theta_init), nwalkers
p0 = [theta_init+1e-4*np.random.rand(ndim) for i in range(nwalkers)]
args = (x, y, yerr, plims)
# time the lhf call
start = time.time()
print("lnprob = ", lnprob(theta_init, x, y, yerr, plims))
# print("lnprob = ", Gprob(theta_init, x, y, yerr, plims))
end = time.time()
tm = end - start
print("1 lhf call takes ", tm, "seconds")
print("burn in will take", tm * nwalkers * burnin, "s")
print("run will take", tm * nwalkers * run, "s")
print("total = ", (tm*nwalkers*run + tm*nwalkers*burnin)/60, \
"mins,", (tm*nwalkers*run + tm*nwalkers*burnin)/3600, "hours")
# run MCMC
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=args)
# sampler = emcee.EnsembleSampler(nwalkers, ndim, Gprob, args=args)
print("burning in...")
start = time.time()
p0, lp, state = sampler.run_mcmc(p0, burnin)
sampler.reset()
print("production run...")
p0, lp, state = sampler.run_mcmc(p0, run)
end = time.time()
print("actual time = ", (end - start)/60, "mins")
# save samples
f = h5py.File("%s/%s_samples.h5" % (path, id), "w")
data = f.create_dataset("samples", np.shape(sampler.chain))
data[:, :] = np.array(sampler.chain)
f.close()
# make various plots
if plot:
with h5py.File("%s/%s_samples.h5" % (path, id), "r") as f:
samples = f["samples"][...]
mcmc_result = make_plot(samples, x, y, yerr, id, path, traces=True,
tri=True, prediction=True)
def acf_pgram_GP_LSST(id):
"""
Run acf, pgram and MCMC recovery on Suzanne's simulations
"""
id = str(int(id)).zfill(4)
path = "results" # where to save results
x, y, yerr = np.genfromtxt("simulations/{0}.txt".format(id)).T
yerr = np.ones_like(x) * .001
periodograms(id, x, y, yerr, path, plot=True) # pgram
burnin, run = 500, 1000
recover_injections(id, x, y, yerr, path, burnin, run, nwalkers=24,
plot=True)
if __name__ == "__main__":
# acf_pgram_GP_LSST(0)
ids = range(10)
pool = Pool()
pool.map(acf_pgram_GP_LSST, ids)
|
RuthAngus/LSST-max
|
code/recover_LSST.py
|
Python
|
mit
| 5,566
|
[
"Gaussian"
] |
fb1bdece3176cb3b657826478a13d6c53dd5c518705a0d28fefa07f7e6817511
|
import numpy as np
from unittest import TestCase
from diffprivlib.mechanisms import Gaussian
from diffprivlib.utils import global_seed
class TestGaussian(TestCase):
def setup_method(self, method):
        if method.__name__.endswith("prob"):
global_seed(314159)
self.mech = Gaussian
def teardown_method(self, method):
del self.mech
def test_class(self):
from diffprivlib.mechanisms import DPMechanism
self.assertTrue(issubclass(Gaussian, DPMechanism))
def test_zero_sensitivity(self):
mech = self.mech(epsilon=0.5, delta=0.1, sensitivity=0)
for i in range(1000):
self.assertAlmostEqual(mech.randomise(1), 1)
def test_zero_epsilon_delta(self):
self.assertRaises(ValueError, self.mech, epsilon=0, delta=0.1, sensitivity=1)
self.assertRaises(ValueError, self.mech, epsilon=0.5, delta=0, sensitivity=1)
def test_wrong_sensitivity(self):
with self.assertRaises(TypeError):
self.mech(epsilon=0.5, delta=0.1, sensitivity="1")
with self.assertRaises(ValueError):
self.mech(epsilon=0.5, delta=0.1, sensitivity=-1)
def test_large_epsilon(self):
with self.assertRaises(ValueError):
self.mech(epsilon=5, delta=0.1, sensitivity=1)
def test_complex_epsilon(self):
with self.assertRaises(TypeError):
self.mech(epsilon=0.5 + 0.2j, delta=0.1, sensitivity=1)
def test_string_epsilon(self):
with self.assertRaises(TypeError):
self.mech(epsilon="0.5", delta=0.1, sensitivity=1)
def test_non_numeric(self):
mech = self.mech(epsilon=0.5, delta=0.1, sensitivity=1)
with self.assertRaises(TypeError):
mech.randomise("Hello")
def test_zero_median_prob(self):
mech = self.mech(epsilon=0.75, delta=0.1, sensitivity=1)
vals = []
for i in range(20000):
vals.append(mech.randomise(0.5))
median = float(np.median(vals))
self.assertAlmostEqual(np.abs(median), 0.5, delta=0.1)
def test_neighbors_prob(self):
epsilon = 1
runs = 10000
mech = self.mech(epsilon=0.5, delta=0.1, sensitivity=1)
count = [0, 0]
for i in range(runs):
val0 = mech.randomise(0)
if val0 <= 0.5:
count[0] += 1
val1 = mech.randomise(1)
if val1 <= 0.5:
count[1] += 1
self.assertGreater(count[0], count[1])
self.assertLessEqual(count[0] / runs, np.exp(epsilon) * count[1] / runs + 0.1)
def test_repr(self):
repr_ = repr(self.mech(epsilon=0.5, delta=0.1, sensitivity=1))
self.assertIn(".Gaussian(", repr_)
def test_bias(self):
self.assertEqual(0.0, self.mech(epsilon=0.5, delta=0.1, sensitivity=1).bias(0))
def test_variance(self):
mech = self.mech(epsilon=0.5, delta=0.1, sensitivity=1)
self.assertGreater(mech.variance(0), 0.0)
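# Illustrative sketch (not part of the original test suite): direct use of the
# Gaussian mechanism under test to randomise a single value.
def _example_randomise():
    mech = Gaussian(epsilon=0.5, delta=0.1, sensitivity=1)
    return mech.randomise(0.0)  # a noisy, differentially private version of 0.0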
|
IBM/differential-privacy-library
|
tests/mechanisms/test_Gaussian.py
|
Python
|
mit
| 2,994
|
[
"Gaussian"
] |
dc19559303e7460713e52ee2311406219827f28e09848be9713f942770a81d56
|
# GromacsWrapper environment.py
# Copyright (c) 2011 Oliver Beckstein <orbeckst@gmail.com>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
"""
:mod:`gromacs.environment` -- Run time modification of behaviour
================================================================
Some aspects of GromacsWrapper can be determined globally. The
corresponding flags :class:`Flag` are set in the environment (think of
them like environment variables). They are accessible through the
pseudo-dictionary :data:`gromacs.environment.flags`.
The entries appear as 'name'-'value' pairs. Flags check values and illegal ones
raise a :exc:`ValueError`. Documentation on all flags can be obtained with ::
print gromacs.environment.flags.doc()
List of GromacsWrapper flags with default values
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: flagsDocs
Classes
~~~~~~~
.. data:: flags
.. autoclass:: Flags
:members:
.. autoclass:: Flag
:members:
"""
# Flags infrastructure taken from MDAnalysis.core.__init__ (same author ... :-) )
# set up flags for core routines (more convoluted than strictly necessary but should
# be clean to add more flags if needed)
class Flags(dict):
"""Global registry of flags. Acts like a dict for item access.
    There are a number of flags defined that influence how GromacsWrapper behaves. They are
accessible through the pseudo-dictionary
:data:`gromacs.environment.flags`
The entries appear as 'name'-'value' pairs. Flags check values and illegal ones
raise a :exc:`ValueError`. Documentation on all flags can be obtained with ::
print gromacs.environment.flags.__doc__
New flags are added with the :meth:`Flags.register` method which takes a new :class:`Flag`
instance as an argument.
"""
def __init__(self,*args):
"""For **developers**: Initialize Flags registry with a *list* of :class:`Flag` instances."""
super(Flags,self).__init__([(flag.name,flag) for flag in args])
def get_flag(self,name):
return super(Flags,self).__getitem__(name)
def doc(self):
"""Shows doc strings for all flags."""
return "\n\n".join([flag.__doc__ for flag in self._itervalues()])
def register(self,flag):
"""Register a new :class:`Flag` instance with the Flags registry."""
super(Flags,self).__setitem__(flag.name,flag)
def update(self,*flags):
"""Update Flags registry with a list of :class:`Flag` instances."""
super(Flags,self).update([(flag.name,flag) for flag in flags])
def setdefault(self,k,d=None):
raise NotImplementedError
def __getitem__(self,name):
return self.get_flag(name).get()
def __setitem__(self,name,value):
self.get_flag(name).set(value)
def _itervalues(self):
return super(Flags,self).itervalues()
def _items(self):
return super(Flags,self).items()
def itervalues(self):
for flag in self._itervalues():
yield flag.value
def iteritems(self):
for flag in self._itervalues():
yield flag.name,flag.value
def values(self):
return [flag.value for flag in self._itervalues()]
def items(self):
return [(flag.name,flag.value) for flag in self._itervalues()]
def __repr__(self):
return str(self.items())
class FlagsDynamicDocs(Flags):
# docs are generated on the fly for interactive use; but because
# this does not work well with the sphinx documentation system
# ("AttributeError: 'property' object has no attribute
# 'expandtabs'") we split the class...
@property
def __doc__(self):
# generate dynamic docs on all flags
return self.doc()
class IdentityMapping(dict):
def __getitem__(self,key):
return key
class Flag(object):
"""A Flag, essentially a variable that knows its default and legal values."""
def __init__(self,name,default,mapping=None,doc=None):
"""Create a new flag which will be registered with FLags.
newflag = Flag(name,default,mapping,doc)
:Arguments:
*name*
name of the flag, must be a legal python name
*default*
default value
*mapping*
dict that maps allowed input values to canonical values;
if ``None`` then no argument checking will be performed and
all values are directly set.
*doc*
doc string; may contain string interpolation mappings for::
%%(name)s name of the flag
%%(default)r default value
%%(value)r current value
%%(mapping)r mapping
Doc strings are generated dynamically and reflect the current state.
"""
self.name = name
self.value = default
self.default = default
# {v1:v1,v2:v1,v3:v3, ...} mapping of allowed values to canonical ones
self.mapping = mapping or IdentityMapping()
self._doctemplate = "**%(name)s** = *%(value)r*\n" + (doc or "*undocumented flag*")
def get(self):
return self.value
def set(self,value):
if value is not None:
try:
self.value = self.mapping[value]
except KeyError:
raise ValueError("flag must be None or one of "+str(self.mapping.keys()))
return self.get()
def prop(self):
"""Use this for property(**flag.prop())"""
return {'fget':self.get, 'fset':self.set, 'doc':self.__doc__}
def __repr__(self):
return """Flag('{name!s}',{value!r})""".format(**self.__dict__)
class _Flag(Flag):
@property
def __doc__(self):
# generate dynamic docs with current values
return self._doctemplate % self.__dict__
_flags = [
_Flag('capture_output',
False,
{True: True,
False: False,
'file': 'file',
},
"""
Select if Gromacs command output is *always* captured.
>>> flags['%(name)s'] = %(value)r
By default a :class:`~gromacs.core.GromacsCommand` will
direct STDOUT and STDERR output from the command itself to
the screen (through /dev/stdout and /dev/stderr). When
running the command, this can be changed with the keywords
*stdout* and *stderr* as described in :mod:`gromacs.core`
and :class:`~gromacs.core.Command`.
If this flag is set to ``True`` then by default STDOUT and
STDERR are captured as if one had set ::
stdout=False, stderr=False
Explicitly setting *stdout* and/or *stderr* overrides the
behaviour described above.
    If set to the special keyword ``"file"`` then the command
writes to the file whose name is given by
``flags['capture_output_filename']``. This file is
*over-written* for each command. In this way one can
investigate the output from the last command (presumably
because it failed). STDOUT and STDERR are captured into
this file by default. STDERR is printed first and then
STDOUT, which does not necessarily reflect the order of
output one would see on the screen.
The default is %(default)r.
"""
),
_Flag('capture_output_filename',
'gromacs_captured_output.txt',
doc="""
    Name of the file that captures output if ``flags['capture_output'] = "file"``
>>> flags['%(name)s'] = %(value)r
This is an *experimental* feature. The default is %(default)r.
"""),
]
#: Global flag registry for :mod:`gromacs.environment`.
#: Can be accessed like a dictionary and appears to the casual user as such.
flags = FlagsDynamicDocs(*_flags)
del _flags
# only for sphinx docs
class flagsDocs(object):
__doc__ = flags.doc()
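# Illustrative usage sketch (not part of the original module): read and set a
# flag through the pseudo-dictionary; 'file' is one of the allowed values.
def _example_flags_usage():
    flags['capture_output'] = 'file'
    return flags['capture_output'], flags['capture_output_filename']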
|
jandom/GromacsWrapper
|
gromacs/environment.py
|
Python
|
gpl-3.0
| 8,025
|
[
"Gromacs",
"MDAnalysis"
] |
310af7d6d79cfd17b4cab3787239ddc9a9cc3c15ee621a305fa8a7f53ca2bc2a
|
# es.po
val = {" days." : " días.",
"(all)" : "(todo)",
"(any)" : "(cualquier)",
"(anyone)" : "(cualquiera)",
"(available)" : "(disponible)",
"(blank)" : "(en blanco)",
"(both)" : "(ambos)",
"(everyone)" : "(todos)",
"(master user, not editable)" : "(usuario maestro, no editable)",
"(no change)" : "(sin cambios)",
"(no deduction)" : "(ningún descuento)",
"(none)" : "(nada)",
"(unknown)" : "(desconocido)",
"(use system)" : "(usa el sistema)",
"({0} given, {1} remaining)" : "({0}administradas, {1} pendientes)",
"1 treatment" : "1 tratamiento",
"1 week" : "1 semana",
"1 year" : "1 año",
"2 weeks" : "2 semanas",
"3 months" : "3 meses",
"4 weeks" : "4 semanas",
"5 Year" : "5 años",
"6 months" : "6 meses",
"6 weeks" : "6 semanas",
"8 weeks" : "8 semanas",
"9 months" : "9 meses",
"A (Stray Dog)" : "A (perro abandonado)",
"A description or other information about the animal" : "Descripción u otra información sobre el animal",
"A list of areas this person will homecheck - eg: S60 S61" : "Listado de zonas que esta persona revisará durante la visita a domicilio - ej: S60 S61",
"A movement must have a reservation date or type." : "Un movimiento debe tener una fecha o tipo de reserva.",
"A person is required for this movement type." : "Se necesita una persona para este tipo de movimiento.",
"A publish job is already running." : "Ya hay una tarea de publicación en proceso.",
"A short version of the reference number" : "Una versión corta del número de referencia",
"A task is already running." : "Todavía hay una tarea en curso.",
"A unique number to identify this movement" : "Referencia única para este movimiento",
"A unique reference for this litter" : "Referencia única para esta camada",
"A4" : "A4",
"ACO" : "ACO",
"AM" : "AM",
"ASM" : "ASM",
"ASM 3 is compatible with your iPad and other tablets." : "ASM 3 es compatible con su iPad y otras tabletas.",
"ASM News" : "Noticias de ASM",
"ASM can track detailed monthly and annual figures for your shelter. Install the Monthly Figures and Annual Figures reports from Settings-Reports-Browse sheltermanager.com" : "ASM puede mostrar las cifras mensuales y anuales de su refugio. Instale Mothly Figures and Annual Figures reports desde Settings-Reports-Browse sheltermanager.com",
"ASM comes with a dictionary of 4,000 animal names. Just click the generate random name button when adding an animal." : "ASM contiene un diccionario de 4.000 nombre de animales. Haga click sobre el botón generador automático cuando añada un animal.",
"ASM will remove this animal from the waiting list after a set number of weeks since the last owner contact date." : "ASM eliminará este animal de la lista de espera después de un número determinado de semanas desde la última fecha de contacto con el propietario.",
"Abandoned" : "Abandonado",
"Abuse" : "Maltrato",
"Abyssinian" : "Abisinio",
"Access System Menu" : "Acceder al menú del sistema",
"Account" : "Cuenta",
"Account Types" : "Tipos de cuenta",
"Account code '{0}' has already been used." : "El código de cuenta '{0}' ya ha sido usado.",
"Account code '{0}' is not valid." : "El código de cuenta '{0}' no es válido.",
"Account code cannot be blank." : "El código de cuenta no puede estar en blanco.",
"Account disabled." : "Cuenta deshabilitada.",
"Accountant" : "Contable",
"Accounts" : "Cuentas",
"Accounts need a code." : "Las cuentas necesitan un código.",
"Active" : "Activo",
"Active Incidents" : "Incidentes activos",
"Active Trap Loans" : "Préstamos de trampas activos",
"Active users: {0}" : "Usuarios activos: {0}",
"Add" : "Añadir",
"Add Accounts" : "Añadir cuentas",
"Add Animal" : "Añadir animal",
"Add Animals" : "Añadir animales",
"Add Appointment" : "Añadir una cita",
"Add Call" : "Añadir llamada",
"Add Citations" : "Añadir Citaciones",
"Add Clinic Appointment" : "Añadir una cita para la clínica",
"Add Cost" : "Añadir costes",
"Add Diary" : "Añadir agenda",
"Add Diets" : "Añadir dietas",
"Add Document to Repository" : "Añadir documento al repositorio",
"Add Flag" : "Añadir marca",
"Add Found Animal" : "Añadir animal encontrado",
"Add Incidents" : "Añadir Incidentes",
"Add Investigation" : "Añadir investigación",
"Add Invoice Item" : "Añade un concepto de la factura",
"Add Licenses" : "Añadir Licencias",
"Add Litter" : "Añadir camada",
"Add Log" : "Añadir registro",
"Add Log to Animal" : "Añadir registro al animal",
"Add Lost Animal" : "Añadir animal perdido",
"Add Media" : "Añadir media",
"Add Medical Records" : "Añadir historial clínico",
"Add Message" : "Añadir mensaje",
"Add Movement" : "Añadir movimientos",
"Add Payments" : "Añadir Pagos",
"Add Person" : "Añadir persona",
"Add Report" : "Añadir informe",
"Add Rota" : "Añadir turnos",
"Add Stock" : "Añadir existencias",
"Add Tests" : "Añadir pruebas",
"Add Transport" : "Añadir transporte",
"Add Trap Loans" : "Añadir préstamo de trampa",
"Add Users" : "Añadir usuarios",
"Add Vaccinations" : "Añadir vacunas",
"Add Vouchers" : "Añadir Vouchers",
"Add Waiting List" : "Añadir lista de espera",
"Add a diary note" : "Añadir una nota del diario",
"Add a found animal" : "Añadir un animal encontrado",
"Add a log entry" : "Añadir registro de entrada",
"Add a lost animal" : "Añadir un animal perdido",
"Add a medical regimen" : "Añadir un tratamiento médico",
"Add a new animal" : "Añadir nuevo animal",
"Add a new log" : "Añadir un nuevo registro",
"Add a new person" : "Añadir una nueva persona",
"Add a person" : "Añadir una persona",
"Add a photo" : "Agregar una foto",
"Add a test" : "Agregue una prueba",
"Add a vaccination" : "Añadir una vacuna",
"Add account" : "Añadir nueva cuenta",
"Add additional field" : "Agregar campo adicional",
"Add an animal to the waiting list" : "Añadir una animal a la lista de espera",
"Add citation" : "Añadir citación",
"Add cost" : "Añadir coste",
"Add details of this email to the log after sending" : "Añadir detalles de este e-mail para el registro después del envío",
"Add diary" : "Añadir diario",
"Add diary task" : "Añadir tarea de agenda",
"Add diet" : "Añadir dieta",
"Add extra images for use in reports and documents" : "Añadir imágenes adicionales para su uso en informes y documentos",
"Add form field" : "Añadir campo de formulario",
"Add found animal" : "Añadir animal encontrado",
"Add investigation" : "Añadir investigación",
"Add license" : "Añadir licencia",
"Add litter" : "Añadir camada",
"Add log" : "Añadir registro",
"Add lost animal" : "Agregar animal perdido",
"Add medical profile" : "Agregar perfil medico",
"Add medical regimen" : "Añadir tratamiento médico",
"Add message" : "Añadir mensaje",
"Add movement" : "Añadir movimiento",
"Add online form" : "Agregar formulario en línea",
"Add payment" : "Añadir pago",
"Add person" : "Añadir persona",
"Add report" : "Añadir informe",
"Add role" : "Añadir rol",
"Add rota item" : "Añadir al turno",
"Add stock" : "Añadir existencia",
"Add template" : "Agregar plantilla",
"Add test" : "Agregar prueba",
"Add this text to all animal descriptions" : "Añadir este texto a todas las descripciones de animales",
"Add to log" : "Añadir a registro",
"Add transport" : "Añadir transporte",
"Add trap loan" : "Agregar pago por préstamo de trampa",
"Add user" : "Añadir usuario",
"Add vaccination" : "Añadir vacuna",
"Add voucher" : "Añadir un vale",
"Add waiting list" : "Añadir lista de espera",
"Add {0}" : "Agregar {0}",
"Added" : "Añadido",
"Added by {0} on {1}" : "Añadido por {0} el {1}",
"Additional" : "Adicional",
"Additional Fields" : "Campos Adicionales",
"Additional date field '{0}' contains an invalid date." : "El campo de fecha adicional '(0)' contiene una fecha no válida.",
"Additional fields" : "Campos adicionales",
"Additional fields need a name, label and type." : "Los campos adicionales necesitan un nombre, etiqueta y tipo.",
"Address" : "Dirección",
"Address Contains" : "La dirección contiene",
"Address contains" : "La dirección contiene",
"Administered" : "Administrado",
"Administering Vet" : "Veterinario",
"Adopt" : "Adoptar",
"Adopt an animal" : "Adopta un animal",
"Adoptable" : "Adoptable",
"Adoptable Animal" : "Animal adoptable",
"Adoptable and published for the first time" : "Adoptable y publicado por primera vez",
"Adopted" : "Adoptado",
"Adopted Animals" : "Animales adoptados",
"Adopted Transferred In {0}" : "Adoptado y trasladado {0}",
"Adoption" : "Adopción",
"Adoption Coordinator" : "Coordinador adopciones",
"Adoption Coordinator and Fosterer" : "Coordinador de adopciones y casa de acogida",
"Adoption Event" : "En adopción",
"Adoption Fee" : "Tarifa de la adopción",
"Adoption Number" : "Número de adopción",
"Adoption fee donations" : "Tasa de donaciones de adopciones",
"Adoption movements must have a valid adoption date." : "Los movimientos de adopción deben tener una fecha de adopción válida.",
"Adoption successfully created." : "Adopción creada correctamente.",
"Adoptions {0}" : "Adopciones {0}",
"Adult" : "Adulto",
"Advanced" : "Avanzado",
"Advanced find animal screen defaults to on shelter" : "Ventana búsqueda avanzada de animales por defecto para el refugio",
"Affenpinscher" : "Affenpinscher",
"Afghan Hound" : "Lebrel Afgano",
"African Grey" : "Loro Gris Africano (Yaco)",
"After the user presses submit and ASM has accepted the form, redirect the user to this URL" : "Después de que el usuario presiona enviar y ASM ha aceptado la forma, redirigir al usuario a la URL",
"Age" : "Edad",
"Age Group" : "Grupo de edad",
"Age Group 1" : "Grupo de edad 1",
"Age Group 2" : "Grupo de edad 2",
"Age Group 3" : "Grupo de edad 3",
"Age Group 4" : "Grupo de edad 4",
"Age Group 5" : "Grupo de edad 5",
"Age Group 6" : "Grupo de edad 6",
"Age Group 7" : "Grupo de edad 7",
"Age Group 8" : "Grupo de edad 8",
"Age Groups" : "Grupos de edad",
"Age groups are assigned based on the age of an animal. The figure in the left column is the upper limit in years for that group." : "Los grupos de edad se asignan en base a la edad del animal. La cifra de la columna izquierda es el límite superior en años para el grupo.",
"Aged Between" : "De edad entre",
"Aged From" : "Desde la edad de",
"Aged To" : "Hasta la edad de",
"Aggression" : "Agresión",
"Airedale Terrier" : "Airedale Terrier",
"Akbash" : "Akbash",
"Akita" : "Akita",
"Alaskan Malamute" : "Malamute de Alaska",
"Alerts" : "Avisos",
"All Animals" : "Todos los animales",
"All On-Shelter Animals" : "Todos los animales del refugio",
"All Publishers" : "Todos los publicadores",
"All accounts" : "Todas las cuentas",
"All animal care officers on file." : "Todos los cuidadores de animales.",
"All animal shelters on file." : "Todos los refugios de animales en el archivo.",
"All animals matching current publishing options." : "Todos los animales que correspondan a las opciones de publicación actuales.",
"All animals on the shelter." : "Todos los animales del refugio.",
"All animals where the hold ends today." : "Todos los animales cuya retención acaba hoy.",
"All animals who are currently held in case of reclaim." : "Todos los animales que están retenidos actualmente por si son reclamados.",
"All animals who are currently quarantined." : "Todos los animales que están en cuarentena.",
"All animals who are flagged as not for adoption." : "Todos los animales que están marcados para no adopción.",
"All animals who have been on the shelter longer than {0} months." : "Todos los animales que llevan en el albergue más de {0} meses.",
"All animals who have not been microchipped" : "Todos los animales que no tienen microchip",
"All banned owners on file." : "Todos los propietarios bloqueados registrados.",
"All diary notes" : "Todas las notas del diario",
"All donors on file." : "Todos los donantes registrados.",
"All drivers on file." : "Todos los conductores registrados.",
"All existing data in your database will be REMOVED before importing the CSV file. This removal cannot be reversed." : "Todos los datos de la case de datos se ELIMINARÁN antes de importar el archivo CSV. Esta eliminación no se puede revocar.",
"All fields should be completed." : "Se deben completar todos los campos.",
"All fosterers on file." : "Todas las casas de acogida del archivo.",
"All homechecked owners on file." : "Todos los propietarios registrados con la visita a domicilio realizada.",
"All homecheckers on file." : "Todos los colaboradores registrados que realizan visitas a domicilio.",
"All members on file." : "Todos los miembros archivados.",
"All notes upto today" : "Todas las notas hasta hoy",
"All people on file." : "Todas las personas archivadas.",
"All retailers on file." : "Todos los minoristas archivados.",
"All staff on file." : "Todo el personal archivado.",
"All time" : "Todo el tiempo",
"All vets on file." : "Todos los veterinarios archivados.",
"All volunteers on file." : "Todos los voluntarios archivados.",
"Allergies" : "Alergias",
"Allow a fosterer to be selected" : "Permitir que se elija una casa de acogida",
"Allow an adoption coordinator to be selected" : "Permitir selección de coordinador de adopciones",
"Allow creation of payments on the Move-Reserve screen" : "Permitir creación de pagos en la pantalla de Movimientos-Reserva",
"Allow drag and drop to move animals between locations" : "Permitir arrastrar y soltar para mover animales entre ubicaciones",
"Allow duplicate license numbers" : "Permitir números de licencia duplicados",
"Allow duplicate microchip numbers" : "Permitir duplicado de número de microchip",
"Allow overriding of the movement number on the Move menu screens" : "Permitir anular del número de movimientos de las pantallas de menú Move",
"Allow use of OpenOffice document templates" : "Permitir el uso de plantillas de documentos de OpenOffice",
"Alphabetically A-Z" : "Alfabéticamente A-Z",
"Alphabetically Z-A" : "Alfabéticamente Z-A",
"Already Signed" : "Ya firmado",
"Already fostered to this person." : "Ya está acogido por esta persona.",
"Altered" : "Esterilizado",
"Altered Date" : "Fecha Castración",
"Altered Dog - 1 year" : "Perro Castrado- 1 año",
"Altered Dog - 3 year" : "Perro Castrado - 3 años",
"Altering Vet" : "Veterinario encargado de las castraciones",
"Always show an emblem to indicate the current location" : "Mostrar siempre un símbolo para indicar la ubicación actual",
"Amazon" : "Loro del Amazonas",
"Amber" : "Ámbar",
"American" : "Americano",
"American Bulldog" : "Bulldog Americano",
"American Curl" : "Curl Americano",
"American Eskimo Dog" : "Perro Esquimal Americano",
"American Fuzzy Lop" : "Conejo American Fuzzy Lop",
"American Sable" : "Conejo American Sable",
"American Shorthair" : "Pelo Corto Americano",
"American Staffordshire Terrier" : "Staffordshire Terrier Americano",
"American Water Spaniel" : "Perro de Agua Americano",
"American Wirehair" : "American Wirehair",
"Amount" : "Cantidad",
"An age in years, eg: 1, 0.5" : "Edad en años, ej: 1, 0.5",
"An animal cannot have multiple open movements." : "Un animal no puede múltiples movimientos abiertos.",
"An optional comma separated list of email addresses to send the output of this report to" : "Listado de direcciones de correo separados por una coma opcional para enviar este informe a",
"Anatolian Shepherd" : "Pastor de Anatolia",
"Angora Rabbit" : "Conejo Angora",
"Animal" : "Animal",
"Animal '{0}' created with code {1}" : "Animal '{0}' creado con código {1}",
"Animal '{0}' successfully marked deceased." : "Animal '{0}' se ha marcado como fallecido.",
"Animal (optional)" : "Animal (opcional)",
"Animal (via animalname field)" : "Animal (campo del Nombre del Animal)",
"Animal - Additional" : "Animal - Adicional",
"Animal - Death" : "Animal - Deceso",
"Animal - Details" : "Animal - Datos",
"Animal - Entry" : "Animal - Entrada",
"Animal - Health and Identification" : "Animal - Salud e Identificación",
"Animal - Notes" : "Animal - Notas",
"Animal Codes" : "Códigos de animal",
"Animal Control" : "Control de animales",
"Animal Control Caller" : "Llamada a control animal",
"Animal Control Incident" : "Incidente en el control de animales",
"Animal Control Officer" : "Oficial de control de Animales",
"Animal Control Victim" : "Víctima de Control de Animales",
"Animal Emblems" : "Iconos Animales",
"Animal Flags" : "Indicadores animales",
"Animal Links" : "Enlaces animales",
"Animal Name" : "Nombre del animal",
"Animal Selection" : "Selección de animal",
"Animal Shelter Manager" : "Animal Shelter Manager",
"Animal Shelter Manager Login" : "Acceso a Animal Shelter Manager",
"Animal Sponsorship" : "Patrocinio de los animales",
"Animal Type" : "Tipo de animal",
"Animal Types" : "Tipos de Animales",
"Animal board costs" : "Gastos de alojamiento del animal",
"Animal cannot be deceased before it was brought to the shelter" : "El animal no puede haber fallecido antes de ser traído al refugio",
"Animal code format" : "Formato de código del animal",
"Animal comments MUST contain this phrase in order to match." : "Los comentarios del animal DEBEN contener esta frase para coincidir.",
"Animal control calendar" : "Calendario de control de animales",
"Animal control incidents matching '{0}'." : "Incidentes de control de animales que coincidan con '{0}'.",
"Animal defecation" : "Defecacción de animales",
"Animal descriptions" : "Descripciones de animales",
"Animal destroyed" : "Animal eliminado",
"Animal emblems are the little icons that appear next to animal names in shelter view, the home page and search results." : "Iconos animales son los pequeños iconos que aparecen junto al nombre del animal en la vista refugio, la página de inicio y los resultados de búsqueda.",
"Animal food costs" : "Costos en comida para animales",
"Animal picked up" : "Animal recogido",
"Animal shortcode format" : "Formato de código corto del animal",
"Animals" : "Animales",
"Animals at large" : "Animales sueltos",
"Animals left in vehicle" : "Animales dejados en vehículo",
"Animals matching '{0}'." : "Animales encontrados '{0}'.",
"Animals per page" : "Animales por página",
"Annual" : "Anual",
"Annually" : "Anualmente",
"Anonymize" : "Anonimizar",
"Anonymize personal data after this many years" : "Anonimizar datos personales después de estos años",
"Any animal types, species, breeds, colors, locations, etc. in the CSV file that aren't already in the database will be created during the import." : "Cualquier tipo de animales, especies, razas, colores, ubicaciones, etc, en el archivo CSV que todavía no están en la base de datos se crearán durante la importación.",
"Any health problems the animal has" : "Cualquier tipo de problema de salud que tenga el animal",
"Any information about the animal" : "Cualquier información sobre el animal",
"Any markings or distinguishing features the animal has" : "Cualquier marca o característica distintiva del animal",
"Appaloosa" : "Appaloosa",
"Appenzell Mountain Dog" : "Boyero de Appenzell",
"Applehead Siamese" : "Siamés tradicional",
"Appointment" : "Cita",
"Appointment date must be a valid date" : "La fecha de la cita debe ser una fecha válida",
"Appointment {0}. {1} on {2} for {3}" : "Cita {0}. {1} el {2} para {3}",
"Appointments need a date and time." : "Las citas deben tener una fecha y una hora.",
"Approved" : "Aprobado",
"Apr" : "Abr",
"April" : "Abril",
"Arabian" : "Pura Sangre Árabe",
"Area" : "Área",
"Area Found" : "Zona en la que se encontró",
"Area Lost" : "Área de extravío",
"Area Postcode" : "Código postal de la zona",
"Area where the animal was found" : "Área en que se encontró el animal",
"Area where the animal was lost" : "Área en que se perdió el animal",
"Areas" : "Áreas",
"Arrived" : "Llegada",
"Asset" : "Activo",
"Asset::Premises" : "Activo::Instalaciones",
"At least the last name should be completed." : "Al menos se debe completar el nombre.",
"Attach" : "Adjuntar",
"Attach File" : "Adjuntar archivo",
"Attach Link" : "Adjuntar enlace",
"Attach a file" : "Adjuntar un archivo",
"Attach a link to a web resource" : "Adjuntar un enlace a un recurso web",
"Attach link" : "Adjuntar enlace",
"Audit Trail" : "Auditoría por",
"Aug" : "Ago",
"August" : "Agosto",
"Australian Cattle Dog/Blue Heeler" : "Boyero Australiano",
"Australian Kelpie" : "Kelpie Australiano",
"Australian Shepherd" : "Pastor Australiano",
"Australian Terrier" : "Terrier Australiano",
"Auto log users out after this many minutes of inactivity" : "La sesión de los usuarios se cierra automáticamente después estos minutos de inactividad",
"Auto removed due to lack of owner contact." : "Borrado automático por no disponer de datos acerca del contacto.",
"Automatically cancel any outstanding reservations on an animal when it is adopted" : "Cancelar automáticamente cualquier reserva pendiente de un animal cuando sea adoptado",
"Automatically remove" : "Eliminar automáticamente",
"Automatically return any outstanding foster movements on an animal when it is adopted" : "Devolver automáticamente un animal en acogida al refugio cuando es adoptado",
"Automatically return any outstanding foster movements on an animal when it is transferred" : "Devolver automáticamente un animal en acogida al refugio cuando es trasladado",
"Available for adoption" : "Disponibles para adopción",
"Available sheltermanager.com reports" : "Informes disponibles de sheltermanager.com",
"B (Boarding Animal)" : "A (Animal alojado)",
"Baby" : "Bebé",
"Balance" : "Saldo",
"Balinese" : "Balinés",
"Bank" : "Banco",
"Bank account interest" : "Interés de la cuenta bancaria",
"Bank current account" : "Cuenta corriente del banco",
"Bank deposit account" : "Cuenta bancaria de depósito",
"Bank savings account" : "Cuenta bancaria de ahorros",
"Bank::Current" : "Banco::cuenta corriente",
"Bank::Deposit" : "Banco::Depósito",
"Bank::Savings" : "Banco::Ahorros",
"Banned" : "Vetado",
"Base Color" : "Color base",
"Basenji" : "Basenji",
"Basset Hound" : "Sabueso Basset",
"Batch" : "Lote",
"Batch Number" : "Número de lote",
"Beagle" : "Beagle",
"Bearded Collie" : "Collie Barbudo",
"Beauceron" : "Beauceron",
"Bedlington Terrier" : "Bedlington Terrier",
"Beginning of month" : "Principios de mes",
"Belgian Hare" : "Belgian Hare",
"Belgian Shepherd Dog Sheepdog" : "Pastor Belga",
"Belgian Shepherd Laekenois" : "Pastor Belga Laeknois",
"Belgian Shepherd Malinois" : "Pastor Belga Malinois",
"Belgian Shepherd Tervuren" : "Pastor Belga Tervuren",
"Bengal" : "Bengala",
"Bernese Mountain Dog" : "Boyero de Montaña Bernés",
"Beveren" : "Beveren",
"Bichon Frise" : "Bichón de Pelo Rizado",
"Bird" : "Periquito",
"Birman" : "Sagrado de Birmania",
"Bite" : "Mordedura",
"Biting" : "Mordedura",
"Black" : "Negro",
"Black Labrador Retriever" : "Labrador Retriever Negro",
"Black Mouth Cur" : "Blackmouth Cur",
"Black Tortie" : "Tortie Negro",
"Black and Brindle" : "Negro y Manchado",
"Black and Brown" : "Negro y Marrón",
"Black and Tan" : "Negro y Canela",
"Black and Tan Coonhound" : "Sabueso Negro y Fuego",
"Black and White" : "Blanco y negro",
"Bloodhound" : "Perro de San Huberto",
"Blue" : "Azul",
"Blue Tortie" : "Tortie Azul",
"Bluetick Coonhound" : "Sabueso Bluetick",
"Board and Food" : "Alojamiento y alimentación",
"Boarding" : "Alojamiento",
"Boarding Cost" : "Costes de alojamiento",
"Boarding cost type" : "Tipo de coste de alojamiento",
"Bobtail" : "Bobtail",
"Body" : "Cuerpo",
"Bombay" : "Bombay",
"Bonded" : "Unidos",
"Bonded With" : "Unidos con",
"Books" : "Libros",
"Border Collie" : "Border Collie",
"Border Terrier" : "Border Terrier",
"Bordetella" : "Bordetella",
"Born in Shelter" : "Nacido en el albergue",
"Born on Foster {0}" : "Nacido en casa de acogida {0}",
"Born on Shelter {0}" : "Nacido en el albergue {0}",
"Borzoi" : "Lebrel Ruso Barsoi",
"Boston Terrier" : "Boston Terrier",
"Both" : "Ambos",
"Bouvier des Flanders" : "Bouvier de Flandes",
"Boxer" : "Boxer",
"Boykin Spaniel" : "Boykin Spaniel",
"Breed" : "Raza",
"Breed to use when publishing to third party services and adoption sites" : "Raza que se usará para la publicación en servicios de terceros y páginas de adopción",
"Breeds" : "Razas",
"Briard" : "Pastor de Brie",
"Brindle" : "Manchado",
"Brindle and Black" : "Manchado y Negro",
"Brindle and White" : "Manchado y Blanco",
"Britannia Petite" : "Britannia Petite",
"British Shorthair" : "British Shorthair",
"Brittany Spaniel" : "Spaniel Bretón",
"Brotogeris" : "Brotogeris",
"Brought In" : "Traído",
"Brought In By" : "Traído por",
"Brown" : "Marrón",
"Brown and Black" : "Marrón y Negro",
"Brown and White" : "Marrón y Blanco",
"Browse sheltermanager.com" : "Acceso a sheltermanager.com",
"Browse sheltermanager.com and install some reports, charts and mail merges into your new system." : "Navegar a shelterview.com e instalar en su nuevo systema informes, gráficas y combinaciones de correspondencia.",
"Brussels Griffon" : "Grifón de Bruselas",
"Budgie/Budgerigar" : "Periquito Australiano",
"Bulk Complete Diary" : "Diario completo",
"Bulk Complete Medical Records" : "Introducir el mismo registro médico a varios animales",
"Bulk Complete Vaccinations" : "Administrar la misma vacuna a varios animales",
"Bulk Complete Waiting List" : "Lista de espera completa",
"Bulk Regimen" : "Administrar el mismo tratamiento a varios animales",
"Bulk Test" : "Realizar el mismo test a varios animales",
"Bulk Transport" : "Transportar varios animales a la vez",
"Bulk Vaccination" : "Administrar la misma vacuna a varios animales",
"Bulk change animals" : "Modificar los datos de varios animales a la vez",
"Bull Terrier" : "Bull Terrier",
"Bullmastiff" : "Bullmastiff",
"Bunny Rabbit" : "Bunny Rabbit",
"Burmese" : "Burmés",
"Burmilla" : "Burmilla",
"By" : "Por",
"CC" : "CC",
"CSV of animal/adopter data" : "CSV de la información del animal/adoptante",
"CSV of animal/medical data" : "CSV de datos médicos de los animales",
"CSV of incident data" : "CSV de datos de incidentes",
"CSV of license data" : "CSV de datos de licencia",
"CSV of payment data" : "CSV de datos de pago",
"CSV of person data" : "CSV de la información de la persona",
"Caique" : "Caique",
"Cairn Terrier" : "Cairn Terrier",
"Calendar View" : "Vista de calendario",
"Calendar view" : "Vista de calendario",
"Calico" : "Gato Galano",
"Californian" : "Californian",
"Call" : "Llamar",
"Call Date/Time" : "Llamada Fecha/Hora",
"Caller" : "Llamada de",
"Caller Name" : "Nombre",
"Caller Phone" : "Teléfono móvil",
"Camel" : "Camello",
"Can Login" : "Puede registrar",
"Can afford donation?" : "¿Puede permitirse una donación?",
"Can't reserve an animal that has an active movement." : "No se puede reservar un animal que tiene un movimiento activado.",
"Canaan Dog" : "Perro de Canaan",
"Canadian Hairless" : "Sphynx - Gato Esfinge",
"Canary" : "Canario",
"Cancel" : "Cancelar",
"Cancel holds on animals this many days after the brought in date, or 0 to never cancel" : "Cancelar mantiene a los animales esta cantidad de días después de que el puesto en la fecha, o 0 para no cancelar",
"Cancel unadopted reservations after" : "Cancelar reservas de animales no adoptados después de",
"Cancel unadopted reservations after this many days, or 0 to never cancel" : "Cancelar reservas de animales no adoptados después de este número de días, o 0 para no cancelar nunca",
"Cancelled" : "Cancelado",
"Cancelled Reservation" : "Reserva cancelada",
"Cane Corso Mastiff" : "Cane Corso",
"Carolina Dog" : "Dingo Americano",
"Cash" : "Caja",
"Cat" : "Gato",
"Catahoula Leopard Dog" : "Catahoula Cur",
"Category" : "Categoría",
"Cats" : "Gatos",
"Cattery" : "Gatera",
"Cattle Dog" : "Boyero",
"Cavalier King Charles Spaniel" : "Cavalier King Charles Spaniel",
"Cell" : "Teléfono móvil",
"Cell Phone" : "Teléfono móvil",
"Champagne D'Argent" : "Champagne D’Argent",
"Change" : "Modificar",
"Change Accounts" : "Cambiar cuentas",
"Change Animals" : "Modificar animales",
"Change Citations" : "Cambiar Citaciones",
"Change Clinic Apointment" : "Cambiar cita de la clínica",
"Change Cost" : "Modificar coste",
"Change Date Required" : "Cambio de fecha requerido",
"Change Diets" : "Modificar dietas",
"Change Found Animal" : "Modificar animal encontrado",
"Change Incidents" : "Cambiar Incidentes",
"Change Investigation" : "Cambiar investigación",
"Change Licenses" : "Cambiar Licencias",
"Change Litter" : "Modificar camada",
"Change Log" : "Registro de cambios",
"Change Lost Animal" : "Modificar animal perdido",
"Change Media" : "Modificar soporte",
"Change Medical Records" : "Modificar historias clínicas",
"Change Movement" : "Modificar movimiento",
"Change Password" : "Cambiar la contraseña",
"Change Payments" : "Cambiar Pagos",
"Change Person" : "Modificar persona",
"Change Publishing Options" : "Cambiar opciones de publicación",
"Change Report" : "Modificar informe",
"Change Rota" : "Cambiar cuadrante",
"Change Stock" : "Modificar existencias",
"Change System Options" : "Modificar opciones del sistema",
"Change Tests" : "Cambiar pruebas",
"Change Transactions" : "Cambiar las transacciones",
"Change Transport" : "Modificar transporte",
"Change Trap Loans" : "Cambiar préstamos de trampas",
"Change User Settings" : "Cambie la configuración del usuario",
"Change Vaccinations" : "Modificar vacunaciones",
"Change Vouchers" : "Modificar cupones",
"Change Waiting List" : "Modificar liosta de espera",
"Change date required on selected treatments" : "Modificar fecha requerida de los tratamientos seleccionados",
"Changed Mind" : "Cambio de opinión",
"Chart" : "Gráfico",
"Chart (Bar)" : "Gráfico (barras)",
"Chart (Line)" : "Gráfico (líneas)",
"Chart (Pie)" : "Gráfico (tarta)",
"Chart (Point)" : "Gráfico (puntos)",
"Chart (Steps)" : "Gráfico (pasos)",
"Chartreux" : "Chartreux",
"Check" : "Revisar",
"Check License" : "Verificar licencia",
"Check No" : "Nº cheque",
"Checkbox" : "Casilla de verificación",
"Checked By" : "Comprobado por",
"Checkered Giant" : "Checkered Giant",
"Cheque" : "Cheque",
"Chesapeake Bay Retriever" : "Perdiguero Chesapeake Bay",
"Chicken" : "Pollo",
"Chihuahua" : "Chihuahua",
"Children" : "Niños",
"Chinchilla" : "Chinchilla",
"Chinese Crested Dog" : "Perro Crestado de China",
"Chinese Foo Dog" : "Perro Fu Chino",
"Chlamydophila" : "Chlamydophila",
"Chocolate" : "Chocolate",
"Chocolate Labrador Retriever" : "Labrador Retriever Chocolate",
"Chocolate Tortie" : "Tortie Chocolate",
"Chow Chow" : "Chow Chow",
"Cinnamon" : "Cinnamon",
"Cinnamon Tortoiseshell" : "Tortie Canela",
"Citation Type" : "Tipo de Citación",
"Citation Types" : "Tipo de Citaciones",
"Citations" : "Citaciones",
"City" : "Ciudad",
"City contains" : "La ciudad contiene",
"Class" : "Clase",
"Clear" : "Limpiar",
"Clear and sign again" : "Borrar y firmar de nuevo",
"Clear tables before importing" : "Limpiar las tablas antes de importar",
"Clinic" : "Clínica",
"Clinic Calendar" : "Calendario de la clínica",
"Clinic Invoice - {0}" : "Factura de la clínica - {0}",
"Clinic Statuses" : "Estados clínicos",
"Clone" : "Duplicar",
"Clone Animals" : "Clonar animales",
"Clone Rota" : "Clonar turnos",
"Clone the rota this week to another week" : "Clonar cuadrante de esta semana a otra semana",
"Cloning..." : "Clonando...",
"Close" : "Cerrar",
"Clumber Spaniel" : "Clumber Spaniel",
"Clydesdale" : "Clydesdale",
"Coat" : "Pelaje",
"Coat Type" : "Tipo de pelaje",
"Coat Types" : "Tipos de pelaje",
"Cockapoo" : "Cockerpoo",
"Cockatiel" : "Cockatiel (ninfa o carolina)",
"Cockatoo" : "Cacatúa",
"Cocker Spaniel" : "Cocker Spaniel",
"Code" : "Código",
"Code contains" : "Código contiene",
"Code format tokens:" : "Patrones de formato de código:",
"Collie" : "Collie",
"Color" : "Color",
"Color to use when publishing to third party services and adoption sites" : "Color a utilizar cuando se publica en servicios de terceros y páginas de adopción",
"Colors" : "Colores",
"Columns" : "Columnas",
"Columns displayed" : "Columnas visibles",
"Comma separated list of units for this location, eg: 1,2,3,4,Isolation,Pen 5" : "Lista de unidades para esta ubicación separadas por comas, por ejemplo: 1,2,3,4, aislamiento",
"Comments" : "Comentarios",
"Comments Contain" : "Los comentarios contienen",
"Comments contain" : "Los comentarios contienen",
"Comments copied to web preferred media." : "Los comentarios han sido copiados en los medios de comunicación preferidos de la web.",
"Complaint" : "Denuncia",
"Complete" : "Completo",
"Complete Tasks" : "Completar tareas",
"Completed" : "Completado",
"Completed Between" : "Completado entre",
"Completed Type" : "Tipo completado",
"Completed notes upto today" : "Notas completas hasta hoy",
"Completion Date" : "Fecha de vencimiento",
"Completion Type" : "Tipo de vencimiento",
"Configuration" : "Configuración",
"Confirm" : "Confirmar",
"Confirm Password" : "Confirmar contraseña",
"Confirmation message" : "Mensaje de confirmación",
"Confirmed" : "Confirmado",
"Consulting Room" : "Sala de consulta",
"Consulting Room - {0}" : "Sala de consulta - {0}",
"Consumed" : "Consumido",
"Contact" : "Contacto",
"Contact Contains" : "El contacto contiene",
"Conure" : "Cotorra",
"Convert this reservation to an adoption" : "Convertir esta reserva en adopción",
"Coonhound" : "Sabueso",
"Copy animal comments to the notes field of the web preferred media for this animal" : "Copiar comentarios del animal para las notas web de este animal",
"Copy from animal comments" : "Copiar de los comentarios del animal",
"Copy of {0}" : "Copia de {0}",
"Corded" : "Mechones",
"Corgi" : "Corgi Galés",
"Cornish Rex" : "Cornish rex",
"Cost" : "Coste",
"Cost For" : "Precio de",
"Cost Type" : "Tipo de coste",
"Cost Types" : "Tipos de gastos",
"Cost date must be a valid date" : "Fecha Costo debe ser una fecha válida",
"Cost record" : "Registro de costes",
"Costs" : "Gastos",
"Costs need a date and amount." : "Los costes necesitan una fecha y un importe.",
"Coton de Tulear" : "Coton de Tuléar",
"Could not find animal with name '{0}'" : "No se pudo encontrar el animal con el nombre '{0}'",
"Country" : "País",
"Courtesy Listing" : "Listado de cortesía (para la difusión de animales externos al refugio)",
"Cow" : "Vaca",
"Cream" : "Crema",
"Create" : "Crear",
"Create Animal" : "Crear animal",
"Create Log" : "Crear registro",
"Create Payment" : "Crear un pago",
"Create Waiting List" : "Crear lista de espera",
"Create a cost record" : "Crear un registros de costes",
"Create a due or received payment record from this appointment" : "Crear un pago pendiente o recibido a partir de esta cita",
"Create a new animal by copying this one" : "Crear un nuevo animal copiando éste",
"Create a new animal from this found animal record" : "Crear un animal para esta ficha de animal encontrado",
"Create a new animal from this incident" : "Crear un nuevo animal a partir de este incidente",
"Create a new animal from this waiting list entry" : "Crear un animal para esta entrada de lista de espera",
"Create a new document" : "Crear un nuevo documento",
"Create a new template" : "Crear una plantilla",
"Create a new template by copying the selected template" : "Crear una nueva plantilla copiando la plantilla seleccionada",
"Create a new waiting list entry from this found animal record" : "Crear una entrada de lista de espera para el registro de este animal encontrado",
"Create and edit" : "Crear y editar",
"Create boarding cost record when animal is adopted" : "Crear registro de costo de alojamiento cuando el animal es adoptado",
"Create diary notes from a task" : "Crear notas de agenda a partir de una tarea",
"Create missing lookup values" : "Crear cambios de lookup values",
"Create note this many days from today, or 9999 to ask" : "Crear nota a estos dias de hoy, o 9999 a preguntar",
"Create this message" : "Crear este mensaje",
"Create this person" : "Crear esta persona",
"Created By" : "Creado por",
"Creating cost and cost types creates matching accounts and transactions" : "Cuando se crean costes y tipos de costes se crean las cuentas y las transacciones correspondientes",
"Creating payments and payments types creates matching accounts and transactions" : "Cuando se crean pagos y tipos de pagos se crean las cuentas y las transacciones correspondientes",
"Creating..." : "Creando...",
"Credit Card" : "Tarjeta de Crédito",
"Creme D'Argent" : "Creme D’Argent",
"Criteria:" : "Criterio:",
"Crossbreed" : "Raza mestiza",
"Cruelty Case" : "Caso de crueldad",
"Culling" : "Eutanasia",
"Curly" : "Rizado",
"Current" : "Actual",
"Current Vet" : "Veterinario actual",
"Cymric" : "Cymric",
"D (Dog)" : "D (Perro)",
"DD = current day" : "DD = día en curso",
"DDL dump (DB2)" : "Volcado DDL (DB2)",
"DDL dump (MySQL)" : "Volcado DDL (MySQL)",
"DDL dump (PostgreSQL)" : "Volcado DDL (PostgreSQL)",
"DHLPP" : "DHLPP",
"DO NOT use this field to store notes about what the person is looking for." : "NO usar este campo para guardar notas sobre lo que la persona está buscando.",
"DOA {0}" : "FDA {0}",
"DOB" : "FDN",
"Dachshund" : "Teckel",
"Daily Boarding Cost" : "Coste diario de alojamiento",
"Dalmatian" : "Dálmata",
"Dandi Dinmont Terrier" : "Dandi Dinmont Terrier",
"Data" : "Data",
"Data Protection" : "Protección de datos",
"Database" : "Base de datos",
"Date" : "Fecha",
"Date '{0}' is not valid." : "La fecha ‘{0}’ no es válida.",
"Date Brought In" : "Fecha en que fue traído",
"Date Found" : "Fecha en la que se encontró",
"Date Lost" : "Fecha de extravío",
"Date Of Birth" : "Fecha de nacimiento",
"Date Put On" : "Fecha de administración",
"Date Removed" : "Fecha Eliminada",
"Date Reported" : "Fecha del informe",
"Date and notes are mandatory." : "Fecha y notas son obligatorias.",
"Date brought in cannot be blank" : "La fecha de traída no puede estar en blanco",
"Date brought in cannot be in the future." : "La fecha de entrada no puede ser posterior a la fecha de hoy.",
"Date brought in is not valid" : "La fecha de entrega no es válida",
"Date found cannot be blank" : "La campo fecha encontrado no puede estar vacía",
"Date found cannot be blank." : "El campo fecha de encuentro no puede estar vacía.",
"Date lost cannot be blank" : "La fecha de desaparición no puede estar vacía",
"Date lost cannot be blank." : "La fecha de pérdida no puede estar vacía.",
"Date of Birth" : "Fecha de nacimiento",
"Date of birth cannot be blank" : "La fecha de nacimiento no puede estar en blanco",
"Date of birth cannot be in the future." : "La fecha de nacimiento no puede ser posterior a la fecha de hoy.",
"Date of birth is not valid" : "La fecha de nacimiento no es válida",
"Date of last owner contact" : "Fecha último contacto con el propietario",
"Date put on" : "Fecha de administración",
"Date put on cannot be blank" : "La fecha de administración no puede estar en blanco",
"Date put on list" : "Fecha de inscripción en lista",
"Date removed" : "Fecha eliminada",
"Date reported cannot be blank" : "Fecha de informe no puede estar vacía",
"Date reported cannot be blank." : "Fecha de informe no puede estar vacía.",
"Date/Time" : "Fecha/hora",
"Day" : "Día",
"Day Pivot" : "Día pivote",
"Days On Shelter" : "Días en el refugio",
"Dead On Arrival" : "Muerto al llegar",
"Dead animal" : "Animal muerto",
"Dead on arrival" : "Muerto al llegar",
"Death" : "Muerte",
"Death Comments" : "Comentarios sobre fallecimiento",
"Death Reason" : "Razones de la muerte",
"Death Reasons" : "Razones de muerte",
"Debit Card" : "Tarjeta de débito",
"Dec" : "Dic",
"Deceased" : "Fallecido",
"Deceased Date" : "Fecha de defunción",
"December" : "Diciembre",
"Declawed" : "Uñas extirpadas",
"Declined" : "Declinado",
"Default Breed" : "Raza por defecto",
"Default Brought In By" : "Por defecto aportados por",
"Default Coat Type" : "Tipo de pelaje por defecto",
"Default Color" : "Color por defecto",
"Default Cost" : "Coste por defecto",
"Default Death Reason" : "Motivo de fallecimiento predeterminado",
"Default Diary Person" : "Diario predeterminado de la persona",
"Default Entry Reason" : "Motivo de entrada predeterminado",
"Default Incident Type" : "Tipo de incidente por defecto",
"Default Location" : "Ubicación por defecto",
"Default Log Filter" : "Por defecto filtro de registro",
"Default Log Type" : "Tipo de registro por defecto",
"Default Payment Method" : "Método de Pago por Defecto",
"Default Payment Type" : "Tipo de pago por defecto",
"Default Reservation Status" : "Estado en reserva por defecto",
"Default Return Reason" : "Motivo de devolución por defecto",
"Default Rota Shift" : "Turno del cuadrante por defecto",
"Default Size" : "Tamaño por defecto",
"Default Species" : "Especie por defecto",
"Default Test Type" : "Predeterminado Tipo de prueba",
"Default Type" : "Tipo por defecto",
"Default Vaccination Type" : "Tipo de vacunación por defecto",
"Default Value" : "Valor por defecto",
"Default daily boarding cost" : "Coste diario de estancia por defecto",
"Default destination account for payments" : "Cuenta de destino por defecto para pagos",
"Default image for documents" : "Imagen por defecto para los documentos",
"Default image for this record and the web" : "Imagen predeterminada para este archivo y la web",
"Default source account for costs" : "Cuenta de origen por defecto para gastos",
"Default to advanced find animal screen" : "Por defecto usar la búsqueda avanzada de animales",
"Default to advanced find person screen" : "Por defecto usar la búsqueda avanzada de personas",
"Default transaction view" : "Vista transacción por defecto",
"Default urgency" : "Urgente por defecto",
"Default video for publishing" : "Video predeterminado para publicar",
"Default view" : "Vista predeterminada",
"Defaults" : "Por defecto",
"Defaults formats for code and shortcode are TYYYYNNN and NNT" : "Por defecto el formato del codigo y codigo corto serán TYYYYNNN y NNT",
"Delete" : "Borrar",
"Delete Accounts" : "Eliminar cuentas",
"Delete Animals" : "Borrar animales",
"Delete Citations" : "Borrar citaciones",
"Delete Clinic Appointment" : "Borrar cita para la clínica",
"Delete Cost" : "Eliminar coste",
"Delete Diary" : "Eliminar agenda",
"Delete Diets" : "Borrar Dietas",
"Delete Document from Repository" : "Eliminar el documento del repositorio",
"Delete Found Animal" : "Borrar Animal Encontrado",
"Delete Incidents" : "Borrar incidentes",
"Delete Incoming Forms" : "Eliminar las formas entrantes",
"Delete Investigation" : "Borrar inverstigación",
"Delete Licenses" : "Borrar licencias",
"Delete Litter" : "Borrar camada",
"Delete Log" : "Borrar registro",
"Delete Lost Animal" : "Borrar Animal Perdido",
"Delete Media" : "Eliminar soporte",
"Delete Medical Records" : "Eliminar historia clínica",
"Delete Movement" : "Borrar movimiento",
"Delete Payments" : "Borrar pagos",
"Delete Person" : "Borrar Persona",
"Delete Regimen" : "Eliminar tratamiento",
"Delete Report" : "Borrar informe",
"Delete Rota" : "Borrar cuadrante",
"Delete Stock" : "Borrar existencia",
"Delete Tests" : "Eliminar pruebas",
"Delete Transport" : "Borrar transporte",
"Delete Trap Loans" : "Borrar préstamos de trampas",
"Delete Treatments" : "Eliminar Tratamientos",
"Delete Vaccinations" : "Borrar vacunaciones",
"Delete Vouchers" : "Borrar Vouchers",
"Delete Waiting List" : "Eliminar lista de espera",
"Delete all rota entries for this week" : "Borrar todas las entradas del cuadrante de esta semana",
"Delete this animal" : "Borrar este animal",
"Delete this incident" : "Borrar este incidente",
"Delete this person" : "Eliminar esta persona",
"Delete this record" : "Eliminar este registro",
"Delete this waiting list entry" : "Eliminar esta entrada en la lista de espera",
"Denied" : "Denegado",
"Deposit" : "Depósito",
"Deposit Account" : "Cuenta depósito",
"Deposit Returned" : "Depósito devuelto",
"Description" : "Descripción",
"Description Contains" : "La descripción contiene",
"Description cannot be blank" : "La descripción no se debe estar vacía",
"Deselect" : "No seleccionar",
"Details" : "Detalles",
"Devon Rex" : "Devon Rex",
"Dialog title" : "Título de diálogo",
"Diary" : "Diario",
"Diary Task" : "Tareas del diario",
"Diary Task: {0}" : "Tarea Diario: {0}",
"Diary Tasks" : "Tareas de agenda",
"Diary and Messages" : "Agenda y mensajes",
"Diary calendar" : "Calendario diario",
"Diary date cannot be blank" : "La fecha de la agenda no puede estar vacía",
"Diary date is not valid" : "La fecha de la agenda no es válida",
"Diary for {0}" : "Diario para {0}",
"Diary note cannot be blank" : "La nota de agenda no puede estar vacía",
"Diary note {0} marked completed" : "Nota del diario {0} marcada como completada",
"Diary note {0} rediarised for {1}" : "Nota de agenda {0} reagendada al {1}",
"Diary notes for: {0}" : "Notas del diario para: {0}",
"Diary notes need a date and subject." : "Las notas del diario necesitan de una fecha y asunto.",
"Diary subject cannot be blank" : "El asunto de la agenda no puede estar vacío",
"Diary task items need a pivot, subject and note." : "Los elementos de tareas del diario necesitan un pivote, el sujeto y nota.",
"Diary tasks need a name." : "Las tareas de agenda necesitan un nombre.",
"Did not ask" : "No preguntó",
"Did you know?" : "¿Sabías que?",
"Died" : "Muerto",
"Died off shelter" : "Muerto fuera del refugio",
"Died {0}" : "Muerte",
"Diet" : "Dieta",
"Diets" : "Dietas",
"Diets need a start date." : "Las dietas necesitan una fecha de inicio.",
"Dispatch" : "Enviar",
"Dispatch Address" : "Dirección de envío",
"Dispatch Between" : "Enviado entre",
"Dispatch Date/Time" : "Envío Fecha/Hora",
"Dispatch {0}: {1}" : "Envío {0}: {1}",
"Dispatched ACO" : "Remitido al oficial de control de los animales",
"Display" : "Visor",
"Display Index" : "Índice de visualización",
"Display a search button at the right side of the search box" : "Mostrar un botón de búsqueda en la parte derecha del cuadro de búsqueda",
"Distemper" : "Moquillo",
"Do Not Publish" : "No publicar",
"Do Not Register Microchip" : "No registrar microchip",
"Do not show" : "No mostrar",
"Doberman Pinscher" : "Doberman Pincher",
"Document" : "Documento",
"Document Link" : "Enlace del documento",
"Document Repository" : "Depósito de documentos",
"Document Templates" : "Plantillas de documentos",
"Document file" : "Documento del archivo",
"Document signed" : "Documento firmado",
"Document signing request" : "Solicitud de firma de documento",
"Document templates" : "Plantillas de documentos",
"Documents" : "Documentos",
"Dog" : "Perro",
"Dogo Argentino" : "Dogo Argentino",
"Dogs" : "Perros",
"Dogue de Bordeaux" : "Dogo de Burdeos",
"Domestic Long Hair" : "Doméstico de pelo largo",
"Domestic Medium Hair" : "Doméstico de pelo mediano",
"Domestic Short Hair" : "Doméstico de pelo corto",
"Don't create a cost record" : "No crear un archivo de costeo",
"Don't scale" : "No escalar",
"Donated" : "Donado",
"Donation" : "Donación",
"Donation?" : "¿Donación?",
"Donations for animals entering the shelter" : "Donaciones para animales ingresando al refugio",
"Done" : "Finalizado",
"Donkey" : "Burro",
"Donkey/Mule" : "Burro/Mula",
"Donor" : "Donante",
"Dosage" : "Dosis",
"Dove" : "Paloma",
"Download" : "Descargar",
"Draft" : "Caballo de tiro",
"Driver" : "Conductor",
"Drop files here..." : "Soltar archivos aquí...",
"Dropoff" : "Entrega",
"Duck" : "Pato",
"Due" : "Fecha límite",
"Due in next month" : "Debido, en el próximo mes",
"Due in next week" : "Debido, la siguiente semana",
"Due in next year" : "Debido, el siguiente año",
"Due today" : "Vence hoy",
"Duration" : "Duración",
"Dutch" : "Holandés",
"Dutch Shepherd" : "Pastor Holandés",
"Dwarf" : "Dwarf",
"Dwarf Eared" : "Dwarf Eared",
"E = first letter of animal entry category" : "E = primera letra de la categoría de entrada del animal",
"EE = first and second letter of animal entry category" : "EE = primera y segunda letra de la categoría de animal",
"Eclectus" : "Eclectus",
"Edit" : "Editar",
"Edit All Diary Notes" : "Editar todas las notas del diario",
"Edit Appointment" : "Editar cita",
"Edit Diary Tasks" : "Editar tareas del diario",
"Edit HTML publishing templates" : "Editar plantillas de publicación HTML",
"Edit Header/Footer" : "Editar encabezado/pie",
"Edit Invoice Item" : "Editar concepto de la factura",
"Edit Lookups" : "Editar búsquedas",
"Edit My Diary Notes" : "Editar mis notas del diario",
"Edit Online Forms" : "Edición de formularios en línea",
"Edit Reports" : "Editar Informes",
"Edit Roles" : "Editar Roles",
"Edit Users" : "Editar usuarios",
"Edit account" : "Editar cuenta",
"Edit additional field" : "Editar campo adicional",
"Edit citation" : "Editar citación",
"Edit cost" : "Editar coste",
"Edit diary" : "Editar diario",
"Edit diary notes" : "Editar notas del diario",
"Edit diary task" : "Editar tareas del diario",
"Edit diary tasks" : "Editar tareas del diario",
"Edit diet" : "Editar dieta",
"Edit document" : "Editar documento",
"Edit form field" : "Editar campo de formulario",
"Edit investigation" : "Editar investicagión",
"Edit invoice" : "Editar factura",
"Edit license" : "Editar licencia",
"Edit litter" : "Editar camada",
"Edit litters" : "Editar camadas",
"Edit log" : "Editar registro",
"Edit media notes" : "Editar notas multimedia",
"Edit medical profile" : "Editar perfil medico",
"Edit medical regimen" : "Editar tratamiento médico",
"Edit movement" : "Editar movimiento",
"Edit my diary notes" : "Editar mis notas del diario",
"Edit my diary notes" : "Editar mis notas de diario",
"Edit notes" : "Editar Notas",
"Edit online form" : "Editar formulario en línea",
"Edit online form HTML header/footer" : "Editar formulario en línea de encabezado / pie de página HTML",
"Edit payment" : "Editar pago",
"Edit report" : "Editar informe",
"Edit report template HTML header/footer" : "Editar informe de plantilla HTML header/footer",
"Edit role" : "Editar rol",
"Edit roles" : "Editar informes",
"Edit rota item" : "Editar un elemento del turno",
"Edit stock" : "Editar existencias",
"Edit system users" : "Editar usuarios del sistema",
"Edit template" : "Editar plantilla",
"Edit test" : "Editar prueba",
"Edit the current waiting list" : "Editar la lista de espera actual",
"Edit transaction" : "Editar transacción",
"Edit transport" : "Editar transporte",
"Edit trap loan" : "Editar prestamo de trampa",
"Edit user" : "Editar usuario",
"Edit vaccination" : "Editar vacuna",
"Edit voucher" : "Editar vale",
"Edit {0}" : "Editar {0}",
"Egyptian Mau" : "Mau Egipcio",
"Electricity Bills" : "Facturas de Electricidad",
"Email" : "Email",
"Email Address" : "Correo Electrónico",
"Email PDF" : "Enviar por correo electrónico un PDF",
"Email Person" : "Enviar correo a esta persona",
"Email To" : "Correo a",
"Email a copy of the selected HTML documents as PDFs" : "Enviar por correo electrónico una copia de los documentos HTML seleccionados en PDF",
"Email a copy of the selected media files" : "Enviar por correo electrónico una copia de los archivos multimedia seleccionados",
"Email address" : "Dirección de correo electrónico",
"Email document for electronic signature" : "Enviar por correo electrónico un documento para la firma electrónica",
"Email incident notes to ACO" : "Enviar por correo notas de incidentes a coordinador",
"Email incoming form submissions to this comma separated list of email addresses" : "Enviar los formularios de envío recibidos a este listado de direcciones de email separadas por comas",
"Email media" : "Email media",
"Email person" : "Mandar un email a esta persona",
"Email signature" : "Firma de e-mail",
"Email submissions to" : "Enviar por email a",
"Email this message to all matching users" : "Enviar este mensaje a todos los usuarios encontrados",
"Email this person" : "Enviar a esta persona",
"Email users their diary notes each day" : "Enviar a los usuarios sus notas de diario cada día",
"Emu" : "Emú",
"Enable FTP uploading" : "Habilitar FTP uploading",
"Enable accounts functionality" : "Habilitar la funcionalidad de cuentas",
"Enable location filters" : "Activa filtros de ubicación",
"Enable lost and found functionality" : "Hablitar funcionalidad de perdidos y encontrados",
"Enable multiple sites" : "Habilita múltiples sitios",
"Enable the waiting list functionality" : "Habilitar lista de espera",
"Enable visual effects" : "Habilitar efectos visuales",
"Enabled" : "Habilitado",
"End Of Day" : "Fin de día",
"End Time" : "Hora salida",
"End at" : "Finalizar en",
"End of month" : "Finl de mes",
"End of year" : "Fin de año",
"Ends" : "Salida",
"Ends after" : "Finalizar depués de",
"English Bulldog" : "Bulldog Inglés",
"English Cocker Spaniel" : "Cocker Spaniel Inglés",
"English Coonhound" : "Rastreador Inglés",
"English Lop" : "Belier Inglés",
"English Pointer" : "Pointer Inglés",
"English Setter" : "Setter Inglés",
"English Shepherd" : "Pastor Inglés",
"English Spot" : "English Spot",
"English Springer Spaniel" : "Springer Spaniel",
"English Toy Spaniel" : "Toy Spaniel Inglés",
"Entered (newest first)" : "Entradas (más recientes primero)",
"Entered (oldest first)" : "Entradas (más antiguas primero)",
"Entered From" : "Desde esta fecha de entrada",
"Entered To" : "Hasta esta fecha de entrada",
"Entered shelter" : "Entró en el refugio",
"Entering 'activelost' or 'activefound' in the search box will show you lost and found animals reported in the last 30 days." : "Al introducir 'activelost' or 'activefound' en el campo de búsqueda mostrará el informe de los animales encontrados y perdidos en los últimos 30 días.",
"Entering 'deceased' in the search box will show you recently deceased animals." : "Al introducir 'deceased' en el campo de búsqueda se mostrarán los animales fallecidos recientemente.",
"Entering 'fosterers', 'homecheckers', 'staff', 'volunteers', 'aco' or 'members' in the search box will show you those groups of people." : "Al introducir 'fosterers', 'homecheckers', 'staff', 'volunteers', 'aco' o 'members' en el campo de búsqueda mostrará estos grupos de personas.",
"Entering 'notforadoption' in the search box will show you all shelter animals with the not for adoption flag set." : "Al introducir 'notforadoption' en el campo de búsqueda mostrará todos los animales del refugio que tengan el campo de no para adopción activado.",
"Entering 'os' in the search box will show you all shelter animals." : "Al introducir 'os' en el campo de búsqueda se mostrarán todos los animales del refugio.",
"Entlebucher" : "Boyero de Entlebuch",
"Entry" : "Entrada",
"Entry Category" : "Introducir categoría",
"Entry Donation" : "Entrada Donación",
"Entry Reason" : "Razón de admisión",
"Entry Reason Category" : "Categoría de motivo de entrada",
"Entry Reasons" : "Motivos de entrada",
"Entry reason" : "Motivo de entrada",
"Error contacting server." : "Error conectando con el servidor.",
"Escaped" : "Escapó",
"Escaped {0}" : "Escapados {0}",
"Eskimo Dog" : "Perro Esquimal",
"Estimate" : "Estimado",
"Euthanized" : "Sacrificado",
"Euthanized {0}" : "Eutanasiados {0}",
"Every day" : "Todos los días",
"Exclude animals who are aged under" : "Excluir animales menores de",
"Exclude from bulk email" : "Excluir de la actualización de todos los correos electrónicos",
"Exclude new animal photos from publishing" : "Excluye fotos nuevas del animal para publicar",
"Exclude this image when publishing" : "Excluir esta imagen en la publicación",
"Execute" : "Ejecutar",
"Execute Script" : "Ejecutar script",
"Execute the SQL in the box below" : "Ejeccutar SQL en el campo inferior",
"Executing Task" : "Ejecutando una tarea",
"Executing..." : "Ejecutando...",
"Exotic Shorthair" : "Exótico de pelo corto",
"Expense" : "Gasto",
"Expense::" : "Gasto::",
"Expenses::Board" : "Gastos::Alojamiento",
"Expenses::Electricity" : "Expenses::Electricity",
"Expenses::Food" : "Expenses::Food",
"Expenses::Gas" : "Expenses::Gas",
"Expenses::Phone" : "Expenses::Phone",
"Expenses::Postage" : "Expenses::Postage",
"Expenses::Stationary" : "Expenses::Stationary",
"Expenses::Water" : "Expenses::Water",
"Expire in next month" : "Vence el próximo mes",
"Expired" : "Caducado",
"Expired in the last month" : "Caducado en el último mes",
"Expired in the last week" : "Caducado en la última semana",
"Expires" : "Caduca",
"Expiry" : "Expirar",
"Expiry date" : "Fecha de vencimiento",
"Export" : "Exportar",
"Export Animals as CSV" : "Exporta animales como CSV",
"Export Report" : "Exportar informe",
"Export Reports as CSV" : "Esportar informe como CSV",
"Export a CSV file of animal records that ASM can import into another database." : "Exporta un archivo CSV de registros de animales que ASM pueda importar a otra base de datos.",
"Export this database in various formats" : "Exportar esta base de datos en varios formatos",
"Exporting the complete database can take some time and generate a very large file, are you sure?" : "Exportar la base de datos completa llevará un tiempo y generará un archivo muy grande. ¿Esta seguro?",
"Extra Images" : "Imágenes extra",
"Extra images" : "Imágenes adicionales",
"Extra-Toes Cat (Hemingway Polydactyl)" : "Gato Polidactilo",
"F (Feral Cat)" : "A (Gato asilvestrado)",
"FECV/FeCoV" : "Coronavirus entérico felino/Coronavirus felino",
"FIPV" : "PIF (Peritonitis Infecciosa Felina)",
"FIV" : "VIF",
"FIV Result" : "Resultado del FIV",
"FIV+" : "Immunodeficiencia felina +",
"FIV/L Test Date" : "FIV/L Test Fecha",
"FIV/L Tested" : "FIV/L realizado",
"FLV" : "FLV",
"FLV Result" : "Resultado del FLV",
"FLV+" : "Leucemia felina +",
"FTP hostname" : "Nombre de host FTP",
"FTP password" : "Clave FTP",
"FTP username" : "Usiario FTP",
"FVRCP" : "Vacuna Panleucopenia/Herpesvirus/Calicivirus",
"Facebook" : "Facebook",
"Failed sending email" : "Fallo al enviar el correo",
"Failed to create payment." : "Fallo al crear pago.",
"Failed to renew license." : "Licencia no renovada.",
"Fawn" : "Amarillo Marrón",
"Fawn Tortoiseshell" : "Tortie Amarillo Marrón",
"FeLV" : "Leucemia felina",
"Features" : "Características",
"Feb" : "Feb",
"February" : "Febrero",
"Fee" : "Tarifa",
"Female" : "Hembra",
"Feral" : "Silvestre",
"Ferret" : "Hurón",
"Field Spaniel" : "Field Spaniel",
"Field names should not contain spaces." : "El nombre de los campos no puede contener espacios.",
"Fila Brasileiro" : "Fila Brasileño",
"File" : "Archivo",
"Filter" : "Filtro",
"Financial" : "Financiero",
"Finch" : "Pinzón",
"Find Animal" : "Buscar Animal",
"Find Animal/Person" : "Encontrar Animal/Persona",
"Find Found Animal" : "Buscar Animal Encontrado",
"Find Incident" : "Encontrar incidente",
"Find Lost Animal" : "Buscar Animal Perdido",
"Find Person" : "Buscar persona",
"Find a found animal" : "Buscar animal encontrado",
"Find a lost animal" : "Buscar animal perdido",
"Find aco" : "Encuentra un técnico de control de animales",
"Find an incident" : "Encontrar incidente",
"Find animal" : "Encontrar animal",
"Find animal columns" : "Encuentra las columnas de los animales",
"Find animal control incidents returned {0} results." : "Búsqueda de control de incidentes de animales devolvió {0} resultados.",
"Find animals matching the looking for criteria of this person" : "Encuentra animales que coincidan con la búsqueda de esta persona",
"Find donor" : "Encontrar donante",
"Find driver" : "Encontrar conductor",
"Find fosterer" : "Encontrar acogida",
"Find found animal returned {0} results." : "Animal encontrado ha regresado {0} resultados.",
"Find homechecked" : "Encontrar visitas a domicilio realizadas",
"Find homechecker" : "Encontrar personas que realicen visitas a domicilio",
"Find incident" : "Encontrar incidente",
"Find lost animal returned {0} results." : "Animal perdido ha regresado {0} resultados.",
"Find member" : "Encontrar socio",
"Find person" : "Encontrar persona",
"Find person columns" : "Encontrar columnas de personas",
"Find retailer" : "Encontrar vendedor",
"Find shelter" : "Buscar refugio",
"Find staff" : "Encuentra personal",
"Find staff/volunteer" : "Encuentra personal/voluntario",
"Find this address on a map" : "Encontrar esta dirección en el mapa",
"Find vet" : "Buscar veterinario/a",
"Find volunteer" : "Buscar voluntario/a",
"Fine Amount" : "Encuentra una cantidad",
"Finnish Lapphund" : "Perro Finlandés de Laponia",
"Finnish Spitz" : "Spitz Finlandés",
"First Last" : "Nombre Apellido",
"First Names" : "Primer nombre",
"First name(s)" : "Nombre(s)",
"First offence" : "Encuentra una infracción",
"Fish" : "Pez",
"Flag" : "Marca",
"Flags" : "Marcas",
"Flat-coated Retriever" : "Labrador Retriever de Pelo Liso",
"Flemish Giant" : "Flemish Giant",
"Florida White" : "Florida White",
"Followup" : "Seguimiento",
"Followup Between" : "Seguimiento entre",
"Followup Date/Time" : "Seguimiento Fecha/Hora",
"Footer" : "Pié de página",
"For" : "Para",
"Forbidden" : "Prohibido",
"Forenames" : "Apellidos",
"Forget" : "Olvidar",
"Form URL" : "URL Form",
"Forms need a name." : "Formas necesitan un nombre.",
"Foster" : "Acogida",
"Foster Book" : "Registro de Casas de Acogida",
"Foster Capacity" : "Capacidad de la casa de acogida",
"Foster Transfer" : "Traslado entre casas de acogida",
"Foster an animal" : "Acoge a un animal",
"Foster book" : "Registro casas de acogida",
"Foster movements must have a valid foster date." : "Los movimientos de acogida deben tener una fecha de acogida válida.",
"Foster successfully created." : "Acogida creada correctamente.",
"Fostered" : "Acogidos",
"Fostered Animals" : "Animales en acogida",
"Fostered to {0} since {1}" : "En acogida hasta {0} desde {1}",
"Fosterer" : "Casa de acogida",
"Fosterer (Active Only)" : "Casa de acogida (Sólo activo)",
"Fosterer Medical Report" : "Informe médico aportado por la casa de acogida",
"Found" : "Encontrado",
"Found Animal" : "Animal encontrado",
"Found Animal - Additional" : "Animal Encontrado - Otros",
"Found Animal - Details" : "Animal Encontrado - detalles",
"Found Animal Contact" : "Contacto de animal encontrado",
"Found Animal {0}" : "Animal Encontrado {0}",
"Found Animal: {0}" : "Animal encontrado: {0}",
"Found animal - {0} {1} [{2}]" : "Animal Encontrado - {0} {1} [{2}]",
"Found animal entries matching '{0}'." : "Entradas de animales que coinciden ‘{0}’.",
"Found animals must have a contact" : "Los animales encontrados deben tener un contacto",
"Found animals reported in the last 30 days." : "Informe de animales encontrados en los últimos 30 días.",
"Found from" : "Forma encontrada",
"Found to" : "Encontrado para",
"FoundLost animal entry {0} successfully created." : "Animal encontrado entrada {0} exitosamente creada.",
"Fox Terrier" : "Fox Terrier",
"Foxhound" : "Foxhound",
"Fr" : "Fr",
"French Bulldog" : "Bulldog Francés",
"French-Lop" : "French-Lop",
"Frequency" : "Frecuencia",
"Frequently Asked Questions" : "Preguntas frecuentes",
"Fri" : "Vie",
"Friday" : "Viernes",
"From" : "De",
"From Fostering" : "De acogida",
"From Other" : "Desde otros",
"From retailer is only valid on adoption movements." : "Desde vendedor sólo es válida en los movimientos de adopción.",
"Future notes" : "Notas futuras",
"GDPR Contact Opt-In" : "RGPD Opt-In",
"Gaited" : "Caballo de paso",
"Gas Bills" : "Facturas de Gas",
"Gecko" : "Lagarto Gecko",
"General" : "General",
"Generate" : "Generar",
"Generate Documents" : "Generar documentos",
"Generate HTML from this SQL" : "Generar HTML desde este SQL",
"Generate Report" : "Generar informe",
"Generate a document from this animal" : "Generar un documento desde este animal",
"Generate a document from this incident" : "Generar un documento de este incidente",
"Generate a document from this movement" : "Genera un documento para este movimiento",
"Generate a document from this person" : "Generar un documento para esta persona",
"Generate a document from this record" : "Genera un documento a partir de este registro",
"Generate a javascript database for the search page" : "Generar base de datos de javascript desde esta página de búqueda",
"Generate a new animal code" : "Generar un nuevo código de animal",
"Generate a random name for this animal" : "Generar un nombre aleatorio para este animal",
"Generate document from this appointment" : "Generar documento a partir de esta cita",
"Generate document from this license" : "Generar documento a partir de este registro de licencia",
"Generate document from this payment" : "Generar documento a partir de este pago",
"Generate document from this transport" : "Generar documento a partir de este transporte",
"Generate documentation" : "Generar documentación",
"Generate documents" : "Generar documentos",
"Generate image thumbnails as tn_$$IMAGE$$" : "Generar previsualización de imagenes como tn_$$IMAGE$$",
"Generated document '{0}'" : "Se generó el documento «{0}»",
"Gerbil" : "Jerbo",
"German Pinscher" : "Pinscher Alemán",
"German Shepherd Dog" : "Pastor Alemán",
"German Shorthaired Pointer" : "Pointer Alemán de Pelo Corto",
"German Wirehaired Pointer" : "Pointer Alemán de Pelo Largo",
"Get more reports from sheltermanager.com" : "Consigue más informes en sheltermanager.com",
"Gift Aid" : "Ayuda de regalo",
"GiftAid" : "GiftAid",
"Giftaid" : "GiftAid",
"Ginger" : "Pelirrojo",
"Ginger and White" : "Pelirrojo y Blanco",
"Give" : "Dar",
"Give Treatments" : "Dar tratamientos",
"Give Vaccination" : "Vacunar",
"Given" : "Fecha de vacunación/tratamiento",
"Glen of Imaal Terrier" : "Glen of Imaal Irlandés",
"Go" : "Ir",
"Go to the lookup data screen and add/remove breeds, species and animal types according to the animals your shelter deals with." : "Ve a la pantalla de opciones y añade/elimina razas, especies y tipos de animal de acuerdo con los animales que residen en tu refugio.",
"Go to the options screen and set your shelter's contact details and other settings." : "Ve a la pantalla de opciones y establecerdatos de contacto de su refugio y otros ajustes.",
"Go to the system users screen and add user accounts for your staff." : "Ve a la pantalla de usuarios del sistema y añade cuentas de usuario para tu personal.",
"Goat" : "Cabra",
"Golden" : "Dorado",
"Golden Retriever" : "Golden Retriever",
"Goldfish" : "Carpín (Pez)",
"Good With Cats" : "Bueno con Gatos",
"Good With Children" : "Bueno con niños",
"Good With Dogs" : "Bueno con perros",
"Good with Cats" : "Se lleva bien con los gatos",
"Good with Children" : "Se lleva bien con los niños",
"Good with Dogs" : "Se lleva bien con los perros",
"Good with cats" : "Bueno con los gatos",
"Good with children" : "Bueno con los niños",
"Good with dogs" : "Bueno con los perros",
"Good with kids" : "Bueno con los niños",
"Google+" : "Google+",
"Goose" : "Oca",
"Gordon Setter" : "Gordon Setter",
"Grade" : "Cruzado",
"Great Dane" : "Gran Danés",
"Great Pyrenees" : "Perro de Montaña de los Pirineos",
"Greater Swiss Mountain Dog" : "Gran Boyero Suizo",
"Green" : "Verde",
"Grey" : "Gris",
"Grey and White" : "Gris y Blanco",
"Greyhound" : "Galgo",
"Guinea Pig" : "Cobaya",
"Guinea fowl" : "Guineo/pintada",
"HMRC Gift Aid Spreadsheet" : "HMRC Gift Aid Spreadsheet",
"HTML" : "Lenguaje de marcado de hipertexto",
"HTML Publishing Templates" : "Plantillas de publicación HTML",
"HTML/FTP Publisher" : "HTML/FTP Publisher",
"Hairless" : "Sin pelo",
"Half-Yearly" : "Semestral",
"Hamster" : "Hamster",
"Harlequin" : "Harlequin",
"Havana" : "Habana",
"Havanese" : "Bichón Habanero",
"Header" : "Encabezado",
"Health Problems" : "Problemas de salud",
"Health and Identification" : "Salud e identificación",
"Healthy" : "Saludable",
"Heartworm" : "Dirofilariosis",
"Heartworm Test Date" : "Heartworm Test Fecha",
"Heartworm Test Result" : "Heartworm Test resultado",
"Heartworm Tested" : "Examinado de dirofilariosis",
"Heartworm+" : "Dirofilaria+",
"Hedgehog" : "Erizo",
"Held" : "Reservado",
"Help" : "Ayuda",
"Hepatitis" : "Hepatitis",
"Here are some things you should do before you start adding animals and people to your database." : "Aquí están algunas cosas que deberías hacer antes de empezar a añadir animales y personas a tu base de datos.",
"Hidden" : "Oculto",
"Hidden Comments" : "Comentarios confidenciales",
"Hidden comments about the animal" : "Comentarios ocultos sobre el animal",
"Hide deceased animals from the home page" : "Ocultar animales fallecidos de la página principal",
"High" : "Alta",
"Highlight" : "Resalte",
"Himalayan" : "Himalayo",
"History" : "Historia",
"Hold" : "Mantener",
"Hold the animal until this date or blank to hold indefinitely" : "Mantenga al animal hasta esta fecha o en blanco para mantener indefinidamente",
"Hold until" : "Retener hasta",
"Hold until {0}" : "Retener hasta {0}",
"Holland Lop" : "Holland Lop",
"Home" : "Inicio",
"Home Phone" : "Teléfono de casa",
"Home page" : "Página de inicio",
"Homecheck Areas" : "Áreas de visita a domicilio",
"Homecheck Date" : "Fecha de visita a domicilio",
"Homecheck History" : "Historial de visitas a domicilio",
"Homecheck areas" : "Áreas de visita a domicilio",
"Homechecked" : "Visita a domicilio realizada",
"Homechecked By" : "Visita a domicilio realizada por",
"Homechecked by" : "Visita a domicilio realizada por",
"Homechecker" : "Visitador a domicilio",
"Horizontal Pitch" : "Pitch Horizontal",
"Horse" : "Caballo",
"Hotot" : "Hotot",
"Hound" : "Sabueso",
"Hours" : "Horas",
"Housetrained" : "Adiestrado",
"Hovawart" : "Hovawart",
"How urgent is it that we take this animal?" : "¿Qué tan urgente es que nos llevemos al animal?",
"Husky" : "Husky",
"I've finished, Don't show me this popup again." : "Hemos terminado, No mostrar este popup otra vez.",
"IP Restriction" : "Restricción IP",
"IP restriction is a space-separated list of IP netblocks in CIDR notation that this user is *only* permitted to login from (eg: 192.168.0.0/24 127.0.0.0/8). If left blank, the user can login from any address." : "Restricción de IP es una lista separada por espacios de bloques de red IP en notación CIDR que este usuario es * sólo * permite iniciar sesión desde (por ejemplo: 192.168.0.0/24 127.0.0.0 / 8). Si se deja en blanco, el usuario puede iniciar sesión desde cualquier dirección.",
"Ibizan Hound" : "Podenco Ibicenco",
"If the shelter provides initial insurance cover to new adopters, the policy number" : "Si el albergue ofrece cobertura de seguro inicial para nuevos adoptantes, el número de póliza",
"If this form has a populated emailaddress field during submission, send a confirmation email to it" : "Si este formulario contiene un campo de correos ingresado en la entrega, enviar correo de confirmación",
"If this is the web preferred image, web publishers will use these notes as the animal description" : "Si esta es la imagen seleccionada para la web, los editores de páginas web usan estas notas como la descripción del animal",
"If this person is a fosterer, the maximum number of animals they can care for." : "Si esta persona es casa de acogida, número máximo de animales que puede acoger.",
"If this person is a member, the date that membership expires." : "Si esta persona es un miembro, la fecha en que expira membresía.",
"If this person is a member, their membership number" : "Si esta persona es un miembro, su número de miembro",
"If this person is a member, their membership number." : "Si esta persona es miembro, su número de membresía.",
"If this stock record is for a drug, the batch number from the container" : "Si este registro de stock es de un medicamento, el número de lote del recipiente",
"If this stock record is for a perishable good, the expiry date on the container" : "Si este registro de stock es para un bien perecedero, la fecha de caducidad del recipiente",
"If you assign view or edit roles, only users within those roles will be able to view and edit this account." : "Si asigna ver o editar los papeles, sólo los usuarios dentro de esas funciones serán capaces de ver y editar esta cuenta.",
"If you don't select any locations, publishers will include animals in all locations." : "Si no selecciona las ubicaciones, los editores incluyen animales en todos los lugares.",
"Iguana" : "Iguana",
"Illyrian Sheepdog" : "Illyrian Sheepdog",
"Image" : "Imagen",
"Image file" : "Archivo de imagen",
"Import" : "Importar",
"Import a CSV file" : "Importar un archivo CSV",
"Import a PayPal CSV file" : "Importa un archivo de CSV de PayPal",
"Import from file" : "Importar desde archivo",
"Important" : "Importante",
"In" : "En",
"In SubTotal" : "Es subtotal",
"In the last month" : "En el último mes",
"In the last quarter" : "En el último trimestre",
"In the last week" : "En la última semana",
"In the last year" : "En el último año",
"In-Kind Donation" : "Donativo en especie",
"Inactive" : "Inactivo",
"Inactive - do not include" : "Iniciativa - no incluir",
"Incident" : "Incidente",
"Incident - Additional" : "Incidente - Adicional",
"Incident - Citation" : "Incidente - Citación",
"Incident - Details" : "Incidente - Detalles",
"Incident - Dispatch" : "Incidente - Envío",
"Incident - Owner" : "Incidente - Propietario",
"Incident Between" : "Incidente entre",
"Incident Completed Types" : "Tipos de incidentes cpmpletos",
"Incident Date/Time" : "Incidente Fecha/Hora",
"Incident Type" : "Tipo de incidente",
"Incident Types" : "Tipos de incidentes",
"Incident date cannot be blank" : "La fecha del incidente no puede estar vacía",
"Incident followup" : "Seguimiento del incidente",
"Incident {0} successfully created." : "Incidente {0} creado satisfactoriamente.",
"Incident {0}, {1}: {2}" : "Incidente {0}, {1}: {2}",
"Incidents" : "Incidente",
"Incidents Requiring Followup" : "Incidentes que necesitan seguimiento",
"Include CSV header line" : "Incluir línea de cabecera CSV",
"Include Removed" : "Incluir eliminados",
"Include animals in the following locations" : "Incluir los animales en las siguientes ubicaciones",
"Include animals on trial adoption" : "Incluir animales en periodo de prueba de adopción",
"Include animals who don't have a description" : "Incluye animales que no tienen una descripción",
"Include animals who don't have a picture" : "Incluya los animales que no tienen una imagen",
"Include cruelty case animals" : "Incluya animales de casos de crueldad",
"Include deceased animals" : "Incluye animales fallecidos",
"Include fostered animals" : "Incluya animales acogidos",
"Include found" : "Incluir encontrados",
"Include held animals" : "Incluya animales portátiles",
"Include incomplete medical records when generating document templates" : "Incluir registros médicos incompletos al generar plantillas de documentos",
"Include incomplete vaccination and test records when generating document templates" : "Incluir registros de vacunas y chequeos incompletos al generar plantillas de documentos",
"Include non-shelter animals" : "Incluye animales que no pertenecen al refugio",
"Include off-shelter animals in medical calendar and books" : "Incluir animales fuera del albergue en el calendario y registro médico",
"Include preferred photo" : "Incluye",
"Include quarantined animals" : "Incluya los animales en cuarentena",
"Include reserved animals" : "Incluya animales reservados",
"Include retailer animals" : "Incluya animales minorista",
"Include returned" : "Incluir devueltos",
"Include this image when publishing" : "Incluir esta imagen al publicar",
"Include unaltered animals" : "Incluye animales no esterilizados",
"Income" : "Ingreso",
"Income from an on-site shop" : "Ingresos de una tienda en el centro",
"Income::" : "Ingresos::",
"Income::Adoption" : "Ingresos::Adopción",
"Income::Donation" : "Ingresos::Donación",
"Income::EntryDonation" : "Income::EntryDonation",
"Income::Interest" : "Income::Interest",
"Income::OpeningBalances" : "Income::OpeningBalances",
"Income::Shop" : "Income::Shop",
"Income::Sponsorship" : "Income::Sponsorship",
"Income::WaitingList" : "Ingresos:ListadeEspera",
"Incoming" : "Entrante",
"Incoming Forms" : "Formas entrantes",
"Incoming donations (misc)" : "Donaciones entrantes (misc)",
"Incoming forms are online forms that have been completed and submitted by people on the web." : "Formas entrantes son formularios en línea que se han completado y presentado por la gente en la web.",
"Incomplete incidents" : "Incidentes incompletos",
"Incomplete notes upto today" : "Notas incompletas hasta hoy",
"Index" : "Índice",
"Individual/Couple" : "Individuo/Pareja",
"Induct a new animal" : "Añadir Nuevo animal",
"Information" : "Información",
"Initials" : "Iniciales",
"Install" : "Instalados",
"Install the selected reports to your database" : "Instale los informes seleccionados de la base de datos",
"Insurance" : "Seguro",
"Insurance No" : "Número de seguro",
"Intake" : "Entrada",
"Intakes {0}" : "Tomas {0}",
"Internal Location" : "Ubicación interna",
"Internal Locations" : "Ubicaciones Internas",
"Invalid email address" : "Dirección de correo electrónico inválida",
"Invalid email address '{0}'" : "Dirección de correo electrónico no válida ‘{0}’",
"Invalid microchip number length" : "Longitud inválida del número de microchip",
"Invalid time '{0}', times should be in 00:00 format" : "Hora incorrecta '{0}', la hora se debe ser en el formato HH:MM",
"Invalid time, times should be in HH:MM format" : "Hora incorrecta, la hora se debe ser en el formato HH:MM",
"Invalid username or password." : "Nombre de usuario o contraseña inválidos.",
"Investigation" : "Investigación",
"Investigations" : "Investigaciones",
"Investigator" : "Investigador",
"Invoice Only" : "Solo factura",
"Invoice items need a description and amount." : "Los conceptos de la factura necesitan una descripción y una cantidad.",
"Irish Setter" : "Setter Irlandés",
"Irish Terrier" : "Terrier Irlandés",
"Irish Water Spaniel" : "Perro de Agua Irlandés",
"Irish Wolfhound" : "Lobero irlandés",
"Is this a permanent foster?" : "¿Es esto una acogida permanente?",
"Is this a trial adoption?" : "¿Es esta una adopción de prueba?",
"Issue a new insurance number for this animal/adoption" : "Emita un nuevo número de seguro para este animal/adopción",
"Issue date and expiry date must be valid dates." : "Fecha de expedición y fecha de caducidad deben ser fechas válidas.",
"Issued" : "Emitido",
"Issued in the last month" : "Emitido en el último mes",
"Issued in the last week" : "Expedido en la última semana",
"Italian Greyhound" : "Galgo Italiano",
"Italian Spinone" : "Spinone Italiano",
"Item" : "Artículo",
"Jack Russell Terrier" : "Jack Russell Terrier",
"Jan" : "Ene",
"January" : "Enero",
"Japanese Bobtail" : "Bobtail Japonés",
"Japanese Chin" : "Chin Japonés",
"Javanese" : "Javanés",
"Jersey Wooly" : "Jersey Wooly",
"Jindo" : "Jindo Coreano",
"Jul" : "Jul",
"July" : "Julio",
"Jump to diary" : "Cambiar a diario",
"Jump to donations" : "Cambiar a donaciones",
"Jump to media" : "Cambiar a media",
"Jump to movements" : "Cambiar a movimientos",
"Jun" : "Jun",
"June" : "Junio",
"Jurisdiction" : "Jurisdicción",
"Jurisdictions" : "Jurisdicciones",
"Kai Dog" : "Kai Japonés",
"Kakariki" : "Kakariki",
"Karelian Bear Dog" : "Perro Oso de Carelia",
"Keep table headers visible when scrolling" : "Mantenga encabezados de tabla visible al desplazarse",
"Keeshond" : "Keeshond",
"Kennel" : "Perrera",
"Kerry Blue Terrier" : "Terrier Kerry Blue",
"Kishu" : "Kishu",
"Kittens (under {0} months)" : "Gatitos (menores de {0} mes)",
"Km" : "Km",
"Komondor" : "Komondor",
"Korat" : "Korat",
"Kuvasz" : "Kuvasz",
"Kyi Leo" : "Kyi Leo",
"Label" : "Etiqueta",
"Labrador Retriever" : "Labrador Retriever",
"Lakeland Terrier" : "Terrier Lakeland",
"Lancashire Heeler" : "Lancashire Heeler",
"Large" : "Grande",
"Last First" : "Apellido Nombre",
"Last Location" : "Última ubicación",
"Last Month" : "Mes pasado",
"Last Name" : "Apellido",
"Last Week" : "Semana pasada",
"Last changed by {0} on {1}" : "Última modificación por {0} el {1}",
"Last name" : "Apellido",
"Last, First" : "Apellido, Nombre",
"Latency" : "Latencia",
"Latency Tester" : "Evaluador de latencia",
"Least recently changed" : "Modificaciones recientes",
"Leave" : "Salir",
"Leave of absence" : "Excedencia",
"Left Margin" : "Margen izquierdo",
"Left shelter" : "Dejó el refugio",
"Leonberger" : "Leonberger",
"Leptospirosis" : "Leptospirosis",
"Letter" : "Carta",
"Lhasa Apso" : "Lhasa Apso",
"Liability" : "Responsabilidad",
"Licence for {0} successfully renewed {1} - {2}" : "Licencia de {0} renovada {1} - {2}",
"License" : "Licencia",
"License Number" : "Numero de Licencia",
"License Types" : "Tipo de Licencia",
"License number '{0}' has already been issued." : "Número de licencia '{ 0 }' ya se ha emitido .",
"License numbers matching '{0}'." : "Los Número de Licencia correspondiente '{0}'.",
"License requires a number" : "Se necesita número de licencia",
"License requires a person" : "Se necesita persona para la licencia",
"License requires issued and expiry dates" : "Se necesita la fecha de emisión y caducidad de la licencia",
"Licenses" : "Licencias",
"Licensing" : "Licencias",
"Lifetime" : "De por vida",
"Light Amber" : "Ámbar claro",
"Lilac" : "Lila",
"Lilac Tortie" : "Tortie Lila",
"Limited to {0} matches" : "Limitado a {0} coincidencias",
"Link" : "Enlace",
"Link an animal" : "Vincular un animal",
"Link to an external web resource" : "Enlace a un recurso web externo",
"Link to this animal" : "Enlace a este animal",
"Links" : "Enlaces",
"List" : "Lista",
"Litter" : "Camada",
"Litter Ref" : "Ref Camada",
"Litter Reference" : "Referencia de Camada",
"Littermates" : "Compañeros de camada",
"Litters" : "Camadas",
"Litters need at least a required date and number." : "Las camadas necesitan por lo menos, fecha y número.",
"Live Releases {0}" : "Animales que han salido del centro por otros motivos que no sean eutanasia o muerte",
"Liver" : "Marrón oscuro",
"Liver and White" : "Marrón oscuro y blanco",
"Lizard" : "Lagarto",
"Llama" : "Llama",
"Loading..." : "Cargando…",
"Loan" : "Préstamo",
"Local" : "Local",
"Locale" : "Localización",
"Location" : "Ubicación",
"Location Filter" : "Filtro de ubicación",
"Location and Species" : "Ubicación y Especies",
"Location and Type" : "Ubicación y tipo",
"Location and Unit" : "Ubicación y Dependencia",
"Locations" : "Ubicaciones",
"Log" : "Historial",
"Log Text" : "Texto del registro",
"Log Type" : "Tipo de registro",
"Log Types" : "Tipos de historiales",
"Log date must be a valid date" : "Fecha de registro debe ser una fecha válida",
"Log entries need a date and text." : "Las entradas del registro necesitan de una fecha y un texto.",
"Log requires a date." : "El registro requiere una fecha.",
"Log requires a person." : "El registro requiere una persona.",
"Log requires an animal." : "El registro requiere un animal.",
"Log successfully added." : "Registro añadido correctamente.",
"Login" : "Iniciar sesión",
"Logout" : "Salir",
"Long" : "Largo",
"Long term" : "A largo plazo",
"Longest On Shelter" : "Mas tiempo en el refugio",
"Looking For" : "Buscando",
"Looking for" : "Buscando",
"Lookup" : "Buscar",
"Lookup (Multiple Select)" : "Buscar (seleccionar varios)",
"Lookup Values" : "Buscando valores",
"Lookup data" : "Buscar datos",
"Lookups" : "Búsquedas",
"Lop Eared" : "Lop Eared",
"Lory/Lorikeet" : "Lori/loriquito",
"Lost" : "Perdido",
"Lost Animal" : "Animal perdido",
"Lost Animal - Additional" : "Perdió Animal - adicional",
"Lost Animal - Details" : "Perdió Animal - detalles",
"Lost Animal Contact" : "Contacto de animal perdido",
"Lost Animal: {0}" : "Animal perdido: {0}",
"Lost and Found" : "Perdidos y encontrados",
"Lost and found entries must have a contact" : "Entradas de perdidos y encontrados deben tener un contacto",
"Lost animal - {0} {1} [{2}]" : "Animal perdido - {0} {1} [{2}]",
"Lost animal entries matching '{0}'." : "Entradas de animales perdidos encontradas '{0}'.",
"Lost animal entry {0} successfully created." : "Entradas de animales perdidos exitosamente creadas {0}",
"Lost animals must have a contact" : "Animales perdidos deben tener un contacto",
"Lost animals reported in the last 30 days." : "Avisos de animales perdidos en los últimos 30 días.",
"Lost from" : "Perdido de",
"Lost to" : "Perdido a",
"Lost/Found" : "Perdidos/Encontrados",
"Lots of reports installed? Clean up the Reports menu with Settings-Options- Display-Show report menu items in collapsed categories." : "¿Muchos informes instalados? Limpiar el menú de Informes con Configuración-Opciones-Mostrar- Mostrar menú de informes en categorías.",
"Lovebird" : "Agapornis",
"Low" : "Baja",
"Lowchen" : "Lowchen",
"Lowest" : "Más bajo",
"M (Miscellaneous)" : "M (Diversos)",
"MM = current month" : "MM = mes actual",
"Macaw" : "Guacamayo",
"Mail" : "Correo",
"Mail Merge" : "Avisos por correo",
"Mail Merge - {0}" : "Combinar correspondencia",
"Maine Coon" : "Maine Coon",
"Make this the default image when creating documents" : "Hacer esta imagen por defecto al crear documentos",
"Make this the default image when viewing this record and publishing to the web" : "Establecer esta imagen por defecto cuando se ve este registro y publicación a la web",
"Make this the default video link when publishing to the web" : "Hacen de este el enlace de vídeo por defecto cuando se publica en la web",
"Male" : "Macho",
"Maltese" : "Bichón Maltés",
"Manchester Terrier" : "Terrier Manchester",
"Mandatory" : "Obligatorio",
"Manual" : "Manual",
"Manually enter codes (do not generate)" : "Introducir códigos manualmente (no generar)",
"Manufacturer" : "Fabricante",
"Manx" : "Manx",
"Map" : "Mapa",
"Map of active incidents" : "Mapa de incidentes activos",
"Mar" : "Mar",
"March" : "Marzo",
"Maremma Sheepdog" : "Pastor de Maremma",
"Mark Deceased" : "Marcar como fallecido",
"Mark an animal deceased" : "Marcar un animal como fallecido",
"Mark dispatched now" : "Marca como enviado ahora",
"Mark new animals as not for adoption" : "Marcar nuevos animales como no adoptables",
"Mark responded now" : "Marca como respondido ahora",
"Mark selected payments received" : "Marca los pagos seleccionados como recibidos",
"Mark this owner homechecked" : "Marcar esta persona cuando se haya realizado la visita a domicilio",
"Mark treatments given" : "Marcar tratamientos suministrados",
"Marketer" : "Comercializador",
"Markings" : "Marcas",
"Markup" : "Marcado",
"Marriage/Relationship split" : "Separación",
"Mastiff" : "Mastiff",
"Match" : "Coincidencia",
"Match Lost and Found" : "Coincidencias en perdidos y encontrados",
"Match against other lost/found animals" : "Coincidencias entre otros perdidos y encontrados",
"Match lost and found animals" : "Emparejar animales perdidos y encontrados",
"Match this animal with the lost and found database" : "Hacer coincidir este animal con la base de datos de perdidos y encontrados",
"Maternity" : "Maternidad",
"May" : "Mayo",
"McNab" : "McNab",
"Media" : "Medios",
"Media Notes" : "Notas del soporte",
"Media notes contain" : "Las notas del soporte contienen",
"Medical" : "Médica",
"Medical Book" : "Libro Médico",
"Medical Profiles" : "Perfiles médicos",
"Medical book" : "Libro médico",
"Medical calendar" : "Calendario médico",
"Medical profiles" : "Perfiles Médicos",
"Medical profiles need a profile name, treatment, dosage and frequencies." : "Perfiles médicos necesitan un nombre de perfil, el tratamiento, la dosis y las frecuencias.",
"Medical regimens need an animal, name, dosage, a start date and frequencies." : "Los tratamientos médicos necesitan un animal, el nombre, la dosis, la fecha de inicio y frecuencias.",
"Medicate" : "Medicar",
"Medicate Animal" : "Medicar animal",
"Medium" : "Medio",
"Member" : "Socio",
"Membership Expiry" : "Membresia expirada",
"Membership Number" : "Número de membresía",
"Merge" : "Fusionar",
"Merge Person" : "Une persona",
"Merge another animal into this one" : "Combinar otro animal con este",
"Merge another person into this one" : "Fusionar otra persona en ésta",
"Merge bonded animals into a single record" : "Fusiona animales ligados en un único registro",
"Merge duplicate records" : "Unir registros duplicados",
"Message" : "Mensaje",
"Message Board" : "Tablero de mensajes",
"Message from {0}" : "Mensaje de {0}",
"Message successfully sent to {0}" : "Mensaje enviado correctamente a {0}",
"Messages" : "Mensajes",
"Messages successfully sent" : "Mensajes enviados correctamente",
"Method" : "Método",
"Microchip" : "Microchip",
"Microchip Date" : "Fecha del microchip",
"Microchip Number" : "Número de microchip",
"Microchip number {0} has already been allocated to another animal." : "Número de microchip {0} ya ha sido implantado a otro animal.",
"Microchipped" : "Con microchip",
"Miles" : "Millas",
"Mini Rex" : "Mini Rex",
"Mini-Lop" : "Mini-Lop",
"Miniature Pinscher" : "Pinscher Miniatura",
"Minutes" : "Minutos",
"Missouri Foxtrotter" : "Missouri Foxtrotter",
"Mixed Breed" : "Raza mestiza",
"Mo" : "Mo",
"Mobile signing pad" : "Superficie del móvil para firmar",
"Modify Additional Fields" : "Modificar campos adicionales",
"Modify Document Templates" : "Modificar plantillas de documentos",
"Modify Lookups" : "Modificar búsquedas",
"Mon" : "Lun",
"Monday" : "Lunes",
"Money" : "Dinero",
"Month" : "Mes",
"Monthly" : "Mensual",
"More Info Needed" : "Más información requerida",
"More Medications" : "Más medicaciones",
"More Tests" : "Más pruebas",
"More Vaccinations" : "Más vacunaciones",
"More diary notes" : "Más notas del diario",
"Morgan" : "Morgan",
"Most browsers let you search in dropdowns by typing the first few letters of the item you want." : "La mayoría de los navegadores le permiten buscar en menús desplegables, escriba las primeras letras del nombre del elemento que desea.",
"Most browsers will let you visit a record you have been to in this session by typing part of its name in the address bar." : "La mayoría de los navegadores le permitirán visitar un registro en el que ha estado en esta sesión tecleando parte de su nombre en la barra de direcciones.",
"Most recently changed" : "Las modificaciones más recientes",
"Most relevant" : "Más relevante",
"Mother" : "Madre",
"Mountain Cur" : "Mountain Cur",
"Mountain Dog" : "Perro de Montaña",
"Mouse" : "Ratón",
"Move" : "Mover",
"Move an animal to a retailer" : "Mover un animal a un vendedor",
"Moved to animal record {0}" : "Trasladado al registro de animales {0}",
"Movement" : "Movimiento",
"Movement Date" : "Fecha del movimiento",
"Movement Number" : "Número de movimiento",
"Movement Type" : "Tipo de movimiento",
"Movement Types" : "Tipos de Movimiento",
"Movement dates clash with an existing movement." : "La fecha del movimiento está en conflicto con otro movimiento.",
"Movement numbers must be unique." : "Números de movimiento deben ser únicos.",
"Movements" : "Movimientos",
"Movements require an animal" : "Los movimientos requieren a un animal",
"Movements require an animal." : "Los movimientos requieren un animal.",
"Moving..." : "Mover ...",
"Multi-Lookup" : "Búsqueda múltiple",
"Multiple Treatments" : "Tratamientos múltiples",
"Munchkin" : "Munchkin",
"Munsterlander" : "Munsterlander",
"Mustang" : "Mustang",
"My Fosters" : "Mis acogidas",
"My Incidents" : "Mis Incidentes",
"My Undispatched Incidents" : "Mis incidentes no expedidos",
"My diary notes" : "Mis notas del diario",
"My sheltermanager.com account" : "Mi cuenta de sheltermanager.com",
"Mynah" : "Miná",
"N (Non-Shelter Animal)" : "Animal externo al refugio",
"NNN or NN = number unique for this type of animal for this year" : "NNN or NN = número único para este tipo de animal para este año",
"Name" : "Nombre",
"Name Contains" : "El nombre contiene",
"Name and Address" : "Nombre y dirección",
"Name cannot be blank" : "El nombre no puede estar en blanco",
"Name contains" : "El nombre contiene",
"Neapolitan Mastiff" : "Mastín Napolitano",
"Negative" : "Negativo",
"Neglect" : "Abandono",
"Netherland Dwarf" : "Netherland Dwarf",
"Neuter/Spay" : "Esterilización",
"Neutered" : "Esterilizado",
"Neutered/Spayed Non-Shelter Animals In {0}" : "Animales externos al refugio castrados/esterilizados en {0}",
"Neutered/Spayed Shelter Animals In {0}" : "Animales castrados/esterilizados en {0}",
"New" : "Nuevo",
"New Account" : "Nueva Cuenta",
"New Appointment" : "Nueva cita",
"New Citation" : "Nueva cita",
"New Cost" : "Nuevo gasto",
"New Diary" : "Nuevo diario",
"New Diet" : "Nueva dieta",
"New Document" : "Nuevo Documento",
"New Field" : "Nuevo Campo",
"New Fosterer" : "Nueva casa de acogida",
"New Guinea Singing Dog" : "Perro Cantor de Nueva Guinea",
"New Item" : "Nuevo elemento",
"New License" : "Nueva licencia",
"New Litter" : "Nueva camada",
"New Log" : "Nuevo archivo de registro",
"New Movement" : "Nuevo movimiento",
"New Owner" : "Nuevo propietario",
"New Password" : "Nueva contraseña",
"New Payment" : "Nuevo pago",
"New Profile" : "Nuevo perfil",
"New Record" : "Nuevo registro",
"New Regimen" : "Nuevo tratamiento",
"New Report" : "Nuevo informe",
"New Role" : "Nuevo rol",
"New Stock" : "Nueva existencia",
"New Task" : "Nueva Tarea",
"New Template" : "Nueva Plantilla",
"New Test" : "Prueba nueva",
"New Transport" : "Añadir transporte",
"New Trap Loan" : "Nuevo préstamo de jaula trampa",
"New User" : "Nuevo Usuario",
"New Vaccination" : "Nueva vacunación",
"New Voucher" : "Nuevo vale",
"New Waiting List Entry" : "Nueva entrada de lista de espera",
"New Zealand" : "Nueva Zelanda",
"New diary task" : "Nueva entrada en el diario",
"New form field" : "Nuevo campo de formulario",
"New name" : "Nuevo nombre",
"New online form" : "Nuevo formulario en línea",
"New password and confirmation password don't match." : "Nueva contraseña y confirmación de contraseña no coinciden.",
"New task detail" : "Nuevo detalle de la tarea",
"New template" : "Nueva plantilla",
"Newfoundland Dog" : "Terranova",
"Next" : "Siguiente",
"No" : "No",
"No adjustment" : "Sin ajustes",
"No data to show on the report." : "No hay datos para mostrar en el informe.",
"No data." : "Sin datos.",
"No description" : "Sin descripción",
"No longer retained" : "Ya no están retenidos",
"No matches found." : "No se han encontrado coincidencias.",
"No picture" : "Sin imagen",
"No publishers are running." : "No hay editores se están ejecutando.",
"No results found." : "No se encontraron resultados.",
"No results." : "Ningún resultado.",
"No tasks are running." : "No se está realizando ninguna tarea.",
"No view permission for this report" : "Sin permiso de visualización para este informe",
"Noise" : "Ruido",
"Non-Shelter" : "No pertenece al refugio",
"Non-Shelter Animal" : "Animal externo al refugio",
"Non-Shelter Animals" : "Animales externos al refugio",
"Non-shelter Animals" : "Animales externos al refugio",
"None" : "Ninguno",
"Norfolk Terrier" : "Terrier Norfolk",
"Normal user" : "Usuario normal",
"Norwegian Buhund" : "Buhund Noruego",
"Norwegian Elkhound" : "Cazador de Alces Noruego Gris",
"Norwegian Forest Cat" : "Bosque de Noruega",
"Norwegian Lundehund" : "Lundehund Noruego",
"Norwich Terrier" : "Terrier Noruego",
"Not Arrived" : "Todavía no ha llegado",
"Not Available For Adoption" : "No disponible para adopción",
"Not Available for Adoption" : "No disponible para adopción",
"Not For Adoption" : "No adoptables",
"Not Microchipped" : "Sin microchip",
"Not Reconciled" : "No reconciliados",
"Not available for adoption" : "No disponible para adopción",
"Not dispatched" : "No enviado",
"Not for adoption" : "No adoptables",
"Not for adoption flag set" : "No está marcado para la adopción",
"Not in chosen publisher location" : "No se encuentra en la ubicación para la publicación",
"Not reconciled" : "No reconciliados",
"Note" : "Nota",
"Notes" : "Notas",
"Notes about the death of the animal" : "Notas acerca de la muerte del animal",
"Nov" : "Nov",
"Nova Scotia Duck-Tolling Retriever" : "Retriever de Nueva Escocia",
"November" : "Noviembre",
"Now" : "Ahora",
"Number" : "Número",
"Number in litter" : "Número de camada",
"Number of Tasks" : "Número de tareas",
"Number of animal links to show" : "Número de enlaces de origen animal para mostrar",
"Number of fields" : "Número de campos",
"Number of pets" : "Número de mascotas",
"Ocicat" : "Ocicat",
"Oct" : "Oct",
"October" : "Octubre",
"Office" : "Oficina",
"Old English Sheepdog" : "Viejo Pastor Inglés",
"Old Password" : "Contraseña antigua",
"Omit criteria" : "Omita criterios",
"Omit header/footer" : "Omita header/footer",
"On Foster (in figures)" : "En acogida (por cifras)",
"On Shelter" : "En refugio",
"On shelter for {0} days, daily cost {1}, cost record total <b>{2}</b>" : "En refugio durante {0} días, coste diario {1}, coste total de registro <b>{2}</b>",
"On shelter for {0} days. Total cost: {1}" : "El refugio para {0} días. Gasto total: {1}",
"Once assigned, codes cannot be changed" : "Una vez asignados, los códigos no pueden cambiarse",
"Once signed, this document cannot be edited or tampered with." : "Una vez firmado, este documento no puede ser editado ni falsificado.",
"One Off" : "Tratamiento único",
"One-Off" : "Puntual",
"Online Form: {0}" : "Formulario Online: {0}",
"Online Forms" : "Formularios en línea",
"Online form fields need a name and label." : "Campos de formulario en línea necesitan un nombre y etiqueta.",
"Online forms can be linked to from your website and used to take information from visitors for applications, etc." : "Formularios en línea que se pueden vincular a partir de su sitio web y se utilizan para tomar la información de los visitantes de las aplicaciones, etc.",
"Only PDF, HTML and JPG image files can be attached." : "Sólo los archivos de imagen PDF, HTML y JPG se pueden unir.",
"Only active accounts" : "Sólo las cuentas activas",
"Only allow users with one of these roles to view this incident" : "Sólo usuarios con uno de estos roles pueden ver este incidente",
"Only show account totals for the current period, which starts on " : "Mostrar sólo cuentas totales para el período en curso, que comienza el ",
"Only show declawed" : "Mostrar solo los que no tienen uñas",
"Only show pickups" : "Sólo muestra recogidas",
"Only show special needs" : "Mostrar sólo necesidades especiales",
"Only show transfers" : "Mostrar sólo traslados",
"Open Incidents" : "Incidentes abiertos",
"Open records in a new browser tab" : "Abrir los registros en una nueva pestaña del navegador",
"Open reports in a new browser tab" : "Abrir los informes en una nueva pestaña del navegador",
"Opening balances" : "Saldos de apertura",
"Optional, the date the vaccination \"wears off\" and needs to be administered again" : "Opcional, la fecha de vacunación caduca y necesita ser administrada de nuevo",
"Options" : "Opciones",
"Or move this diary on to" : "O mueva este diario a",
"Order published animals by" : "Orden publicada por animales",
"Organisation" : "Organización",
"Organization" : "Organización",
"Organization name" : "Nombre de la organización",
"Oriental Long Hair" : "Oriental de pelo largo",
"Oriental Short Hair" : "Oriental de pelo corto",
"Oriental Tabby" : "Oriental Tabby",
"Original Owner" : "Contacto original",
"Ostrich" : "Avestruz",
"Other Account" : "Otra Cuenta",
"Other Organisation" : "Otra organización",
"Other Shelter" : "Otro refugio",
"Otterhound" : "Perro de Nutria",
"Our shelter does trial adoptions, allow us to mark these on movement screens" : "Nuestro refugio hace adopciones de prueba, permítenos marcarlas en las pantallas de movimientos",
"Out" : "Salida",
"Out Between" : "Fecha de salida entre",
"Out SubTotal" : "Subtotal",
"Output a deceased animals page" : "Emite una página de animales que han fallecido",
"Output a page with links to available online forms" : "Emite una página con enlaces a formularios online disponibles",
"Output a separate page for each animal type" : "Emite una página independiente por cada tipo de animal",
"Output a separate page for each species" : "Emite una página independiente por cada especie",
"Output an adopted animals page" : "Emite una página de animales adoptados",
"Output an rss.xml page" : "Emite una página rss.xml",
"Overdue" : "Atrasada",
"Overdue medical items" : "Tratamientos médicos atrasados",
"Overtime" : "Horas extras",
"Owl" : "Búho",
"Owner" : "Contacto",
"Owner Vet" : "Vet del propietario",
"Owner given citation" : "Cita asignada al propietario",
"Owners Vet" : "Vet de los propietarios",
"PM" : "PM",
"Page extension" : "Extensión de página",
"Paid" : "Pagado",
"Paint/Pinto" : "Pinto",
"Palomino" : "Palomino",
"Paper Size" : "Tamaño del papel",
"Papillon" : "Epagneul Papillón",
"Parainfluenza" : "Parainfluenza",
"Parakeet (Other)" : "Perico",
"Parent" : "Progenitor",
"Parrot (Other)" : "Loro",
"Parrotlet" : "Periquito de Anteojos",
"Parvovirus" : "Parvovirosis",
"Paso Fino" : "Paso Fino",
"Pass Homecheck" : "Visita a domicilio aprobada",
"Password" : "Contraseña",
"Password for '{0}' has been reset." : "La contraseña para '{0}' se ha restablecido.",
"Password is incorrect." : "La contraseña es incorrecta.",
"Password successfully changed." : "Contraseña cambiada correctamente.",
"Passwords cannot be blank." : "Las contraseñas no pueden estar en blanco.",
"Path" : "Ruta",
"Patterdale Terrier (Fell Terrier)" : "Patterdale Terrier",
"PayPal" : "PayPal",
"Payment" : "Pago",
"Payment Book" : "Libro de pagos",
"Payment From" : "Pago desde",
"Payment Methods" : "Métodos de pago",
"Payment Type" : "Tipo de Pago",
"Payment Types" : "Formas de pago",
"Payment book" : "Libro de pagos",
"Payment calendar" : "Calendario de pagos",
"Payment of {0} successfully received ({1})." : "De {0} pagos ({1}) recibido correctamente.",
"Payments" : "Pagos",
"Payments need at least one date, an amount and a person." : "Los pagos necesitan al menos una fecha, una cantidad y una persona.",
"Payments of type" : "Pagos del tipo",
"Payments require a person" : "Los pagos requieren una persona",
"Payments require a received date" : "Los pagos requieren una fecha de recepción",
"Peacock/Pea fowl" : "Pavo Real",
"Pekingese" : "Pequinés",
"Pending Adoption" : "Adopción pendiente",
"Pending Apartment Verification" : "Pendiente comprobación del apartamento",
"Pending Home Visit" : "Pendiente visita de la casa",
"Pending Vet Check" : "Pendiente revisión del veterinario",
"Pension" : "Pensión",
"People" : "Personas",
"People Looking For" : "Las personas estan buscando",
"People matching '{0}'." : "Personas que contengan '{0}'.",
"People or animal records that already exist in the database will not be imported again and movement/payment data will be attached to the existing records instead." : "Los registros de personas o animales que ya existen en la base de datos no se importarán de nuevo y los datos de movimientos/pagos se adjuntarán a los registros ya existentes.",
"People with active reservations, but no homecheck has been done." : "Las personas con reservas activas, pero no se ha realizado la visita a domicilio.",
"People with overdue donations." : "Las personas con compromisos de donaciones vencidas.",
"Percheron" : "Percherón",
"Perform" : "Realizar",
"Perform Homecheck" : "Realizar la visita a domicilio",
"Perform Test" : "Realizar prueba",
"Performed" : "Fecha de realización del test",
"Permanent Foster" : "Acogida permanente",
"Persian" : "Persa",
"Person" : "Persona",
"Person - Additional" : "Persona - Otros",
"Person - Name and Address" : "Persona - nombre y dirección",
"Person - Type" : "Persona - Tipo",
"Person Flags" : "Marcadores de personas",
"Person looking for report" : "Personas buscando informes",
"Person successfully created" : "Persona creada correctamente",
"Personal" : "Personal",
"Peruvian Inca Orchid" : "Perro Sin Pelo del Perú",
"Peruvian Paso" : "Peruano de Paso",
"Petit Basset Griffon Vendeen" : "Pequeño Basset Grifón Vendeano",
"Pharaoh Hound" : "Perro del Faraón",
"Pheasant" : "Faisán",
"Phone" : "Teléfono",
"Phone contains" : "El número de teléfono contiene",
"Photo successfully uploaded." : "Fotografía subida correctamente.",
"Picked Up" : "Recogido",
"Picked Up By" : "Recogido por",
"Pickup" : "Recogida",
"Pickup Address" : "Dirección de recogida",
"Pickup Location" : "Localización de recogida",
"Pickup Locations" : "Localizaciones de recogida",
"Pig" : "Cerdo",
"Pig (Farm)" : "Cerdo (Granja)",
"Pigeon" : "Paloma",
"Pinterest" : "Pinterest",
"Pionus" : "Loro de cabeza azul",
"Pit Bull Terrier" : "Pit Bull Terrier",
"Pixie-Bob" : "Pixie Bob",
"Please click the Sign button when you are finished." : "Por favor, pulse en botón de firmar cuando haya terminado.",
"Please see the manual for more information." : "Por favor, lea el manual para más información.",
"Please select a PDF, HTML or JPG image file to attach" : "Por favor, seleccione un archivo de imagen PDF, HTML o JPG para adjuntar",
"Please tighten the scope of your email campaign to {0} emails or less." : "Por favor, ajuste el margen de su campaña de correos electrónicos a {0} emails o menos.",
"Please use the links below to electronically sign these documents." : "Por favor, use el enlace de abajo para firmar electrónicamente estos documentos.",
"Plott Hound" : "Plott Hound",
"Poicephalus/Senegal" : "Loro del Senegal",
"Pointer" : "Pointer",
"Points for being found within 2 weeks of being lost" : "Puntos para ser encontrados dentro de las 2 semanas de haber perdido",
"Points for matching age group" : "Puntos por grupo de edad correspondiente",
"Points for matching breed" : "Puntos por juego raza",
"Points for matching color" : "Puntos de color a juego",
"Points for matching features" : "Puntos de características emparejan",
"Points for matching lost/found area" : "Puntos coincidencias de perdido / encontrado",
"Points for matching sex" : "Puntos por coincidencia sexual",
"Points for matching species" : "Puntos por coincidencias en especie",
"Points for matching zipcode" : "Puntos por coincidencias en el codigo postal",
"Points required to appear on match report" : "Puntos requeridos que aparecerán en el informe de coincidencias",
"Polish" : "Polaco",
"Polish Lowland Sheepdog" : "Pastor Polaco de la Llanura",
"Pomeranian" : "Pomerania",
"Pony" : "Poni",
"Poodle" : "Caniche",
"Portugese Podengo" : "Podengo Portugués",
"Portuguese Water Dog" : "Perro de Agua Portugués",
"Positive" : "Positivo",
"Positive for Heartworm, FIV or FLV" : "Positivo para gusano del corazón, immunodeficiencia felina o leucemia felina",
"Positive/Negative" : "Positivo/Negativo",
"Post" : "Post",
"Postage costs" : "Gastos de envío",
"Pot Bellied" : "Puerco Vietnamita",
"Prairie Dog" : "Perro de la Pradera",
"Prefill new media notes for animal images with animal comments if left blank" : "Prellenar nuevas notas multimedia para imagenes de animales con comentarios de animales si se dejan en blanco",
"Prefill new media notes with the filename if left blank" : "Nombrar las nuevas notas multimedia con el nombre del archivo si éste se dejo en blanco",
"Premises" : "Premisas",
"Presa Canario" : "Presa Canario",
"Press F11 in HTML or SQL code editing boxes to edit in fullscreen mode" : "Pulse F11 en código HTML o SQL en los campos de edición para editar en pantalla completa",
"Preview" : "Previsualizar",
"Previous" : "Anterior",
"Previous Adopter" : "Adoptante anterior",
"Print" : "Imprimir",
"Print Preview" : "Previsualizar impresión",
"Print selected forms" : "Imprimir los formularios seleccionados",
"Printable Manual" : "Manual imprimible",
"Printing word processor documents uses hidden iframe and window.print" : "La impresión de documentos de procesador de textos utiliza iframe ocultos y window print",
"Priority" : "Prioridad",
"Priority Floor" : "Nivel de prioridad",
"Produce a CSV File" : "Producir un archivo CSV",
"Produce a PDF of printable labels" : "Producir un archivo PDF de etiquetas imprimibles",
"Profile" : "Perfil",
"Profile name cannot be blank" : "Nombre de perfíl no puede estar en blanco",
"Public Holiday" : "Día festivo",
"Publish Animals to the Internet" : "Publicar los Animales al Internet",
"Publish HTML via FTP" : "Publicar HTML mediante FTP",
"Publish now" : "Publicar ahora",
"Publish to folder" : "Publicar en un folder",
"Published to Website" : "Publicado en sitio web",
"Publisher" : "Editor",
"Publisher Breed" : "Raza de editor",
"Publisher Color" : "Color del editor",
"Publisher Logs" : "Registros del editor",
"Publisher Species" : "Especie del editor",
"Publishing" : "Publicar",
"Publishing History" : "Historial de publicación",
"Publishing Logs" : "Registros del editor",
"Publishing Options" : "Opciones de publicación",
"Publishing complete." : "Edición completada.",
"Publishing template" : "Plantilla del editor",
"Pug" : "Carlino",
"Puli" : "Puli",
"Pumi" : "Pumi",
"Puppies (under {0} months)" : "Cachorros (menos de {0} meses)",
"Purchased" : "Comprado",
"Qty" : "Cantidad",
"Quaker Parakeet" : "Cotorra argentina",
"Quantity" : "Cantidad",
"Quarantine" : "Cuarentena",
"Quarterhorse" : "Quarterhorse (Cuarto de Milla)",
"Quarterly" : "Trimestral",
"Quick Links" : "Enlaces Rápidos",
"Quicklinks" : "Enlaces rápidos",
"Quicklinks are shown on the home page and allow quick access to areas of the system." : "Enlaces rápidos se muestran en la página principal y permiten un rápido acceso a las zonas del sistema.",
"R" : "R",
"Rabbit" : "Conejo",
"Rabies" : "Rabia",
"Rabies Tag" : "Etiqueta de rabia",
"RabiesTag" : "Placa antirrábica",
"Radio Buttons" : "Botones",
"Ragamuffin" : "Ragamuffin",
"Ragdoll" : "Ragdoll",
"Rank" : "Puesto",
"Rat" : "Rata",
"Rat Terrier" : "Rat Terrier",
"Raw Markup" : "Marcador puro",
"Read the manual for more information about Animal Shelter Manager." : "Lea el manual para más información sobre Animal Shelter Manager.",
"Real name" : "Nombre real",
"Reason" : "Razón",
"Reason For Appointment" : "Motivo de la cita",
"Reason Not From Owner" : "No especificado por el dueño",
"Reason for Entry" : "Razón para entrar",
"Reason for entry" : "Razón para entrar",
"Reason not from Owner" : "La razón por la que el propietario no trajo el animal él mismo",
"Reason the owner did not bring in the animal themselves" : "Razón por la que el propietario no trajo el animal el mismo",
"Recalculate ALL animal ages/times" : "Recalcula todas las edades de los animales",
"Recalculate ALL animal locations" : "Recalcular la ubicación de TODOS los animales",
"Recalculate on-shelter animal locations" : "Recalcular las ubicaciones del animales del refugio",
"Receipt No" : "Recibo número",
"Receipt/Invoice" : "Recibo / Factura",
"Receive" : "Recibir",
"Receive a donation" : "Recibir una donación",
"Receive a payment" : "Recibir un pago",
"Received" : "Recibido",
"Received in last day" : "Recibido en el último día",
"Received in last month" : "Recibido en el mes pasado",
"Received in last week" : "Recibido en la última semana",
"Received in last year" : "Recibido en el último año",
"Received today" : "Recibido hoy",
"Recently Adopted" : "Recientemente adoptados",
"Recently Changed" : "Ultimos modificados",
"Recently Entered Shelter" : "Shelter introdujo recientemente",
"Recently Fostered" : "Recientemente acogido",
"Recently deceased" : "Recientemente fallecidos",
"Recently deceased shelter animals (last 30 days)." : "Fallecimientos recientes de animales del refugio (últimos 30 días)",
"Reception" : "Recepción",
"Reclaim" : "Recuperar",
"Reclaim an animal" : "Recuperar un animal",
"Reclaim movements must have a valid reclaim date." : "Los movimientos de recuperación de animales deben tener una fecha de recuperación válida.",
"Reclaim successfully created." : "Recuperación creada correctamente.",
"Reclaimed" : "Reclamado",
"Reconcile" : "Reconciliar",
"Reconciled" : "Conciliado",
"Redbone Coonhound" : "Redbone Coonhound",
"Rediarised" : "Incluir en el diario de nuevo",
"Redirect to URL after POST" : "Reenviar a URL después de la POST",
"Reference" : "Referencia",
"Refresh" : "Actualizar",
"Regenerate 'Match lost and found animals' report" : "Regenera el informe \"Correspondencias entre animales perdidos y encontrados\"",
"Regenerate 'Person looking for' report" : "Regenera el informe \"Personas buscando\"",
"Regenerate annual animal figures for" : "Regenera las cantidades anuales de animales para",
"Regenerate monthly animal figures for" : "Regenera las cantidades mensuales de animales para",
"Regenerate person names in selected format" : "Regenera los nombres de personas en el formato seleccionado",
"Register Microchip" : "Registra microchip",
"Register microchips after" : "Registrar microchips después",
"Released To Wild" : "Reinsertado",
"Released To Wild {0}" : "Soltar a la naturaleza {0}",
"Reload" : "Actualizar",
"Remaining" : "Restante",
"Remember me on this computer" : "Recordarme en este equipo",
"Removal" : "Eliminación automática",
"Removal Reason" : "Razón de borrado",
"Removal reason" : "Razón para eliminarlo",
"Remove" : "Eliminar",
"Remove HTML and PDF document media after this many years" : "Elimina los documentos HTML y PDF después de estos años",
"Remove clinic functionality from screens and menus" : "Elimina la funcionalidad de clínica de las pantallas y menús",
"Remove fine-grained animal control incident permissions" : "Elimina los permisos de los incidentes detallados de control de animales",
"Remove holds after" : "Retirar las retenciones después",
"Remove move menu and the movements tab from animal and person screens" : "Eliminar el menú mover y la pestaña de movimientos de las pantallas de animales y personas",
"Remove personally identifiable data" : "Eliminar datos personales identificables",
"Remove previously published files before uploading" : "Eliminar archivos publicados previamente antes de cargar",
"Remove retailer functionality from the movement screens and menus" : "Eliminar la funcionalidad de minorista de las pantallas y menús de Movimientos",
"Remove short shelter code box from the animal details screen" : "Eliminar el código corto de refugio de la pantalla de detalles del animal",
"Remove the FIV/L test fields from animal health details" : "Retirar los campos de prueba de VIF/L de los detalles de sanidad animal",
"Remove the Litter ID field from animal details" : "Retirar el campo ID de Camada de detalles de animales",
"Remove the Rabies Tag field from animal health details" : "Retirar el campo Tag rabia de los detalles de sanidad animal",
"Remove the adoption coordinator field from animal entry details" : "Elimina el campo de coordinador de adopciones de los detalles de entrada del animal",
"Remove the adoption fee field from animal details" : "Retirar el campo de tarifa de adopción de los datos de los animales",
"Remove the animal control functionality from menus and screens" : "Eliminar la funcionalidad de control de animales de los menús y pantallas",
"Remove the bonded with fields from animal entry details" : "Retirar el campo de union de los detalles de los detalles del animal",
"Remove the city/state fields from person details" : "Retirar los campos de ciudad / estado de los detalles persona",
"Remove the coat type field from animal details" : "Retirar el campo de tipo de pelaje de los detalles del animal",
"Remove the declawed box from animal health details" : "Eliminar la casilla de selección de “sin uñas” de la información de salud del animal",
"Remove the document repository functionality from menus" : "Retirar la funcionalidad depósito de documentos de los menus",
"Remove the good with fields from animal notes" : "Retirar el campo de bueno con de las notas del animal",
"Remove the heartworm test fields from animal health details" : "Retirar el campo de Gusano de corazón de los campos de salud del animal",
"Remove the insurance number field from the movement screens" : "Retirar el campo de número de seguro desde las pantallas de movimiento",
"Remove the location unit field from animal details" : "Elimine el campo de la unidad de ubicación de la pantalla de detalles del animal",
"Remove the microchip fields from animal identification details" : "Retirar los campos de microchips de los detalles de identificación de animales",
"Remove the neutered fields from animal health details" : "Retirar los campos castrados de datos zoosanitarios",
"Remove the online form functionality from menus" : "Retirar la funcionalidad formulario en línea de los menús",
"Remove the picked up fields from animal entry details" : "Retirar el campo de \"recogido en\" de los datos del animal",
"Remove the rota functionality from menus and screens" : "Eliminar la funcionalidad de rotación de los menús y pantallas",
"Remove the size field from animal details" : "Retirar el campo de tamaño de los detalles de animales",
"Remove the stock control functionality from menus and screens" : "Eliminar la funcionalidad de inventario de control de animales de los menús y pantallas",
"Remove the tattoo fields from animal identification details" : "Retirar los campos del tatuaje de datos de identificación de animales",
"Remove the transport functionality from menus and screens" : "Eliminar la funcionalidad de transporte de los menús y pantallas",
"Remove the trap loan functionality from menus and screens" : "Eliminar la funcionalidad de préstamo de jaulas trampa de los menús y pantallas",
"Remove the weight field from animal details" : "Retirar el campo de peso de los datos de animales",
"Removed" : "Eliminado",
"Rename" : "Renombrar",
"Renew License" : "Renovar licencia",
"Renew licence" : "Renovar licencia",
"Renew license" : "Renovar licencia",
"Report" : "Informe",
"Report Title" : "Título del Informe",
"Report a new incident" : "Registrar un nuevo incidente",
"Reports" : "Informes",
"Request signature by email" : "Requerir firma por correo eletrónico",
"Requested" : "Solicitado",
"Require followup" : "Se requiere un seguimiento",
"Required" : "Fecha en la que se tendría que vacunar/tratar",
"Required date must be a valid date" : "Fecha requeria debe de ser valida",
"Reschedule" : "Recalendarizar",
"Reservation" : "Reserva",
"Reservation Book" : "Registro de Animales Reservados",
"Reservation Cancelled" : "Reserva cancelada",
"Reservation Date" : "Fecha de la reserva",
"Reservation For" : "Reserva para",
"Reservation Status" : "Estado de la reserva",
"Reservation Statuses" : "Estados de la reserva",
"Reservation book" : "Libro de reservas",
"Reservation date cannot be after cancellation date." : "La fecha de la reserva no puede ser posterior a la fecha de cancelación.",
"Reservation successfully created." : "Reserva creada correctamente.",
"Reservations must have a valid reservation date." : "Las reservas deben tener una fecha de reserva válida.",
"Reserve" : "Reservar",
"Reserve an animal" : "Reservar un animal",
"Reserved" : "Reservado",
"Reset" : "Restablecer",
"Reset Password" : "Reinicializar contraseña",
"Respond" : "Responder",
"Responded" : "Respondido",
"Responded Between" : "Respondido entre",
"Responded Date/Time" : "Fecha/hora de respuesta",
"Result" : "Resultado",
"Results" : "Resultados",
"Results for '{0}'." : "Resultados para '{0}'.",
"Retailer" : "Vendedor",
"Retailer Animals" : "Animales de tienda",
"Retailer Book" : "Libro de colaboradores",
"Retailer book" : "Libro de colaboradores",
"Retailer movement successfully created." : "Movimiento de vendedor creado con éxito.",
"Retailer movements must have a valid movement date." : "Los movimientos de minorista deben tener una fecha de movimiento válida.",
"Retriever" : "Retriever",
"Return" : "Devolver",
"Return Category" : "Regresar categoría",
"Return Date" : "Fecha de devolución",
"Return a transferred animal" : "Devuelve al refugio un animal trasladado",
"Return an animal from adoption" : "Devolver animal adoptado",
"Return an animal from another movement" : "Devolver a un animal de otro movimiento",
"Return an animal from transfer" : "Devuelve un animal del traslado",
"Return date cannot be before the movement date." : "La fecha de devolución no puede ser anterior a la fecha del movimiento.",
"Return this movement and bring the animal back to the shelter" : "Regresa a este movimiento y devuelve el animal al refugio",
"Returned" : "Devuelto",
"Returned By" : "Devuelto por",
"Returned To Owner" : "Devuelto al contacto",
"Returned from" : "Devuelto de",
"Returned to" : "Devuelto a",
"Returned to Owner {0}" : "Regresado al dueño {0}",
"Returning" : "Devolución",
"Returns {0}" : "Devoluciones {0}",
"Reupload animal images every time" : "Volver a subir las imagenes de los animales cada vez",
"Rex" : "Rex",
"Rhea" : "Rhea (ñandú)",
"Rhinelander" : "Rhinelander (renano)",
"Rhodesian Ridgeback" : "Ridgeback de Rodesia",
"Ringneck/Psittacula" : "Periquito de collar",
"Role is in use and cannot be deleted." : "El role está en uso y no se puede borrar.",
"Roles" : "Cargos",
"Roles need a name." : "Los cargos necesitan un nombre.",
"Rosella" : "Periquito Oriental",
"Rostered day off" : "Día libre asignado",
"Rota" : "Turno",
"Rota Types" : "Tipos de turnos",
"Rota cloned successfully." : "Turno duplicado correctamente.",
"Rotate image 90 degrees anticlockwis" : "Rota la imagen 90 grados en sentido contrario a las agujas del reloj",
"Rotate image 90 degrees clockwise" : "Rotar la imagen 90 grados a la derecha",
"Rottweiler" : "Rottweiler",
"Rough" : "Áspero",
"Rows" : "Filas",
"Ruddy" : "Colorado",
"Russian Blue" : "Azul Ruso",
"S (Stray Cat)" : "S (Gato callejero)",
"S = first letter of animal species" : "S= primera letra de la especie de animal",
"SM Account" : "Cuenta SM",
"SMS" : "SMS",
"SQL" : "SQL",
"SQL Interface" : "Interface SQL",
"SQL dump" : "Vaciar SQL",
"SQL dump (ASM2 HSQLDB Format)" : "Dump SQL (ASM2 HSQLDB Format)",
"SQL editor: Press F11 to go full screen and press CTRL+SPACE to autocomplete table and column names" : "Editor SQL: Pulsa F11 para ir a la pantalla completa y pulsa CTRL+ESPACIO para autocompletar los nombres de tablas y columnas",
"SQL interface" : "Interface SQL",
"SQL is syntactically correct." : "La sintaxis de la sentencia SQL no es correcta.",
"SS = first and second letter of animal species" : "SS = primera y segunda letra de la especie de animal",
"Sa" : "Sa",
"Saddlebred" : "Saddlebred",
"Saint Bernard St. Bernard" : "San Bernardo",
"Sales Tax" : "Impuesto sobre la venta",
"Saluki" : "Saluki",
"Samoyed" : "Samoyedo",
"Sat" : "Sab",
"Satin" : "Satin",
"Saturday" : "Sábado",
"Save" : "Guardar",
"Save and leave" : "Guardar y salir",
"Save this incident" : "Guardar este incidente",
"Save this person" : "Guardar esta persona",
"Save this record" : "Guardar este registro",
"Save this waiting list entry" : "Grabar esta lista de espera",
"Saving..." : "Guardando...",
"Scale published animal images to" : "La escala de los animales publicados a",
"Scheduled" : "Previsto",
"Schipperke" : "Schipperke",
"Schnauzer" : "Schnauzer",
"Scottish Deerhound" : "Lebrel Escocés",
"Scottish Fold" : "Fold Escocés",
"Scottish Terrier Scottie" : "Terrier Escocés",
"Script" : "Escritura",
"Seal" : "Foca",
"Sealyham Terrier" : "Sealyham Terrier",
"Search" : "Buscar",
"Search Results for '{0}'" : "Resultados de busqueda para '{0}'",
"Search returned {0} results." : "La búsqueda devolvió {0} resultados.",
"Search sort order" : "Buscar orden de lista",
"Searchable" : "Buscable",
"Second offence" : "Segunda infracción",
"Select" : "Seleccionar",
"Select a person" : "Seleccionar a la persona",
"Select a person to attach this form to." : "Seleccione una persona para adjuntar este formulario.",
"Select a person to merge into this record. The selected person will be removed, and their movements, diary notes, log entries, etc. will be reattached to this record." : "Seleccionar una persona para unir con este archivo. La persona seleccionada será eliminada, y sus movimientos, notas del diario, registros, etc. serán adjuntadas a este registro.",
"Select all" : "Seleccionar todo",
"Select an animal" : "Seleccione un animal",
"Select an animal to attach this form to." : "Seleccione un animal para adjuntar este formulario.",
"Select an animal to merge into this record. The selected animal will be removed, and their movements, diary notes, log entries, etc. will be reattached to this record." : "Seleccionar un animal para incluir en este registro. El animal seleccionado será eliminado, y sus movimientos, notas de diario, entradas de registro, etc. se adjuntarán a este historial.",
"Select animal to merge" : "Selecciona un animal para fusionar",
"Select animals" : "Seleccione los animales",
"Select date for diary task" : "Seleccionar fecha para la entrada del diario",
"Select person to merge" : "Seleccionar una persona para unir",
"Select recommended" : "Seleccionar recomendaciones",
"Selected On-Shelter Animals" : "Animales del refugio seleccionados",
"Selkirk Rex" : "Rex Selkirk",
"Send" : "Enviar",
"Send Emails" : "Enviar emails",
"Send a weekly email to fosterers with medical information about their animals" : "Enviar un email semanal a las casas de acogida con información médica sobre sus animales",
"Send confirmation email to form submitter" : "Envía un email de confirmación al remitente del formulario",
"Send emails" : "Enviar emails",
"Send mass emails and perform mail merges" : "Enviar emails masivos y realizar la fusión de los mails",
"Send via email" : "Enviar por correo electrónico",
"Sending {0} emails is considered abusive and will damage the reputation of the email server." : "Enviar {0} emails se considera abusivo y puede dañar la reputación del servidor de correo electrónico.",
"Sending..." : "Enviando…",
"Senior" : "Senior",
"Sent to mobile signing pad." : "Enviado al móvil para firmar.",
"Sep" : "Sep",
"Separate waiting list rank by species" : "Separar rango de lista de espera por especies",
"September" : "Septiembre",
"Server clock adjustment" : "Ajuste del reloj del servidor",
"Set publishing options" : "Establecer opciones de publicación",
"Set this to 0 to never automatically remove." : "Establezca a 0 para no eliminar automáticamente.",
"Set to 0 to never update urgencies." : "Se establece en 0 para nunca actualizar urgencias.",
"Set wether or not this user account can log in to the user interface." : "Configura si esta cuenta de usuario puede registrarse en la interfaz de usuarios.",
"Setter" : "Setter",
"Setting a location filter will prevent this user seeing animals who are not in these locations on shelterview, find animal and search." : "Configurar un filtro de localización previene que este usuario pueda ver animales que no están en esta localización en \"vista de refugio\", \"encuentra un animal\" y \"busca\".",
"Settings" : "Configuración",
"Settings, Lookup data" : "Ajustes, Buscar datos",
"Settings, Options" : "Ajustes, opciones",
"Settings, Reports" : "Ajustes, informes",
"Settings, System user accounts" : "Ajustes, Usuarios del sistema",
"Sex" : "Sexo",
"Sex and Species" : "Sexo y especies",
"Sexes" : "Sexos",
"Shar Pei" : "Shar Pei",
"Share" : "Compartir",
"Shared weblink" : "Enlace web compartido",
"Shares" : "Compartidos",
"Sheep" : "Oveja",
"Sheep Dog" : "Pastor",
"Shelter" : "Refugio",
"Shelter Animal" : "Animal del refugio",
"Shelter Animals" : "Animales del refugio",
"Shelter Details" : "Detalles del refugio",
"Shelter animal {0} '{1}'" : "Animal del refugio {0} '{1}'",
"Shelter animals" : "Animales del refugio",
"Shelter code cannot be blank" : "El código del refugio no puede estar en blanco",
"Shelter code {0} has already been allocated to another animal." : "Codigo de refugio {0} ya ha sido asignado a otro animal.",
"Shelter stats (all time)" : "Estadísticas Shelter (todos los tiempos)",
"Shelter stats (this month)" : "Estadísticas Shelter (este mes)",
"Shelter stats (this week)" : "Estadísticas Shelter (esta semana)",
"Shelter stats (this year)" : "Estadísticas Shelter (este año)",
"Shelter stats (today)" : "Estadísticas Shelter (hoy)",
"Shelter view" : "Panoramica refugio",
"Shepherd" : "Ovejero",
"Shetland Sheepdog Sheltie" : "Pastor de Shetland",
"Shiba Inu" : "Shiba Inu",
"Shift" : "Cambio",
"Shih Tzu" : "Shih Tzu",
"Short" : "Corto",
"Show GDPR Contact Opt-In field on person screens" : "Mostrar el campo de protección de datos en las pantallas de la persona",
"Show PDF files inline instead of sending them as attachments" : "Mostrar archivos PDF encola en lugar de enviarlos como adjuntos",
"Show a cost field on medical/test/vaccination screens" : "Mostrar un campo de precio en las pantallas de medicación/pruebas/vacunas",
"Show a minimap of the address on person screens" : "Mostrar un mini-mapa de la dirección en las pantallas de personas",
"Show a separate paid date field with costs" : "Mostrar un campo separado de fechas de pagos con precios",
"Show alerts on the home page" : "Mostrar alertas en la página principal",
"Show animal thumbnails in movement and medical books" : "Mostrar miniaturas de animal en movimientos y libros médicos",
"Show animals adopted" : "Mostrar animales adoptados",
"Show codes on the shelter view screen" : "Mostrar códigos en la pantalla de vista de refugio",
"Show complete comments in table views" : "Mostrar comentarios completos en una tabla",
"Show empty locations" : "Mostrar localizaciones vacías",
"Show on new record screens" : "Mostrar en pantallas de nuevo registro",
"Show quick links on all pages" : "Mostrar enlaces rapidos en todas las páginas",
"Show quick links on the home page" : "Mostrar enlaces rápidos en la página principal",
"Show report menu items in collapsed categories" : "Mostrar un informe de elementos del menú en categorías colapsadas",
"Show short shelter codes on screens" : "Mostrar código corto del refugio en pantalla",
"Show the adoption fee field" : "Mostrar el campo de tarifa de adopción",
"Show the altered fields" : "Mostrar los campos de esterilización/castración",
"Show the breed fields" : "Mostrar el campo de raza",
"Show the brought in by field" : "Mostrar el campo de \"Traído por\"",
"Show the color field" : "Mostrar el campo de color de pelo",
"Show the date brought in field" : "Mostrar el campo de fecha de recibido",
"Show the entry category field" : "Mostrar el campo de categoría de entrada",
"Show the full diary (instead of just my notes) on the home page" : "Mostrar el diario completo (en vez de solo mis notas) en la página principal",
"Show the hold fields" : "Mostrar los campos de animales retenidos",
"Show the internal location field" : "Mostrar el campo de ubicación interna",
"Show the litter ID field" : "Mostrar el campo del ID de la camada",
"Show the location unit field" : "Mostrar el campo de la unidad de ubicación",
"Show the microchip fields" : "Mostrar los campos de microchip",
"Show the original owner field" : "Mostrar el campo del propietario original",
"Show the size field" : "Mostrar el campo de tamaño",
"Show the tattoo fields" : "Mostrar los campos de tatuaje",
"Show the time brought in field" : "Mostrar el campo de fecha de entrada",
"Show the transfer in field" : "Mostrar el campo de trasladado a",
"Show the weight field" : "Mostrar el campo de peso",
"Show timeline on the home page" : "Mostrar la línea de tiempo en la página de inicio",
"Show tips on the home page" : "Mostrar sugerencias en la página de inicio",
"Show transactions from" : "Mostrar las transacciones desde",
"Show weight as lb rather than kg" : "Mostrar el peso el libras en lugar de en quilógramos",
"Showing {0} timeline events." : "Mostrando {0} eventos del historial.",
"Siamese" : "Siamés",
"Siberian" : "Siberiano",
"Siberian Husky" : "Husky Siberiano",
"Sick leave" : "Baja por enfermedad",
"Sick/Injured" : "Enfermo/Herido",
"Sick/injured animal" : "Animal enfermo/herido",
"Sign" : "Firmar",
"Sign document" : "Firmar el documento",
"Sign on screen" : "Firma en la pantalla",
"Signature" : "Firma",
"Signed" : "Firmado",
"Signing" : "Firmando",
"Signing Pad" : "Superfície para firmar",
"Signup" : "Registrarse",
"Silky Terrier" : "Terrier Sedoso Australiano",
"Silver" : "Plateado",
"Silver Fox" : "Silver Fox",
"Silver Marten" : "Silver Marten",
"Similar Animal" : "Animal similar",
"Similar Person" : "Persona similar",
"Simple" : "Sencillo",
"Singapura" : "Singapur",
"Single Treatment" : "Un solo tratamiento",
"Site" : "Centro",
"Sites" : "Centros",
"Size" : "Tamaño",
"Sizes" : "Tamaños",
"Skunk" : "Mofeta",
"Skye Terrier" : "Skye Terrier",
"Sloughi" : "Sloughi o Galgo Árabe",
"Small" : "Pequeño",
"SmartTag PETID" : "PETID del SmartTag",
"Smooth Fox Terrier" : "Foxterrier de Pelo Corto",
"Snake" : "Culebra",
"Snowshoe" : "Snowshoe",
"Social" : "Social",
"Softbill (Other)" : "Pico Blando (Otros)",
"Sold" : "Vendido",
"Somali" : "Somalí",
"Some batch processes may take a few minutes to run and could prevent other users being able to use the system for a short time." : "Algunos procesos en lote pueden tardar algunos minutos para ejecutarse y podría impedir que otros usuarios puedan utilizar el sistema durante un período corto de tiempo.",
"Some browsers allow shortcut keys, press SHIFT+ALT+A in Chrome or Firefox to jump to the animal adoption screen." : "Algunos navegadores permiten accesos directos. Pulse MAYUS+ALT+A en Chrome o Firefox para ir a la pantalla de adopción de animales.",
"Some info text" : "Parte del texto info",
"Sorrel" : "Alazán",
"Sorrel Tortoiseshell" : "Tortie Alazán",
"Sorry, this document has already been signed" : "Lo sentimos, este documento ya ha sido firmado",
"South Russian Ovcharka" : "Pastor de Rusia Meridional",
"Spaniel" : "Spaniel",
"Special Needs" : "Necesidades especiales",
"Species" : "Especies",
"Species A-Z" : "Especies de la A-Z",
"Species Z-A" : "Especies de la Z-A",
"Species to use when publishing to third party services and adoption sites" : "Las especies que se usarán cuando se publica en los servicios de terceros y sitios de adopción",
"Specifying a reschedule date will make copies of the selected vaccinations and mark them to be given on the reschedule date. Example: If this vaccination needs to be given every year, set the reschedule date to be 1 year from today." : "Especificar una fecha de revacunación hará copias de las vacunas seleccionadas y las marcará para que se administren en la fecha de revacunación. Ejemplo: si una vacuna necesita ser administrada cada año, establece la fecha de revacunación de aquí a un año.",
"Sphynx (hairless cat)" : "Sphynx",
"Spitz" : "Spitz",
"Split baby/adult age at" : "Dividir la edad de bebé/adulto en",
"Split species pages with a baby/adult prefix" : "Dividir las páginas de especies con el prefijo de bebé/adulto",
"Sponsorship donations" : "Donaciones de patrocinadores",
"Staff" : "Personal",
"Staff Rota" : "Turnos del personal",
"Staff record" : "Registro personal",
"Staff rota" : "Turnos del personal",
"Staffordshire Bull Terrier" : "Staffordshire Bullterrier",
"Standard" : "Estándar",
"Standardbred" : "Standardbred",
"Start Date" : "Fecha de Inicio",
"Start Of Day" : "Inicio del día",
"Start Time" : "Hora de inicio",
"Start at" : "Comenzar en",
"Start date" : "Fecha de inicio",
"Start date must be a valid date" : "Fecha de inicio debe ser una fecha válida",
"Start of year" : "Comienzo del año",
"Started" : "Comenzado",
"Starts" : "Comienza",
"State" : "Estado",
"State contains" : "Estado contiene",
"Stationary costs" : "Costos estacionarios",
"Stats" : "Estadísticas",
"Stats period" : "Período de Estadísticas",
"Stats show running figures for the selected period of animals entering and leaving the shelter on the home page." : "Estadísticas muestran figuras de funcionamiento para el período seleccionado de los animales que entran y salen del refugio en la página principal.",
"Status" : "Estado",
"Status and Species" : "Estado y especies",
"Stay" : "Quieto",
"Stock" : "Existencias",
"Stock Control" : "Control de existencias",
"Stock Levels" : "Niveles de existencias",
"Stock Locations" : "Ubicaciones de existencias",
"Stock Take" : "Revisar manualmente el stock",
"Stock Usage Type" : "Tipo de uso del stock",
"Stock level must have a name" : "El nivel de stock necesita un nombre",
"Stock level must have a unit" : "El nivel de stock tiene que tener una unidad",
"Stock needs a name and unit." : "El stock necesita un nombre y una unidad.",
"Stocktake" : "Revisar manualmente el stock",
"Stolen" : "Robado",
"Stolen {0}" : "Robado {0}",
"Stop" : "Parar",
"Stop Publishing" : "Detener publicación",
"Stores" : "Almacenes",
"Stray" : "Callejero",
"Su" : "Do",
"SubTotal" : "Subtotal",
"Subject" : "Asunto",
"Submission received: {0}" : "Formulario recibido: {0}",
"Success" : "Éxito",
"Successfully attached to {0}" : "Adjuntado con éxito a {0}",
"Sugar Glider" : "Petauro del Azúcar",
"Sun" : "Dom",
"Sunday" : "Domingo",
"Super user" : "Super usuario",
"Superuser" : "Superusuario",
"Surname" : "Apellidos",
"Surrender" : "Renuncia",
"Surrender Pickup" : "Recogida de renuncia",
"Suspect" : "Sospechoso",
"Suspect 1" : "Sospechoso 1",
"Suspect 2" : "Sospechoso 2",
"Suspect 3" : "Sospechoso 3",
"Suspect/Animal" : "Sospechoso/Animal",
"Swan" : "Cisne",
"Swedish Vallhund" : "Perro de los Visigodos",
"Syntax check this SQL" : "Comprobar la sintaxis de este SQL",
"System" : "Sistema",
"System Admin" : "Administrador del sistema",
"System Options" : "Opciones del sistema",
"System user accounts" : "Cuentas de usuarios del sistema",
"T = first letter of animal type" : "T = primera letra del tipo de animal",
"TNR" : "CES",
"TNR - Trap/Neuter/Release" : "CES-Captura/Esterilización/Suelta",
"TT = first and second letter of animal type" : "TT = primera y segunda letra del tipo de animal",
"Tabby" : "Atigrado",
"Tabby and White" : "Atigrado y Blanco",
"Take another payment" : "Coger otro pago",
"Taken By" : "Cogido por",
"Tan" : "Canela",
"Tan and Black" : "Canela y Negro",
"Tan and White" : "Canela y Blanco",
"Task complete." : "Tarea completada.",
"Task items are executed in order of index, lowest to highest" : "Las tareas se ejecutan en orden de registro, de menor a mayor",
"Tattoo" : "Tatuaje",
"Tattoo Date" : "Fecha Tatuaje",
"Tattoo Number" : "Numero Tatuaje",
"Tax" : "Impuesto",
"Tax Amount" : "Cantidad de impuesto",
"Tax Rate %" : "Tipo de tasa %",
"Telephone" : "Teléfono",
"Telephone Bills" : "Facturas Telefonicas",
"Template" : "Plantilla",
"Template Name" : "Nombre de la plantilla",
"Template names can include a path portion with /, eg: Vets/Rabies Certificate" : "Nombres de plantillas que pueden incluir una parte de la ruta con /, por ejemplo: Veterinarios / Certificado Rabia",
"Tennessee Walker" : "Tennessee Walker",
"Terrapin" : "Galápago",
"Terrier" : "Terrier",
"Test" : "Prueba",
"Test Animal" : "Animal de prueba",
"Test Book" : "Prueba libro",
"Test Performed" : "Prueba realizada",
"Test Results" : "Resultados del test",
"Test Types" : "Tipos de prueba",
"Test book" : "Prueba libro",
"Test marked as performed for {0} - {1}" : "Prueba marcado como realizado para {0} - {1}",
"Tests" : "Pruebas",
"Tests need an animal and at least a required date." : "Las pruebas tienen un animal y por lo menos una fecha requerida.",
"Text" : "Texto",
"Text Encoding" : "Codificación de texto",
"Th" : "Ju",
"Thai Ridgeback" : "Perro Tailandés",
"Thank you for choosing Animal Shelter Manager for your shelter!" : "¡Gracias por utilizar Animal Shelter Manager para su refugio!",
"Thank you, the document is now signed." : "Gracias, el documento ha sido firmado.",
"That animal is already linked to the incident" : "Este animal ya está enlazado al incidente",
"The CSV file should be created by PayPal's \"All Activity\" report." : "El archivo CSV tendría que ser creado por un informe de “Actividad Completa” de PayPal.",
"The SmartTag PETID number" : "El n.º PETID del SmartTag",
"The SmartTag type" : "El tipo de SmartTag",
"The URL is the address of a web resource, eg: www.youtube.com/watch?v=xxxxxx" : "La URL es la dirección de un recurso web, por ejemplo: www.youtube.com/watch?v=xxxxxx",
"The animal name" : "El nombre del animal",
"The animal record to merge must be different from the original." : "El informe del animal para incluir debe ser diferente del original.",
"The animal sex" : "El sexo del animal",
"The base color of this animal" : "El color base de este animal",
"The coat type of this animal" : "El pelaje de este animal",
"The confirmation email message to send to the form submitter. Leave blank to send a copy of the completed form." : "El mail de confirmación se ha enviado al remitente del formulario. Dejar en blanco para enviar una copia del formulario rellenado.",
"The database will be inaccessible to all users while the export is in progress." : "La base de datos será inaccesible para todos los usuario mientras se lleva a cabo la exportación.",
"The date reported to the shelter" : "La fecha reportada en el refugio",
"The date the animal died" : "Fecha en la que falleció el animal",
"The date the animal was FIV/L tested" : "La fecha en la que el animal fue sometido a la prueba FIV/L",
"The date the animal was adopted" : "La fecha en que se adoptó al animal",
"The date the animal was altered" : "La fecha en la que el animal fue castrado",
"The date the animal was born" : "La fecha de nacimiento del animal",
"The date the animal was brought into the shelter" : "Fecha en la que el animal fue traído al refugio",
"The date the animal was heartworm tested" : "La fecha en la que el animal fue checado contra lombrices",
"The date the animal was microchipped" : "La fecha en la que al animal se le colocó el microchip",
"The date the animal was reclaimed" : "La fecha en la que el animal fue recuperado",
"The date the animal was tattooed" : "La fecha en que se tatuó a este animal",
"The date the foster animal will be returned if known" : "La fecha en la que el animal acogido será devuelto (si existe fecha)",
"The date the foster is effective from" : "La fecha en la que la casa de acogida es efectiva",
"The date the litter entered the shelter" : "La fecha en la que la camada entro en el refugio",
"The date the owner last contacted the shelter" : "La fecha en la que el dueño contacto al refugio por ultima vez",
"The date the payment was received" : "La fecha de pago fue recibida",
"The date the reservation is effective from" : "La fecha a partir de la cual la reserva es efectiva",
"The date the retailer movement is effective from" : "La fecha desde la que el movimiento de minorista es efectivo",
"The date the transfer is effective from" : "La fecha del traslado es efectiva a partir de",
"The date the trial adoption is over" : "La fecha en la que la adopción a prueba termina",
"The date the vaccination is required/due to be administered" : "Fecha en la que se debe administrar la vacuna",
"The date the vaccination was administered" : "Fecha en la que se administró la vacuna",
"The date this animal was found" : "La fecha en que se encontró este animal",
"The date this animal was lost" : "La fecha en que se perdió este animal",
"The date this animal was put on the waiting list" : "La fecha en que se registró este animal en la lista de espera",
"The date this animal was removed from the waiting list" : "La fecha en que este animal se quitó de la lista de espera",
"The date this animal was reserved" : "La fecha en que se reservó este animal",
"The date this animal was returned to its owner" : "La fecha en la que este animal fue devuelto a su contacto",
"The date this person was homechecked." : "La fecha en la que se realizó la visita a domicilio de esta persona.",
"The default username is 'user' with the password 'letmein'" : "El nombre de usuario predeterminado es «user», y la contraseña es «letmein»",
"The entry reason for this animal" : "El motivo de entrada de este animal",
"The litter this animal belongs to" : "La camada a la que pertenece este animal",
"The locale determines the language ASM will use when displaying text, dates and currencies." : "La configuración regional determina el idioma ASM utilizará al mostrar texto, fechas y monedas.",
"The location where the animal was picked up" : "Localización donde el animal se recogió",
"The microchip number" : "El número del microchip",
"The movement number '{0}' is not unique." : "El número de movimiento «{0}» no es único.",
"The number of stock records to create" : "El número de existencias a crear",
"The period in days before waiting list urgency is increased" : "El período en el día antes de la lista de espera de urgencia se incrementa",
"The person record to merge must be different from the original." : "El documento de persona para combinar debe ser diferente de la original.",
"The primary breed of this animal" : "La raza principal de este animal",
"The reason the owner wants to part with the animal" : "La razón por la que el propietario desea desprenderse de los animales",
"The reason this animal was removed from the waiting list" : "La razón de este animal fue retirado de la lista de espera",
"The remaining units in the container" : "Las unidades remanentes en el contenedor",
"The result of the FIV test" : "El resultado de la prueba de FIV",
"The result of the FLV test" : "El resultado de la prueba de FLV",
"The result of the heartworm test" : "El resultado de la prueba del parásito del corazón",
"The retail/resale price per unit" : "El precio de venta/reventa por unidad",
"The secondary breed of this animal" : "La raza secundaria de este animal",
"The selected file is not an image." : "El archivo seleccionado no es una imagen.",
"The shelter category for this animal" : "La categoría de refugio para este animal",
"The shelter reference number" : "El número de referencia del refugio",
"The sheltermanager.com admin account password cannot be changed here, please visit {0}" : "No se puede cambiar aquí la contraseña de la cuenta del administrador de sheltermanager.com, visite {0}",
"The size of this animal" : "El tamaño de este animal",
"The species of this animal" : "Las especies de este animal",
"The tattoo number" : "El número del tatuaje",
"The type of unit in the container, eg: tablet, vial, etc." : "El tipo de unidad en el contenedor, ej.: tableta, vial, etc.",
"The veterinary license number." : "Número de colegiado del veterinario.",
"The wholesale/trade price the container was bought for" : "El precio por el que fue comprado al por mayor/intercambiado el recipiente",
"There is not enough information in the form to attach to a shelter animal record (need an animal name)." : "No hay información suficiente en el formulario para adjuntarlo al registro del refugio de animales (se requiere un nombre de animal).",
"There is not enough information in the form to create a found animal record (need a description and area found)." : "No hay suficiente información en el formulario para crear un registro de animales encontrados (necesidad de una descripción y el área se encuentra).",
"There is not enough information in the form to create a lost animal record (need a description and area lost)." : "No hay suficiente información en el formulario para crear un registro de animales perdidos (necesidad de una descripción y un área perdida).",
"There is not enough information in the form to create a person record (need a surname)." : "No hay suficiente información en el formulario para crear un documento de persona (necesidad de un apellido).",
"There is not enough information in the form to create a transport record (need animalname)." : "No hay suficiente información en el formulario para crear un registro de transporte (se necesita el nombre del animal)",
"There is not enough information in the form to create a transport record (need pickupdate and dropoffdate)." : "No hay suficiente información en el formulario para crear un registro de transporte (se necesita la fecha de recogida y la fecha de entrega)",
"There is not enough information in the form to create a waiting list record (need a description)." : "No hay suficiente información en el formulario para crear un registro de lista de espera (se necesita una descripción).",
"There is not enough information in the form to create an incident record (need call notes and dispatch address)." : "No hay información suficiente en el formulario para crear un registro de incidente (se requieren las notas de llamada y una dirección de envío).",
"These are the HTML headers and footers used when displaying online forms." : "Estos son los encabezados y pies de página HTML utilizadas al mostrar formularios en línea.",
"These are the HTML headers and footers used when generating reports." : "Estos son los encabezados y pies de página HTML utilizados en la generación de informes.",
"These are the default values for these fields when creating new records." : "Estos son los valores predeterminados para estos campos al crear nuevos registros.",
"These batch processes are run each night by the system and should not need to be run manually." : "Este proceso por lotes se ejecuta cada noche por el sistema y no debería ser necesario ejecutarlo manualmente.",
"These fields allow you to deduct stock for the test(s) given. This single deduction should cover the selected tests being performed." : "Estos campos permiten descontar del inventario los tests que se realicen. Este descuento debería tener en cuenta los tests seleccionados.",
"These fields allow you to deduct stock for the treatment(s) given. This single deduction should cover the selected treatments being administered." : "Estos campos permiten descontar del inventario los tratamientos administrados. Este descuento debería tener en cuenta los tratamientos seleccionados.",
"These fields allow you to deduct stock for the vaccination(s) given. This single deduction should cover the selected vaccinations being administered." : "Estos campos permiten descontar del inventario las vacunas administradas. Este descuento debería tener en cuenta las vacunas seleccionadas.",
"These fields determine which columns are shown on the find animal and find person screens." : "Estos campos determinan las columnas que se muestran en las pantallas de RESULTADOS de encontrar animales y encontrar personas.",
"These numbers are for shelters who have agreements with insurance companies and are given blocks of policy numbers to allocate." : "Estos números son los refugios que tienen acuerdos con compañías de seguros y que se dan bloques de números de políticas para asignar.",
"These options change the behaviour of the search box at the top of the page." : "Estas opciones cambian el comportamiento del cuadro de búsqueda en la parte superior de la página.",
"These values are required for correct operation of the system. ONLY change them if you are translating to another language." : "Estos valores son necesarios para la correcta operación del sistema. Cámbielos SOLO si está traduciéndolo a otro idioma.",
"Third offence" : "Tercera infracción",
"This Month" : "Este mes",
"This Week" : "Esta semana",
"This Year" : "Este año",
"This animal already has an active reservation." : "Este animal ya tiene una reserva activa.",
"This animal has a SmartTag PETID" : "Este animal tiene un PETID SmartTag",
"This animal has a tattoo" : "Este animal tiene un tatuaje",
"This animal has active reservations, they will be cancelled." : "Este animal tiene resercaviones activas, serán canceladas.",
"This animal has an adoption fee of {0}" : "Este animal tiene una tarifa de adopción de {0}",
"This animal has been FIV/L tested" : "Este animal ha sido FIV/L probada",
"This animal has been altered" : "Este animal ha sido castrado",
"This animal has been declawed" : "A este animal se le han extirpado las uñas",
"This animal has been heartworm tested" : "Este animal ha sido probado gusano del corazón",
"This animal has movements and cannot be removed." : "Este animal tiene movimientos asociados y no se puede eliminar.",
"This animal has not been altered." : "Este animal no ha sido esterilizado.",
"This animal has not been microchipped." : "Este animal no ha sido microchipado.",
"This animal has special needs" : "Este animal tiene necesidades especiales",
"This animal has the same name as another animal recently added to the system." : "Este animal tiene el mismo nombre que otro animal recientemente añadido al sistema.",
"This animal is a crossbreed" : "Este animal es mestizo",
"This animal is bonded with {0}" : "Este animal está unido a {0}",
"This animal is bonded with {0}. Adoption movement records will be created for all bonded animals." : "Este animal está enlazado a {0}. Se crearán los registros de movimientos de adopción para todos los animales enlazados.",
"This animal is currently at a retailer, it will be automatically returned first." : "Este animal se encuentra actualmente en una tienda, se le devolverá automáticamente primero.",
"This animal is currently fostered, it will be automatically returned first." : "Este animal está actualmente en una casa de acogida, será devuelto al refugio automáticamente.",
"This animal is currently held and cannot be adopted." : "Actualmente este animal está retenido y no se puede adoptar.",
"This animal is currently quarantined and should not leave the shelter." : "Este animal está actualmente en cuarentena y no debe dejar el refugio.",
"This animal is marked not for adoption." : "Este animal está marcado para la no adopción.",
"This animal is microchipped" : "Este animal posee un microchip",
"This animal is not on the shelter." : "Este animal no está en el refugio.",
"This animal is part of a cruelty case and should not leave the shelter." : "Este animal es parte de un caso de crueldad y no debe dejar el refugio.",
"This animal should be held in case it is reclaimed" : "Este animal debe mantenerse en caso de ser reclamado",
"This animal should not be shown in figures and is not in the custody of the shelter" : "Este animal no debe ser mostrado en las figuras y no está en la custodia de la vivienda",
"This animal was dead on arrival to the shelter" : "Este animal murió al llegar al refugio",
"This animal was euthanized" : "Este animal fue sacrificado",
"This animal was picked up" : "Este animal fue recogido",
"This animal was transferred from another shelter" : "Este animal fue trasladado desde otro refugio",
"This code has already been used." : "El código ya ha sido utilizado.",
"This database is locked and in read-only mode. You cannot add, change or delete records." : "La base de datos esta bloqueada en modo lectura. No puede agregar, modificar o eliminar registros.",
"This database is locked." : "Esta base de datos está bloqueada.",
"This date of birth is an estimate" : "La fecha de nacimiento es estimada",
"This expense account is the source for costs of this type" : "Esta cuenta de gastos es la fuente para los costes de este tipo",
"This income account is the source for payments received of this type" : "Esta cuenta de ingresos es la fuente para los pagos recibidos de este tipo",
"This item is referred to in the database ({0}) and cannot be deleted until it is no longer in use." : "Este artículo se refiere a la base de datos ({0}) y no se puede eliminar hasta que ya no este en uso.",
"This many years after creation of a person record, the name, address and telephone data will be anonymized." : "Después de estos años de la creación de un registro de persona, el nombre, dirección y teléfono serán anonimizados.",
"This month" : "Este mes",
"This movement cannot be from a retailer when the animal has no prior retailer movements." : "Este movimiento no puede ser de un minorista cuando el animal no tiene movimientos de minorista anteriores.",
"This person has an animal control incident against them" : "Esta persona tiene un incidente de control de animales contra ella",
"This person has an animal control incident against them." : "Esta persona tiene asignado un incidente de control de animales.",
"This person has been banned from adopting animals" : "Esta persona ha sido excluida para la adopción de animales",
"This person has been banned from adopting animals." : "Esta persona se le ha prohibido la adopción de animales.",
"This person has been under investigation" : "Esta persona ha sido objeto de investigación",
"This person has been under investigation." : "Esta persona ha estado bajo investigación.",
"This person has movements and cannot be removed." : "Esta persona tiene movimientos y no se puede quitar.",
"This person has not passed a homecheck" : "Esta persona no ha aprobado la visita a domicilio",
"This person has not passed a homecheck." : "Esta persona no ha aprobado una visita a domicilio.",
"This person has payments and cannot be removed." : "Esta persona tiene pagos y no se puede quitar.",
"This person has previously surrendered an animal." : "Esta persona ha renunciado previamente a un animal.",
"This person is linked to a waiting list record and cannot be removed." : "Esta persona está vinculada a una lista de espera y no se puede quitar.",
"This person is linked to an animal and cannot be removed." : "Esta persona está vinculada a un animal y no se puede quitar.",
"This person is linked to an investigation and cannot be removed." : "Esta persona está vinculada a una investigación y no se puede quitar.",
"This person is linked to animal control and cannot be removed." : "Esta persona está vinculada a un control de animal y no se puede quitar.",
"This person is linked to animal licenses and cannot be removed." : "Esta persona está vinculada a una licencia de un animal y no se puede quitar.",
"This person is linked to animal transportation and cannot be removed." : "Esta persona está vinculada a un transporte de un animal y no se puede quitar.",
"This person is linked to citations and cannot be removed." : "Esta persona está vinculada a una citación y no se puede quitar.",
"This person is linked to found animals and cannot be removed." : "Esta persona está vinculada a un animal encontrado y no se puede quitar.",
"This person is linked to lost animals and cannot be removed." : "Esta persona está vinculada a un animal perdido y no se puede quitar.",
"This person is linked to trap loans and cannot be removed." : "Esta persona está vinculada a un préstamo de jaula trampa y no se puede quitar.",
"This person is not flagged as a fosterer and cannot foster animals." : "Esta persona no está marcada como casa de aocgida y no puede acoger animales.",
"This person is not flagged as a retailer and cannot handle retailer movements." : "Esta persona no se marca como minorista y no puede manejar los movimientos minoristas.",
"This person is very similar to another person on file, carry on creating this record?" : "Esta persona es muy similar a otra persona en el archivo, continuar la creación de este archivo?",
"This person lives in the same area as the person who brought the animal to the shelter." : "Esta persona vive en la mismo área que la persona que trajo el animal al refugio.",
"This record has been changed by another user, please reload." : "Este registro ha sido modificado por otro usuario, por favor actualiza.",
"This report cannot be sent by email as it requires criteria to run." : "Este informe no puede ser enviado por email porque requiere unos criterios para funcionar.",
"This screen allows you to add extra documents to your database, for staff training, reference materials, etc." : "Esta pantalla le permite agregar documentos adicionales a su base de datos, la capacitación del personal, materiales de referencia, etc.",
"This screen allows you to add extra images to your database, for use in reports and documents." : "Esta pantalla le permite añadir imágenes adicionales a su base de datos, para su uso en los informes y documentos.",
"This type of movement requires a date." : "Este tipo de movimiento requiere de una fecha.",
"This type of movement requires a person." : "Este tipo de movimiento requiere de una persona.",
"This week" : "Esta semana",
"This will permanently remove the selected records, are you sure?" : "Esto borrará permanentemente todos los registros seleccionados, ¿está seguro?",
"This will permanently remove the selected roles, are you sure?" : "Esto eliminará de forma permanente las funciones seleccionadas, ¿estás seguro?",
"This will permanently remove the selected user accounts. Are you sure?" : "Esto borrará permanentemente los usuarios seleccionados, ¿está seguro?",
"This will permanently remove this account and ALL TRANSACTIONS HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "Esta acción eliminará esta cuenta y todas las TRANSACCIONES QUE SE LE IMPUTAN. Esta acción es irreversible, ¿estás seguro de que quieres hacer esto?",
"This will permanently remove this additional field and ALL DATA CURRENTLY HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "Esta acción eliminará este campo adicional y TODOS LOS DATOS QUE SE LE IMPUTAN ACTUALMENTE. Esta acción es irreversible, ¿estás seguro de que quieres hacer esto?",
"This will permanently remove this animal, are you sure?" : "Esto borrará permanentemente este animal, ¿está seguro?",
"This will permanently remove this incident, are you sure?" : "Esto borrará permanentemente este incidente, ¿está seguro?",
"This will permanently remove this person, are you sure?" : "Esto eliminará permanentemente esta persona, ¿estás seguro?",
"This will permanently remove this record, are you sure?" : "Esta acción eliminará este disco, ¿estás seguro?",
"This will permanently remove this waiting list entry, are you sure?" : "Esto eliminará permanentemente esta entrada en la lista de espera, ¿estás seguro?",
"This will remove ALL rota entries for the week beginning {0}. This action is irreversible, are you sure?" : "Esto eliminará todos los turnos semanales que empiezan el {0}. Esta acción es irreversible, ¿está seguro?",
"This year" : "Este año",
"Thoroughbred" : "Thoroughbred (Purasangre)",
"Thu" : "Jue",
"Thumbnail size" : "Tamaño de miniaturas",
"Thursday" : "Jueves",
"Tibetan Mastiff" : "Mastín Tibetano",
"Tibetan Spaniel" : "Spaniel Tibetano",
"Tibetan Terrier" : "Terrier Tibetano",
"Tiger" : "Tigre",
"Time" : "Hora",
"Time Brought In" : "Hora en que fue traído",
"Time On List" : "Tiempo en la Lista",
"Time On Shelter" : "Tiempo en el refugio",
"Time on list" : "Tiempo en la lista",
"Time on shelter" : "Tiempo en el refugio",
"Timeline" : "Línea temporal",
"Timeline ({0})" : "Línea temporal ({0})",
"Times should be in HH:MM format, eg: 09:00, 16:30" : "Las horas deben estar en el formato HH:MM, p.ej.: 09:00, 16:30",
"Title" : "Título",
"Title First Last" : "Título Nombre Apellido",
"Title Initials Last" : "Iniciales de título",
"To" : "A",
"To Adoption" : "En adopción",
"To Fostering" : "Para acoger",
"To Other" : "A otros",
"To Retailer" : "A la tienda",
"To add people to the rota, create new person records with the staff or volunteer flag." : "Para añadir personas a los turnos, crea registros de nueva persona marcados como personal o como voluntario.",
"To continue using ASM, please renew {0}" : "Para continuar utilizando ASM, por favor renueve {0}",
"To week beginning" : "Al comienzo de la semana",
"Today" : "Hoy",
"Tonkinese" : "Tonquinés",
"Too Many Animals" : "Demasiados animales",
"Tooltip" : "Consejo",
"Top Margin" : "Márgen al Tope",
"Tortie" : "Tortie",
"Tortie and White" : "Tortie Blanco",
"Tortoise" : "Tortuga",
"Tosa Inu" : "Tosa Inu",
"Total" : "Total",
"Total number of units in the container" : "Número total de unidades en el contenedor",
"Total payments" : "Pago total",
"Toucan" : "Tucán",
"Toy Fox Terrier" : "Toy Fox Terrier",
"Training" : "Entrenando",
"Transactions" : "Transacciones",
"Transactions need a date and description." : "Las transacciones necesitan una fecha y descripción.",
"Transfer" : "Trasladar",
"Transfer In" : "Trasladar a",
"Transfer To" : "Trasladar a",
"Transfer an animal" : "Trasladar un animal",
"Transfer from Municipal Shelter" : "Trasladado desde la perrera municipal",
"Transfer from Other Shelter" : "Trasladado desde otro refugio",
"Transfer successfully created." : "Traslado creado correctamente.",
"Transfer?" : "¿Trasladar?",
"Transferred" : "Trasladado",
"Transferred From" : "Trasladado desde",
"Transferred In" : "Trasladado dentro",
"Transferred In {0}" : "Trasladado en {0}",
"Transferred Out" : "Trasladado fuera",
"Transferred Out {0}" : "Trasladado {0}",
"Transfers must have a valid transfer date." : "Los traslados deben tener una fecha de traslado válida.",
"Transport" : "Transporte",
"Transport Book" : "Libro de transportes",
"Transport Types" : "Tipos de transporte",
"Transport book" : "Libro de transportes",
"Transport requires an animal" : "El transporte requiere un animal",
"Transports must have valid pickup and dropoff dates and times." : "Los transportes deben tener una fecha y hora de recogida y entrega válidas.",
"Trap Loans" : "Préstamo de jaula trampa",
"Trap Number" : "Número de jaula trampa",
"Trap Types" : "Tipo de jaula trampa",
"Trap loan" : "Préstamo de jaula trampa",
"Trap loans" : "Préstamos de jaula trampa",
"Treat animals at retailers as part of the shelter inventory" : "Tratar adopciones prueba como parte del inventario del refugio",
"Treat foster animals as part of the shelter inventory" : "Tratar a los animales de acogida como parte del inventario del refugio",
"Treat trial adoptions as part of the shelter inventory" : "Tratar adopciones a prueba como parte del inventario del refugio",
"Treatment" : "Tratamiento",
"Treatment Given" : "Tratamiento administrado",
"Treatment marked as given for {0} - {1}" : "Tratamiento marcado como entregado por {0} - {1}",
"Treatment name cannot be blank" : "El nombre del tratamiento no puede estar en blanco",
"Treatments" : "Tratamientos",
"Treeing Walker Coonhound" : "Sabueso Treeing Walker",
"Trial Adoption" : "Adopción a prueba",
"Trial adoption" : "Adopción a prueba",
"Trial adoption book" : "Libro de adopciones a prueba",
"Trial ends on" : "Prueba acaba en",
"Tricolour" : "Tricolor",
"Trigger Batch Processes" : "Activa un proceso en grupo",
"Tu" : "Ma",
"Tue" : "Mar",
"Tuesday" : "Martes",
"Tumblr" : "Tumblr",
"Turkey" : "Pavo",
"Turkish Angora" : "Angora Turco",
"Turkish Van" : "Van Turco",
"Turtle" : "Tortuga",
"Twitter" : "Twitter",
"Type" : "Tipo",
"Type of animal links to show" : "Tipo de vínculos animales para mostrar",
"U (Unwanted Cat)" : "U (Gato abandonado)",
"UK Giftaid" : "UK GiftAid",
"URL" : "URL",
"UUUUUUUUUU or UUUU = unique number" : "UUUUUUUUUU o UUUU = número único",
"Unable to Afford" : "Falta de medios",
"Unable to Cope" : "No sociable",
"Unaltered" : "No castrados/esterilizados",
"Unaltered Adopted Animals" : "Animales adoptados castrados/esterilizados",
"Unaltered Dog - 1 year" : "Perro Castrado - 1 año",
"Unaltered Dog - 3 year" : "Perro No Castrado - 3 años",
"Unavailable" : "No disponible",
"Under {0} weeks old" : "Menos de {0} semanas de edad",
"Unit" : "Unidad",
"Unit Price" : "Precio de la unidad",
"Unit within the location, eg: pen or cage number" : "Unidad dentro de la ubicación, por ejemplo: pluma o número de la jaula",
"Units" : "Unidades",
"Unknown" : "Desconocido",
"Unknown microchip brand" : "Marca de microchip desconocida",
"Unpaid Fines" : "Multas impagadas",
"Unreserved" : "No reservado",
"Unsaved Changes" : "Cambios sin guardar",
"Unspecified" : "Sin especificar",
"Unsuitable Accomodation" : "Falta de espacio",
"Up for adoption" : "Adoptables",
"Upcoming medical items" : "Próximos tratamientos médicos",
"Update" : "Actualizar",
"Update publishing options" : "Actualización de las opciones de publicación",
"Update system options" : "Actualizar las opciones del sistema",
"Update the daily boarding cost for this animal" : "Actualizar el costo de alojamiento diario de este animal",
"Updated database to version {0}" : "Base de datos actualizada a la versión {0}",
"Updated." : "Actualizado.",
"Updating..." : "Actualizando…",
"Upload" : "Carga",
"Upload Document" : "Cargar documento",
"Upload ODT" : "Sube archivo .odt",
"Upload Photo" : "Cargar fotos",
"Upload a new OpenOffice template" : "Cargar una nueva plantilla de OpenOffice",
"Upload all available images for animals" : "Cargar todas las imágenes disponibles para los animales",
"Upload an SQL script" : "Cargar un script SQL",
"Upload splash.jpg and logo.jpg to override the login screen image and logo at the top left of ASM." : "Subir splash.jpg y logo.jpg para anular la imagen de la pantalla de inicio de sesión y el logotipo en la parte superior izquierda de la ASM.",
"Uploading..." : "Cargando…",
"Urgencies" : "Urgencias",
"Urgency" : "Urgencia",
"Urgent" : "Urgente",
"Usage Date" : "Fecha de uso",
"Usage Type" : "Tipo de uso",
"Usage explains why this stock record was created or adjusted. Usage records will only be created if the balance changes." : "El uso explica por que este registro de stock fue creado o ajustado. Los registros de uso sólo se crearán si el saldo cambia.",
"Use Automatic Insurance Numbers" : "Emplear números automáticos de seguro",
"Use HTML5 client side image scaling where available to speed up image uploads" : "Utilice HTML5 escalado de imagen del lado del cliente cuando sea posible para acelerar la subida de imágenes",
"Use SQL Interface" : "Emplear interface SQL",
"Use a single breed field" : "Usar un solo campo para la raza",
"Use animal comments" : "Utiliza comentarios de animal",
"Use fancy tooltips" : "Utilice tooltips lujo",
"Use notes from preferred photo" : "Utiliza notas de la foto escogida",
"Use the icon in the lower right of notes fields to view them in a separate window." : "Use el icono en la esquina inferior derecha de los campos de notas para verlos en otra ventana.",
"User Accounts" : "Cuentas de usuario",
"User Roles" : "Roles del usuario",
"User accounts that will only ever call the Service API should set this to No." : "Las cuentas de usuario que solo llamarán al servicio API deberán configurar esta opción con un “No”.",
"User roles" : "Roles de usuarios",
"Username" : "Usuario",
"Username '{0}' already exists" : "El nombre de usuario '{0}' ya existe",
"Users" : "Usuarios",
"Users need a username, password and at least one role or the superuser flag setting." : "Los usuarios necesitan un nombre de usuario, contraseña y por lo menos una función o ajuste configurados por el superusuario (administrador)",
"Vacation" : "Vacaciones",
"Vaccinate" : "Vacunar",
"Vaccinate Animal" : "Vacunar animal",
"Vaccination" : "Vacunación",
"Vaccination Book" : "Libro de Vacunas",
"Vaccination Given" : "Vacunación administrada",
"Vaccination Types" : "Tipos de vacuna",
"Vaccination book" : "Cartilla de vacunación",
"Vaccination marked as given for {0} - {1}" : "Vacunación marcada como administrada para {0} - {1}",
"Vaccinations" : "Vacunas",
"Vaccinations need an animal and at least a required date." : "Las vacunas tienen un animal y por lo menos una fecha requerida.",
"Vaccinations require an animal" : "Las vacunas requieren un animal",
"Vaccinations: {0}, Tests: {1}, Medical Treatments: {2}, Transport: {3}, Costs: {4}, Total Costs: {5} Total Payments: {6}, Balance: {7}" : "Vacunas: {0}, Tests: {1}, Tratamientos médicos: {2}, Transporte: {3}, Costes: {4}, Costes totales: {5} Pagos totales: {6}, Saldo: {7}",
"Valid tokens for the subject and text" : "Tokens válidos para el asunto y el texto",
"Value" : "Valor",
"Various" : "Multicolor",
"Vertical Pitch" : "Pitch Vertical",
"Very Large" : "Muy grande",
"Vet" : "Veterinario",
"Vet Visit" : "Visita veterinaria",
"Victim" : "Víctima",
"Victim Name" : "Nombre de la víctima",
"Video Link" : "Liga para video",
"Vietnamese Pot Bellied" : "Puerco Vietnamita",
"View" : "Vista",
"View Accounts" : "Ver cuentas",
"View Animals" : "Visualizar animales",
"View Audit Trail" : "Ver registro de auditoría",
"View Citations" : "Ver citas",
"View Clinic Appointment" : "Ver cita de la clínica",
"View Cost" : "Ver costes",
"View Diary" : "Examinar agenda",
"View Diets" : "Ver dietas",
"View Document" : "Ver Documento",
"View Document Repository" : "Ver el depósito de documentos",
"View Found Animal" : "Ver animal encontrado",
"View Incidents" : "Ver incidentes",
"View Incoming Forms" : "Ver las Formas entrantes",
"View Investigations" : "Ver investigaciones",
"View Licenses" : "Ver licencias",
"View Litter" : "Ver camada",
"View Log" : "Ver registro",
"View Lost Animal" : "Ver animal perdido",
"View Manual" : "Ver Manual",
"View Media" : "Examinar soporte",
"View Medical Records" : "Examinar historias clínicas",
"View Movement" : "Ver movimiento",
"View PDF" : "Ver PDF",
"View Payments" : "Ver pagos",
"View Person" : "Ver Persona",
"View Person Links" : "Ver ligas de personas",
"View Report" : "Ver informe",
"View Roles" : "Ver roles",
"View Rota" : "Ver turnos",
"View Shelter Animals" : "Ver los animales del refugio",
"View Staff Person Records" : "Ver registros de miembro del personal",
"View Stock" : "Ver existencias",
"View Tests" : "Ver pruebas",
"View Training Videos" : "Ver tutoriales en video",
"View Transport" : "Ver transporte",
"View Trap Loans" : "Ver préstamos de jaula trampa",
"View Vaccinations" : "Ver vacunaciones",
"View Volunteer Person Records" : "Ver registros de voluntarios",
"View Vouchers" : "Ver vales",
"View Waiting List" : "Visualizar lista de espera",
"View animals matching publishing options" : "Ver animales que coincidan con las opciones de publicación",
"View littermates" : "Ver compañeros de camada",
"View matching records" : "Ver registros coincidentes",
"View media" : "Ver archivos multimedia",
"View publishing logs" : "Ver los registros de publicación",
"Visual Theme" : "Tema visual",
"Vizsla" : "Vizsla",
"Volunteer" : "Voluntario",
"Voucher Types" : "Tipos de vales",
"Vouchers" : "Vales",
"Vouchers need an issue and expiry date." : "Los vales necesitan una fecha de emisión y vencimiento.",
"WARNING: This animal has not been microchipped" : "ATENCIÓN: Este animal no ha sido microchipado",
"WARNING: This animal is over 6 months old and has not been neutered/spayed" : "ATENCIÓN: Este animal tiene más de 6 meses y no ha sido esterilizado/castrado",
"Waiting" : "Esperando",
"Waiting List" : "Lista de espera",
"Waiting List - Additional" : "Adicional - Lista de Espera",
"Waiting List - Details" : "Detalles - Lista de Espera",
"Waiting List - Removal" : "Lista de espera - Eliminación",
"Waiting List Contact" : "Contacto de lista de espera",
"Waiting List Donation" : "Lista de espera de donación",
"Waiting List {0}" : "Lista de espera {0}",
"Waiting List: {0}" : "Lista de espera: {0}",
"Waiting Room" : "Sala de espera",
"Waiting for documents..." : "Esperando documentos...",
"Waiting list donations" : "Esperando la lista de donaciones",
"Waiting list entries matching '{0}'." : "Entradas de la lista de espera que coinciden con «{0}».",
"Waiting list entries must have a contact" : "Las entradas en la lista de espera debe tener un contacto",
"Waiting list entry for {0} ({1})" : "Entrada de la lista de espera para {0} ({1})",
"Waiting list entry successfully added." : "Se añadió la entrada de la lista de espera correctamente.",
"Waiting list urgency update period in days" : "Lista de espera período de actualización de urgencia en días",
"Warmblood" : "Warmblood",
"Warn if the name of the new animal is similar to one entered recently" : "Advierte si el nombre del nuevo animal es similar a uno entró recientemente",
"Warn when adopting an animal who has not been microchipped" : "Avisa cuando se adopta un animal que no ha sido microchipado",
"Warn when adopting an unaltered animal" : "Avisa cuando se adopta un animal que no ha sido esterilizado",
"Warn when adopting to a person who has been banned from adopting animals" : "Advierte a la hora de adoptar si la persona ha sido vetada para la adopción de animales",
"Warn when adopting to a person who has not been homechecked" : "Advierte en el momento de la adopción si todavía no se ha realizado la visita a domicilio",
"Warn when adopting to a person who has previously brought an animal to the shelter" : "Advierte a la hora de adoptar si una persona ha traído previamente un animal al refugio",
"Warn when adopting to a person who lives in the same area as the original owner" : "Advierte a la hora de adoptar si una persona vive en la misma zona que el propietario original",
"Warn when creating multiple reservations on the same animal" : "Advierte al crear múltiples reservas al mismo animal",
"Warnings" : "Avisos",
"Wasted" : "Malgastado",
"Water Bills" : "Factura de Agua",
"We" : "Mi",
"Wed" : "Mié",
"Wednesday" : "Miércoles",
"Week" : "Semana",
"Week beginning {0}" : "La semana empieza {0}",
"Weekly" : "Semanal",
"Weight" : "Peso",
"Weimaraner" : "Braco de Weimar",
"Welcome!" : "¡Bienvenido/a!",
"Welsh Corgi" : "Welsh Corgi",
"Welsh Springer Spaniel" : "Springer Spaniel Galés",
"Welsh Terrier" : "Terrier Galés",
"West Highland White Terrier Westie" : "Terrier West Highland Blanco",
"Wheaten Terrier" : "Wheaten Terrier",
"When" : "Cuando",
"When ASM should stop showing this message" : "Cuando ASM debe dejar de mostrar este mensaje",
"When I change the location of an animal, make a note of it in the log with this type" : "Cuando se haga un cambio de ubicación del animal, anotarlo con este tipo de registro",
"When I change the weight of an animal, make a note of it in the log with this type" : "Cuando se haga un cambio en el peso del animal, anotarlo con este tipo de registro",
"When I generate a document, make a note of it in the log with this type" : "Al generar un documento, hacer una nota de ello en el registro con este tipo",
"When I mark an animal held, make a note of it in the log with this type" : "Cuando se marca un animal como retenido, crear una nota de este tipo en el registro",
"When I set a new GDPR Opt-In contact option, make a note of it in the log with this type" : "Cuando se establece una nueva opción de Opt-In de protección de datos, crear una nota en el registro de este tipo",
"When a message is created, email it to each matching user" : "Al crear un mensaje, enviarlo para cada usuario coincidente",
"When creating payments from the Move menu screens, mark them due instead of received" : "Cuando se creen pagos desde el menú de movimientos, marcarlos como \"debido\" en lugar de \"recibido\"",
"When displaying calendars, the first day of the week is" : "Cuando se muestren los calendarios, el primer día de la semana es",
"When displaying person names, use the format" : "Cuando se muestren nombres de persona, usar el formato",
"When entering dates, hold down CTRL and use the cursor keys to move around the calendar. Press t to go to today." : "Al introducir fechas, mantenga oprimida Ctrl y use las flechas del teclado para moverse por el calendario. Oprima T para ir a hoy.",
"When entering vaccinations, default the last batch number and manufacturer for that type" : "Cuando se registran vacunaciones, anota el último número de lote y fabricante para este tipo de vacuna",
"When matching lost animals, include shelter animals" : "Cuando se emparejan animales perdidos, incluir animales residentes en el refugio",
"When publishing to third party services, add this extra text to the bottom of all animal descriptions" : "Al publicar en los servicios de terceros, añada el texto adicional a la parte inferior de todas las descripciones de animales",
"When receiving multiple payments, allow the due and received dates to be set" : "Cuando se reciban múltiples pagos, permite que se establezcan fechas de \"debido\" y \"recibido\"",
"When receiving payments, allow a quantity and unit price to be set" : "Cuando se reciban pagos, permite que se establezcan una cantidad y un precio por unidad",
"When receiving payments, allow recording of sales tax with a default rate of" : "Cuando se reciban pagos, permite el registro del impuesto con una tasa predeterminada de",
"When receiving payments, allow the deposit account to be overridden" : "Cuando se reciban pagos, permite que la cuenta de depósito se anule",
"When you use Move > Adopt an animal, ASM will automatically return any open foster or retailer movement before creating the adoption." : "Cuando utilice Mover> Adoptar un animal, ASM devolverá automáticamente al refugio cualquier animal en casa de acogida o en venta antes de la creación de la adopción.",
"When you use Move > Foster an animal, ASM will automatically return any open foster movement before moving the animal to its new home." : "Cuando utilice Mover> Acoger un animal, ASM devolverá automáticamente al refugio cualquier movimiento abierto de acogida antes de trasladarlo a su nuevo hogar.",
"Where this animal is located within the shelter" : "Donde está localizado este animal en el refugio",
"Whippet" : "Whippet",
"White" : "Blanco",
"White German Shepherd" : "Pastor Alemán Blanco",
"White and Black" : "Blanco y Negro",
"White and Brindle" : "Blanco y Manchado",
"White and Brown" : "Blanco y Marrón",
"White and Grey" : "Blanco y Gris",
"White and Liver" : "Blanco y Con lunares",
"White and Tabby" : "Blanco y Atigrado",
"White and Tan" : "Blanco y Canela",
"White and Torti" : "Blanco y Torti",
"Will this owner give a donation?" : "¿Este propietario hará una donación?",
"Wire-haired Pointing Griffon" : "Grifón de Pelo Duro",
"Wirehaired Terrier" : "Terrier de Pelo Duro",
"With Vet" : "Con el veterinario",
"With overnight batch" : "Procesamiento por lotes",
"Withdrawal" : "Retirada",
"Wk" : "Sem",
"Work" : "Trabajo",
"Work Phone" : "Teléfono del trabajo",
"Work Types" : "Tipos de trabajo",
"XXX or XX = number unique for this year" : "XXX o XX= número único para ese año",
"Xoloitzcuintle/Mexican Hairless" : "Perro Pelon Mexicano - Xoloitzquintle",
"YY or YYYY = current year" : "YY o YYYY = año en curso",
"Yellow Labrador Retriever" : "Labrador Retriever Amarillo",
"Yellow and Grey" : "Amarillo y Gris",
"Yes" : "Sí",
"Yes/No" : "Si/No",
"Yes/No/Unknown" : "Si/No/Desconocido",
"Yorkshire Terrier Yorkie" : "Yorkshire Terrier",
"You can bookmark search results, animals, people and most data entry screens." : "Puede crear marcadores de los resultados de búsquedas, animales, personas y la mayoría de las pantallas de entrada de datos.",
"You can drag and drop animals in shelter view to change their locations." : "Puede arrastrar y soltar los animales, tanto refugio para cambiar su ubicación.",
"You can middle click a link to open it in a new browser tab (push the wheel on most modern mice)." : "Puede hacer clic con el botón del medio en un enlace para abrirlo es una nueva pestaña del navegador (apretar la rueda en la mayoría de los ratones modernos).",
"You can override the search result sort by adding one of the following to the end of your search - sort:az, sort:za, sort:mr, sort:lr" : "Puede anular el resultado de la búsqueda al agregar una de las siguientes hasta el final de su búsqueda - sort:az, sort:za, sort:mr, sort:lr",
"You can prefix your term in the search box with a: to search only animals, p: to search only people, wl: to search waiting list entries, la: to search lost animals and fa: to search found animals." : "Puede utilizar estos códigos en el campo de búsqueda a: para buscar solamente los animales, p: para buscar sólo las personas, WL: para buscar entradas de la lista de espera, la: para buscar animales perdidos y, fa: para buscar animales que se han encontrado.",
"You can set a default amount for different payment types in the Settings- Lookup Data screen. Very handy when creating adoptions." : "Puede establecer una cantidad preestablecida para diferentes tipos de pago en Configuración-Buscar datos. Es muy práctico cuando se crean adopciones.",
"You can sort tables by clicking on the column headings." : "Puede ordenar haciendo clic en las cabeceras de columna.",
"You can upload images called logo.jpg and splash.jpg to the Settings- Reports-Extra Images screen to override the login splash screen and logo in the upper left corner of the application." : "Puedes subir imágenes llamadas logo.jpg y splash.jpg a la pantalla de imágenes Configuración-Informes-Extra para anular la pantalla de bienvenida de inicio de sesión y el logotipo en la esquina superior izquierda de la aplicación.",
"You can use incoming forms to create new records or attach them to existing records." : "Se pueden utilizar formularios entrantes para crear nuevos registros o adjuntarlos a registros existentes.",
"You can't have a return without a movement." : "No se puede realizar una devolución sin un movimiento.",
"You didn't specify any search criteria, so an on-shelter search was assumed." : "No se ha indicado ningún criterio de búsqueda, por lo tanto se asume búsqueda general en el refugio.",
"You have unsaved changes, are you sure you want to leave this page?" : "Hay cambios sin guardar, ¿confirma que quiere salir de la página?",
"You must supply a code." : "Usted debe ingresar un código.",
"Young Adult" : "Adulto joven",
"Your CSV file should have a header row with field names ASM recognises." : "Su archivo CSV debería tener una cabecera con nombres de campo que ASM pueda reconocer.",
"Your sheltermanager.com account is due to expire on {0}, please renew {1}" : "Su cuenta sheltermanager.com está a punto de expirar en fecha {0}, por favor renueve {1}",
"Zipcode" : "Código postal",
"Zipcode contains" : "El código postal contiene",
"[None]" : "[Ninguno]",
"after connecting, chdir to" : "después de conectar, chdir a",
"and" : "y",
"are sent to" : "se envían a",
"at" : "a",
"cm" : "cm",
"days" : "días",
"estimate" : "estimado",
"filters: a:animal, p:person, wl:waitinglist, la:lostanimal, fa:foundanimal keywords: onshelter/os, notforadoption, aco, banned, donors, deceased, vets, retailers, staff, fosterers, volunteers, homecheckers, members, activelost, activefound" : "filtros: a:animal, p:persona, wl:listadeespera, la:animalperdido, fa:animalencontrado palabras clave: onshelter/os, notforadoption, aco, banned, donors, deceased, vets, retailers, staff, fosterers, volunteers, homecheckers, members, activelost, activefound",
"inches" : "pulgadas",
"invalid" : "no válido",
"kg" : "kg",
"lb" : "libras",
"less" : "menos",
"mins" : "minutos",
"months" : "meses",
"more" : "más",
"on" : "activado",
"or" : "o",
"or estimated age in years" : "o edad estimada en años",
"oz" : "onzas",
"to" : "a",
"today" : "hoy",
"treatments" : "tratamientos",
"treatments, every" : "tratamientos, cada",
"weekdays" : "días de la semana",
"weeks" : "semanas",
"weeks after last contact." : "semanas tras el último contacto.",
"years" : "años",
"yesterday" : "ayer",
"{0} (under {1} months)" : "{0} (menos de {1} meses)",
"{0} - {1} ({2} {3} aged {4})" : "{0} - {1} ({2} {3} edad {4})",
"{0} - {1} {2}" : "{0} - {1} {2}",
"{0} - {1} {2} ({3}), contact {4} ({5}) - lost in {6}, postcode {7}, on {8}" : "{0} - {1} {2} ({3}), contacto {4} ({5}) - perdido en {6}, código postal {7}, en {8}",
"{0} animals successfully updated." : "{0} animales actualizados correctamente.",
"{0} cannot be blank" : "{0} no puede estar vacío",
"{0} fine, paid" : "{0} multas pagadas",
"{0} fine, unpaid" : "{0} multas no pagadas",
"{0} incurred in costs" : "{0} incurrido en costes",
"{0} is running ({1}% complete)." : "{0} en proceso ({1}% completado).",
"{0} payment records created." : "{0} registros de pago creados.",
"{0} received" : "{0} recibido",
"{0} record(s) match the mail merge." : "Hay {0} registros que coinciden con la combinación de correspondencia.",
"{0} results." : "{0} resultados.",
"{0} rows affected." : "{0} filas afectadas.",
"{0} selected" : "{0} seleccionado",
"{0} treatments every {1} days" : "{0} tratamientos cada {1} días",
"{0} treatments every {1} months" : "{0} tratamientos cada {1} meses",
"{0} treatments every {1} weekdays" : "{0} tratamientos cada {1} día de la semana",
"{0} treatments every {1} weeks" : "{0} tratamientos cada {1} semanas",
"{0} treatments every {1} years" : "{0} tratamientos cada {1} años",
"{0} {1} ({2} treatments)" : "{0} {1} ({2} tratamientos)",
"{0} {1} aged {2}" : "{0} {1} edad {2}",
"{0} {1} {2} aged {3}" : "{0} {1} {2} edad {3}",
"{0} {1}: Moved from {2} to {3}" : "{0} {1}: movidos de {2} a {3}",
"{0} {1}: adopted by {2}" : "{0} {1}: adoptados por {2}",
"{0} {1}: altered" : "Castrado",
"{0} {1}: available for adoption" : "{0} {1}: disponibles para adopción",
"{0} {1}: died ({2})" : "{0} {1}: muertos ({2})",
"{0} {1}: entered the shelter" : "{0} {1}: ha entrado en el refugio",
"{0} {1}: escaped" : "{0} {1}: se escaparon",
"{0} {1}: euthanised ({2})" : "{0} {1}: eutanasiados ({2})",
"{0} {1}: fostered to {2}" : "{0} {1}: acogidos en {2}",
"{0} {1}: held" : "{0} {1}: retenidos",
"{0} {1}: microchipped" : "{0} {1}: microchipados",
"{0} {1}: not available for adoption" : "{0} {1}: no disponibles para adopción",
"{0} {1}: quarantined" : "{0} {1}: en cuarentena",
"{0} {1}: received {2}" : "{0} {1}: recibidos {2}",
"{0} {1}: reclaimed by {2}" : "{0} {1}: recuperados por {2}",
"{0} {1}: released" : "{0} {1}: liberados",
"{0} {1}: reserved by {2}" : "{0} {1}: reservados por {2}",
"{0} {1}: returned by {2}" : "{0} {1}: devueltos por {2}",
"{0} {1}: sent to retailer {2}" : "{0} {1}: enviados al vendedor {2}",
"{0} {1}: stolen" : "{0} {1}: robados",
"{0} {1}: tested positive for FIV" : "{0} {1}: positivos para immunodeficiencia felina",
"{0} {1}: tested positive for FeLV" : "{0} {1}: positivos para leucemia felina",
"{0} {1}: tested positive for Heartworm" : "{0} {1}: positivos para gusano del corazón",
"{0} {1}: transferred to {2}" : "{0} {1}: trasladados a {2}",
"{0}, Week {1}" : "{0}, semana {1}",
"{0}: Entered shelter {1}, Last changed on {2} by {3}. {4} {5} {6} aged {7}" : "{0}: Entró en el refugio {1}, fue modificado el {2} por {3}. {4} {5} {6} {7} edad",
"{0}: closed {1} ({2})" : "{0}: cerrado {1} ({2})",
"{0}: opened {1}" : "{0}: abierto {1}",
"{0}: waiting list - {1}" : "{0}: lista de espera - {1}",
"{0}: {1} {2} - {3} {4}" : "{0}: {1} {2} - {3} {4}",
"{2}: found in {1}: {0}" : "{2}: encontrados en {1}: {0}",
"{2}: lost in {1}: {0}" : "{2}: perdidos en {1}: {0}",
"{plural0} animal as dead on arrival" : "{plural0} animales llegaron muertos",
"{plural0} animal control call due for followup today" : "{plural0} llamadas de control de animales pendientes para el seguimiento de hoy",
"{plural0} animal died" : "{plural0} animales murieron",
"{plural0} animal entered the shelter" : "{plural0} animales entraron en el refugio",
"{plural0} animal has a hold ending today" : "{plural0} animales finalizan su retención hoy",
"{plural0} animal has been on the shelter longer than {0} months" : "{plural0} animal ha estado en el refugio más de {0} meses",
"{plural0} animal is not available for adoption" : "{plural0} animal no está disponible para su adopción",
"{plural0} animal was adopted" : "{plural0} animal fue adoptado",
"{plural0} animal was euthanized" : "{plural0} animal fue eutanasiado",
"{plural0} animal was reclaimed by its owner" : "{plural0} animal fue reclamado por su dueño",
"{plural0} animal was transferred to another shelter" : "{plural0} animal ha sido trasladado a otro refugio",
"{plural0} day." : "{plural0} día.",
"{plural0} incomplete animal control call" : "{plural0} llamadas de control de animales incompletas",
"{plural0} item of stock expires in the next month" : "{plural0} artículos de stock caducan el mes siguiente",
"{plural0} item of stock has expired" : "{plural0} artículos de stock han caducado",
"{plural0} medical treatment needs to be administered today" : "{plural0} tratamiento médico tiene que ser administrado hoy",
"{plural0} month." : "{plural0} mes.",
"{plural0} new online form submission" : "{plural0} nuevos formularios online recibidos",
"{plural0} person has an overdue payment" : "{plural0} personas tienen pagos pendientes",
"{plural0} person with an active reservation has not been homechecked" : "{plural0} persona que tiene una reserva activa y no se ha realizado la visita a domicilio",
"{plural0} potential match for a lost animal" : "{plural0} coincidencias potenciales para animales perdidos",
"{plural0} recent publisher run had errors" : "{plural0} publicaciones recientes tienen errores",
"{plural0} reservation has been active over a week without adoption" : "{plural0} reserva ha estado activa más de una semana sin adopción",
"{plural0} result found in {1} seconds. Order: {2}" : "{plural0} resultado encontrado en {1} segundos. Orden: {2}",
"{plural0} shelter animal has not been microchipped" : "{plural0} animales del refugio no han sido microchipados",
"{plural0} shelter animal has people looking for them" : "{plural0} refugio de animales tiene gente en busca de ellos",
"{plural0} test needs to be performed today" : "{plural0} de prueba debe ser realizada hoy",
"{plural0} transport does not have a driver assigned" : "{plural0} transportes no tienen ningún conductor asignado",
"{plural0} trap is overdue for return" : "{plural0} jaulas trampa están pendientes de devolución",
"{plural0} trial adoption has ended" : "{plural0} adopción de prueba ha terminado",
"{plural0} unaltered animal has been adopted in the last month" : "{plural0} animal no castrado/esterilizado ha sido adoptado en el último mes",
"{plural0} undispatched animal control call" : "{plural0} llamadas de control de animales no enviadas",
"{plural0} unpaid fine" : "{plural0} multas sin pagar",
"{plural0} urgent entry on the waiting list" : "{plural0} entrada urgente en lista de espera",
"{plural0} vaccination has expired" : "{plural0} vacunas han caducado",
"{plural0} vaccination needs to be administered today" : "{plural0} la vacunación debe ser administrada hoy",
"{plural0} week." : "{plural0} semana.",
"{plural0} year." : "{plural0} años.",
"{plural1} animal control calls due for followup today" : "{plural1} llamadas de control de animales pendientes de seguimiento",
"{plural1} animals are not available for adoption" : "{plural1} animales no están disponibles para la adopción",
"{plural1} animals died" : "{plural1} animales murieron",
"{plural1} animals entered the shelter" : "{plural1} animales han entrado en el refugio",
"{plural1} animals have been on the shelter longer than {0} months" : "{plural1} animales han estado en el refugio más de {0} meses",
"{plural1} animals have holds ending today" : "{plural1} animales finalizan su retención hoy",
"{plural1} animals were adopted" : "{plural1} animales fueron adoptados",
"{plural1} animals were dead on arrival" : "{plural1} animales llegaron muertos",
"{plural1} animals were euthanized" : "{plural1} los animales fueron sometidos a eutanasia",
"{plural1} animals were reclaimed by their owners" : "{plural1} animales fueron recuperados por sus dueños",
"{plural1} animals were transferred to other shelters" : "{plural1} animales fueron trasladados a otros centros de acogida",
"{plural1} days." : "{plural1} días.",
"{plural1} incomplete animal control calls" : "{plural1} llamadas de control de animales incompletas",
"{plural1} items of stock expire in the next month" : "{plural1} artículos de stock caducan el mes siguiente",
"{plural1} items of stock have expired" : "{plural1} artículos de stock han caducado",
"{plural1} medical treatments need to be administered today" : "{plural1} tratamientos médicos necesitan ser administrados hoy",
"{plural1} months." : "{plural1} meses.",
"{plural1} new online form submissions" : "{plural1} nuevos formularios enviados",
"{plural1} people have overdue payments" : "{plural1} personas tienen pagos pendientes",
"{plural1} people with active reservations have not been homechecked" : "{plural1} personas con reservas activas pero no se ha realizado la visita a domicilio",
"{plural1} potential matches for lost animals" : "{plural1} coincidencias potenciales para animales perdidos",
"{plural1} recent publisher runs had errors" : "{plural1} publicaciones recientes tienen errores",
"{plural1} reservations have been active over a week without adoption" : "{plural1} reservas han estado activas más de una semana sin adopción",
"{plural1} results found in {1} seconds. Order: {2}" : "{plural1} resultados encontrados en {1} segundos. Orden: {2}",
"{plural1} shelter animals have not been microchipped" : "{plural1} animales del refugio no han sido microchipados",
"{plural1} shelter animals have people looking for them" : "{plural1} animales del refugio tienen gente en busca de ellos",
"{plural1} tests need to be performed today" : "{plural1} pruebas deben realizarse hoy",
"{plural1} transports do not have a driver assigned" : "{plural1} transportes no tienen ningún conductor asignado",
"{plural1} traps are overdue for return" : "{plural1} jaulas trampa están pendientes de devolución",
"{plural1} trial adoptions have ended" : "{plural1} adopciones de prueba han terminado",
"{plural1} unaltered animals have been adopted in the last month" : "{plural1} animales no castrados/esterilizados se han adoptado en el último mes",
"{plural1} undispatched animal control calls" : "{plural1} llamadas de control de animales no enviadas",
"{plural1} unpaid fines" : "{plural1} multas no pagadas",
"{plural1} urgent entries on the waiting list" : "{plural1} entradas urgentes en lista de espera",
"{plural1} vaccinations have expired" : "{plural1} vacunas han expirado",
"{plural1} vaccinations need to be administered today" : "{plural1} vacunas necesitan ser administrados hoy",
"{plural1} weeks." : "{plural1} semana.",
"{plural1} years." : "{plural1} años.",
"{plural2} animal control calls due for followup today" : "{plural2} llamadas de control de animales pendientes de seguimiento",
"{plural2} animals are not available for adoption" : "{plural2} animales no están disponibles para la adopción",
"{plural2} animals died" : "{plural2} animales murieron",
"{plural2} animals entered the shelter" : "{plural2} animales han entrado en el refugio",
"{plural2} animals have been on the shelter longer than {0} months" : "{plural2} animales han estado en el refugio más de {0} meses",
"{plural2} animals have holds ending today" : "{plural2} animales finalizan su período de retención hoy",
"{plural2} animals were adopted" : "{plural2} animales fueron adoptados",
"{plural2} animals were dead on arrival" : "{plural2} animales llegaron muertos",
"{plural2} animals were euthanized" : "{plural2} los animales fueron sometidos a eutanasia",
"{plural2} animals were reclaimed by their owners" : "{plural2} animales fueron recuperados por sus dueños",
"{plural2} animals were transferred to other shelters" : "{plural2} animales fueron trasladados a otros centros de acogida",
"{plural2} days." : "{plural2} día.",
"{plural2} incomplete animal control calls" : "{plural2} llamadas de control de animales incompletas",
"{plural2} items of stock expire in the next month" : "{plural2} artículos de stock caducan el mes siguiente",
"{plural2} items of stock have expired" : "{plural2} artículos de stock han caducado",
"{plural2} medical treatments need to be administered today" : "{plural2} tratamientos médicos necesitan ser administrados hoy",
"{plural2} months." : "{plural2} meses.",
"{plural2} new online form submissions" : "{plural2} nuevos formularios enviados",
"{plural2} people have overdue payments" : "{plural2} people have overdue payments",
"{plural2} people with active reservations have not been homechecked" : "{plural2} personas con reservas activas pero no se ha realizado la visita a domicilio",
"{plural2} potential matches for lost animals" : "{plural2} coincidencias potenciales para animales perdidos",
"{plural2} recent publisher runs had errors" : "{plural2} publicaciones recientes tienen errores",
"{plural2} reservations have been active over a week without adoption" : "{plural2} reservas han estado activas más de una semana sin adopción",
"{plural2} results found in {1} seconds. Order: {2}" : "{plural2} resultados encontrados en {1} segundos. Orden: {2}",
"{plural2} shelter animals have not been microchipped" : "{plural2} animales del refugio no han sido microchipados",
"{plural2} shelter animals have people looking for them" : "{plural2} animales del refugio tienen gente en busca de ellos",
"{plural2} tests need to be performed today" : "{plural2} pruebas deben realizarse hoy",
"{plural2} transports do not have a driver assigned" : "{plural2} transportes no tienen ningún conductor asignado",
"{plural2} traps are overdue for return" : "{plural2} jaulas trampa están pendientes de devolución",
"{plural2} trial adoptions have ended" : "{plural2} adopciones de prueba han terminado",
"{plural2} unaltered animals have been adopted in the last month" : "{plural2} animales no castrados/esterilizados se han adoptado en el último mes",
"{plural2} undispatched animal control calls" : "{plural2} llamadas de control de animales no enviadas",
"{plural2} unpaid fines" : "{plural2} multas no pagadas",
"{plural2} urgent entries on the waiting list" : "{plural2} entradas urgentes en lista de espera",
"{plural2} vaccinations have expired" : "{plural2} vacunas han expirado",
"{plural2} vaccinations need to be administered today" : "{plural2} vacunas necesitan ser administrados hoy",
"{plural2} weeks." : "{plural2} semana.",
"{plural2} years." : "{plural2} años.",
"{plural3} animal control calls due for followup today" : "{plural3} llamadas de control de animales pendientes de seguimiento",
"{plural3} animals are not available for adoption" : "{plural3} animales no están disponibles para la adopción",
"{plural3} animals died" : "{plural3} animales murieron",
"{plural3} animals entered the shelter" : "{plural3} animales han entrado en el refugio",
"{plural3} animals have been on the shelter longer than {0} months" : "{plural3} animales han estado en el refugio más de {0} months",
"{plural3} animals have holds ending today" : "{plural3} animales finalizan su período de retención hoy",
"{plural3} animals were adopted" : "{plural3} han sido adoptados",
"{plural3} animals were dead on arrival" : "{plural3} animales llegaron muertos",
"{plural3} animals were euthanized" : "{plural3} los animales fueron sometidos a eutanasia",
"{plural3} animals were reclaimed by their owners" : "{plural3} animales fueron recuperados por sus dueños",
"{plural3} animals were transferred to other shelters" : "{plural3} animales fueron trasladados a otros centros de acogida",
"{plural3} days." : "{plural3} día.",
"{plural3} incomplete animal control calls" : "{plural3} llamadas de control de animales incompletas",
"{plural3} items of stock expire in the next month" : "{plural3} artículos de stock caducan el mes siguiente",
"{plural3} items of stock have expired" : "{plural3} artículos en stock caducados",
"{plural3} medical treatments need to be administered today" : "{plural3} tratamientos médicos necesitan ser administrados hoy",
"{plural3} months." : "{plural3} meses.",
"{plural3} new online form submissions" : "{plural3} nuevos formularios online presentados",
"{plural3} people have overdue payments" : "{plural3} personas tienen pagos atrasados",
"{plural3} people with active reservations have not been homechecked" : "{plural3} personas con reservas activas pero no se ha realizado la visita a domicilio",
"{plural3} potential matches for lost animals" : "{plural3} coincidencias potenciales para animales perdidos",
"{plural3} recent publisher runs had errors" : "{plural3} publicaciones recientes tienen errores",
"{plural3} reservations have been active over a week without adoption" : "{plural3} reservas han estado activas más de una semana sin adopción",
"{plural3} results found in {1} seconds. Order: {2}" : "{plural3} resultados encontrados en {1} segundos. Orden: {2}",
"{plural3} shelter animals have not been microchipped" : "{plural3} animales del refugio no han sido microchipados",
"{plural3} shelter animals have people looking for them" : "{plural3} animales del refugio tienen gente en busca de ellos",
"{plural3} tests need to be performed today" : "{plural3} pruebas deben realizarse hoy",
"{plural3} transports do not have a driver assigned" : "{plural3} transportes no tienen ningún conductor asignado",
"{plural3} traps are overdue for return" : "Se ha retrasado la devolución de {plural3} jaulas trampa",
"{plural3} trial adoptions have ended" : "{plural3} adopciones de prueba han terminado",
"{plural3} unaltered animals have been adopted in the last month" : "{plural3} animales no castrados/esterilizados se han adoptado en el último mes",
"{plural3} undispatched animal control calls" : "{plural3} llamadas no enviadas",
"{plural3} unpaid fines" : "{plural3} multas no pagadas",
"{plural3} urgent entries on the waiting list" : "{plural3} entradas urgentes en lista de espera",
"{plural3} vaccinations have expired" : "{plural3} vacunas han caducado",
"{plural3} vaccinations need to be administered today" : "{plural3} vacunas necesitan ser administrados hoy",
"{plural3} weeks." : "{plural3} semana."
}
|
bobintetley/asm3
|
src/asm3/locales/locale_es.py
|
Python
|
gpl-3.0
| 204,743
|
[
"Amber",
"VisIt"
] |
c789f15b240550901e606a2f0f5cfda6398ce33d48ee6692a7a4a34ce6086dd3
|
"""
====================
Voigt Profile Fitter
====================
"""
from . import model
import numpy as np
from ...spectrum.moments import moments
import types
try:
import scipy.special
scipyOK = True
except ImportError:
scipyOK = False
def voigt(xarr, amp, xcen, sigma, gamma, normalized=False):
"""
Normalized Voigt profile
z = (x+i*gam)/(sig*sqrt(2))
V(x,sig,gam) = Re(w(z))/(sig*sqrt(2*pi))
The area of V in this definition is 1.
If normalized=False, then you can divide the integral of V by
sigma*sqrt(2*pi) to get the area.
Original implementation converted from
http://mail.scipy.org/pipermail/scipy-user/2011-January/028327.html
(had an incorrect normalization and strange treatment of the input
parameters)
Modified implementation taken from wikipedia, using the definition.
http://en.wikipedia.org/wiki/Voigt_profile
Parameters
----------
xarr : np.ndarray
The X values over which to compute the Voigt profile
amp : float
Amplitude of the voigt profile
if normalized = True, amp is the AREA
xcen : float
The X-offset of the profile
sigma : float
The width / sigma parameter of the Gaussian distribution
gamma : float
The width / shape parameter of the Lorentzian distribution
normalized : bool
Determines whether "amp" refers to the area or the peak
of the voigt profile
"""
if scipyOK:
z = ((xarr.value-xcen) + 1j*gamma) / (sigma * np.sqrt(2))
V = amp * np.real(scipy.special.wofz(z))
if normalized:
return V / (sigma*np.sqrt(2*np.pi))
else:
return V
else:
raise ImportError("Couldn't import scipy, therefore cannot do "
"voigt profile stuff")
def voigt_fwhm(sigma, gamma):
"""
Approximation to the Voigt FWHM from wikipedia
http://en.wikipedia.org/wiki/Voigt_profile
Parameters
----------
sigma : float
The width / sigma parameter of the Gaussian distribution
gamma : float
The width / shape parameter of the Lorentzian distribution
"""
return 0.5346 * 2 * gamma + np.sqrt(0.2166*(2*gamma)**2 + sigma**2*8*np.log(2))
def voigt_moments(self, *args, **kwargs):
"""
Get the spectral moments from the moments package. Use the gaussian width
for the lorentzian width (not a great guess!)
"""
m = moments(*args,**kwargs)
return list(m) + [m[-1]]
def voigt_fitter():
"""
Generator for voigt fitter class
"""
myclass = model.SpectralModel(voigt, 4,
parnames=['amplitude','shift','gwidth','lwidth'],
parlimited=[(False,False),(False,False),(True,False),(True,False)],
parlimits=[(0,0), (0,0), (0,0), (0,0)],
shortvarnames=('A',r'\Delta x',r'\sigma_G',r'\sigma_L'),
centroid_par='shift',
fwhm_func=voigt_fwhm,
fwhm_pars=['gwidth','lwidth'],
)
myclass.__name__ = "voigt"
try:
myclass.moments = types.MethodType(voigt_moments, myclass,
myclass.__class__)
except TypeError: # indicates py3 is being used
# http://stackoverflow.com/questions/10729909/convert-builtin-function-type-to-method-type-in-python-3?lq=1
myclass.moments = voigt_moments.__get__(myclass)
return myclass
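# --- Illustrative usage sketch (editor's addition, not part of the original
# module). voigt() expects `xarr` to expose a ``.value`` attribute, as
# pyspeckit spectroscopic axes and astropy Quantities do; a bare numpy array
# will not work. astropy is assumed here purely to provide such an object,
# and the demo only runs when the module is executed directly.
if __name__ == "__main__":
    from astropy import units as u
    x = np.linspace(-5.0, 5.0, 201) * u.dimensionless_unscaled
    # Area-normalized profile with Gaussian sigma=0.5 and Lorentzian gamma=0.3
    profile = voigt(x, amp=1.0, xcen=0.0, sigma=0.5, gamma=0.3, normalized=True)
    print("peak value:", profile.max())
    print("approximate FWHM:", voigt_fwhm(0.5, 0.3))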
|
mikelum/pyspeckit
|
pyspeckit/spectrum/models/inherited_voigtfitter.py
|
Python
|
mit
| 3,431
|
[
"Gaussian"
] |
184c27aa45a522b2aae144b0d95b3442dd23aee4e8fc162632848b08daaa995a
|
# ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import division
from neon import NervanaObject
from neon.util.persist import load_class
import logging
import numpy as np
logger = logging.getLogger(__name__)
def get_param_list(layer_list):
'''
Returns a flattened list of parameters. Each element in the list
is a tuple ``((W, dW), states)`` for the parameters ``W``, parameter updates ``dW``,
and the current set of ``states``.
Args:
layer_list (list): List of layers
Returns:
param_list (list): List of parameters.
'''
plist = []
for l in layer_list:
ptuple = l.get_params()
plist.extend(ptuple) if isinstance(ptuple, list) else plist.append(ptuple)
return plist
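# Illustrative sketch (editor's note, not original neon code): get_param_list
# is typically called from an optimizer's optimize() method, and each entry
# unpacks as ((param, grad), states), e.g.
#
#     param_list = get_param_list(layer_list)
#     for (param, grad), states in param_list:
#         param[:] = param - 0.01 * grad   # plain SGD step, for illustration only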
class Optimizer(NervanaObject):
'''
The optimizer class handles the gradient update stage of training a neural network.
Given the current parameters :math:`w`, update parameters
:math:`\Delta w`, and current state :math:`s`, the optimizer specifies an
algorithm for performing the update.
This base class contains two helper functions for scaling the gradients. It also
specifies the abstract method optimize, which subclasses should implement. The optimize
method is called at every minibatch to update the layer parameters.
'''
def __init__(self, name=None):
"""
Class constructor.
"""
super(Optimizer, self).__init__(name=name)
def optimize(self, layer_list, epoch):
"""
Update the parameters for a provided list of layers.
Args:
layer_list (list): List of layers to optimize
epoch (integer): Epoch count of training
"""
raise NotImplementedError()
def clip_gradient_norm(self, param_list, clip_norm):
"""
Returns a scaling factor to apply to the gradients.
The scaling factor is computed such that the root mean squared
average of the scaled gradients across all layers will be less than
or equal to the provided clip_norm value. This factor is always <1, so
never scales up the gradients.
Arguments:
param_list (list): List of layer parameters
clip_norm (float, optional): Target norm for the gradients. If not provided
the returned scale_factor will equal 1.
Returns:
scale_factor (float): Computed scale factor.
"""
scale_factor = 1
if clip_norm:
grad_list = [grad for (param, grad), states in param_list]
grad_square_sums = sum(self.be.sum(self.be.square(grad)) for grad in grad_list)
grad_norm = self.be.zeros((1, 1))
grad_norm[:] = self.be.sqrt(grad_square_sums) / self.be.bsz
scale_factor = clip_norm / max(float(grad_norm.get()), float(clip_norm))
return scale_factor
def clip_value(self, v, abs_bound=None):
"""
Element-wise clip a gradient or parameter tensor to between
``-abs_bound`` and ``+abs_bound``.
Arguments:
v (tensor): Tensor of gradients or parameters for a single layer
abs_bound (float, optional): Value to element-wise clip gradients
or parameters. Defaults to None.
Returns:
v (tensor): Tensor of clipped gradients or parameters.
"""
if abs_bound:
return self.be.clip(v, -abs(abs_bound), abs(abs_bound))
else:
return v
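# Standalone illustrative sketch (plain numpy, not used by the classes below):
# the same scale-factor computation as Optimizer.clip_gradient_norm above.
# grad_list, clip_norm and batch_size are hypothetical inputs.
def _clip_gradient_norm_sketch(grad_list, clip_norm, batch_size):
    # root of the summed squared gradients, averaged over the batch size
    grad_norm = np.sqrt(sum(np.sum(g ** 2) for g in grad_list)) / batch_size
    # the factor is at most 1, so gradients are only ever scaled down
    return clip_norm / max(float(grad_norm), float(clip_norm))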
class Schedule(NervanaObject):
"""
Learning rate schedule.
By default implements a constant learning rate:
.. code-block:: python
# Constant learning rate of 0.01 across training epochs
optimizer = GradientDescentMomentum(0.01, 0.9, schedule = Schedule())
Otherwise, the schedule multiplies the learning rate by change at every element in
``step_config``.
For example,
.. code-block:: python
schedule = Schedule(step_config=[2, 6], change=0.5)
optimizer = GradientDescentMomentum(1.0, 0.9, schedule=schedule)
will yield a learning rate schedule of:
.. csv-table::
:header: "Epoch", "LR"
:widths: 20, 10
0, 1.0
1, 1.0
2, 0.5
3, 0.5
4, 0.5
5, 0.5
6, 0.25
7, 0.25
8, 0.25
9, 0.25
"""
def __init__(self, step_config=None, change=1.):
"""
Class constructor.
Arguments:
step_config (list, optional): Configure the step times (list of epoch indices).
Defaults to None (constant).
change (int, optional): The learning rate is
multiplied by ``change ** steps``, where ``steps`` is the
number of steps in the step schedule that have passed.
"""
if isinstance(step_config, list) and isinstance(change, list):
assert len(step_config) == len(change), "change and step_config must have the same " \
"length after step_config is deduplicated to do epoch-level LR assignment."
logger.warn("This functionality will be removed from Schedule in the future. "
"Please use the StepSchedule class instead.")
if isinstance(step_config, int):
logger.warn("This functionality will be removed from Schedule in the future. "
"Please use the PowerSchedule class instead.")
self.step_config = step_config
self.change = change
self.steps = 0
def get_learning_rate(self, learning_rate, epoch):
"""
Returns the current learning rate given the epoch and initial learning rate.
Arguments:
learning_rate (float): Initial learning rate
epoch (int): Current epoch, used to calculate the adjusted learning rate
Returns:
(float): The adjusted learning rate
"""
# will be moved to StepSchedule in the future
if isinstance(self.step_config, list) and isinstance(self.change, list):
if epoch in self.step_config:
# steps will store the current lr
self.steps = self.change[self.step_config.index(epoch)]
if self.steps == 0:
return learning_rate
else:
return self.steps
# will be moved to PowerSchedule in the future
elif isinstance(self.step_config, int):
self.steps = np.floor(epoch / self.step_config)
elif isinstance(self.step_config, list):
self.steps = np.sum(epoch >= np.array(self.step_config))
return float(learning_rate * self.change ** self.steps)
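# Standalone illustrative sketch: reproducing the learning-rate table from the
# Schedule docstring above with plain Python (step_config=[2, 6], change=0.5,
# initial rate 1.0 -- the same hypothetical configuration as the docstring).
def _schedule_table_sketch():
    step_config, change, lr0 = [2, 6], 0.5, 1.0
    rates = []
    for epoch in range(10):
        steps = int(np.sum(epoch >= np.array(step_config)))  # steps passed so far
        rates.append(lr0 * change ** steps)                   # 1.0, 1.0, 0.5, ..., 0.25
    return rates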
class StepSchedule(Schedule):
"""
Steps the learning rate over training time.
To set a step schedule, pass as arguments ``step_config`` and ``change``. The schedule
will set the learning rate at ``step[i]`` to ``change[i]``. For example, the call:
.. code-block:: python
schedule = StepSchedule(step_config=[2, 6], change=[0.6, 0.4])
will set the learning rate to 0.6 at step 2, and to 0.4 at step 6.
"""
def __init__(self, step_config, change):
"""
Class constructor.
Arguments:
step_config (list): Configure the step times (list of epoch indices)
change (list): List of learning rates. Must be same length as step_config
"""
assert isinstance(step_config, list) and isinstance(change, list), \
"The arguments change and step_config must be lists."
assert len(step_config) == len(change), \
"The arguments change and step_config must have the same length."
self.step_config = step_config
self.change = change
self.steps = 0
def get_learning_rate(self, learning_rate, epoch):
"""
Returns the current learning rate given the epoch and initial learning rate.
Arguments:
learning_rate (float): Initial learning rate
epoch (int): Current epoch, used to calculate the adjusted learning rate
Returns:
(float): The adjusted learning rate
"""
if epoch in self.step_config:
# steps will store the current lr
self.steps = self.change[self.step_config.index(epoch)]
if self.steps == 0:
return learning_rate
else:
return self.steps
class PowerSchedule(Schedule):
"""
Multiplies the learning rate by a factor at regular epoch intervals.
This schedule will multiply the learning rate by
the factor ``change`` every ``step_config`` epochs. For example,
.. code-block:: python
schedule = PowerSchedule(step_config=2, change=0.5)
optimizer = GradientDescentMomentum(0.1, 0.9, schedule=schedule)
will yield a learning rate schedule of:
.. csv-table::
:header: "Epoch", "LR"
:widths: 20, 10
0, 0.1
1, 0.1
2, 0.05
3, 0.05
4, 0.025
5, 0.025
6, 0.0125
7, 0.0125
"""
def __init__(self, step_config, change):
"""
Class constructor.
Arguments:
step_config (int): Learning rate update interval (in epochs)
change (int): Update factor
"""
assert isinstance(step_config, int), \
"The argument step_config must be an integer."
assert not isinstance(change, list), \
"The argument change must be a float or integer."
self.step_config = step_config
self.change = change
self.steps = 0
def get_learning_rate(self, learning_rate, epoch):
"""
Returns the current learning rate given the epoch and initial learning rate.
Arguments:
learning_rate (float): Initial learning rate
epoch (int): Current epoch, used to calculate the adjusted learning rate.
Returns:
(float): The adjusted learning rate.
"""
self.steps = np.floor(epoch / self.step_config)
return float(learning_rate * self.change ** self.steps)
class ExpSchedule(Schedule):
"""
Exponential learning rate schedule. This schedule implements
.. math::
\\alpha(t) = \\frac{\\alpha_\\circ}{1 + \\beta t}
where :math:`\\beta` is the decay rate, and :math:`\\alpha_\\circ` is the
initial learning rate.
"""
def __init__(self, decay):
"""
Class constructor.
Arguments:
decay (float): Decay rate.
"""
self.decay = decay
def get_learning_rate(self, learning_rate, epoch):
"""
Returns the current learning rate given the epoch and initial learning rate.
Arguments:
learning_rate (float): Initial learning rate
epoch (int): Current epoch, used to calculate the adjusted learning rate.
Returns:
(float): The adjusted learning rate.
"""
return float(learning_rate / (1. + self.decay * epoch))
class PolySchedule(Schedule):
"""
Polynomial learning rate schedule.
This schedule takes as input the total number of epochs :math:`T` and a power :math:`\\beta`,
and produces the learning schedule:
.. math::
\\alpha(t) = \\alpha_\\circ \\times\\left(1-\\frac{t}{T}\\right)^\\beta
where :math:`\\alpha_\\circ` is the initial learning rate.
"""
def __init__(self, total_epochs, power):
"""
Class constructor.
Arguments:
total_epochs (int): Total number of epochs over which to calculate interpolated decay
power (float): Total decay parameter
"""
self.total_epochs = np.float32(total_epochs)
self.power = power
def get_learning_rate(self, learning_rate, epoch):
"""
Returns the current learning rate given the epoch and initial learning rate.
Arguments:
learning_rate (float): Initial learning rate
epoch (int): Current epoch, used to calculate the adjusted learning rate.
Returns:
(float): The adjusted learning rate.
"""
return float(learning_rate * (1. - epoch / self.total_epochs) ** self.power)
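# Standalone illustrative sketch: the exponential and polynomial decays described
# in the ExpSchedule and PolySchedule docstrings above, written out with plain
# arithmetic. The decay rate, power and epoch count are hypothetical.
def _decay_schedules_sketch(lr0=0.1, decay=0.01, total_epochs=10, power=2.0):
    # alpha(t) = alpha0 / (1 + beta*t)
    exp_rates = [lr0 / (1. + decay * t) for t in range(total_epochs)]
    # alpha(t) = alpha0 * (1 - t/T)**beta
    poly_rates = [lr0 * (1. - t / float(total_epochs)) ** power for t in range(total_epochs)]
    return exp_rates, poly_rates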
class ShiftSchedule(Schedule):
"""
Binary shift learning rate schedule.
Arguments:
interval (int): interval in epochs the learning rate is shifted
shift_size (int): amount to shift
"""
def __init__(self, interval, shift_size=1):
self.interval = interval
self.shift_size = shift_size
def get_learning_rate(self, learning_rate, epoch):
total_shift = -1 * self.shift_size * int(epoch/self.interval)
return float(self.be.shift(learning_rate, total_shift, value=False).get())
class GradientDescentMomentum(Optimizer):
"""
Stochastic gradient descent with momentum.
Given the parameters :math:`\\theta`, the learning rate :math:`\\alpha`,
and the gradients :math:`\\nabla J(\\theta; x)`
computed on the minibatch data :math:`x`, SGD updates the parameters via
.. math::
\\theta' = \\theta - \\alpha\\nabla J(\\theta; x)
Here we implement SGD with momentum. Momentum tracks the history of
gradient updates to help the system move faster through saddle points.
Given the additional parameters: momentum :math:`\gamma`, weight decay :math:`\lambda`,
and current velocity :math:`v`, we use the following update equations
.. math::
v' = \\gamma v - \\alpha(\\nabla J(\\theta; x) + \\lambda\\theta)
\\theta' = \\theta + v'
The optional `nesterov` parameter implements Nesterov Accelerated Gradient.
If this is set, we use the following update equations instead
.. math::
v' = \\gamma^2 v - \\alpha (\\gamma + 1) (\\nabla J(\\theta; x) + \\lambda\\theta)
\\theta' = \\theta + v'
Example usage:
.. code-block:: python
from neon.optimizers import GradientDescentMomentum
# use SGD with learning rate 0.01 and momentum 0.9, while
# clipping the gradient magnitude to between -5 and 5.
opt = GradientDescentMomentum(0.01, 0.9, gradient_clip_value = 5)
"""
def __init__(self, learning_rate, momentum_coef, stochastic_round=False,
wdecay=0.0, gradient_clip_norm=None, gradient_clip_value=None,
param_clip_value=None, name=None, schedule=Schedule(),
nesterov=False):
"""
Class constructor.
Arguments:
learning_rate (float): Multiplicative coefficient of updates
momentum_coef (float): Coefficient of momentum
stochastic_round (bool, optional): Set this to True for stochastic
rounding. If False (default)
rounding will be to nearest. If
True use default width
stochastic rounding. Note that
this only affects the GPU
backend.
wdecay (float, optional): Amount of weight decay. Defaults to 0
gradient_clip_norm (float, optional): Target gradient norm.
Defaults to None.
gradient_clip_value (float, optional): Value to element-wise clip
gradients.
Defaults to None.
param_clip_value (float, optional): Value to element-wise clip
parameters.
Defaults to None.
name (str, optional): the optimizer's layer's pretty-print name.
Defaults to "gdm".
schedule (neon.optimizers.optimizer.Schedule, optional): Learning
rate schedule. Defaults to a constant learning rate.
nesterov (bool, optional): Use nesterov accelerated gradient.
Defaults to False.
"""
super(GradientDescentMomentum, self).__init__(name=name)
self.learning_rate, self.momentum_coef = (learning_rate, momentum_coef)
self.gradient_clip_norm = gradient_clip_norm
self.gradient_clip_value = gradient_clip_value
self.param_clip_value = param_clip_value
self.wdecay = wdecay
self.schedule = schedule
self.stochastic_round = stochastic_round
self.nesterov = nesterov
if self.momentum_coef == 0 and self.nesterov:
raise ValueError("nesterov requires non-zero momentum")
def optimize(self, layer_list, epoch):
"""
Apply the learning rule to all the layers and update the states.
Arguments:
layer_list (list): a list of Layer objects to optimize.
epoch (int): the current epoch, needed for the Schedule object.
"""
lrate = self.schedule.get_learning_rate(self.learning_rate, epoch)
param_list = get_param_list(layer_list)
scale_factor = self.clip_gradient_norm(param_list, self.gradient_clip_norm)
for (param, grad), states in param_list:
param.rounding = self.stochastic_round
if len(states) == 0 and self.momentum_coef != 0:
states.append(self.be.zeros_like(grad))
grad = grad / self.be.bsz
grad = self.clip_value(grad, self.gradient_clip_value)
if self.momentum_coef == 0:
param[:] = (- lrate * scale_factor) * grad +\
(1 - lrate * self.wdecay) * param
param = self.clip_value(param, self.param_clip_value)
else:
grad = scale_factor * grad + self.wdecay * param
velocity = states[0]
velocity[:] = self.momentum_coef * velocity - lrate * grad
# Nesterov accelerated gradient (NAG) is implemented the same
# as in torch's "sgd.lua". It's a reformulation of Sutskever's
# NAG equation found in "On the importance of initialization
# and momentum in deep learning".
if self.nesterov:
param[:] = self.clip_value(
param + self.momentum_coef * velocity
- lrate * grad, self.param_clip_value)
else:
param[:] = self.clip_value(
param + velocity, self.param_clip_value)
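# Standalone illustrative sketch (plain arithmetic, no backend): one step of the
# momentum update described in the GradientDescentMomentum docstring above. The
# parameter, gradient and hyperparameter values are hypothetical.
def _sgd_momentum_step_sketch(param, grad, velocity, lrate=0.01, momentum=0.9, wdecay=0.0):
    grad = grad + wdecay * param                   # gradient plus weight decay
    velocity = momentum * velocity - lrate * grad  # v' = gamma*v - alpha*(grad + lambda*theta)
    param = param + velocity                       # theta' = theta + v'
    return param, velocity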
class RMSProp(Optimizer):
"""
Root Mean Square propagation.
Root Mean Square (RMS) propagation protects against vanishing and
exploding gradients. In RMSprop, the gradient is divided by a running
average of recent gradients. Given the parameters :math:`\\theta`, gradient :math:`\\nabla J`,
we keep a running average :math:`\\mu` of the last :math:`1/\\lambda` gradients squared.
The update equations are then given by
.. math::
\\mu' &= \\lambda\\mu + (1-\\lambda)(\\nabla J)^2
.. math::
\\theta' &= \\theta - \\frac{\\alpha}{\\sqrt{\\mu + \\epsilon} + \\epsilon}\\nabla J
where we use :math:`\\epsilon` as a (small) smoothing factor to prevent from dividing by zero.
"""
def __init__(self, stochastic_round=False, decay_rate=0.95, learning_rate=2e-3, epsilon=1e-6,
gradient_clip_norm=None, gradient_clip_value=None, param_clip_value=None,
name=None, schedule=Schedule()):
"""
Class constructor.
Arguments:
stochastic_round (bool): Set this to True for stochastic rounding.
If False rounding will be to nearest.
If True will perform stochastic rounding using default width.
Only affects the gpu backend.
decay_rate (float): decay rate of states
learning_rate (float): the multiplication coefficient of updates
epsilon (float): smoothing epsilon to avoid divide by zeros
gradient_clip_norm (float, optional): Target gradient norm.
Defaults to None.
gradient_clip_value (float, optional): Value to element-wise clip
gradients.
Defaults to None.
param_clip_value (float, optional): Value to element-wise clip
parameters.
Defaults to None.
schedule (neon.optimizers.optimizer.Schedule, optional): Learning rate schedule.
Defaults to a constant.
Notes:
Only constant learning rate is supported currently.
"""
super(RMSProp, self).__init__(name=name)
self.state_list = None
self.epsilon = epsilon
self.decay_rate = decay_rate
self.learning_rate = learning_rate
self.schedule = schedule
self.gradient_clip_norm = gradient_clip_norm
self.gradient_clip_value = gradient_clip_value
self.param_clip_value = param_clip_value
self.stochastic_round = stochastic_round
def optimize(self, layer_list, epoch):
"""
Apply the learning rule to all the layers and update the states.
Arguments:
layer_list (list): a list of Layer objects to optimize.
epoch (int): the current epoch, needed for the Schedule object.
"""
lrate = self.schedule.get_learning_rate(self.learning_rate, epoch)
epsilon, decay = (self.epsilon, self.decay_rate)
param_list = get_param_list(layer_list)
scale_factor = self.clip_gradient_norm(param_list, self.gradient_clip_norm)
for (param, grad), states in param_list:
param.rounding = self.stochastic_round
if len(states) == 0:
states.append(self.be.zeros_like(grad))
grad = grad / self.be.bsz
grad = self.clip_value(grad, self.gradient_clip_value)
# update state
state = states[0]
state[:] = decay * state + self.be.square(grad) * (1.0 - decay)
param[:] = self.clip_value(
param - (scale_factor * grad * lrate)
/ (self.be.sqrt(state + epsilon) + epsilon),
self.param_clip_value)
class Adagrad(Optimizer):
"""
Adagrad optimization algorithm.
Adagrad is an algorithm that adapts the learning rate individually for each parameter
by dividing by the :math:`L_2`-norm of all previous gradients. Given the parameters
:math:`\\theta`, gradient :math:`\\nabla J`, accumulating norm :math:`G`, and smoothing
factor :math:`\\epsilon`, we use the update equations:
.. math::
G' = G + (\\nabla J)^2
.. math::
\\theta' = \\theta - \\frac{\\alpha}{\sqrt{G' + \\epsilon}} \\nabla J
where the smoothing factor :math:`\\epsilon` prevents from dividing by zero.
By adjusting the learning rate individually for each parameter, Adagrad adapts
to the geometry of the error surface. Differently scaled weights have appropriately scaled
update steps.
Example usage:
.. code-block:: python
from neon.optimizers import Adagrad
# use Adagrad with a learning rate of 0.01
optimizer = Adagrad(learning_rate=0.01, epsilon=1e-6)
"""
def __init__(self, stochastic_round=False, learning_rate=0.01, epsilon=1e-6,
gradient_clip_norm=None, gradient_clip_value=None,
param_clip_value=None, name=None):
"""
Class constructor.
Arguments:
stochastic_round (bool): Set this to True for stochastic rounding.
If False rounding will be to nearest.
If True will perform stochastic rounding using default width.
Only affects the gpu backend.
learning_rate (float): the multiplication coefficient of updates
epsilon (float): smoothing epsilon to avoid divide by zeros
gradient_clip_norm (float, optional): Target gradient norm.
Defaults to None.
gradient_clip_value (float, optional): Value to element-wise clip
gradients.
Defaults to None.
param_clip_value (float, optional): Value to element-wise clip
parameters.
Defaults to None.
Notes:
Only constant learning rate is supported currently.
"""
super(Adagrad, self).__init__(name=name)
self.state_list = None
self.epsilon = epsilon
self.learning_rate = learning_rate
self.gradient_clip_norm = gradient_clip_norm
self.gradient_clip_value = gradient_clip_value
self.param_clip_value = param_clip_value
self.stochastic_round = stochastic_round
def optimize(self, layer_list, epoch):
"""
Apply the learning rule to all the layers and update the states.
Arguments:
layer_list (list): a list of Layer objects to optimize.
epoch (int): the current epoch, needed for the Schedule object.
"""
lrate, epsilon = (self.learning_rate, self.epsilon)
param_list = get_param_list(layer_list)
scale_factor = self.clip_gradient_norm(param_list, self.gradient_clip_norm)
for (param, grad), states in param_list:
param.rounding = self.stochastic_round
if len(states) == 0:
states.append(self.be.zeros_like(grad))
grad = grad / self.be.bsz
grad = self.clip_value(grad, self.gradient_clip_value)
# update state
state = states[0]
state[:] = state + self.be.square(grad)
param[:] = self.clip_value(
param - (scale_factor * grad * lrate)
/ (self.be.sqrt(state + epsilon)), self.param_clip_value)
class Adadelta(Optimizer):
"""
Adadelta optimization algorithm.
Similar to RMSprop, Adadelta tracks the running average of the
gradients, :math:`\\mu_J`, over a window size :math:`1/\\lambda`, where
:math:`\\lambda` is the parameter ``decay``. Adadelta also tracks an average of the
recent update steps, which we denote as :math:`\\mu_\\theta`, and sets the learning rate
as the ratio of the two averages:
.. math::
\\mu_J' &= \\lambda\\mu_J + (1-\\lambda) (\\nabla J)^2
.. math::
\\Delta \\theta &= \\sqrt{\\frac{\\mu_\\theta + \\epsilon}{\\mu_J' + \\epsilon}} \\nabla J
.. math::
\\mu_\\theta &= \\lambda \\mu_\\theta + (1-\\lambda) (\\Delta \\theta)^2
.. math::
\\theta &= \\theta - \\Delta \\theta
Note that the learning rate is a ratio of the average updates from the
previous step, :math:`\\mu_\\theta`, divided by the average gradients including the current
step, :math:`\\mu'_J`.
Example usage:
.. code-block:: python
from neon.optimizers import Adadelta
# use Adadelta with a decay rate of 0.95
optimizer = Adadelta(decay=0.95, epsilon=1e-6)
"""
def __init__(self, stochastic_round=False, decay=0.95, epsilon=1e-6,
param_clip_value=None, name=None):
"""
Class constructor.
Args:
stochastic_round (bool): Set this to True for stochastic rounding.
If False rounding will be to nearest.
If True will perform stochastic rounding using default width.
Only affects the gpu backend.
decay: decay parameter in Adadelta
epsilon: epsilon parameter in Adadelta
param_clip_value (float, optional): Value to element-wise clip
parameters.
Defaults to None.
"""
super(Adadelta, self).__init__(name=name)
self.decay = decay
self.epsilon = epsilon
self.stochastic_round = stochastic_round
self.param_clip_value = param_clip_value
def optimize(self, layer_list, epoch):
"""
Apply the learning rule to all the layers and update the states.
Arguments:
param_list (list): a list of tuples of the form ((param, grad), state),
corresponding to parameters, grads,
and states of layers to be updated
epoch (int): the current epoch, needed for the Schedule object.
"""
epsilon, decay = (self.epsilon, self.decay)
param_list = get_param_list(layer_list)
for (param, grad), states in param_list:
param.rounding = self.stochastic_round
if len(states) == 0:
# E[Grad^2], E[Delt^2], updates
states.extend([self.be.zeros_like(grad) for i in range(3)])
grad = grad / self.be.bsz
states[0][:] = states[0] * decay + (1. - decay) * grad * grad
states[2][:] = self.be.sqrt((states[1] + epsilon) / (states[0] + epsilon)) * grad
states[1][:] = states[1] * decay + (1. - decay) * states[2] * states[2]
param[:] = self.clip_value(param - states[2], self.param_clip_value)
class Adam(Optimizer):
"""
Adam optimizer.
The Adam optimizer combines features from RMSprop and Adagrad. We
accumulate both the first and second moments of the gradient with decay
rates :math:`\\beta_1` and :math:`\\beta_2` corresponding to window sizes of
:math:`1/\\beta_1` and :math:`1/\\beta_2`, respectively.
.. math::
m' &= \\beta_1 m + (1-\\beta_1) \\nabla J
.. math::
v' &= \\beta_2 v + (1-\\beta_2) (\\nabla J)^2
We update the parameters by the ratio of the two moments:
.. math::
\\theta = \\theta - \\alpha \\frac{\\hat{m}'}{\\sqrt{\\hat{v}'}+\\epsilon}
where we compute the bias-corrected moments :math:`\\hat{m}'` and :math:`\\hat{v}'` via
.. math::
\\hat{m}' &= m'/(1-\\beta_1^t)
.. math::
\\hat{v}' &= v'/(1-\\beta_2^t)
Example usage:
.. code-block:: python
from neon.optimizers import Adam
# use Adam
optimizer = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)
"""
def __init__(self, stochastic_round=False, learning_rate=0.001, beta_1=0.9, beta_2=0.999,
epsilon=1e-8, gradient_clip_norm=None, gradient_clip_value=None,
param_clip_value=None, name="adam"):
"""
Class constructor.
Args:
stochastic_round (bool): Set this to True for stochastic rounding.
If False rounding will be to nearest.
If True will perform stochastic rounding using default width.
Only affects the gpu backend.
learning_rate (float): the multiplicative coefficient of updates
beta_1 (float): Adam parameter beta1
beta_2 (float): Adam parameter beta2
epsilon (float): numerical stability parameter
gradient_clip_norm (float, optional): Target gradient norm.
Defaults to None.
gradient_clip_value (float, optional): Value to element-wise clip gradients.
Defaults to None.
param_clip_value (float, optional): Value to element-wise clip parameters.
Defaults to None.
"""
super(Adam, self).__init__(name=name)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.learning_rate = learning_rate
self.stochastic_round = stochastic_round
self.gradient_clip_norm = gradient_clip_norm
self.gradient_clip_value = gradient_clip_value
self.param_clip_value = param_clip_value
self.t = 0
def optimize(self, layer_list, epoch):
"""
Apply the learning rule to all the layers and update the states.
Arguments:
param_list (list): a list of tuples of the form ((param, grad), state),
corresponding to parameters, grads, and states of layers to be updated
epoch (int): the current epoch, needed for the Schedule object.
"""
self.t = self.t + 1
l = (self.learning_rate * self.be.sqrt(1 - self.beta_2 ** self.t) /
(1 - self.beta_1 ** self.t))
param_list = get_param_list(layer_list)
scale_factor = self.clip_gradient_norm(param_list, self.gradient_clip_norm)
for (param, grad), states in param_list:
param.rounding = self.stochastic_round
if len(states) == 0:
# running_1st_mom, running_2nd_mom
states.extend([self.be.zeros_like(grad) for i in range(2)])
grad = grad / self.be.bsz
grad = self.clip_value(grad, self.gradient_clip_value)
m, v = states
m[:] = m * self.beta_1 + (1. - self.beta_1) * grad
v[:] = v * self.beta_2 + (1. - self.beta_2) * grad * grad
param[:] = self.clip_value(
param - (scale_factor * l * m)
/ (self.be.sqrt(v) + self.epsilon), self.param_clip_value)
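# Standalone illustrative sketch (plain numpy, no backend): one Adam step in the
# same bias-corrected form used by Adam.optimize above, where the correction is
# folded into the step size. All tensor values and hyperparameters are hypothetical.
def _adam_step_sketch(param, grad, m, v, t, lrate=0.001, beta_1=0.9, beta_2=0.999, eps=1e-8):
    step = lrate * np.sqrt(1 - beta_2 ** t) / (1 - beta_1 ** t)  # bias-corrected step size
    m = beta_1 * m + (1 - beta_1) * grad           # first-moment running average
    v = beta_2 * v + (1 - beta_2) * grad * grad    # second-moment running average
    param = param - step * m / (np.sqrt(v) + eps)
    return param, m, v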
class ShiftAdaMax(Optimizer):
"""
Shift based AdaMax. http://arxiv.org/pdf/1602.02830v3.pdf
"""
def __init__(self, stochastic_round=False, learning_rate=0.002, beta_1=0.9, beta_2=0.999,
epsilon=1e-8, schedule=Schedule(), name="ShiftAdaMax"):
"""
Args:
stochastic_round (bool): Set this to True for stochastic rounding.
If False rounding will be to nearest.
If True will perform stochastic rounding using default width.
Only affects the gpu backend.
learning_rate (float): the multiplicative coefficient of updates
beta_1 (float): Adam parameter beta1
beta_2 (float): Adam parameter beta2
epsilon (float): numerical stability parameter
schedule (neon.optimizers.optimizer.Schedule, optional): Learning rate schedule.
Defaults to a constant.
"""
super(ShiftAdaMax, self).__init__(name=name)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.learning_rate = learning_rate
self.stochastic_round = stochastic_round
self.schedule = schedule
def optimize(self, layer_list, epoch):
"""
Apply the learning rule to all the layers and update the states.
Arguments:
param_list (list): a list of tuples of the form ((param, grad), state),
corresponding to parameters, grads, and states of layers to be updated
epoch (int): the current epoch, needed for the Schedule object.
"""
t = epoch + 1
lrate = self.schedule.get_learning_rate(self.learning_rate, epoch)
l = lrate / (1 - self.beta_1 ** t)
param_list = get_param_list(layer_list)
for (param, grad), states in param_list:
param.rounding = self.stochastic_round
if len(states) == 0:
# running_1st_mom, running_2nd_mom
states.extend([self.be.zeros_like(grad) for i in range(3)])
grad = grad / self.be.bsz
m, v, inv_v = states
m[:] = m * self.beta_1 + (1. - self.beta_1) * grad
v[:] = self.be.maximum(v * self.beta_2, self.be.absolute(grad))
inv_v[:] = 1.0 / (v + self.epsilon)
param[:] = param - self.be.shift(self.be.shift(m, inv_v), l)
self.be.clip(param, -1, 1, param)
class MultiOptimizer(Optimizer):
"""
A wrapper class for using multiple Optimizers within the same model.
To assign different optimizers to different layers we first define
the different optimizers:
.. code-block:: python
from neon.optimizers import GradientDescentMomentum, RMSProp
optimizer_A = GradientDescentMomentum(learning_rate=0.01, momentum_coef=0.9)
optimizer_B = GradientDescentMomentum(learning_rate=0.05, momentum_coef=0.9)
optimizer_C = RMSProp(learning_rate=2e-3, decay_rate=0.95)
Then, we instantiate this class and pass a
dictionary mapping layers to optimizers. The keys can either be:
``default``, a layer class name (e.g. ``Bias``), or the Layer's name
attribute. The latter takes precedence for finer layer-to-layer control.
For example, if we have the following layers,
.. code-block:: python
layers = []
layers.append(Linear(nout = 100, init=Gaussian(), name="layer_one"))
layers.append(Linear(nout = 50, init=Gaussian(), name="layer_two"))
layers.append(Affine(nout = 5, init=Gaussian(), activation=Softmax()))
we can define multiple optimizers with
.. code-block:: python
from neon.optimizers import MultiOptimizer
# dictionary of mappings
mapping = {'default': optimizer_A, # default optimizer
'Linear': optimizer_B, # all layers from the Linear class
'layer_two': optimizer_C} # this overrides the previous entry
# use multiple optimizers
opt = MultiOptimizer(mapping)
After definition, we have the following mapping
+----------------------+----------------------------+
| Layer | Optimizer |
+======================+============================+
| ``layer_one`` | ``optimizer_B`` |
+----------------------+----------------------------+
| ``layer_two`` | ``optimizer_C`` |
+----------------------+----------------------------+
| ``Affine.Linear`` | ``optimizer_B`` |
+----------------------+----------------------------+
| ``Affine.Bias`` | ``optimizer_A`` |
+----------------------+----------------------------+
| ``Affine.Softmax`` | ``None (no parameters)`` |
+----------------------+----------------------------+
"""
def __init__(self, optimizer_mapping, name=None):
"""
Class constructor.
Args:
optimizer_mapping (dict): dictionary specifying the mapping of layers to optimizers.
Key: ``'default'``, layer class name or layer `name` attribute.
Don't name your layers ``'default'``. Value: the optimizer object to
use for those layers.
"""
super(MultiOptimizer, self).__init__(name=name)
self.optimizer_mapping = optimizer_mapping
assert 'default' in self.optimizer_mapping, "Must specify a default " \
"optimizer in layer type to optimizer mapping"
self.map_list = None
self.map_list_cache = dict()
@classmethod
def gen_class(cls, pdict):
for key in pdict['optimizer_mapping']:
# these should be optimizers
typ = pdict['optimizer_mapping'][key]['type']
ocls = load_class(typ)
if 'config' not in pdict['optimizer_mapping'][key]:
pdict['optimizer_mapping'][key]['config'] = {}
conf = pdict['optimizer_mapping'][key]['config']
pdict['optimizer_mapping'][key] = ocls.gen_class(conf)
return cls(**pdict)
def get_description(self):
desc = {'type': self.modulenm}
desc['config'] = {'optimizer_mapping': {}}
for key in self.optimizer_mapping:
opt_desc = self.optimizer_mapping[key].get_description()
desc['config']['optimizer_mapping'][key] = opt_desc
return desc
def _map_optimizers(self, layer_list):
"""
maps the optimizers to their corresponding layers
"""
map_list = dict()
for layer in layer_list:
classname = layer.__class__.__name__
name = layer.name
opt = None
if name in self.optimizer_mapping:
opt = self.optimizer_mapping[name]
elif classname in self.optimizer_mapping:
opt = self.optimizer_mapping[classname]
else:
opt = self.optimizer_mapping['default']
if opt not in map_list:
map_list[opt] = [layer]
else:
map_list[opt].append(layer)
return map_list
def _reset_mapping(self, new_mapping):
"""
Pass this optimizer a new mapping, and on subsequent optimize call, the
mapping will be refreshed (since map_list will be recreated)
"""
self.optimizer_mapping = new_mapping
self.map_list = None
def optimize(self, layer_list, epoch):
"""
Determine which optimizer in the container should go with which layers,
then apply their optimize functions to those layers.
Notes:
We can recalculate ``map_list`` in case ``optimizer_mapping`` changes
during training.
"""
if not id(layer_list) in self.map_list_cache:
self.map_list = self._map_optimizers(layer_list)
self.map_list_cache[id(layer_list)] = self.map_list
else:
self.map_list = self.map_list_cache[id(layer_list)]
for opt in self.map_list:
opt.optimize(self.map_list[opt], epoch)
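# Standalone illustrative sketch: the precedence rule used by
# MultiOptimizer._map_optimizers above -- a layer's name attribute wins over its
# class name, which wins over 'default'. The mapping keys and layer attributes
# passed in are hypothetical.
def _optimizer_lookup_sketch(optimizer_mapping, layer_name, layer_classname):
    if layer_name in optimizer_mapping:
        return optimizer_mapping[layer_name]
    if layer_classname in optimizer_mapping:
        return optimizer_mapping[layer_classname]
    return optimizer_mapping['default']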
|
NervanaSystems/neon
|
neon/optimizers/optimizer.py
|
Python
|
apache-2.0
| 43,255
|
[
"Gaussian"
] |
25d72d3ecf948198f7e57b50d49bb3d3095cc07249afa2b07bb5d126f4ebea40
|
import webapp2
from google.appengine.api import mail
from google.appengine.api import users
from google.appengine.api.taskqueue import taskqueue
import gig
import member
import assoc
import logging
import re
import pickle
import os
from webapp2_extras import i18n
from webapp2_extras.i18n import gettext as _
# need this for sending stuff to the superuser - can't use the decorated version
_bare_admin_email_address = 'gigomatic.superuser@gmail.com'
# The MailServiceStub class used by dev_appserver can't handle a sender address that's more
# than a raw email address, but production GAE doesn't have this limitation.
if os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine/'):
_admin_email_address = 'Gig-o-matic <gigomatic.superuser@gmail.com>'
else:
_admin_email_address = 'gigomatic.superuser@gmail.com'
def validate_email(to):
# + and . are allowed in username, and . in the domain name, but neither can be
# the leading character. Alphanumerics, - and _ are allowed anywhere.
valid_address = r"^[_a-z0-9-]+((\.|\+)[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$"
if (not mail.is_email_valid(to)) or (re.match(valid_address, to.lower()) is None):
logging.error("invalid recipient address '{0}'".format(to))
return False
else:
return True
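# Standalone illustrative sketch (not called anywhere): the kinds of addresses
# the pattern in validate_email() accepts and rejects. The example addresses are
# made up, and mail.is_email_valid() is deliberately left out so this stays
# self-contained.
def _validate_email_examples():
    pattern = r"^[_a-z0-9-]+((\.|\+)[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$"
    accepted = re.match(pattern, "band.leader+gigs@example.com") is not None  # True
    rejected = re.match(pattern, ".leader@example.com") is not None           # False: leading '.'
    return accepted, rejected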
def _send_admin_mail(to, subject, body, html=None, reply_to=None):
if validate_email(to) is False:
return False
message = mail.EmailMessage()
message.sender = _admin_email_address
message.to = to
message.subject = subject
message.body = body
if reply_to is not None:
message.reply_to = reply_to
if html is not None:
message.html = html
try:
message.send()
return True
except Exception as e:
logging.error("Failed to send mail {0} to {1}.\n{2}".format(subject, to, e))
return False
def send_registration_email(the_email, the_url):
return _send_admin_mail(the_email, _('Welcome to Gig-o-Matic'), _('welcome_msg_email').format(the_url))
def send_band_accepted_email(the_email, the_band):
return _send_admin_mail(the_email, _('Gig-o-Matic: Confirmed!'),
_('member_confirmed_email').format(the_band.name, the_band.key.urlsafe()))
def send_forgot_email(the_email, the_url):
return _send_admin_mail(the_email, _('Gig-o-Matic Password Reset'), _('forgot_password_email').format(the_url))
# send an email announcing a new gig
def send_newgig_email(the_member, the_gig, the_band, the_gig_url, is_edit=False, is_reminder=False, change_string=""):
the_locale=the_member.preferences.locale
the_email_address = the_member.email_address
if not mail.is_email_valid(the_email_address):
return False
i18n.get_i18n().set_locale(the_locale)
contact_key=the_gig.contact
if contact_key:
contact = contact_key.get()
contact_name=contact.name
else:
contact = None
contact_name="??"
# get the special URLs for "yes" and "no" answers
the_yes_url, the_no_url, the_snooze_url = gig.get_confirm_urls(the_member, the_gig)
reply_to = None
if contact is not None:
reply_to = contact.email_address
if is_edit:
title_string='{0} ({1})'.format(_('Gig Edit'),change_string)
elif is_reminder:
title_string='Gig Reminder:'
else:
title_string=_('New Gig:')
the_date_string = "{0} ({1})".format(member.format_date_for_member(the_member, the_gig.date),
member.format_date_for_member(the_member, the_gig.date, "day"))
the_time_string = ""
if the_gig.calltime:
the_time_string = u'{0} ({1})'.format(the_gig.calltime, _('Call Time'))
if the_gig.settime:
if the_time_string:
the_time_string = u'{0}, '.format(the_time_string)
the_time_string = u'{0}{1} ({2})'.format(the_time_string,the_gig.settime, _('Set Time'))
if the_gig.endtime:
if the_time_string:
the_time_string = u'{0}, '.format(the_time_string)
the_time_string = u'{0}{1} ({2})'.format(the_time_string,the_gig.endtime, _('End Time'))
the_status_string = [_('Unconfirmed'), _('Confirmed!'), _('Cancelled!')][the_gig.status]
def format_body(body_format_str):
return body_format_str.format(the_band.name, the_gig.title, the_date_string, the_time_string, contact_name,
the_status_string, the_gig.details, the_gig_url, "", the_yes_url, the_no_url,
the_snooze_url)
if is_edit:
body = _('edited_gig_email').format(the_band.name, the_gig.title, the_date_string, the_time_string, contact_name,
the_status_string, the_gig.details, the_gig_url, change_string)
html = None
elif is_reminder:
body = format_body(_('reminder_gig_email'))
html = format_body(_('reminder_gig_email_html'))
else:
body = format_body(_('new_gig_email'))
html = format_body(_('new_gig_email_html'))
return _send_admin_mail(the_email_address, u'{0} {1}'.format(title_string, the_gig.title), body, html=html, reply_to=reply_to)
def announce_new_gig(the_gig, the_gig_url, is_edit=False, is_reminder=False, change_string="", the_members=[]):
the_params = pickle.dumps({'the_gig_key': the_gig.key,
'the_gig_url': the_gig_url,
'is_edit': is_edit,
'is_reminder': is_reminder,
'change_string': change_string,
'the_members': the_members})
taskqueue.add(
url='/announce_new_gig_handler',
params={'the_params': the_params
})
class AnnounceNewGigHandler(webapp2.RequestHandler):
def post(self):
the_params = pickle.loads(self.request.get('the_params'))
the_gig_key = the_params['the_gig_key']
the_gig_url = the_params['the_gig_url']
is_edit = the_params['is_edit']
is_reminder = the_params['is_reminder']
change_string = the_params['change_string']
the_members = the_params['the_members']
the_gig = the_gig_key.get()
the_band_key = the_gig_key.parent()
the_assocs = assoc.get_confirmed_assocs_of_band_key(the_band_key, include_occasional=the_gig.invite_occasionals)
if is_reminder and the_members:
recipient_assocs=[]
for a in the_assocs:
if a.member in the_members:
recipient_assocs.append(a)
else:
recipient_assocs = the_assocs
logging.info('announcing gig {0} to {1} people'.format(the_gig_key,len(recipient_assocs)))
the_shared_params = pickle.dumps({
'the_gig_key': the_gig_key,
'the_band_key': the_band_key,
'the_gig_url': the_gig_url,
'is_edit': is_edit,
'is_reminder': is_reminder,
'change_string': change_string
})
for an_assoc in recipient_assocs:
if an_assoc.email_me:
the_member_key = an_assoc.member
the_member_params = pickle.dumps({
'the_member_key': the_member_key
})
task = taskqueue.add(
queue_name='emailqueue',
url='/send_new_gig_handler',
params={'the_shared_params': the_shared_params,
'the_member_params': the_member_params
})
logging.info('announced gig {0}'.format(the_gig_key))
self.response.write( 200 )
class SendNewGigHandler(webapp2.RequestHandler):
def post(self):
the_shared_params = pickle.loads(self.request.get('the_shared_params'))
the_member_params = pickle.loads(self.request.get('the_member_params'))
the_member_key = the_member_params['the_member_key']
the_gig_key = the_shared_params['the_gig_key']
the_band_key = the_shared_params['the_band_key']
the_gig_url = the_shared_params['the_gig_url']
is_edit = the_shared_params['is_edit']
is_reminder = the_shared_params['is_reminder']
change_string = the_shared_params['change_string']
send_newgig_email(the_member_key.get(), the_gig_key.get(), the_band_key.get(), the_gig_url, is_edit, is_reminder, change_string)
self.response.write( 200 )
def send_new_member_email(band,new_member):
members=assoc.get_admin_members_from_band_key(band.key)
for the_member in members:
send_the_new_member_email(the_member.preferences.locale, the_member.email_address, new_member=new_member, the_band=band)
def send_the_new_member_email(the_locale, the_email_address, new_member, the_band):
i18n.get_i18n().set_locale(the_locale)
return _send_admin_mail(the_email_address,
_('Gig-o-Matic New Member for band {0}').format(the_band.name),
_('new_member_email').format('{0} ({1})'.format(new_member.name, new_member.email_address),
the_band.name, the_band.key.urlsafe()))
def send_new_band_via_invite_email(the_band, the_member):
return _send_admin_mail(the_member.email_address, _('Gig-o-Matic New Band Invite'),
_('new_band_via_invite_email').format(the_band.name))
def send_gigo_invite_email(the_band, the_member, the_url):
return _send_admin_mail(the_member.email_address, _('Invitation to Join Gig-o-Matic'),
_('gigo_invite_email').format(the_band.name, the_url))
def send_the_pending_email(the_email_address, the_confirm_link):
return _send_admin_mail(the_email_address, _('Gig-o-Matic Confirm Email Address'),
_('confirm_email_address_email').format(the_confirm_link))
def notify_superuser_of_archive(the_num):
return _send_admin_mail(_bare_admin_email_address, 'Gig-o-Matic Auto-Archiver',
"Yo! The Gig-o-Matic archived {0} gigs last night.".format(the_num))
def notify_superuser_of_old_tokens(the_num):
return _send_admin_mail(_bare_admin_email_address, 'Gig-o-Matic Old Tokens',
"Yo! The Gig-o-Matic found {0} old signup tokens last night.".format(the_num))
def send_band_request_email(the_email_address, the_name, the_info):
if not mail.is_email_valid(the_email_address):
return False
body = u"""
Hi there! Someone has requested to add their band to the Gig-o-Matic. SO EXCITING!
{0}
{1}
{2}
Enjoy,
Team Gig-o-Matic
""".format(the_email_address, the_name, the_info)
return _send_admin_mail(_bare_admin_email_address, 'Gig-o-Matic New Band Request', body)
|
bklang/GO2
|
goemail.py
|
Python
|
gpl-3.0
| 10,926
|
[
"exciting"
] |
1afd6c553bbb38530416db1bdc575a48429147f9b636622660b10e37f825de5c
|
"""
Tools for different procedure estimations
"""
__author__ = "Luc Anselin luc.anselin@asu.edu, \
Pedro V. Amaral pedro.amaral@asu.edu, \
David C. Folch david.folch@asu.edu, \
Daniel Arribas-Bel darribas@asu.edu,\
Levi Wolf levi.john.wolf@gmail.com"
import numpy as np
from scipy import sparse as SP
from scipy.sparse import linalg as SPla
import scipy.optimize as op
import numpy.linalg as la
from pysal import lag_spatial
import copy
class RegressionPropsY(object):
"""
Helper class that adds common regression properties to any regression
class that inherits it. It takes no parameters. See BaseOLS for example
usage.
Parameters
----------
Attributes
----------
mean_y : float
Mean of the dependent variable
std_y : float
Standard deviation of the dependent variable
"""
@property
def mean_y(self):
try:
return self._cache['mean_y']
except AttributeError:
self._cache = {}
self._cache['mean_y'] = np.mean(self.y)
except KeyError:
self._cache['mean_y'] = np.mean(self.y)
return self._cache['mean_y']
@mean_y.setter
def mean_y(self, val):
try:
self._cache['mean_y'] = val
except AttributeError:
self._cache = {}
self._cache['mean_y'] = val
except KeyError:
self._cache['mean_y'] = val
@property
def std_y(self):
try:
return self._cache['std_y']
except AttributeError:
self._cache = {}
self._cache['std_y'] = np.std(self.y, ddof=1)
except KeyError:
self._cache['std_y'] = np.std(self.y, ddof=1)
return self._cache['std_y']
@std_y.setter
def std_y(self, val):
try:
self._cache['std_y'] = val
except AttributeError:
self._cache = {}
self._cache['std_y'] = val
except KeyError:
self._cache['std_y'] = val
class RegressionPropsVM(object):
"""
Helper class that adds common regression properties to any regression
class that inherits it. It takes no parameters. See BaseOLS for example
usage.
Parameters
----------
Attributes
----------
utu : float
Sum of the squared residuals
sig2n : float
Sigma squared with n in the denominator
sig2n_k : float
Sigma squared with n-k in the denominator
vm : array
Variance-covariance matrix (kxk)
"""
@property
def utu(self):
try:
return self._cache['utu']
except AttributeError:
self._cache = {}
self._cache['utu'] = np.sum(self.u ** 2)
except KeyError:
self._cache['utu'] = np.sum(self.u ** 2)
return self._cache['utu']
@utu.setter
def utu(self, val):
try:
self._cache['utu'] = val
except AttributeError:
self._cache = {}
self._cache['utu'] = val
except KeyError:
self._cache['utu'] = val
@property
def sig2n(self):
try:
return self._cache['sig2n']
except AttributeError:
self._cache = {}
self._cache['sig2n'] = self.utu / self.n
except KeyError:
self._cache['sig2n'] = self.utu / self.n
return self._cache['sig2n']
@sig2n.setter
def sig2n(self, val):
try:
self._cache['sig2n'] = val
except AttributeError:
self._cache = {}
self._cache['sig2n'] = val
except KeyError:
self._cache['sig2n'] = val
@property
def sig2n_k(self):
try:
return self._cache['sig2n_k']
except AttributeError:
self._cache = {}
self._cache['sig2n_k'] = self.utu / (self.n - self.k)
except KeyError:
self._cache['sig2n_k'] = self.utu / (self.n - self.k)
return self._cache['sig2n_k']
@sig2n_k.setter
def sig2n_k(self, val):
try:
self._cache['sig2n_k'] = val
except AttributeError:
self._cache = {}
self._cache['sig2n_k'] = val
except KeyError:
self._cache['sig2n_k'] = val
@property
def vm(self):
try:
return self._cache['vm']
except AttributeError:
self._cache = {}
self._cache['vm'] = np.dot(self.sig2, self.xtxi)
except KeyError:
self._cache['vm'] = np.dot(self.sig2, self.xtxi)
finally:
return self._cache['vm']
@vm.setter
def vm(self, val):
try:
self._cache['vm'] = val
except AttributeError:
self._cache = {}
self._cache['vm'] = val
except KeyError:
self._cache['vm'] = val
def get_A1_het(S):
"""
Builds A1 as in Arraiz et al [Arraiz2010]_
.. math::
A_1 = W' W - diag(w'_{.i} w_{.i})
...
Parameters
----------
S : csr_matrix
PySAL W object converted into Scipy sparse matrix
Returns
-------
Implicit : csr_matrix
A1 matrix in scipy sparse format
"""
StS = S.T * S
d = SP.spdiags([StS.diagonal()], [0], S.get_shape()[0], S.get_shape()[1])
d = d.asformat('csr')
return StS - d
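# Standalone illustrative sketch: building A1 = W'W - diag(w'_{.i} w_{.i}) for a
# tiny hypothetical sparse weights matrix, mirroring get_A1_het() above.
def _get_A1_het_example():
    S = SP.csr_matrix(np.array([[0., 1., 0.],
                                [1., 0., 1.],
                                [0., 1., 0.]]))
    StS = S.T * S
    d = SP.spdiags([StS.diagonal()], [0], S.shape[0], S.shape[1]).asformat('csr')
    return StS - d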
def get_A1_hom(s, scalarKP=False):
"""
Builds A1 for the spatial error GM estimation with homoscedasticity as in
Drukker et al. [Drukker2011]_ (p. 9).
.. math::
A_1 = \{1 + [n^{-1} tr(W'W)]^2\}^{-1} \[W'W - n^{-1} tr(W'W) I\]
...
Parameters
----------
s : csr_matrix
PySAL W object converted into Scipy sparse matrix
scalarKP : boolean
Flag to include scalar corresponding to the first moment
condition as in Drukker et al. [Drukker2011]_ (Defaults to False)
Returns
-------
Implicit : csr_matrix
A1 matrix in scipy sparse format
"""
n = float(s.shape[0])
wpw = s.T * s
twpw = np.sum(wpw.diagonal())
e = SP.eye(n, n, format='csr')
e.data = np.ones(n) * (twpw / n)
num = wpw - e
if not scalarKP:
return num
else:
den = 1. + (twpw / n) ** 2.
return num / den
def get_A2_hom(s):
"""
Builds A2 for the spatial error GM estimation with homoscedasticity as in
Anselin (2011) [Anselin2011]_
.. math::
A_2 = \dfrac{(W + W')}{2}
...
Parameters
----------
s : csr_matrix
PySAL W object converted into Scipy sparse matrix
Returns
-------
Implicit : csr_matrix
A2 matrix in scipy sparse format
"""
return (s + s.T) / 2.
def _moments2eqs(A1, s, u):
'''
Helper to compute G and g in a system of two equations as in
the heteroskedastic error models from Drukker et al. [Drukker2011]_
...
Parameters
----------
A1 : scipy.sparse.csr
A1 matrix as in the paper, different depending on whether
it is the homoskedastic or heteroskedastic model
s : W.sparse
Sparse representation of spatial weights instance
u : array
Residuals. nx1 array assumed to be aligned with w
Attributes
----------
moments : list
List of two arrays corresponding to the matrices 'G' and
'g', respectively.
'''
n = float(s.shape[0])
A1u = A1 * u
wu = s * u
g1 = np.dot(u.T, A1u)
g2 = np.dot(u.T, wu)
g = np.array([[g1][0][0], [g2][0][0]]) / n
G11 = np.dot(u.T, ((A1 + A1.T) * wu))
G12 = -np.dot((wu.T * A1), wu)
G21 = np.dot(u.T, ((s + s.T) * wu))
G22 = -np.dot(wu.T, (s * wu))
G = np.array([[G11[0][0], G12[0][0]], [G21[0][0], G22[0][0]]]) / n
return [G, g]
def optim_moments(moments_in, vcX=np.array([0])):
"""
Optimization of moments
...
Parameters
----------
moments : Moments
Instance of gmm_utils.moments_het with G and g
vcX : array
Optional. 2x2 array with the Variance-Covariance matrix to be used as
weights in the optimization (applies Cholesky
decomposition). Set empty by default.
Returns
-------
x, f, d : tuple
x -- position of the minimum
f -- value of func at the minimum
d -- dictionary of information from routine
d['warnflag'] is
0 if converged
1 if too many function evaluations
2 if stopped for another reason, given in d['task']
d['grad'] is the gradient at the minimum (should be 0 ish)
d['funcalls'] is the number of function calls made
"""
moments = copy.deepcopy(moments_in)
if vcX.any():
Ec = np.transpose(la.cholesky(la.inv(vcX)))
moments[0] = np.dot(Ec, moments_in[0])
moments[1] = np.dot(Ec, moments_in[1])
scale = np.min([[np.min(moments[0]), np.min(moments[1])]])
moments[0], moments[1] = moments[0] / scale, moments[1] / scale
if moments[0].shape[0] == 2:
optim_par = lambda par: foptim_par(
np.array([[float(par[0]), float(par[0]) ** 2.]]).T, moments)
start = [0.0]
bounds = [(-1.0, 1.0)]
if moments[0].shape[0] == 3:
optim_par = lambda par: foptim_par(
np.array([[float(par[0]), float(par[0]) ** 2., float(par[1])]]).T, moments)
start = [0.0, 0.0]
bounds = [(-1.0, 1.0), (0.0, None)]
lambdaX = op.fmin_l_bfgs_b(
optim_par, start, approx_grad=True, bounds=bounds)
return lambdaX[0][0]
def foptim_par(par, moments):
"""
Preparation of the function of moments for minimization
...
Parameters
----------
lambdapar : float
Spatial autoregressive parameter
moments : list
List of Moments with G (moments[0]) and g (moments[1])
Returns
-------
minimum : float
sum of square residuals (e) of the equation system
moments.g - moments.G * lambdapar = e
"""
vv = np.dot(moments[0], par)
vv2 = moments[1] - vv
return sum(vv2 ** 2)
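# Standalone illustrative sketch: evaluating the objective minimized by
# optim_moments()/foptim_par() above for a hypothetical 2x2 system of moment
# equations G * par ~ g.
def _foptim_par_example():
    G = np.array([[0.5, 0.2], [0.1, 0.4]])    # hypothetical moment matrix
    g = np.array([[0.3], [0.2]])               # hypothetical moment vector
    par = np.array([[0.4], [0.16]])            # candidate (lambda, lambda**2)
    residual = g - np.dot(G, par)
    return float(np.sum(residual ** 2))        # sum of squared residuals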
def get_spFilter(w, lamb, sf):
'''
Compute the spatially filtered variables
Parameters
----------
w : weight
PySAL weights instance
lamb : double
spatial autoregressive parameter
sf : array
the variable needed to compute the filter
Returns
--------
rs : array
spatially filtered variable
Examples
--------
>>> import numpy as np
>>> import pysal
>>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
>>> w=pysal.open(pysal.examples.get_path("columbus.gal")).read()
>>> solu = get_spFilter(w,0.5,y)
>>> print solu[0:5]
[[ -8.9882875]
[ -20.5685065]
[ -28.196721 ]
[ -36.9051915]
[-111.1298 ]]
'''
try:
result = sf - lamb * (w.sparse * sf)
except:
result = sf - lamb * (w * sf)
return result
def get_lags(w, x, w_lags):
'''
Calculates a given order of spatial lags and all the smaller orders
Parameters
----------
w : weight
PySAL weights instance
x : array
nxk arrays with the variables to be lagged
w_lags : integer
Maximum order of spatial lag
Returns
--------
rs : array
nxk*(w_lags+1) array with original and spatially lagged variables
'''
lag = lag_spatial(w, x)
spat_lags = lag
for i in range(w_lags - 1):
lag = lag_spatial(w, lag)
spat_lags = sphstack(spat_lags, lag)
return spat_lags
def inverse_prod(w, data, scalar, post_multiply=False, inv_method="power_exp", threshold=0.0000000001, max_iterations=None):
"""
Parameters
----------
w : Pysal W object
nxn Pysal spatial weights object
data : Numpy array
nx1 vector of data
scalar : float
Scalar value (typically rho or lambda)
post_multiply : boolean
If True then post-multiplies the data vector by the
inverse of the spatial filter, if false then
pre-multiplies.
inv_method : string
If "true_inv" uses the true inverse of W (slow);
If "power_exp" uses the power expansion method (default)
threshold : float
Test value to stop the iterations. Test is against
sqrt(increment' * increment), where increment is a
vector representing the contribution from each
iteration.
max_iterations : integer
Maximum number of iterations for the expansion.
Examples
--------
>>> import numpy, pysal
>>> import numpy.linalg as la
>>> np.random.seed(10)
>>> w = pysal.lat2W(5, 5)
>>> w.transform = 'r'
>>> data = np.random.randn(w.n)
>>> data.shape = (w.n, 1)
>>> rho = 0.4
>>> inv_pow = inverse_prod(w, data, rho, inv_method="power_exp")
>>> # true matrix inverse
>>> inv_reg = inverse_prod(w, data, rho, inv_method="true_inv")
>>> np.allclose(inv_pow, inv_reg, atol=0.0001)
True
>>> # test the transpose version
>>> inv_pow = inverse_prod(w, data, rho, inv_method="power_exp", post_multiply=True)
>>> inv_reg = inverse_prod(w, data, rho, inv_method="true_inv", post_multiply=True)
>>> np.allclose(inv_pow, inv_reg, atol=0.0001)
True
"""
if inv_method == "power_exp":
inv_prod = power_expansion(
w, data, scalar, post_multiply=post_multiply,
threshold=threshold, max_iterations=max_iterations)
elif inv_method == "true_inv":
try:
matrix = la.inv(np.eye(w.n) - (scalar * w.full()[0]))
except:
matrix = la.inv(np.eye(w.shape[0]) - (scalar * w))
if post_multiply:
inv_prod = spdot(data.T, matrix)
else:
inv_prod = spdot(matrix, data)
else:
raise Exception, "Invalid method selected for inversion."
return inv_prod
def power_expansion(w, data, scalar, post_multiply=False, threshold=0.0000000001, max_iterations=None):
"""
Compute the inverse of a matrix using the power expansion (Leontief
expansion). General form is:
.. math::
x &= (I - \rho W)^{-1}v = [I + \rho W + \rho^2 WW + \dots]v \\
&= v + \rho Wv + \rho^2 WWv + \dots
Examples
--------
Tests for this function are in inverse_prod()
"""
try:
ws = w.sparse
except:
ws = w
if post_multiply:
data = data.T
running_total = copy.copy(data)
increment = copy.copy(data)
count = 1
test = 10000000
if max_iterations == None:
max_iterations = 10000000
while test > threshold and count <= max_iterations:
if post_multiply:
increment = increment * ws * scalar
else:
increment = ws * increment * scalar
running_total += increment
test_old = test
test = la.norm(increment)
if test > test_old:
raise Exception, "power expansion will not converge, check model specification and that weights are less than 1"
count += 1
return running_total
def set_endog(y, x, w, yend, q, w_lags, lag_q):
# Create spatial lag of y
yl = lag_spatial(w, y)
# spatial and non-spatial instruments
if issubclass(type(yend), np.ndarray):
if lag_q:
lag_vars = sphstack(x, q)
else:
lag_vars = x
spatial_inst = get_lags(w, lag_vars, w_lags)
q = sphstack(q, spatial_inst)
yend = sphstack(yend, yl)
elif yend == None: # spatial instruments only
q = get_lags(w, x, w_lags)
yend = yl
else:
raise Exception, "invalid value passed to yend"
return yend, q
def set_endog_sparse(y, x, w, yend, q, w_lags, lag_q):
"""
Same as set_endog, but with a sparse object passed as weights instead of W object.
"""
yl = w * y
# spatial and non-spatial instruments
if issubclass(type(yend), np.ndarray):
if lag_q:
lag_vars = sphstack(x, q)
else:
lag_vars = x
spatial_inst = w * lag_vars
for i in range(w_lags - 1):
spatial_inst = sphstack(spatial_inst, w * spatial_inst)
q = sphstack(q, spatial_inst)
yend = sphstack(yend, yl)
elif yend == None: # spatial instruments only
q = w * x
for i in range(w_lags - 1):
q = sphstack(q, w * q)
yend = yl
else:
raise Exception, "invalid value passed to yend"
return yend, q
def iter_msg(iteration, max_iter):
if iteration == max_iter:
iter_stop = "Maximum number of iterations reached."
else:
iter_stop = "Convergence threshold (epsilon) reached."
return iter_stop
def sp_att(w, y, predy, w_y, rho):
xb = predy - rho * w_y
if np.abs(rho) < 1:
predy_sp = inverse_prod(w, xb, rho)
warn = None
# Note 1: Here if omitting pseudo-R2; If not, see Note 2.
resid_sp = y - predy_sp
else:
#warn = "Warning: Estimate for rho is outside the boundary (-1, 1). Computation of true inverse of W was required (slow)."
#predy_sp = inverse_prod(w, xb, rho, inv_method="true_inv")
warn = "*** WARNING: Estimate for spatial lag coefficient is outside the boundary (-1, 1). ***"
predy_sp = np.zeros(y.shape, float)
resid_sp = np.zeros(y.shape, float)
# resid_sp = y - predy_sp #Note 2: Here if computing true inverse; If not,
# see Note 1.
return predy_sp, resid_sp, warn
def spdot(a, b, array_out=True):
"""
Matrix multiplication function to deal with sparse and dense objects
Parameters
----------
a : array
first multiplication factor. Can either be sparse or dense.
b : array
second multiplication factor. Can either be sparse or dense.
array_out : boolean
If True (default) the output object is always a np.array
Returns
-------
ab : array
product of a times b. Sparse if a and b are sparse. Dense otherwise.
"""
if type(a).__name__ == 'ndarray' and type(b).__name__ == 'ndarray':
ab = np.dot(a, b)
elif type(a).__name__ == 'csr_matrix' or type(b).__name__ == 'csr_matrix' \
or type(a).__name__ == 'csc_matrix' or type(b).__name__ == 'csc_matrix':
ab = a * b
if array_out:
if type(ab).__name__ == 'csc_matrix' or type(ab).__name__ == 'csr_matrix':
ab = ab.toarray()
else:
raise Exception, "Invalid format for 'spdot' argument: %s and %s" % (
type(a).__name__, type(b).__name__)
return ab
def spmultiply(a, b, array_out=True):
"""
Element-wise multiplication function to deal with sparse and dense
objects. Both objects must be of the same type.
Parameters
----------
a : array
first multiplication factor. Can either be sparse or dense.
b : array
second multiplication factor. Can either be sparse or dense.
array_out : boolean
If True (default) the output object is always a np.array
Returns
-------
ab : array
elementwise multiplied object. Sparse if a is sparse. Dense otherwise.
"""
if type(a).__name__ == 'ndarray' and type(b).__name__ == 'ndarray':
ab = a * b
elif (type(a).__name__ == 'csr_matrix' or type(a).__name__ == 'csc_matrix') \
and (type(b).__name__ == 'csr_matrix' or type(b).__name__ == 'csc_matrix'):
ab = a.multiply(b)
if array_out:
if type(ab).__name__ == 'csc_matrix' or type(ab).__name__ == 'csr_matrix':
ab = ab.toarray()
else:
raise Exception, "Invalid format for 'spmultiply' argument: %s and %s" % (
type(a).__name__, type(b).__name__)
return ab
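# Editor's note -- illustrative usage sketch, not part of the original module: both
# operands must share type, e.g. spmultiply(np.ones((3, 2)), np.ones((3, 2))) uses plain
# elementwise a * b, while two csr/csc matrices go through a.multiply(b) (densified when
# array_out is True); mixing a sparse with a dense argument falls through to the
# Exception branch of spmultiply.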
def sphstack(a, b, array_out=False):
"""
Horizontal stacking of vectors (or matrices) to deal with sparse and dense objects
Parameters
----------
a : array or sparse matrix
First object.
b : array or sparse matrix
Object to be stacked next to a
array_out : boolean
If True the output object is a np.array; if False (default)
the output object is an np.array if both inputs are
arrays or CSR matrix if at least one input is a CSR matrix
Returns
-------
ab : array or sparse matrix
Horizontally stacked objects
"""
if type(a).__name__ == 'ndarray' and type(b).__name__ == 'ndarray':
ab = np.hstack((a, b))
elif type(a).__name__ == 'csr_matrix' or type(b).__name__ == 'csr_matrix':
ab = SP.hstack((a, b), format='csr')
if array_out:
if type(ab).__name__ == 'csr_matrix':
ab = ab.toarray()
else:
raise Exception, "Invalid format for 'sphstack' argument: %s and %s" % (
type(a).__name__, type(b).__name__)
return ab
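# Editor's note -- illustrative usage sketch, not part of the original module:
# >>> sphstack(np.ones((49, 2)), np.zeros((49, 1))).shape   # dense + dense -> np.hstack
# (49, 3)
# If either argument is a csr matrix, SP.hstack is used instead and the result stays
# sparse unless array_out=True is passed.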
def spbroadcast(a, b, array_out=False):
"""
Element-wise multiplication of a matrix and vector to deal with sparse
and dense objects
Parameters
----------
a : array or sparse matrix
Object with one or more columns.
b : array
Object with only one column
array_out : boolean
If True the output object is a np.array; if False (default)
the output object is an np.array if both inputs are
arrays or CSR matrix if at least one input is a CSR matrix
Returns
-------
ab : array or sparse matrix
Element-wise multiplication of a and b
"""
if type(a).__name__ == 'ndarray' and type(b).__name__ == 'ndarray':
ab = a * b
elif type(a).__name__ == 'csr_matrix':
b_mod = SP.lil_matrix((b.shape[0], b.shape[0]))
b_mod.setdiag(b)
ab = (a.T * b_mod).T
if array_out:
if type(ab).__name__ == 'csr_matrix':
ab = ab.toarray()
else:
raise Exception, "Invalid format for 'spbroadcast' argument: %s and %s" % (
type(a).__name__, type(b).__name__)
return ab
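# Editor's note -- illustrative usage sketch, not part of the original module: spbroadcast
# rescales every row of `a` by the matching entry of the single-column vector `b`; dense
# inputs rely on numpy broadcasting (a * b), while a csr `a` is handled by placing b on
# the diagonal of a sparse matrix and computing (a.T * diag(b)).T.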
def spmin(a):
"""
Minimum value in a matrix or vector to deal with sparse and dense objects
Parameters
----------
a : array or sparse matrix
Object with one or more columns.
Returns
-------
min a : int or float
minimum value in a
"""
if type(a).__name__ == 'ndarray':
return a.min()
elif type(a).__name__ == 'csr_matrix' or type(a).__name__ == 'csc_matrix':
try:
return min(a.data)
except:
if np.sum(a.data) == 0:
return 0
else:
raise Exception, "Error: could not evaluate the minimum value."
    else:
        raise Exception, "Invalid format for 'spmin' argument: %s" % type(a).__name__
def spmax(a):
"""
Maximum value in a matrix or vector to deal with sparse and dense objects
Parameters
----------
a : array or sparse matrix
Object with one or more columns.
Returns
-------
max a : int or float
maximum value in a
"""
if type(a).__name__ == 'ndarray':
return a.max()
elif type(a).__name__ == 'csr_matrix' or type(a).__name__ == 'csc_matrix':
try:
return max(a.data)
except:
if np.sum(a.data) == 0:
return 0
else:
raise Exception, "Error: could not evaluate the maximum value."
    else:
        raise Exception, "Invalid format for 'spmax' argument: %s" % type(a).__name__
def set_warn(reg, warn):
''' Groups warning messages for printout. '''
if warn:
try:
reg.warning += "Warning: " + warn + "\n"
except:
reg.warning = "Warning: " + warn + "\n"
else:
pass
def splogdet(spmat):
''' Large dimension log determinant computation. '''
#symmetric = np.allclose(spmat.T, spmat) #could branch for cholmod
symmetric = False
if symmetric:
#CHOLMOD could be used like:
#from scikits.sparse import cholmod as CHOLMOD
#det = np.sum(np.log(np.abs(CHOLMOD.cholesky(spmat).D())))
pass #leave in for scaffolding.
else:
if isinstance(spmat, SP.csc_matrix) or isinstance(spmat, SP.csr_matrix):
LU = SPla.splu(spmat)
det = np.sum(np.log(np.abs(LU.U.diagonal())))
elif SP.isspmatrix(spmat):
            return splogdet(spmat.tocsc())
else:
det = la.slogdet(spmat)
return det
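# Editor's note -- illustrative usage sketch, not part of the original module:
# >>> splogdet(SP.identity(3, format='csc') * 2.0)   # log(det(2*I_3)) = 3*log(2) ~ 2.0794
# The sparse branch reads the log-determinant off the U factor of an LU decomposition;
# the dense fall-back delegates to la.slogdet, which returns a (sign, logabsdet) pair
# rather than a plain float.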
def RegressionProps_basic(reg, betas=None, predy=None, u=None, sig2=None, sig2n_k=None, vm=None):
''' Set props based on arguments passed. '''
if betas is not None:
reg.betas = betas
if predy is not None:
reg.predy = predy
else:
try:
reg.predy = spdot(reg.z, reg.betas)
except:
reg.predy = spdot(reg.x, reg.betas)
if u is not None:
reg.u = u
else:
reg.u = reg.y - reg.predy
if sig2 is not None:
reg.sig2 = sig2
elif sig2n_k:
reg.sig2 = np.sum(reg.u ** 2) / (reg.n - reg.k)
else:
reg.sig2 = np.sum(reg.u ** 2) / reg.n
if vm is not None:
reg.vm = vm
def _test():
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
|
schmidtc/pysal
|
pysal/spreg/utils.py
|
Python
|
bsd-3-clause
| 27,878
|
[
"COLUMBUS"
] |
d53c6ba6777be1e2ebb25755703aa088d06ff83bafddf3a6d55708235a40331f
|
import os
import util
import urllib
import helper
from util import *
from xml.etree.ElementTree import *
#friendly name : db column, missing filter statement
gameproperties = {'Title' : ['name', "name = ''"],
'Description' : ['description', "description = ''"],
'Genre' : ['genre', "Id NOT IN (SELECT GameId From GenreGame)"],
'Developer' : ['developerId', "developerId is NULL"],
'Publisher' : ['publisherId', "publisherId is NULL"],
'Reviewer' : ['reviewerId', "reviewerId is NULL"],
'Release Year' : ['yearId', "yearId is NULL"],
'Rating' : ['rating', "rating = ''"],
'Votes' : ['numVotes', "numVotes is NULL"],
'Region' : ['region', "region = ''"],
'Media' : ['media', "media = ''"],
'Max. Players' : ['maxPlayers', "maxPlayers = ''"],
'Controller' : ['controllerType', "controllerType = ''"],
'Perspective' : ['perspective', "perspective = ''"],
'Original Title' : ['originalTitle', "originalTitle = ''"],
'Alternate Title' : ['alternateTitle', "alternateTitle = ''"],
'Translated By' : ['translatedBy', "translatedBy = ''"],
'Version' : ['version', "version = ''"],
'Url' : ['url', "url = ''"]
}
consoleDict = {
#name, mobygames-id, thegamesdb, archive vg
'Other' : ['0', '', ''],
'3DO' : ['35', '3DO', '3do'],
'Amiga' : ['19', 'Amiga', 'amiga'],
'Amiga CD32' : ['56', '', 'cd32'],
'Amstrad CPC' : ['60', 'Amstrad CPC', 'cpc'],
'Apple II' : ['31', '', 'appleii'],
'Atari 2600' : ['28', 'Atari 2600', 'atari2600'],
'Atari 5200' : ['33', 'Atari 5200', 'atari5200'],
'Atari 7800' : ['34', 'Atari 7800', 'atari7800'],
'Atari 8-bit' : ['39', '', 'atari8bit'],
'Atari ST' : ['24', '', 'ast'],
'BBC Micro' : ['92', '', 'bbc'],
'BREW' : ['63', '', ''],
'CD-i' : ['73', '', 'cdi'],
'Channel F' : ['76', '', 'channelf'],
'ColecoVision' : ['29', 'Colecovision', 'colecovision'],
'Commodore 128' : ['61', '', ''],
'Commodore 64' : ['27', 'Commodore 64', 'c64'],
'Commodore PET/CBM' : ['77', '', 'pet'],
'DoJa' : ['72', '', ''],
'DOS' : ['2', '', ''],
'Dragon 32/64' : ['79', '', ''],
'Dreamcast' : ['8', 'Sega Dreamcast', 'dreamcast'],
'Electron' : ['93', '', ''],
'ExEn' : ['70', '', ''],
'Game Boy' : ['10', 'Nintendo Gameboy', 'gameboy'],
'Game Boy Advance' : ['12', 'Nintendo Gameboy Advance', 'gba'],
'Game Boy Color' : ['11', 'Nintendo Game Boy Color', 'gbc'],
'GameCube' : ['14', 'Nintendo GameCube', 'gamecube'],
'Game Gear' : ['25', 'Sega Game Gear', 'gamegear'],
'Genesis' : ['16', 'Sega Genesis', 'genesis'],
'Gizmondo' : ['55', '', 'gizmondo'],
'Intellivision' : ['30', 'Intellivision', 'intellivision'],
'Jaguar' : ['17', 'Atari Jaguar', 'jaguar'],
'Linux' : ['1', '', ''],
'Lynx' : ['18', '', 'lynx'],
'Macintosh' : ['74', 'Mac OS', ''],
'MAME' : ['0', 'Arcade', ''],
'Mophun' : ['71', '', ''],
'MSX' : ['57', '', 'msx'],
'Neo Geo' : ['36', 'NeoGeo', 'neo'],
'Neo Geo CD' : ['54', '', 'neogeocd'],
'Neo Geo Pocket' : ['52', '', ''],
'Neo Geo Pocket Color' : ['53', '', 'ngpc'],
'NES' : ['22', 'Nintendo Entertainment System (NES)', 'nes'],
'N-Gage' : ['32', '', 'ngage'],
'Nintendo 64' : ['9', 'Nintendo 64', 'n64'],
'Nintendo DS' : ['44', 'Nintendo DS', ''],
'Nintendo DSi' : ['87', '', ''],
'Odyssey' : ['75', '', 'odyssey'],
'Odyssey 2' : ['78', '', 'odyssey2'],
'PC-88' : ['94', '', 'pc88'],
'PC-98' : ['95', '', 'pc98'],
'PC Booter' : ['4', '', ''],
'PC-FX' : ['59', '', 'pcfx'],
'PlayStation' : ['6', 'Sony Playstation', 'ps'],
'PlayStation 2' : ['7', 'Sony Playstation 2', 'ps2'],
'PlayStation 3' : ['81', 'Sony Playstation 3', ''],
'PSP' : ['46', 'Sony PSP', ''],
'SEGA 32X' : ['21', 'Sega 32X', 'sega32x'],
'SEGA CD' : ['20', 'Sega CD', 'segacd'],
'SEGA Master System' : ['26', 'Sega Master System', 'sms'],
'SEGA Saturn' : ['23', 'Sega Saturn', 'saturn'],
'SNES' : ['15', 'Super Nintendo (SNES)', 'snes'],
'Spectravideo' : ['85', '', ''],
'TI-99/4A' : ['47', '', 'ti99'],
'TRS-80' : ['58', '', ''],
'TRS-80 CoCo' : ['62', '', ''],
'TurboGrafx-16' : ['40', 'TurboGrafx 16', 'tg16'],
'TurboGrafx CD' : ['45', '', ''],
'Vectrex' : ['37', '', 'vectrex'],
'VIC-20' : ['43', '', 'vic20'],
'Virtual Boy' : ['38', 'Nintendo Virtual Boy', 'virtualboy'],
'V.Smile' : ['42', '', ''],
'Wii' : ['82', 'Nintendo Wii', ''],
'Windows' : ['3', 'PC', ''],
'Windows 3.x' : ['5', '', ''],
'WonderSwan' : ['48', '', 'wonderswan'],
'WonderSwan Color' : ['49', '', ''],
'Xbox' : ['13', 'Microsoft Xbox', 'xbox'],
'Xbox 360' : ['69', 'Microsoft Xbox 360', ''],
'Zeebo' : ['88', '', ''],
'Zodiac' : ['68', '', 'zod'],
'ZX Spectr' : ['41', 'Sinclair ZX Spectrum', '']}
missingFilterOptions = {util.localize(32157) : util.localize(32158),
util.localize(32159) : util.localize(32160),
util.localize(32161) : util.localize(32162)}
def getPlatformByRomCollection(source, romCollectionName):
platform = ''
if(source.find('mobygames.com') != -1):
try:
platform = consoleDict[romCollectionName][0]
except:
Logutil.log('Could not find platform name for Rom Collection %s' %romCollectionName, util.LOG_LEVEL_WARNING)
elif(source.find('thegamesdb.net') != -1):
try:
platform = consoleDict[romCollectionName][1]
except:
Logutil.log('Could not find platform name for Rom Collection %s' %romCollectionName, util.LOG_LEVEL_WARNING)
elif(source.find('archive.vg') != -1):
try:
platform = consoleDict[romCollectionName][2]
except:
Logutil.log('Could not find platform name for Rom Collection %s' %romCollectionName, util.LOG_LEVEL_WARNING)
return platform
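# Editor's note -- illustrative example; the URL below is hypothetical:
# getPlatformByRomCollection('http://thegamesdb.net/api/GetGamesList.php?platform=%PLATFORM%', 'SNES')
# matches the 'thegamesdb.net' branch and returns consoleDict['SNES'][1], i.e.
# 'Super Nintendo (SNES)'; an unknown collection name only logs a warning and yields ''.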
imagePlacingDict = {'gameinfobig' : 'one big',
'gameinfobigVideo' : 'one big or video',
'gameinfosmall' : 'four small',
'gameinfosmallVideo' : 'three small + video',
'gameinfomamemarquee' : 'MAME: marquee in list',
'gameinfomamecabinet' : 'MAME: cabinet in list'}
class FileType:
name = ''
id = -1
type = ''
parent = ''
class ImagePlacing:
name = ''
fileTypesForGameList = None
fileTypesForGameListSelected = None
fileTypesForMainView1 = None
fileTypesForMainView2 = None
fileTypesForMainView3 = None
fileTypesForMainViewBackground = None
fileTypesForMainViewGameInfoBig = None
fileTypesForMainViewGameInfoUpperLeft = None
fileTypesForMainViewGameInfoUpperRight = None
fileTypesForMainViewGameInfoLowerLeft = None
fileTypesForMainViewGameInfoLowerRight = None
fileTypesForMainViewGameInfoUpper = None
fileTypesForMainViewGameInfoLower = None
fileTypesForMainViewGameInfoLeft = None
fileTypesForMainViewGameInfoRight = None
fileTypesForMainViewVideoWindowBig = None
fileTypesForMainViewVideoWindowSmall = None
fileTypesForMainViewVideoFullscreen = None
class MediaPath:
path = ''
fileType = None
class Scraper:
parseInstruction = ''
source = ''
sourceAppend = ''
encoding = 'utf-8'
returnUrl = False
replaceKeyString = ''
replaceValueString = ''
class Site:
name = ''
descFilePerGame = False
searchGameByCRC = True
searchGameByCRCIgnoreRomName = False
useFoldernameAsCRC = False
useFilenameAsCRC = False
scrapers = None
class MissingFilter:
andGroup = []
orGroup = []
class RomCollection:
id = -1
name = ''
useBuiltinEmulator = False
gameclient = ''
emulatorCmd = ''
preCmd = ''
postCmd = ''
emulatorParams = ''
romPaths = None
saveStatePath = ''
saveStateParams = ''
mediaPaths = None
scraperSites = None
imagePlacingMain = None
imagePlacingInfo = None
autoplayVideoMain = True
autoplayVideoInfo = True
ignoreOnScan = False
allowUpdate = True
useEmuSolo = False
usePopen = False
maxFolderDepth = 99
useFoldernameAsGamename = False
doNotExtractZipFiles = False
makeLocalCopy = False
diskPrefix = '_Disk.*'
xboxCreateShortcut = False
xboxCreateShortcutAddRomfile = False
xboxCreateShortcutUseShortGamename = False
class Config:
romCollections = None
scraperSites = None
fileTypeIdsForGamelist = None
showHideOption = 'ignore'
missingFilterInfo = None
missingFilterArtwork = None
tree = None
configPath = None
def __init__(self, configFile):
Logutil.log('Config() set path to %s' %configFile, util.LOG_LEVEL_INFO)
self.configFile = configFile
def initXml(self):
Logutil.log('initXml', util.LOG_LEVEL_INFO)
if(not self.configFile):
self.configFile = util.getConfigXmlPath()
if(not os.path.isfile(self.configFile)):
Logutil.log('File config.xml does not exist. Place a valid config file here: %s' %self.configFile, util.LOG_LEVEL_ERROR)
return None, False, util.localize(32003)
tree = ElementTree().parse(self.configFile)
if(tree == None):
Logutil.log('Could not read config.xml', util.LOG_LEVEL_ERROR)
return None, False, util.localize(32004)
self.tree = tree
return tree, True, ''
def checkRomCollectionsAvailable(self):
Logutil.log('checkRomCollectionsAvailable', util.LOG_LEVEL_INFO)
tree, success, errorMsg = self.initXml()
if(not success):
return False, errorMsg
romCollectionRows = tree.findall('RomCollections/RomCollection')
numRomCollections = len(romCollectionRows)
Logutil.log("Number of Rom Collections in config.xml: %i" %numRomCollections, util.LOG_LEVEL_INFO)
return numRomCollections > 0, ''
def readXml(self):
Logutil.log('readXml', util.LOG_LEVEL_INFO)
tree, success, errorMsg = self.initXml()
if(not success):
return False, errorMsg
#Rom Collections
romCollections, errorMsg = self.readRomCollections(tree)
if(romCollections == None):
return False, errorMsg
self.romCollections = romCollections
#Scrapers
scrapers, errorMsg = self.readScrapers(tree)
if(scrapers == None):
return False, errorMsg
self.scraperSites = scrapers
self.fileTypeIdsForGamelist = self.getFileTypeIdsForGameList(tree, romCollections)
#Missing filter settings
missingFilter = tree.find('MissingFilter')
if(missingFilter != None):
showHideOption = self.readTextElement(missingFilter, 'showHideOption')
if(showHideOption != ''):
self.showHideOption = showHideOption
self.missingFilterInfo = self.readMissingFilter('missingInfoFilter', missingFilter)
self.missingFilterArtwork = self.readMissingFilter('missingArtworkFilter', missingFilter)
return True, ''
def readRomCollections(self, tree):
Logutil.log('Begin readRomCollections', util.LOG_LEVEL_INFO)
romCollections = {}
romCollectionRows = tree.findall('RomCollections/RomCollection')
if (len(romCollectionRows) == 0):
Logutil.log('Configuration error. config.xml does not contain any RomCollections', util.LOG_LEVEL_ERROR)
return None, 'Configuration error. See xbmc.log for details'
for romCollectionRow in romCollectionRows:
romCollection = RomCollection()
romCollection.name = romCollectionRow.attrib.get('name')
if(romCollection.name == None):
Logutil.log('Configuration error. RomCollection must have an attribute name', util.LOG_LEVEL_ERROR)
return None, util.localize(32005)
Logutil.log('current Rom Collection: ' +str(romCollection.name), util.LOG_LEVEL_INFO)
id = romCollectionRow.attrib.get('id')
if(id == ''):
Logutil.log('Configuration error. RomCollection %s must have an id' %romCollection.name, util.LOG_LEVEL_ERROR)
return None, util.localize(32005)
try:
rc = romCollections[id]
Logutil.log('Error while adding RomCollection. Make sure that the id is unique.', util.LOG_LEVEL_ERROR)
return None, util.localize(32006)
except:
pass
romCollection.id = id
#romPath
romCollection.romPaths = []
romPathRows = romCollectionRow.findall('romPath')
for romPathRow in romPathRows:
Logutil.log('Rom path: ' +romPathRow.text, util.LOG_LEVEL_INFO)
if(romPathRow.text != None):
romCollection.romPaths.append(romPathRow.text)
#mediaPath
romCollection.mediaPaths = []
mediaPathRows = romCollectionRow.findall('mediaPath')
for mediaPathRow in mediaPathRows:
mediaPath = MediaPath()
if(mediaPathRow.text != None):
mediaPath.path = mediaPathRow.text
Logutil.log('Media path: ' +mediaPath.path, util.LOG_LEVEL_INFO)
fileType, errorMsg = self.readFileType(mediaPathRow.attrib.get('type'), tree)
if(fileType == None):
return None, errorMsg
mediaPath.fileType = fileType
romCollection.mediaPaths.append(mediaPath)
#Scraper
romCollection.scraperSites = []
scraperRows = romCollectionRow.findall('scraper')
for scraperRow in scraperRows:
siteName = scraperRow.attrib.get('name')
Logutil.log('Scraper site: ' +str(siteName), util.LOG_LEVEL_INFO)
if(siteName == None or siteName == ''):
Logutil.log('Configuration error. RomCollection/scraper must have an attribute name', util.LOG_LEVEL_ERROR)
return None, util.localize(32005)
#read additional scraper properties
replaceKeyString = scraperRow.attrib.get('replaceKeyString')
if(replaceKeyString == None):
replaceKeyString = ''
replaceValueString = scraperRow.attrib.get('replaceValueString')
if(replaceValueString == None):
replaceValueString = ''
#elementtree version 1.2.7 does not support xpath like this: Scrapers/Site[@name="%s"]
siteRow = None
siteRows = tree.findall('Scrapers/Site')
for element in siteRows:
if(element.attrib.get('name') == siteName):
siteRow = element
break
if(siteRow == None):
Logutil.log('Configuration error. Site %s does not exist in config.xml' %siteName, util.LOG_LEVEL_ERROR)
return None, util.localize(32005)
scraper, errorMsg = self.readScraper(siteRow, romCollection.name, replaceKeyString, replaceValueString, True, tree)
if(scraper == None):
return None, errorMsg
romCollection.scraperSites.append(scraper)
#imagePlacing - Main window
romCollection.imagePlacingMain = ImagePlacing()
imagePlacingRow = romCollectionRow.find('imagePlacingMain')
if(imagePlacingRow != None):
Logutil.log('Image Placing name: ' +str(imagePlacingRow.text), util.LOG_LEVEL_INFO)
fileTypeFor, errorMsg = self.readImagePlacing(imagePlacingRow.text, tree)
if(fileTypeFor == None):
return None, errorMsg
romCollection.imagePlacingMain = fileTypeFor
#imagePlacing - Info window
romCollection.imagePlacingInfo = ImagePlacing()
imagePlacingRow = romCollectionRow.find('imagePlacingInfo')
if(imagePlacingRow != None):
Logutil.log('Image Placing name: ' +str(imagePlacingRow.text), util.LOG_LEVEL_INFO)
fileTypeFor, errorMsg = self.readImagePlacing(imagePlacingRow.text, tree)
if(fileTypeFor == None):
return None, errorMsg
romCollection.imagePlacingInfo = fileTypeFor
#all simple RomCollection properties
romCollection.gameclient = self.readTextElement(romCollectionRow, 'gameclient')
romCollection.emulatorCmd = self.readTextElement(romCollectionRow, 'emulatorCmd')
romCollection.preCmd = self.readTextElement(romCollectionRow, 'preCmd')
romCollection.postCmd = self.readTextElement(romCollectionRow, 'postCmd')
romCollection.emulatorParams = self.readTextElement(romCollectionRow, 'emulatorParams')
romCollection.saveStatePath = self.readTextElement(romCollectionRow, 'saveStatePath')
romCollection.saveStateParams = self.readTextElement(romCollectionRow, 'saveStateParams')
useBuiltinEmulator = self.readTextElement(romCollectionRow, 'useBuiltinEmulator')
if(useBuiltinEmulator != ''):
romCollection.useBuiltinEmulator = useBuiltinEmulator.upper() == 'TRUE'
ignoreOnScan = self.readTextElement(romCollectionRow, 'ignoreOnScan')
if(ignoreOnScan != ''):
romCollection.ignoreOnScan = ignoreOnScan.upper() == 'TRUE'
allowUpdate = self.readTextElement(romCollectionRow, 'allowUpdate')
if(allowUpdate != ''):
romCollection.allowUpdate = allowUpdate.upper() == 'TRUE'
useEmuSolo = self.readTextElement(romCollectionRow, 'useEmuSolo')
if(useEmuSolo != ''):
romCollection.useEmuSolo = useEmuSolo.upper() == 'TRUE'
usePopen = self.readTextElement(romCollectionRow, 'usePopen')
if(usePopen != ''):
romCollection.usePopen = usePopen.upper() == 'TRUE'
autoplayVideoMain = self.readTextElement(romCollectionRow, 'autoplayVideoMain')
if(autoplayVideoMain != ''):
romCollection.autoplayVideoMain = autoplayVideoMain.upper() == 'TRUE'
autoplayVideoInfo = self.readTextElement(romCollectionRow, 'autoplayVideoInfo')
if(autoplayVideoInfo != ''):
romCollection.autoplayVideoInfo = autoplayVideoInfo.upper() == 'TRUE'
useFoldernameAsGamename = self.readTextElement(romCollectionRow, 'useFoldernameAsGamename')
if(useFoldernameAsGamename != ''):
romCollection.useFoldernameAsGamename = useFoldernameAsGamename.upper() == 'TRUE'
maxFolderDepth = self.readTextElement(romCollectionRow, 'maxFolderDepth')
if(maxFolderDepth != ''):
romCollection.maxFolderDepth = int(maxFolderDepth)
doNotExtractZipFiles = self.readTextElement(romCollectionRow, 'doNotExtractZipFiles')
if(doNotExtractZipFiles != ''):
romCollection.doNotExtractZipFiles = doNotExtractZipFiles.upper() == 'TRUE'
makeLocalCopy = self.readTextElement(romCollectionRow, 'makeLocalCopy')
if(makeLocalCopy != ''):
romCollection.makeLocalCopy = makeLocalCopy.upper() == 'TRUE'
romCollection.diskPrefix = self.readTextElement(romCollectionRow, 'diskPrefix')
xboxCreateShortcut = self.readTextElement(romCollectionRow, 'xboxCreateShortcut')
if(xboxCreateShortcut != ''):
romCollection.xboxCreateShortcut = xboxCreateShortcut.upper() == 'TRUE'
xboxCreateShortcutAddRomfile = self.readTextElement(romCollectionRow, 'xboxCreateShortcutAddRomfile')
if(xboxCreateShortcutAddRomfile != ''):
romCollection.xboxCreateShortcutAddRomfile = xboxCreateShortcutAddRomfile.upper() == 'TRUE'
xboxCreateShortcutUseShortGamename = self.readTextElement(romCollectionRow, 'xboxCreateShortcutUseShortGamename')
if(xboxCreateShortcutUseShortGamename != ''):
romCollection.xboxCreateShortcutUseShortGamename = xboxCreateShortcutUseShortGamename.upper() == 'TRUE'
romCollections[id] = romCollection
return romCollections, ''
def readScrapers(self, tree):
sites = {}
siteRows = tree.findall('Scrapers/Site')
for siteRow in siteRows:
site, errorMsg = self.readScraper(siteRow, '', '', '', False, tree)
if(site == None):
return None, errorMsg
name = siteRow.attrib.get('name')
sites[name] = site
return sites, ''
def readScraper(self, siteRow, romCollectionName, inReplaceKeyString, inReplaceValueString, replaceValues, tree):
site = Site()
site.name = siteRow.attrib.get('name')
Logutil.log('Scraper Site: ' +str(site.name), util.LOG_LEVEL_INFO)
descFilePerGame = siteRow.attrib.get('descFilePerGame')
if(descFilePerGame != None and descFilePerGame != ''):
site.descFilePerGame = descFilePerGame.upper() == 'TRUE'
Logutil.log('Scraper descFilePerGame: ' +str(site.descFilePerGame), util.LOG_LEVEL_INFO)
searchGameByCRC = siteRow.attrib.get('searchGameByCRC')
if(searchGameByCRC != None and searchGameByCRC != ''):
site.searchGameByCRC = searchGameByCRC.upper() == 'TRUE'
searchGameByCRCIgnoreRomName = siteRow.attrib.get('searchGameByCRCIgnoreRomName')
if(searchGameByCRCIgnoreRomName != None and searchGameByCRCIgnoreRomName != ''):
site.searchGameByCRCIgnoreRomName = searchGameByCRCIgnoreRomName.upper() == 'TRUE'
useFoldernameAsCRC = siteRow.attrib.get('useFoldernameAsCRC')
if(useFoldernameAsCRC != None and useFoldernameAsCRC != ''):
site.useFoldernameAsCRC = useFoldernameAsCRC.upper() == 'TRUE'
useFilenameAsCRC = siteRow.attrib.get('useFilenameAsCRC')
if(useFilenameAsCRC != None and useFilenameAsCRC != ''):
site.useFilenameAsCRC = useFilenameAsCRC.upper() == 'TRUE'
scrapers = []
scraperRows = siteRow.findall('Scraper')
for scraperRow in scraperRows:
scraper = Scraper()
parseInstruction = scraperRow.attrib.get('parseInstruction')
if(parseInstruction != None and parseInstruction != ''):
if(not os.path.isabs(parseInstruction)):
#if it is a relative path, search in RCBs home directory
parseInstruction = os.path.join(util.RCBHOME, 'resources', 'scraper', parseInstruction)
if(not os.path.isfile(parseInstruction)):
Logutil.log('Configuration error. parseInstruction file %s does not exist.' %parseInstruction, util.LOG_LEVEL_ERROR)
return None, util.localize(32005)
scraper.parseInstruction = parseInstruction
source = scraperRow.attrib.get('source')
if(source != None and source != ''):
if(replaceValues):
platform = getPlatformByRomCollection(source, romCollectionName)
platform = urllib.quote(platform, safe='')
source = source.replace('%PLATFORM%', platform)
scraper.source = source
encoding = scraperRow.attrib.get('encoding')
if(encoding != None and encoding != 'utf-8'):
scraper.encoding = encoding
returnUrl = scraperRow.attrib.get('returnUrl')
if(returnUrl != None and returnUrl != ''):
scraper.returnUrl = returnUrl.upper() == 'TRUE'
sourceAppend = scraperRow.attrib.get('sourceAppend')
if(sourceAppend != None and sourceAppend != ''):
scraper.sourceAppend = sourceAppend
scraper.replaceKeyString = inReplaceKeyString
scraper.replaceValueString = inReplaceValueString
scrapers.append(scraper)
site.scrapers = scrapers
return site, ''
def readFileType(self, name, tree):
fileTypeRow = None
fileTypeRows = tree.findall('FileTypes/FileType')
for element in fileTypeRows:
if(element.attrib.get('name') == name):
fileTypeRow = element
break
if(fileTypeRow == None):
Logutil.log('Configuration error. FileType %s does not exist in config.xml' %name, util.LOG_LEVEL_ERROR)
return None, util.localize(32005)
fileType = FileType()
fileType.name = name
id = fileTypeRow.attrib.get('id')
if(id == ''):
Logutil.log('Configuration error. FileType %s must have an id' %name, util.LOG_LEVEL_ERROR)
return None, util.localize(32005)
fileType.id = id
type = fileTypeRow.find('type')
if(type != None):
fileType.type = type.text
parent = fileTypeRow.find('parent')
if(parent != None):
fileType.parent = parent.text
return fileType, ''
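    # Editor's note -- assumed config.xml structure, inferred from this parser rather than
    # taken from the add-on documentation:
    #   <FileTypes>
    #     <FileType id="1" name="boxfront">
    #       <type>image</type>
    #       <parent>game</parent>
    #     </FileType>
    #   </FileTypes>
    # The name attribute is what mediaPath type="..." and the ImagePlacing entries refer to.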
def readImagePlacing(self, imagePlacingName, tree):
fileTypeForRow = None
fileTypeForRows = tree.findall('ImagePlacing/fileTypeFor')
for element in fileTypeForRows:
if(element.attrib.get('name') == imagePlacingName):
fileTypeForRow = element
break
if(fileTypeForRow == None):
Logutil.log('Configuration error. ImagePlacing/fileTypeFor %s does not exist in config.xml' %str(imagePlacingName), util.LOG_LEVEL_ERROR)
return None, util.localize(32005)
imagePlacing = ImagePlacing()
imagePlacing.name = imagePlacingName
imagePlacing.fileTypesForGameList, errorMsg = self.readFileTypeForElement(fileTypeForRow, 'fileTypeForGameList', tree)
imagePlacing.fileTypesForGameListSelected, errorMsg = self.readFileTypeForElement(fileTypeForRow, 'fileTypeForGameListSelected', tree)
imagePlacing.fileTypesForMainView1, errorMsg = self.readFileTypeForElement(fileTypeForRow, 'fileTypeForMainView1', tree)
imagePlacing.fileTypesForMainView2, errorMsg = self.readFileTypeForElement(fileTypeForRow, 'fileTypeForMainView2', tree)
imagePlacing.fileTypesForMainView3, errorMsg = self.readFileTypeForElement(fileTypeForRow, 'fileTypeForMainView3', tree)
imagePlacing.fileTypesForMainViewBackground, errorMsg = self.readFileTypeForElement(fileTypeForRow, 'fileTypeForMainViewBackground', tree)
imagePlacing.fileTypesForMainViewGameInfoBig, errorMsg = self.readFileTypeForElement(fileTypeForRow, 'fileTypeForMainViewGameInfoBig', tree)
imagePlacing.fileTypesForMainViewGameInfoUpperLeft, errorMsg = self.readFileTypeForElement(fileTypeForRow, 'fileTypeForMainViewGameInfoUpperLeft', tree)
imagePlacing.fileTypesForMainViewGameInfoUpperRight, errorMsg = self.readFileTypeForElement(fileTypeForRow, 'fileTypeForMainViewGameInfoUpperRight', tree)
imagePlacing.fileTypesForMainViewGameInfoLowerLeft, errorMsg = self.readFileTypeForElement(fileTypeForRow, 'fileTypeForMainViewGameInfoLowerLeft', tree)
imagePlacing.fileTypesForMainViewGameInfoLowerRight, errorMsg = self.readFileTypeForElement(fileTypeForRow, 'fileTypeForMainViewGameInfoLowerRight', tree)
imagePlacing.fileTypesForMainViewGameInfoLower, errorMsg = self.readFileTypeForElement(fileTypeForRow, 'fileTypeForMainViewGameInfoLower', tree)
imagePlacing.fileTypesForMainViewGameInfoUpper, errorMsg = self.readFileTypeForElement(fileTypeForRow, 'fileTypeForMainViewGameInfoUpper', tree)
imagePlacing.fileTypesForMainViewGameInfoRight, errorMsg = self.readFileTypeForElement(fileTypeForRow, 'fileTypeForMainViewGameInfoRight', tree)
imagePlacing.fileTypesForMainViewGameInfoLeft, errorMsg = self.readFileTypeForElement(fileTypeForRow, 'fileTypeForMainViewGameInfoLeft', tree)
imagePlacing.fileTypesForMainViewVideoWindowBig, errorMsg = self.readFileTypeForElement(fileTypeForRow, 'fileTypeForMainViewVideoWindowBig', tree)
imagePlacing.fileTypesForMainViewVideoWindowSmall, errorMsg = self.readFileTypeForElement(fileTypeForRow, 'fileTypeForMainViewVideoWindowSmall', tree)
imagePlacing.fileTypesForMainViewVideoFullscreen, errorMsg = self.readFileTypeForElement(fileTypeForRow, 'fileTypeForMainViewVideoFullscreen', tree)
return imagePlacing, ''
def readFileTypeForElement(self, fileTypeForRow, key, tree):
fileTypeList = []
fileTypesForControl = fileTypeForRow.findall(key)
for fileTypeForControl in fileTypesForControl:
fileType, errorMsg = self.readFileType(fileTypeForControl.text, tree)
if(fileType == None):
return None, errorMsg
fileTypeList.append(fileType)
return fileTypeList, ''
def readMissingFilter(self, filterName, tree):
missingFilter = MissingFilter()
missingFilter.andGroup = []
missingFilter.orGroup = []
if(tree != None):
missingFilterRow = tree.find(filterName)
if(missingFilterRow != None):
missingFilter.andGroup = self.getMissingFilterItems(missingFilterRow, 'andGroup')
missingFilter.orGroup = self.getMissingFilterItems(missingFilterRow, 'orGroup')
return missingFilter
def getMissingFilterItems(self, missingFilterRow, groupName):
items = []
groupRow = missingFilterRow.find(groupName)
if(groupRow != None):
itemRows = groupRow.findall('item')
for element in itemRows:
items.append(element.text)
return items
def getFileTypeIdsForGameList(self, tree, romCollections):
fileTypeIds = []
for romCollection in romCollections.values():
for fileType in romCollection.imagePlacingMain.fileTypesForGameList:
if(fileTypeIds.count(fileType.id) == 0):
fileTypeIds.append(fileType.id)
for fileType in romCollection.imagePlacingMain.fileTypesForGameListSelected:
if(fileTypeIds.count(fileType.id) == 0):
fileTypeIds.append(fileType.id)
#fullscreen video
fileType, errorMsg = self.readFileType('gameplay', tree)
if(fileType != None):
fileTypeIds.append(fileType.id)
return fileTypeIds
def readTextElement(self, parent, elementName):
element = parent.find(elementName)
if(element != None and element.text != None):
Logutil.log('%s: %s' %(elementName, element.text), util.LOG_LEVEL_INFO)
return element.text
else:
return ''
|
azumimuo/family-xbmc-addon
|
script.games.rom.collection.browser/resources/lib/config.py
|
Python
|
gpl-2.0
| 27,831
|
[
"Jaguar"
] |
1a557d9bdfc3241e3bc173d760faf1196b413f4db1a4a7f1b4361263809a884b
|
"""Probit regression class and diagnostics."""
__author__ = "Luc Anselin luc.anselin@asu.edu, Pedro V. Amaral pedro.amaral@asu.edu"
import numpy as np
import numpy.linalg as la
import scipy.optimize as op
from scipy.stats import norm, chisqprob
import scipy.sparse as SP
import user_output as USER
import summary_output as SUMMARY
__all__ = ["Probit"]
class BaseProbit:
"""
Probit class to do all the computations
Parameters
----------
x : array
nxk array of independent variables (assumed to be aligned with y)
y : array
nx1 array of dependent binary variable
w : W
PySAL weights instance aligned with y
optim : string
Optimization method.
Default: 'newton' (Newton-Raphson).
Alternatives: 'ncg' (Newton-CG), 'bfgs' (BFGS algorithm)
scalem : string
Method to calculate the scale of the marginal effects.
Default: 'phimean' (Mean of individual marginal effects)
Alternative: 'xmean' (Marginal effects at variables mean)
maxiter : int
Maximum number of iterations until optimizer stops
Attributes
----------
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
y : array
nx1 array of dependent variable
betas : array
kx1 array with estimated coefficients
predy : array
nx1 array of predicted y values
n : int
Number of observations
k : int
Number of variables
vm : array
Variance-covariance matrix (kxk)
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
xmean : array
Mean of the independent variables (kx1)
predpc : float
Percent of y correctly predicted
logl : float
                    Log-Likelihood of the estimation
scalem : string
Method to calculate the scale of the marginal effects.
scale : float
Scale of the marginal effects.
slopes : array
Marginal effects of the independent variables (k-1x1)
Note: Disregards the presence of dummies.
slopes_vm : array
Variance-covariance matrix of the slopes (k-1xk-1)
LR : tuple
Likelihood Ratio test of all coefficients = 0
(test statistics, p-value)
Pinkse_error: float
Lagrange Multiplier test against spatial error correlation.
Implemented as presented in Pinkse (2004)
KP_error : float
Moran's I type test against spatial error correlation.
Implemented as presented in Kelejian and Prucha (2001)
PS_error : float
Lagrange Multiplier test against spatial error correlation.
Implemented as presented in Pinkse and Slade (1998)
warning : boolean
                    if True, the maximum number of iterations was exceeded or the
                    gradient and/or function calls stopped changing.
References
----------
.. [1] Pinkse, J. (2004). Moran-flavored tests with nuisance parameter. In: Anselin,
L., Florax, R. J., Rey, S. J. (editors) Advances in Spatial Econometrics,
pages 67-77. Springer-Verlag, Heidelberg.
.. [2] Kelejian, H., Prucha, I. (2001) "On the asymptotic distribution of the
Moran I test statistic with applications". Journal of Econometrics, 104(2):219-57.
.. [3] Pinkse, J., Slade, M. E. (1998) "Contracting in space: an application of
spatial statistics to discrete-choice models". Journal of Econometrics, 85(1):125-54.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
>>> y = np.array([dbf.by_col('CRIME')]).T
>>> x = np.array([dbf.by_col('INC'), dbf.by_col('HOVAL')]).T
>>> x = np.hstack((np.ones(y.shape),x))
>>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
>>> w.transform='r'
>>> model = BaseProbit((y>40).astype(float), x, w=w)
>>> np.around(model.betas, decimals=6)
array([[ 3.353811],
[-0.199653],
[-0.029514]])
>>> np.around(model.vm, decimals=6)
array([[ 8.52814000e-01, -4.36270000e-02, -8.05200000e-03],
[ -4.36270000e-02, 4.11400000e-03, -1.93000000e-04],
[ -8.05200000e-03, -1.93000000e-04, 3.10000000e-04]])
>>> tests = np.array([['Pinkse_error','KP_error','PS_error']])
>>> stats = np.array([[model.Pinkse_error[0],model.KP_error[0],model.PS_error[0]]])
>>> pvalue = np.array([[model.Pinkse_error[1],model.KP_error[1],model.PS_error[1]]])
>>> print np.hstack((tests.T,np.around(np.hstack((stats.T,pvalue.T)),6)))
[['Pinkse_error' '3.131719' '0.076783']
['KP_error' '1.721312' '0.085194']
['PS_error' '2.558166' '0.109726']]
"""
def __init__(self,y,x,w=None,optim='newton',scalem='phimean',maxiter=100):
self.y = y
self.x = x
self.n, self.k = x.shape
self.optim = optim
self.scalem = scalem
self.w = w
self.maxiter = maxiter
par_est, self.warning = self.par_est()
self.betas = np.reshape(par_est[0],(self.k,1))
self.logl = -float(par_est[1])
self._cache = {}
@property
def vm(self):
if 'vm' not in self._cache:
H = self.hessian(self.betas)
self._cache['vm'] = -la.inv(H)
return self._cache['vm']
@property
def z_stat(self):
if 'z_stat' not in self._cache:
variance = self.vm.diagonal()
zStat = self.betas.reshape(len(self.betas),)/ np.sqrt(variance)
rs = {}
for i in range(len(self.betas)):
rs[i] = (zStat[i],norm.sf(abs(zStat[i]))*2)
self._cache['z_stat'] = rs.values()
return self._cache['z_stat']
@property
def slopes_std_err(self):
if 'slopes_std_err' not in self._cache:
variance = self.slopes_vm.diagonal()
self._cache['slopes_std_err'] = np.sqrt(variance)
return self._cache['slopes_std_err']
@property
def slopes_z_stat(self):
if 'slopes_z_stat' not in self._cache:
zStat = self.slopes.reshape(len(self.slopes),)/self.slopes_std_err
rs = {}
for i in range(len(self.slopes)):
rs[i] = (zStat[i],norm.sf(abs(zStat[i]))*2)
self._cache['slopes_z_stat'] = rs.values()
return self._cache['slopes_z_stat']
@property
def xmean(self):
if 'xmean' not in self._cache:
self._cache['xmean'] = np.reshape(sum(self.x)/self.n,(self.k,1))
return self._cache['xmean']
@property
def xb(self):
if 'xb' not in self._cache:
self._cache['xb'] = np.dot(self.x,self.betas)
return self._cache['xb']
@property
def predy(self):
if 'predy' not in self._cache:
self._cache['predy'] = norm.cdf(self.xb)
return self._cache['predy']
@property
def predpc(self):
if 'predpc' not in self._cache:
predpc = abs(self.y-self.predy)
for i in range(len(predpc)):
if predpc[i]>0.5:
predpc[i]=0
else:
predpc[i]=1
self._cache['predpc'] = float(100* np.sum(predpc) / self.n)
return self._cache['predpc']
@property
def phiy(self):
if 'phiy' not in self._cache:
self._cache['phiy'] = norm.pdf(self.xb)
return self._cache['phiy']
@property
def scale(self):
if 'scale' not in self._cache:
if self.scalem == 'phimean':
self._cache['scale'] = float(1.0 * np.sum(self.phiy)/self.n)
if self.scalem == 'xmean':
self._cache['scale'] = float(norm.pdf(np.dot(self.xmean.T,self.betas)))
return self._cache['scale']
@property
def slopes(self):
if 'slopes' not in self._cache:
self._cache['slopes'] = self.betas[1:] * self.scale #Disregard the presence of dummies.
return self._cache['slopes']
@property
def slopes_vm(self):
if 'slopes_vm' not in self._cache:
x = self.xmean
b = self.betas
dfdb = np.eye(self.k) - np.dot(b.T,x)*np.dot(b,x.T)
slopes_vm = (self.scale**2)*np.dot(np.dot(dfdb,self.vm),dfdb.T)
self._cache['slopes_vm'] = slopes_vm[1:,1:]
return self._cache['slopes_vm']
@property
def LR(self):
if 'LR' not in self._cache:
P = 1.0 * np.sum(self.y) / self.n
LR = float(-2 * (self.n*(P * np.log(P) + (1 - P) * np.log(1 - P)) - self.logl))
self._cache['LR'] = (LR,chisqprob(LR,self.k))
return self._cache['LR']
@property
def u_naive(self):
if 'u_naive' not in self._cache:
u_naive = self.y - self.predy
self._cache['u_naive'] = u_naive
return self._cache['u_naive']
@property
def u_gen(self):
if 'u_gen' not in self._cache:
Phi_prod = self.predy * (1 - self.predy)
u_gen = self.phiy * (self.u_naive / Phi_prod)
self._cache['u_gen'] = u_gen
return self._cache['u_gen']
@property
def Pinkse_error(self):
if 'Pinkse_error' not in self._cache:
self._cache['Pinkse_error'],self._cache['KP_error'],self._cache['PS_error'] = sp_tests(self)
return self._cache['Pinkse_error']
@property
def KP_error(self):
if 'KP_error' not in self._cache:
self._cache['Pinkse_error'],self._cache['KP_error'],self._cache['PS_error'] = sp_tests(self)
return self._cache['KP_error']
@property
def PS_error(self):
if 'PS_error' not in self._cache:
self._cache['Pinkse_error'],self._cache['KP_error'],self._cache['PS_error'] = sp_tests(self)
return self._cache['PS_error']
def par_est(self):
start = np.dot(la.inv(np.dot(self.x.T,self.x)),np.dot(self.x.T,self.y))
flogl = lambda par: -self.ll(par)
if self.optim == 'newton':
fgrad = lambda par: self.gradient(par)
fhess = lambda par: self.hessian(par)
par_hat = newton(flogl,start,fgrad,fhess,self.maxiter)
warn = par_hat[2]
else:
fgrad = lambda par: -self.gradient(par)
if self.optim == 'bfgs':
par_hat = op.fmin_bfgs(flogl,start,fgrad,full_output=1,disp=0)
warn = par_hat[6]
if self.optim == 'ncg':
fhess = lambda par: -self.hessian(par)
par_hat = op.fmin_ncg(flogl,start,fgrad,fhess=fhess,full_output=1,disp=0)
warn = par_hat[5]
if warn > 0:
warn = True
else:
warn = False
return par_hat, warn
def ll(self,par):
beta = np.reshape(np.array(par),(self.k,1))
q = 2 * self.y - 1
qxb = q * np.dot(self.x,beta)
ll = sum(np.log(norm.cdf(qxb)))
return ll
def gradient(self,par):
beta = np.reshape(np.array(par),(self.k,1))
q = 2 * self.y - 1
qxb = q * np.dot(self.x,beta)
lamb = q * norm.pdf(qxb)/norm.cdf(qxb)
gradient = np.dot(lamb.T,self.x)[0]
return gradient
def hessian(self,par):
beta = np.reshape(np.array(par),(self.k,1))
q = 2 * self.y - 1
xb = np.dot(self.x,beta)
qxb = q * xb
lamb = q * norm.pdf(qxb)/norm.cdf(qxb)
hessian = np.dot((self.x.T),(-lamb * (lamb + xb) * self.x ))
return hessian
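    # Editor's note -- explanatory sketch of the expressions implemented above (with
    # q = 2y - 1 and lambda_i = q_i * phi(q_i * x_i'b) / Phi(q_i * x_i'b)):
    #   log-likelihood:  ll(b) = sum_i log Phi(q_i * x_i'b)
    #   gradient:        g(b)  = X' * lambda
    #   Hessian:         H(b)  = -X' * diag(lambda_i * (lambda_i + x_i'b)) * X
    # These are the standard probit score and Hessian used by the Newton/BFGS/NCG optimizers.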
class Probit(BaseProbit):
"""
Classic non-spatial Probit and spatial diagnostics. The class includes a
printout that formats all the results and tests in a nice format.
The diagnostics for spatial dependence currently implemented are:
* Pinkse Error [1]_
* Kelejian and Prucha Moran's I [2]_
* Pinkse & Slade Error [3]_
Parameters
----------
x : array
nxk array of independent variables (assumed to be aligned with y)
y : array
nx1 array of dependent binary variable
w : W
PySAL weights instance aligned with y
optim : string
Optimization method.
Default: 'newton' (Newton-Raphson).
Alternatives: 'ncg' (Newton-CG), 'bfgs' (BFGS algorithm)
scalem : string
Method to calculate the scale of the marginal effects.
Default: 'phimean' (Mean of individual marginal effects)
Alternative: 'xmean' (Marginal effects at variables mean)
maxiter : int
Maximum number of iterations until optimizer stops
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
y : array
nx1 array of dependent variable
betas : array
kx1 array with estimated coefficients
predy : array
nx1 array of predicted y values
n : int
Number of observations
k : int
Number of variables
vm : array
Variance-covariance matrix (kxk)
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
xmean : array
Mean of the independent variables (kx1)
predpc : float
Percent of y correctly predicted
logl : float
                    Log-Likelihood of the estimation
scalem : string
Method to calculate the scale of the marginal effects.
scale : float
Scale of the marginal effects.
slopes : array
Marginal effects of the independent variables (k-1x1)
slopes_vm : array
Variance-covariance matrix of the slopes (k-1xk-1)
LR : tuple
Likelihood Ratio test of all coefficients = 0
(test statistics, p-value)
Pinkse_error: float
Lagrange Multiplier test against spatial error correlation.
Implemented as presented in Pinkse (2004)
KP_error : float
Moran's I type test against spatial error correlation.
Implemented as presented in Kelejian and Prucha (2001)
PS_error : float
Lagrange Multiplier test against spatial error correlation.
Implemented as presented in Pinkse and Slade (1998)
warning : boolean
                    if True, the maximum number of iterations was exceeded or the
                    gradient and/or function calls stopped changing.
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
References
----------
.. [1] Pinkse, J. (2004). Moran-flavored tests with nuisance parameter. In: Anselin, L., Florax, R. J., Rey, S. J. (editors) Advances in Spatial Econometrics, pages 67-77. Springer-Verlag, Heidelberg.
.. [2] Kelejian, H., Prucha, I. (2001) "On the asymptotic distribution of the Moran I test statistic with applications". Journal of Econometrics, 104(2):219-57.
.. [3] Pinkse, J., Slade, M. E. (1998) "Contracting in space: an application of spatial statistics to discrete-choice models". Journal of Econometrics, 85(1):125-54.
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
Extract the CRIME column (crime) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
    a numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept. Since we want to run a probit model and for this
example we use the Columbus data, we also need to transform the continuous
CRIME variable into a binary variable. As in McMillen, D. (1992) "Probit with
spatial autocorrelation". Journal of Regional Science 32(3):335-48, we define
y = 1 if CRIME > 40.
>>> y = np.array([dbf.by_col('CRIME')]).T
>>> y = (y>40).astype(float)
Extract HOVAL (home values) and INC (income) vectors from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this class adds a vector of ones to the
independent variables passed in.
>>> names_to_extract = ['INC', 'HOVAL']
>>> x = np.array([dbf.by_col(name) for name in names_to_extract]).T
    Since we want to test the probit model for spatial dependence, we need to
specify the spatial weights matrix that includes the spatial configuration of
the observations into the error component of the model. To do that, we can open
an already existing gal file or create a new one. In this case, we will use
``columbus.gal``, which contains contiguity relationships between the
observations in the Columbus dataset we are using throughout this example.
Note that, in order to read the file, not only to open it, we need to
append '.read()' at the end of the command.
>>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. In PySAL, this
can be easily performed in the following way:
>>> w.transform='r'
We are all set with the preliminaries, we are good to run the model. In this
case, we will need the variables and the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> model = Probit(y, x, w=w, name_y='crime', name_x=['income','home value'], name_ds='columbus', name_w='columbus.gal')
Once we have run the model, we can explore a little bit the output. The
regression object we have created has many attributes so take your time to
discover them.
>>> np.around(model.betas, decimals=6)
array([[ 3.353811],
[-0.199653],
[-0.029514]])
>>> np.around(model.vm, decimals=6)
array([[ 8.52814000e-01, -4.36270000e-02, -8.05200000e-03],
[ -4.36270000e-02, 4.11400000e-03, -1.93000000e-04],
[ -8.05200000e-03, -1.93000000e-04, 3.10000000e-04]])
    Since we have provided a spatial weights matrix, the diagnostics for
spatial dependence have also been computed. We can access them and their
p-values individually:
>>> tests = np.array([['Pinkse_error','KP_error','PS_error']])
>>> stats = np.array([[model.Pinkse_error[0],model.KP_error[0],model.PS_error[0]]])
>>> pvalue = np.array([[model.Pinkse_error[1],model.KP_error[1],model.PS_error[1]]])
>>> print np.hstack((tests.T,np.around(np.hstack((stats.T,pvalue.T)),6)))
[['Pinkse_error' '3.131719' '0.076783']
['KP_error' '1.721312' '0.085194']
['PS_error' '2.558166' '0.109726']]
Or we can easily obtain a full summary of all the results nicely formatted and
ready to be printed simply by typing 'print model.summary'
"""
def __init__(self, y, x, w=None, optim='newton',scalem='phimean',maxiter=100,\
vm=False, name_y=None, name_x=None, name_w=None, name_ds=None, \
spat_diag=False):
n = USER.check_arrays(y, x)
USER.check_y(y, n)
if w:
USER.check_weights(w, y)
spat_diag = True
x_constant = USER.check_constant(x)
BaseProbit.__init__(self, y=y, x=x_constant, w=w)
self.title = "CLASSIC PROBIT ESTIMATOR"
self.name_ds = USER.set_name_ds(name_ds)
self.name_y = USER.set_name_y(name_y)
self.name_x = USER.set_name_x(name_x, x)
self.name_w = USER.set_name_w(name_w, w)
SUMMARY.Probit(reg=self, w=w, vm=vm, spat_diag=spat_diag)
def newton(flogl,start,fgrad,fhess,maxiter):
"""
Calculates the Newton-Raphson method
Parameters
----------
flogl : lambda
Function to calculate the log-likelihood
start : array
kx1 array of starting values
fgrad : lambda
Function to calculate the gradient
fhess : lambda
Function to calculate the hessian
maxiter : int
Maximum number of iterations until optimizer stops
"""
warn = 0
iteration = 0
par_hat0 = start
m = 1
while (iteration < maxiter and m>=1e-04):
H = -la.inv(fhess(par_hat0))
g = fgrad(par_hat0).reshape(start.shape)
Hg = np.dot(H,g)
par_hat0 = par_hat0 + Hg
iteration += 1
m = np.dot(g.T,Hg)
if iteration == maxiter:
warn = 1
logl = flogl(par_hat0)
return (par_hat0, logl, warn)
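# Editor's note: a hypothetical, self-contained sketch (not part of the original module)
# showing newton() climbing a simple concave objective f(b) = -(b - 3)^2, whose maximizer
# is b = 3. The lambdas mirror how par_est() wires flogl/fgrad/fhess together.
def _newton_example():
    import numpy as np
    flogl = lambda par: ((par - 3.0) ** 2).sum()    # newton reports -f at the optimum
    fgrad = lambda par: -2.0 * (par - 3.0)          # gradient of f
    fhess = lambda par: np.array([[-2.0]])          # (constant) Hessian of f
    start = np.zeros((1, 1))
    par_hat, neg_f, warn = newton(flogl, start, fgrad, fhess, 50)
    return par_hat                                  # -> array([[ 3.]]) after two steps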
def sp_tests(reg):
"""
Calculates tests for spatial dependence in Probit models
Parameters
----------
reg : regression object
output instance from a probit model
"""
if reg.w:
w = reg.w.sparse
Phi = reg.predy
phi = reg.phiy
#Pinkse_error:
Phi_prod = Phi * (1 - Phi)
u_naive = reg.u_naive
u_gen = reg.u_gen
sig2 = np.sum((phi * phi) / Phi_prod) / reg.n
LM_err_num = np.dot(u_gen.T,(w * u_gen))**2
trWW = np.sum((w*w).diagonal())
trWWWWp = trWW + np.sum((w*w.T).diagonal())
LM_err = float(1.0 * LM_err_num / (sig2**2 * trWWWWp))
LM_err = np.array([LM_err,chisqprob(LM_err,1)])
#KP_error:
moran = moran_KP(reg.w,u_naive,Phi_prod)
#Pinkse-Slade_error:
u_std = u_naive / np.sqrt(Phi_prod)
ps_num = np.dot(u_std.T, (w * u_std))**2
trWpW = np.sum((w.T*w).diagonal())
ps = float(ps_num / (trWW + trWpW))
ps = np.array([ps,chisqprob(ps,1)]) #chi-square instead of bootstrap.
else:
raise Exception, "W matrix not provided to calculate spatial test."
return LM_err,moran,ps
def moran_KP(w,u,sig2i):
"""
Calculates Moran-flavoured tests
Parameters
----------
w : W
PySAL weights instance aligned with y
u : array
nx1 array of naive residuals
sig2i : array
nx1 array of individual variance
"""
w = w.sparse
moran_num = np.dot(u.T, (w * u))
E = SP.lil_matrix(w.get_shape())
E.setdiag(sig2i.flat)
E = E.asformat('csr')
WE = w*E
moran_den = np.sqrt(np.sum((WE*WE + (w.T*E)*WE).diagonal()))
moran = float(1.0*moran_num / moran_den)
moran = np.array([moran,norm.sf(abs(moran)) * 2.])
return moran
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
_test()
import numpy as np
import pysal
dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
y = np.array([dbf.by_col('CRIME')]).T
var_x = ['INC', 'HOVAL']
x = np.array([dbf.by_col(name) for name in var_x]).T
w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
w.transform='r'
probit1 = Probit((y>40).astype(float), x, w=w, name_x=var_x, name_y="CRIME",\
name_ds="Columbus", name_w="columbus.dbf")
#print probit1.summary
|
AlanZatarain/pysal
|
pysal/spreg/probit.py
|
Python
|
bsd-3-clause
| 26,628
|
[
"COLUMBUS"
] |
0aa975c9573fca190039ef53298af1bdb8e794db9df94b2a3ae7d333c01a9f43
|
########################################################################
# $HeadURL$
# File: ReqProxyHandler.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/06/04 13:18:41
########################################################################
"""
:mod: RequestProxyHandler
  .. module: ReqProxyHandler
:synopsis: ReqProxy service
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
  Careful with that axe, Eugene! Some 'transfer' requests use the local file system
  and should never be forwarded to the central RequestManager.
"""
__RCSID__ = "$Id$"
# #
# @file RequestProxyHandler.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2012/07/20 13:18:58
# @brief Definition of RequestProxyHandler class.
# # imports
import os
from types import DictType
try:
from hashlib import md5
except ImportError:
from md5 import md5
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
def initializeReqProxyHandler( serviceInfo ):
""" init RequestProxy handler
:param serviceInfo: whatever
"""
  gLogger.info( "Initializing ReqProxyHandler" )
gThreadScheduler.addPeriodicTask( 120, ReqProxyHandler.sweeper )
return S_OK()
########################################################################
class ReqProxyHandler( RequestHandler ):
"""
.. class:: ReqProxyHandler
:param RPCCLient requestManager: a RPCClient to RequestManager
:param str cacheDir: os.path.join( workDir, "requestCache" )
"""
__requestManager = None
__cacheDir = None
def initialize( self ):
""" service initialization
:param self: self reference
"""
gLogger.notice( "CacheDirectory: %s" % self.cacheDir() )
return S_OK()
@classmethod
def requestManager( cls ):
""" get request manager """
if not cls.__requestManager:
cls.__requestManager = RPCClient( "RequestManagement/ReqManager" )
return cls.__requestManager
@classmethod
def cacheDir( cls ):
""" get cache dir """
if not cls.__cacheDir:
cls.__cacheDir = os.path.abspath( "requestCache" )
if not os.path.exists( cls.__cacheDir ):
os.mkdir( cls.__cacheDir )
return cls.__cacheDir
@classmethod
def sweeper( cls ):
""" move cached request to the central request manager
:param self: self reference
"""
cacheDir = cls.cacheDir()
# # cache dir empty?
if not os.listdir( cacheDir ):
gLogger.always( "sweeper: CacheDir %s is empty, nothing to do" % cacheDir )
return S_OK()
else:
# # read 10 cache dir files, the oldest first
cachedRequests = [ os.path.abspath( requestFile ) for requestFile in
sorted( filter( os.path.isfile,
[ os.path.join( cacheDir, requestName )
for requestName in os.listdir( cacheDir ) ] ),
key = os.path.getctime ) ][:10]
# # set cached requests to the central RequestManager
for cachedFile in cachedRequests:
# # break if something went wrong last time
try:
requestString = "".join( open( cachedFile, "r" ).readlines() )
cachedRequest = eval( requestString )
cachedName = cachedRequest.get( "RequestName", "***UNKNOWN***" )
setRequest = cls.requestManager().putRequest( cachedRequest )
if not setRequest["OK"]:
gLogger.error( "sweeper: unable to set request %s @ ReqManager: %s" % ( cachedName,
setRequest["Message"] ) )
continue
gLogger.info( "sweeper: successfully set request '%s' @ ReqManager" % cachedName )
os.unlink( cachedFile )
except Exception, error:
gLogger.exception( "sweeper: hit by exception %s" % str( error ) )
return S_ERROR( "sweeper: hit by exception: %s" % str( error ) )
return S_OK()
def __saveRequest( self, requestName, requestJSON ):
""" save request string to the working dir cache
:param self: self reference
:param str requestName: request name
:param str requestJSON: request serialized to JSON format
"""
try:
requestFile = os.path.join( self.cacheDir(), md5( str( requestJSON ) ).hexdigest() )
request = open( requestFile, "w+" )
request.write( str( requestJSON ) )
request.close()
return S_OK( requestFile )
except OSError, error:
err = "unable to dump %s to cache file: %s" % ( requestName, str( error ) )
gLogger.exception( err )
return S_ERROR( err )
types_getStatus = []
def export_getStatus( self ):
""" get number of requests in cache """
try:
cachedRequests = len( os.listdir( self.cacheDir() ) )
except OSError, error:
err = "getStatus: unable to list cache dir contents: %s" % str( error )
gLogger.exception( err )
return S_ERROR( err )
return S_OK( cachedRequests )
types_putRequest = [ DictType ]
def export_putRequest( self, requestJSON ):
""" forward request from local RequestDB to central RequestManager
:param self: self reference
    :param dict requestJSON: request serialized to a JSON-compatible dict
"""
requestName = requestJSON.get( "RequestName", "***UNKNOWN***" )
gLogger.info( "setRequest: got request '%s'" % requestName )
forwardable = self.__forwardable( requestJSON )
if not forwardable["OK"]:
gLogger.warn( "setRequest: %s" % forwardable["Message"] )
setRequest = self.requestManager().putRequest( requestJSON )
if not setRequest["OK"]:
      gLogger.error( "setRequest: unable to set request '%s' @ RequestManager: %s" % ( requestName,
setRequest["Message"] ) )
# # put request to the request file cache
save = self.__saveRequest( requestName, requestJSON )
if not save["OK"]:
gLogger.error( "setRequest: unable to save request to the cache: %s" % save["Message"] )
return save
gLogger.info( "setRequest: %s is saved to %s file" % ( requestName, save["Value"] ) )
return S_OK( { "set" : False, "saved" : True } )
gLogger.info( "setRequest: request '%s' has been set to the ReqManager" % ( requestName ) )
return S_OK( { "set" : True, "saved" : False } )
@staticmethod
  def __forwardable( requestJSON ):
    """ check if request is forwardable
    Sub-requests of type transfer:putAndRegister, removal:physicalRemoval and removal:reTransfer are
    definitely not; they should be executed locally, as they are using the local fs.
:param str requestJSON: serialized request
"""
operations = requestJSON.get( "Operations", [] )
for operationDict in operations:
if operationDict.get( "Type", "" ) in ( "PutAndRegister", "PhysicalRemoval", "ReTransfer" ):
return S_ERROR( "found operation '%s' that cannot be forwarded" % operationDict.get( "Type", "" ) )
return S_OK()
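# Illustrative client-side sketch (an assumption, not part of this handler): forwarding a
# request through the proxy and reacting to the two possible outcomes of export_putRequest.
# The "RequestManagement/ReqProxy" service path and RPCClient usage are hypothetical here.
#
#   reqProxy = RPCClient( "RequestManagement/ReqProxy" )
#   res = reqProxy.putRequest( requestJSON )
#   if res["OK"] and res["Value"]["saved"]:
#     # central ReqManager was unreachable; the request now sits in the local cache
#     # and the sweeper() thread will forward it later
#     pass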
|
Sbalbp/DIRAC
|
RequestManagementSystem/Service/ReqProxyHandler.py
|
Python
|
gpl-3.0
| 7,200
|
[
"DIRAC"
] |
0c43dd5820faae1234dcc0367e733243853d8371b266c53c8d6bb75849704705
|
import pytest
import psi4
pytestmark = [pytest.mark.quick]
def test_dft_grid_threaded_raise():
dimer = psi4.geometry("""
1 1
K -4.067042 -1.894214 0.002270
""")
psi4.set_options({
"dft_grid_name": "SG1",
"dft_vv10_radial_points": 50,
"dft_vv10_spherical_points": 194,
"dft_nuclear_scheme": "treutler",
"dft_radial_scheme": "EM",
"basis": "def2-TZVPPD",
})
with pytest.raises(RuntimeError) as e:
ene = psi4.energy("wB97M-V")
assert "There is no SG-1 grid defined for the requested atomic number" in str(e.value)
|
ashutoshvt/psi4
|
tests/pytests/test_raises.py
|
Python
|
lgpl-3.0
| 617
|
[
"Psi4"
] |
7e1e0b397238d4bb58534668c1db538678f427b87cc02c38db4817e26deccb5c
|
#!/usr/bin/env python -Es
"""
Script that creates bcbio-compatible inputs in case of multiple files samples
"""
import os
import yaml
from collections import defaultdict
from argparse import ArgumentParser
from cluster_helper import cluster as ipc
from bcbio import log
from bcbio.log import logger
from bcbio.install import _get_data_dir
from bcbio import utils
from bcbio.bam import is_bam
from bcbio.bam.fastq import is_fastq, combine_pairs
from bcbio.distributed.transaction import file_transaction
from bcbio.distributed import clargs, resources, prun
from bcbio.provenance import system, profile
def create_new_csv(samples, args):
"""create csv file that can be use with bcbio -w template"""
out_fn = os.path.splitext(args.csv)[0] + "-merged.csv"
logger.info("Preparing new csv: %s" % out_fn)
with file_transaction(out_fn) as tx_out:
with open(tx_out, 'w') as handle:
handle.write(_header(args.csv))
for s in samples:
sample_name = s['name'] if isinstance(s['out_file'], list) else os.path.basename(s['out_file'])
handle.write("%s,%s,%s\n" % (sample_name, s['name'], ",".join(s['anno'])))
def _header(fn):
"""read header of csv file"""
l = open(fn).readline()
return l
def _get_samples_to_process(fn, out_dir, config):
"""parse csv file with one line per file. It will merge
all files that have the same description name"""
samples = defaultdict(list)
with open(fn) as handle:
for l in handle:
cols = l.strip().split(",")
if len(cols) < 2:
raise ValueError("Line needs 2 values: file and name.")
if utils.file_exists(cols[0]):
samples[cols[1]].append(cols)
else:
logger.info("skipping %s, File doesn't exist." % cols[0])
for sample, items in samples.iteritems():
if is_fastq(items[0][0], True):
fn = "fq_merge"
ext = ".fastq.gz"
elif is_bam(items[0][0]):
fn = "bam_merge"
ext = ".bam"
files = [os.path.abspath(fn_file[0]) for fn_file in items]
samples[sample] = [{'files': _check_paired(files), 'out_file': os.path.join(out_dir, sample + ext), 'fn': fn, 'anno': items[0][2:], 'config': config, 'name': sample, 'out_dir': out_dir}]
return [samples[sample] for sample in samples]
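# Illustrative CSV layout for _get_samples_to_process (inferred from the parsing above;
# file names are hypothetical): one file per line, sample name in the second column,
# optional annotation columns after that. Lines sharing a sample name are merged, and the
# first line of the real file is treated as a header by _header() when writing the merged csv.
#
#   /data/run1/sampleA_R1.fastq.gz,sampleA,tumor,batch1
#   /data/run1/sampleA_R2.fastq.gz,sampleA,tumor,batch1
#   /data/run2/sampleB.bam,sampleB,normal,batch1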
def _check_paired(files):
"""check if files are fastq(.gz) and paired"""
if files[0].endswith(".bam"):
return files
return combine_pairs(files)
def get_cluster_view(p):
"""get ipython running"""
return ipc.cluster_view(p['scheduler'], p['queue'], p['num_jobs'], p['cores_per_job'], start_wait=p['timeout'], extra_params={"resources": p['resources'], "mem": p['mem'], "tag": p['tag'], "run_local": False})
def wait_until_complete(jobs):
"""wait jobs finish"""
return [j.get() for j in jobs]
if __name__ == "__main__":
description = ("Merge multiple files from the same sample to be compatible with bcbio BAM/FASTQ input files")
parser = ArgumentParser(description="Merge fastq or bam files")
parser.add_argument("--csv", required=True, help="csv file with metadata")
parser.add_argument("--out", required=True, help="output dir")
parser.add_argument("-n", "--numcores", type=int,
default=1, help="Number of concurrent jobs to process.")
parser.add_argument("-c", "--cores-per-job", type=int,
default=1, help="Number of cores to use.")
parser.add_argument("-m", "--memory-per-job", default=2, help="Memory in GB to reserve per job.")
parser.add_argument("--timeout", default=15, help="Time to wait before giving up starting.")
parser.add_argument("--retries", default=0, type=int,
help=("Number of retries of failed tasks during "
"distributed processing. Default 0 "
"(no retries)"))
parser.add_argument("-s", "--scheduler", help="Type of scheduler to use.",
choices=["lsf", "slurm", "torque", "sge", "pbspro"])
parser.add_argument("-r", "--resources", help="Extra scheduler resource flags.", default=[], action="append")
parser.add_argument("-q", "--queue", help="Queue to submit jobs to.")
parser.add_argument("-p", "--tag", help="Tag name to label jobs on the cluster", default="bcb-prep")
parser.add_argument("-t", "--paralleltype",
choices=["local", "ipython"],
default="local", help="Run with iptyhon")
args = parser.parse_args()
out_dir = os.path.abspath(args.out)
utils.safe_makedir(out_dir)
system_config = os.path.join(_get_data_dir(), "galaxy", "bcbio_system.yaml")
with open(system_config) as in_handle:
config = yaml.load(in_handle)
res = {'cores': args.cores_per_job}
config["algorithm"] = {"num_cores": args.cores_per_job}
config["resources"].update({'sambamba': res,
'samtools': res})
config["log_dir"] = os.path.join(os.path.abspath(os.getcwd()), "log")
parallel = clargs.to_parallel(args)
parallel.update({'progs': ['samtools', 'sambamba']})
parallel = log.create_base_logger(config, parallel)
log.setup_local_logging(config, parallel)
dirs = {'work': os.path.abspath(os.getcwd())}
system.write_info(dirs, parallel, config)
sysinfo = system.machine_info()[0]
samples = _get_samples_to_process(args.csv, out_dir, config)
parallel = resources.calculate(parallel, [samples], sysinfo, config)
with prun.start(parallel, samples, config, dirs) as run_parallel:
with profile.report("prepare bcbio samples", dirs):
samples = run_parallel("prepare_bcbio_samples", samples)
create_new_csv(samples, args)
|
Cyberbio-Lab/bcbio-nextgen
|
scripts/bcbio_prepare_samples.py
|
Python
|
mit
| 5,871
|
[
"Galaxy"
] |
7073483c01cebd9addab9818db0a998cccda782611c03d5a0393353708a0dd81
|
"""
This module contains classes for representing the ProblemStatistics object
For further information visit http://codeforces.com/api/help/objects#ProblemStatistics
"""
from . import BaseJsonObject
__all__ = ['ProblemStatistics']
class ProblemStatistics(BaseJsonObject):
"""
    Represents statistics about a problem.
For further information visit http://codeforces.com/api/help/objects#ProblemStatistics
"""
def __init__(self, data=None):
self._contest_id = None
self._index = None
self._solved_count = None
super().__init__(data)
def __repr__(self):
return '<ProblemStatistics: {}/{}: {}>'.format(self.contest_id, self.index, self.solved_count)
def load_required_fields_from_dict(self, values):
super().load_required_fields_from_dict(values)
self.contest_id = values['contestId']
self.index = values['index']
self.solved_count = values['solvedCount']
@property
def contest_id(self):
"""
        Id of the contest containing the problem.
:return: Id or None if not initialized
:rtype: int
"""
return self._contest_id
@contest_id.setter
def contest_id(self, value):
"""
        Id of the contest containing the problem.
:param value: Id
:type value: int or str
"""
assert isinstance(value, (int, str))
self._contest_id = int(value)
@property
def index(self):
"""
        Usually a letter, or a letter followed by a digit, that represents the problem index in a contest.
        :return: Index or None if not initialized
        :rtype: str
"""
return self._index
@index.setter
def index(self, value):
"""
        Usually a letter, or a letter followed by a digit, that represents the problem index in a contest.
:param value: Index
:type value: str
"""
assert isinstance(value, str)
self._index = value
@property
def solved_count(self):
"""
        :return: Number of users who solved the problem, or None if not initialized
:rtype: int
"""
return self._solved_count
@solved_count.setter
def solved_count(self, value):
"""
        :param value: Number of users who solved the problem.
:type value: int or str
"""
assert isinstance(value, (int, str))
self._solved_count = int(value)
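# Minimal usage sketch (illustrative; assumes BaseJsonObject.__init__ loads the required
# fields from the given dict via load_required_fields_from_dict, as the code above suggests):
#
#   stats = ProblemStatistics({'contestId': 1, 'index': 'A', 'solvedCount': 1500})
#   print(stats)  # <ProblemStatistics: 1/A: 1500>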
|
soon/CodeforcesAPI
|
codeforces/api/json_objects/problem_statistics.py
|
Python
|
mit
| 2,497
|
[
"VisIt"
] |
24b7be8c94ff12b7c437638ffa3f065b4c5cc7a688597f1fe82449af7d0fb754
|
# coding: utf-8
from __future__ import division, unicode_literals
"""
Created on Mar 19, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 19, 2012"
import os
import unittest
from pymatgen.entries.compatibility import MaterialsProjectCompatibility, \
MITCompatibility, AqueousCorrection, MITAqueousCompatibility, MaterialsProjectAqueousCompatibility
from pymatgen.entries.computed_entries import ComputedEntry, \
ComputedStructureEntry
from pymatgen import Composition, Lattice, Structure, Element
class MaterialsProjectCompatibilityTest(unittest.TestCase):
def setUp(self):
self.entry1 = ComputedEntry(
'Fe2O3', -1, 0.0,
parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'O': 0},
'run_type': 'GGA+U',
'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
'PAW_PBE O 08Apr2002']})
self.entry2 = ComputedEntry(
'Fe3O4', -2, 0.0,
parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'O': 0},
'run_type': 'GGA+U',
'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
'PAW_PBE O 08Apr2002']})
self.entry3 = ComputedEntry(
'FeO', -2, 0.0,
parameters={'is_hubbard': True, 'hubbards': {'Fe': 4.3, 'O': 0},
'run_type': 'GGA+U',
'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
'PAW_PBE O 08Apr2002']})
def test_process_entry(self):
compat = MaterialsProjectCompatibility()
ggacompat = MaterialsProjectCompatibility("GGA")
#Correct parameters
self.assertIsNotNone(compat.process_entry(self.entry1))
self.assertIsNone(ggacompat.process_entry(self.entry1))
#Correct parameters
entry = ComputedEntry(
'Fe2O3', -1, 0.0,
parameters={'is_hubbard': False, "hubbards": {}, 'run_type': 'GGA',
'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
'PAW_PBE O 08Apr2002']})
self.assertIsNone(compat.process_entry(entry))
self.assertIsNotNone(ggacompat.process_entry(entry))
entry = ComputedEntry(
'Fe2O3', -1, 0.0,
parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'O': 0},
'run_type': 'GGA+U',
'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
'PAW_PBE O 08Apr2002']})
self.assertIsNotNone(compat.process_entry(entry))
#Check actual correction
self.assertAlmostEqual(compat.process_entry(entry).correction,
- 2.733 * 2 - 0.70229 * 3)
entry = ComputedEntry(
'FeF3', -2, 0.0,
parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'F': 0},
'run_type': 'GGA+U',
'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
'PAW_PBE F 08Apr2002']})
self.assertIsNotNone(compat.process_entry(entry))
#Check actual correction
self.assertAlmostEqual(compat.process_entry(entry).correction, -2.733)
#Wrong U value
entry = ComputedEntry(
'Fe2O3', -1, 0.0,
parameters={'is_hubbard': True,
'hubbards': {'Fe': 5.2, 'O': 0}, 'run_type': 'GGA+U',
'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
'PAW_PBE O 08Apr2002']})
self.assertIsNone(compat.process_entry(entry))
#GGA run of U
entry = ComputedEntry(
'Fe2O3', -1, 0.0,
parameters={'is_hubbard': False, 'hubbards': None,
'run_type': 'GGA',
'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
'PAW_PBE O 08Apr2002']})
self.assertIsNone(compat.process_entry(entry))
#GGA+U run of non-U
entry = ComputedEntry(
'Al2O3', -1, 0.0,
parameters={'is_hubbard': True, 'hubbards': {'Al': 5.3, 'O': 0},
'run_type': 'GGA+U',
'potcar_symbols': ['PAW_PBE Al 06Sep2000',
'PAW_PBE O 08Apr2002']})
self.assertIsNone(compat.process_entry(entry))
#Materials project should not have a U for sulfides
entry = ComputedEntry(
'FeS2', -2, 0.0,
parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'S': 0},
'run_type': 'GGA+U',
'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
'PAW_PBE S 08Apr2002']})
self.assertIsNone(compat.process_entry(entry))
#Wrong psp
entry = ComputedEntry(
'Fe2O3', -1, 0.0,
parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'O': 0},
'run_type': 'GGA+U',
'potcar_symbols': ['PAW_PBE Fe 06Sep2000',
'PAW_PBE O 08Apr2002']})
self.assertIsNone(compat.process_entry(entry))
#Testing processing of elements.
entry = ComputedEntry(
'O', -1, 0.0,
parameters={'is_hubbard': False, 'hubbards': {},
'potcar_symbols': ['PAW_PBE O 08Apr2002'],
'run_type': 'GGA'})
entry = compat.process_entry(entry)
# self.assertEqual(entry.entry_id, -8)
self.assertAlmostEqual(entry.energy, -1)
self.assertAlmostEqual(ggacompat.process_entry(entry).energy,
-1)
def test_get_corrections_dict(self):
compat = MaterialsProjectCompatibility()
ggacompat = MaterialsProjectCompatibility("GGA")
#Correct parameters
entry = ComputedEntry(
'Fe2O3', -1, 0.0,
parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'O': 0},
'run_type': 'GGA+U',
'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
'PAW_PBE O 08Apr2002']})
c = compat.get_corrections_dict(entry)
self.assertAlmostEqual(c["MP Gas Correction"], -2.10687)
self.assertAlmostEqual(c["MP Advanced Correction"], -5.466)
entry.parameters["is_hubbard"] = False
del entry.parameters["hubbards"]
c = ggacompat.get_corrections_dict(entry)
self.assertNotIn("MP Advanced Correction", c)
def test_process_entries(self):
compat = MaterialsProjectCompatibility()
entries = compat.process_entries([self.entry1, self.entry2,
self.entry3])
self.assertEqual(len(entries), 2)
class MITCompatibilityTest(unittest.TestCase):
def test_process_entry(self):
compat = MITCompatibility()
#Correct parameters
entry = ComputedEntry(
'Fe2O3', -1, 0.0,
parameters={'is_hubbard': True,
'hubbards': {'Fe': 4.0, 'O': 0},
'run_type': 'GGA+U',
'potcar_symbols': ['PAW_PBE Fe 06Sep2000',
'PAW_PBE O 08Apr2002']})
self.assertIsNotNone(compat.process_entry(entry))
self.assertAlmostEqual(compat.process_entry(entry).correction,
- 1.723 * 2 -0.66975*3)
entry = ComputedEntry(
'FeF3', -2, 0.0,
parameters={'is_hubbard': True,
'hubbards': {'Fe': 4.0, 'F': 0},
'run_type': 'GGA+U',
'potcar_symbols': ['PAW_PBE Fe 06Sep2000',
'PAW_PBE F 08Apr2002']})
self.assertIsNotNone(compat.process_entry(entry))
#Check actual correction
self.assertAlmostEqual(compat.process_entry(entry).correction, -1.723)
#MIT should not have a U for sulfides
entry = ComputedEntry(
'FeS2', -2, 0.0,
parameters={'is_hubbard': True,
'hubbards': {'Fe': 1.9, 'S': 0},
'run_type': 'GGA+U',
'potcar_symbols': ['PAW_PBE Fe 06Sep2000',
'PAW_PBE S 08Apr2002']})
self.assertIsNotNone(compat.process_entry(entry))
self.assertAlmostEqual(compat.process_entry(entry).correction, -1.113)
#Wrong U value
entry = ComputedEntry(
'Fe2O3', -1, 0.0,
parameters={'is_hubbard': True,
'hubbards': {'Fe': 5.2, 'O': 0},
'run_type': 'GGA+U',
'potcar_symbols': ['PAW_PBE Fe 06Sep2000',
'PAW_PBE O 08Apr2002']})
self.assertIsNone(compat.process_entry(entry))
#GGA run
entry = ComputedEntry(
'Fe2O3', -1, 0.0,
parameters={'is_hubbard': False,
'hubbards': None,
'run_type': 'GGA',
'potcar_symbols': ['PAW_PBE Fe 06Sep2000',
'PAW_PBE O 08Apr2002']})
self.assertIsNone(compat.process_entry(entry))
#Wrong psp
entry = ComputedEntry(
'Fe2O3', -1, 0.0,
parameters={'is_hubbard': True,
'hubbards': {'Fe': 4.0, 'O': 0},
'run_type': 'GGA+U',
'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
'PAW_PBE O 08Apr2002']})
self.assertIsNone(compat.process_entry(entry))
#Testing processing of elements.
entry = ComputedEntry(
'O', -1, 0.0,
parameters={'is_hubbard': False, 'hubbards': {},
'potcar_symbols': ['PAW_PBE O 08Apr2002'],
'run_type': 'GGA'})
entry = compat.process_entry(entry)
self.assertAlmostEqual(entry.energy, -1)
class OxideTypeCorrectionTest(unittest.TestCase):
def setUp(self):
self.compat = MITCompatibility()
def test_no_struct_compat(self):
lio2_entry_nostruct = ComputedEntry(Composition("Li2O4"), -3,
data={"oxide_type": "superoxide"},
parameters={'is_hubbard': False,
'hubbards': None,
'run_type': 'GGA',
'potcar_symbols':
['PAW_PBE Fe 06Sep2000', 'PAW_PBE O 08Apr2002']})
lio2_entry_corrected = self.compat.process_entry(lio2_entry_nostruct)
self.assertAlmostEqual(lio2_entry_corrected.energy, -3 - 0.13893*4, 4)
def test_process_entry_superoxide(self):
el_li = Element("Li")
el_o = Element("O")
latt = Lattice([[3.985034, 0.0, 0.0],
[0.0, 4.881506, 0.0],
[0.0, 0.0, 2.959824]])
elts = [el_li, el_li, el_o, el_o, el_o, el_o]
coords = list()
coords.append([0.500000, 0.500000, 0.500000])
coords.append([0.0, 0.0, 0.0])
coords.append([0.632568, 0.085090, 0.500000])
coords.append([0.367432, 0.914910, 0.500000])
coords.append([0.132568, 0.414910, 0.000000])
coords.append([0.867432, 0.585090, 0.000000])
struct = Structure(latt, elts, coords)
lio2_entry = ComputedStructureEntry(struct, -3,
parameters={'is_hubbard': False,
'hubbards': None,
'run_type': 'GGA',
'potcar_symbols':
['PAW_PBE Fe 06Sep2000', 'PAW_PBE O 08Apr2002']})
lio2_entry_corrected = self.compat.process_entry(lio2_entry)
self.assertAlmostEqual(lio2_entry_corrected.energy, -3 -0.13893*4, 4)
def test_process_entry_peroxide(self):
latt = Lattice.from_parameters(3.159597, 3.159572, 7.685205, 89.999884, 89.999674, 60.000510)
el_li = Element("Li")
el_o = Element("O")
elts = [el_li, el_li, el_li, el_li, el_o, el_o, el_o, el_o]
coords = [[0.666656, 0.666705, 0.750001],
[0.333342, 0.333378, 0.250001],
[0.000001, 0.000041, 0.500001],
[0.000001, 0.000021, 0.000001],
[0.333347, 0.333332, 0.649191],
[0.333322, 0.333353, 0.850803],
[0.666666, 0.666686, 0.350813],
[0.666665, 0.666684, 0.149189]]
struct = Structure(latt, elts, coords)
li2o2_entry = ComputedStructureEntry(struct, -3,
parameters={'is_hubbard': False,
'hubbards': None,
'run_type': 'GGA',
'potcar_symbols':
['PAW_PBE Fe 06Sep2000', 'PAW_PBE O 08Apr2002']})
li2o2_entry_corrected = self.compat.process_entry(li2o2_entry)
self.assertAlmostEqual(li2o2_entry_corrected.energy, -3 - 0.44317 * 4, 4)
def test_process_entry_ozonide(self):
el_li = Element("Li")
el_o = Element("O")
elts = [el_li, el_o, el_o, el_o]
latt = Lattice.from_parameters(3.999911, 3.999911, 3.999911,
133.847504, 102.228244, 95.477342)
coords = [[0.513004, 0.513004, 1.000000],
[0.017616, 0.017616, 0.000000],
[0.649993, 0.874790, 0.775203],
[0.099587, 0.874790, 0.224797]]
struct = Structure(latt, elts, coords)
lio3_entry = ComputedStructureEntry(struct, -3,
parameters={'is_hubbard': False,
'hubbards': None,
'run_type': 'GGA',
'potcar_symbols':
['PAW_PBE Fe 06Sep2000', 'PAW_PBE O 08Apr2002']})
lio3_entry_corrected = self.compat.process_entry(lio3_entry)
self.assertAlmostEqual(lio3_entry_corrected.energy, -3.0)
def test_process_entry_oxide(self):
el_li = Element("Li")
el_o = Element("O")
elts = [el_li, el_li, el_o]
latt = Lattice.from_parameters(3.278, 3.278, 3.278,
60, 60, 60)
coords = [[0.25, 0.25, 0.25],
[0.75, 0.75, 0.75],
[0.0, 0.0, 0.0]]
struct = Structure(latt, elts, coords)
li2o_entry = ComputedStructureEntry(struct, -3,
parameters={'is_hubbard': False,
'hubbards': None,
'run_type': 'GGA',
'potcar_symbols':
['PAW_PBE Fe 06Sep2000', 'PAW_PBE O 08Apr2002']})
li2o_entry_corrected = self.compat.process_entry(li2o_entry)
self.assertAlmostEqual(li2o_entry_corrected.energy, -3.0 -0.66975, 4)
class OxideTypeCorrectionNoPeroxideCorrTest(unittest.TestCase):
def setUp(self):
self.compat = MITCompatibility(correct_peroxide=False)
def test_oxide_energy_corr(self):
el_li = Element("Li")
el_o = Element("O")
elts = [el_li, el_li, el_o]
latt = Lattice.from_parameters(3.278, 3.278, 3.278,
60, 60, 60)
coords = [[0.25, 0.25, 0.25],
[0.75, 0.75, 0.75],
[0.0, 0.0, 0.0]]
struct = Structure(latt, elts, coords)
li2o_entry = ComputedStructureEntry(struct, -3,
parameters={'is_hubbard': False,
'hubbards': None,
'run_type': 'GGA',
'potcar_symbols':
['PAW_PBE Fe 06Sep2000', 'PAW_PBE O 08Apr2002']})
li2o_entry_corrected = self.compat.process_entry(li2o_entry)
self.assertAlmostEqual(li2o_entry_corrected.energy, -3.0 -0.66975, 4)
def test_peroxide_energy_corr(self):
latt = Lattice.from_parameters(3.159597, 3.159572, 7.685205, 89.999884, 89.999674, 60.000510)
el_li = Element("Li")
el_o = Element("O")
elts = [el_li, el_li, el_li, el_li, el_o, el_o, el_o, el_o]
coords = [[0.666656, 0.666705, 0.750001],
[0.333342, 0.333378, 0.250001],
[0.000001, 0.000041, 0.500001],
[0.000001, 0.000021, 0.000001],
[0.333347, 0.333332, 0.649191],
[0.333322, 0.333353, 0.850803],
[0.666666, 0.666686, 0.350813],
[0.666665, 0.666684, 0.149189]]
struct = Structure(latt, elts, coords)
li2o2_entry = ComputedStructureEntry(struct, -3,
parameters={'is_hubbard': False,
'hubbards': None,
'run_type': 'GGA',
'potcar_symbols':
['PAW_PBE Fe 06Sep2000', 'PAW_PBE O 08Apr2002']})
li2o2_entry_corrected = self.compat.process_entry(li2o2_entry)
self.assertRaises(AssertionError, self.assertAlmostEqual,
*(li2o2_entry_corrected.energy, -3 - 0.44317 * 4, 4))
self.assertAlmostEqual(li2o2_entry_corrected.energy, -3 - 0.66975 * 4, 4)
def test_ozonide(self):
el_li = Element("Li")
el_o = Element("O")
elts = [el_li, el_o, el_o, el_o]
latt = Lattice.from_parameters(3.999911, 3.999911, 3.999911,
133.847504, 102.228244, 95.477342)
coords = [[0.513004, 0.513004, 1.000000],
[0.017616, 0.017616, 0.000000],
[0.649993, 0.874790, 0.775203],
[0.099587, 0.874790, 0.224797]]
struct = Structure(latt, elts, coords)
lio3_entry = ComputedStructureEntry(struct, -3,
parameters={'is_hubbard': False,
'hubbards': None,
'run_type': 'GGA',
'potcar_symbols':
['PAW_PBE Fe 06Sep2000', 'PAW_PBE O 08Apr2002']})
lio3_entry_corrected = self.compat.process_entry(lio3_entry)
self.assertAlmostEqual(lio3_entry_corrected.energy, -3.0 - 3 * 0.66975)
class AqueousCorrectionTest(unittest.TestCase):
def setUp(self):
module_dir = os.path.dirname(os.path.abspath(__file__))
fp = os.path.join(module_dir, os.path.pardir, "MITCompatibility.yaml")
self.corr = AqueousCorrection(fp)
def test_compound_energy(self):
O2_entry = self.corr.correct_entry(ComputedEntry(Composition("O2"),
-4.9355 * 2))
H2_entry = self.corr.correct_entry(ComputedEntry(Composition("H2"), 3))
H2O_entry = self.corr.correct_entry(ComputedEntry(Composition("H2O"), 3))
H2O_formation_energy = H2O_entry.energy - (H2_entry.energy +
O2_entry.energy / 2.0)
self.assertAlmostEqual(H2O_formation_energy, -2.46, 2)
entry = ComputedEntry(Composition("H2O"), -16)
entry = self.corr.correct_entry(entry)
self.assertAlmostEqual(entry.energy, -14.916, 4)
entry = ComputedEntry(Composition("H2O"), -24)
entry = self.corr.correct_entry(entry)
self.assertAlmostEqual(entry.energy, -14.916, 4)
entry = ComputedEntry(Composition("Cl"), -24)
entry = self.corr.correct_entry(entry)
self.assertAlmostEqual(entry.energy, -24.344373, 4)
class TestMITAqueousCompatibility(unittest.TestCase):
def setUp(self):
self.compat = MITCompatibility()
self.aqcompat = MITAqueousCompatibility()
module_dir = os.path.dirname(os.path.abspath(__file__))
fp = os.path.join(module_dir, os.path.pardir, "MITCompatibility.yaml")
self.aqcorr = AqueousCorrection(fp)
def test_aqueous_compat(self):
el_li = Element("Li")
el_o = Element("O")
el_h = Element("H")
latt = Lattice.from_parameters(3.565276, 3.565276, 4.384277, 90.000000, 90.000000, 90.000000)
elts = [el_h, el_h, el_li, el_li, el_o, el_o]
coords = [[0.000000, 0.500000, 0.413969],
[0.500000, 0.000000, 0.586031],
[0.000000, 0.000000, 0.000000],
[0.500000, 0.500000, 0.000000],
[0.000000, 0.500000, 0.192672],
[0.500000, 0.000000, 0.807328]]
struct = Structure(latt, elts, coords)
lioh_entry = ComputedStructureEntry(struct, -3,
parameters={'is_hubbard': False,
'hubbards': None,
'run_type': 'GGA',
'potcar_symbols':
['PAW_PBE Fe 17Jan2003', 'PAW_PBE O 08Apr2002', 'PAW_PBE H 15Jun2001']})
lioh_entry_compat = self.compat.process_entry(lioh_entry)
lioh_entry_compat_aqcorr = self.aqcorr.correct_entry(lioh_entry_compat)
lioh_entry_aqcompat = self.aqcompat.process_entry(lioh_entry)
self.assertAlmostEqual(lioh_entry_compat_aqcorr.energy, lioh_entry_aqcompat.energy, 4)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
yanikou19/pymatgen
|
pymatgen/entries/tests/test_compatibility.py
|
Python
|
mit
| 22,208
|
[
"pymatgen"
] |
074f2c11b7011a33792977057d17e94bed5a33a4672d703859d64d730b1a9541
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from contextlib import contextmanager
from unittest.mock import patch
from odoo.tests.common import TransactionCase, HttpCase
from odoo import Command
class TransactionCaseWithUserDemo(TransactionCase):
def setUp(self):
super(TransactionCaseWithUserDemo, self).setUp()
self.env.ref('base.partner_admin').write({'name': 'Mitchell Admin'})
self.user_demo = self.env['res.users'].search([('login', '=', 'demo')])
self.partner_demo = self.user_demo.partner_id
if not self.user_demo:
self.env['ir.config_parameter'].sudo().set_param('auth_password_policy.minlength', 4)
# YTI TODO: This could be factorized between the different classes
self.partner_demo = self.env['res.partner'].create({
'name': 'Marc Demo',
'email': 'mark.brown23@example.com',
})
self.user_demo = self.env['res.users'].create({
'login': 'demo',
'password': 'demo',
'partner_id': self.partner_demo.id,
'groups_id': [Command.set([self.env.ref('base.group_user').id, self.env.ref('base.group_partner_manager').id])],
})
class HttpCaseWithUserDemo(HttpCase):
def setUp(self):
super(HttpCaseWithUserDemo, self).setUp()
self.user_admin = self.env.ref('base.user_admin')
self.user_admin.write({'name': 'Mitchell Admin'})
self.partner_admin = self.user_admin.partner_id
self.user_demo = self.env['res.users'].search([('login', '=', 'demo')])
self.partner_demo = self.user_demo.partner_id
if not self.user_demo:
self.env['ir.config_parameter'].sudo().set_param('auth_password_policy.minlength', 4)
self.partner_demo = self.env['res.partner'].create({
'name': 'Marc Demo',
'email': 'mark.brown23@example.com',
})
self.user_demo = self.env['res.users'].create({
'login': 'demo',
'password': 'demo',
'partner_id': self.partner_demo.id,
'groups_id': [Command.set([self.env.ref('base.group_user').id, self.env.ref('base.group_partner_manager').id])],
})
class SavepointCaseWithUserDemo(TransactionCase):
@classmethod
def setUpClass(cls):
super(SavepointCaseWithUserDemo, cls).setUpClass()
cls.user_demo = cls.env['res.users'].search([('login', '=', 'demo')])
cls.partner_demo = cls.user_demo.partner_id
if not cls.user_demo:
cls.env['ir.config_parameter'].sudo().set_param('auth_password_policy.minlength', 4)
cls.partner_demo = cls.env['res.partner'].create({
'name': 'Marc Demo',
'email': 'mark.brown23@example.com',
})
cls.user_demo = cls.env['res.users'].create({
'login': 'demo',
'password': 'demo',
'partner_id': cls.partner_demo.id,
'groups_id': [Command.set([cls.env.ref('base.group_user').id, cls.env.ref('base.group_partner_manager').id])],
})
@classmethod
def _load_partners_set(cls):
cls.partner_category = cls.env['res.partner.category'].create({
'name': 'Sellers',
'color': 2,
})
cls.partner_category_child_1 = cls.env['res.partner.category'].create({
'name': 'Office Supplies',
'parent_id': cls.partner_category.id,
})
cls.partner_category_child_2 = cls.env['res.partner.category'].create({
'name': 'Desk Manufacturers',
'parent_id': cls.partner_category.id,
})
# Load all the demo partners
cls.partners = cls.env['res.partner'].create([
{
'name': 'Inner Works', # Wood Corner
'state_id': cls.env.ref('base.state_us_1').id,
'category_id': [Command.set([cls.partner_category_child_1.id, cls.partner_category_child_2.id,])],
'child_ids': [Command.create({
'name': 'Sheila Ruiz', # 'Willie Burke',
}), Command.create({
'name': 'Wyatt Howard', # 'Ron Gibson',
}), Command.create({
'name': 'Austin Kennedy', # Tom Ruiz
})],
}, {
'name': 'Pepper Street', # 'Deco Addict',
'state_id': cls.env.ref('base.state_us_2').id,
'child_ids': [Command.create({
'name': 'Liam King', # 'Douglas Fletcher',
}), Command.create({
'name': 'Craig Richardson', # 'Floyd Steward',
}), Command.create({
'name': 'Adam Cox', # 'Addison Olson',
})],
}, {
'name': 'AnalytIQ', #'Gemini Furniture',
'state_id': cls.env.ref('base.state_us_3').id,
'child_ids': [Command.create({
'name': 'Pedro Boyd', # Edwin Hansen
}), Command.create({
'name': 'Landon Roberts', # 'Jesse Brown',
'company_id': cls.env.ref('base.main_company').id,
}), Command.create({
'name': 'Leona Shelton', # 'Soham Palmer',
}), Command.create({
'name': 'Scott Kim', # 'Oscar Morgan',
})],
}, {
'name': 'Urban Trends', # 'Ready Mat',
'state_id': cls.env.ref('base.state_us_4').id,
'category_id': [Command.set([cls.partner_category_child_1.id, cls.partner_category_child_2.id,])],
'child_ids': [Command.create({
'name': 'Louella Jacobs', # 'Billy Fox',
}), Command.create({
'name': 'Albert Alexander', # 'Kim Snyder',
}), Command.create({
'name': 'Brad Castillo', # 'Edith Sanchez',
}), Command.create({
'name': 'Sophie Montgomery', # 'Sandra Neal',
}), Command.create({
'name': 'Chloe Bates', # 'Julie Richards',
}), Command.create({
'name': 'Mason Crawford', # 'Travis Mendoza',
}), Command.create({
'name': 'Elsie Kennedy', # 'Theodore Gardner',
})],
}, {
'name': 'Ctrl-Alt-Fix', # 'The Jackson Group',
'state_id': cls.env.ref('base.state_us_5').id,
'child_ids': [Command.create({
'name': 'carole miller', # 'Toni Rhodes',
}), Command.create({
'name': 'Cecil Holmes', # 'Gordon Owens',
})],
}, {
'name': 'Ignitive Labs', # 'Azure Interior',
'state_id': cls.env.ref('base.state_us_6').id,
'child_ids': [Command.create({
'name': 'Jonathan Webb', # 'Brandon Freeman',
}), Command.create({
'name': 'Clinton Clark', # 'Nicole Ford',
}), Command.create({
'name': 'Howard Bryant', # 'Colleen Diaz',
})],
}, {
'name': 'Amber & Forge', # 'Lumber Inc',
'state_id': cls.env.ref('base.state_us_7').id,
'child_ids': [Command.create({
'name': 'Mark Webb', # 'Lorraine Douglas',
})],
}, {
'name': 'Rebecca Day', # 'Chester Reed',
'parent_id': cls.env.ref('base.main_partner').id,
}, {
'name': 'Gabriella Jennings', # 'Dwayne Newman',
'parent_id': cls.env.ref('base.main_partner').id,
}
])
class HttpCaseWithUserPortal(HttpCase):
def setUp(self):
super(HttpCaseWithUserPortal, self).setUp()
self.user_portal = self.env['res.users'].search([('login', '=', 'portal')])
self.partner_portal = self.user_portal.partner_id
if not self.user_portal:
self.env['ir.config_parameter'].sudo().set_param('auth_password_policy.minlength', 4)
self.partner_portal = self.env['res.partner'].create({
'name': 'Joel Willis',
'email': 'joel.willis63@example.com',
})
self.user_portal = self.env['res.users'].with_context(no_reset_password=True).create({
'login': 'portal',
'password': 'portal',
'partner_id': self.partner_portal.id,
'groups_id': [Command.set([self.env.ref('base.group_portal').id])],
})
class MockSmtplibCase:
"""Class which allows you to mock the smtplib feature, to be able to test in depth the
sending of emails. Unlike "MockEmail" which mocks mainly the <ir.mail_server> methods,
here we mainly mock the smtplib to be able to test the <ir.mail_server> model.
"""
@contextmanager
def mock_smtplib_connection(self):
self.emails = []
origin = self
class TestingSMTPSession:
"""SMTP session object returned during the testing.
So we do not connect to real SMTP server. Store the mail
server id used for the SMTP connection and other information.
Can be mocked for testing to know which with arguments the email was sent.
"""
def quit(self):
pass
def send_message(self, message, smtp_from, smtp_to_list):
origin.emails.append({
'smtp_from': smtp_from,
'smtp_to_list': smtp_to_list,
'message': message.as_string(),
'from_filter': self.from_filter,
})
def sendmail(self, smtp_from, smtp_to_list, message_str, mail_options):
origin.emails.append({
'smtp_from': smtp_from,
'smtp_to_list': smtp_to_list,
'message': message_str,
'from_filter': self.from_filter,
})
def set_debuglevel(self, smtp_debug):
pass
def ehlo_or_helo_if_needed(self):
pass
def login(self, user, password):
pass
self.testing_smtp_session = TestingSMTPSession()
IrMailServer = self.env['ir.mail_server']
connect = IrMailServer.connect
find_mail_server = IrMailServer._find_mail_server
with patch.object(type(IrMailServer), '_is_test_mode', lambda self: False), \
patch('smtplib.SMTP_SSL', side_effect=lambda *args, **kwargs: self.testing_smtp_session), \
patch('smtplib.SMTP', side_effect=lambda *args, **kwargs: self.testing_smtp_session), \
patch.object(type(IrMailServer), 'connect', side_effect=connect) as connect_mocked, \
patch.object(type(IrMailServer), '_find_mail_server', side_effect=find_mail_server) as find_mail_server_mocked:
self.connect_mocked = connect_mocked
self.find_mail_server_mocked = find_mail_server_mocked
yield
def assert_email_sent_smtp(self, smtp_from=None, smtp_to_list=None, message_from=None, from_filter=None, emails_count=1):
"""Check that the given email has been sent.
If one of the parameter is None, it's just ignored and not used to retrieve the email.
:param smtp_from: FROM used for the authentication to the mail server
:param smtp_to_list: List of destination email address
:param message_from: FROM used in the SMTP headers
:param from_filter: from_filter of the <ir.mail_server> used to send the email
Can use a lambda to check the value
:param emails_count: the number of emails which should match the condition
:return: True if at least one email has been found with those parameters
"""
matching_emails = filter(
lambda email:
(smtp_from is None or (
smtp_from(email['smtp_from'])
if callable(smtp_from)
else smtp_from == email['smtp_from'])
)
and (smtp_to_list is None or smtp_to_list == email['smtp_to_list'])
and (message_from is None or 'From: %s' % message_from in email['message'])
and (from_filter is None or from_filter == email['from_filter']),
self.emails,
)
matching_emails_count = len(list(matching_emails))
self.assertTrue(
matching_emails_count == emails_count,
msg='Emails not sent, %i emails match the condition but %i are expected' % (matching_emails_count, emails_count),
)
@classmethod
def _init_mail_servers(cls):
cls.env['ir.config_parameter'].sudo().set_param('mail.catchall.domain', 'test.com')
cls.env['ir.config_parameter'].sudo().set_param('mail.default.from', 'notifications')
cls.env['ir.config_parameter'].sudo().set_param('mail.bounce.alias', 'bounce')
cls.alias_bounce = 'bounce'
cls.alias_domain = 'test.com'
cls.env['ir.mail_server'].search([]).unlink()
ir_mail_server_values = {
'smtp_host': 'smtp_host',
'smtp_encryption': 'none',
}
(
cls.server_domain,
cls.server_user,
cls.server_notification,
cls.server_default,
) = cls.env['ir.mail_server'].create([
{
'name': 'Domain based server',
'from_filter': 'test.com',
** ir_mail_server_values,
}, {
'name': 'User specific server',
'from_filter': 'specific_user@test.com',
** ir_mail_server_values,
}, {
'name': 'Server Notifications',
'from_filter': 'notifications@test.com',
** ir_mail_server_values,
}, {
'name': 'Server No From Filter',
'from_filter': False,
** ir_mail_server_values,
},
])
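# Illustrative sketch (an assumption, not part of these helpers): a test mixing
# MockSmtplibCase into a test case, capturing outgoing mail and asserting on it.
#
#   class TestOutgoingMail(TransactionCase, MockSmtplibCase):
#       def test_send(self):
#           self._init_mail_servers()
#           with self.mock_smtplib_connection():
#               ...  # trigger code that sends mail through ir.mail_server
#           self.assert_email_sent_smtp(smtp_to_list=['joel.willis63@example.com'])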
|
jeremiahyan/odoo
|
odoo/addons/base/tests/common.py
|
Python
|
gpl-3.0
| 14,419
|
[
"Amber"
] |
b8f834c33a6d12376581cc3dbe7a03c7893db2aef902b4e8ad7acc2abd5a8bb3
|
import time
import numpy as np
import theano
import theano.tensor as T
import ops
def expressions_YTYpR_byY(Y, lambda_reg):
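    # Build the regularized Gram matrix Y_e^T Y_e + lambda_reg * R shared by all
    # least-squares solves in a half-iteration, plus the bias cross term b_y^T Y_e.
    # The last column of Y holds the biases; R zeroes the corresponding diagonal
    # entry so that the biases are not regularized.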
f = Y.shape[1] - 1 # f = number of factors
Y_e = T.set_subtensor(Y[:, f], T.ones((Y.shape[0],))) # factors with biases replaced by a column of ones.
YTY = T.dot(Y_e.T, Y_e)
    R = T.eye(f + 1) # regularization matrix
R = T.set_subtensor(R[f, f], 0.0) # don't regularize the biases
R *= lambda_reg
YTYpR = YTY + R
b_y = Y[:, f]
byY = T.dot(b_y, Y_e)
return YTYpR, byY
def batch_solve_expression(A_batch, B_batch):
Binv_batch = ops.batched_inv(B_batch)
R_batch = ops.batched_dot(A_batch.dimshuffle(0, 'x', 1), Binv_batch) # need to turn the A vectors into single-row matrices for this
return R_batch[:, 0, :] # get rid of spurious dimension
def batch_update_expression(batch_index, Y, indptr, indices, data, YTYpR, byY, batch_size):
m = indptr.shape[0] - 1 # m = number of users
f = Y.shape[1] - 1 # f = number of factors
lo = batch_index * batch_size
hi = T.minimum((batch_index + 1) * batch_size, m)
current_batch_size = hi - lo
lo_batch = indptr[lo]
hi_batch = indptr[hi] # hi - 1 + 1
i_batch = indices[lo_batch:hi_batch]
s_batch = data[lo_batch:hi_batch]
Y_batch = Y[i_batch]
b_y_batch = Y_batch[:, f]
Y_e_batch = T.set_subtensor(Y_batch[:, f], T.ones((Y_batch.shape[0],)))
# precompute the left hand side of the dot product for computing A for the entire batch.
a_lhs_batch = (1 - b_y_batch) * s_batch + 1
# also precompute the right hand side of the dot product for computing B for the entire batch.
b_rhs_batch = Y_e_batch * s_batch.dimshuffle(0, 'x')
# precompute the terms of A so all we have to do is sum a bunch of vectors in the scan iterations.
# in each iteration, A = T.dot(a_lhs_u, Y_u)
# so we can precompute A_terms_batch = a_lhs_batch * Y_e_batch and then A = A_terms_batch[lo_iter:hi_iter].sum(0)
A_terms_batch = a_lhs_batch.dimshuffle(0, 'x') * Y_e_batch
# scan iteration helper function that computes A and B for the current index
def fn(k):
lo_iter = indptr[k] - lo_batch
hi_iter = indptr[k + 1] - lo_batch
s_u = s_batch[lo_iter:hi_iter]
Y_u = Y_e_batch[lo_iter:hi_iter]
A_terms_u = A_terms_batch[lo_iter:hi_iter]
b_rhs_u = b_rhs_batch[lo_iter:hi_iter]
A = A_terms_u.sum(axis=0)
B = T.dot(Y_u.T, b_rhs_u)
return A, B
(A_batch, B_batch), dummy_updates = theano.scan(fn, sequences=T.arange(lo, hi), name='AB_iter')
A_batch -= byY.dimshuffle('x', 0)
B_batch += YTYpR.dimshuffle('x', 0, 1)
X_batch = batch_solve_expression(A_batch, B_batch)
return X_batch
def factorize(S, num_factors, batch_size, lambda_reg=1e-5, num_iterations=20, init_std=0.01, verbose=False):
"""
factorize a given sparse matrix using the Weighted Matrix Factorization algorithm by
Hu, Koren and Volinsky.
This is a GPU-only implementation in Theano. It does not run on the CPU because some
of the custom ops use scikits.cuda.
S: 'surplus' confidence matrix, i.e. C - I where C is the matrix with confidence weights.
S is sparse while C is not (and the sparsity pattern of S is the same as that of
the preference matrix, so this matrix doesn't need to be specified separately).
num_factors: the number of factors.
batch_size: size of the batches used for batched matrix inversion on the GPU. Make this
big enough to benefit from the speedup, but don't make it too big or you will run
out of memory.
lambda_reg: the value of the regularization constant.
num_iterations: the number of iterations to run the algorithm for. Each iteration consists
of two steps, one to recompute U given V, and one to recompute V given U.
init_std: the standard deviation of the Gaussian with which V is initialized.
verbose: print a bunch of stuff during training, including timing information.
returns:
    U, V: factor matrices. The last columns of the matrices contain the biases.
"""
num_users, num_items = S.shape
if verbose:
print "precomputing transpose..."
ST = S.T.tocsr()
if verbose:
print "copying data to GPU..."
## define shared variables for everything that persists across the computation of a single batch
# input data
indptr = theano.shared(S.indptr.astype('int32'))
indices = theano.shared(S.indices.astype('int32'))
data = theano.shared(S.data.astype(theano.config.floatX))
indptr_t = theano.shared(ST.indptr.astype('int32'))
indices_t = theano.shared(ST.indices.astype('int32'))
data_t = theano.shared(ST.data.astype(theano.config.floatX))
# output (factors)
U = theano.shared(np.zeros((num_users, num_factors), dtype=theano.config.floatX)) # no need to initialize U randomly, it will be overwritten anyway
V = theano.shared(np.random.randn(num_items, num_factors).astype(theano.config.floatX) * init_std)
# things to precompute once per (half-)iteration (stays constant across batches)
UTUpR = theano.shared(np.zeros((num_factors, num_factors), dtype=theano.config.floatX)) # this is precomputed each iteration
buU = theano.shared(np.zeros((num_factors,), dtype=theano.config.floatX)) # this is precomputed each iteration
VTVpR = theano.shared(np.zeros((num_factors, num_factors), dtype=theano.config.floatX)) # this is precomputed each iteration
bvV = theano.shared(np.zeros((num_factors,), dtype=theano.config.floatX)) # this is precomputed each iteration
## symbolic batch index
batch_index = T.iscalar('batch_index')
if verbose:
print "compiling functions..."
## functions for precomputation per half-iteration
# for U (in terms of V)
new_VTVpR, new_bvV = expressions_YTYpR_byY(V, lambda_reg)
precompute_for_U = theano.function([], [], updates=[
(VTVpR, new_VTVpR),
(bvV, new_bvV),
])
# for V (in terms of U)
new_UTUpR, new_buU = expressions_YTYpR_byY(U, lambda_reg)
precompute_for_V = theano.function([], [], updates=[
(UTUpR, new_UTUpR),
(buU, new_buU),
])
## functions for batch updates
# for U (in terms of V)
lo_U = batch_index * batch_size
hi_U = T.minimum((batch_index + 1) * batch_size, num_users)
batch_update_expr_U = batch_update_expression(batch_index, V, indptr, indices, data, VTVpR, bvV, batch_size)
batch_update_U = theano.function([batch_index], [], updates=[
(U, T.set_subtensor(U[lo_U:hi_U], batch_update_expr_U)),
])
# for V (in terms of U)
lo_V = batch_index * batch_size
hi_V = T.minimum((batch_index + 1) * batch_size, num_items)
batch_update_expr_V = batch_update_expression(batch_index, U, indptr_t, indices_t, data_t, UTUpR, buU, batch_size)
batch_update_V = theano.function([batch_index], [], updates=[
(V, T.set_subtensor(V[lo_V:hi_V], batch_update_expr_V)),
])
## functions that perform half-iterations
num_batches_U = int(np.ceil(num_users / float(batch_size)))
num_batches_V = int(np.ceil(num_items / float(batch_size)))
def recompute_factors_U():
precompute_for_U()
for b in xrange(num_batches_U):
print b # DEBUG TODO
batch_update_U(b)
def recompute_factors_V():
precompute_for_V()
for b in xrange(num_batches_V):
print b # DEBUG TODO
batch_update_V(b)
if verbose:
print "running ALS algorithm"
start_time = time.time()
for i in xrange(num_iterations):
if verbose:
print "iteration %d" % i
print " recompute user factors U"
recompute_factors_U()
if verbose:
print " time since start: %.3f seconds" % (time.time() - start_time)
print " recompute item factors V"
recompute_factors_V()
if verbose:
print " time since start: %.3f seconds" % (time.time() - start_time)
return U.get_value(), V.get_value()
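# Hedged usage sketch for factorize(); building S as alpha * raw counts follows the
# Hu/Koren/Volinsky confidence weighting and is an assumption here, not something this
# module enforces. Variable names below are illustrative.
#
#   import scipy.sparse as sp
#   counts = sp.csr_matrix(raw_play_counts)   # num_users x num_items interaction counts
#   S = (40.0 * counts).tocsr()               # "surplus" confidence matrix C - I
#   U, V = factorize(S, num_factors=40, batch_size=1000,
#                    lambda_reg=1e-5, num_iterations=20, verbose=True)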
|
benanne/theano_wmf
|
theano_wmf.py
|
Python
|
mit
| 8,156
|
[
"Gaussian"
] |
fe85592313aeaf84c3b7a410cfa6c8bd5f805a85f51f5cd9a47bd70ab70d19ca
|
#!/usr/bin/env python
########################################################################
# File : dirac-wms-job-kill
# Author : Stuart Paterson
########################################################################
"""
Issue a kill signal to a running DIRAC job
Example:
$ dirac-wms-job-kill 1918
Killed job 1918
.. Note::
- jobs will not disappear from JobDB until JobCleaningAgent has deleted them
- jobs will be deleted "immediately" if they are in the status 'Deleted'
- USER jobs will be deleted after a grace period if they are in status Killed, Failed, Done
What happens when you hit the "kill job" button
- if the job is in status 'Running', 'Matched', 'Stalled' it will be properly killed, and then its
status will be marked as 'Killed'
- otherwise, it will be marked directly as 'Killed'.
"""
import DIRAC
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
@Script()
def main():
# Registering arguments will automatically add their description to the help menu
Script.registerArgument(["JobID: DIRAC Job ID"])
_, args = Script.parseCommandLine(ignoreErrors=True)
from DIRAC.Interfaces.API.Dirac import Dirac, parseArguments
result = Dirac().killJob(parseArguments(args))
if result["OK"]:
print("Killed jobs %s" % ",".join([str(j) for j in result["Value"]]))
exitCode = 0
else:
print("ERROR", result["Message"])
exitCode = 2
DIRAC.exit(exitCode)
if __name__ == "__main__":
main()
|
ic-hep/DIRAC
|
src/DIRAC/Interfaces/scripts/dirac_wms_job_kill.py
|
Python
|
gpl-3.0
| 1,520
|
[
"DIRAC"
] |
2df944cf9d98e51521b85b437f6f8eb2d6baaf8d663d89a468ea9f155ddc2719
|
# -*- coding: utf-8 -*-
u"""SRW execution template.
:copyright: Copyright (c) 2015 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcompat
from pykern import pkinspect
from pykern import pkio
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdexc, pkdlog, pkdp
from sirepo import crystal
from sirepo import job
from sirepo import simulation_db
from sirepo.template import srw_common
from sirepo.template import template_common
import copy
import glob
import math
import numpy as np
import os
import py.path
import pykern.pkjson
import re
import sirepo.mpi
import sirepo.sim_data
import sirepo.template.srw_fixup
import sirepo.uri_router
import sirepo.util
import srwlib
import time
import traceback
import uti_io
import uti_math
import uti_plot_com
import werkzeug
import zipfile
_SIM_DATA, SIM_TYPE, _SCHEMA = sirepo.sim_data.template_globals()
WANT_BROWSER_FRAME_CACHE = False
PARSED_DATA_ATTR = 'srwParsedData'
_CANVAS_MAX_SIZE = 65535
_BRILLIANCE_OUTPUT_FILE = 'res_brilliance.dat'
_MIRROR_OUTPUT_FILE = 'res_mirror.dat'
_DATA_FILE_FOR_MODEL = PKDict({
'coherenceXAnimation': {'filename': 'res_int_pr_me_dcx.dat', 'dimension': 3},
'coherenceYAnimation': {'filename': 'res_int_pr_me_dcy.dat', 'dimension': 3},
'fluxAnimation': {'filename': 'res_spec_me.dat', 'dimension': 2},
'fluxReport': {'filename': 'res_spec_me.dat', 'dimension': 2},
'initialIntensityReport': {'filename': 'res_int_se.dat', 'dimension': 3},
'intensityReport': {'filename': 'res_spec_se.dat', 'dimension': 2},
'mirrorReport': {'filename': _MIRROR_OUTPUT_FILE, 'dimension': 3},
'multiElectronAnimation': {'filename': 'res_int_pr_me.dat', 'dimension': 3},
'powerDensityReport': {'filename': 'res_pow.dat', 'dimension': 3},
'sourceIntensityReport': {'filename': 'res_int_se.dat', 'dimension': 3},
'brillianceReport': {'filename': _BRILLIANCE_OUTPUT_FILE, 'dimension': 2},
'trajectoryReport': {'filename': 'res_trj.dat', 'dimension': 2},
'beamline3DReport': {'filename': 'beamline_orient.dat', 'dimension': 2},
_SIM_DATA.WATCHPOINT_REPORT: {'filename': 'res_int_pr_se.dat', 'dimension': 3},
})
_LOG_DIR = '__srwl_logs__'
_JSON_MESSAGE_EXPANSION = 20
_RSOPT_PARAMS = {
i for sublist in [v for v in [list(_SCHEMA.constants.rsOptElements[k].keys()) for
k in _SCHEMA.constants.rsOptElements]] for i in sublist
}
_TABULATED_UNDULATOR_DATA_DIR = 'tabulatedUndulator'
_USER_MODEL_LIST_FILENAME = PKDict({
'electronBeam': '_user_beam_list.json',
'tabulatedUndulator': '_user_undulator_list.json',
})
_IMPORT_PYTHON_POLLS = 60
class MagnMeasZip:
def __init__(self, archive_name):
"""The class for convenient operation with an archive with the magnetic measurements.
Args:
archive_name: the name of the archive.
"""
self.z = zipfile.ZipFile(archive_name)
self.index_dir = None
self.index_file = None
self.gaps = None
self.dat_files = None
self._find_index_file()
self._find_dat_files_from_index_file()
def find_closest_gap(self, gap):
gap = float(gap)
indices_previous = []
indices_next = []
for i in range(len(self.gaps)):
if self.gaps[i] <= gap:
indices_previous.append(i)
else:
indices_next.append(i)
assert indices_previous or indices_next
idx_previous = indices_previous[-1] if indices_previous else indices_next[0]
idx_next = indices_next[0] if indices_next else indices_previous[-1]
idx = idx_previous if abs(self.gaps[idx_previous] - gap) <= abs(self.gaps[idx_next] - gap) else idx_next
dat_file = self.dat_files[idx]
dat_content = self._get_file_content(dat_file)
dat_file_step = float(dat_content[8].split('#')[1].strip())
dat_file_number_of_points = int(dat_content[9].split('#')[1].strip())
return round(dat_file_step * dat_file_number_of_points, 6)
def _find_dat_files_from_index_file(self):
self.gaps = []
self.dat_files = []
for row in self._get_file_content(self.index_file):
v = row.strip()
if v:
v = v.split()
self.gaps.append(float(v[0]))
self.dat_files.append(v[3])
def _find_index_file(self):
# finds an index file (``*.txt``) in the provided zip-object.
for f in self.z.namelist():
if re.search(r'\.txt', f):
self.index_file = os.path.basename(f)
self.index_dir = os.path.dirname(f)
break
assert self.index_file is not None
def _get_file_content(self, file_name):
with self.z.open(os.path.join(self.index_dir, file_name)) as f:
return self._normalize_eol(f)
def _normalize_eol(self, file_desc):
s = file_desc.read().decode().replace('\r\n', '\n').replace('\r', '\n')
content = s.split('\n')
return content
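# Illustrative use of MagnMeasZip (the archive name is hypothetical): pick the measurement
# set whose gap is closest to the requested one and derive the corresponding undulator
# length, as compute_undulator_length() does below.
#
#   z = MagnMeasZip('/path/to/magn_meas.zip')
#   length = z.find_closest_gap(8.0)  # gap in the units used by the archive's index file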
def background_percent_complete(report, run_dir, is_running):
res = PKDict({
'percentComplete': 0,
'frameCount': 0,
})
filename = run_dir.join(get_filename_for_model(report))
if filename.exists():
status = PKDict({
'progress': 100,
'particle_number': 0,
'total_num_of_particles': 0,
})
status_files = pkio.sorted_glob(run_dir.join(_LOG_DIR, 'srwl_*.json'))
if status_files: # Read the status file if SRW produces the multi-e logs
progress_file = py.path.local(status_files[-1])
if progress_file.exists():
status = simulation_db.read_json(progress_file)
t = int(filename.mtime())
if not is_running and report == 'fluxAnimation':
# let the client know which flux method was used for the output
data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
res['method'] = data['models']['fluxAnimation']['method']
if report == 'multiElectronAnimation':
# let client know that degree of coherence reports are also available
res['calcCoherence'] = run_dir.join(get_filename_for_model('coherenceXAnimation')).exists()
res.update({
'frameCount': t + 1,
'frameIndex': t,
'lastUpdateTime': t,
'percentComplete': status['progress'],
'particleNumber': status['particle_number'],
'particleCount': status['total_num_of_particles'],
})
return res
def calculate_beam_drift(ebeam_position, source_type, undulator_type, undulator_length, undulator_period):
if ebeam_position['driftCalculationMethod'] == 'auto':
"""Calculate drift for ideal undulator."""
if _SIM_DATA.srw_is_idealized_undulator(source_type, undulator_type):
# initial drift = 1/2 undulator length + 2 periods
return -0.5 * float(undulator_length) - 2 * float(undulator_period)
return 0
return ebeam_position['drift']
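# Worked example (illustrative numbers): with driftCalculationMethod == 'auto' and an
# idealized 3.0 m undulator with a 0.02 m period, the drift is
# -0.5 * 3.0 - 2 * 0.02 = -1.54 m; for non-idealized sources the 'auto' branch returns 0.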
def compute_crl_focus(model):
import bnlcrl.pkcli.simulate
d = bnlcrl.pkcli.simulate.calc_ideal_focus(
radius=float(model['tipRadius']) * 1e-6, # um -> m
n=model['numberOfLenses'],
delta=model['refractiveIndex'],
p0=model['position']
)
model['focalDistance'] = d['ideal_focus']
model['absoluteFocusPosition'] = d['p1_ideal_from_source']
return model
def compute_undulator_length(model):
if model['undulatorType'] == 'u_i':
return PKDict()
if _SIM_DATA.lib_file_exists(model['magneticFile']):
z = _SIM_DATA.lib_file_abspath(model['magneticFile'])
return PKDict(
length=_SIM_DATA.srw_format_float(
MagnMeasZip(str(z)).find_closest_gap(model['gap']),
),
)
return PKDict()
def copy_related_files(data, source_path, target_path):
# copy results and log for the long-running simulations
for d in ('fluxAnimation', 'multiElectronAnimation'):
source_dir = py.path.local(source_path).join(d)
if source_dir.exists():
target_dir = py.path.local(target_path).join(d)
pkio.mkdir_parent(str(target_dir))
for f in glob.glob(str(source_dir.join('*'))):
name = os.path.basename(f)
if re.search(r'^res.*\.dat$', name) or re.search(r'\.json$', name):
py.path.local(f).copy(target_dir)
source_log_dir = source_dir.join(_LOG_DIR)
if source_log_dir.exists():
target_log_dir = target_dir.join(_LOG_DIR)
pkio.mkdir_parent(str(target_log_dir))
for f in glob.glob(str(source_log_dir.join('*.json'))):
py.path.local(f).copy(target_log_dir)
def clean_run_dir(run_dir):
zip_dir = run_dir.join(_TABULATED_UNDULATOR_DATA_DIR)
if zip_dir.exists():
zip_dir.remove()
def extract_report_data(filename, sim_in):
import uti_plot_com
r = sim_in.report
m = sim_in.models
# special case for 3d beamline report
if r == 'beamline3DReport':
return _extract_beamline_orientation(filename)
#TODO(pjm): remove fixup after dcx/dcy files can be read by uti_plot_com
if re.search(r'/res_int_pr_me_dc.\.dat', filename):
_fix_file_header(filename)
data, _, allrange, _, _ = uti_plot_com.file_load(filename, multicolumn_data=r in ('brillianceReport', 'trajectoryReport'))
if r == 'brillianceReport':
return _extract_brilliance_report(m['brillianceReport'], data)
if r == 'trajectoryReport':
return _extract_trajectory_report(m['trajectoryReport'], data)
flux_type = 1
if 'report' in sim_in and r in ['fluxReport', 'fluxAnimation']:
flux_type = int(m[r]['fluxType'])
sValShort = 'Flux'; sValType = 'Flux through Finite Aperture'; sValUnit = 'ph/s/.1%bw'
if flux_type == 2:
sValShort = 'Intensity'
sValUnit = 'ph/s/.1%bw/mm^2'
is_gaussian = False
if 'models' in sim_in and _SIM_DATA.srw_is_gaussian_source(m['simulation']):
is_gaussian = True
#TODO(pjm): move filename and metadata to a constant, using _DATA_FILE_FOR_MODEL
if r == 'initialIntensityReport':
before_propagation_name = 'Before Propagation (E={photonEnergy} eV)'
elif r == 'sourceIntensityReport':
before_propagation_name = 'E={sourcePhotonEnergy} eV'
else:
before_propagation_name = 'E={photonEnergy} eV'
file_info = PKDict({
'res_spec_se.dat': [['Photon Energy', 'Intensity', 'On-Axis Spectrum from Filament Electron Beam'], ['eV', _intensity_units(is_gaussian, sim_in)]],
'res_spec_me.dat': [['Photon Energy', sValShort, sValType], ['eV', sValUnit]],
'res_pow.dat': [['Horizontal Position', 'Vertical Position', 'Power Density', 'Power Density'], ['m', 'm', 'W/mm^2']],
'res_int_se.dat': [['Horizontal Position', 'Vertical Position', before_propagation_name, 'Intensity'], ['m', 'm', _intensity_units(is_gaussian, sim_in)]],
#TODO(pjm): improve multi-electron label
'res_int_pr_me.dat': [['Horizontal Position', 'Vertical Position', before_propagation_name, 'Intensity'], ['m', 'm', _intensity_units(is_gaussian, sim_in)]],
'res_int_pr_me_dcx.dat': [['(X1 + X2) / 2', '(X1 - X2) / 2', '', 'Degree of Coherence'], ['m', 'm', '']],
'res_int_pr_me_dcy.dat': [['(Y1 + Y2) / 2', '(Y1 - Y2) / 2', '', 'Degree of Coherence'], ['m', 'm', '']],
'res_int_pr_se.dat': [['Horizontal Position', 'Vertical Position', 'After Propagation (E={photonEnergy} eV)', 'Intensity'], ['m', 'm', _intensity_units(is_gaussian, sim_in)]],
_MIRROR_OUTPUT_FILE: [['Horizontal Position', 'Vertical Position', 'Optical Path Difference', 'Optical Path Difference'], ['m', 'm', 'm']],
})
filename = os.path.basename(filename)
title = file_info[filename][0][2]
if '{photonEnergy}' in title:
title = title.format(photonEnergy=m['simulation']['photonEnergy'])
elif '{sourcePhotonEnergy}' in title:
title = title.format(sourcePhotonEnergy=m['sourceIntensityReport']['photonEnergy'])
y_units = file_info[filename][1][1]
if y_units == 'm':
y_units = '[m]'
else:
y_units = '({})'.format(y_units)
subtitle = ''
schema_enum = []
report_model = m[r]
subtitle_datum = ''
subtitle_format = '{}'
if r in ('intensityReport',):
schema_enum = _SCHEMA['enum']['Polarization']
subtitle_datum = report_model['polarization']
subtitle_format = '{} Polarization'
elif r in ('initialIntensityReport', 'sourceIntensityReport') or _SIM_DATA.is_watchpoint(r):
schema_enum = _SCHEMA['enum']['Characteristic']
subtitle_datum = report_model['characteristic']
# Schema enums are indexed by strings, but model data may be numeric
schema_values = [e for e in schema_enum if e[0] == str(subtitle_datum)]
if len(schema_values) > 0:
subtitle = subtitle_format.format(schema_values[0][1])
info = PKDict({
'title': title,
'subtitle': subtitle,
'x_range': [allrange[0], allrange[1]],
'y_label': _superscript(file_info[filename][0][1] + ' ' + y_units),
'x_label': file_info[filename][0][0] + ' [' + file_info[filename][1][0] + ']',
'x_units': file_info[filename][1][0],
'y_units': file_info[filename][1][1],
'points': data,
'z_range' : [np.min(data), np.max(data)],
# send the full plot ranges as summaryData
'summaryData': PKDict(
fieldRange=allrange,
fieldIntensityRange=report_model.get('summaryData', {}).get('fieldIntensityRange', [np.min(data), np.max(data)]),
),
})
rep_name = _SIM_DATA.WATCHPOINT_REPORT if _SIM_DATA.is_watchpoint(r) else r
if _DATA_FILE_FOR_MODEL[rep_name]['dimension'] == 3:
info = _remap_3d(info, allrange, file_info[filename][0][3], file_info[filename][1][2], report_model)
return info
def export_rsopt_config(data, filename):
v = _rsopt_jinja_context(data.models.exportRsOpt)
fz = pkio.py_path(filename)
f = re.sub(r'[^\w\.]+', '-', fz.purebasename).strip('-')
v.runDir = f'{f}_scan'
v.fileBase = f
tf = {k: PKDict(file=f'{f}.{k}') for k in ['py', 'sh', 'yml']}
for t in tf:
v[f'{t}FileName'] = tf[t].file
v['outFileName'] = f'{f}.out'
# do this in a second loop so v is fully updated
# note that the rsopt context is regenerated in python_source_for_model()
for t in tf:
tf[t].content = python_source_for_model(data, 'rsoptExport', plot_reports=False) \
if t == 'py' else \
template_common.render_jinja(SIM_TYPE, v, f'rsoptExport.{t}')
with zipfile.ZipFile(
fz,
mode='w',
compression=zipfile.ZIP_DEFLATED,
allowZip64=True,
) as z:
for t in tf:
z.writestr(tf[t].file, tf[t].content)
for d in _SIM_DATA.lib_files_for_export(data):
z.write(d, d.basename)
return fz
def get_application_data(data, **kwargs):
if data['method'] == 'model_list':
res = []
model_name = data['model_name']
if model_name == 'electronBeam':
res.extend(get_predefined_beams())
res.extend(_load_user_model_list(model_name))
if model_name == 'electronBeam':
for beam in res:
srw_common.process_beam_parameters(beam)
return PKDict({
'modelList': res
})
if data.method == 'create_shadow_simulation':
from sirepo.template.srw_shadow_converter import SRWShadowConverter
return SRWShadowConverter().srw_to_shadow(data)
if data['method'] == 'delete_user_models':
return _delete_user_models(data['electron_beam'], data['tabulated_undulator'])
# TODO(e-carlin): This doesn't seem to be used in GUI? Discuss with pjm
# elif data['method'] == 'compute_grating_orientation':
# return _compute_grating_orientation(data['optical_element'])
elif data['method'] == 'compute_undulator_length':
return compute_undulator_length(data['tabulated_undulator'])
elif data['method'] == 'processedImage':
try:
return _process_image(data, kwargs['tmp_dir'])
except Exception as e:
pkdlog('exception during processedImage: {}', pkdexc())
return PKDict(
error=str(e),
)
raise RuntimeError('unknown application data method: {}'.format(data['method']))
def get_data_file(run_dir, model, frame, **kwargs):
return get_filename_for_model(model)
def get_filename_for_model(model):
if _SIM_DATA.is_watchpoint(model):
model = _SIM_DATA.WATCHPOINT_REPORT
return _DATA_FILE_FOR_MODEL[model]['filename']
def get_predefined_beams():
return _SIM_DATA.srw_predefined().beams
def sim_frame(frame_args):
r = frame_args.frameReport
if r == 'multiElectronAnimation':
m = frame_args.sim_in.models[r]
m.intensityPlotsWidth = frame_args.intensityPlotsWidth
if frame_args.get('rotateAngle', 0):
m.rotateAngle = float(frame_args.rotateAngle)
m.rotateReshape = frame_args.rotateReshape
else:
m.rotateAngle = 0
for i in (1, 2, 3):
try:
return extract_report_data(
str(frame_args.run_dir.join(get_filename_for_model(r))),
frame_args.sim_in,
)
except Exception:
# sleep and retry to work-around concurrent file read/write
pkdlog('sleep and retry simulation frame read: {} {}', i, r)
time.sleep(2)
return extract_report_data(
str(frame_args.run_dir.join(get_filename_for_model(r))),
frame_args.sim_in,
)
def import_file(req, tmp_dir, **kwargs):
import sirepo.server
i = None
try:
r = kwargs['reply_op'](simulation_db.default_data(SIM_TYPE))
d = pykern.pkjson.load_any(r.data)
i = d.models.simulation.simulationId
b = d.models.backgroundImport = PKDict(
arguments=req.import_file_arguments,
python=pkcompat.from_bytes(req.file_stream.read()),
userFilename=req.filename,
)
# POSIT: import.py uses ''', but we just don't allow quotes in names
if "'" in b.arguments:
raise sirepo.util.UserAlert('arguments may not contain quotes')
if "'" in b.userFilename:
raise sirepo.util.UserAlert('filename may not contain quotes')
d.pkupdate(
report='backgroundImport',
forceRun=True,
simulationId=i,
)
r = sirepo.uri_router.call_api('runSimulation', data=d)
for _ in range(_IMPORT_PYTHON_POLLS):
if r.status_code != 200:
raise sirepo.util.UserAlert(
'error parsing python',
'unexpected response status={} data={}',
r.status_code,
r.data,
)
try:
r = pykern.pkjson.load_any(r.data)
except Exception as e:
raise sirepo.util.UserAlert(
'error parsing python',
'error={} parsing response data={}',
e,
r.data,
)
if 'error' in r:
pkdc('runSimulation error msg={}', r)
raise sirepo.util.UserAlert(r.get('error'))
if PARSED_DATA_ATTR in r:
break
if 'nextRequest' not in r:
raise sirepo.util.UserAlert(
'error parsing python',
                        'unable to find {} or nextRequest in response={}',
PARSED_DATA_ATTR,
r,
)
time.sleep(r.nextRequestSeconds)
r = sirepo.uri_router.call_api('runStatus', data=r.nextRequest)
else:
raise sirepo.util.UserAlert(
'error parsing python',
'polled too many times, last response={}',
r,
)
r = r.get(PARSED_DATA_ATTR)
r.models.simulation.simulationId = i
r = simulation_db.save_simulation_json(r, do_validate=True)
except Exception:
#TODO(robnagler) need to clean up simulations except in dev
raise
if i:
try:
simulation_db.delete_simulation(req.type, i)
except Exception:
pass
raise
raise sirepo.util.Response(sirepo.server.api_simulationData(r.simulationType, i, pretty=False))
def new_simulation(data, new_simulation_data):
sim = data['models']['simulation']
sim['sourceType'] = new_simulation_data['sourceType']
if _SIM_DATA.srw_is_gaussian_source(sim):
data['models']['initialIntensityReport']['sampleFactor'] = 0
elif _SIM_DATA.srw_is_dipole_source(sim):
data['models']['intensityReport']['method'] = "2"
elif _SIM_DATA.srw_is_arbitrary_source(sim):
data['models']['sourceIntensityReport']['method'] = "2"
elif _SIM_DATA.srw_is_tabulated_undulator_source(sim):
data['models']['undulator']['length'] = compute_undulator_length(data['models']['tabulatedUndulator'])['length']
data['models']['electronBeamPosition']['driftCalculationMethod'] = 'manual'
def prepare_for_client(data):
save = False
for model_name in _USER_MODEL_LIST_FILENAME.keys():
if model_name == 'tabulatedUndulator' and not _SIM_DATA.srw_is_tabulated_undulator_source(data['models']['simulation']):
# don't add a named undulator if tabulated is not the current source type
continue
model = data['models'][model_name]
if _SIM_DATA.srw_is_user_defined_model(model):
user_model_list = _load_user_model_list(model_name)
search_model = None
models_by_id = _user_model_map(user_model_list, 'id')
if 'id' in model and model['id'] in models_by_id:
search_model = models_by_id[model['id']]
if search_model:
data['models'][model_name] = search_model
if model_name == 'tabulatedUndulator':
del data['models'][model_name]['undulator']
else:
pkdc('adding model: {}', model['name'])
if model['name'] in _user_model_map(user_model_list, 'name'):
model['name'] = _unique_name(user_model_list, 'name', model['name'] + ' {}')
selectorName = 'beamSelector' if model_name == 'electronBeam' else 'undulatorSelector'
model[selectorName] = model['name']
model['id'] = _unique_name(user_model_list, 'id', data['models']['simulation']['simulationId'] + ' {}')
user_model_list.append(_create_user_model(data, model_name))
_save_user_model_list(model_name, user_model_list)
save = True
if save:
pkdc("save simulation json with sim_data_template_fixup={}", data.get('sim_data_template_fixup', None))
simulation_db.save_simulation_json(data)
return data
def prepare_for_save(data):
for model_name in _USER_MODEL_LIST_FILENAME.keys():
if model_name == 'tabulatedUndulator' and not _SIM_DATA.srw_is_tabulated_undulator_source(data['models']['simulation']):
# don't add a named undulator if tabulated is not the current source type
continue
model = data['models'][model_name]
if _SIM_DATA.srw_is_user_defined_model(model):
user_model_list = _load_user_model_list(model_name)
models_by_id = _user_model_map(user_model_list, 'id')
if model['id'] not in models_by_id:
pkdc('adding new model: {}', model['name'])
user_model_list.append(_create_user_model(data, model_name))
_save_user_model_list(model_name, user_model_list)
elif models_by_id[model['id']] != model:
pkdc('replacing beam: {}: {}', model['id'], model['name'])
for i,m in enumerate(user_model_list):
if m['id'] == model['id']:
pkdc('found replace beam, id: {}, i: {}', m['id'], i)
user_model_list[i] = _create_user_model(data, model_name)
_save_user_model_list(model_name, user_model_list)
break
return data
def prepare_sequential_output_file(run_dir, sim_in):
m = sim_in.report
if m in ('brillianceReport', 'mirrorReport'):
return
#TODO(pjm): only need to rerun extract_report_data() if report style fields have changed
fn = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME, run_dir)
if fn.exists():
fn.remove()
output_file = run_dir.join(get_filename_for_model(m))
if output_file.exists():
res = extract_report_data(str(output_file), sim_in)
template_common.write_sequential_result(res, run_dir=run_dir)
def process_undulator_definition(model):
"""Convert K -> B and B -> K."""
try:
if model['undulator_definition'] == 'B':
# Convert B -> K:
und = srwlib.SRWLMagFldU([srwlib.SRWLMagFldH(1, 'v', float(model['amplitude']), 0, 1)], float(model['undulator_period']))
model['undulator_parameter'] = _SIM_DATA.srw_format_float(und.get_K())
elif model['undulator_definition'] == 'K':
# Convert K to B:
und = srwlib.SRWLMagFldU([], float(model['undulator_period']))
model['amplitude'] = _SIM_DATA.srw_format_float(
und.K_2_B(float(model['undulator_parameter'])),
)
return model
except Exception:
return model
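# Illustrative note (not part of the original module): the K <-> B conversion above
# uses the standard undulator deflection-parameter relation
#   K = e * B * lambda_u / (2 * pi * m_e * c) ~= 0.09337 * B[T] * lambda_u[mm]
# e.g. B = 1.0 T and lambda_u = 20 mm give K ~= 1.87; the srwlib helpers called in
# process_undulator_definition() implement this relation for a single harmonic.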
def python_source_for_model(data, model, plot_reports=True):
data['report'] = model or _SIM_DATA.SRW_RUN_ALL_MODEL
return _generate_parameters_file(data, plot_reports=plot_reports)
def remove_last_frame(run_dir):
pass
def stateless_compute_compute_PGM_value(data):
return _compute_PGM_value(data.optical_element)
def stateless_compute_compute_crl_characteristics(data):
return compute_crl_focus(_compute_material_characteristics(
data.optical_element,
data.photon_energy,
))
def stateless_compute_compute_crystal_init(data):
return _compute_crystal_init(data.optical_element)
def stateless_compute_compute_crystal_orientation(data):
return _compute_crystal_orientation(data.optical_element)
def stateless_compute_compute_delta_atten_characteristics(data):
return _compute_material_characteristics(
data.optical_element,
data.photon_energy,
)
def stateless_compute_compute_dual_characteristics(data):
return _compute_material_characteristics(
_compute_material_characteristics(
data.optical_element,
data.photon_energy,
prefix=data.prefix1,
),
data.photon_energy,
prefix=data.prefix2,
)
def stateless_compute_compute_grazing_orientation(data):
return _compute_grazing_orientation(data.optical_element)
def stateless_compute_process_beam_parameters(data):
data.ebeam = srw_common.process_beam_parameters(data.ebeam)
data.ebeam.drift = calculate_beam_drift(
data.ebeam_position,
data.source_type,
data.undulator_type,
data.undulator_length,
data.undulator_period,
)
return data.ebeam
def stateless_compute_process_undulator_definition(data):
return process_undulator_definition(data)
def validate_file(file_type, path):
"""Ensure the data file contains parseable rows data"""
import srwl_uti_smp
if not _SIM_DATA.srw_is_valid_file_type(file_type, path):
return 'invalid file type: {}'.format(path.ext)
if file_type == 'mirror':
# mirror file
try:
count = 0
with open(str(path)) as f:
for line in f.readlines():
parts = line.split("\t")
if len(parts) > 0:
float(parts[0])
if len(parts) > 1:
float(parts[1])
count += 1
if count == 0:
return 'no data rows found in file'
except ValueError as e:
return 'invalid file format: {}'.format(e)
elif file_type == 'undulatorTable':
# undulator magnetic data file
try:
_validate_safe_zip(str(path), '.', validate_magnet_data_file)
except AssertionError as err:
            return str(err)
elif file_type == 'sample':
srwl_uti_smp.SRWLUtiSmp(
file_path=str(path),
# srw processes the image so we save to tmp location
is_save_images=True,
prefix=path.purebasename,
)
if not _SIM_DATA.srw_is_valid_file(file_type, path):
return 'Column count is incorrect for file type: {}'.format(file_type)
return None
def validate_magnet_data_file(zf):
"""Validate a zip file containing tabulated magentic data
Performs the following checks:
- Only .txt and .dat files are allowed
- Zip file must contain one and only one .txt file to use as an index
- The index file must list the data files with the name in the 4th column
- Zip file must contain only the index file and the data files it lists
Args:
zf (zipfile.ZipFile): the zip file to examine
Returns:
True if all conditions are met, False otherwise
A string for debugging purposes
"""
import collections
def index_file_name(zf):
# Apparently pkio.has_file_extension will return true for any extension if fed a directory path ('some_dir/')
text_files = [f for f in zf.namelist() if not f.endswith('/') and pkio.has_file_extension(f, 'txt')]
if len(text_files) != 1:
return None
return text_files[0]
# Check against whitelist
for f in zf.namelist():
# allow directories
if f.endswith('/'):
continue
if not template_common.file_extension_ok(f, white_list=['txt', 'dat']):
return False, 'File {} has forbidden type'.format(f)
file_name_column = 3
# Assure unique index exists
if index_file_name(zf) is None:
return False, 'Zip file has no unique index'
# Validate correct number of columns (plus other format validations if needed)
index_file = zf.open(index_file_name(zf))
lines = index_file.readlines()
file_names_in_index = []
for line in lines:
cols = line.split()
if len(cols) <= file_name_column:
            return False, 'Index file {} has bad format'.format(index_file_name(zf))
file_names_in_index.append(cols[file_name_column].decode())
# Compare index and zip contents
# Does not include the index itself, nor any directories
# also extract the filename since the index does not include path info
file_names_in_zip = list(map(lambda path: os.path.basename(path), [f for f in zf.namelist() if not f.endswith('/') and f != index_file_name(zf)]))
files_match = collections.Counter(file_names_in_index) == collections.Counter(file_names_in_zip)
return files_match, '' if files_match else 'Files in index {} do not match files in zip {}'.format(file_names_in_index, file_names_in_zip)
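# Illustrative sketch (hypothetical file names and columns, not part of the original
# module): a valid undulatorTable zip might look like
#   ivu21_sum.txt        <- the single .txt index file
#   ivu21_g6_2c.dat      <- data files listed in the index
#   ivu21_g7_2c.dat
# with each index line carrying the data file name in its 4th whitespace-separated
# column (file_name_column = 3 above), e.g. "6.0  0.0  1  ivu21_g6_2c.dat".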
def write_parameters(data, run_dir, is_parallel):
"""Write the parameters file
Args:
data (dict): input
run_dir (py.path): where to write
is_parallel (bool): run in background?
"""
pkdc('write_parameters file to {}'.format(run_dir))
pkio.write_text(
run_dir.join(template_common.PARAMETERS_PYTHON_FILE),
_trim(_generate_parameters_file(data, run_dir=run_dir))
)
def _add_report_filenames(v):
for k in _DATA_FILE_FOR_MODEL:
v['{}Filename'.format(k)] = _DATA_FILE_FOR_MODEL[k]['filename']
def _compute_material_characteristics(model, photon_energy, prefix=''):
import bnlcrl.pkcli.simulate
fields_with_prefix = PKDict({
'material': 'material',
'refractiveIndex': 'refractiveIndex',
'attenuationLength': 'attenuationLength',
})
if prefix:
for k in fields_with_prefix.keys():
fields_with_prefix[k] = '{}{}{}'.format(
prefix,
fields_with_prefix[k][0].upper(),
fields_with_prefix[k][1:],
)
if model[fields_with_prefix['material']] == 'User-defined':
return model
# Index of refraction:
kwargs = PKDict({
'energy': photon_energy,
})
if model['method'] == 'server':
kwargs['precise'] = True
kwargs['formula'] = model[fields_with_prefix['material']]
elif model['method'] == 'file':
kwargs['precise'] = True
kwargs['data_file'] = '{}_delta.dat'.format(model[fields_with_prefix['material']])
else:
kwargs['calc_delta'] = True
kwargs['formula'] = model[fields_with_prefix['material']]
delta = bnlcrl.pkcli.simulate.find_delta(**kwargs)
model[fields_with_prefix['refractiveIndex']] = delta['characteristic_value']
# Attenuation length:
kwargs['characteristic'] = 'atten'
if model['method'] == 'file':
kwargs['precise'] = True
kwargs['data_file'] = '{}_atten.dat'.format(model[fields_with_prefix['material']])
if model['method'] == 'calculation':
# The method 'calculation' in bnlcrl library is not supported yet for attenuation length calculation.
pass
else:
atten = bnlcrl.pkcli.simulate.find_delta(**kwargs)
model[fields_with_prefix['attenuationLength']] = atten['characteristic_value']
return model
def _compute_PGM_value(model):
#if not model['energyAvg'] or not model['cff'] or not model['grazingAngle']:
# return model
#if model['cff'] == 1:
# return model
parms_list = ['energyAvg', 'cff', 'grazingAngle']
try:
mirror = srwlib.SRWLOptMirPl(
_size_tang=model['tangentialSize'],
_size_sag=model['sagittalSize'],
_nvx=model['nvx'],
_nvy=model['nvy'],
_nvz=model['nvz'],
_tvx=model['tvx'],
_tvy=model['tvy'],
_x=model['horizontalOffset'],
_y=model['verticalOffset'],
)
#TODO(pjm): existing data may have photonEnergy as a string
model['energyAvg'] = float(model['energyAvg'])
if model.computeParametersFrom == '1':
opGr = srwlib.SRWLOptG(
_mirSub=mirror,
_m=model['diffractionOrder'],
_grDen=model['grooveDensity0'],
_grDen1=model['grooveDensity1'],
_grDen2=model['grooveDensity2'],
_grDen3=model['grooveDensity3'],
_grDen4=model['grooveDensity4'],
_e_avg=model['energyAvg'],
_cff=model['cff'],
_ang_graz=0,
_ang_roll=model['rollAngle'],
)
grAng, defAng = opGr.cff2ang(_en=model['energyAvg'], _cff=model['cff'])
model['grazingAngle'] = grAng * 1000.0
elif model.computeParametersFrom == '2':
opGr = srwlib.SRWLOptG(
_mirSub=mirror,
_m=model['diffractionOrder'],
_grDen=model['grooveDensity0'],
_grDen1=model['grooveDensity1'],
_grDen2=model['grooveDensity2'],
_grDen3=model['grooveDensity3'],
_grDen4=model['grooveDensity4'],
_e_avg=model['energyAvg'],
_cff=1.5, # model['cff'],
_ang_graz=model['grazingAngle'],
_ang_roll=model['rollAngle'],
)
cff, defAng = opGr.ang2cff(_en=model['energyAvg'], _ang_graz=model['grazingAngle']/1000.0)
#print("cff={}".format(cff))
model['cff'] = cff
angroll = model['rollAngle']
if abs(angroll) < np.pi/4 or abs(angroll-np.pi) < np.pi/4:
model['orientation'] = 'y'
else: model['orientation'] = 'x'
_compute_grating_orientation(model)
except Exception:
pkdlog('\n{}', traceback.format_exc())
if model.computeParametersFrom == '1': model['grazingAngle'] = None
elif model.computeParametersFrom == '2': model['cff'] = None
#for key in parms_list:
# model[key] = None
pkdc("grazingAngle={} nvz-sin(grazingAngle)={} cff={}",
model['grazingAngle'], np.fabs(model['nvz'])-np.fabs(np.sin(model['grazingAngle']/1000)), model['cff'])
return model
def _compute_grating_orientation(model):
if not model['grazingAngle']:
pkdlog("grazingAngle is missing, return old data")
return model
parms_list = ['nvx', 'nvy', 'nvz', 'tvx', 'tvy', 'outoptvx', 'outoptvy', 'outoptvz', 'outframevx', 'outframevy']
try:
mirror = srwlib.SRWLOptMirPl(
_size_tang=model['tangentialSize'],
_size_sag=model['sagittalSize'],
_nvx=model['nvx'],
_nvy=model['nvy'],
_nvz=model['nvz'],
_tvx=model['tvx'],
_tvy=model['tvy'],
_x=model['horizontalOffset'],
_y=model['verticalOffset'],
)
opGr = srwlib.SRWLOptG(
_mirSub=mirror,
_m=model['diffractionOrder'],
_grDen=model['grooveDensity0'],
_grDen1=model['grooveDensity1'],
_grDen2=model['grooveDensity2'],
_grDen3=model['grooveDensity3'],
_grDen4=model['grooveDensity4'],
_e_avg=model['energyAvg'],
_cff=model['cff'],
_ang_graz=model['grazingAngle'],
_ang_roll=model['rollAngle'],
)
pkdc("updating nvz from {} to {} with grazingAngle= {}mrad", model['nvz'], opGr.mirSub.nvz, model['grazingAngle'])
model['nvx'] = opGr.mirSub.nvx
model['nvy'] = opGr.mirSub.nvy
model['nvz'] = opGr.mirSub.nvz
model['tvx'] = opGr.mirSub.tvx
model['tvy'] = opGr.mirSub.tvy
orientDataGr_pp = opGr.get_orient(_e=model['energyAvg'])[1]
        tGr_pp = orientDataGr_pp[0] # Tangential Vector to Grating surface
        nGr_pp = orientDataGr_pp[2] # Normal Vector to Grating surface
model['outoptvx'] = nGr_pp[0]
model['outoptvy'] = nGr_pp[1]
model['outoptvz'] = nGr_pp[2]
model['outframevx'] = tGr_pp[0]
model['outframevy'] = tGr_pp[1]
except Exception:
pkdlog('\n{}', traceback.format_exc())
for key in parms_list:
model[key] = None
return model
def _compute_crystal_init(model):
import srwl_uti_cryst
parms_list = ['dSpacing', 'psi0r', 'psi0i', 'psiHr', 'psiHi', 'psiHBr', 'psiHBi']
try:
material_raw = model['material'] # name contains either "(SRW)" or "(X0h)"
material = material_raw.split()[0] # short name for SRW (e.g., Si), long name for X0h (e.g., Silicon)
h = int(model['h'])
k = int(model['k'])
l = int(model['l'])
millerIndices = [h, k, l]
energy = model['energy']
if re.search('(X0h)', material_raw):
crystal_parameters = crystal.get_crystal_parameters(material, energy, h, k, l)
dc = crystal_parameters['d']
xr0 = crystal_parameters['xr0']
xi0 = crystal_parameters['xi0']
xrh = crystal_parameters['xrh']
xih = crystal_parameters['xih']
elif re.search('(SRW)', material_raw):
dc = srwl_uti_cryst.srwl_uti_cryst_pl_sp(millerIndices, material)
xr0, xi0, xrh, xih = srwl_uti_cryst.srwl_uti_cryst_pol_f(energy, millerIndices, material)
else:
dc = xr0 = xi0 = xrh = xih = None
model['dSpacing'] = dc
model['psi0r'] = xr0
model['psi0i'] = xi0
model['psiHr'] = xrh
model['psiHi'] = xih
model['psiHBr'] = xrh
model['psiHBi'] = xih
if model['diffractionAngle'] == '-1.57079632' or model['diffractionAngle'] == '1.57079632':
model['orientation'] = 'x'
else: model['orientation'] = 'y'
except Exception:
        pkdlog('error computing crystal parameters for material: {}', material_raw)
for key in parms_list:
model[key] = None
return model
def _compute_crystal_orientation(model):
import uti_math
if not model['dSpacing']:
return model
parms_list = ['nvx', 'nvy', 'nvz', 'tvx', 'tvy', 'outoptvx', 'outoptvy', 'outoptvz', 'outframevx', 'outframevy']
try:
opCr = srwlib.SRWLOptCryst(
_d_sp=model['dSpacing'],
_psi0r=model['psi0r'],
_psi0i=model['psi0i'],
_psi_hr=model['psiHr'],
_psi_hi=model['psiHi'],
_psi_hbr=model['psiHBr'],
_psi_hbi=model['psiHBi'],
_tc=model['crystalThickness'],
_uc=float(model['useCase']),
_ang_as=model['asymmetryAngle'],
_e_avg=model['energy'],
_ang_roll=float(model['diffractionAngle']),
)
model['nvx'] = opCr.nvx
model['nvy'] = opCr.nvy
model['nvz'] = opCr.nvz
model['tvx'] = opCr.tvx
model['tvy'] = opCr.tvy
orientDataCr_pp = opCr.get_orient(_e=model['energy'])[1]
tCr_pp = orientDataCr_pp[0] # Tangential Vector to Crystal surface
nCr_pp = orientDataCr_pp[2] # Normal Vector to Crystal surface
model['outoptvx'] = nCr_pp[0]
model['outoptvy'] = nCr_pp[1]
model['outoptvz'] = nCr_pp[2]
model['outframevx'] = tCr_pp[0]
model['outframevy'] = tCr_pp[1]
_SIM_DATA.srw_compute_crystal_grazing_angle(model)
except Exception:
pkdlog('\n{}', traceback.format_exc())
for key in parms_list:
model[key] = None
return model
def _compute_grazing_orientation(model):
def preserve_sign(item, field, new_value):
old_value = item[field] if field in item else 0
was_negative = float(old_value) < 0
item[field] = float(new_value)
if (was_negative and item[field] > 0) or item[field] < 0:
item[field] = - item[field]
grazing_angle = float(model.grazingAngle) / 1000.0
# z is always negative
model.normalVectorZ = - abs(math.sin(grazing_angle))
if model.autocomputeVectors == 'horizontal':
preserve_sign(model, 'normalVectorX', math.cos(grazing_angle))
preserve_sign(model, 'tangentialVectorX', math.sin(grazing_angle))
model.normalVectorY = 0
model.tangentialVectorY = 0
elif model.autocomputeVectors == 'vertical':
preserve_sign(model, 'normalVectorY', math.cos(grazing_angle))
preserve_sign(model, 'tangentialVectorY', math.sin(grazing_angle))
model.normalVectorX = 0
model.tangentialVectorX = 0
return model
def _create_user_model(data, model_name):
model = data['models'][model_name]
if model_name == 'tabulatedUndulator':
model = model.copy()
model['undulator'] = data['models']['undulator']
return model
def _delete_user_models(electron_beam, tabulated_undulator):
"""Remove the beam and undulator user model list files"""
for model_name in _USER_MODEL_LIST_FILENAME.keys():
model = electron_beam if model_name == 'electronBeam' else tabulated_undulator
if not model or 'id' not in model:
continue
user_model_list = _load_user_model_list(model_name)
for i,m in enumerate(user_model_list):
if m['id'] == model.id:
del user_model_list[i]
_save_user_model_list(model_name, user_model_list)
break
return PKDict()
def _extract_beamline_orientation(filename):
cols = np.array(uti_io.read_ascii_data_cols(filename, '\t', _i_col_start=1, _n_line_skip=1))
rows = list(reversed(np.rot90(cols).tolist()))
rows = np.reshape(rows, (len(rows), 4, 3))
res = []
for row in rows:
# the vtk client renders x axis flipped, so update x position and rotation
p = row[0].tolist()
p[0] = -p[0]
orient = row[1:].tolist()
orient[1][0] = -orient[1][0]
orient[1][1] = -orient[1][1]
orient[1][2] = -orient[1][2]
res.append(PKDict(
point=p,
orient=orient,
))
return PKDict(
x_range=[],
elements=res,
)
def _extract_brilliance_report(model, data):
label = template_common.enum_text(_SCHEMA, 'BrillianceReportType', model['reportType'])
if model['reportType'] in ('3', '4'):
label += ' [rad]'
elif model['reportType'] in ('5', '6'):
label += ' [m]'
x_points = []
points = []
scale_adjustment = 1000.0
if 'brightnessComponent' in model and model['brightnessComponent'] == 'spectral-detuning':
scale_adjustment = 1.0
for f in data:
m = re.search(r'^f(\d+)', f)
if m:
x_points.append((np.array(data[f]['data']) * scale_adjustment).tolist())
points.append(data['e{}'.format(m.group(1))]['data'])
title = template_common.enum_text(_SCHEMA, 'BrightnessComponent', model['brightnessComponent'])
if model['brightnessComponent'] == 'k-tuning':
if model['initialHarmonic'] == model['finalHarmonic']:
title += ', Harmonic {}'.format(model['initialHarmonic'])
else:
title += ', Harmonic {} - {}'.format(model['initialHarmonic'], model['finalHarmonic'])
else:
title += ', Harmonic {}'.format(model['harmonic'])
return {
'title': title,
'y_label': label,
'x_label': 'Photon Energy [eV]',
'x_range': [np.amin(x_points), np.amax(x_points)],
'y_range': [np.amin(points), np.amax(points)],
'x_points': x_points,
'points': points,
}
def _extract_trajectory_report(model, data):
available_axes = PKDict()
for s in _SCHEMA['enum']['TrajectoryPlotAxis']:
available_axes[s[0]] = s[1]
x_points = data[model['plotAxisX']]['data']
plots = []
y_range = []
for f in ('plotAxisY', 'plotAxisY2'):
if model[f] != 'None':
points = data[model[f]]['data']
if y_range:
y_range = [min(y_range[0], min(points)), max(y_range[1], max(points))]
else:
y_range = [min(points), max(points)]
plots.append(PKDict(
points=points,
label=available_axes[model[f]],
#TODO(pjm): refactor with template_common.compute_plot_color_and_range()
color='#ff7f0e' if plots else '#1f77b4',
))
return PKDict(
title='Electron Trajectory',
x_range=[min(x_points), max(x_points)],
x_points=x_points,
y_label='[{}]'.format(data[model['plotAxisY']]['units']),
x_label=available_axes[model['plotAxisX']] + ' [' + data[model['plotAxisX']]['units'] + ']',
y_range=y_range,
plots=plots,
)
def _fix_file_header(filename):
# fixes file header for coherenceXAnimation and coherenceYAnimation reports
rows = []
pkdc('fix header filename: {}', filename)
with pkio.open_text(filename) as f:
for line in f:
rows.append(line)
if len(rows) == 11:
pkdc('before header changed rows4: {}',rows[4])
pkdc('before header changed rows5: {}',rows[5])
pkdc('before header changed rows6: {}',rows[6])
pkdc('before header changed rows7: {}',rows[7])
pkdc('before header changed rows8: {}',rows[8])
pkdc('before header changed rows9: {}',rows[9])
#if rows[4] == rows[7]:
if rows[6].split()[0] == rows[9].split()[0] and rows[6].split()[0] != 1:
# already fixed up
return
col4 = rows[4].split()
col5 = rows[5].split()
col6 = rows[6].split()
col7 = rows[7].split()
col8 = rows[8].split()
col9 = rows[9].split()
#if re.search(r'^\#0 ', rows[4]):
if re.search(r'^\#1 ', rows[6]):
col4[0] = col7[0]
rows[4] = ' '.join(col4)+'\n'
col5[0] = col8[0]
rows[5] = ' '.join(col5)+'\n'
col6[0] = col9[0]
rows[6] = ' '.join(col6)+'\n'
else:
col7[0] = col4[0]
rows[7] = ' '.join(col7)+'\n'
col8[0] = col5[0]
rows[8] = ' '.join(col8)+'\n'
col9[0] = col6[0]
rows[9] = ' '.join(col9)+'\n'
Vmin = float(rows[7].split()[0][1:])
Vmax = float(rows[8].split()[0][1:])
rows[7] = '#'+str((Vmin-Vmax)/2)+' '+' '.join(rows[7].split()[1:])+'\n'
rows[8] = '#'+str((Vmax-Vmin)/2)+' '+' '.join(rows[8].split()[1:])+'\n'
pkdc('after header changed rows4:{}',rows[4])
pkdc('after header changed rows5:{}',rows[5])
pkdc('after header changed rows6:{}',rows[6])
pkdc('after header changed rows7:{}',rows[7])
pkdc('after header changed rows8:{}',rows[8])
pkdc('after header changed rows9:{}',rows[9])
pkio.write_text(filename, ''.join(rows))
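# A reading of the header fix above (illustrative note, not part of the original
# module): rows 4-6 and 7-9 hold the start/end/count header values for the two
# transverse axes; when row 6 is '#1 ...' (a single point) that triple is copied from
# rows 7-9, otherwise rows 7-9 are copied from rows 4-6 and rows 7-8 are re-centered
# to -(Vmax - Vmin)/2 and +(Vmax - Vmin)/2.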
def _generate_beamline_optics(report, data, last_id):
models = data['models']
if not _SIM_DATA.srw_is_beamline_report(report):
return ' pass', ''
has_beamline_elements = len(models.beamline) > 0
if has_beamline_elements and not last_id:
last_id = models.beamline[-1].id
names = []
items = []
prev = None
propagation = models.propagation
max_name_size = 0
for item in models.beamline:
is_disabled = 'isDisabled' in item and item.isDisabled
name = _safe_beamline_item_name(item.title, names)
max_name_size = max(max_name_size, len(name))
if prev:
size = item.position - prev.position
if size != 0:
# add a drift
drift_name = _safe_beamline_item_name('{}_{}'.format(prev.name, name), names)
max_name_size = max(max_name_size, len(drift_name))
names.append(drift_name)
items.append(PKDict(
name=drift_name,
type='drift',
position=prev.position,
propagation=prev.drift_propagation,
length=size,
))
pp = propagation[str(item.id)]
item.propagation = pp[0]
item.drift_propagation = pp[1]
item.name = name
if not is_disabled:
if item.type == 'watch' and not items:
# first item is a watch, insert a 0 length drift in front
items.append(PKDict(
name='zero_drift',
type='drift',
position=item.position,
propagation=item.propagation,
length=0,
))
names.append(items[-1].name)
if 'heightProfileFile' in item:
item.heightProfileDimension = _height_profile_dimension(item, data)
items.append(item)
names.append(name)
if int(last_id) == int(item.id):
break
prev = item
args = PKDict(
report=report,
items=items,
names=names,
postPropagation=models.postPropagation,
wantPostPropagation=has_beamline_elements and (int(last_id) == int(models.beamline[-1].id)),
maxNameSize=max_name_size,
nameMap=PKDict(
apertureShape='ap_shape',
asymmetryAngle='ang_as',
attenuationLength='atten_len',
complementaryAttenuationLength='atLen2',
complementaryRefractiveIndex='delta2',
coreAttenuationLength='atten_len_core',
coreDiameter='diam_core',
coreRefractiveIndex='delta_core',
crystalThickness='tc',
dSpacing='d_sp',
diffractionOrder='m',
externalAttenuationLength='atten_len_ext',
externalRefractiveIndex='delta_ext',
energyAvg='e_avg',
firstFocusLength='p',
focalLength='q',
focalPlane='foc_plane',
grazingAngle='ang',
gridShape='grid_sh',
grooveDensity0='grDen',
grooveDensity1='grDen1',
grooveDensity2='grDen2',
grooveDensity3='grDen3',
grooveDensity4='grDen4',
heightAmplification='amp_coef',
heightProfileFile='hfn',
horizontalApertureSize='apert_h',
horizontalCenterCoordinate='xc',
horizontalCenterPosition='xc',
horizontalFocalLength='Fx',
horizontalGridDimension='grid_dx',
horizontalGridPitch='pitch_x',
horizontalGridsNumber='grid_nx',
horizontalMaskCoordinate='mask_x0',
horizontalOffset='x',
horizontalPixelsNumber='mask_Nx',
horizontalSamplingInterval='hx',
horizontalSize='Dx',
horizontalTransverseSize='size_x',
imageFile='file_path',
length='L',
mainAttenuationLength='atLen1',
mainRefractiveIndex='delta1',
maskThickness='thick',
normalVectorX='nvx',
normalVectorY='nvy',
normalVectorZ='nvz',
numberOfLenses='n',
numberOfZones='nZones',
orientation='dim',
outerRadius='rn',
radius='r',
refractiveIndex='delta',
sagittalRadius='rs',
sagittalSize='size_sag',
tangentialRadius='rt',
tangentialSize='size_tang',
tangentialVectorX='tvx',
tangentialVectorY='tvy',
thickness='thick',
tipRadius='r_min',
tipWallThickness='wall_thick',
transmissionImage='extTransm',
useCase='uc',
verticalApertureSize='apert_v',
verticalCenterCoordinate='yc',
verticalCenterPosition='yc',
verticalFocalLength='Fy',
verticalGridDimension='grid_dy',
verticalGridPitch='pitch_y',
verticalGridsNumber='grid_ny',
verticalMaskCoordinate='mask_y0',
verticalOffset='y',
verticalPixelsNumber='mask_Ny',
verticalSamplingInterval='hy',
verticalSize='Dy',
verticalTransverseSize='size_y',
),
)
optics = template_common.render_jinja(SIM_TYPE, args, 'beamline_optics.py')
prop = template_common.render_jinja(SIM_TYPE, args, 'beamline_parameters.py')
return optics, prop
def _generate_parameters_file(data, plot_reports=False, run_dir=None):
# Process method and magnetic field values for intensity, flux and intensity distribution reports:
# Intensity report:
source_type = data['models']['simulation']['sourceType']
undulator_type = data['models']['tabulatedUndulator']['undulatorType']
magnetic_field = _process_intensity_reports(source_type, undulator_type)['magneticField']
data['models']['intensityReport']['magneticField'] = magnetic_field
data['models']['sourceIntensityReport']['magneticField'] = magnetic_field
data['models']['trajectoryReport']['magneticField'] = magnetic_field
data['models']['powerDensityReport']['magneticField'] = magnetic_field
report = data['report']
if report == 'fluxAnimation':
data['models']['fluxReport'] = data['models'][report].copy()
if _SIM_DATA.srw_is_idealized_undulator(source_type, undulator_type) and int(data['models']['fluxReport']['magneticField']) == 2:
data['models']['fluxReport']['magneticField'] = 1
elif _SIM_DATA.is_watchpoint(report) or report == 'sourceIntensityReport':
# render the watchpoint report settings in the initialIntensityReport template slot
data['models']['initialIntensityReport'] = data['models'][report].copy()
if report == 'sourceIntensityReport':
for k in ['photonEnergy', 'horizontalPointCount', 'horizontalPosition', 'horizontalRange',
'sampleFactor', 'samplingMethod', 'verticalPointCount', 'verticalPosition', 'verticalRange']:
data['models']['simulation'][k] = data['models']['sourceIntensityReport'][k]
if _SIM_DATA.srw_is_tabulated_undulator_source(data['models']['simulation']):
if undulator_type == 'u_i':
data['models']['tabulatedUndulator']['gap'] = 0.0
if report != 'multiElectronAnimation' or data['models']['multiElectronAnimation']['photonEnergyBandWidth'] <= 0:
data['models']['multiElectronAnimation']['photonEnergyIntegration'] = 0
data['models']['simulation']['finalPhotonEnergy'] = -1.0
else:
data['models']['multiElectronAnimation']['photonEnergyIntegration'] = 1
half_width = float(data['models']['multiElectronAnimation']['photonEnergyBandWidth']) / 2.0
data['models']['simulation']['photonEnergy'] = float(data['models']['simulation']['photonEnergy'])
data['models']['simulation']['finalPhotonEnergy'] = data['models']['simulation']['photonEnergy'] + half_width
data['models']['simulation']['photonEnergy'] -= half_width
# do this before validation or arrays get turned into strings
if report == 'rsoptExport':
rsopt_ctx = _rsopt_jinja_context(data.models.exportRsOpt)
_validate_data(data, _SCHEMA)
last_id = None
if _SIM_DATA.is_watchpoint(report):
last_id = _SIM_DATA.watchpoint_id(report)
if report == 'multiElectronAnimation':
last_id = data['models']['multiElectronAnimation']['watchpointId']
if int(data['models']['simulation']['samplingMethod']) == 2:
data['models']['simulation']['sampleFactor'] = 0
res, v = template_common.generate_parameters_file(data)
if report == 'rsoptExport':
v.update(rsopt_ctx)
# rsopt uses this as a lookup param so want it in one place
v['ws_fni_desc'] = 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'
v['rs_type'] = source_type
if _SIM_DATA.srw_is_idealized_undulator(source_type, undulator_type):
v['rs_type'] = 'u'
if report == 'mirrorReport':
v['mirrorOutputFilename'] = _MIRROR_OUTPUT_FILE
return template_common.render_jinja(SIM_TYPE, v, 'mirror.py')
if report == 'brillianceReport':
v['brillianceOutputFilename'] = _BRILLIANCE_OUTPUT_FILE
return template_common.render_jinja(SIM_TYPE, v, 'brilliance.py')
if report == 'backgroundImport':
v.tmp_dir = str(run_dir)
v.python_file = run_dir.join('user_python.py')
pkio.write_text(v.python_file, data.models.backgroundImport.python)
return template_common.render_jinja(SIM_TYPE, v, 'import.py')
if 'Animation' in report:
if report in data.models and 'jobRunMode' in data.models[report]:
if data.models[report].jobRunMode == 'sbatch':
v.sbatchBackup = '1'
v['beamlineOptics'], v['beamlineOpticsParameters'] = _generate_beamline_optics(report, data, last_id)
# und_g and und_ph API units are mm rather than m
v['tabulatedUndulator_gap'] *= 1000
v['tabulatedUndulator_phase'] *= 1000
if report in data['models'] and 'distanceFromSource' in data['models'][report]:
position = data['models'][report]['distanceFromSource']
else:
position = _get_first_element_position(data)
v['beamlineFirstElementPosition'] = position
# 1: auto-undulator 2: auto-wiggler
v['energyCalculationMethod'] = 1 if _SIM_DATA.srw_is_undulator_source(data['models']['simulation']) else 2
if data['models']['electronBeam']['beamDefinition'] == 'm':
v['electronBeam_horizontalBeta'] = None
v[report] = 1
_add_report_filenames(v)
v['setupMagneticMeasurementFiles'] = plot_reports and _SIM_DATA.srw_uses_tabulated_zipfile(data)
v['srwMain'] = _generate_srw_main(data, plot_reports)
if run_dir and _SIM_DATA.srw_uses_tabulated_zipfile(data):
src_zip = str(run_dir.join(v['tabulatedUndulator_magneticFile']))
target_dir = str(run_dir.join(_TABULATED_UNDULATOR_DATA_DIR))
# The MagnMeasZip class defined above has convenient properties we can use here
mmz = MagnMeasZip(src_zip)
zindex = _zip_path_for_file(mmz.z, mmz.index_file)
zdata = map(lambda fn: _zip_path_for_file(mmz.z, fn), mmz.dat_files)
# extract only the index file and the data files it lists
mmz.z.extract(zindex, target_dir)
for df in zdata:
mmz.z.extract(df, target_dir)
v.magneticMeasurementsDir = _TABULATED_UNDULATOR_DATA_DIR + '/' + mmz.index_dir
v.magneticMeasurementsIndexFile = mmz.index_file
return _trim(res + template_common.render_jinja(SIM_TYPE, v))
def _generate_srw_main(data, plot_reports):
report = data['report']
for_rsopt = report == 'rsoptExport'
source_type = data['models']['simulation']['sourceType']
run_all = report == _SIM_DATA.SRW_RUN_ALL_MODEL or report == 'rsoptExport'
vp_var = 'vp' if for_rsopt else 'varParam'
content = [
f'v = srwl_bl.srwl_uti_parse_options(srwl_bl.srwl_uti_ext_options({vp_var}), use_sys_argv={plot_reports})',
]
if plot_reports and _SIM_DATA.srw_uses_tabulated_zipfile(data):
content.append('setup_magnetic_measurement_files("{}", v)'.format(data['models']['tabulatedUndulator']['magneticFile']))
if run_all or _SIM_DATA.srw_is_beamline_report(report):
content.append('op = set_optics(v)')
else:
# set_optics() can be an expensive call for mirrors, only invoke if needed
content.append('op = None')
if (run_all and source_type != 'g') or report == 'intensityReport':
content.append('v.ss = True')
if plot_reports:
content.append("v.ss_pl = 'e'")
if (run_all and source_type not in ('g', 'm')) or report in 'fluxReport':
content.append('v.sm = True')
if plot_reports:
content.append("v.sm_pl = 'e'")
if (run_all and source_type != 'g') or report == 'powerDensityReport':
content.append('v.pw = True')
if plot_reports:
content.append("v.pw_pl = 'xy'")
if run_all or report in ['initialIntensityReport', 'sourceIntensityReport']:
content.append('v.si = True')
if plot_reports:
content.append("v.si_pl = 'xy'")
if (run_all and source_type != 'g') or report == 'trajectoryReport':
content.append('v.tr = True')
if plot_reports:
content.append("v.tr_pl = 'xz'")
if run_all or _SIM_DATA.is_watchpoint(report):
content.append('v.ws = True')
if plot_reports:
content.append("v.ws_pl = 'xy'")
#TODO(pjm): work-around for #1593
content.append('mag = None')
content.append("if v.rs_type == 'm':")
for line in (
'mag = srwlib.SRWLMagFldC()',
'mag.arXc.append(0)',
'mag.arYc.append(0)',
'mag.arMagFld.append(srwlib.SRWLMagFldM(v.mp_field, v.mp_order, v.mp_distribution, v.mp_len))',
'mag.arZc.append(v.mp_zc)',
):
content.append(' {}'.format(line))
if _SIM_DATA.srw_is_background_report(report):
content.append(
# Number of "iterations" per save is best set to num processes
'v.wm_ns = v.sm_ns = {}'.format(sirepo.mpi.cfg.cores),
)
content.append('srwl_bl.SRWLBeamline(_name=v.name, _mag_approx=mag).calc_all(v, op)')
return '\n'.join([f' {x}' for x in content] + [''] + ([] if for_rsopt \
else ['main()', '']))
def _get_first_element_position(data):
beamline = data['models']['beamline']
if beamline:
return beamline[0]['position']
if 'distanceFromSource' in data['models']['simulation']:
return data['models']['simulation']['distanceFromSource']
return template_common.DEFAULT_INTENSITY_DISTANCE
def _height_profile_dimension(item, data):
"""Find the dimension of the provided height profile .dat file.
1D files have 2 columns, 2D - 8 columns.
"""
dimension = 0
if item['heightProfileFile'] and item['heightProfileFile'] != 'None':
with _SIM_DATA.lib_file_abspath(item['heightProfileFile'], data=data).open('r') as f:
header = f.readline().strip().split()
dimension = 1 if len(header) == 2 else 2
return dimension
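# Illustrative note (not part of the original module): only the first line of the
# .dat file is examined, so a row like "0.0\t1.2e-09" (2 values) yields dimension 1,
# while an 8-column row (or any other column count) yields dimension 2.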
def _intensity_units(is_gaussian, sim_in):
if is_gaussian:
if 'report' in sim_in and 'fieldUnits' in sim_in['models'][sim_in['report']]:
i = sim_in['models'][sim_in['report']]['fieldUnits']
else:
i = sim_in['models']['initialIntensityReport']['fieldUnits']
return _SCHEMA['enum']['FieldUnits'][int(i)][1]
return 'ph/s/.1%bw/mm^2'
def _load_user_model_list(model_name):
f = _SIM_DATA.lib_file_write_path(_USER_MODEL_LIST_FILENAME[model_name])
try:
if f.exists():
return simulation_db.read_json(f)
except Exception:
pkdlog('user list read failed, resetting contents: {}', f)
_save_user_model_list(model_name, [])
return _load_user_model_list(model_name)
def _process_image(data, tmp_dir):
"""Process image and return
Args:
data (dict): description of simulation
Returns:
py.path.local: file to return
"""
# This should just be a basename, but this ensures it.
import srwl_uti_smp
path = str(_SIM_DATA.lib_file_abspath(sirepo.util.secure_filename(data.baseImage)))
m = data.model
with pkio.save_chdir(tmp_dir):
if m.sampleSource == 'file':
s = srwl_uti_smp.SRWLUtiSmp(
file_path=path,
area=None if not int(m.cropArea) else (m.areaXStart, m.areaXEnd, m.areaYStart, m.areaYEnd),
rotate_angle=float(m.rotateAngle),
rotate_reshape=int(m.rotateReshape),
cutoff_background_noise=float(m.cutoffBackgroundNoise),
background_color=int(m.backgroundColor),
invert=int(m.invert),
tile=None if not int(m.tileImage) else (m.tileRows, m.tileColumns),
shift_x=m.shiftX,
shift_y=m.shiftY,
is_save_images=True,
prefix=str(tmp_dir),
output_image_format=m.outputImageFormat,
)
return pkio.py_path(s.processed_image_name)
assert m.sampleSource == 'randomDisk'
s = srwl_uti_smp.srwl_opt_setup_smp_rnd_obj2d(
_thickness=0,
_delta=0,
_atten_len=0,
_dens=m.dens,
_rx=m.rx,
_ry=m.ry,
_obj_type=int(m.obj_type),
_r_min_bw_obj=m.r_min_bw_obj,
_obj_size_min=m.obj_size_min,
_obj_size_max=m.obj_size_max,
_size_dist=int(m.size_dist),
_ang_min=m.ang_min,
_ang_max=m.ang_max,
_ang_dist=int(m.ang_dist),
_rand_alg=int(m.rand_alg),
_obj_par1=m.obj_size_ratio if m.obj_type in ('1', '2', '3') \
else m.poly_sides if m.obj_type == '4' \
else m.rand_shapes,
_obj_par2=m.rand_obj_size == '1' if m.obj_type in ('1', '2', '3') \
else m.rand_poly_side == '1' if m.obj_type == '4' \
else None,
_ret='img',
)
filename = 'sample_processed.{}'.format(m.outputImageFormat)
s.save(filename)
return pkio.py_path(filename)
def _process_intensity_reports(source_type, undulator_type):
# Magnetic field processing:
return PKDict({
'magneticField': 2 if source_type == 'a' or _SIM_DATA.srw_is_tabulated_undulator_with_magnetic_file(source_type, undulator_type) else 1,
})
def _process_rsopt_elements(els):
x = [e for e in els if e.enabled and e.enabled != '0']
for e in x:
for p in _RSOPT_PARAMS:
if p in e:
e[p].offsets = sirepo.util.split_comma_delimited_string(e[f'{p}Offsets'], float)
return x
def _extend_plot(ar2d, x_range, y_range, horizontalStart, horizontalEnd, verticalStart, verticalEnd):
x_step = (x_range[1] - x_range[0]) / x_range[2]
y_step = (y_range[1] - y_range[0]) / y_range[2]
if horizontalStart < x_range[0]:
b = np.zeros((np.shape(ar2d)[0], int((x_range[0] - horizontalStart) / x_step)))
ar2d = np.hstack((b, ar2d))
x_range[0] = horizontalStart
if horizontalEnd > x_range[1]:
b = np.zeros((np.shape(ar2d)[0], int((horizontalEnd - x_range[1]) / x_step)))
ar2d = np.hstack((ar2d, b))
x_range[1] = horizontalEnd
if verticalStart < y_range[0]:
b = np.zeros((int((y_range[0] - verticalStart) / y_step), np.shape(ar2d)[1]))
ar2d = np.vstack((ar2d, b))
y_range[0] = verticalStart
if verticalEnd > y_range[1]:
b = np.zeros((int((verticalEnd - y_range[1]) / y_step), np.shape(ar2d)[1]))
ar2d = np.vstack((b, ar2d))
y_range[1] = verticalEnd
y_range[2], x_range[2] = np.shape(ar2d)
return (ar2d, x_range, y_range)
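# Illustrative example (hypothetical values, not part of the original module): with
# ar2d of shape (100, 200), x_range = [-1e-3, 1e-3, 200] and horizontalEnd = 2e-3,
# x_step is 1e-5, so int((2e-3 - 1e-3) / 1e-5) = 100 zero-valued columns are appended
# on the right, x_range[1] becomes 2e-3 and x_range[2] becomes 300.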
def _remap_3d(info, allrange, z_label, z_units, report):
x_range = [allrange[3], allrange[4], allrange[5]]
y_range = [allrange[6], allrange[7], allrange[8]]
width_pixels = int(report.intensityPlotsWidth)
rotate_angle = report.get('rotateAngle', 0)
ar2d = info['points']
totLen = int(x_range[2] * y_range[2])
n = len(ar2d) if totLen > len(ar2d) else totLen
ar2d = np.reshape(ar2d[0:n], (int(y_range[2]), int(x_range[2])))
if report.get('usePlotRange', '0') == '1':
horizontalStart = (report.horizontalOffset - report.horizontalSize/2) * 1e-3
horizontalEnd = (report.horizontalOffset + report.horizontalSize/2) * 1e-3
verticalStart = (report.verticalOffset - report.verticalSize/2) * 1e-3
verticalEnd = (report.verticalOffset + report.verticalSize/2) * 1e-3
ar2d, x_range, y_range = _extend_plot(ar2d, x_range, y_range, horizontalStart, horizontalEnd, verticalStart, verticalEnd)
x_left, x_right = np.clip(x_range[:2], horizontalStart, horizontalEnd)
y_left, y_right = np.clip(y_range[:2], verticalStart, verticalEnd)
x = np.linspace(x_range[0], x_range[1], int(x_range[2]))
y = np.linspace(y_range[0], y_range[1], int(y_range[2]))
xsel = ((x >= x_left) & (x <= x_right))
ysel = ((y >= y_left) & (y <= y_right))
ar2d = np.compress(xsel, np.compress(ysel, ar2d, axis=0), axis=1)
x_range = [x_left, x_right, np.shape(ar2d)[1]]
y_range = [y_left, y_right, np.shape(ar2d)[0]]
if report.get('useIntensityLimits', '0') == '1':
ar2d[ar2d < report.minIntensityLimit] = report.minIntensityLimit
ar2d[ar2d > report.maxIntensityLimit] = report.maxIntensityLimit
if not width_pixels:
# upper limit is browser's max html canvas size
width_pixels = _CANVAS_MAX_SIZE
job.init()
# roughly 20x size increase for json
if ar2d.size * _JSON_MESSAGE_EXPANSION > job.cfg.max_message_bytes:
max_width = int(math.sqrt(job.cfg.max_message_bytes / _JSON_MESSAGE_EXPANSION))
if max_width < width_pixels:
pkdc(
'auto scaling dimensions to fit message size. size: {}, max_width: {}',
ar2d.size,
max_width,
)
width_pixels = max_width
# rescale width and height to maximum of width_pixels
if width_pixels and (width_pixels < x_range[2] or width_pixels < y_range[2]):
x_resize = 1.0
y_resize = 1.0
if width_pixels < x_range[2]:
x_resize = float(width_pixels) / float(x_range[2])
if width_pixels < y_range[2]:
y_resize = float(width_pixels) / float(y_range[2])
pkdc('Size before: {} Dimensions: {}, Resize: [{}, {}]', ar2d.size, ar2d.shape, y_resize, x_resize)
try:
from scipy import ndimage
ar2d = ndimage.zoom(ar2d, [y_resize, x_resize], order=1)
pkdc('Size after : {} Dimensions: {}', ar2d.size, ar2d.shape)
x_range[2] = ar2d.shape[1]
y_range[2] = ar2d.shape[0]
except Exception:
pkdlog('Cannot resize the image - scipy.ndimage.zoom() cannot be imported.')
# rotate 3D image
if rotate_angle:
rotate_reshape = report.get('rotateReshape', '0') == '1'
try:
from scipy import ndimage
pkdc('Size before: {} Dimensions: {}', ar2d.size, ar2d.shape)
shape_before = list(ar2d.shape)
ar2d = ndimage.rotate(ar2d, rotate_angle, reshape = rotate_reshape, mode='constant', order = 3)
pkdc('Size after rotate: {} Dimensions: {}', ar2d.size, ar2d.shape)
shape_rotate = list(ar2d.shape)
pkdc('x_range and y_range before rotate is [{},{}] and [{},{}]', x_range[0], x_range[1], y_range[0], y_range[1])
x_range[0] = shape_rotate[0]/shape_before[0]*x_range[0]
x_range[1] = shape_rotate[0]/shape_before[0]*x_range[1]
y_range[0] = shape_rotate[1]/shape_before[1]*y_range[0]
y_range[1] = shape_rotate[1]/shape_before[1]*y_range[1]
pkdc('x_range and y_range after rotate is [{},{}] and [{},{}]', x_range[0], x_range[1], y_range[0], y_range[1])
x_range[2] = ar2d.shape[1]
y_range[2] = ar2d.shape[0]
if info['title'] != 'Power Density': info['subtitle'] = info['subtitle'] + ' Image Rotate {}^0'.format(rotate_angle)
except Exception:
pkdlog('Cannot rotate the image - scipy.ndimage.rotate() cannot be imported.')
if z_units:
z_label = u'{} [{}]'.format(z_label, z_units)
return PKDict(
x_range=x_range,
y_range=y_range,
x_label=info['x_label'],
y_label=info['y_label'],
z_label=_superscript(z_label),
title=info['title'],
subtitle=_superscript_2(info['subtitle']),
z_matrix=ar2d.tolist(),
z_range = [report.minIntensityLimit, report.maxIntensityLimit] if report.get('useIntensityLimits', '0') == '1' else [np.min(ar2d), np.max(ar2d)],
summaryData=info.summaryData,
)
def _rotated_axis_range(x, y, theta):
x_new = x*np.cos(theta) + y*np.sin(theta)
return x_new
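# Illustrative note (not part of the original module): this is the x component of a
# standard 2D rotation, x' = x*cos(theta) + y*sin(theta); e.g. x = 1, y = 0 with
# theta = pi/2 maps to x' ~= 0.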
def _rsopt_jinja_context(model):
import multiprocessing
return PKDict(
forRSOpt=True,
numCores=int(model.numCores),
numWorkers=max(1, multiprocessing.cpu_count() - 1),
numSamples=int(model.numSamples),
rsOptElements=_process_rsopt_elements(model.elements),
rsOptParams=_RSOPT_PARAMS,
scanType=model.scanType,
)
def _rsopt_main():
return [
'import sys',
'if len(sys.argv[1:]) > 0:',
' set_rsopt_params(*sys.argv[1:])',
' del sys.argv[1:]',
'else:',
' exit(0)'
]
def _safe_beamline_item_name(name, names):
name = re.sub(r'\W+', '_', name)
name = re.sub(r'_+', '_', name)
    name = re.sub(r'^_+|_+$', '', name)
name = re.sub(r'^op_', '', name)
if not name or name == 'fin':
name = 'element'
idx = 2
current = name
while current in names:
current = '{}{}'.format(name, idx)
idx += 1
return current
def _save_user_model_list(model_name, beam_list):
pkdc('saving {} list', model_name)
simulation_db.write_json(
_SIM_DATA.lib_file_write_path(_USER_MODEL_LIST_FILENAME[model_name]),
beam_list,
)
def _superscript(val):
return re.sub(r'\^2', u'\u00B2', val)
def _superscript_2(val):
return re.sub(r'\^0', u'\u00B0', val)
def _trim(v):
res = ''
for l in v.split('\n'):
res += l.rstrip() + '\n'
x = res.rstrip('\n') + '\n'
return x
def _unique_name(items, field, template):
    #TODO(pjm): this is the same logic as sirepo.js uniqueName()
    values = PKDict()
    for item in items:
        values[item[field]] = True
    index = 1
    while True:
        candidate = template.replace('{}', str(index))
        if candidate in values:
            index += 1
        else:
            return candidate
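# Illustrative example (hypothetical items, not part of the original module): with
# items = [{'name': 'Beam 1'}, {'name': 'Beam 2'}], field = 'name' and
# template = 'Beam {}', the candidates 'Beam 1' and 'Beam 2' collide, so 'Beam 3'
# is returned.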
def _user_model_map(model_list, field):
res = PKDict()
for model in model_list:
res[model[field]] = model
return res
def _validate_data(data, schema):
# ensure enums match, convert ints/floats, apply scaling
template_common.validate_models(data, schema)
for item_id in data['models']['propagation']:
_validate_propagation(data['models']['propagation'][item_id][0])
_validate_propagation(data['models']['propagation'][item_id][1])
_validate_propagation(data['models']['postPropagation'])
def _validate_propagation(prop):
for i in range(len(prop)):
prop[i] = int(prop[i]) if i in (0, 1, 3, 4) else float(prop[i])
def _validate_safe_zip(zip_file_name, target_dir='.', *args):
"""Determine whether a zip file is safe to extract from
Performs the following checks:
- Each file must end up at or below the target directory
- Files must be 100MB or smaller
- If possible to determine, disallow "non-regular" and executable files
- Existing files cannot be overwritten
Args:
zip_file_name (str): name of the zip file to examine
target_dir (str): name of the directory to extract into (default to current directory)
*args: list of validator functions taking a zip file as argument and returning True or False and a string
Throws:
AssertionError if any test fails, otherwise completes silently
"""
def path_is_sub_path(path, dir_name):
real_dir = os.path.realpath(dir_name)
end_path = os.path.realpath(real_dir + '/' + path)
return end_path.startswith(real_dir)
def file_exists_in_dir(file_name, dir_name):
return os.path.exists(os.path.realpath(dir_name + '/' + file_name))
def file_attrs_ok(attrs):
# ms-dos attributes only use two bytes and don't contain much useful info, so pass them
if attrs < 2 << 16:
return True
# UNIX file attributes live in the top two bytes
mask = attrs >> 16
is_file_or_dir = mask & (0o0100000 | 0o0040000) != 0
no_exec = mask & (0o0000100 | 0o0000010 | 0o0000001) == 0
return is_file_or_dir and no_exec
# 100MB
max_file_size = 100000000
zip_file = zipfile.ZipFile(zip_file_name)
for f in zip_file.namelist():
i = zip_file.getinfo(f)
s = i.file_size
attrs = i.external_attr
assert path_is_sub_path(f, target_dir), 'Cannot extract {} above target directory'.format(f)
assert s <= max_file_size, '{} too large ({} > {})'.format(f, str(s), str(max_file_size))
assert file_attrs_ok(attrs), '{} not a normal file or is executable'.format(f)
assert not file_exists_in_dir(f, target_dir), 'Cannot overwrite file {} in target directory {}'.format(f, target_dir)
for validator in args:
res, err_string = validator(zip_file)
assert res, '{} failed validator: {}'.format(os.path.basename(zip_file_name), err_string)
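# Illustrative note (not part of the original module): for the UNIX-attribute check
# above, a plain rw-r--r-- zip member stores 0o100644 << 16 in external_attr and
# passes, while an executable rwxr-xr-x member (0o100755 << 16) fails the no_exec
# test and raises the AssertionError.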
def _zip_path_for_file(zf, file_to_find):
"""Find the full path of the specified file within the zip.
For a zip zf containing:
foo1
foo2
bar/
bar/foo3
_zip_path_for_file(zf, 'foo3') will return 'bar/foo3'
Args:
zf(zipfile.ZipFile): the zip file to examine
file_to_find (str): name of the file to find
Returns:
        The first path in the zip that matches the file name; raises ValueError if no match is found
"""
# Get the base file names from the zip (directories have a basename of '')
file_names_in_zip = [os.path.basename(x) for x in zf.namelist()]
return zf.namelist()[file_names_in_zip.index(file_to_find)]
|
mkeilman/sirepo
|
sirepo/template/srw.py
|
Python
|
apache-2.0
| 80,889
|
[
"CRYSTAL",
"VTK"
] |
78aec70861737c4f84a4572dfd56239b3b4e3220fc2fdc0dd6118116f66453c5
|
# python_version >= '3.5'
#: Okay
class C:
async def __init__(*args, **kwargs):
pass
#: N805:4:17
class C:
@decorator(
'a')
async def m(cls, k='w'): # noqa: N805
pass
#: N805(--staticmethod-decorators=exstatik,stcmthd)
class ButWeLostTheOriginalStaticMethodLateDecorator(object):
async def test(so, exciting):
pass
test = staticmethod(test)
|
flintwork/pep8-naming
|
testsuite/N805_py35.py
|
Python
|
mit
| 394
|
[
"exciting"
] |
f4bb5fc6bdfa13097a1f32251567167ca14ca9b64a62ff9b64a3d759374b0c75
|
from django.shortcuts import render
from django.http import JsonResponse
from django.db import connections
from django.db.models import Count
from django.contrib import admin
from visitor.models import Apache
import json
admin.site.register(Apache)
# Create your views here.
def text(request):
apachelogs_list = Apache.objects.all()
context_dict = {'apaches': apachelogs_list}
return render(request, 'index.html', context_dict)
def render_javascript(request):
lists = [
{ "date": "2015-11-28", "visit": 10 },
{ "date": "2015-10-09", "visit": 8 },
{ "date": "2015-11-01", "visit": 25 },
]
context_dict = {'lists_as_json': lists}
return render(request, 'lists.html', context_dict)
def render_javascript2(request):
apaches = Apache.objects.all()
alist = []
for apache in apaches:
dateformat = "%Y-%m-%d %H:%M:%S" #2015-11-21 18:36:00
date_dict1 = apache.date
date_dict2 = date_dict1.strftime(dateformat)
adict = {'date': date_dict2, 'visit': apache.visit}
alist.append(adict)
context_dict = {'data_as_json': alist}
return render(request, 'logs.html', context_dict)
def render_javascript3(request):
return render(request, 'scatterplot.html')
|
za/dd3
|
dd3/visitor/views.py
|
Python
|
apache-2.0
| 1,183
|
[
"VisIt"
] |
1e0b8134cfd9abd50611c9b5431c25627efb2b46f535d4e7c3dd47006752045c
|
"""Extensions to the ase Atoms class
"""
import numpy as np
from ase import Atoms
from ase.io import read, write
from ase.data import covalent_radii
from ase.calculators.neighborlist import NeighborList
class Cluster(Atoms):
"""A class for cluster structures
to enable simplified manipulation"""
def __init__(self, *args, **kwargs):
self.data = {}
if len(args) > 0:
filename = args[0]
if isinstance(filename, str):
self.read(filename, kwargs.get('filetype'))
return
else:
Atoms.__init__(self, [])
if kwargs.get('filename') is not None:
filename = kwargs.pop('filename')
Atoms.__init__(self, *args, **kwargs)
self.read(filename, kwargs.get('filetype'))
else:
Atoms.__init__(self, *args, **kwargs)
def extreme_positions(self):
"""get the extreme positions of the structure"""
pos = self.get_positions()
return np.array([np.minimum.reduce(pos), np.maximum.reduce(pos)])
def find_connected(self, index, dmax=None, scale=1.5):
"""Find the atoms connected to self[index] and return them.
If dmax is not None:
Atoms are defined to be connected if they are nearer than dmax
to each other.
If dmax is None:
Atoms are defined to be connected if they are nearer than the
sum of their covalent radii * scale to each other.
"""
# set neighbor lists
neighborlist = []
if dmax is None:
# define neighbors according to covalent radii
radii = scale * covalent_radii[self.get_atomic_numbers()]
for atom in self:
positions = self.positions - atom.position
distances = np.sqrt(np.sum(positions**2, axis=1))
radius = scale * covalent_radii[atom.get_atomic_number()]
neighborlist.append(np.where(distances < radii + radius)[0])
else:
# define neighbors according to distance
nl = NeighborList([0.5 * dmax] * len(self), skin=0)
nl.update(self)
for i, atom in enumerate(self):
neighborlist.append(list(nl.get_neighbors(i)[0]))
connected = list(neighborlist[index])
isolated = False
while not isolated:
isolated = True
for i in connected:
for j in neighborlist[i]:
if j in connected:
pass
else:
connected.append(j)
isolated = False
atoms = Cluster()
for i in connected:
atoms.append(self[i])
return atoms
def minimal_box(self, border=0, h=None):
"""The box needed to fit the structure in.
The structure is moved to fit into the box [(0,x),(0,y),(0,z)]
        with x,y,z > 0 (fitting the ASE convention).
The border argument can be used to add a border of empty space
around the structure.
If h is set, the box is extended to ensure that box/h is
a multiple of 4.
This ensures that GPAW uses the desired h.
The shift applied to the structure is returned.
"""
if len(self) == 0:
return None
extr = self.extreme_positions()
# add borders
        if isinstance(border, list):
b = border
else:
b = [border, border, border]
for c in range(3):
extr[0][c] -= b[c]
extr[1][c] += b[c] - extr[0][c] # shifted already
# check for multiple of 4
if h is not None:
if not hasattr(h, '__len__'):
h = np.array([h, h, h])
for c in range(3):
# apply the same as in paw.py
L = extr[1][c] # shifted already
N = max(4, int(L / h[c] / 4 + 0.5) * 4)
# correct L
dL = N * h[c] - L
# move accordingly
extr[1][c] += dL # shifted already
extr[0][c] -= dL / 2.
# move lower corner to (0, 0, 0)
shift = tuple(-1. * np.array(extr[0]))
self.translate(shift)
self.set_cell(tuple(extr[1]))
return shift
    def get(self, name):
        """General get"""
        attr = 'get_' + name
        if hasattr(self, attr):
            return getattr(self, attr)()
        elif name in self.data:
            return self.data[name]
        else:
            return None
def set(self, name, data):
"""General set"""
attr = 'set_' + name
if hasattr(self, attr):
getattr(self, attr)(data)
else:
self.data[name] = data
def read(self, filename, format=None):
"""Read the structure from some file. The type can be given
or it will be guessed from the filename."""
self.__init__(read(filename, format=format))
return len(self)
def write(self, filename=None, format=None, repeat=None):
"""Write the structure to file.
Parameters
----------
format: string
can be given or it will be guessed from the filename
repeat: array, eg.: [1,0,1]
can be used to repeat the structure
"""
if filename is None:
if format is None:
raise RuntimeError('Please specify either filename or format.')
else:
filename = self.get_name() + '.' + format
out = self
if repeat is None:
out = self
else:
out = Cluster([])
cell = self.get_cell().diagonal()
for i in range(repeat[0] + 1):
for j in range(repeat[1] + 1):
for k in range(repeat[2] + 1):
copy = self.copy()
copy.translate(np.array([i, j, k]) * cell)
out += copy
write(filename, out, format)
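# --- Usage sketch (illustration only, not part of the original module) ---
# Intended workflow: read a structure, pull out the fragment bonded to a
# given atom, and pad it with vacuum so the box suits a GPAW grid spacing h.
# The file name 'molecule.xyz' is an assumption made for this example.
if __name__ == '__main__':
    cluster = Cluster('molecule.xyz')
    # atoms connected (directly or indirectly) to atom 0 via covalent radii
    fragment = cluster.find_connected(0, scale=1.5)
    # 4 Angstrom of vacuum on each side, box commensurate with h = 0.2
    shift = fragment.minimal_box(border=4.0, h=0.2)
    fragment.write('fragment.xyz')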
|
qsnake/gpaw
|
gpaw/cluster.py
|
Python
|
gpl-3.0
| 6,110
|
[
"ASE",
"GPAW"
] |
ba97dfcead6e5c580b0d14f779994bafae54c3b7dd23be59e5ae27ffd4b61d87
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes that operate on points or vectors in 3D space.
"""
import re
import string
import warnings
from math import cos, pi, sin, sqrt
import numpy as np
from monty.json import MSONable
from pymatgen.electronic_structure.core import Magmom
from pymatgen.util.string import transformation_to_string
from pymatgen.util.typing import ArrayLike
__author__ = "Shyue Ping Ong, Shyam Dwaraknath, Matthew Horton"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
class SymmOp(MSONable):
"""
A symmetry operation in cartesian space. Consists of a rotation plus a
translation. Implementation is as an affine transformation matrix of rank 4
for efficiency. Read: http://en.wikipedia.org/wiki/Affine_transformation.
.. attribute:: affine_matrix
A 4x4 numpy.array representing the symmetry operation.
"""
def __init__(self, affine_transformation_matrix: ArrayLike, tol=0.01):
"""
Initializes the SymmOp from a 4x4 affine transformation matrix.
In general, this constructor should not be used unless you are
transferring rotations. Use the static constructors instead to
generate a SymmOp from proper rotations and translation.
Args:
affine_transformation_matrix (4x4 array): Representing an
affine transformation.
tol (float): Tolerance for determining if matrices are equal.
"""
affine_transformation_matrix = np.array(affine_transformation_matrix)
if affine_transformation_matrix.shape != (4, 4):
raise ValueError("Affine Matrix must be a 4x4 numpy array!")
self.affine_matrix = affine_transformation_matrix
self.tol = tol
@staticmethod
def from_rotation_and_translation(
rotation_matrix: ArrayLike = ((1, 0, 0), (0, 1, 0), (0, 0, 1)),
translation_vec: ArrayLike = (0, 0, 0),
tol=0.1,
):
"""
Creates a symmetry operation from a rotation matrix and a translation
vector.
Args:
rotation_matrix (3x3 array): Rotation matrix.
translation_vec (3x1 array): Translation vector.
tol (float): Tolerance to determine if rotation matrix is valid.
Returns:
SymmOp object
"""
rotation_matrix = np.array(rotation_matrix)
translation_vec = np.array(translation_vec)
if rotation_matrix.shape != (3, 3):
raise ValueError("Rotation Matrix must be a 3x3 numpy array.")
if translation_vec.shape != (3,):
raise ValueError("Translation vector must be a rank 1 numpy array " "with 3 elements.")
affine_matrix = np.eye(4)
affine_matrix[0:3][:, 0:3] = rotation_matrix
affine_matrix[0:3][:, 3] = translation_vec
return SymmOp(affine_matrix, tol)
def __eq__(self, other):
return np.allclose(self.affine_matrix, other.affine_matrix, atol=self.tol)
def __hash__(self):
return 7
def __repr__(self):
return self.__str__()
def __str__(self):
output = [
"Rot:",
str(self.affine_matrix[0:3][:, 0:3]),
"tau",
str(self.affine_matrix[0:3][:, 3]),
]
return "\n".join(output)
def operate(self, point):
"""
Apply the operation on a point.
Args:
point: Cartesian coordinate.
Returns:
Coordinates of point after operation.
"""
affine_point = np.array([point[0], point[1], point[2], 1])
return np.dot(self.affine_matrix, affine_point)[0:3]
def operate_multi(self, points):
"""
Apply the operation on a list of points.
Args:
points: List of Cartesian coordinates
Returns:
Numpy array of coordinates after operation
"""
points = np.array(points)
affine_points = np.concatenate([points, np.ones(points.shape[:-1] + (1,))], axis=-1)
return np.inner(affine_points, self.affine_matrix)[..., :-1]
def apply_rotation_only(self, vector: ArrayLike):
"""
Vectors should only be operated by the rotation matrix and not the
translation vector.
Args:
vector (3x1 array): A vector.
"""
return np.dot(self.rotation_matrix, vector)
def transform_tensor(self, tensor: np.ndarray):
"""
Applies rotation portion to a tensor. Note that tensor has to be in
full form, not the Voigt form.
Args:
tensor (numpy array): a rank n tensor
Returns:
Transformed tensor.
"""
dim = tensor.shape
rank = len(dim)
assert all([i == 3 for i in dim])
# Build einstein sum string
lc = string.ascii_lowercase
indices = lc[:rank], lc[rank : 2 * rank]
einsum_string = ",".join([a + i for a, i in zip(*indices)])
einsum_string += ",{}->{}".format(*indices[::-1])
einsum_args = [self.rotation_matrix] * rank + [tensor]
return np.einsum(einsum_string, *einsum_args)
def are_symmetrically_related(self, point_a: ArrayLike, point_b: ArrayLike, tol: float = 0.001) -> bool:
"""
Checks if two points are symmetrically related.
Args:
point_a (3x1 array): First point.
point_b (3x1 array): Second point.
tol (float): Absolute tolerance for checking distance.
Returns:
True if self.operate(point_a) == point_b or vice versa.
"""
if np.allclose(self.operate(point_a), point_b, atol=tol):
return True
if np.allclose(self.operate(point_b), point_a, atol=tol):
return True
return False
@property
def rotation_matrix(self) -> np.ndarray:
"""
A 3x3 numpy.array representing the rotation matrix.
"""
return self.affine_matrix[0:3][:, 0:3]
@property
def translation_vector(self) -> np.ndarray:
"""
A rank 1 numpy.array of dim 3 representing the translation vector.
"""
return self.affine_matrix[0:3][:, 3]
def __mul__(self, other):
"""
Returns a new SymmOp which is equivalent to apply the "other" SymmOp
followed by this one.
"""
new_matrix = np.dot(self.affine_matrix, other.affine_matrix)
return SymmOp(new_matrix)
@property
def inverse(self) -> "SymmOp":
"""
Returns inverse of transformation.
"""
invr = np.linalg.inv(self.affine_matrix)
return SymmOp(invr)
@staticmethod
def from_axis_angle_and_translation(
axis: ArrayLike, angle: float, angle_in_radians: bool = False, translation_vec: ArrayLike = (0, 0, 0)
) -> "SymmOp":
"""
Generates a SymmOp for a rotation about a given axis plus translation.
Args:
axis: The axis of rotation in cartesian space. For example,
                [1, 0, 0] indicates rotation about x-axis.
angle (float): Angle of rotation.
angle_in_radians (bool): Set to True if angles are given in
radians. Or else, units of degrees are assumed.
translation_vec: A translation vector. Defaults to zero.
Returns:
SymmOp for a rotation about given axis and translation.
"""
if isinstance(axis, (tuple, list)):
axis = np.array(axis)
vec = np.array(translation_vec)
a = angle if angle_in_radians else angle * pi / 180
cosa = cos(a)
sina = sin(a)
u = axis / np.linalg.norm(axis)
r = np.zeros((3, 3))
r[0, 0] = cosa + u[0] ** 2 * (1 - cosa)
r[0, 1] = u[0] * u[1] * (1 - cosa) - u[2] * sina
r[0, 2] = u[0] * u[2] * (1 - cosa) + u[1] * sina
r[1, 0] = u[0] * u[1] * (1 - cosa) + u[2] * sina
r[1, 1] = cosa + u[1] ** 2 * (1 - cosa)
r[1, 2] = u[1] * u[2] * (1 - cosa) - u[0] * sina
r[2, 0] = u[0] * u[2] * (1 - cosa) - u[1] * sina
r[2, 1] = u[1] * u[2] * (1 - cosa) + u[0] * sina
r[2, 2] = cosa + u[2] ** 2 * (1 - cosa)
return SymmOp.from_rotation_and_translation(r, vec)
@staticmethod
def from_origin_axis_angle(
origin: ArrayLike, axis: ArrayLike, angle: float, angle_in_radians: bool = False
) -> "SymmOp":
"""
Generates a SymmOp for a rotation about a given axis through an
origin.
Args:
origin (3x1 array): The origin which the axis passes through.
axis (3x1 array): The axis of rotation in cartesian space. For
                example, [1, 0, 0] indicates rotation about x-axis.
angle (float): Angle of rotation.
angle_in_radians (bool): Set to True if angles are given in
radians. Or else, units of degrees are assumed.
Returns:
SymmOp.
"""
theta = angle * pi / 180 if not angle_in_radians else angle
a = origin[0] # type: ignore
b = origin[1] # type: ignore
c = origin[2] # type: ignore
u = axis[0] # type: ignore
v = axis[1] # type: ignore
w = axis[2] # type: ignore
# Set some intermediate values.
u2 = u * u # type: ignore
v2 = v * v # type: ignore
w2 = w * w # type: ignore
cos_t = cos(theta)
sin_t = sin(theta)
l2 = u2 + v2 + w2 # type: ignore
l = sqrt(l2) # type: ignore
# Build the matrix entries element by element.
m11 = (u2 + (v2 + w2) * cos_t) / l2 # type: ignore
m12 = (u * v * (1 - cos_t) - w * l * sin_t) / l2 # type: ignore
m13 = (u * w * (1 - cos_t) + v * l * sin_t) / l2 # type: ignore
m14 = ( # type: ignore
a * (v2 + w2) # type: ignore
- u * (b * v + c * w) # type: ignore
+ (u * (b * v + c * w) - a * (v2 + w2)) * cos_t # type: ignore
+ (b * w - c * v) * l * sin_t # type: ignore
) / l2 # type: ignore
m21 = (u * v * (1 - cos_t) + w * l * sin_t) / l2 # type: ignore
m22 = (v2 + (u2 + w2) * cos_t) / l2 # type: ignore
m23 = (v * w * (1 - cos_t) - u * l * sin_t) / l2 # type: ignore
m24 = ( # type: ignore
b * (u2 + w2) # type: ignore
- v * (a * u + c * w) # type: ignore
+ (v * (a * u + c * w) - b * (u2 + w2)) * cos_t # type: ignore
+ (c * u - a * w) * l * sin_t # type: ignore
) / l2 # type: ignore
m31 = (u * w * (1 - cos_t) - v * l * sin_t) / l2 # type: ignore
m32 = (v * w * (1 - cos_t) + u * l * sin_t) / l2 # type: ignore
m33 = (w2 + (u2 + v2) * cos_t) / l2 # type: ignore
m34 = ( # type: ignore
c * (u2 + v2) # type: ignore
- w * (a * u + b * v) # type: ignore
+ (w * (a * u + b * v) - c * (u2 + v2)) * cos_t # type: ignore
+ (a * v - b * u) * l * sin_t # type: ignore
) / l2
return SymmOp(
[ # type: ignore
[m11, m12, m13, m14],
[m21, m22, m23, m24],
[m31, m32, m33, m34],
[0, 0, 0, 1],
]
)
@staticmethod
def reflection(normal: ArrayLike, origin: ArrayLike = (0, 0, 0)) -> "SymmOp":
"""
Returns reflection symmetry operation.
Args:
normal (3x1 array): Vector of the normal to the plane of
reflection.
origin (3x1 array): A point in which the mirror plane passes
through.
Returns:
SymmOp for the reflection about the plane
"""
# Normalize the normal vector first.
n = np.array(normal, dtype=float) / np.linalg.norm(normal)
u, v, w = n
translation = np.eye(4)
translation[0:3, 3] = -np.array(origin)
xx = 1 - 2 * u ** 2
yy = 1 - 2 * v ** 2
zz = 1 - 2 * w ** 2
xy = -2 * u * v
xz = -2 * u * w
yz = -2 * v * w
mirror_mat = [[xx, xy, xz, 0], [xy, yy, yz, 0], [xz, yz, zz, 0], [0, 0, 0, 1]]
if np.linalg.norm(origin) > 1e-6:
mirror_mat = np.dot(np.linalg.inv(translation), np.dot(mirror_mat, translation))
return SymmOp(mirror_mat)
@staticmethod
def inversion(origin: ArrayLike = (0, 0, 0)) -> "SymmOp":
"""
Inversion symmetry operation about axis.
Args:
origin (3x1 array): Origin of the inversion operation. Defaults
to [0, 0, 0].
Returns:
SymmOp representing an inversion operation about the origin.
"""
mat = -np.eye(4)
mat[3, 3] = 1
mat[0:3, 3] = 2 * np.array(origin)
return SymmOp(mat)
@staticmethod
def rotoreflection(axis: ArrayLike, angle: float, origin: ArrayLike = (0, 0, 0)) -> "SymmOp":
"""
Returns a roto-reflection symmetry operation
Args:
axis (3x1 array): Axis of rotation / mirror normal
angle (float): Angle in degrees
origin (3x1 array): Point left invariant by roto-reflection.
Defaults to (0, 0, 0).
Return:
Roto-reflection operation
"""
rot = SymmOp.from_origin_axis_angle(origin, axis, angle)
refl = SymmOp.reflection(axis, origin)
m = np.dot(rot.affine_matrix, refl.affine_matrix)
return SymmOp(m)
def as_dict(self) -> dict:
"""
        :return: MSONable dict.
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"matrix": self.affine_matrix.tolist(),
"tolerance": self.tol,
}
def as_xyz_string(self) -> str:
"""
Returns a string of the form 'x, y, z', '-x, -y, z',
'-y+1/2, x+1/2, z+1/2', etc. Only works for integer rotation matrices
"""
# test for invalid rotation matrix
if not np.all(np.isclose(self.rotation_matrix, np.round(self.rotation_matrix))):
warnings.warn("Rotation matrix should be integer")
return transformation_to_string(self.rotation_matrix, translation_vec=self.translation_vector, delim=", ")
@staticmethod
def from_xyz_string(xyz_string: str) -> "SymmOp":
"""
Args:
xyz_string: string of the form 'x, y, z', '-x, -y, z',
'-2y+1/2, 3x+1/2, z-y+1/2', etc.
Returns:
SymmOp
"""
rot_matrix = np.zeros((3, 3))
trans = np.zeros(3)
toks = xyz_string.strip().replace(" ", "").lower().split(",")
re_rot = re.compile(r"([+-]?)([\d\.]*)/?([\d\.]*)([x-z])")
re_trans = re.compile(r"([+-]?)([\d\.]+)/?([\d\.]*)(?![x-z])")
for i, tok in enumerate(toks):
# build the rotation matrix
for m in re_rot.finditer(tok):
factor = -1.0 if m.group(1) == "-" else 1.0
if m.group(2) != "":
factor *= float(m.group(2)) / float(m.group(3)) if m.group(3) != "" else float(m.group(2))
j = ord(m.group(4)) - 120
rot_matrix[i, j] = factor
# build the translation vector
for m in re_trans.finditer(tok):
factor = -1 if m.group(1) == "-" else 1
num = float(m.group(2)) / float(m.group(3)) if m.group(3) != "" else float(m.group(2))
trans[i] = num * factor
return SymmOp.from_rotation_and_translation(rot_matrix, trans)
@classmethod
def from_dict(cls, d) -> "SymmOp":
"""
:param d: dict
:return: SymmOp from dict representation.
"""
return cls(d["matrix"], d["tolerance"])
class MagSymmOp(SymmOp):
"""
Thin wrapper around SymmOp to extend it to support magnetic symmetry
by including a time reversal operator. Magnetic symmetry is similar
to conventional crystal symmetry, except symmetry is reduced by the
addition of a time reversal operator which acts on an atom's magnetic
moment.
"""
def __init__(self, affine_transformation_matrix: ArrayLike, time_reversal: int, tol: float = 0.01):
"""
Initializes the MagSymmOp from a 4x4 affine transformation matrix
and time reversal operator.
In general, this constructor should not be used unless you are
transferring rotations. Use the static constructors instead to
generate a SymmOp from proper rotations and translation.
Args:
affine_transformation_matrix (4x4 array): Representing an
affine transformation.
time_reversal (int): 1 or -1
tol (float): Tolerance for determining if matrices are equal.
"""
SymmOp.__init__(self, affine_transformation_matrix, tol=tol)
if time_reversal not in (-1, 1):
raise Exception(
"Time reversal operator not well defined: {0}, {1}".format(time_reversal, type(time_reversal))
)
self.time_reversal = time_reversal
def __eq__(self, other):
return np.allclose(self.affine_matrix, other.affine_matrix, atol=self.tol) and (
self.time_reversal == other.time_reversal
)
def __str__(self):
return self.as_xyzt_string()
def __repr__(self):
output = [
"Rot:",
str(self.affine_matrix[0:3][:, 0:3]),
"tau",
str(self.affine_matrix[0:3][:, 3]),
"Time reversal:",
str(self.time_reversal),
]
return "\n".join(output)
def __hash__(self):
# useful for obtaining a set of unique MagSymmOps
hashable_value = tuple(self.affine_matrix.flatten()) + (self.time_reversal,)
return hashable_value.__hash__()
def operate_magmom(self, magmom):
"""
Apply time reversal operator on the magnetic moment. Note that
magnetic moments transform as axial vectors, not polar vectors.
See 'Symmetry and magnetic structures', Rodríguez-Carvajal and
Bourée for a good discussion. DOI: 10.1051/epjconf/20122200010
Args:
magmom: Magnetic moment as electronic_structure.core.Magmom
class or as list or np array-like
Returns:
Magnetic moment after operator applied as Magmom class
"""
magmom = Magmom(magmom) # type casting to handle lists as input
transformed_moment = (
self.apply_rotation_only(magmom.global_moment) * np.linalg.det(self.rotation_matrix) * self.time_reversal
)
# retains input spin axis if different from default
return Magmom.from_global_moment_and_saxis(transformed_moment, magmom.saxis)
@classmethod
def from_symmop(cls, symmop, time_reversal) -> "MagSymmOp":
"""
Initialize a MagSymmOp from a SymmOp and time reversal operator.
Args:
symmop (SymmOp): SymmOp
time_reversal (int): Time reversal operator, +1 or -1.
Returns:
MagSymmOp object
"""
magsymmop = cls(symmop.affine_matrix, time_reversal, symmop.tol)
return magsymmop
@staticmethod
def from_rotation_and_translation_and_time_reversal(
rotation_matrix: ArrayLike = ((1, 0, 0), (0, 1, 0), (0, 0, 1)),
translation_vec: ArrayLike = (0, 0, 0),
time_reversal: int = 1,
tol: float = 0.1,
) -> "MagSymmOp":
"""
Creates a symmetry operation from a rotation matrix, translation
vector and time reversal operator.
Args:
rotation_matrix (3x3 array): Rotation matrix.
translation_vec (3x1 array): Translation vector.
time_reversal (int): Time reversal operator, +1 or -1.
tol (float): Tolerance to determine if rotation matrix is valid.
Returns:
MagSymmOp object
"""
symmop = SymmOp.from_rotation_and_translation(
rotation_matrix=rotation_matrix, translation_vec=translation_vec, tol=tol
)
return MagSymmOp.from_symmop(symmop, time_reversal)
@staticmethod
def from_xyzt_string(xyzt_string: str) -> "MagSymmOp":
"""
Args:
xyz_string: string of the form 'x, y, z, +1', '-x, -y, z, -1',
'-2y+1/2, 3x+1/2, z-y+1/2, +1', etc.
Returns:
MagSymmOp object
"""
symmop = SymmOp.from_xyz_string(xyzt_string.rsplit(",", 1)[0])
try:
time_reversal = int(xyzt_string.rsplit(",", 1)[1])
except Exception:
raise Exception("Time reversal operator could not be parsed.")
return MagSymmOp.from_symmop(symmop, time_reversal)
def as_xyzt_string(self) -> str:
"""
Returns a string of the form 'x, y, z, +1', '-x, -y, z, -1',
'-y+1/2, x+1/2, z+1/2, +1', etc. Only works for integer rotation matrices
"""
xyzt_string = SymmOp.as_xyz_string(self)
return xyzt_string + ", {:+}".format(self.time_reversal)
def as_dict(self) -> dict:
"""
        :return: MSONable dict
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"matrix": self.affine_matrix.tolist(),
"tolerance": self.tol,
"time_reversal": self.time_reversal,
}
@classmethod
def from_dict(cls, d: dict) -> "MagSymmOp":
"""
:param d: dict
:return: MagneticSymmOp from dict representation.
"""
return cls(d["matrix"], tol=d["tolerance"], time_reversal=d["time_reversal"])
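# --- Usage sketch (illustration only, not part of the original module) ---
# Builds a 90 degree rotation about z with a half-cell translation, applies
# it to a point, and round-trips the xyz string form; the magnetic variant
# adds a time reversal operator. The numbers are arbitrary examples.
if __name__ == "__main__":
    op = SymmOp.from_axis_angle_and_translation(
        axis=[0, 0, 1], angle=90, translation_vec=[0, 0, 0.5]
    )
    print(op.operate([1.0, 0.0, 0.0]))  # ~[0, 1, 0.5]
    op2 = SymmOp.from_xyz_string("-y, x, z+1/2")
    print(op2.as_xyz_string())  # '-y, x, z+1/2'
    mag_op = MagSymmOp.from_symmop(op2, time_reversal=-1)
    print(mag_op.as_xyzt_string())  # '-y, x, z+1/2, -1'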
|
davidwaroquiers/pymatgen
|
pymatgen/core/operations.py
|
Python
|
mit
| 22,058
|
[
"CRYSTAL",
"pymatgen"
] |
e997da2099c7f7b618010af3acd281a9db72a56d2f74e1eea97b7e84b1efd9f9
|
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
from pycuda import gpuarray
from pylab import *
from numpy import *
from time import time
from ..model.sample_prep import Q_space
from .approximations import wavefunction_format
import numpy
import time
def loadkernelsrc(name, precision='float32', defines={}):
import os
src = readfile(os.path.join(os.path.dirname(__file__),name))
# The following are currently defined by cufloat.h/cudouble.h, so aren't
# needed here.
#defines['CUDA_KERNEL'] = 'extern "C" __global__ void'
#defines['INLINE'] = '__inline__ __host__ __device__'
defines = "\n".join(('#define %s %s'%(k,str(defines[k])))
for k in sorted(defines.keys()))
#define sin __sinf
#define cos __cosf
if precision == 'float32':
typedefs = '''
#define HAVE_CUDA
#include <lib/cufloat.h>
#define sincos __sincosf
'''
else:
typedefs = '''
#define HAVE_CUDA
#include <lib/cudouble.h>
'''
src = defines+typedefs+src
return SourceModule(src, no_extern_c=True,
include_dirs=[os.path.abspath(os.path.dirname(__file__))])
def readfile(name):
file = open(name)
txt = file.read()
file.close()
return txt
def DWBA_form(cell,lattice,beam,q,refract = True):
'''
The scattering is calculated in scatCalc because we need to open up the
possibility for qx refraction on the interpolation.
'''
if refract == True:
from scipy.interpolate import interp1d
scat = zeros(q.points, dtype = 'complex')
qvec = q.vectorize()
q.getKSpace(beam.wavelength)
qx_refract = qvec[0].repeat(q.points[1],axis=1)
qx_refract = qx_refract.repeat(q.points[2],axis=2)
qx_refract[q.kin <= 0.0] += beam.wavelength*cell.inc_sub[1,0]
qx_refract[q.kout >= 0.0] -= beam.wavelength*cell.inc_sub[1,0]
q.qx_refract = qx_refract
qxMinTemp = qx_refract.min()-3*q.q_step[0]
qxMaxTemp = qx_refract.max()+3*q.q_step[0]
#doubles the interpolation q for a more accurate interpolation
newX = arange(qxMinTemp,qxMaxTemp,q.q_step[0]/2.0)
newQ = Q_space([qxMinTemp,q.minimums[1],q.minimums[2]],
[qxMaxTemp,q.maximums[1],q.maximums[2]],
[size(newX),q.points[1],q.points[2]])
largScat = scatCalc(cell,lattice,beam,newQ)
for ii in range (size(q.q_list[1])):
for iii in range(size(q.q_list[2])):
realSplineFunc = interp1d(newQ.q_list[0],largScat.real[:,ii,iii])
imagSplineFunc = interp1d(newQ.q_list[0],largScat.imag[:,ii,iii])
interpReal = realSplineFunc(qx_refract[:,ii,iii])
interpImag = imagSplineFunc(qx_refract[:,ii,iii])
scat[:,ii,iii].real = interpReal
scat[:,ii,iii].imag = interpImag
else:
scat = scatCalc(cell,lattice,beam,q)
'''
imshow(log10(rot90(sum(((abs(scat)**2)).real,axis=1))), extent = q.getExtent(), aspect = 'auto')
show()
'''
return(scat)
def scatCalc(cell,lattice,beam,q):
'''
    Math from Kentzinger et al. in Physical Review B 77, 104435 (2008)
'''
#Front of Eq (20)
m = 1.674e-27
h_bar = 6.62607e-14
Vfac = -m/(2*pi*h_bar**2)
q.getKSpace(beam.wavelength)
scat = zeros(q.points,dtype = 'complex')
# PSI in one
# PSI in two
# PSI out one
# PSI out two
pio = [None]*cell.n[2]
pit = [None]*cell.n[2]
poo = [None]*cell.n[2]
pot = [None]*cell.n[2]
pil = [None]*cell.n[2]
pfl = [None]*cell.n[2]
q_piopoo = [None]*cell.n[2]
q_piopot = [None]*cell.n[2]
q_pitpoo = [None]*cell.n[2]
q_pitpot = [None]*cell.n[2]
x = cell.value_list[0].reshape((cell.n[0],1,1))
y = cell.value_list[1].reshape((1,cell.n[1],1))
z = cell.value_list[2].reshape((1,1,cell.n[2]))
#Averages the in-plane scattering length density and formats the new
    #object as [SLD, Thickness, Absorption] for each z layer
SLDArray = wavefunction_format(cell.unit, cell.step[2], absorbtion = None)
#This is the calculation of the critical edge. It is needed for the
#calculation of p.
pcl = sqrt(4*pi*SLDArray[:,0])
#The cell is originally oriented so that the the bottom of the unit cell
#is located at the origin. This flips the cell so that the stack is ordered
#in the opposite direction.
flipCell = zeros(shape(cell.unit))
for i in range(cell.n[2]):
flipCell[:,:,i] = cell.unit[:,:,shape(cell.unit)[2]-i-1]
#This calculates the residual potential by taking the difference between
#the reference potential and the actual potential
Vres = flipCell - (SLDArray[:,0]).reshape((1,1,cell.n[2]))
#This is the rho used in eq. 20. The integration is the residual potential
#relative to the reference potential.
rhoTilOverRho = Vres/(SLDArray[:,0]).reshape((1,1,cell.n[2]))
rhoTilOverRho[isnan(rhoTilOverRho)] = 0.0
# calculates the structure factor using the gaussian convolution if there
# is a lattice specified
if lattice != None:
lattice_flag = True
SF = lattice.gauss_struc_calc(q)
else:
lattice_flag = False
# Allocate space in memory for GPU output
ftwRef = numpy.zeros(size(q.q_list[2]),dtype='complex')
for i in range(size(q.q_list[0])):
print 'qx number: ', i, ' calculating'
for ii in range(size(q.q_list[1])):
#The next few lines calculate the c and d values for each layer.
#This is done by calculating the specular reflectivity and then
#tracing the final reflected intensity back into the sample.
'''
poskiWavePar = dwbaWavefunction(q.kin[i,ii,:],SLDArray)
negkfWavePar = dwbaWavefunction(-q.kout[i,ii,:],(SLDArray))
pio = poskiWavePar.c
pit = poskiWavePar.d
k_inl =poskiWavePar.kz_l
poo = negkfWavePar.c
pot = negkfWavePar.d
k_outl =negkfWavePar.kz_l
'''
kin = q.kin
kout = -q.kout
# Load CUDA source
cudamod1 = loadkernelsrc("lib/DWBA_kernel1.c")
#Grab function(s)
cudaDWBA1 = cudamod1.get_function("cudaDWBA")
crtor = gpuarray.to_gpu(rhoTilOverRho)
cSLDArray = gpuarray.to_gpu(SLDArray)
ckin = gpuarray.to_gpu(kin)
ckout = gpuarray.to_gpu(kout)
cscatout = cuda.mem_alloc(ftwRef.nbytes)
# Call DWBA function on the GPU
cudaDWBA1(crtor,
#const Real x[MAX_DIM][MAX_DIM][MAX_DIM],
#const Real y[MAX_DIM][MAX_DIM][MAX_DIM],
#const Real z[MAX_DIM][MAX_DIM][MAX_DIM],
cell.step[0], cell.step[1], cell.step[2],
cSLDArray,
ckin, ckout,
numpy.int32(size(q.q_list[0])),
numpy.int32(size(q.q_list[1])),
numpy.int32(size(q.q_list[2])),
cscatout,
block=(50,50,50), grid=(10,10))
# Copy array back from the device(GPU) to the host (CPU)
cuda.memcpy_dtoh(pio, pio)
cuda.memcpy_dtoh(pit, pit)
cuda.memcpy_dtoh(poo, poo)
cuda.memcpy_dtoh(pot, pot)
for l in range(cell.n[2]):
#Solves the equation shown after eq. 11 on page 5.
pil[l]=sqrt(asarray((q.kin[i,ii,:]**2)-(pcl[l]**2),
dtype = 'complex'))
pfl[l]=sqrt(asarray((q.kout[i,ii,:]**2)-(pcl[l]**2),
dtype = 'complex'))
#Equations directly after eq (18).
q_piopoo[l] = -pfl[l] - pil[l]
q_piopot[l] = -pfl[l] + pil[l]
q_pitpoo[l] = pfl[l] - pil[l]
q_pitpot[l] = pfl[l] + pil[l]
pil = asarray(pil)
pfl = asarray(pfl)
q_piopoo = asarray(q_piopoo)
q_piopot = asarray(q_piopot)
q_pitpoo = asarray(q_pitpoo)
q_pitpot = asarray(q_pitpot)
pio = asarray(pio)
pit = asarray(pit)
poo = asarray(poo)
pot = asarray(pot)
k_inl = asarray(k_inl)
k_outl = asarray(k_outl)
# Load CUDA source
cudamod = loadkernelsrc("lib/DWBA_kernel.c")
#Grab function(s)
cudaDWBA = cudamod.get_function("cudaDWBA_part1")
# Copy over arrays and allocate memory on the GPU
cxx = gpuarray.to_gpu(x)
cyy = gpuarray.to_gpu(y)
crtor = gpuarray.to_gpu(rhoTilOverRho)
coutput = cuda.mem_alloc(ftwRef.nbytes)
# Call DWBA function on the GPU
cudaDWBA(q.q_list[0][i], q.q_list[1][ii],
cell.step[0], cell.step[1],
numpy.int32(size(x[0])), numpy.int32(size(y[0])),
cxx, cyy, crtor,
numpy.float32(Vfac), coutput,
block=(400,1,1), grid=(1,1))
#cuda_sync()
# Copy array back from the device(GPU) to the host (CPU)
cuda.memcpy_dtoh(ftwRef, coutput)
#Eq. 19
ftwRef = ((SLDArray[:,0]).reshape((1,1,cell.n[2]))*
ftwRef.reshape((1,1,cell.n[2])))
for iii in range(size(q.q_list[2])):
ft = ftwRef.copy()
pioSel = pio[:,iii].reshape((1,1,cell.n[2]))
pitSel = pit[:,iii].reshape((1,1,cell.n[2]))
pooSel = poo[:,iii].reshape((1,1,cell.n[2]))
potSel = pot[:,iii].reshape((1,1,cell.n[2]))
q_piopoo_sel = q_piopoo[:,iii].reshape((1,1,cell.n[2]))
q_piopot_sel = q_piopot[:,iii].reshape((1,1,cell.n[2]))
q_pitpoo_sel = q_pitpoo[:,iii].reshape((1,1,cell.n[2]))
q_pitpot_sel = q_pitpot[:,iii].reshape((1,1,cell.n[2]))
pil_sel = pil[:,iii].reshape((1,1,cell.n[2]))
pfl_sel = pfl[:,iii].reshape((1,1,cell.n[2]))
#equation 15
scat_PioPoo = (pioSel * exp(1j*pil_sel*z)*ft*
exp(1j*pfl_sel*z) * pooSel)
scat_PioPot = (pioSel * exp(1j*pil_sel*z)*ft*
exp(-1j*pfl_sel*z)*potSel)
scat_PitPoo = (pitSel * exp(-1j*pil_sel*z)*ft*
exp(1j*pfl_sel*z) *pooSel)
scat_PitPot = (pitSel * exp(-1j*pil_sel*z)*ft*
exp(-1j*pfl_sel*z)* potSel)
#equation 15
# edit 7/23/2012, bbm:
# the integration over z is taken care of by eq. 17 and 18,
# giving the Laue factor -
# the mu and nu sum comes out to 1/4 * 4 * g for unpolarized
# NO - Wait - changing my mind.
#
# looks like Chris was right - the S propagator in eq. 11
# is for a wavefunction referenced to the boundary of the
# current layer, while our c and d are calculated with respect
# to z = 0 (not z=z_l), so the extra factor of e^{ikz_l} might
# be necessary.
# scat_PioPoo = (pioSel * ft * pooSel)
# scat_PioPot = (pioSel * ft * potSel)
# scat_PitPoo = (pitSel * ft * pooSel)
# scat_PitPot = (pitSel * ft * potSel)
#equation 18
# edit 7/23/12, bbm:
# scat_ was incorrectly set to = cell.step[2] for q==0 case,
# instead of multiplying (should be *= )
                mask = (q_piopoo_sel != 0)
                scat_PioPoo[mask] *= ((-1j / q_piopoo_sel[mask]) *
                    (exp(1j *q_piopoo_sel[mask] * cell.step[2]) - 1.0))
                scat_PioPoo[q_piopoo_sel == 0] *= cell.step[2]
                mask = (q_piopot_sel != 0)
                scat_PioPot[mask] *= ((-1j / q_piopot_sel[mask]) *
                    (exp(1j *q_piopot_sel[mask] * cell.step[2]) - 1.0))
                scat_PioPot[q_piopot_sel == 0] *= cell.step[2]
                mask = (q_pitpoo_sel != 0)
                scat_PitPoo[mask] *= ((-1j / q_pitpoo_sel[mask]) *
                    (exp(1j *q_pitpoo_sel[mask] * cell.step[2]) - 1.0))
                scat_PitPoo[q_pitpoo_sel == 0] *= cell.step[2]
                mask = (q_pitpot_sel != 0)
                scat_PitPot[mask] *= ((-1j / q_pitpot_sel[mask]) *
                    (exp(1j *q_pitpot_sel[mask] * cell.step[2]) - 1.0))
                scat_PitPot[q_pitpot_sel == 0] *= cell.step[2]
#Exactly equation15
scat[i,ii,iii]= sum(scat_PioPoo + scat_PioPot +
scat_PitPoo + scat_PitPot)
'''
k_spec = q.q_list[2]/2.0
dwba_spec = dwbaWavefunction(k_spec,SLDArray)
locx = q.q_list[0].searchsorted(0.0)
locy = q.q_list[1].searchsorted(0.0)
#scat[locx,locy,:] = dwba_spec.r
semilogy(q.q_list[2],(abs(dwba_spec.r)**2))
semilogy(q.q_list[2],sum((abs(scat)**2).real,axis=1)[locx+5,:])
figure()
'''
return(scat)
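# --- Illustrative helper (sketch only, not called by scatCalc above) ---
# Eq. (18) integrates exp(1j*q_z*z) across one layer of thickness dz, giving
# (-1j/q_z)*(exp(1j*q_z*dz) - 1); in the q_z -> 0 limit the factor reduces
# to dz. This mirrors the masked handling inside the iii-loop of scatCalc.
def laue_layer_factor(qz, dz):
    qz = asarray(qz, dtype='complex')
    out = zeros(shape(qz), dtype='complex')
    mask = (qz != 0)
    out[mask] = (-1j / qz[mask]) * (exp(1j * qz[mask] * dz) - 1.0)
    out[~mask] = dz
    return out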
class dwbaWavefunction:
def __init__(self, kz, SLDArray):
self.kz = kz
self.SLDArray = SLDArray
self.layerCount = SLDArray.shape[0]
self.thickness = sum(SLDArray[1:-1,1])
SLD_inc = SLDArray[0,0]
SLD_sub = SLDArray[-1,0]
B11 = ones(shape(kz),dtype='complex')
B22 = ones(shape(kz),dtype='complex')
B21 = zeros(shape(kz),dtype='complex')
B12 = zeros(shape(kz),dtype='complex')
M11 = [None]*self.layerCount
M12 = [None]*self.layerCount
M21 = [None]*self.layerCount
M22 = [None]*self.layerCount
Bl11 = [None]*self.layerCount
Bl12 = [None]*self.layerCount
Bl21 = [None]*self.layerCount
Bl22 = [None]*self.layerCount
Bl11[0] = B11
Bl12[0] = B22
Bl21[0] = B21
Bl22[0] = B12
self.c = [None]*self.layerCount
self.d = [None]*self.layerCount
nz =[None]*self.layerCount
k0z = sqrt(asarray(kz**2 + 4 * pi * SLD_inc,dtype = 'complex'))
nz[0] = sqrt( complex(1) - 4 * pi * SLD_inc / k0z**2 )
nz[-1] = sqrt( complex(1) - 4 * pi * SLD_sub / k0z**2 )
for l in range(1, self.layerCount-1):
#leaving off the incident medium and substrate from sum
SLD,thickness,mu = self.SLDArray[l]
nz[l] = sqrt(complex(1) - 4 * pi * SLD/ k0z**2 )
kzl =( nz[l] * k0z ) # edit: BBM 02/10/2012
n = nz[l]
M11[l] = asarray(cos(kzl * thickness),dtype = 'complex')
M12[l] = asarray(1/n * sin(kzl * thickness),dtype = 'complex')
M21[l] = asarray((-n) * sin(kzl * thickness),dtype = 'complex')
M22[l] = asarray(cos(kzl * thickness),dtype = 'complex')
C1 = B11*M11[l] + B21*M12[l]
C2 = B11*M21[l] + B21*M22[l]
B11 = C1
B21 = C2
C1 = B12*M11[l] + B22*M12[l]
C2 = B12*M21[l] + B22*M22[l]
B12 = C1
B22 = C2
Bl11[l] = B11
Bl21[l] = B21
Bl12[l] = B12
Bl22[l] = B22
self.kz_l = nz * k0z
r = (B11 + (1j * nz[0] * B12) + (1/(1j * nz[-1])*(
-B21 - 1j * nz[0] * B22))) / ((-B11 + 1j * nz[0] * B12) + (
1/(1j * nz[-1])*( B21 - 1j * nz[0] * B22)))
Bl11[-1] = ones(shape(kz))
Bl12[-1] = zeros(shape(kz))
Bl21[-1] = ones(shape(kz))
Bl22[-1] = zeros(shape(kz))
self.r = r
self.t = zeros(shape(r),dtype = 'complex')
self.t[nz[-1].real != 0.0] = 1.0 + self.r[nz[-1].real != 0.0]
self.c[0] = ones(shape(kz),dtype='complex') # incident beam has intensity 1
self.d[0] = r # reflected beam has intensity |r|**2
p = asarray(1.0 + r,dtype ='complex') #psi
pp = asarray(1j * kz[0] * (1 - r),dtype='complex') #psi prime
M11[0] = ones(shape(kz),dtype='complex')
M12[0] = ones(shape(kz),dtype='complex')
M21[0] = ones(shape(kz),dtype='complex')
M22[0] = ones(shape(kz),dtype='complex')
M11[-1] = zeros(shape(kz),dtype='complex')
M12[-1] = ones(shape(kz),dtype='complex')
M21[-1] = ones(shape(kz),dtype='complex')
M22[-1] = zeros(shape(kz),dtype='complex')
z_interface = 0.0
for l in range(1,self.layerCount-1):
## this algorithm works all the way into the substrate
pForDot = copy(p)
ppForDot = copy(pp)
p = (M11[l]*pForDot) + (M12[l]*ppForDot/k0z)
pp = (k0z*M21[l]*pForDot) + (M22[l]*ppForDot)
#Fine, This is c and d
kzl =( nz[l] * k0z )
self.c[l] = (.5* exp(-1j*kzl*(z_interface))*
(p + (pp/(1j*kzl))))
self.d[l] = (.5* exp(1j*kzl*(z_interface))*
(p - (pp/(1j*kzl))))
z_interface += thickness
# fill final c,d
self.c[-1] = self.t
self.d[-1] = zeros(shape(kz),dtype='complex')
return
def cuda_partition(n):
'''
Overview:
Auto grids the thread blocks to achieve some level of calculation
efficiency.
'''
max_gx,max_gy = 65535,65535
blocksize = 32
#max_gx,max_gy = 5,65536
#blocksize = 3
block = (blocksize,1,1)
num_blocks = int((n+blocksize-1)/blocksize)
if num_blocks < max_gx:
grid = (num_blocks,1)
else:
gx = max_gx
gy = (num_blocks + max_gx - 1) / max_gx
if gy >= max_gy: raise ValueError("vector is too large")
grid = (gx,gy)
#print "block",block,"grid",grid
#print "waste",block[0]*block[1]*block[2]*grid[0]*grid[1] - n
return dict(block=block,grid=grid)
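# --- Usage sketch for cuda_partition (illustration only, not original code) ---
# The returned dict is meant to be splatted into a PyCUDA kernel launch,
# e.g. some_kernel(args..., **cuda_partition(n)); that kernel is an
# assumption and not shown. The helper below just checks that the partition
# covers n elements for a few sizes.
def _example_partition_coverage():
    for n in (10, 1000, 10 ** 6):
        part = cuda_partition(n)
        threads = part['block'][0] * part['grid'][0] * part['grid'][1]
        assert threads >= n
    return part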
|
reflectometry/osrefl
|
osrefl/theory/DWBA_Cuda.py
|
Python
|
bsd-3-clause
| 18,701
|
[
"Gaussian"
] |
abe18cee881fdda79cc7e5fad2a8c863174c60ba7cc535d956172239aba2695d
|
# pymol -c generate2.py
from chempy import io
from glob import glob
from copy import deepcopy
# backbone-independent rotamers
lines = io.lst.fromFile("bbdep02.May.lib")
chi = { 'CYS' :
{ '1': ('N' , 'CA' , 'CB' , 'SG' ) },
'ASP' :
{ '1': ('N' , 'CA' , 'CB' , 'CG' ),
'2': ('CA' , 'CB' , 'CG' , 'OD1'), },
'GLU' :
{ '1': ('N' , 'CA' , 'CB' , 'CG' ),
'2': ('CA' , 'CB' , 'CG' , 'CD' ),
'3': ('CB' , 'CG' , 'CD' , 'OE1'), },
'PHE' :
{ '1': ('N' , 'CA' , 'CB' , 'CG' ),
'2': ('CA' , 'CB' , 'CG' , 'CD1'), },
'HIS' :
{ '1': ('N' , 'CA' , 'CB' , 'CG' ),
'2': ('CA' , 'CB' , 'CG' , 'ND1'), },
'ILE' :
{ '1': ('N' , 'CA' , 'CB' , 'CG1'),
'2': ('CA' , 'CB' , 'CG1', 'CD1+CD'), },
'LYS' :
{ '1': ('N' , 'CA' , 'CB' ,'CG' ),
'2': ('CA' , 'CB' , 'CG' ,'CD' ),
'3': ('CB' , 'CG' , 'CD' ,'CE' ),
'4': ('CG' , 'CD' , 'CE' ,'NZ' ), },
'LEU' :
{ '1': ('N' , 'CA' , 'CB' , 'CG' ),
'2': ('CA' , 'CB' , 'CG' , 'CD1'), },
'MET' :
{ '1': ('N' , 'CA' , 'CB' ,'CG' ),
'2': ('CA' , 'CB' , 'CG' ,'SD' ),
'3': ('CB' , 'CG' , 'SD' ,'CE' ), },
'ASN' :
{ '1': ('N' , 'CA' , 'CB' , 'CG' ),
'2': ('CA' , 'CB' , 'CG' , 'OD1'), },
'PRO' :
{ '1': ('N' , 'CA' , 'CB' , 'CG' ),
'2': ('CA' , 'CB' , 'CG' , 'CD' ), },
'GLN' :
{ '1': ('N' , 'CA' , 'CB' , 'CG' ),
'2': ('CA' , 'CB' , 'CG' , 'CD' ),
'3': ('CB' , 'CG' , 'CD' , 'OE1'), },
'ARG' :
{ '1': ('N' , 'CA' , 'CB' ,'CG' ),
'2': ('CA' , 'CB' , 'CG' ,'CD' ),
'3': ('CB' , 'CG' , 'CD' ,'NE' ),
'4': ('CG' , 'CD' , 'NE' ,'CZ' ), },
'SER' :
{ '1': ('N' , 'CA' , 'CB' , 'OG' ), },
'THR' :
{ '1': ('N' , 'CA' , 'CB' , 'OG1'), },
'VAL' :
{ '1': ('N' , 'CA' , 'CB' , 'CG1'), },
'TRP' :
{ '1': ('N' , 'CA' , 'CB' , 'CG' ),
'2': ('CA' , 'CB' , 'CG' , 'CD1'), },
'TYR' :
{ '1': ('N' , 'CA' , 'CB' , 'CG' ),
'2': ('CA' , 'CB' , 'CG' , 'CD1'), },
}
total = {}
output = {}
# first, total the number of rotamers for each residue
for line in lines:
field = line.split()
if len(field)>6:
resn = field[0]
phi = int(field[1])
psi = int(field[2])
key = (resn, phi, psi)
total[key] = total.get(key,0) + float(field[8])
# now build library, including frequency value
dd = {}
for line in lines:
field = line.split()
len_field = len(field)
if len_field>11:
resn = field[0]
phi = int(field[1])
psi = int(field[2])
if (((psi==(60*(psi/60))) and (phi==(60*(phi/60)))) or
((psi==(20*(psi/20))) and (phi==(20*(phi/20))) and (int(field[3])>0)) or
((psi==(10*(psi/10))) and (phi==(10*(phi/10))) and (int(field[3])>=250))):
if not dd.has_key((phi,psi)):
# print phi,psi
dd[(phi,psi)]=1
key = (resn, phi, psi)
list = output.get(key,[])
fsum = float(field[8])
dict = {}
if total[key]>0.0:
freq = fsum/total[key]
else:
freq = 0.0
if freq>0.01:
dict['FREQ'] = freq
len_chi = len(chi[resn].keys())
if len_chi>0:
chi1 = float(field[9])
dict[chi[resn]['1']] = chi1
if len_chi>1:
chi2 = float(field[10])
dict[chi[resn]['2']] = chi2
if len_chi>2:
chi3 = float(field[11])
dict[chi[resn]['3']] = chi3
if len_chi>3:
chi4 = float(field[12])
dict[chi[resn]['4']] = chi4
list.append((freq,dict))
output[key] = list
# sort by priority
for key in output.keys():
list = output[key]
list.sort()
list.reverse()
output[key] = map(lambda x:x[1],list)
#for key in output.keys():
#
# if key[0] == 'HIS':
# output[( 'HIE', ) + key[1:]] = output[key]
# output[( 'HID', ) + key[1:]] = output[key]
# output[( 'HIP', ) + key[1:]] = output[key]
# elif key[0] == 'CYS':
# output[( 'CYX', ) + key[1:]] = output[key]
io.pkl.toFile(output,"sc_bb_dep.pkl")
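# --- Readback sketch (illustration only, not part of the original script) ---
# Assumes chempy's io.pkl also provides fromFile(), symmetric to the
# toFile() call above. Looks up the rotamer list for LEU at phi=-60,
# psi=-40 and returns the most frequent entry's frequency and chi angles.
def _example_lookup():
    lib = io.pkl.fromFile("sc_bb_dep.pkl")
    rotamers = lib.get(('LEU', -60, -40), [])
    if len(rotamers):
        best = rotamers[0] # entries were sorted by decreasing frequency
        return best.get('FREQ'), best[chi['LEU']['1']], best[chi['LEU']['2']]
    return None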
|
gratefulfrog/lib
|
python/pymol/pymol_path/data/chempy/sidechains/sc_bb_dep.py
|
Python
|
gpl-2.0
| 4,774
|
[
"ChemPy",
"PyMOL"
] |
7ad01b9aa365b07f0f296367e873c8cb72b181b89967e6678acc85e99172f743
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from TestHarnessTestCase import TestHarnessTestCase
class TestHarnessTester(TestHarnessTestCase):
def testRequiredApps(self):
"""
Test that the required_apps check works
"""
output = self.runTests('--no-color', '-i', 'required_apps')
self.assertRegexpMatches(output.decode('utf-8'), r'test_harness\.bad_app.*? \[APP DOESNOTEXIST NOT REGISTERED IN EXECUTABLE\] SKIP')
self.assertRegexpMatches(output.decode('utf-8'), r'test_harness\.good_app.*? OK')
self.checkStatus(output.decode('utf-8'), passed=1, skipped=1)
|
nuclear-wizard/moose
|
python/TestHarness/tests/test_RequiredApps.py
|
Python
|
lgpl-2.1
| 881
|
[
"MOOSE"
] |
82c091c79ba13999a5c658e99407886fe259efaeea503d2c96e7b6d167fe7257
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from io import BytesIO
from threading import Lock
import contextlib
import itertools
import os.path
import pickle
import shutil
import tempfile
import unittest
import sys
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from xarray import (Dataset, DataArray, open_dataset, open_dataarray,
open_mfdataset, backends, save_mfdataset)
from xarray.backends.common import robust_getitem
from xarray.backends.netCDF4_ import _extract_nc4_variable_encoding
from xarray.core import indexing
from xarray.core.pycompat import iteritems, PY2, ExitStack, basestring
from . import (TestCase, requires_scipy, requires_netCDF4, requires_pydap,
requires_scipy_or_netCDF4, requires_dask, requires_h5netcdf,
requires_pynio, requires_pathlib, has_netCDF4, has_scipy,
assert_allclose, flaky, network, requires_rasterio,
assert_identical)
from .test_dataset import create_test_data
try:
import netCDF4 as nc4
except ImportError:
pass
try:
import dask.array as da
except ImportError:
pass
try:
from pathlib import Path
except ImportError:
try:
from pathlib2 import Path
except ImportError:
pass
ON_WINDOWS = sys.platform == 'win32'
def open_example_dataset(name, *args, **kwargs):
return open_dataset(os.path.join(os.path.dirname(__file__), 'data', name),
*args, **kwargs)
def create_masked_and_scaled_data():
x = np.array([np.nan, np.nan, 10, 10.1, 10.2])
encoding = {'_FillValue': -1, 'add_offset': 10,
'scale_factor': np.float32(0.1), 'dtype': 'i2'}
return Dataset({'x': ('t', x, {}, encoding)})
def create_encoded_masked_and_scaled_data():
attributes = {'_FillValue': -1, 'add_offset': 10,
'scale_factor': np.float32(0.1)}
return Dataset({'x': ('t', [-1, -1, 0, 1, 2], attributes)})
def create_unsigned_masked_scaled_data():
encoding = {'_FillValue': 255, '_Unsigned': 'true', 'dtype': 'i1',
'add_offset': 10, 'scale_factor': np.float32(0.1)}
x = np.array([10.0, 10.1, 22.7, 22.8, np.nan])
return Dataset({'x': ('t', x, {}, encoding)})
def create_encoded_unsigned_masked_scaled_data():
# These are values as written to the file: the _FillValue will
# be represented in the signed form.
attributes = {'_FillValue': -1, '_Unsigned': 'true',
'add_offset': 10, 'scale_factor': np.float32(0.1)}
# Create signed data corresponding to [0, 1, 127, 128, 255] unsigned
sb = np.asarray([0, 1, 127, -128, -1], dtype='i1')
return Dataset({'x': ('t', sb, attributes)})
def create_boolean_data():
attributes = {'units': '-'}
return Dataset({'x': ('t', [True, False, False, True], attributes)})
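# --- Worked example (sketch, not an original fixture) ---
# Relates the decoded and encoded mask-and-scale fixtures above by hand:
# encoded = round((decoded - add_offset) / scale_factor), with NaN mapped
# to the _FillValue before casting to the target integer dtype.
def manually_encode_masked_and_scaled():
    decoded = create_masked_and_scaled_data()
    x = decoded['x']
    enc = x.encoding
    values = (x.values - enc['add_offset']) / enc['scale_factor']
    values = np.where(np.isnan(values), enc['_FillValue'], values)
    return np.round(values).astype(enc['dtype'])  # array([-1, -1, 0, 1, 2], dtype=int16)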
class TestCommon(TestCase):
def test_robust_getitem(self):
class UnreliableArrayFailure(Exception):
pass
class UnreliableArray(object):
def __init__(self, array, failures=1):
self.array = array
self.failures = failures
def __getitem__(self, key):
if self.failures > 0:
self.failures -= 1
raise UnreliableArrayFailure
return self.array[key]
array = UnreliableArray([0])
with self.assertRaises(UnreliableArrayFailure):
array[0]
self.assertEqual(array[0], 0)
actual = robust_getitem(array, 0, catch=UnreliableArrayFailure,
initial_delay=0)
self.assertEqual(actual, 0)
class Only32BitTypes(object):
pass
class DatasetIOTestCases(object):
autoclose = False
def create_store(self):
raise NotImplementedError
def roundtrip(self, data, **kwargs):
raise NotImplementedError
def test_zero_dimensional_variable(self):
expected = create_test_data()
expected['float_var'] = ([], 1.0e9, {'units': 'units of awesome'})
expected['string_var'] = ([], np.array('foobar', dtype='S'))
with self.roundtrip(expected) as actual:
self.assertDatasetAllClose(expected, actual)
def test_write_store(self):
expected = create_test_data()
with self.create_store() as store:
expected.dump_to_store(store)
# we need to cf decode the store because it has time and
# non-dimension coordinates
with xr.decode_cf(store) as actual:
self.assertDatasetAllClose(expected, actual)
def check_dtypes_roundtripped(self, expected, actual):
for k in expected:
expected_dtype = expected.variables[k].dtype
if (isinstance(self, Only32BitTypes) and
expected_dtype == 'int64'):
# downcast
expected_dtype = np.dtype('int32')
actual_dtype = actual.variables[k].dtype
# TODO: check expected behavior for string dtypes more carefully
string_kinds = {'O', 'S', 'U'}
assert (expected_dtype == actual_dtype or
(expected_dtype.kind in string_kinds and
actual_dtype.kind in string_kinds))
def test_roundtrip_test_data(self):
expected = create_test_data()
with self.roundtrip(expected) as actual:
self.check_dtypes_roundtripped(expected, actual)
self.assertDatasetAllClose(expected, actual)
def test_load(self):
expected = create_test_data()
@contextlib.contextmanager
def assert_loads(vars=None):
if vars is None:
vars = expected
with self.roundtrip(expected) as actual:
for k, v in actual.variables.items():
# IndexVariables are eagerly loaded into memory
self.assertEqual(v._in_memory, k in actual.dims)
yield actual
for k, v in actual.variables.items():
if k in vars:
self.assertTrue(v._in_memory)
self.assertDatasetAllClose(expected, actual)
with self.assertRaises(AssertionError):
# make sure the contextmanager works!
with assert_loads() as ds:
pass
with assert_loads() as ds:
ds.load()
with assert_loads(['var1', 'dim1', 'dim2']) as ds:
ds['var1'].load()
# verify we can read data even after closing the file
with self.roundtrip(expected) as ds:
actual = ds.load()
self.assertDatasetAllClose(expected, actual)
def test_dataset_compute(self):
expected = create_test_data()
with self.roundtrip(expected) as actual:
# Test Dataset.compute()
for k, v in actual.variables.items():
# IndexVariables are eagerly cached
self.assertEqual(v._in_memory, k in actual.dims)
computed = actual.compute()
for k, v in actual.variables.items():
self.assertEqual(v._in_memory, k in actual.dims)
for v in computed.variables.values():
self.assertTrue(v._in_memory)
self.assertDatasetAllClose(expected, actual)
self.assertDatasetAllClose(expected, computed)
def test_pickle(self):
expected = Dataset({'foo': ('x', [42])})
with self.roundtrip(
expected, allow_cleanup_failure=ON_WINDOWS) as roundtripped:
raw_pickle = pickle.dumps(roundtripped)
# windows doesn't like opening the same file twice
roundtripped.close()
unpickled_ds = pickle.loads(raw_pickle)
self.assertDatasetIdentical(expected, unpickled_ds)
def test_pickle_dataarray(self):
expected = Dataset({'foo': ('x', [42])})
with self.roundtrip(
expected, allow_cleanup_failure=ON_WINDOWS) as roundtripped:
unpickled_array = pickle.loads(pickle.dumps(roundtripped['foo']))
self.assertDatasetIdentical(expected['foo'], unpickled_array)
def test_dataset_caching(self):
expected = Dataset({'foo': ('x', [5, 6, 7])})
with self.roundtrip(expected) as actual:
assert isinstance(actual.foo.variable._data,
indexing.MemoryCachedArray)
assert not actual.foo.variable._in_memory
actual.foo.values # cache
assert actual.foo.variable._in_memory
with self.roundtrip(expected, open_kwargs={'cache': False}) as actual:
assert isinstance(actual.foo.variable._data,
indexing.CopyOnWriteArray)
assert not actual.foo.variable._in_memory
actual.foo.values # no caching
assert not actual.foo.variable._in_memory
def test_roundtrip_None_variable(self):
expected = Dataset({None: (('x', 'y'), [[0, 1], [2, 3]])})
with self.roundtrip(expected) as actual:
self.assertDatasetAllClose(expected, actual)
def test_roundtrip_object_dtype(self):
floats = np.array([0.0, 0.0, 1.0, 2.0, 3.0], dtype=object)
floats_nans = np.array([np.nan, np.nan, 1.0, 2.0, 3.0], dtype=object)
letters = np.array(['ab', 'cdef', 'g'], dtype=object)
letters_nans = np.array(['ab', 'cdef', np.nan], dtype=object)
all_nans = np.array([np.nan, np.nan], dtype=object)
original = Dataset({'floats': ('a', floats),
'floats_nans': ('a', floats_nans),
'letters': ('b', letters),
'letters_nans': ('b', letters_nans),
'all_nans': ('c', all_nans),
'nan': ([], np.nan)})
expected = original.copy(deep=True)
if isinstance(self, Only32BitTypes):
# for netCDF3 tests, expect the results to come back as characters
expected['letters_nans'] = expected['letters_nans'].astype('S')
expected['letters'] = expected['letters'].astype('S')
with self.roundtrip(original) as actual:
try:
self.assertDatasetIdentical(expected, actual)
except AssertionError:
# Most stores use '' for nans in strings, but some don't
# first try the ideal case (where the store returns exactly)
# the original Dataset), then try a more realistic case.
# ScipyDataTest, NetCDF3ViaNetCDF4DataTest and NetCDF4DataTest
# all end up using this case.
expected['letters_nans'][-1] = ''
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_string_data(self):
expected = Dataset({'x': ('t', ['ab', 'cdef'])})
with self.roundtrip(expected) as actual:
if isinstance(self, Only32BitTypes):
expected['x'] = expected['x'].astype('S')
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_datetime_data(self):
times = pd.to_datetime(['2000-01-01', '2000-01-02', 'NaT'])
expected = Dataset({'t': ('t', times), 't0': times[0]})
kwds = {'encoding': {'t0': {'units': 'days since 1950-01-01'}}}
with self.roundtrip(expected, save_kwargs=kwds) as actual:
self.assertDatasetIdentical(expected, actual)
            self.assertEqual(actual.t0.encoding['units'],
                             'days since 1950-01-01')
def test_roundtrip_timedelta_data(self):
time_deltas = pd.to_timedelta(['1h', '2h', 'NaT'])
expected = Dataset({'td': ('td', time_deltas), 'td0': time_deltas[0]})
with self.roundtrip(expected) as actual:
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_float64_data(self):
expected = Dataset({'x': ('y', np.array([1.0, 2.0, np.pi],
dtype='float64'))})
with self.roundtrip(expected) as actual:
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_example_1_netcdf(self):
expected = open_example_dataset('example_1.nc')
with self.roundtrip(expected) as actual:
# we allow the attributes to differ since that
# will depend on the encoding used. For example,
# without CF encoding 'actual' will end up with
# a dtype attribute.
self.assertDatasetEqual(expected, actual)
def test_roundtrip_coordinates(self):
original = Dataset({'foo': ('x', [0, 1])},
{'x': [2, 3], 'y': ('a', [42]), 'z': ('x', [4, 5])})
with self.roundtrip(original) as actual:
self.assertDatasetIdentical(original, actual)
expected = original.drop('foo')
with self.roundtrip(expected) as actual:
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_boolean_dtype(self):
original = create_boolean_data()
self.assertEqual(original['x'].dtype, 'bool')
with self.roundtrip(original) as actual:
self.assertDatasetIdentical(original, actual)
self.assertEqual(actual['x'].dtype, 'bool')
def test_orthogonal_indexing(self):
in_memory = create_test_data()
with self.roundtrip(in_memory) as on_disk:
indexers = {'dim1': np.arange(3), 'dim2': np.arange(4),
'dim3': np.arange(5)}
expected = in_memory.isel(**indexers)
actual = on_disk.isel(**indexers)
self.assertDatasetAllClose(expected, actual)
# do it twice, to make sure we're switched from orthogonal -> numpy
# when we cached the values
actual = on_disk.isel(**indexers)
self.assertDatasetAllClose(expected, actual)
class CFEncodedDataTest(DatasetIOTestCases):
def test_roundtrip_strings_with_fill_value(self):
values = np.array(['ab', 'cdef', np.nan], dtype=object)
encoding = {'_FillValue': np.string_('X'), 'dtype': np.dtype('S1')}
original = Dataset({'x': ('t', values, {}, encoding)})
expected = original.copy(deep=True)
expected['x'][:2] = values[:2].astype('S')
with self.roundtrip(original) as actual:
self.assertDatasetIdentical(expected, actual)
original = Dataset({'x': ('t', values, {}, {'_FillValue': '\x00'})})
if not isinstance(self, Only32BitTypes):
# these stores can save unicode strings
expected = original.copy(deep=True)
if isinstance(self, BaseNetCDF4Test):
# netCDF4 can't keep track of an empty _FillValue for VLEN
# variables
expected['x'][-1] = ''
elif (isinstance(self, (NetCDF3ViaNetCDF4DataTest,
NetCDF4ClassicViaNetCDF4DataTest))
or (has_netCDF4 and
(type(self) is GenericNetCDFDataTest or
type(self) is GenericNetCDFDataTestAutocloseTrue))):
# netCDF4 can't keep track of an empty _FillValue for nc3, either:
# https://github.com/Unidata/netcdf4-python/issues/273
expected['x'][-1] = np.string_('')
with self.roundtrip(original) as actual:
self.assertDatasetIdentical(expected, actual)
def test_unsigned_roundtrip_mask_and_scale(self):
decoded = create_unsigned_masked_scaled_data()
encoded = create_encoded_unsigned_masked_scaled_data()
with self.roundtrip(decoded) as actual:
for k in decoded.variables:
self.assertEqual(decoded.variables[k].dtype,
actual.variables[k].dtype)
self.assertDatasetAllClose(decoded, actual)
with self.roundtrip(decoded,
open_kwargs=dict(decode_cf=False)) as actual:
for k in encoded.variables:
self.assertEqual(encoded.variables[k].dtype,
actual.variables[k].dtype)
self.assertDatasetAllClose(encoded, actual)
with self.roundtrip(encoded,
open_kwargs=dict(decode_cf=False)) as actual:
for k in encoded.variables:
self.assertEqual(encoded.variables[k].dtype,
actual.variables[k].dtype)
self.assertDatasetAllClose(encoded, actual)
# make sure roundtrip encoding didn't change the
# original dataset.
self.assertDatasetIdentical(
encoded, create_encoded_unsigned_masked_scaled_data())
with self.roundtrip(encoded) as actual:
for k in decoded.variables:
self.assertEqual(decoded.variables[k].dtype,
actual.variables[k].dtype)
self.assertDatasetAllClose(decoded, actual)
with self.roundtrip(encoded,
open_kwargs=dict(decode_cf=False)) as actual:
for k in encoded.variables:
self.assertEqual(encoded.variables[k].dtype,
actual.variables[k].dtype)
self.assertDatasetAllClose(encoded, actual)
def test_roundtrip_mask_and_scale(self):
decoded = create_masked_and_scaled_data()
encoded = create_encoded_masked_and_scaled_data()
with self.roundtrip(decoded) as actual:
self.assertDatasetAllClose(decoded, actual)
with self.roundtrip(decoded,
open_kwargs=dict(decode_cf=False)) as actual:
# TODO: this assumes that all roundtrips will first
# encode. Is that something we want to test for?
self.assertDatasetAllClose(encoded, actual)
with self.roundtrip(encoded,
open_kwargs=dict(decode_cf=False)) as actual:
self.assertDatasetAllClose(encoded, actual)
# make sure roundtrip encoding didn't change the
# original dataset.
self.assertDatasetIdentical(encoded,
create_encoded_masked_and_scaled_data())
with self.roundtrip(encoded) as actual:
self.assertDatasetAllClose(decoded, actual)
with self.roundtrip(encoded,
open_kwargs=dict(decode_cf=False)) as actual:
self.assertDatasetAllClose(encoded, actual)
def test_coordinates_encoding(self):
def equals_latlon(obj):
return obj == 'lat lon' or obj == 'lon lat'
original = Dataset({'temp': ('x', [0, 1]), 'precip': ('x', [0, -1])},
{'lat': ('x', [2, 3]), 'lon': ('x', [4, 5])})
with self.roundtrip(original) as actual:
self.assertDatasetIdentical(actual, original)
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with open_dataset(tmp_file, decode_coords=False) as ds:
self.assertTrue(equals_latlon(ds['temp'].attrs['coordinates']))
self.assertTrue(equals_latlon(ds['precip'].attrs['coordinates']))
self.assertNotIn('coordinates', ds.attrs)
self.assertNotIn('coordinates', ds['lat'].attrs)
self.assertNotIn('coordinates', ds['lon'].attrs)
modified = original.drop(['temp', 'precip'])
with self.roundtrip(modified) as actual:
self.assertDatasetIdentical(actual, modified)
with create_tmp_file() as tmp_file:
modified.to_netcdf(tmp_file)
with open_dataset(tmp_file, decode_coords=False) as ds:
self.assertTrue(equals_latlon(ds.attrs['coordinates']))
self.assertNotIn('coordinates', ds['lat'].attrs)
self.assertNotIn('coordinates', ds['lon'].attrs)
def test_roundtrip_endian(self):
ds = Dataset({'x': np.arange(3, 10, dtype='>i2'),
'y': np.arange(3, 20, dtype='<i4'),
'z': np.arange(3, 30, dtype='=i8'),
'w': ('x', np.arange(3, 10, dtype=np.float))})
with self.roundtrip(ds) as actual:
# technically these datasets are slightly different,
# one hold mixed endian data (ds) the other should be
# all big endian (actual). assertDatasetIdentical
# should still pass though.
self.assertDatasetIdentical(ds, actual)
if type(self) is NetCDF4DataTest:
ds['z'].encoding['endian'] = 'big'
with self.assertRaises(NotImplementedError):
with self.roundtrip(ds) as actual:
pass
def test_invalid_dataarray_names_raise(self):
te = (TypeError, 'string or None')
ve = (ValueError, 'string must be length 1 or')
data = np.random.random((2, 2))
da = xr.DataArray(data)
for name, e in zip([0, (4, 5), True, ''], [te, te, te, ve]):
ds = Dataset({name: da})
with self.assertRaisesRegexp(*e):
with self.roundtrip(ds) as actual:
pass
def test_encoding_kwarg(self):
ds = Dataset({'x': ('y', np.arange(10.0))})
kwargs = dict(encoding={'x': {'dtype': 'f4'}})
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
self.assertEqual(actual.x.encoding['dtype'], 'f4')
self.assertEqual(ds.x.encoding, {})
kwargs = dict(encoding={'x': {'foo': 'bar'}})
with self.assertRaisesRegexp(ValueError, 'unexpected encoding'):
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
pass
kwargs = dict(encoding={'x': 'foo'})
with self.assertRaisesRegexp(ValueError, 'must be castable'):
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
pass
kwargs = dict(encoding={'invalid': {}})
with self.assertRaises(KeyError):
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
pass
ds = Dataset({'t': pd.date_range('2000-01-01', periods=3)})
units = 'days since 1900-01-01'
kwargs = dict(encoding={'t': {'units': units}})
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
self.assertEqual(actual.t.encoding['units'], units)
self.assertDatasetIdentical(actual, ds)
def test_default_fill_value(self):
# Test default encoding for float:
ds = Dataset({'x': ('y', np.arange(10.0))})
kwargs = dict(encoding={'x': {'dtype': 'f4'}})
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
self.assertEqual(actual.x.encoding['_FillValue'],
np.nan)
self.assertEqual(ds.x.encoding, {})
# Test default encoding for int:
ds = Dataset({'x': ('y', np.arange(10.0))})
kwargs = dict(encoding={'x': {'dtype': 'int16'}})
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
self.assertTrue('_FillValue' not in actual.x.encoding)
self.assertEqual(ds.x.encoding, {})
# Test default encoding for implicit int:
ds = Dataset({'x': ('y', np.arange(10, dtype='int16'))})
with self.roundtrip(ds) as actual:
self.assertTrue('_FillValue' not in actual.x.encoding)
self.assertEqual(ds.x.encoding, {})
def test_encoding_same_dtype(self):
ds = Dataset({'x': ('y', np.arange(10.0, dtype='f4'))})
kwargs = dict(encoding={'x': {'dtype': 'f4'}})
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
self.assertEqual(actual.x.encoding['dtype'], 'f4')
self.assertEqual(ds.x.encoding, {})
_counter = itertools.count()
@contextlib.contextmanager
def create_tmp_file(suffix='.nc', allow_cleanup_failure=False):
temp_dir = tempfile.mkdtemp()
path = os.path.join(temp_dir, 'temp-%s%s' % (next(_counter), suffix))
try:
yield path
finally:
try:
shutil.rmtree(temp_dir)
except OSError:
if not allow_cleanup_failure:
raise
@contextlib.contextmanager
def create_tmp_files(nfiles, suffix='.nc', allow_cleanup_failure=False):
with ExitStack() as stack:
files = [stack.enter_context(create_tmp_file(suffix,
allow_cleanup_failure))
for apath in np.arange(nfiles)]
yield files
@requires_netCDF4
class BaseNetCDF4Test(CFEncodedDataTest):
def test_open_group(self):
# Create a netCDF file with a dataset stored within a group
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, 'w') as rootgrp:
foogrp = rootgrp.createGroup('foo')
ds = foogrp
ds.createDimension('time', size=10)
x = np.arange(10)
ds.createVariable('x', np.int32, dimensions=('time',))
ds.variables['x'][:] = x
expected = Dataset()
expected['x'] = ('time', x)
# check equivalent ways to specify group
for group in 'foo', '/foo', 'foo/', '/foo/':
with open_dataset(tmp_file, group=group) as actual:
self.assertVariableEqual(actual['x'], expected['x'])
# check that missing group raises appropriate exception
with self.assertRaises(IOError):
open_dataset(tmp_file, group='bar')
with self.assertRaisesRegexp(ValueError, 'must be a string'):
open_dataset(tmp_file, group=(1, 2, 3))
def test_open_subgroup(self):
# Create a netCDF file with a dataset stored within a group within a group
with create_tmp_file() as tmp_file:
rootgrp = nc4.Dataset(tmp_file, 'w')
foogrp = rootgrp.createGroup('foo')
bargrp = foogrp.createGroup('bar')
ds = bargrp
ds.createDimension('time', size=10)
x = np.arange(10)
ds.createVariable('x', np.int32, dimensions=('time',))
ds.variables['x'][:] = x
rootgrp.close()
expected = Dataset()
expected['x'] = ('time', x)
# check equivalent ways to specify group
for group in 'foo/bar', '/foo/bar', 'foo/bar/', '/foo/bar/':
with open_dataset(tmp_file, group=group) as actual:
self.assertVariableEqual(actual['x'], expected['x'])
def test_write_groups(self):
data1 = create_test_data()
data2 = data1 * 2
with create_tmp_file() as tmp_file:
data1.to_netcdf(tmp_file, group='data/1')
data2.to_netcdf(tmp_file, group='data/2', mode='a')
with open_dataset(tmp_file, group='data/1') as actual1:
self.assertDatasetIdentical(data1, actual1)
with open_dataset(tmp_file, group='data/2') as actual2:
self.assertDatasetIdentical(data2, actual2)
def test_roundtrip_character_array(self):
with create_tmp_file() as tmp_file:
values = np.array([['a', 'b', 'c'], ['d', 'e', 'f']], dtype='S')
with nc4.Dataset(tmp_file, mode='w') as nc:
nc.createDimension('x', 2)
nc.createDimension('string3', 3)
v = nc.createVariable('x', np.dtype('S1'), ('x', 'string3'))
v[:] = values
values = np.array(['abc', 'def'], dtype='S')
expected = Dataset({'x': ('x', values)})
with open_dataset(tmp_file) as actual:
self.assertDatasetIdentical(expected, actual)
# regression test for #157
with self.roundtrip(actual) as roundtripped:
self.assertDatasetIdentical(expected, roundtripped)
def test_default_to_char_arrays(self):
data = Dataset({'x': np.array(['foo', 'zzzz'], dtype='S')})
with self.roundtrip(data) as actual:
self.assertDatasetIdentical(data, actual)
self.assertEqual(actual['x'].dtype, np.dtype('S4'))
def test_open_encodings(self):
# Create a netCDF file with explicit time units
# and make sure it makes it into the encodings
# and survives a round trip
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, 'w') as ds:
ds.createDimension('time', size=10)
ds.createVariable('time', np.int32, dimensions=('time',))
units = 'days since 1999-01-01'
ds.variables['time'].setncattr('units', units)
ds.variables['time'][:] = np.arange(10) + 4
expected = Dataset()
time = pd.date_range('1999-01-05', periods=10)
encoding = {'units': units, 'dtype': np.dtype('int32')}
expected['time'] = ('time', time, {}, encoding)
with open_dataset(tmp_file) as actual:
self.assertVariableEqual(actual['time'], expected['time'])
actual_encoding = dict((k, v) for k, v in
iteritems(actual['time'].encoding)
if k in expected['time'].encoding)
self.assertDictEqual(actual_encoding, expected['time'].encoding)
def test_dump_encodings(self):
# regression test for #709
ds = Dataset({'x': ('y', np.arange(10.0))})
kwargs = dict(encoding={'x': {'zlib': True}})
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
self.assertTrue(actual.x.encoding['zlib'])
def test_dump_and_open_encodings(self):
# Create a netCDF file with explicit time units
# and make sure it makes it into the encodings
# and survives a round trip
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, 'w') as ds:
ds.createDimension('time', size=10)
ds.createVariable('time', np.int32, dimensions=('time',))
units = 'days since 1999-01-01'
ds.variables['time'].setncattr('units', units)
ds.variables['time'][:] = np.arange(10) + 4
with open_dataset(tmp_file) as xarray_dataset:
with create_tmp_file() as tmp_file2:
xarray_dataset.to_netcdf(tmp_file2)
with nc4.Dataset(tmp_file2, 'r') as ds:
self.assertEqual(ds.variables['time'].getncattr('units'), units)
self.assertArrayEqual(ds.variables['time'], np.arange(10) + 4)
def test_compression_encoding(self):
data = create_test_data()
data['var2'].encoding.update({'zlib': True,
'chunksizes': (5, 5),
'fletcher32': True,
'original_shape': data.var2.shape})
with self.roundtrip(data) as actual:
for k, v in iteritems(data['var2'].encoding):
self.assertEqual(v, actual['var2'].encoding[k])
# regression test for #156
expected = data.isel(dim1=0)
with self.roundtrip(expected) as actual:
self.assertDatasetEqual(expected, actual)
def test_mask_and_scale(self):
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, mode='w') as nc:
nc.createDimension('t', 5)
nc.createVariable('x', 'int16', ('t',), fill_value=-1)
v = nc.variables['x']
v.set_auto_maskandscale(False)
v.add_offset = 10
v.scale_factor = 0.1
v[:] = np.array([-1, -1, 0, 1, 2])
# first make sure netCDF4 reads the masked and scaled data
# correctly
with nc4.Dataset(tmp_file, mode='r') as nc:
expected = np.ma.array([-1, -1, 10, 10.1, 10.2],
mask=[True, True, False, False, False])
actual = nc.variables['x'][:]
self.assertArrayEqual(expected, actual)
# now check xarray
with open_dataset(tmp_file) as ds:
expected = create_masked_and_scaled_data()
self.assertDatasetIdentical(expected, ds)
def test_0dimensional_variable(self):
# This fix verifies our work-around to this netCDF4-python bug:
# https://github.com/Unidata/netcdf4-python/pull/220
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, mode='w') as nc:
v = nc.createVariable('x', 'int16')
v[...] = 123
with open_dataset(tmp_file) as ds:
expected = Dataset({'x': ((), 123)})
self.assertDatasetIdentical(expected, ds)
def test_already_open_dataset(self):
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, mode='w') as nc:
v = nc.createVariable('x', 'int')
v[...] = 42
nc = nc4.Dataset(tmp_file, mode='r')
with backends.NetCDF4DataStore(nc, autoclose=False) as store:
with open_dataset(store) as ds:
expected = Dataset({'x': ((), 42)})
self.assertDatasetIdentical(expected, ds)
def test_variable_len_strings(self):
with create_tmp_file() as tmp_file:
values = np.array(['foo', 'bar', 'baz'], dtype=object)
with nc4.Dataset(tmp_file, mode='w') as nc:
nc.createDimension('x', 3)
v = nc.createVariable('x', str, ('x',))
v[:] = values
expected = Dataset({'x': ('x', values)})
for kwargs in [{}, {'decode_cf': True}]:
with open_dataset(tmp_file, **kwargs) as actual:
self.assertDatasetIdentical(expected, actual)
@requires_netCDF4
class NetCDF4DataTest(BaseNetCDF4Test, TestCase):
autoclose = False
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.NetCDF4DataStore.open(tmp_file, mode='w') as store:
yield store
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={},
allow_cleanup_failure=False):
with create_tmp_file(
allow_cleanup_failure=allow_cleanup_failure) as tmp_file:
data.to_netcdf(tmp_file, **save_kwargs)
with open_dataset(tmp_file,
autoclose=self.autoclose, **open_kwargs) as ds:
yield ds
def test_variable_order(self):
# doesn't work with scipy or h5py :(
ds = Dataset()
ds['a'] = 1
ds['z'] = 2
ds['b'] = 3
ds.coords['c'] = 4
with self.roundtrip(ds) as actual:
self.assertEqual(list(ds), list(actual))
def test_unsorted_index_raises(self):
# should be fixed in netcdf4 v1.2.1
random_data = np.random.random(size=(4, 6))
dim0 = [0, 1, 2, 3]
dim1 = [0, 2, 1, 3, 5, 4] # We will sort this in a later step
da = xr.DataArray(data=random_data, dims=('dim0', 'dim1'),
coords={'dim0': dim0, 'dim1': dim1}, name='randovar')
ds = da.to_dataset()
with self.roundtrip(ds) as ondisk:
inds = np.argsort(dim1)
ds2 = ondisk.isel(dim1=inds)
try:
print(ds2.randovar.values) # should raise IndexError in netCDF4
except IndexError as err:
self.assertIn('first by calling .load', str(err))
class NetCDF4DataStoreAutocloseTrue(NetCDF4DataTest):
autoclose = True
@requires_netCDF4
@requires_dask
class NetCDF4ViaDaskDataTest(NetCDF4DataTest):
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={},
allow_cleanup_failure=False):
with NetCDF4DataTest.roundtrip(
self, data, save_kwargs, open_kwargs,
allow_cleanup_failure) as ds:
yield ds.chunk()
def test_unsorted_index_raises(self):
# Skip when using dask because dask rewrites indexers to getitem,
# dask first pulls items by block.
pass
def test_dataset_caching(self):
# caching behavior differs for dask
pass
class NetCDF4ViaDaskDataTestAutocloseTrue(NetCDF4ViaDaskDataTest):
autoclose = True
@requires_scipy
class ScipyInMemoryDataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
@contextlib.contextmanager
def create_store(self):
fobj = BytesIO()
yield backends.ScipyDataStore(fobj, 'w')
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={},
allow_cleanup_failure=False):
serialized = data.to_netcdf(**save_kwargs)
with open_dataset(serialized, engine='scipy',
autoclose=self.autoclose, **open_kwargs) as ds:
yield ds
def test_to_netcdf_explicit_engine(self):
# regression test for GH1321
Dataset({'foo': 42}).to_netcdf(engine='scipy')
@pytest.mark.skipif(PY2, reason='cannot pickle BytesIO on Python 2')
def test_bytesio_pickle(self):
data = Dataset({'foo': ('x', [1, 2, 3])})
fobj = BytesIO(data.to_netcdf())
with open_dataset(fobj, autoclose=self.autoclose) as ds:
unpickled = pickle.loads(pickle.dumps(ds))
self.assertDatasetIdentical(unpickled, data)
class ScipyInMemoryDataTestAutocloseTrue(ScipyInMemoryDataTest):
autoclose = True
@requires_scipy
class ScipyFileObjectTest(CFEncodedDataTest, Only32BitTypes, TestCase):
@contextlib.contextmanager
def create_store(self):
fobj = BytesIO()
yield backends.ScipyDataStore(fobj, 'w')
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={},
allow_cleanup_failure=False):
with create_tmp_file() as tmp_file:
with open(tmp_file, 'wb') as f:
data.to_netcdf(f, **save_kwargs)
with open(tmp_file, 'rb') as f:
with open_dataset(f, engine='scipy', **open_kwargs) as ds:
yield ds
@pytest.mark.skip(reason='cannot pickle file objects')
def test_pickle(self):
pass
@pytest.mark.skip(reason='cannot pickle file objects')
def test_pickle_dataarray(self):
pass
@requires_scipy
class ScipyFilePathTest(CFEncodedDataTest, Only32BitTypes, TestCase):
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.ScipyDataStore(tmp_file, mode='w') as store:
yield store
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={},
allow_cleanup_failure=False):
with create_tmp_file(
allow_cleanup_failure=allow_cleanup_failure) as tmp_file:
data.to_netcdf(tmp_file, engine='scipy', **save_kwargs)
with open_dataset(tmp_file, engine='scipy',
autoclose=self.autoclose, **open_kwargs) as ds:
yield ds
def test_array_attrs(self):
ds = Dataset(attrs={'foo': [[1, 2], [3, 4]]})
with self.assertRaisesRegexp(ValueError, 'must be 1-dimensional'):
with self.roundtrip(ds) as roundtripped:
pass
def test_roundtrip_example_1_netcdf_gz(self):
if sys.version_info[:2] < (2, 7):
with self.assertRaisesRegexp(ValueError,
'gzipped netCDF not supported'):
open_example_dataset('example_1.nc.gz')
else:
with open_example_dataset('example_1.nc.gz') as expected:
with open_example_dataset('example_1.nc') as actual:
self.assertDatasetIdentical(expected, actual)
def test_netcdf3_endianness(self):
# regression test for GH416
expected = open_example_dataset('bears.nc', engine='scipy')
for var in expected.values():
self.assertTrue(var.dtype.isnative)
@requires_netCDF4
def test_nc4_scipy(self):
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, 'w', format='NETCDF4') as rootgrp:
rootgrp.createGroup('foo')
with self.assertRaisesRegexp(TypeError, 'pip install netcdf4'):
open_dataset(tmp_file, engine='scipy')
class ScipyFilePathTestAutocloseTrue(ScipyFilePathTest):
autoclose = True
@requires_netCDF4
class NetCDF3ViaNetCDF4DataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.NetCDF4DataStore.open(
tmp_file, mode='w', format='NETCDF3_CLASSIC') as store:
yield store
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={},
allow_cleanup_failure=False):
with create_tmp_file(
allow_cleanup_failure=allow_cleanup_failure) as tmp_file:
data.to_netcdf(tmp_file, format='NETCDF3_CLASSIC',
engine='netcdf4', **save_kwargs)
with open_dataset(tmp_file, engine='netcdf4',
autoclose=self.autoclose, **open_kwargs) as ds:
yield ds
class NetCDF3ViaNetCDF4DataTestAutocloseTrue(NetCDF3ViaNetCDF4DataTest):
autoclose = True
@requires_netCDF4
class NetCDF4ClassicViaNetCDF4DataTest(CFEncodedDataTest, Only32BitTypes,
TestCase):
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.NetCDF4DataStore.open(
tmp_file, mode='w', format='NETCDF4_CLASSIC') as store:
yield store
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={},
allow_cleanup_failure=False):
with create_tmp_file(
allow_cleanup_failure=allow_cleanup_failure) as tmp_file:
data.to_netcdf(tmp_file, format='NETCDF4_CLASSIC',
engine='netcdf4', **save_kwargs)
with open_dataset(tmp_file, engine='netcdf4',
autoclose=self.autoclose, **open_kwargs) as ds:
yield ds
class NetCDF4ClassicViaNetCDF4DataTestAutocloseTrue(
NetCDF4ClassicViaNetCDF4DataTest):
autoclose = True
@requires_scipy_or_netCDF4
class GenericNetCDFDataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
# verify that we can read and write netCDF3 files as long as we have scipy
# or netCDF4-python installed
def test_write_store(self):
# there's no specific store to test here
pass
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={},
allow_cleanup_failure=False):
with create_tmp_file(
allow_cleanup_failure=allow_cleanup_failure) as tmp_file:
data.to_netcdf(tmp_file, format='netcdf3_64bit', **save_kwargs)
with open_dataset(tmp_file,
autoclose=self.autoclose, **open_kwargs) as ds:
yield ds
def test_engine(self):
data = create_test_data()
with self.assertRaisesRegexp(ValueError, 'unrecognized engine'):
data.to_netcdf('foo.nc', engine='foobar')
with self.assertRaisesRegexp(ValueError, 'invalid engine'):
data.to_netcdf(engine='netcdf4')
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file)
with self.assertRaisesRegexp(ValueError, 'unrecognized engine'):
open_dataset(tmp_file, engine='foobar')
netcdf_bytes = data.to_netcdf()
with self.assertRaisesRegexp(ValueError, 'can only read'):
open_dataset(BytesIO(netcdf_bytes), engine='foobar')
def test_cross_engine_read_write_netcdf3(self):
data = create_test_data()
valid_engines = set()
if has_netCDF4:
valid_engines.add('netcdf4')
if has_scipy:
valid_engines.add('scipy')
for write_engine in valid_engines:
for format in ['NETCDF3_CLASSIC', 'NETCDF3_64BIT']:
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, format=format,
engine=write_engine)
for read_engine in valid_engines:
with open_dataset(tmp_file,
engine=read_engine) as actual:
# hack to allow test to work:
# coord comes back as DataArray rather than coord, and so
# need to loop through here rather than in the test
# function (or we get recursion)
[assert_allclose(data[k].variable, actual[k].variable)
for k in data]
def test_encoding_unlimited_dims(self):
ds = Dataset({'x': ('y', np.arange(10.0))})
with self.roundtrip(ds,
save_kwargs=dict(unlimited_dims=['y'])) as actual:
self.assertEqual(actual.encoding['unlimited_dims'], set('y'))
self.assertDatasetEqual(ds, actual)
ds.encoding = {'unlimited_dims': ['y']}
with self.roundtrip(ds) as actual:
self.assertEqual(actual.encoding['unlimited_dims'], set('y'))
self.assertDatasetEqual(ds, actual)
class GenericNetCDFDataTestAutocloseTrue(GenericNetCDFDataTest):
autoclose = True
@requires_h5netcdf
@requires_netCDF4
class H5NetCDFDataTest(BaseNetCDF4Test, TestCase):
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
yield backends.H5NetCDFStore(tmp_file, 'w')
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={},
allow_cleanup_failure=False):
with create_tmp_file(
allow_cleanup_failure=allow_cleanup_failure) as tmp_file:
data.to_netcdf(tmp_file, engine='h5netcdf', **save_kwargs)
with open_dataset(tmp_file, engine='h5netcdf',
autoclose=self.autoclose, **open_kwargs) as ds:
yield ds
def test_orthogonal_indexing(self):
# doesn't work for h5py (without using dask as an intermediate layer)
pass
def test_complex(self):
expected = Dataset({'x': ('y', np.ones(5) + 1j * np.ones(5))})
with self.roundtrip(expected) as actual:
self.assertDatasetEqual(expected, actual)
@pytest.mark.xfail(reason='https://github.com/pydata/xarray/issues/535')
def test_cross_engine_read_write_netcdf4(self):
# Drop dim3, because its labels include strings. These appear to be
# not properly read with python-netCDF4, which converts them into
# unicode instead of leaving them as bytes.
data = create_test_data().drop('dim3')
data.attrs['foo'] = 'bar'
valid_engines = ['netcdf4', 'h5netcdf']
for write_engine in valid_engines:
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, engine=write_engine)
for read_engine in valid_engines:
with open_dataset(tmp_file, engine=read_engine) as actual:
self.assertDatasetIdentical(data, actual)
def test_read_byte_attrs_as_unicode(self):
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, 'w') as nc:
nc.foo = b'bar'
with open_dataset(tmp_file) as actual:
expected = Dataset(attrs={'foo': 'bar'})
self.assertDatasetIdentical(expected, actual)
def test_encoding_unlimited_dims(self):
ds = Dataset({'x': ('y', np.arange(10.0))})
ds.encoding = {'unlimited_dims': ['y']}
with create_tmp_file() as tmp_file:
with pytest.warns(UserWarning):
ds.to_netcdf(tmp_file, engine='h5netcdf')
with pytest.warns(UserWarning):
ds.to_netcdf(tmp_file, engine='h5netcdf', unlimited_dims=['y'])
# tests pending h5netcdf fix
@pytest.mark.xfail
class H5NetCDFDataTestAutocloseTrue(H5NetCDFDataTest):
autoclose = True
class OpenMFDatasetManyFilesTest(TestCase):
def validate_open_mfdataset_autoclose(self, engine, nfiles=10):
randdata = np.random.randn(nfiles)
original = Dataset({'foo': ('x', randdata)})
# test standard open_mfdataset approach with too many files
with create_tmp_files(nfiles) as tmpfiles:
for readengine in engine:
writeengine = (readengine if readengine != 'pynio'
else 'netcdf4')
# split into multiple sets of temp files
for ii in original.x.values:
subds = original.isel(x=slice(ii, ii+1))
subds.to_netcdf(tmpfiles[ii], engine=writeengine)
# check that calculation on opened datasets works properly
ds = open_mfdataset(tmpfiles, engine=readengine,
autoclose=True)
self.assertAllClose(ds.x.sum().values, (nfiles*(nfiles-1))/2)
self.assertAllClose(ds.foo.sum().values, np.sum(randdata))
self.assertAllClose(ds.sum().foo.values, np.sum(randdata))
ds.close()
def validate_open_mfdataset_large_num_files(self, engine):
self.validate_open_mfdataset_autoclose(engine, nfiles=2000)
@requires_dask
@requires_netCDF4
def test_1_autoclose_netcdf4(self):
self.validate_open_mfdataset_autoclose(engine=['netcdf4'])
@requires_dask
@requires_scipy
def test_2_autoclose_scipy(self):
self.validate_open_mfdataset_autoclose(engine=['scipy'])
@requires_dask
@requires_pynio
def test_3_autoclose_pynio(self):
self.validate_open_mfdataset_autoclose(engine=['pynio'])
# use of autoclose=True with h5netcdf broken because of
# probable h5netcdf error
@requires_dask
@requires_h5netcdf
@pytest.mark.xfail
def test_4_autoclose_h5netcdf(self):
self.validate_open_mfdataset_autoclose(engine=['h5netcdf'])
# These tests below are marked as flaky (and skipped by default) because
# they fail sometimes on Travis-CI, for no clear reason.
@requires_dask
@requires_netCDF4
@flaky
@pytest.mark.slow
def test_1_open_large_num_files_netcdf4(self):
self.validate_open_mfdataset_large_num_files(engine=['netcdf4'])
@requires_dask
@requires_scipy
@flaky
@pytest.mark.slow
def test_2_open_large_num_files_scipy(self):
self.validate_open_mfdataset_large_num_files(engine=['scipy'])
@requires_dask
@requires_pynio
@flaky
@pytest.mark.slow
def test_3_open_large_num_files_pynio(self):
self.validate_open_mfdataset_large_num_files(engine=['pynio'])
# use of autoclose=True with h5netcdf broken because of
# probable h5netcdf error
@requires_dask
@requires_h5netcdf
@flaky
@pytest.mark.xfail
@pytest.mark.slow
def test_4_open_large_num_files_h5netcdf(self):
self.validate_open_mfdataset_large_num_files(engine=['h5netcdf'])
@requires_dask
@requires_scipy
@requires_netCDF4
class DaskTest(TestCase, DatasetIOTestCases):
@contextlib.contextmanager
def create_store(self):
yield Dataset()
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={},
allow_cleanup_failure=False):
yield data.chunk()
def test_roundtrip_datetime_data(self):
# Override method in DatasetIOTestCases - remove not applicable save_kwds
times = pd.to_datetime(['2000-01-01', '2000-01-02', 'NaT'])
expected = Dataset({'t': ('t', times), 't0': times[0]})
with self.roundtrip(expected) as actual:
self.assertDatasetIdentical(expected, actual)
def test_write_store(self):
# Override method in DatasetIOTestCases - not applicable to dask
pass
def test_dataset_caching(self):
expected = Dataset({'foo': ('x', [5, 6, 7])})
with self.roundtrip(expected) as actual:
assert not actual.foo.variable._in_memory
actual.foo.values # no caching
assert not actual.foo.variable._in_memory
def test_open_mfdataset(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
original.isel(x=slice(5)).to_netcdf(tmp1)
original.isel(x=slice(5, 10)).to_netcdf(tmp2)
with open_mfdataset([tmp1, tmp2],
autoclose=self.autoclose) as actual:
self.assertIsInstance(actual.foo.variable.data, da.Array)
self.assertEqual(actual.foo.variable.data.chunks,
((5, 5),))
self.assertDatasetAllClose(original, actual)
with open_mfdataset([tmp1, tmp2], chunks={'x': 3},
autoclose=self.autoclose) as actual:
self.assertEqual(actual.foo.variable.data.chunks,
((3, 2, 3, 2),))
with self.assertRaisesRegexp(IOError, 'no files to open'):
open_mfdataset('foo-bar-baz-*.nc', autoclose=self.autoclose)
@requires_pathlib
def test_open_mfdataset_pathlib(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
tmp1 = Path(tmp1)
tmp2 = Path(tmp2)
original.isel(x=slice(5)).to_netcdf(tmp1)
original.isel(x=slice(5, 10)).to_netcdf(tmp2)
with open_mfdataset([tmp1, tmp2],
autoclose=self.autoclose) as actual:
self.assertDatasetAllClose(original, actual)
def test_attrs_mfdataset(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
ds1 = original.isel(x=slice(5))
ds2 = original.isel(x=slice(5, 10))
ds1.attrs['test1'] = 'foo'
ds2.attrs['test2'] = 'bar'
ds1.to_netcdf(tmp1)
ds2.to_netcdf(tmp2)
with open_mfdataset([tmp1, tmp2]) as actual:
# presumes that attributes inherited from
# first dataset loaded
self.assertEqual(actual.test1, ds1.test1)
# attributes from ds2 are not retained, e.g.,
with self.assertRaisesRegexp(AttributeError,
'no attribute'):
actual.test2
def test_preprocess_mfdataset(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
def preprocess(ds):
return ds.assign_coords(z=0)
expected = preprocess(original)
with open_mfdataset(tmp, preprocess=preprocess,
autoclose=self.autoclose) as actual:
self.assertDatasetIdentical(expected, actual)
def test_save_mfdataset_roundtrip(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
datasets = [original.isel(x=slice(5)),
original.isel(x=slice(5, 10))]
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
save_mfdataset(datasets, [tmp1, tmp2])
with open_mfdataset([tmp1, tmp2],
autoclose=self.autoclose) as actual:
self.assertDatasetIdentical(actual, original)
def test_save_mfdataset_invalid(self):
ds = Dataset()
with self.assertRaisesRegexp(ValueError, 'cannot use mode'):
save_mfdataset([ds, ds], ['same', 'same'])
with self.assertRaisesRegexp(ValueError, 'same length'):
save_mfdataset([ds, ds], ['only one path'])
@requires_pathlib
def test_save_mfdataset_pathlib_roundtrip(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
datasets = [original.isel(x=slice(5)),
original.isel(x=slice(5, 10))]
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
tmp1 = Path(tmp1)
tmp2 = Path(tmp2)
save_mfdataset(datasets, [tmp1, tmp2])
with open_mfdataset([tmp1, tmp2],
autoclose=self.autoclose) as actual:
self.assertDatasetIdentical(actual, original)
def test_open_and_do_math(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
with open_mfdataset(tmp, autoclose=self.autoclose) as ds:
actual = 1.0 * ds
self.assertDatasetAllClose(original, actual)
def test_open_mfdataset_concat_dim_none(self):
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
data = Dataset({'x': 0})
data.to_netcdf(tmp1)
Dataset({'x': np.nan}).to_netcdf(tmp2)
with open_mfdataset([tmp1, tmp2], concat_dim=None,
autoclose=self.autoclose) as actual:
self.assertDatasetIdentical(data, actual)
def test_open_dataset(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
with open_dataset(tmp, chunks={'x': 5}) as actual:
self.assertIsInstance(actual.foo.variable.data, da.Array)
self.assertEqual(actual.foo.variable.data.chunks, ((5, 5),))
self.assertDatasetIdentical(original, actual)
with open_dataset(tmp, chunks=5) as actual:
self.assertDatasetIdentical(original, actual)
with open_dataset(tmp) as actual:
self.assertIsInstance(actual.foo.variable.data, np.ndarray)
self.assertDatasetIdentical(original, actual)
def test_dask_roundtrip(self):
with create_tmp_file() as tmp:
data = create_test_data()
data.to_netcdf(tmp)
chunks = {'dim1': 4, 'dim2': 4, 'dim3': 4, 'time': 10}
with open_dataset(tmp, chunks=chunks) as dask_ds:
self.assertDatasetIdentical(data, dask_ds)
with create_tmp_file() as tmp2:
dask_ds.to_netcdf(tmp2)
with open_dataset(tmp2) as on_disk:
self.assertDatasetIdentical(data, on_disk)
def test_deterministic_names(self):
with create_tmp_file() as tmp:
data = create_test_data()
data.to_netcdf(tmp)
with open_mfdataset(tmp, autoclose=self.autoclose) as ds:
original_names = dict((k, v.data.name)
for k, v in ds.data_vars.items())
with open_mfdataset(tmp, autoclose=self.autoclose) as ds:
repeat_names = dict((k, v.data.name)
for k, v in ds.data_vars.items())
for var_name, dask_name in original_names.items():
self.assertIn(var_name, dask_name)
self.assertEqual(dask_name[:13], 'open_dataset-')
self.assertEqual(original_names, repeat_names)
def test_dataarray_compute(self):
# Test DataArray.compute() on dask backend.
# The test for Dataset.compute() is already in DatasetIOTestCases;
# however dask is the only tested backend which supports DataArrays
actual = DataArray([1,2]).chunk()
computed = actual.compute()
self.assertFalse(actual._in_memory)
self.assertTrue(computed._in_memory)
self.assertDataArrayAllClose(actual, computed)
class DaskTestAutocloseTrue(DaskTest):
autoclose = True
@network
@requires_scipy_or_netCDF4
@requires_pydap
class PydapTest(TestCase):
@contextlib.contextmanager
def create_datasets(self, **kwargs):
url = 'http://test.opendap.org/opendap/hyrax/data/nc/bears.nc'
actual = open_dataset(url, engine='pydap', **kwargs)
with open_example_dataset('bears.nc') as expected:
# don't check attributes since pydap doesn't serialize them
# correctly also skip the "bears" variable since the test DAP
# server incorrectly concatenates it.
actual = actual.drop('bears')
expected = expected.drop('bears')
yield actual, expected
def test_cmp_local_file(self):
with self.create_datasets() as (actual, expected):
self.assertDatasetEqual(actual, expected)
# global attributes should be global attributes on the dataset
self.assertNotIn('NC_GLOBAL', actual.attrs)
self.assertIn('history', actual.attrs)
with self.create_datasets() as (actual, expected):
self.assertDatasetEqual(actual.isel(l=2), expected.isel(l=2))
with self.create_datasets() as (actual, expected):
self.assertDatasetEqual(actual.isel(i=0, j=-1),
expected.isel(i=0, j=-1))
with self.create_datasets() as (actual, expected):
self.assertDatasetEqual(actual.isel(j=slice(1, 2)),
expected.isel(j=slice(1, 2)))
@requires_dask
def test_dask(self):
with self.create_datasets(chunks={'j': 2}) as (actual, expected):
self.assertDatasetEqual(actual, expected)
@requires_scipy
@requires_pynio
class TestPyNio(CFEncodedDataTest, Only32BitTypes, TestCase):
def test_write_store(self):
# pynio is read-only for now
pass
def test_orthogonal_indexing(self):
# pynio also does not support list-like indexing
pass
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={},
allow_cleanup_failure=False):
with create_tmp_file(
allow_cleanup_failure=allow_cleanup_failure) as tmp_file:
data.to_netcdf(tmp_file, engine='scipy', **save_kwargs)
with open_dataset(tmp_file, engine='pynio',
autoclose=self.autoclose, **open_kwargs) as ds:
yield ds
def test_weakrefs(self):
example = Dataset({'foo': ('x', np.arange(5.0))})
expected = example.rename({'foo': 'bar', 'x': 'y'})
with create_tmp_file() as tmp_file:
example.to_netcdf(tmp_file, engine='scipy')
on_disk = open_dataset(tmp_file, engine='pynio')
actual = on_disk.rename({'foo': 'bar', 'x': 'y'})
del on_disk # trigger garbage collection
self.assertDatasetIdentical(actual, expected)
class TestPyNioAutocloseTrue(TestPyNio):
autoclose = True
@requires_rasterio
class TestRasterio(TestCase):
def test_serialization_utm(self):
import rasterio
from rasterio.transform import from_origin
# Create a geotiff file in utm proj
with create_tmp_file(suffix='.tif') as tmp_file:
# data
nx, ny, nz = 4, 3, 3
data = np.arange(nx*ny*nz,
dtype=rasterio.float32).reshape(nz, ny, nx)
transform = from_origin(5000, 80000, 1000, 2000.)
with rasterio.open(
tmp_file, 'w',
driver='GTiff', height=ny, width=nx, count=nz,
crs={'units': 'm', 'no_defs': True, 'ellps': 'WGS84',
'proj': 'utm', 'zone': 18},
transform=transform,
dtype=rasterio.float32) as s:
s.write(data)
dx, dy = s.res[0], -s.res[1]
# Tests
expected = DataArray(data, dims=('band', 'y', 'x'),
coords={
'band': [1, 2, 3],
'y': -np.arange(ny) * 2000 + 80000 + dy/2,
'x': np.arange(nx) * 1000 + 5000 + dx/2,
})
with xr.open_rasterio(tmp_file) as rioda:
assert_allclose(rioda, expected)
assert 'crs' in rioda.attrs
assert isinstance(rioda.attrs['crs'], basestring)
assert 'res' in rioda.attrs
assert isinstance(rioda.attrs['res'], tuple)
assert 'is_tiled' in rioda.attrs
assert isinstance(rioda.attrs['is_tiled'], np.uint8)
assert 'transform' in rioda.attrs
assert isinstance(rioda.attrs['transform'], tuple)
# Write it to a netcdf and read again (roundtrip)
with create_tmp_file(suffix='.nc') as tmp_nc_file:
rioda.to_netcdf(tmp_nc_file)
with xr.open_dataarray(tmp_nc_file) as ncds:
assert_identical(rioda, ncds)
def test_serialization_platecarree(self):
import rasterio
from rasterio.transform import from_origin
# Create a geotiff file in latlong proj
with create_tmp_file(suffix='.tif') as tmp_file:
# data
nx, ny = 8, 10
data = np.arange(80, dtype=rasterio.float32).reshape(ny, nx)
transform = from_origin(1, 2, 0.5, 2.)
with rasterio.open(
tmp_file, 'w',
driver='GTiff', height=ny, width=nx, count=1,
crs='+proj=latlong',
transform=transform,
dtype=rasterio.float32) as s:
s.write(data, indexes=1)
dx, dy = s.res[0], -s.res[1]
# Tests
expected = DataArray(data[np.newaxis, ...],
dims=('band', 'y', 'x'),
coords={'band': [1],
'y': -np.arange(ny)*2 + 2 + dy/2,
'x': np.arange(nx)*0.5 + 1 + dx/2,
})
with xr.open_rasterio(tmp_file) as rioda:
assert_allclose(rioda, expected)
assert 'crs' in rioda.attrs
assert isinstance(rioda.attrs['crs'], basestring)
assert 'res' in rioda.attrs
assert isinstance(rioda.attrs['res'], tuple)
assert 'is_tiled' in rioda.attrs
assert isinstance(rioda.attrs['is_tiled'], np.uint8)
assert 'transform' in rioda.attrs
assert isinstance(rioda.attrs['transform'], tuple)
# Write it to a netcdf and read again (roundtrip)
with create_tmp_file(suffix='.nc') as tmp_nc_file:
rioda.to_netcdf(tmp_nc_file)
with xr.open_dataarray(tmp_nc_file) as ncds:
assert_identical(rioda, ncds)
def test_indexing(self):
import rasterio
from rasterio.transform import from_origin
# Create a geotiff file in latlong proj
with create_tmp_file(suffix='.tif') as tmp_file:
# data
nx, ny, nz = 8, 10, 3
data = np.arange(nx*ny*nz,
dtype=rasterio.float32).reshape(nz, ny, nx)
transform = from_origin(1, 2, 0.5, 2.)
with rasterio.open(
tmp_file, 'w',
driver='GTiff', height=ny, width=nx, count=nz,
crs='+proj=latlong',
transform=transform,
dtype=rasterio.float32) as s:
s.write(data)
dx, dy = s.res[0], -s.res[1]
# ref
expected = DataArray(data, dims=('band', 'y', 'x'),
coords={'x': (np.arange(nx)*0.5 + 1) + dx/2,
'y': (-np.arange(ny)*2 + 2) + dy/2,
'band': [1, 2, 3]})
with xr.open_rasterio(tmp_file, cache=False) as actual:
# tests
# assert_allclose checks all data + coordinates
assert_allclose(actual, expected)
# Slicing
ex = expected.isel(x=slice(2, 5), y=slice(5, 7))
ac = actual.isel(x=slice(2, 5), y=slice(5, 7))
assert_allclose(ac, ex)
ex = expected.isel(band=slice(1, 2), x=slice(2, 5),
y=slice(5, 7))
ac = actual.isel(band=slice(1, 2), x=slice(2, 5),
y=slice(5, 7))
assert_allclose(ac, ex)
# Selecting lists of bands is fine
ex = expected.isel(band=[1, 2])
ac = actual.isel(band=[1, 2])
assert_allclose(ac, ex)
ex = expected.isel(band=[0, 2])
ac = actual.isel(band=[0, 2])
assert_allclose(ac, ex)
# but on x and y only windowed operations are allowed, more
# exotic slicing should raise an error
err_msg = 'not valid on rasterio'
with self.assertRaisesRegexp(IndexError, err_msg):
actual.isel(x=[2, 4], y=[1, 3]).values
with self.assertRaisesRegexp(IndexError, err_msg):
actual.isel(x=[4, 2]).values
with self.assertRaisesRegexp(IndexError, err_msg):
actual.isel(x=slice(5, 2, -1)).values
# Integer indexing
ex = expected.isel(band=1)
ac = actual.isel(band=1)
assert_allclose(ac, ex)
ex = expected.isel(x=1, y=2)
ac = actual.isel(x=1, y=2)
assert_allclose(ac, ex)
ex = expected.isel(band=0, x=1, y=2)
ac = actual.isel(band=0, x=1, y=2)
assert_allclose(ac, ex)
# Mixed
ex = actual.isel(x=slice(2), y=slice(2))
ac = actual.isel(x=[0, 1], y=[0, 1])
assert_allclose(ac, ex)
ex = expected.isel(band=0, x=1, y=slice(5, 7))
ac = actual.isel(band=0, x=1, y=slice(5, 7))
assert_allclose(ac, ex)
ex = expected.isel(band=0, x=slice(2, 5), y=2)
ac = actual.isel(band=0, x=slice(2, 5), y=2)
assert_allclose(ac, ex)
# One-element lists
ex = expected.isel(band=[0], x=slice(2, 5), y=[2])
ac = actual.isel(band=[0], x=slice(2, 5), y=[2])
assert_allclose(ac, ex)
def test_caching(self):
import rasterio
from rasterio.transform import from_origin
# Create a geotiff file in latlong proj
with create_tmp_file(suffix='.tif') as tmp_file:
# data
nx, ny, nz = 8, 10, 3
data = np.arange(nx*ny*nz,
dtype=rasterio.float32).reshape(nz, ny, nx)
transform = from_origin(1, 2, 0.5, 2.)
with rasterio.open(
tmp_file, 'w',
driver='GTiff', height=ny, width=nx, count=nz,
crs='+proj=latlong',
transform=transform,
dtype=rasterio.float32) as s:
s.write(data)
dx, dy = s.res[0], -s.res[1]
# ref
expected = DataArray(data, dims=('band', 'y', 'x'),
coords={'x': (np.arange(nx)*0.5 + 1) + dx/2,
'y': (-np.arange(ny)*2 + 2) + dy/2,
'band': [1, 2, 3]})
# Cache is the default
with xr.open_rasterio(tmp_file) as actual:
# Without cache an error is raised
err_msg = 'not valid on rasterio'
with self.assertRaisesRegexp(IndexError, err_msg):
actual.isel(x=[2, 4]).values
# This should cache everything
assert_allclose(actual, expected)
# once cached, non-windowed indexing should become possible
ac = actual.isel(x=[2, 4])
ex = expected.isel(x=[2, 4])
assert_allclose(ac, ex)
@requires_dask
def test_chunks(self):
import rasterio
from rasterio.transform import from_origin
# Create a geotiff file in latlong proj
with create_tmp_file(suffix='.tif') as tmp_file:
# data
nx, ny, nz = 8, 10, 3
data = np.arange(nx*ny*nz,
dtype=rasterio.float32).reshape(nz, ny, nx)
transform = from_origin(1, 2, 0.5, 2.)
with rasterio.open(
tmp_file, 'w',
driver='GTiff', height=ny, width=nx, count=nz,
crs='+proj=latlong',
transform=transform,
dtype=rasterio.float32) as s:
s.write(data)
dx, dy = s.res[0], -s.res[1]
# Chunk at open time
with xr.open_rasterio(tmp_file, chunks=(1, 2, 2)) as actual:
import dask.array as da
self.assertIsInstance(actual.data, da.Array)
assert 'open_rasterio' in actual.data.name
# ref
expected = DataArray(data, dims=('band', 'y', 'x'),
coords={'x': np.arange(nx)*0.5 + 1 + dx/2,
'y': -np.arange(ny)*2 + 2 + dy/2,
'band': [1, 2, 3]})
# do some arithmetic
ac = actual.mean()
ex = expected.mean()
assert_allclose(ac, ex)
ac = actual.sel(band=1).mean(dim='x')
ex = expected.sel(band=1).mean(dim='x')
assert_allclose(ac, ex)
class TestEncodingInvalid(TestCase):
def test_extract_nc4_variable_encoding(self):
var = xr.Variable(('x',), [1, 2, 3], {}, {'foo': 'bar'})
with self.assertRaisesRegexp(ValueError, 'unexpected encoding'):
_extract_nc4_variable_encoding(var, raise_on_invalid=True)
var = xr.Variable(('x',), [1, 2, 3], {}, {'chunking': (2, 1)})
encoding = _extract_nc4_variable_encoding(var)
self.assertEqual({}, encoding)
def test_extract_h5nc_encoding(self):
# not supported with h5netcdf (yet)
var = xr.Variable(('x',), [1, 2, 3], {},
{'least_sigificant_digit': 2})
with self.assertRaisesRegexp(ValueError, 'unexpected encoding'):
_extract_nc4_variable_encoding(var, raise_on_invalid=True)
class MiscObject:
pass
@requires_netCDF4
class TestValidateAttrs(TestCase):
def test_validating_attrs(self):
def new_dataset():
return Dataset({'data': ('y', np.arange(10.0))},
{'y': np.arange(10)})
def new_dataset_and_dataset_attrs():
ds = new_dataset()
return ds, ds.attrs
def new_dataset_and_data_attrs():
ds = new_dataset()
return ds, ds.data.attrs
def new_dataset_and_coord_attrs():
ds = new_dataset()
return ds, ds.coords['y'].attrs
for new_dataset_and_attrs in [new_dataset_and_dataset_attrs,
new_dataset_and_data_attrs,
new_dataset_and_coord_attrs]:
ds, attrs = new_dataset_and_attrs()
attrs[123] = 'test'
with self.assertRaisesRegexp(TypeError, 'Invalid name for attr'):
ds.to_netcdf('test.nc')
ds, attrs = new_dataset_and_attrs()
attrs[MiscObject()] = 'test'
with self.assertRaisesRegexp(TypeError, 'Invalid name for attr'):
ds.to_netcdf('test.nc')
ds, attrs = new_dataset_and_attrs()
attrs[''] = 'test'
with self.assertRaisesRegexp(ValueError, 'Invalid name for attr'):
ds.to_netcdf('test.nc')
# This one should work
ds, attrs = new_dataset_and_attrs()
attrs['test'] = 'test'
with create_tmp_file() as tmp_file:
ds.to_netcdf(tmp_file)
ds, attrs = new_dataset_and_attrs()
attrs['test'] = {'a': 5}
with self.assertRaisesRegexp(TypeError, 'Invalid value for attr'):
ds.to_netcdf('test.nc')
ds, attrs = new_dataset_and_attrs()
attrs['test'] = MiscObject()
with self.assertRaisesRegexp(TypeError, 'Invalid value for attr'):
ds.to_netcdf('test.nc')
ds, attrs = new_dataset_and_attrs()
attrs['test'] = 5
with create_tmp_file() as tmp_file:
ds.to_netcdf(tmp_file)
ds, attrs = new_dataset_and_attrs()
attrs['test'] = 3.14
with create_tmp_file() as tmp_file:
ds.to_netcdf(tmp_file)
ds, attrs = new_dataset_and_attrs()
attrs['test'] = [1, 2, 3, 4]
with create_tmp_file() as tmp_file:
ds.to_netcdf(tmp_file)
ds, attrs = new_dataset_and_attrs()
attrs['test'] = (1.9, 2.5)
with create_tmp_file() as tmp_file:
ds.to_netcdf(tmp_file)
ds, attrs = new_dataset_and_attrs()
attrs['test'] = np.arange(5)
with create_tmp_file() as tmp_file:
ds.to_netcdf(tmp_file)
ds, attrs = new_dataset_and_attrs()
attrs['test'] = np.arange(12).reshape(3, 4)
with create_tmp_file() as tmp_file:
ds.to_netcdf(tmp_file)
ds, attrs = new_dataset_and_attrs()
attrs['test'] = 'This is a string'
with create_tmp_file() as tmp_file:
ds.to_netcdf(tmp_file)
ds, attrs = new_dataset_and_attrs()
attrs['test'] = ''
with create_tmp_file() as tmp_file:
ds.to_netcdf(tmp_file)
ds, attrs = new_dataset_and_attrs()
attrs['test'] = np.arange(12).reshape(3, 4)
with create_tmp_file() as tmp_file:
ds.to_netcdf(tmp_file)
@requires_scipy_or_netCDF4
class TestDataArrayToNetCDF(TestCase):
def test_dataarray_to_netcdf_no_name(self):
original_da = DataArray(np.arange(12).reshape((3, 4)))
with create_tmp_file() as tmp:
original_da.to_netcdf(tmp)
with open_dataarray(tmp) as loaded_da:
self.assertDataArrayIdentical(original_da, loaded_da)
def test_dataarray_to_netcdf_with_name(self):
original_da = DataArray(np.arange(12).reshape((3, 4)),
name='test')
with create_tmp_file() as tmp:
original_da.to_netcdf(tmp)
with open_dataarray(tmp) as loaded_da:
self.assertDataArrayIdentical(original_da, loaded_da)
def test_dataarray_to_netcdf_coord_name_clash(self):
original_da = DataArray(np.arange(12).reshape((3, 4)),
dims=['x', 'y'],
name='x')
with create_tmp_file() as tmp:
original_da.to_netcdf(tmp)
with open_dataarray(tmp) as loaded_da:
self.assertDataArrayIdentical(original_da, loaded_da)
def test_open_dataarray_options(self):
data = DataArray(
np.arange(5), coords={'y': ('x', range(5))}, dims=['x'])
with create_tmp_file() as tmp:
data.to_netcdf(tmp)
expected = data.drop('y')
with open_dataarray(tmp, drop_variables=['y']) as loaded:
self.assertDataArrayIdentical(expected, loaded)
@requires_pathlib
def test_dataarray_to_netcdf_no_name_pathlib(self):
original_da = DataArray(np.arange(12).reshape((3, 4)))
with create_tmp_file() as tmp:
tmp = Path(tmp)
original_da.to_netcdf(tmp)
with open_dataarray(tmp) as loaded_da:
self.assertDataArrayIdentical(original_da, loaded_da)
|
jhamman/xray
|
xarray/tests/test_backends.py
|
Python
|
apache-2.0
| 81,552
|
[
"NetCDF"
] |
4d5674fdf22048c73d73e598deb446650ae7e631083d0f7f395f1c2afd9a1b2f
|
#!/usr/bin/env python
import ts
import matplotlib.pyplot as plt
import numpy as np
import mdtraj as md
from argparse import ArgumentParser
parser = ArgumentParser( description = 'Calculate Transfer Entropy')
#
# INPUT FILES
#
parser.add_argument("-f","--traj",dest="traj",action="store",type=str,default=None,help="Input Trajectory",required=True,metavar="TRAJ FILE")
parser.add_argument("-t","--top",dest="top",action="store",type=str,default=None,help="Input Topology",required=True,metavar="TOPOL FILE")
#
# OUTPUT FILES
#
parser.add_argument("-o","--out",dest="out",action="store",type=str,default=None,required=True,help="Output File Name",metavar="DAT FILE")
#
# VAR ARGUMENTS
#
parser.add_argument("-s","--stride",dest="stride",action="store",type=int,default=1,help="time", metavar="INTEGER")
#parser.add_argument("-d","--ndim",dest="ndim",action="store",type=int,default=1,help="nuber of dimensions", metavar="INTEGER")
parser.add_argument("-n","--nbins",dest="nbins",action="store",type=int ,default=10,help="number of bins", metavar="INTEGER")
parser.add_argument("-b","--opt",dest="opt",action="store_true",default=False,help="toggle bins optimization")
parser.add_argument("-p","--plot",dest="plot",action="store_true",default=False,help="toggle auto-saving matrix plot")
#
options = parser.parse_args()
f_traj = options.traj
f_top = options.top
f_out = options.out
stride = options.stride
t = md.load(f_traj,top=f_top,stride=stride)
n_fr = len(t)
Ca = t.top.select('name CA')
aver_str = np.average(t.xyz[:,Ca], axis=0)
dat = np.swapaxes(t.xyz[:,Ca] - aver_str,1,2)
RMSF = np.sqrt(np.average(np.sum(dat**2,axis=1),axis=0))
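# The line above computes the per-residue root-mean-square fluctuation,
# RMSF_i = sqrt( <|r_i(t) - <r_i>|^2>_t ): squared deviations of each C-alpha from the
# average structure, summed over x, y, z and averaged over frames. This is a sketch of
# the standard definition, assuming that is what the script intends to report.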
del(t)
DATA= ts.TimeSer(dat,n_fr,dim=3,nbins=options.nbins,reshape=False)
DATA.calc_bins(opt=options.opt)
T, D = DATA.transfer_entropy_omp()
fig0=plt.figure()
plt.plot(RMSF)
fig = plt.figure()
ax = fig.add_subplot(111)
mat = ax.matshow(D)
fig.colorbar(mat)
plt.show()
#tx = ax.get_xticks().astype(int)
#ty = ax.get_yticks().astype(int)
#ax.set_xticklabels(ticks[tx])
#ax.set_yticklabels(ticks[ty])
if options.plot:
fig.savefig(f_out.split('.')[0]+".svg",format='svg')
np.savetxt(f_out.split('.')[0]+".dat",D)
quit()
|
sherpaman/MolToolPy
|
bin/calc_transfer_info_traj_fluct.py
|
Python
|
gpl-2.0
| 2,175
|
[
"MDTraj"
] |
585134ac84626d210363e6a807fd3f5aa4250cca1fadf4c13a2224d40ec0fd87
|
from __future__ import division
from __future__ import print_function
""" 30.06.2016 copied from "produce_forecasts_nrt.py"
which was operational on SatLive
"""
from datetime import datetime
import sys, string, os
import logging
sys.path.insert(0, "/home/lom/users/cll/pytroll/install/lib/python2.6/site-packages")
from mpop.satellites import GeostationaryFactory
from mpop.projector import get_area_def
from mpop.utils import debug_on
from pyresample import plot
import numpy as np
from pydecorate import DecoratorAGG
import aggdraw
from PIL import ImageFont, ImageDraw
from os.path import exists
from os import makedirs
from mpop.imageo.HRWimage import HRW_2dfield # , HRWstreamplot, HRWimage
from datetime import timedelta
from plot_msg import create_PIL_image, add_borders_and_rivers, add_title
from pycoast import ContourWriterAGG
from my_msg_module import check_near_real_time, format_name, fill_with_closest_pixel
from copy import deepcopy
from my_msg_module import convert_NWCSAF_to_radiance_format, get_NWC_pge_name
from mpop.imageo.palettes import convert_palette2colormap
from plot_msg import load_products
import matplotlib.pyplot as plt
import time
import copy
from particles_displacement import particles_displacement
import numpy.ma as ma
import netCDF4
import pickle
from scipy import ndimage
from my_msg_module import check_input
from Cells import Cells
import imp
import scp_settings
scpOutputDir = scp_settings.scpOutputDir
scpID = scp_settings.scpID
import glob
import inspect
# debug_on()
import trollimage
def read_HRW(sat, sat_nr, instrument, time_slot, ntimes, dt=5, read_basic_or_detailed='detailed',
min_correlation=85, min_conf_nwp=80, min_conf_no_nwp=80, cloud_type=None, level=None, p_limits=None):
""" Reads the High Resolution Wind (Nowcasting Saf) .
Parameters
----------
sat : satellite (string e.g. 'Meteosat')
sat_nr : satellite number (string e.g. '09')
instrument : satellite instrument (string e.g. 'seviri')
time_slot : datetime object
the time of interest for which the extraction is wanted
ntimes : int
number of dt timesteps before time_slot considered for the derivation of the wind vectors
dt : int
number of minutes in one timestep
    read_basic_or_detailed : "basic" or "detailed"
        whether to read the basic or the detailed HRW wind dataset
min_correlation : int (0:100)
correlation minimum required for a vector to be included
min_conf_nwp : int(0,100)
min confidence for a vector to be included with NWP
min_conf_no_nwp : int (0:100)
min confidence for a vector to be included without NWP
cloud_type : list of int
the cloud types included
level : "seviri-levelX"
reader level
p_limits : int in hPa
pressure limits for the vectors
    Returns
----------
data : wind vectors extracted for given settings
Raises
----------
"""
#print time_slot
data = GeostationaryFactory.create_scene(sat, sat_nr, instrument, time_slot)
data.load(['HRW'], reader_level="seviri-level5", read_basic_or_detailed=read_basic_or_detailed)
# read data for previous time steps if needed
for it in range(1,ntimes):
        time_slot_i = time_slot - timedelta( minutes = it*dt )
        data_i = GeostationaryFactory.create_scene(sat, sat_nr, instrument, time_slot_i)
data_i.load(['HRW'], reader_level="seviri-level5", read_basic_or_detailed=read_basic_or_detailed)
# merge all datasets (addition of datasets defined in class HRW_class, see mpop/mpop/satin/nwcsaf_hrw_hdf.py)
data['HRW'].HRW_detailed = data['HRW'].HRW_detailed + data_i['HRW'].HRW_detailed
data['HRW'].HRW_basic = data['HRW'].HRW_basic + data_i['HRW'].HRW_basic
# apply quality filter
data['HRW'].HRW_detailed = data['HRW'].HRW_detailed.filter(min_correlation=min_correlation, \
min_conf_nwp=min_conf_nwp, min_conf_no_nwp=min_conf_no_nwp, cloud_type=cloud_type, level=level, p_limits=p_limits)
return data
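# Hedged usage sketch (added for illustration, not part of the original module): the
# satellite identifiers and quality thresholds below simply echo the docstring defaults
# above and are assumptions, not values verified against a live NWC SAF installation.
def _example_read_hrw(time_slot):
    """Illustrative only: collect 30 minutes (6 x 5 min) of detailed, quality-filtered HRW vectors."""
    return read_HRW('meteosat', '09', 'seviri', time_slot, ntimes=6, dt=5,
                    read_basic_or_detailed='detailed',
                    min_correlation=85, min_conf_nwp=80, min_conf_no_nwp=80)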
# ------------------------------------------
def m_to_pixel(value, size, conversion): #,coordinate):
""" Converts coordinates from meters to pixels.
Parameters
----------
value : sequence
coordinate to convert (in px or m)
    size : int
        size of one satellite pixel in meters
    conversion : "to_pixel" or other
        conversion to apply, either meters to pixels or vice versa
Returns
----------
m or px : number
coordinate converted into meter or px
Raises
----------
"""
if conversion=='to_pixel':
px = np.round(value//size)
        px[np.isnan(value)] = np.nan
px = px.astype(int)
return px
else:
m = (value*size)+size/2
        m[np.isnan(value)] = np.nan
return m
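# Worked example with assumed numbers (not taken from the original script): with a pixel
# size of 1000 m, m_to_pixel(np.array([2500.0]), 1000, 'to_pixel') yields pixel index 2
# (2500 // 1000 = 2), and converting pixel 2 back with any other `conversion` string
# returns the pixel-centre coordinate 2 * 1000 + 1000/2 = 2500.0 m.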
def string_date(t):
""" Compute the product of a sequence of numbers.
Parameters
----------
t : datetime object
datetime to convert in string
Returns
----------
yearS, monthS, dayS, hourS, minS : strings
string corresponding to year, month, day, hour and minute.
month, day, hour, minute all have 2 characters (with leading 0 if necessary).
Raises
----------
"""
yearS = str(t.year)
monthS = "%02d" % t.month
dayS = "%02d" % t.day
hourS = "%02d" % t.hour
minS = "%02d" % t.minute
return yearS, monthS, dayS, hourS, minS
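# Example (date chosen for illustration): string_date(datetime(2016, 6, 30, 9, 5))
# returns ('2016', '06', '30', '09', '05').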
def check_cosmo_area (nc_cosmo, area):
""" Compares the cosmo area with the wanted area and provides coordinates corners.
Parameters
----------
nc_cosmo : netcdf object, wind data cosmo
example: netCDF4.Dataset(file_cosmo_1,'r',format='NETCDF4')
netcdf must contain:
- the coordinates (x_1, y_1)
area : str
string representing one of the areas present in area_def
Returns
----------
x_min_cut, x_max_cut, y_min_cut, y_max_cut : int
coordinates of the edges of the wanted area withing the cosmo area
Raises
----------
stops execution if:
- area wanted is larger than the cosmo area
- size of cosmo px in m is different from size of wanted area px in m
"""
#reads the x and y coordinates from the netcdf cosmo file
x = nc_cosmo.variables['y_1'][:]
y = nc_cosmo.variables['x_1'][:]
#obtains the coordinates corner pixels (+-500 because cosmo coordinates center of cell)
# !!! !hau! improve this: does assume fixed pixel size of 1000m !!!
x_min_cosmo = x.min()-500
x_max_cosmo = x.max()+500
y_min_cosmo = y.min()-500
y_max_cosmo = y.max()+500
#gets the area definition and the coordinates of its corner pixels
area_wanted = get_area_def(area)
x_min_wanted = area_wanted.area_extent[1]
x_max_wanted = area_wanted.area_extent[3]
y_min_wanted = area_wanted.area_extent[0]
y_max_wanted = area_wanted.area_extent[2]
#obtains the pixel size of cosmo
x = np.sort(x)
dx_cosmo = x[1]-x[0]
y = np.sort(y)
dy_cosmo = y[1]-y[0]
#checks if the pixel size of the cosmo model and the wanted area match, if not quits
if dy_cosmo != area_wanted.pixel_size_y or dx_cosmo != area_wanted.pixel_size_x:
print("Error: the pixel size of the wind data doesn't match with the chosen area definition")
quit()
#checks if the area wanted is smaller or equal to the cosmo area, if not it quits
if x_min_cosmo <= x_min_wanted and x_max_cosmo >= x_max_wanted and y_min_cosmo <= y_min_wanted and y_max_cosmo >= y_max_wanted:
x_min_cut = abs(x_min_cosmo - x_min_wanted)/1000
x_max_cut = abs(x_max_cosmo - x_max_wanted)/1000
y_min_cut = abs(y_min_cosmo - y_min_wanted)/1000
y_max_cut = abs(y_max_cosmo - y_max_wanted)/1000
else:
print("Error: the area chosen ("+area+") is larger than the wind data (cosmo) area available")
quit()
return x_min_cut, x_max_cut, y_min_cut, y_max_cut
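# Worked example with assumed numbers (not read from a real COSMO file): if the COSMO grid
# spans 255000-965000 m in x after the +-500 m half-cell correction and the wanted area
# extends from 300000 to 900000 m, then x_min_cut = (300000-255000)/1000 = 45 and
# x_max_cut = (965000-900000)/1000 = 65 grid cells are cut away; y is treated analogously.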
def get_cosmo_filenames (t_sat, nrt=True, runs_before=0, area = "ccs4c2" ):
""" Provides the cosmo filename.
Parameters
----------
t_sat : datetime object
time for which the cosmo data is wanted (usually same as satellite images)
    nrt: boolean
        True if the system runs in near real time, False otherwise (depending on this flag
        the COSMO data is searched in different paths and different COSMO models are used)
    runs_before: int
        start looking for the model run runs_before*3 hours earlier than t_sat (useful if the latest run is not yet available)
Returns
----------
cosmoDir+cosmo_file1, cosmoDir+cosmo_file2 : str (paths)
returns the string corresponding to the paths where the two cosmo files are stored (the hour before and after t_sat)
Raises
----------
"""
# get COSMO model start time (assuming COSMO start each 3h, 0,3,6,9...UTC)
hour_run = t_sat.hour //3 * 3
t_run = datetime(t_sat.year, t_sat.month, t_sat.day, hour_run, 0)
# if runs_before is set to a value >0, it subtracts 3*runs_before hours to t_run
if runs_before != 0:
print(" try ", runs_before ," model start(s) before ")
t_run -= runs_before * timedelta(hours = 3)
# gets the forecasting time corresponding to the t_sat, given the t_run (hour before and hour after)
dt = t_sat - t_run
    hour_forecast1 = "%02d" % int (dt.total_seconds() // 3600)    # integer division, changed from / to // so it is Python 3 compatible
    hour_forecast2 = "%02d" % int (dt.total_seconds() // 3600 +1) # integer division
yearS, monthS, dayS, hourS, minS = string_date(t_run)
# sets model and path depending on offline or online version
if nrt:
cosmo = "cosmo-1"
cosmoDir='/data/cinesat/in/cosmo/' #2016052515_05_cosmo-1_UV_swissXXL
elif t_sat.year < 2016:
cosmo = "cosmo2"
cosmoDir='/data/COALITION2/database/cosmo/test_wind/'+yearS+monthS+dayS+"_"+cosmo+"_"+area+"/"
#20150515_cosmo2_ccs4c2 / 2015051506_00_cosmo2_UVccs4c2.nc or 2015070706_00_cosmo2_UV_ccs4c2.nc
else:
cosmo = "cosmo-1"
cosmoDir='/data/COALITION2/database/cosmo/wind/'+yearS+"/"+monthS+"/"+dayS+"/"
cosmo_file1 = yearS+monthS+dayS+hourS+"_"+hour_forecast1+"_"+cosmo+"_UV*.nc"
cosmo_file2 = yearS+monthS+dayS+hourS+"_"+hour_forecast2+"_"+cosmo+"_UV*.nc"
return cosmoDir+cosmo_file1, cosmoDir+cosmo_file2
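# Worked example (the date is an assumption): for t_sat = 2016-05-25 13:40 with nrt=True and
# runs_before=0, the model start is rounded down to 12 UTC, the lead time is 1 h 40 min, so
# hour_forecast1 = '01' and hour_forecast2 = '02', and the function returns
# '/data/cinesat/in/cosmo/2016052512_01_cosmo-1_UV*.nc' and '..._02_cosmo-1_UV*.nc'.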
def interpolate_cosmo(year, month, day, hour, minute, layers, zlevel='pressure', area='ccs4', cosmo = None, nrt = False, rapid_scan_mode_satellite = True):
""" Calculate linearly interpolated wind fields for a specific time of interest
Parameters
----------
year, month, day, hour, minute : int
        year, month, day, hour, minute for which the cosmo wind is wanted
layers: list of int
Pressure/model levels at which the cosmo wind is wanted
zlevel : 'pressure' or 'model' !!!!!!!!!! for the moment only implemented 'pressure'
        vertical coordinate of the requested layers; for the moment only 'pressure' is implemented
area: str
area identifier of the area over which the cosmo wind is wanted
cosmo : str
identifier of the cosmo model wanted
    nrt: boolean
        True if the system runs in near real time, False otherwise (depending on this flag
        the COSMO data is searched in different paths and different COSMO models are used)
rapid_scan_mode_satellite: boolean
True if the satellite is in rapid scan mode (scans every 5 minutes) and False else (every 15 minutes)
Returns
----------
    u_d, v_d : numpy arrays; p_chosen : pressures in the order actually used
        arrays containing the u and v cosmo wind components. !!!!! THE ORDER OF THE PRESSURE LEVELS IS OPPOSITE TO THAT OF THE INPUT: u_d[0,:,:] corresponds to the last pressure in the input "layers"
Raises
----------
"""
#print "WARNING: the output wind levels will be in order opposite to that in the input 'layers' "
# rough rule, if latest COSMO run is already available
if nrt == False and hour % 3 == 0 : # !!! !hau! this suggest to use 3 and 4h forecast, instead of Analysis and 1h forecast. Should it be like this?
runs_before = 1
else:
runs_before = 0
file1, file2 = get_cosmo_filenames ( datetime(year,month,day,hour,minute), nrt=nrt, runs_before = runs_before, area = area )
print("... search for ", file1, " and ", file2)
filename1 = glob.glob(file1)
filename2 = glob.glob(file2)
if len(filename1)>1 or len(filename2)>1:
print("Warning, more than one cosmo file available!!")
print("Files t1", filename1)
print("Files t2", filename2)
if len(filename1)<1 or len(filename2)<1:
print("*** Warning, found no cosmo wind data ")
# look for the result of the COSMO run before
file1, file2 = get_cosmo_filenames ( datetime(year,month,day,hour,minute),nrt = nrt, runs_before = runs_before + 1, area = area )
print("... search for preveous COSMO run: ", file1, " and ", file2)
filename1 = glob.glob(file1)
filename2 = glob.glob(file2)
if len(filename1)>1 or len(filename2)>1:
print("Warning, more than one cosmo file available!!")
print("Files t1", filename1)
print("Files t2", filename2)
elif len(filename1)<1 or len(filename2)<1:
print("*** Error, no cosmo wind data with model start ") #, str(datetime(year,month,day,hour,minute))
print(file1)
print(file2)
quit()
file_cosmo_1 = filename1[0]
file_cosmo_2 = filename2[0]
#cosmoDir='/data/cinesat/in/cosmo' #2016052400_03_cosmo-1_UV_swissXXL.nc
print('... read ', file_cosmo_1)
print('... read ', file_cosmo_2)
nc_cosmo_1 = netCDF4.Dataset(file_cosmo_1,'r',format='NETCDF4')
nc_cosmo_2 = netCDF4.Dataset(file_cosmo_2,'r',format='NETCDF4')
pressure1 = nc_cosmo_1.variables['z_1'][:]
pressure1 = pressure1.astype(int)
pressure2 = nc_cosmo_2.variables['z_1'][:]
pressure2 = pressure2.astype(int)
print(" pressure levels in file1: ", pressure1)
print(" pressure levels in file2: ", pressure2)
print(" pressures chosen: ", layers)
u_all1 = nc_cosmo_1.variables['U'][:]
v_all1 = nc_cosmo_1.variables['V'][:]
u_all2 = nc_cosmo_2.variables['U'][:]
v_all2 = nc_cosmo_2.variables['V'][:]
nx1 = u_all1.shape[2]
ny1 = u_all1.shape[3]
nx2 = u_all2.shape[2]
ny2 = u_all2.shape[3]
# check if data in COSMO file covers the whole area of interest
x_min_cut1, x_max_cut1, y_min_cut1, y_max_cut1 = check_cosmo_area (nc_cosmo_1, area)
x_min_cut2, x_max_cut2, y_min_cut2, y_max_cut2 = check_cosmo_area (nc_cosmo_2, area)
if len(layers)>1:
p_chosen = np.sort(layers)[::-1]
print("layers ", layers)
print("p_chosen ", p_chosen)
#quit()
else:
p_chosen = layers
#if nrt:
# p_chosen *= 100 # 100 == convert hPa to Pa
u_d = np.zeros((len(p_chosen),nx1,ny1))
v_d = np.zeros((len(p_chosen),nx1,ny1))
# time it takes to scan the disk
if rapid_scan_mode_satellite:
dt = 2
else:
dt = 12
position_t = (minute+dt)/5
previous = 1-(1./12*position_t)
# !!! !hau! this is not nice
if p_chosen[0] not in pressure1: # pressure1.all() != p_chosen[0]:
print("no value in: ", pressure1, "is equal to p_chosen: ",p_chosen[0])
for elem in range(len(p_chosen)):
p_chosen[elem]*=100 # convert hPa to Pa
for g in range(len(p_chosen)):
#print " interpolate wind for ", p_chosen[g]
#print np.where(pressure1==p_chosen[g])
#print pressure1
# search index, where the pressure is equal to the pressure of interest (might be different for file1 and file2)
i1 = np.where(pressure1==p_chosen[g])[0][0]
i2 = np.where(pressure2==p_chosen[g])[0][0]
print("... temporal interpolation for wind field at", p_chosen[g])
print(g, len(p_chosen), np.where(pressure1==p_chosen[g])[0][0])
u1 = u_all1[0, i1, x_max_cut1 : nx1-x_min_cut1, y_min_cut1 : ny1 - y_max_cut1]
u2 = u_all2[0, i2, x_max_cut2 : nx2-x_min_cut2, y_min_cut2 : ny2 - y_max_cut2] #### UH index changed i1 -> i2 !!! ###
v1 = v_all1[0, i1, x_max_cut1 : nx1-x_min_cut1, y_min_cut1 : ny1 - y_max_cut1] #### UH index changed i2 -> i1 !!! ###
v2 = v_all2[0, i2, x_max_cut2 : nx2-x_min_cut2, y_min_cut2 : ny2 - y_max_cut2]
u_d[g,:,:] = previous*u1 + (1-previous)*u2
v_d[g,:,:] = previous*v1 + (1-previous)*v2
print("*** u_d[0].shape", u_d.shape[1], u_d.shape[2])
print("previous ", previous)
return u_d, v_d, p_chosen
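# Sketch of the temporal weighting used in interpolate_cosmo (assumption: the two hourly
# COSMO fields are blended linearly, with 'previous' being the weight of the earlier
# forecast hour). Values are hypothetical; dt approximates the satellite scan offset.
def _example_temporal_weight(minute, rapid_scan=True):
    dt = 2 if rapid_scan else 12           # minutes, as in interpolate_cosmo
    position_t = (minute + dt) / 5.0       # position within the hour in 5 min steps
    previous = 1 - (1. / 12 * position_t)  # weight of the field one hour before
    return previous, 1 - previous          # weights of file1 and file2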
def calculate_displacement(u_d,v_d,n_levels,size_x,size_y,ForecastTime,NumComputationSteps):
print("***")
print("*** calculate displacement")
dx_d = np.zeros(u_d.shape)
dy_d = np.zeros(v_d.shape)
for level in range(n_levels): # !!!!!!!
#for level in [0]:
print("... calculate displacement for level ", level, n_levels)
u = u_d[level,:,:]
v = v_d[level,:,:]
max_x = u.shape[0]
max_y = u.shape[1]
x_matrix = np.array([[i*size_x+size_x/2 for i in range(u.shape[0])],]*max_y).transpose()
y_matrix = np.array([[i*size_y+size_y/2 for i in range(u.shape[1])],]*max_x)
x = np.reshape(x_matrix,x_matrix.size)
y = np.reshape(y_matrix,y_matrix.size)
xy1 = np.column_stack((x,y))
dt = float(ForecastTime)/float(NumComputationSteps)
a=np.zeros(xy1.shape)
xy2=deepcopy(xy1)
# !!! SLOW !!!
for num_compSteps in range(NumComputationSteps):
xy2 = particles_displacement(u_func=v*(-1), v_func=u,method=method,dt=dt*60,pts=xy2, size_x=size_x, size_y=size_y, wind_source=wind_source) #u_func=v*(-1)
c = 0
for item in xy2: #because output particles displacement is a list
a[c,:]=item
c+=1
xy2=a
x_matrix1 = np.reshape(xy2[:,0],x_matrix.shape)
y_matrix1 = np.reshape(xy2[:,1],y_matrix.shape)
dx_d[level,:,:] = x_matrix1-x_matrix
dy_d[level,:,:] = y_matrix1-y_matrix
return (xy1, xy2, dx_d, dy_d, x_matrix, y_matrix)
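# Sketch of the pixel-centre grid built inside calculate_displacement (hypothetical
# 2x2 grid with 1000 m pixels): x_matrix / y_matrix hold the metric coordinates of
# every pixel centre, which are then advected with the wind field step by step.
def _example_pixel_centre_grid():
    size, n = 1000.0, 2
    x_matrix = np.array([[i * size + size / 2 for i in range(n)], ] * n).transpose()
    y_matrix = np.array([[i * size + size / 2 for i in range(n)], ] * n)
    return x_matrix, y_matrix   # x_matrix -> [[500, 500], [1500, 1500]]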
def add_points_outside(forecast,x_matrix,y_matrix, xy_inside):
x_new = np.where(np.isnan(forecast),x_matrix,np.nan)
y_new = np.where(np.isnan(forecast),y_matrix,np.nan)
x_new = np.reshape(x_new,x_new.size)
y_new = np.reshape(y_new,y_new.size)
xy_new = np.column_stack((x_new,y_new))
xy_new = xy_new[~np.isnan(xy_new).any(axis=1)]
print("*** add new particles for empty pixels")
#store tracking coordinates for next step of this level
print("xy2.shape, xy_new.shape", xy2.shape, xy_new.shape)
xy_new = np.vstack((xy2, xy_new))
print("... number of particles for next step (xy_new.shape)", xy_new.shape)
xy_levels = deepcopy(xy_new)
return xy_levels
def nowcastRGB(forecast1,xy1_px,xy2_px):
forecast2 = deepcopy(forecast1)
forecast2[:,:] = np.nan
# search no clouds in the old data
ind_no_cl = np.where ( forecast1[xy1_px[:,0],xy1_px[:,1]] == no_data )
# copy values only for not cloudy pixels
forecast2[xy2_px[ind_no_cl,0],xy2_px[ind_no_cl,1]] = no_data
# search clouds in the old data
ind_cl = np.where ( forecast1[xy1_px[:,0],xy1_px[:,1]] > 0 )
# copy values only for cloudy pixels
forecast2[xy2_px[ind_cl,0],xy2_px[ind_cl,1]] = forecast1[xy1_px[ind_cl,0],xy1_px[ind_cl,1]]
return forecast2
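# Sketch of the index-based advection performed in nowcastRGB (hypothetical 3x3 field):
# values are copied from their old pixel positions (xy1_px) to the advected positions
# (xy2_px); pixels that receive no value stay NaN and are later filled by
# fill_with_closest_pixel().
def _example_index_advection():
    field_t0 = np.full((3, 3), np.nan)
    field_t0[0, 0] = 250.0              # one cloudy pixel at t0
    src = np.array([[0, 0]])            # its position at t0
    dst = np.array([[1, 1]])            # its advected position one step later
    field_t1 = np.full_like(field_t0, np.nan)
    field_t1[dst[:, 0], dst[:, 1]] = field_t0[src[:, 0], src[:, 1]]
    return field_t1                     # 250.0 has moved from (0, 0) to (1, 1)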
def load_rgb(satellite, satellite_nr, satellites_name, time_slot, rgb, area, in_msg, data_CTP):
if rgb != 'CTP':
# read the data we would like to forecast
global_data_RGBforecast = GeostationaryFactory.create_scene(in_msg.sat_str(),in_msg.sat_nr_str(), "seviri", time_slot)
#global_data_RGBforecast = GeostationaryFactory.create_scene(in_msg.sat, str(10), "seviri", time_slot)
# area we would like to read
area_loaded = get_area_def("EuropeCanary95")#(in_windshift.areaExtraction)
# load product, global_data is changed in this step!
area_loaded = load_products(global_data_RGBforecast, [rgb], in_msg, area_loaded)
print('... project data to desired area ', area)
fns = global_data_RGBforecast.project(area, precompute=True)
else:
fns = deepcopy(data_CTP["CTP"].data)
return fns[rgb].data
def initial_xy(x_matrix,y_matrix):
x = np.reshape(x_matrix,x_matrix.size)
y = np.reshape(y_matrix,y_matrix.size)
xy1 = np.column_stack((x,y))
return xy1 #store coordinate centers xy=[x_center,y_center] xy_levels[level,:,:]
def compute_new_xy(xy1, dx_ds, dy_ds, max_x_m, max_y_m):
xy2 = np.zeros(xy1.shape)
#conversion to pixel to obtain corresponding dx and dy
xy1_px = np.zeros(xy1.shape, dtype=np.int) ### initialize as integer ###
xy1_px[:,0] = m_to_pixel(xy1[:,0],size_x,'to_pixel')
xy1_px[:,1] = m_to_pixel(xy1[:,1],size_y,'to_pixel')
print('... calculate new particle positions')
xy2 = np.zeros(xy1.shape)
xy2[:,0] = xy1[:,0] + dx_ds[level, xy1_px[:,0], xy1_px[:,1] ]
xy2[:,1] = xy1[:,1] + dy_ds[level, xy1_px[:,0], xy1_px[:,1] ]
# remove particles outside the domain
print("... limits before removing: ", xy2.min(), xy2.max(), end=' ')
ind_inside = np.where( np.logical_and( np.logical_and(0<=xy2[:,0],xy2[:,0]<max_x_m), np.logical_and(0<=xy2[:,1],xy2[:,1]<max_y_m) ) )
print(type(ind_inside))
print(np.array(ind_inside).max(), np.array(ind_inside).min())
print(np.array(ind_inside).shape)
xy2 = np.squeeze(xy2[ind_inside,:])
xy1_px = np.squeeze(xy1_px[ind_inside,:])
print("... limits after removing: ", xy2.min(), xy2.max(), end=' ')
print("... number of particles after removing those outside (xy2_px.shape)", xy2.shape)
#convert xy2 in pixel to do the shift for each channel
xy2_px = np.zeros(xy2.shape, dtype=np.int) ### initialize as integer ###
xy2_px[:,0] = m_to_pixel(xy2[:,0],size_x,'to_pixel')
xy2_px[:,1] = m_to_pixel(xy2[:,1],size_y,'to_pixel')
return (xy1_px, xy2_px, xy2)
def mask_rgb_based_pressure(data,p_min,p_max,data_CTP):
####################################data[data.mask==True ] = no_data
data = np.where(np.logical_or(data_CTP['CTP'].data>=p_max,data_CTP['CTP'].data<p_min),no_data,data)
return data
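# Sketch of the pressure-window masking above (hypothetical values): pixels whose
# cloud-top pressure falls outside [p_min, p_max) are replaced by a fill value, so
# each vertical layer only keeps the clouds that belong to it.
def _example_pressure_mask(fill_value=-1000000000):
    ctp = np.array([[300., 650., 900.]])     # cloud-top pressure in hPa
    field = np.array([[220., 240., 260.]])   # e.g. cloud-top temperatures in K
    return np.where(np.logical_or(ctp >= 700, ctp < 500), fill_value, field)
    # -> [[fill_value, 240., fill_value]]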
##########################################################################################
##########################################################################################
def print_usage():
print("*** ")
print("*** Error, not enough command line arguments")
print("*** please specify at least an input file")
print("*** possible calls are:")
print("*** python "+inspect.getfile(inspect.currentframe())+" input_coalition2 ")
print("*** python "+inspect.getfile(inspect.currentframe())+" input_coalition2 2014 07 23 16 10 ")
print(" date and time must be completely given")
print("*** ")
quit() # quit at this point
#----------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
time_start_TOT = time.time()
detailed = True
from get_input_msg import get_date_and_inputfile_from_commandline
in_msg = get_date_and_inputfile_from_commandline(print_usage=print_usage)
print(in_msg.dt_forecast1)
print(in_msg.dt_forecast2)
if len(sys.argv) > 7:
if len(sys.argv) <12:
print_usage()
else:
yearSTOP = int(sys.argv[7])
monthSTOP = int(sys.argv[8])
daySTOP = int(sys.argv[9])
hourSTOP = int(sys.argv[10])
minuteSTOP = int(sys.argv[11])
time_slotSTOP = datetime(yearSTOP, monthSTOP, daySTOP, hourSTOP, minuteSTOP)
else:
time_slotSTOP = in_msg.datetime
time_slot = in_msg.datetime
print("")
print("*** define more input parameters")
area = in_msg.area_forecast
ntimes=2 #in_windshift.ntimes
print("... aggregate winddata for ", ntimes, " timesteps")
min_correlation = 85 #in_windshift.min_correlation
min_conf_nwp = 80 #in_windshift.min_conf_nwp
min_conf_no_nwp = 80 #in_windshift.min_conf_no_nwp
cloud_type = [5,6,7,8,9,10,11,12,13,14] #in_windshift.cloud_type
delay = 5
rgbs = ['CTT'] #['WV_062','WV_073','IR_039','IR_087','IR_097','IR_108','IR_120','IR_134','CTT'] #in_windshift.rgb
# in_msg.nwcsaf_calibrate = True
rgbs_only15min = ['IR_039','IR_087','IR_120']
#channel = rgb.replace("c","")
name_tmp = "3layer"
# load a few standard things
#from get_input_msg import get_input_msg
#in_msg = get_input_msg('input_coalition2')
in_msg.resolution = 'i'
#in_msg.fill_value = None # transparent
#colormap='rainbow'
colormap='greys'
rapid_scan_mode = in_msg.forecasts_in_rapid_scan_mode
dt_forecast1 = in_msg.dt_forecast1
dt_forecast2 = in_msg.dt_forecast2
if rapid_scan_mode == True:
print("... RAPID SCAN MODE")
else:
print("... NOT RAPID SCAN MODE")
dt_forecast1S = "%02d" % dt_forecast1
dt_forecast2S = "%02d" % dt_forecast2
ForecastTime = 5 #time in minutes from observation at t=0 when you want each observation (first forecast after ForecastTime, second after 2*ForecastTime...)
NumComputationSteps = 1 #number of computation time steps: the number of steps when the velocity should be updated within each ForecastTime
    NumForecast = dt_forecast2//ForecastTime #number of forecasts you want to produce from observation at t=0 (integer division so that range() below works in Python 3)
mode_downscaling = in_msg.settingsLocal['mode_downscaling']
if mode_downscaling != 'no_downscaling':
downscaling_data = True
else:
downscaling_data = False
method=in_msg.integration_method_velocity
pressure_limits = in_msg.pressure_limits
n_levels=len(pressure_limits)+1
wind_source = in_msg.wind_source
zlevel = in_msg.zlevel
if wind_source=="cosmo":
if zlevel == 'pressure':
#layers= [800,500,300]#[700,500,300]#[900,800,700,600,500,400,300,200,100] #[700,500,300]#[100] # [400,300,100]#[600,300,100]##pressure layers
layers= [800,700,600]
elif zlevel == 'modellevel':
layers=[36,24,16] #cosmo model layers
else:
print("*** Error in main ("+inspect.getfile(inspect.currentframe())+")")
print(" unknown zlevel", zlevel)
quit()
# ------------------------------------------
outDir_completed = 0
in_msg.mapDir = "/opt/users/common/shapes/"
cw = ContourWriterAGG(in_msg.mapDir)
# define area
obj_area = get_area_def("ccs4")
proj4_string = obj_area.proj4_string
# e.g. proj4_string = '+proj=geos +lon_0=0.0 +a=6378169.00 +b=6356583.80 +h=35785831.0'
area_extent = obj_area.area_extent
# e.g. area_extent = (-5570248.4773392612, -5567248.074173444, 5567248.074173444, 5570248.4773392612)
area_tuple = (proj4_string, area_extent)
print("in_msg.nwcsaf_calibrate ", in_msg.nwcsaf_calibrate)
print("")
print("*** start production of forecasts")
while time_slot <= time_slotSTOP:
print("process "+str(time_slot))
outputDir = time_slot.strftime(in_msg.outputDirForecasts)
in_msg.datetime = time_slot
if False:
if type(in_msg.sat_nr) is int:
if in_msg.sat[0:8]=="meteosat":
sat_nr_str = str(in_msg.sat_nr).zfill(2)
elif in_msg.sat[0:8]=="Meteosat":
sat_nr_str = str(in_msg.sat_nr)
elif type(in_msg.sat_nr) is str:
sat_nr_str = in_msg.sat_nr
if in_msg.sat[0:8]=="Meteosat":
sat_nr_str = str(int(sat_nr_str)) # get rid of leading zeros (0)
else:
print("*** Waring, unknown type of sat_nr", type(in_msg.sat_nr))
sat_nr_str = in_msg.sat_nr
#print in_msg.sat, " and ", sat_nr_str
#in_msg.sat_nr_str = sat_nr_str
year = time_slot.year
month = time_slot.month
day = time_slot.day
hour = time_slot.hour
minute = time_slot.minute
yearS = str(year)
#yearS = yearS[2:]
monthS = "%02d" % month
dayS = "%02d" % day
hourS = "%02d" % hour
minS = "%02d" % minute
dateS = yearS+'-'+monthS+'-'+dayS
timeS = hourS+':'+minS+" UTC"
# define area object
obj_area = get_area_def(area)#(in_windshift.ObjArea)
size_x = obj_area.pixel_size_x
size_y = obj_area.pixel_size_y
#print obj_area
print("area extent:\n", obj_area.area_extent)
print("x min ", obj_area.area_extent[0])
print("x size ", obj_area.pixel_size_x)
# check if input data is complete
if in_msg.verbose:
print("*** check input data", in_msg.RGBs)
#RGBs = check_input(in_msg, in_msg.sat+sat_nr_str, in_msg.datetime)
# in_msg.sat_nr might be changed to backup satellite
for i_try in range(30):
# check if 'CTH' file is present
RGBs = check_input(in_msg, in_msg.sat_str()+in_msg.sat_nr_str(), in_msg.datetime, RGBs=in_msg.RGBs)
if len(RGBs) > 0:
# exit loop, if input is found
break
else:
                # else wait 25 s and try again
import time
time.sleep(25)
for i_try in range(30):
# check if 'CTH' file is present
RGBs = check_input(in_msg, in_msg.sat_str()+in_msg.sat_nr_str(), in_msg.datetime, RGBs="CTP")
if len(RGBs) > 0:
# exit loop, if input is found
break
else:
                # else wait 25 s and try again
import time
time.sleep(25)
# read CTP to distinguish high, medium and low clouds
print(("*** read data for ", in_msg.sat_str(), in_msg.sat_nr_str(), "seviri", time_slot))
global_data_CTP = GeostationaryFactory.create_scene(in_msg.sat_str(),in_msg.sat_nr_str(), "seviri", time_slot)
#global_data_CTP = GeostationaryFactory.create_scene(in_msg.sat, str(10), "seviri", time_slot)
#area_loaded = get_area_def("EuropeCanary95") #(in_windshift.areaExtraction)
area_loaded = load_products(global_data_CTP, ['CTP'], in_msg, get_area_def("ccs4"))
data_CTP = global_data_CTP.project(area, precompute=True)
[nx,ny]=data_CTP['CTP'].data.shape
if 'pressure_levels' in in_msg.aux_results:
tmp_press = deepcopy(in_msg.pressure_limits)
tmp_press.append(1001)
tmp_press.sort() #ordered decreasing
print(tmp_press)
n_levels_pressure = len(tmp_press)
p_levels = np.zeros(data_CTP['CTP'].data.shape)
p_levels[:,:]=np.nan
p_min = 0
print((" n_levels_pressure (should be 3): ", n_levels_pressure))
print(" unique pressure levels: ",np.unique(p_levels))
for i_plot_press in range(n_levels_pressure):
p_max = tmp_press[i_plot_press]
print((" p_min: ", p_min, "p_max: ",p_max))
p_levels[np.where(np.logical_and(data_CTP['CTP'].data>p_min,data_CTP['CTP'].data<=p_max))] = i_plot_press + 2
print(" unique: ",np.unique(p_levels))
print(" index: ", i_plot_press)
p_min = deepcopy(p_max)
if True:
fig = plt.figure()
plt.imshow(p_levels[20:nx-40,85:ny-135],cmap = plt.get_cmap("Blues"), vmin = 0)
plt.axis('off')
plt.colorbar()
plt.show()
plt.savefig("test_Pressure.png")
#plt.savefig("/data/COALITION2/PicturesSatellite/LEL_results_wind//"+yearS+"-"+monthS+"-"+dayS+"/channels_fig//PressureLevels_"+yearS+monthS+dayS+hourS+minS+".png")
plt.close(fig)
quit()
else:
img = trollimage.image.Image(p_levels[20:nx-40,85:ny-135], mode="L", fill_value=None) #fill_value,[1,1,1], None
from trollimage.colormap import rdbu
#jet.set_range(
img.colorize(rdbu)
pil_im = img.pil_image()
#pil_im = array2PIL(p_levels[20:nx-40,85:ny-135], p_levels[20:nx-40,85:ny-135].size)
pil_im = add_borders_and_rivers( pil_im, cw, area_tuple,
add_borders=in_msg.add_borders, border_color=in_msg.border_color,
add_rivers=in_msg.add_rivers, river_color=in_msg.river_color,
resolution=in_msg.resolution, verbose=in_msg.verbose)
pil_im.save("test_Pressure.png")#"/data/COALITION2/PicturesSatellite/LEL_results_wind//"+yearS+"-"+monthS+"-"+dayS+"/channels_fig//PressureLevels_"+yearS+monthS+dayS+hourS+minS+".png")
quit()
fig = plt.figure()
plt.imshow(data_CTP['CTP'].data[20:nx-40,85:ny-135],cmap = plt.get_cmap("Blues_r"))
plt.axis('off')
plt.colorbar()
plt.show()
plt.savefig("/data/COALITION2/PicturesSatellite/LEL_results_wind//"+yearS+"-"+monthS+"-"+dayS+"/channels_fig//Pressure_"+yearS+monthS+dayS+hourS+minS+".png")
plt.close(fig)
print(np.unique(p_levels))
# read all rgbs
print(("*** read data for ", in_msg.sat_str(),in_msg.sat_nr_str(), "seviri", time_slot))
global_data = GeostationaryFactory.create_scene(in_msg.sat_str(), in_msg.sat_nr_str(), "seviri", time_slot)
#global_data_CTP = GeostationaryFactory.create_scene(in_msg.sat, str(10), "seviri", time_slot)
area_loaded = get_area_def("EuropeCanary95") #(in_windshift.areaExtraction)
area_loaded = load_products(global_data, rgbs, in_msg, area_loaded)
data = global_data.project(area, precompute=True)
if 'forecast_channels' in in_msg.aux_results:
for rgb_plot in rgbs:
fig = plt.figure()
plt.imshow(data[rgb_plot].data[20:nx-40,85:ny-135], vmin=220, vmax=290) #forecasts_out[channel_nr[rgb],ind_time,:,:]>0)
plt.axis('off')
plt.colorbar()
plt.show()
plt.savefig("/data/COALITION2/PicturesSatellite/LEL_results_wind//"+yearS+"-"+monthS+"-"+dayS+"/channels_fig//"+rgb_plot+"_"+yearS+monthS+dayS+hourS+minS+"_Obs.png")
plt.close(fig)
if False:
from trollimage.image import Image as trollimage
from trollimage.colormap import rainbow
prop = data["IR_108"].data
min_data = prop.min()
max_data = prop.max()
colormap = deepcopy(rainbow)
colormap.set_range(min_data, max_data)
img = trollimage(prop, mode="L", fill_value=[0,0,0])
img.colorize(colormap)
img.show()
quit()
if downscaling_data == True:
from plot_coalition2 import downscale
if "CTT" in rgbs:
mask_Safe = deepcopy(data['CTT'].data.mask)
elif "CTP" in rgbs:
mask_Safe = deepcopy(data['CTP'].data.mask)
else:
mask_Safe = deepcopy(data[rgbs[0]].data.mask)
data = downscale(deepcopy(data), mode = mode_downscaling, mask = mask_Safe)
if 'forecast_channels' in in_msg.aux_results:
for rgb_plot in rgbs:
fig = plt.figure()
plt.imshow(data[rgb_plot].data[20:nx-40,85:ny-135],vmin = 220, vmax = 290) #forecasts_out[channel_nr[rgb],ind_time,:,:]>0)
plt.axis('off')
plt.colorbar()
plt.show()
plt.savefig("/data/COALITION2/PicturesSatellite/LEL_results_wind//"+yearS+"-"+monthS+"-"+dayS+"/channels_fig//"+rgb_plot+"_"+yearS+monthS+dayS+hourS+minS+"_ObsDownscaled.png")
plt.close(fig)
print("")
print("*** read wind fields")
if wind_source=="HRW":
            u_d=np.zeros((n_levels,nx,ny))
            v_d=np.zeros((n_levels,nx,ny))
            p_min = 1001   # initialize above surface pressure (otherwise p_max=p_min below is undefined on the first pass)
            for level in range(n_levels):
p_max=p_min
if level==n_levels-1:
p_min=0
else:
p_min=pressure_limits[len(pressure_limits)-1-level]
hrw_data = read_HRW(in_msg.sat_str(), in_msg.sat_nr_str(), "seviri", time_slot, ntimes, \
min_correlation=min_correlation, min_conf_nwp=min_conf_nwp, \
min_conf_no_nwp=min_conf_no_nwp, cloud_type=cloud_type, p_limits=[p_min,p_max])
# choose basic or detailed (and get a fresh copy)
if detailed:
print('... calculate gridded 2d wind field High')
hrw_detbas = hrw_data['HRW'].HRW_detailed
else:
print('... calculate gridded 2d wind field High')
hrw_detbas = hrw_data['HRW'].HRW_basic
u_d[level,:,:], v_d[level,:,:] = HRW_2dfield( hrw_detbas, obj_area )
elif wind_source=="cosmo":
u_d, v_d, pressures_wind = interpolate_cosmo(year, month, day, hour, minute,
layers, zlevel, area, nrt=in_msg.nrt, rapid_scan_mode_satellite = True)
else:
print("*** Error in main ("+inspect.getfile(inspect.currentframe())+")")
print(" unknown wind source ", wind_source)
quit()
### calculate particle displacement ###
(xy1s,xy2s,dx_ds,dy_ds,x_matrixs,y_matrixs) = calculate_displacement(u_d,v_d,n_levels,size_x,size_y,ForecastTime,NumComputationSteps)
### prepare dictionary for channel numbers:
channel_nr={}
for i in range(len(rgbs)):
rgb=rgbs[i]
channel_nr[rgb]=i
p_min = 1001
no_data = -1000000000
#xy_levels = np.zeros((n_levels,nx*ny,2))
print("nx ",nx)
print("ny ")
forecasts_out = np.zeros((len(channel_nr),2,nx,ny))
forecasts_NextStep = np.zeros((len(channel_nr),n_levels,nx,ny))
max_x_m = nx*size_x
max_y_m = ny*size_y
for level in range(n_levels):
print("... calculation for level ", level)
p_max = p_min
if level==n_levels-1:
p_min = 0
else:
p_min = pressure_limits[len(pressure_limits)-1-level]
if p_min == p_max:
continue
if pressures_wind[level] > p_max or pressures_wind[level] < p_min:
print("ERROR: you are moving the clouds at a certain level (%s-%s) with wind from another level (%s)"%(str(p_min),str(p_max),str(pressures_wind[level])))
quit()
u = u_d[level,:,:]
v = v_d[level,:,:]
for t in range(1,NumForecast+1):
print("*** timestep ",t," of ",NumForecast)
if t==1: #if first timestep create x and y matrices covering entire image
xy_levels = initial_xy(x_matrixs,y_matrixs)
xy1 = deepcopy(xy_levels) #xy_levels[level,:,:] #take the initial coordinates from array storage (step before)
(xy1_px, xy2_px, xy2) = compute_new_xy (xy1, dx_ds, dy_ds, max_x_m, max_y_m)
for rgb_num in range(len(rgbs)):
rgb=rgbs[rgb_num]
if t==1:
forecasts_NextStep[channel_nr[rgb],level,:,:] = mask_rgb_based_pressure(data[rgb].data,p_min,p_max, data_CTP)
#check if for current channel (rgb) you also need the 30 min forecast
if t*ForecastTime > dt_forecast1:
if any(rgb in s for s in rgbs_only15min):
continue
forecast1 = forecasts_NextStep[channel_nr[rgb],level,:,:]
print("*** calculating the nowcasted values of ", rgb)
forecast2 = nowcastRGB(forecast1,xy1_px,xy2_px)
#get coordinates of points that are nan before interpolation (are not in xy anymore because they went outside or come from outside)
if rgb_num == 0: #only need to do it once, same for all channels!!
xy_levels = add_points_outside(forecast2,x_matrixs,y_matrixs, xy2)
forecast2 = fill_with_closest_pixel(forecast2)
forecasts_NextStep[channel_nr[rgb],level,:,:] = forecast2
if t*ForecastTime == dt_forecast1 or t*ForecastTime == dt_forecast2:
if t*ForecastTime == dt_forecast1:
ind_time = 0
else:
ind_time = 1
print("forecast 2: ",forecast2.shape)
print("forecast out: ",forecasts_out.shape)
#forecasts_out[channel_nr[rgb],ind_time,np.where(forecast2!=no_data)] = forecast2[np.where(forecast2!=no_data)]
temp = deepcopy(forecasts_out[channel_nr[rgb],ind_time,:,:])
temp [forecast2!=no_data] = forecast2[forecast2!=no_data]
print("temp ",temp.shape)
forecasts_out[channel_nr[rgb],ind_time,:,:] = deepcopy(temp) #np.where(forecast2!=no_data,forecast2,forecasts_out)
if level == (n_levels-1) or p_min == 0:
forecasts_out[channel_nr[rgb],ind_time,forecasts_out[channel_nr[rgb],ind_time,:,:]==no_data] = np.nan
forecasts_out[channel_nr[rgb],ind_time,:,:] = ma.masked_invalid(forecasts_out[channel_nr[rgb],ind_time,:,:])
# time_slot.strftime( outputDir )
if not in_msg.nrt and outDir_completed == 0:
outputDir = outputDir+"/"+ yearS+"-"+monthS+"-"+dayS+"/channels/"
outDir_completed = 1
outputFile = outputDir +"/"+ "%s_%s_%s_t%s.p" % (yearS+monthS+dayS,hourS+minS,rgb,str(t*ForecastTime))
#outputFile = "/opt/users/lel/PyTroll/scripts/channels/%s_%s_%s_t%s.p" % (yearS+monthS+dayS,hourS+minS,rgb,str(t*ForecastTime))
#####outputFile = "pickles/%s_%s_%s_t%s_%s.p" % (yearS+monthS+dayS,hourS+minS,rgb,str(t*ForecastTime),name_tmp)
print("... pickle data to file: ", outputFile)
PIK = []
if area == "ccs4":
PIK.append( forecasts_out[channel_nr[rgb],ind_time,:,:])
elif area == "ccs4c2":
PIK.append( forecasts_out[channel_nr[rgb],ind_time,20:nx-40,85:ny-135])
else:
print("unknown area, saving entire domain")
PIK.append( forecasts_out[channel_nr[rgb],ind_time,:,:])
PIK.append(mode_downscaling)
print(mode_downscaling)
pickle.dump(PIK, open(outputFile,"wb"))
plot_fore = forecasts_out[channel_nr[rgb],ind_time,:,:]
plot_fore = np.where (plot_fore>0,plot_fore,np.nan)
print("makes it before plot")
if 'forecast_channels' in in_msg.aux_results:
fig = plt.figure()
plt.imshow(plot_fore[20:nx-40,85:ny-135],vmin = 220, vmax = 290) #forecasts_out[channel_nr[rgb],ind_time,:,:]>0)
plt.colorbar()
if wind_source=="HRW":
plt.title("%s, %s, New Velocity every step,\n Displacement in Meters, (HRW %s min):\n t0 + %s"%(rgb,method,ntimes*5,str(t*ForecastTime)))
else:
plt.title("%s, %s, New Velocity every step,\n Displacement in Meters, (cosmo):\n t0 + %s"%(rgb,method,str(t*ForecastTime)))
if wind_source=="HRW":
name_to_save = "HRW%smin"%(ntimes*5)
elif zlevel == "pressure":
name_to_save = "cosmoPL"
elif zlevel == "modellevel":
name_to_save = "cosmoPL"
else:
name_to_save = "cosmoML"
plt.axis('off')
#plt.tick_params(
# axis='both', # changes apply to both axis
# which='both', # both major and minor ticks are affected
# bottom='off', # ticks along the bottom edge are off
# top='off', # ticks along the top edge are off
# labelbottom='off') # labels along the bottom edge are off
time_string = "%02d" % (t*ForecastTime)
####outputFig = "/data/COALITION2/PicturesSatellite/LEL_results_wind//"+yearS+"-"+monthS+"-"+dayS+"/channels_fig//%s_%s_t%s_%s_DisplMeter_%s.png"%(rgb,yearS+dayS+hourS+minS,time_string,method,name_to_save) #time_string,
outputFig = "/data/COALITION2/PicturesSatellite/LEL_results_wind//"+yearS+"-"+monthS+"-"+dayS+"/channels_fig//%s_%s_t%s_%s.png"%(rgb,yearS+dayS+hourS+minS,time_string,name_tmp) #time_string,
fig.savefig(outputFig)
plt.close(fig)
print("TOTAL TIME: ", time.time()-time_start_TOT)
print("Final check lead time:", dt_forecast1)
print(dt_forecast2)
time_slot = time_slot + timedelta(minutes=5)
|
meteoswiss-mdr/monti-pytroll
|
scripts/produce_forecasts_develop.py
|
Python
|
lgpl-3.0
| 49,128
|
[
"NetCDF"
] |
b32e8e93c5b5c1d51637f5bb734bf0848335e996660b8defa56f6ee185de49a3
|
#!/usr/bin/env python
# -*- coding: utf-8
# Functions that perform mathematical operations on an image.
import logging
import numpy as np
from skimage.morphology import erosion, dilation, disk, ball, square, cube
from skimage.filters import threshold_local, threshold_otsu
from scipy.ndimage.filters import gaussian_filter, gaussian_laplace
from scipy.stats import pearsonr, spearmanr
from dipy.denoise.noise_estimate import estimate_sigma
from dipy.segment.mask import median_otsu
from dipy.denoise.nlmeans import nlmeans
from sklearn.metrics import normalized_mutual_info_score, mutual_info_score
from spinalcordtoolbox.image import Image
logger = logging.getLogger(__name__)
ALMOST_ZERO = 0.000000001
def _get_selem(shape, size, dim):
"""
Create structuring element of desired shape and radius
:param shape: str: Shape of the structuring element. See available options below in the code
:param size: int: size of the element.
:param dim: {0, 1, 2}: Dimension of the array which 2D structural element will be orthogonal to. For example, if
you wish to apply a 2D disk kernel in the X-Y plane, leaving Z unaffected, parameters will be: shape=disk, dim=2.
:return: numpy array: structuring element
"""
# TODO: enable custom selem
if shape == 'square':
selem = square(size)
elif shape == 'cube':
selem = cube(size)
elif shape == 'disk':
selem = disk(size)
elif shape == 'ball':
selem = ball(size)
else:
ValueError("This shape is not a valid entry: {}".format(shape))
if not (len(selem.shape) in [2, 3] and selem.shape[0] == selem.shape[1]):
raise ValueError("Invalid shape")
# If 2d kernel, replicate it along the specified dimension
if len(selem.shape) == 2:
selem3d = np.zeros([selem.shape[0]] * 3)
imid = np.floor(selem.shape[0] / 2).astype(int)
if dim == 0:
selem3d[imid, :, :] = selem
elif dim == 1:
selem3d[:, imid, :] = selem
elif dim == 2:
selem3d[:, :, imid] = selem
else:
raise ValueError("dim can only take values: {0, 1, 2}")
selem = selem3d
return selem
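# A minimal sketch (not part of the original module): a 2D disk of radius 1 placed in
# the X-Y plane of a 3x3x3 kernel, i.e. shape='disk', size=1, dim=2. Only the middle
# Z-slice of the returned element is non-zero.
def _example_get_selem():
    selem = _get_selem('disk', 1, 2)
    return selem.shape, selem[:, :, 1]   # (3, 3, 3), cross-shaped disk of radius 1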
def dice(im1, im2):
"""
Computes the Dice coefficient, a measure of set similarity.
:param im1 : array-like, bool\
Any array of arbitrary size. If not boolean, will be converted.
:param im2 : array-like, bool\
Any other array of identical size. If not boolean, will be converted.
:return dice : float\
Dice coefficient as a float on range [0,1].\
Maximum similarity = 1\
No similarity = 0
.. note::
The order of inputs for `dice` is irrelevant. The result will be
identical if `im1` and `im2` are switched.
Source: https://gist.github.com/JDWarner/6730747
"""
im1 = np.asarray(im1).astype(np.bool)
im2 = np.asarray(im2).astype(np.bool)
if im1.shape != im2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
# Compute Dice coefficient
intersection = np.logical_and(im1, im2)
return 2. * intersection.sum() / (im1.sum() + im2.sum())
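# Worked example (hypothetical arrays, not part of the original module): two masks that
# share 2 of their 3 foreground pixels give Dice = 2*2 / (3+3) ≈ 0.67, and the result is
# the same whichever mask is passed first.
def _example_dice():
    a = np.array([[1, 1, 0], [0, 1, 0]])
    b = np.array([[1, 0, 0], [0, 1, 1]])
    return dice(a, b), dice(b, a)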
def dilate(data, size, shape, dim=None):
"""
    Dilate data using the specified structuring element
:param data: Image or numpy array: 2d or 3d array
:param size: int: If shape={'square', 'cube'}: Corresponds to the length of an edge (size=1 has no effect).\
If shape={'disk', 'ball'}: Corresponds to the radius, not including the center element (size=0 has no effect).
:param shape: {'square', 'cube', 'disk', 'ball'}
:param dim: {0, 1, 2}: Dimension of the array which 2D structural element will be orthogonal to. For example, if\
you wish to apply a 2D disk kernel in the X-Y plane, leaving Z unaffected, parameters will be: shape=disk, dim=2.
:return: numpy array: data dilated
"""
if isinstance(data, Image):
im_out = data.copy()
im_out.data = dilate(data.data, size, shape, dim)
return im_out
else:
return dilation(data, selem=_get_selem(shape, size, dim), out=None)
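# Usage sketch (hypothetical): grow a single voxel into a 3x3 square restricted to its
# own Z-slice by using a 2D square element orthogonal to dim=2.
def _example_dilate():
    vol = np.zeros((5, 5, 3))
    vol[2, 2, 1] = 1
    return dilate(vol, size=3, shape='square', dim=2)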
def erode(data, size, shape, dim=None):
"""
    Erode data using the specified structuring element
:param data: Image or numpy array: 2d or 3d array
:param size: int: If shape={'square', 'cube'}: Corresponds to the length of an edge (size=1 has no effect).\
If shape={'disk', 'ball'}: Corresponds to the radius, not including the center element (size=0 has no effect).
:param shape: {'square', 'cube', 'disk', 'ball'}
:param dim: {0, 1, 2}: Dimension of the array which 2D structural element will be orthogonal to. For example, if\
you wish to apply a 2D disk kernel in the X-Y plane, leaving Z unaffected, parameters will be: shape=disk, dim=2.
    :return: numpy array: data eroded
"""
if isinstance(data, Image):
im_out = data.copy()
im_out.data = erode(data.data, size, shape, dim)
return im_out
else:
return erosion(data, selem=_get_selem(shape, size, dim), out=None)
def mutual_information(x, y, nbins=32, normalized=False):
"""
Compute mutual information
:param x: 1D numpy.array : flatten data from an image
:param y: 1D numpy.array : flatten data from an image
    :param nbins: number of bins to compute the contingency matrix (only used if normalized=False)
    :param normalized: bool: if True, return the normalized mutual information (scikit-learn) instead of the binned estimate
:return: float non negative value : mutual information
"""
if normalized:
mi = normalized_mutual_info_score(x, y)
else:
c_xy = np.histogram2d(x, y, nbins)[0]
mi = mutual_info_score(None, None, contingency=c_xy)
return mi
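# Sketch (hypothetical data): the MI of a variable with itself equals the entropy of its
# binned distribution and is much larger than the MI with an independent variable.
def _example_mutual_information():
    rng = np.random.RandomState(0)
    x, y = rng.rand(1000), rng.rand(1000)
    return mutual_information(x, x), mutual_information(x, y)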
def correlation(x, y, type='pearson'):
"""
Compute pearson or spearman correlation coeff
Pearson's R is parametric whereas Spearman's R is non parametric (less sensitive)
:param x: 1D numpy.array : flatten data from an image
:param y: 1D numpy.array : flatten data from an image
:param type: str: 'pearson' or 'spearman': type of R correlation coeff to compute
:return: float value : correlation coefficient (between -1 and 1)
"""
    if type == 'pearson':
        corr = pearsonr(x, y)[0]
    elif type == 'spearman':
        corr = spearmanr(x, y)[0]
    else:
        raise ValueError("Unsupported correlation type: {}".format(type))
return corr
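# Sketch (hypothetical data): for a monotonic but non-linear relation Spearman's R is
# exactly 1 while Pearson's R stays below 1, which is why both options are offered.
def _example_correlation():
    x = np.arange(1, 100, dtype=float)
    y = x ** 3
    return correlation(x, y, type='pearson'), correlation(x, y, type='spearman')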
def smooth(data, sigmas):
"""
Smooth data by convolving Gaussian kernel
:param data: input 3D numpy array
:param sigmas: Kernel SD in voxel
:return:
"""
assert len(data.shape) == len(sigmas)
return gaussian_filter(data.astype(float), sigmas, order=0, truncate=4.0)
def laplacian(data, sigmas):
"""
Apply Laplacian filter
"""
assert len(data.shape) == len(sigmas)
return gaussian_laplace(data.astype(float), sigmas)
def compute_similarity(data1, data2, metric):
'''
Compute a similarity metric between two images data
:param data1: numpy.array 3D data
:param data2: numpy.array 3D data
    :param metric: 'mi' or 'minorm' for (normalized) mutual information, 'corr' for Pearson correlation coefficient
    :return: tuple with computed similarity result, data1 flattened array, data2 flattened array
'''
data1_1d = data1.ravel()
data2_1d = data2.ravel()
# get indices of non-null voxels from the intersection of both data
data_mult = data1_1d * data2_1d
ind_nonnull = np.where(data_mult > ALMOST_ZERO)[0]
# set new variables with non-null voxels
data1_1d = data1_1d[ind_nonnull]
data2_1d = data2_1d[ind_nonnull]
# compute similarity metric
    if metric == 'mi':
        res = mutual_information(data1_1d, data2_1d, normalized=False)
    elif metric == 'minorm':
        res = mutual_information(data1_1d, data2_1d, normalized=True)
    elif metric == 'corr':
        res = correlation(data1_1d, data2_1d)
    else:
        raise ValueError(f"Don't know what metric to use! Got unsupported: {metric}")
return res, data1_1d, data2_1d
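# Usage sketch (hypothetical volumes): compare two noisy versions of the same data with
# the Pearson correlation metric; only voxels that are non-null in both are considered.
def _example_compute_similarity():
    rng = np.random.RandomState(0)
    base = rng.rand(10, 10, 10) + 0.5
    res, v1, v2 = compute_similarity(base, base + 0.01 * rng.rand(10, 10, 10), metric='corr')
    return res   # close to 1.0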
def otsu(data, nbins):
thresh = threshold_otsu(data, nbins)
return data > thresh
def adap(data, block_size, offset):
mask = data
for iz in range(data.shape[2]):
adaptive_thresh = threshold_local(data[:, :, iz], block_size, method='gaussian', offset=offset)
mask[:, :, iz] = mask[:, :, iz] > adaptive_thresh
return mask
def otsu_median(data, size, n_iter):
data, mask = median_otsu(data, size, n_iter)
return mask
def threshold(data, thr_value):
data[data < thr_value] = 0
return data
def perc(data, perc_value):
perc = np.percentile(data, perc_value)
return data > perc
def binarize(data, bin_thr=0):
return data > bin_thr
def concatenate_along_4th_dimension(data1, data2):
"""
Concatenate two data along 4th dimension.
:param data1: 3d or 4d array
:param data2: 3d or 4d array
:return data_concat: concate(data1, data2)
"""
if len(np.shape(data1)) == 3:
data1 = data1[..., np.newaxis]
if len(np.shape(data2)) == 3:
data2 = data2[..., np.newaxis]
return np.concatenate((data1, data2), axis=3)
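# Sketch (hypothetical): two 3D volumes become one 4D volume with two "time points".
def _example_concatenate_4d():
    a, b = np.zeros((4, 4, 4)), np.ones((4, 4, 4))
    return concatenate_along_4th_dimension(a, b).shape   # (4, 4, 4, 2)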
def denoise_nlmeans(data_in, patch_radius=1, block_radius=5):
"""
:param data_in: nd_array to denoise
.. note::
for more info about patch_radius and block radius, please refer to the dipy website: http://nipy.org/dipy/reference/dipy.denoise.html#dipy.denoise.nlmeans.nlmeans
"""
data_in = np.asarray(data_in)
block_radius_max = min(data_in.shape) - 1
block_radius = block_radius_max if block_radius > block_radius_max else block_radius
sigma = estimate_sigma(data_in)
denoised = nlmeans(data_in, sigma, patch_radius=patch_radius, block_radius=block_radius)
return denoised
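# Usage sketch (hypothetical volume): denoise a small random array with the default
# radii; the block radius is automatically clipped to the smallest dimension minus one.
def _example_denoise_nlmeans():
    rng = np.random.RandomState(0)
    vol = rng.rand(20, 20, 20).astype(np.float64)
    return denoise_nlmeans(vol, patch_radius=1, block_radius=5)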
|
neuropoly/spinalcordtoolbox
|
spinalcordtoolbox/math.py
|
Python
|
mit
| 9,798
|
[
"Gaussian"
] |
ec77fc75e939432cc9a5a5de51b830e505827a4d9dd1bba9f54d5a320a53b2f0
|
"""
GEMPRO
======
"""
import logging
import os
import os.path as op
import shutil
from copy import copy
import pandas as pd
from Bio import SeqIO
from bioservices import KEGG
from bioservices import UniProt
from cobra.core import DictList
from six.moves.urllib.error import HTTPError
from slugify import Slugify
import ssbio.core.modelpro
import ssbio.databases.kegg
import ssbio.databases.pdb
import ssbio.databases.uniprot
import ssbio.protein.sequence.properties.residues
import ssbio.protein.sequence.properties.tmhmm
import ssbio.protein.sequence.utils.fasta
import ssbio.protein.structure.properties.msms
import ssbio.protein.structure.properties.quality
import ssbio.protein.structure.properties.residues
from ssbio import utils
from ssbio.core.genepro import GenePro
from ssbio.core.modelpro import ModelPro
from ssbio.core.object import Object
from ssbio.databases.kegg import KEGGProp
from ssbio.databases.uniprot import UniProtProp
from ssbio.protein.sequence.properties.scratch import SCRATCH
if utils.is_ipynb():
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm
custom_slugify = Slugify(safe_chars='-_.')
logging.getLogger("requests").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
log = logging.getLogger(__name__)
date = utils.Date()
bs_unip = UniProt()
bs_kegg = KEGG()
class GEMPRO(Object):
"""Generic class to represent all information for a GEM-PRO project.
Initialize the GEM-PRO project with a genome-scale model, a list of genes, or a dict of genes and sequences.
Specify the name of your project, along with the root directory where a folder with that name will be created.
Main methods provided are:
#. Automated mapping of sequence IDs
* With KEGG mapper
* With UniProt mapper
* Allowing manual gene ID --> protein sequence entry
* Allowing manual gene ID --> UniProt ID
#. Consolidating sequence IDs and setting a representative sequence
* Currently these are set based on available PDB IDs
#. Mapping of representative sequence --> structures
* With UniProt --> ranking of PDB structures
* BLAST representative sequence --> PDB database
#. Preparation of files for homology modeling (currently for I-TASSER)
* Mapping to existing models
* Preparation for running I-TASSER
* Parsing I-TASSER runs
#. Running QC/QA on structures and setting a representative structure
* Various cutoffs (mutations, insertions, deletions) can be set to filter structures
#. Automation of protein sequence and structure property calculation
#. Creation of Pandas DataFrame summaries directly from downloaded metadata
Args:
gem_name (str): The name of your GEM or just your project in general. This will be the name of the main folder
that is created in root_dir.
root_dir (str): Path to where the folder named after ``gem_name`` will be created. If not provided, directories
will not be created and output directories need to be specified for some steps.
pdb_file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
gem (Model): COBRApy Model object
gem_file_path (str): Path to GEM file
gem_file_type (str): GEM model type - ``sbml`` (or ``xml``), ``mat``, or ``json`` formats
genes_list (list): List of gene IDs that you want to map
genes_and_sequences (dict): Dictionary of gene IDs and their amino acid sequence strings
genome_path (str): FASTA file of all protein sequences
write_protein_fasta_files (bool): If individual protein FASTA files should be written out
description (str): Description string of your project
custom_spont_id (str): ID of spontaneous genes in a COBRA model which will be ignored for analysis
"""
def __init__(self, gem_name, root_dir=None, pdb_file_type='mmtf',
gem=None, gem_file_path=None, gem_file_type=None,
genes_list=None, genes_and_sequences=None, genome_path=None,
write_protein_fasta_files=True,
description=None, custom_spont_id=None):
Object.__init__(self, id=gem_name, description=description)
self.genes = DictList()
"""DictList: All protein-coding genes in this GEM-PRO project"""
self.custom_spont_id = custom_spont_id
"""str: ID of spontaneous genes in a COBRA model which will be ignored for analysis"""
self.pdb_file_type = pdb_file_type
"""str: ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB"""
self.genome_path = genome_path
"""str: Simple link to the filepath of the FASTA file containing all protein sequences"""
self.model = None
"""Model: COBRApy model object"""
# Create directories
self._root_dir = None
if root_dir:
self.root_dir = root_dir
# TODO: add some checks for multiple inputs (only allow one!)
# Load a Model object
if gem:
self.load_cobra_model(gem)
# Or, load a GEM file
elif gem_file_path and gem_file_type:
gem = ssbio.core.modelpro.model_loader(gem_file_path=gem_file_path,
gem_file_type=gem_file_type)
self.load_cobra_model(gem)
# Or, load a list of gene IDs
elif genes_list:
self.add_gene_ids(genes_list)
# Or, load a dictionary of genes and their sequences
elif genes_and_sequences:
self.add_gene_ids(list(genes_and_sequences.keys()))
self.manual_seq_mapping(genes_and_sequences, write_fasta_files=write_protein_fasta_files)
# Or, load the provided FASTA file
elif genome_path:
genes_and_sequences = ssbio.protein.sequence.utils.fasta.load_fasta_file_as_dict_of_seqrecords(genome_path)
self.add_gene_ids(list(genes_and_sequences.keys()))
self.manual_seq_mapping(genes_and_sequences, write_fasta_files=write_protein_fasta_files)
# If neither a model or genes are input, you can still add IDs with method add_gene_ids later
else:
log.warning('No model or genes input')
log.info('{}: number of genes'.format(len(self.genes)))
@property
def root_dir(self):
"""str: Directory where GEM-PRO project folder named after the attribute ``base_dir`` is located."""
return self._root_dir
@root_dir.setter
def root_dir(self, path):
if not path:
raise ValueError('No path specified')
if not op.exists(path):
raise ValueError('{}: folder does not exist'.format(path))
if self._root_dir:
log.info('Changing root directory of GEM-PRO project "{}" from {} to {}'.format(self.id, self.root_dir, path))
if not op.exists(op.join(path, self.id)):
raise IOError('GEM-PRO project "{}" does not exist in folder {}'.format(self.id, path))
else:
log.info('Creating GEM-PRO project directory in folder {}'.format(path))
self._root_dir = path
for d in [self.base_dir, self.model_dir, self.data_dir, self.genes_dir]:#, self.structures_dir]:
ssbio.utils.make_dir(d)
log.info('{}: GEM-PRO project location'.format(self.base_dir))
# Propagate changes to gene
if hasattr(self, 'genes'):
for g in self.genes:
g.root_dir = self.genes_dir
@property
def base_dir(self):
"""str: GEM-PRO project folder."""
if self.root_dir:
return op.join(self.root_dir, self.id)
else:
return None
@property
def model_dir(self):
"""str: Directory where original GEMs and GEM-related files are stored."""
if self.base_dir:
return op.join(self.base_dir, 'model')
else:
return None
@property
def data_dir(self):
"""str: Directory where all data are stored."""
if self.base_dir:
return op.join(self.base_dir, 'data')
else:
return None
@property
def genes_dir(self):
"""str: Directory where all gene specific information is stored."""
if self.base_dir:
return op.join(self.base_dir, 'genes')
else:
return None
# @property
# def structures_dir(self):
# """str: Directory where all structures are stored."""
# # XTODO: replace storage of structures in individual protein directories with this to reduce redundancy
# if self.base_dir:
# return op.join(self.base_dir, 'structures')
# else:
# return None
def load_cobra_model(self, model):
"""Load a COBRApy Model object into the GEM-PRO project.
Args:
model (Model): COBRApy ``Model`` object
"""
self.model = ModelPro(model)
for g in self.model.genes:
if self.genes_dir:
g.root_dir = self.genes_dir
g.protein.pdb_file_type = self.pdb_file_type
self.genes = self.model.genes
log.info('{}: loaded model'.format(model.id))
log.info('{}: number of reactions'.format(len(self.model.reactions)))
log.info('{}: number of reactions linked to a gene'.format(ssbio.core.modelpro.true_num_reactions(self.model)))
log.info('{}: number of genes (excluding spontaneous)'.format(ssbio.core.modelpro.true_num_genes(self.model,
custom_spont_id=self.custom_spont_id)))
log.info('{}: number of metabolites'.format(len(self.model.metabolites)))
log.warning('IMPORTANT: All Gene objects have been transformed into GenePro '
'objects, and will be for any new ones')
@property
def genes_with_structures(self):
"""DictList: All genes with any mapped protein structures."""
return DictList(x for x in self.genes if x.protein.num_structures > 0)
@property
def genes_with_experimental_structures(self):
"""DictList: All genes that have at least one experimental structure."""
return DictList(x for x in self.genes_with_structures if x.protein.num_structures_experimental > 0)
@property
def genes_with_homology_models(self):
"""DictList: All genes that have at least one homology model."""
return DictList(x for x in self.genes_with_structures if x.protein.num_structures_homology > 0)
@property
def genes_with_a_representative_sequence(self):
"""DictList: All genes with a representative sequence."""
return DictList(x for x in self.genes if x.protein.representative_sequence)
# return DictList(y for y in tmp if y.protein.representative_sequence.seq)
@property
def genes_with_a_representative_structure(self):
"""DictList: All genes with a representative protein structure."""
tmp = DictList(x for x in self.genes if x.protein.representative_structure)
return DictList(y for y in tmp if y.protein.representative_structure.structure_file)
@property
def functional_genes(self):
"""DictList: All functional genes with a representative sequence"""
return DictList(x for x in self.genes if x.functional)
# @property
# def genes(self):
# """DictList: All genes excluding spontaneous ones."""
# return ssbio.core.modelpro.filter_out_spontaneous_genes(self._genes, custom_spont_id=self.custom_spont_id)
# @genes.setter
# def genes(self, genes_list):
# """Set the genes attribute to be a DictList of GenePro objects.
#
# A "protein" attribute will be added to each Gene.
#
# Args:
# genes_list: DictList of COBRApy Gene objects, or list of gene IDs
#
# """
#
# if not isinstance(genes_list, DictList):
# tmp_list = []
# for x in list(set(genes_list)):
# x = str(x)
# new_gene = GenePro(id=x, pdb_file_type=self.pdb_file_type, root_dir=self.genes_dir)
# tmp_list.append(new_gene)
# self._genes = DictList(tmp_list)
# else:
# self._genes = genes_list
def add_gene_ids(self, genes_list):
"""Add gene IDs manually into the GEM-PRO project.
Args:
genes_list (list): List of gene IDs as strings.
"""
orig_num_genes = len(self.genes)
for g in list(set(genes_list)):
if not self.genes.has_id(g):
new_gene = GenePro(id=g, pdb_file_type=self.pdb_file_type, root_dir=self.genes_dir)
if self.model:
self.model.genes.append(new_gene)
else:
self.genes.append(new_gene)
log.info('Added {} genes to GEM-PRO project'.format(len(self.genes)-orig_num_genes))
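    # Usage sketch, given here only as comments so the class body is not altered
    # (project name and IDs are hypothetical):
    #
    #     my_gempro = GEMPRO(gem_name='my_project', root_dir='/tmp/projects')
    #     my_gempro.add_gene_ids(['b0001', 'b0002', 'b0001'])   # duplicates are skipped
    #
    # New GenePro objects are appended to the model's gene DictList when a COBRA model
    # has been loaded, otherwise to the project-level ``genes`` DictList.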
####################################################################################################################
### SEQUENCE RELATED METHODS ###
def kegg_mapping_and_metadata(self, kegg_organism_code, custom_gene_mapping=None, outdir=None,
set_as_representative=False, force_rerun=False):
"""Map all genes in the model to KEGG IDs using the KEGG service.
Steps:
1. Download all metadata and sequence files in the sequences directory
2. Creates a KEGGProp object in the protein.sequences attribute
3. Returns a Pandas DataFrame of mapping results
Args:
kegg_organism_code (str): The three letter KEGG code of your organism
custom_gene_mapping (dict): If your model genes differ from the gene IDs you want to map,
custom_gene_mapping allows you to input a dictionary which maps model gene IDs to new ones.
Dictionary keys must match model gene IDs.
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
set_as_representative (bool): If mapped KEGG IDs should be set as representative sequences
force_rerun (bool): If you want to overwrite any existing mappings and files
"""
# First map all of the organism's KEGG genes to UniProt
kegg_to_uniprot = ssbio.databases.kegg.map_kegg_all_genes(organism_code=kegg_organism_code, target_db='uniprot')
successfully_mapped_counter = 0
for g in tqdm(self.genes):
if custom_gene_mapping:
kegg_g = custom_gene_mapping[g.id]
else:
kegg_g = g.id
if kegg_g not in kegg_to_uniprot:
log.debug('{}: unable to map to KEGG'.format(g.id))
continue
# Download both FASTA and KEGG metadata files
kegg_prop = g.protein.load_kegg(kegg_id=kegg_g, kegg_organism_code=kegg_organism_code,
download=True, outdir=outdir, set_as_representative=set_as_representative,
force_rerun=force_rerun)
# Update potentially old UniProt ID
if kegg_g in kegg_to_uniprot.keys():
kegg_prop.uniprot = kegg_to_uniprot[kegg_g]
if g.protein.representative_sequence:
if g.protein.representative_sequence.kegg == kegg_prop.kegg:
g.protein.representative_sequence.uniprot = kegg_to_uniprot[kegg_g]
# Keep track of missing mappings - missing is defined by no available sequence
if kegg_prop.sequence_file:
successfully_mapped_counter += 1
log.debug('{}: loaded KEGG information for gene'.format(g.id))
log.info('{}/{}: number of genes mapped to KEGG'.format(successfully_mapped_counter, len(self.genes)))
log.info('Completed ID mapping --> KEGG. See the "df_kegg_metadata" attribute for a summary dataframe.')
def kegg_mapping_and_metadata_parallelize(self, sc, kegg_organism_code, custom_gene_mapping=None, outdir=None,
set_as_representative=False, force_rerun=False):
"""Map all genes in the model to KEGG IDs using the KEGG service.
Steps:
1. Download all metadata and sequence files in the sequences directory
2. Creates a KEGGProp object in the protein.sequences attribute
3. Returns a Pandas DataFrame of mapping results
Args:
sc (SparkContext): Spark Context to parallelize this function
kegg_organism_code (str): The three letter KEGG code of your organism
custom_gene_mapping (dict): If your model genes differ from the gene IDs you want to map,
custom_gene_mapping allows you to input a dictionary which maps model gene IDs to new ones.
Dictionary keys must match model gene IDs.
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
set_as_representative (bool): If mapped KEGG IDs should be set as representative sequences
force_rerun (bool): If you want to overwrite any existing mappings and files
"""
# First map all of the organism's KEGG genes to UniProt
kegg_to_uniprot = ssbio.databases.kegg.map_kegg_all_genes(organism_code=kegg_organism_code, target_db='uniprot')
# Parallelize the genes list
genes_rdd = sc.parallelize(self.genes)
# Write a sub-function to carry out the bulk of the original function
def gp_kegg_sc(g):
if custom_gene_mapping:
kegg_g = custom_gene_mapping[g.id]
else:
kegg_g = g.id
# Download both FASTA and KEGG metadata files
kegg_prop = g.protein.load_kegg(kegg_id=kegg_g, kegg_organism_code=kegg_organism_code,
download=True, outdir=outdir,
set_as_representative=set_as_representative,
force_rerun=force_rerun)
# Update potentially old UniProt ID
if kegg_g in kegg_to_uniprot.keys():
kegg_prop.uniprot = kegg_to_uniprot[kegg_g]
if g.protein.representative_sequence:
if g.protein.representative_sequence.kegg == kegg_prop.kegg:
g.protein.representative_sequence.uniprot = kegg_to_uniprot[kegg_g]
# Tracker for if it mapped successfully to KEGG
if kegg_prop.sequence_file:
success = True
else:
success = False
return g, success
# Run a map operation to execute the function on all items in the RDD
result = genes_rdd.map(gp_kegg_sc).collect()
# Copy the results over to the GEM-PRO object's genes using the GenePro function "copy_modified_gene"
# Also count how many genes mapped to KEGG
successfully_mapped_counter = 0
for modified_g, success in result:
original_gene = self.genes.get_by_id(modified_g.id)
original_gene.copy_modified_gene(modified_g)
if success:
successfully_mapped_counter += 1
log.info('{}/{}: number of genes mapped to KEGG'.format(successfully_mapped_counter, len(self.genes)))
log.info('Completed ID mapping --> KEGG. See the "df_kegg_metadata" attribute for a summary dataframe.')
@property
def df_kegg_metadata(self):
"""DataFrame: Pandas DataFrame of KEGG metadata per protein."""
kegg_pre_df = []
df_cols = ['gene', 'kegg', 'refseq', 'uniprot', 'num_pdbs', 'pdbs', 'seq_len', 'sequence_file', 'metadata_file']
for g in self.genes:
kegg_mappings = g.protein.filter_sequences(KEGGProp)
for kegg_prop in kegg_mappings:
kegg_dict = kegg_prop.get_dict(df_format=True, only_attributes=df_cols)
kegg_dict['gene'] = g.id
kegg_pre_df.append(kegg_dict)
# Save a dataframe of the file mapping info
df = pd.DataFrame.from_records(kegg_pre_df, columns=df_cols).set_index('gene')
if df.empty:
log.warning('Empty dataframe')
return df
else:
return ssbio.utils.clean_df(df)
@property
def missing_kegg_mapping(self):
"""list: List of genes with no mapping to KEGG."""
kegg_missing = []
for g in self.genes:
keggs = g.protein.filter_sequences(KEGGProp)
no_sequence_file_available = True
for k in keggs:
if k.sequence_file:
no_sequence_file_available = False
break
if no_sequence_file_available:
kegg_missing.append(g.id)
return list(set(kegg_missing))
def uniprot_mapping_and_metadata(self, model_gene_source, custom_gene_mapping=None, outdir=None,
set_as_representative=False, force_rerun=False):
"""Map all genes in the model to UniProt IDs using the UniProt mapping service.
Also download all metadata and sequences.
Args:
model_gene_source (str): the database source of your model gene IDs.
See: http://www.uniprot.org/help/api_idmapping
Common model gene sources are:
* Ensembl Genomes - ``ENSEMBLGENOME_ID`` (i.e. E. coli b-numbers)
* Entrez Gene (GeneID) - ``P_ENTREZGENEID``
* RefSeq Protein - ``P_REFSEQ_AC``
custom_gene_mapping (dict): If your model genes differ from the gene IDs you want to map,
custom_gene_mapping allows you to input a dictionary which maps model gene IDs to new ones.
Dictionary keys must match model genes.
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
set_as_representative (bool): If mapped UniProt IDs should be set as representative sequences
force_rerun (bool): If you want to overwrite any existing mappings and files
"""
# Allow model gene --> custom ID mapping ({'TM_1012':'TM1012'})
if custom_gene_mapping:
genes_to_map = list(custom_gene_mapping.values())
else:
genes_to_map = [x.id for x in self.genes]
# Map all IDs first to available UniProts
genes_to_uniprots = bs_unip.mapping(fr=model_gene_source, to='ACC', query=genes_to_map)
successfully_mapped_counter = 0
for g in tqdm(self.genes):
if custom_gene_mapping and g.id in custom_gene_mapping.keys():
uniprot_gene = custom_gene_mapping[g.id]
else:
uniprot_gene = g.id
if uniprot_gene not in genes_to_uniprots:
log.debug('{}: unable to map to UniProt'.format(g.id))
continue
for mapped_uniprot in genes_to_uniprots[uniprot_gene]:
try:
uniprot_prop = g.protein.load_uniprot(uniprot_id=mapped_uniprot, download=True, outdir=outdir,
set_as_representative=set_as_representative,
force_rerun=force_rerun)
except HTTPError as e:
log.error('{}, {}: unable to complete web request'.format(g.id, mapped_uniprot))
print(e)
continue
if uniprot_prop.sequence_file or uniprot_prop.metadata_file:
successfully_mapped_counter += 1
log.info('{}/{}: number of genes mapped to UniProt'.format(successfully_mapped_counter, len(self.genes)))
log.info('Completed ID mapping --> UniProt. See the "df_uniprot_metadata" attribute for a summary dataframe.')
def manual_uniprot_mapping(self, gene_to_uniprot_dict, outdir=None, set_as_representative=True):
"""Read a manual dictionary of model gene IDs --> UniProt IDs. By default sets them as representative.
This allows for mapping of the missing genes, or overriding of automatic mappings.
Input a dictionary of::
{
<gene_id1>: <uniprot_id1>,
<gene_id2>: <uniprot_id2>,
}
Args:
gene_to_uniprot_dict: Dictionary of mappings as shown above
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
set_as_representative (bool): If mapped UniProt IDs should be set as representative sequences
"""
for g, u in tqdm(gene_to_uniprot_dict.items()):
g = str(g)
gene = self.genes.get_by_id(g)
try:
uniprot_prop = gene.protein.load_uniprot(uniprot_id=u,
outdir=outdir, download=True,
set_as_representative=set_as_representative)
except HTTPError as e:
log.error('{}, {}: unable to complete web request'.format(g, u))
print(e)
continue
log.info('Completed manual ID mapping --> UniProt. See the "df_uniprot_metadata" attribute for a summary dataframe.')
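    # Example of the expected input dictionary (gene and UniProt IDs below are made-up placeholders):
    #
    #   manual_mapping = {'geneA': 'P00001', 'geneB': 'P00002'}
    #   my_gempro.manual_uniprot_mapping(manual_mapping, outdir='/tmp/gempro/sequences')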
@property
def df_uniprot_metadata(self):
"""DataFrame: Pandas DataFrame of UniProt metadata per protein."""
uniprot_pre_df = []
df_cols = ['gene', 'uniprot', 'reviewed', 'gene_name', 'kegg', 'refseq', 'num_pdbs', 'pdbs', 'ec_number',
'pfam', 'seq_len', 'description', 'entry_date', 'entry_version', 'seq_date', 'seq_version',
'sequence_file', 'metadata_file']
for g in self.genes:
uniprot_mappings = g.protein.filter_sequences(UniProtProp)
for uniprot_prop in uniprot_mappings:
uniprot_dict = uniprot_prop.get_dict(df_format=True, only_attributes=df_cols)
uniprot_dict['gene'] = g.id
uniprot_pre_df.append(uniprot_dict)
df = pd.DataFrame.from_records(uniprot_pre_df, columns=df_cols).set_index('gene')
if df.empty:
log.warning('Empty dataframe')
return df
else:
return ssbio.utils.clean_df(df)
@property
def missing_uniprot_mapping(self):
"""list: List of genes with no mapping to UniProt."""
uniprot_missing = []
for g in self.genes:
ups = g.protein.filter_sequences(UniProtProp)
no_sequence_file_available = True
for u in ups:
if u.sequence_file or u.metadata_file:
no_sequence_file_available = False
break
if no_sequence_file_available:
uniprot_missing.append(g.id)
return list(set(uniprot_missing))
# TODO: should also have a seq --> uniprot id function (has to be 100% match) (also needs organism)
def manual_seq_mapping(self, gene_to_seq_dict, outdir=None, write_fasta_files=True, set_as_representative=True):
"""Read a manual input dictionary of model gene IDs --> protein sequences. By default sets them as representative.
Args:
gene_to_seq_dict (dict): Mapping of gene IDs to their protein sequence strings
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
write_fasta_files (bool): If individual protein FASTA files should be written out
set_as_representative (bool): If mapped sequences should be set as representative
"""
if outdir:
outdir_set = True
else:
outdir_set = False
# Save the sequence information in individual FASTA files
for g, s in gene_to_seq_dict.items():
gene = self.genes.get_by_id(str(g))
if not outdir_set and write_fasta_files:
outdir = gene.protein.sequence_dir
if not outdir:
raise ValueError('Output directory must be specified')
manual_info = gene.protein.load_manual_sequence(ident=g, seq=s, outdir=outdir,
write_fasta_file=write_fasta_files,
set_as_representative=set_as_representative)
log.debug('{}: loaded manually defined sequence information'.format(g))
log.info('Loaded in {} sequences'.format(len(gene_to_seq_dict)))
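    # Sketch of a manual sequence mapping call (the gene ID and amino acid string are arbitrary placeholders):
    #
    #   my_gempro.manual_seq_mapping({'geneA': 'MSKGEELFTGVVPILVELDGDVNGHKFSVSGEGEGDATYGK'},
    #                                write_fasta_files=True, set_as_representative=True)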
def set_representative_sequence(self, force_rerun=False):
"""Automatically consolidate loaded sequences (manual, UniProt, or KEGG) and set a single representative sequence.
Manually set representative sequences override all existing mappings. UniProt mappings override KEGG mappings
except when KEGG mappings have PDBs associated with them and UniProt doesn't.
Args:
force_rerun (bool): Set to True to recheck stored sequences
"""
# TODO: rethink use of multiple database sources - may lead to inconsistency with genome sources
successfully_mapped_counter = 0
for g in tqdm(self.genes):
repseq = g.protein.set_representative_sequence(force_rerun=force_rerun)
if repseq:
if repseq.sequence_file:
successfully_mapped_counter += 1
log.info('{}/{}: number of genes with a representative sequence'.format(len(self.genes_with_a_representative_sequence),
len(self.genes)))
log.info('See the "df_representative_sequences" attribute for a summary dataframe.')
@property
def df_representative_sequences(self):
"""DataFrame: Pandas DataFrame of representative sequence information per protein."""
seq_mapping_pre_df = []
df_cols = ['gene', 'uniprot', 'kegg', 'num_pdbs', 'pdbs', 'seq_len', 'sequence_file', 'metadata_file']
for g in self.genes_with_a_representative_sequence:
gene_dict = g.protein.representative_sequence.get_dict(df_format=True, only_attributes=df_cols)
gene_dict['gene'] = g.id
seq_mapping_pre_df.append(gene_dict)
df = pd.DataFrame.from_records(seq_mapping_pre_df, columns=df_cols).set_index('gene')
if df.empty:
log.warning('Empty dataframe')
return df
else:
return ssbio.utils.clean_df(df)
@property
def missing_representative_sequence(self):
"""list: List of genes with no mapping to a representative sequence."""
return [x.id for x in self.genes if not self.genes_with_a_representative_sequence.has_id(x.id)]
def write_representative_sequences_file(self, outname, outdir=None, set_ids_from_model=True):
"""Write all the model's sequences as a single FASTA file. By default, sets IDs to model gene IDs.
Args:
outname (str): Name of the output FASTA file without the extension
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
set_ids_from_model (bool): If the gene ID source should be the model gene IDs, not the original sequence ID
"""
if not outdir:
outdir = self.data_dir
if not outdir:
raise ValueError('Output directory must be specified')
outfile = op.join(outdir, outname + '.faa')
tmp = []
for x in self.genes_with_a_representative_sequence:
repseq = x.protein.representative_sequence
copied_seq_record = copy(repseq)
if set_ids_from_model:
copied_seq_record.id = x.id
tmp.append(copied_seq_record)
SeqIO.write(tmp, outfile, "fasta")
log.info('{}: wrote all representative sequences to file'.format(outfile))
self.genome_path = outfile
return self.genome_path
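    # Quick usage sketch (assumes GEM-PRO directories were created so data_dir is set; the output name is arbitrary):
    #
    #   fasta_path = my_gempro.write_representative_sequences_file(outname=my_gempro.id)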
def get_sequence_properties(self, clean_seq=False, representatives_only=True):
"""Run Biopython ProteinAnalysis and EMBOSS pepstats to summarize basic statistics of all protein sequences.
Results are stored in the protein's respective SeqProp objects at ``.annotations``
Args:
representative_only (bool): If analysis should only be run on the representative sequences
"""
for g in tqdm(self.genes):
g.protein.get_sequence_properties(clean_seq=clean_seq, representative_only=representatives_only)
def get_sequence_sliding_window_properties(self, scale, window, representatives_only=True):
"""Run Biopython ProteinAnalysis and EMBOSS pepstats to summarize basic statistics of all protein sequences.
Results are stored in the protein's respective SeqProp objects at ``.annotations``
Args:
representative_only (bool): If analysis should only be run on the representative sequences
"""
for g in tqdm(self.genes):
g.protein.get_sequence_sliding_window_properties(scale=scale, window=window,
representative_only=representatives_only)
def get_scratch_predictions(self, path_to_scratch, results_dir, scratch_basename='scratch', num_cores=1,
exposed_buried_cutoff=25, custom_gene_mapping=None):
"""Run and parse ``SCRATCH`` results to predict secondary structure and solvent accessibility.
Annotations are stored in the protein's representative sequence at:
* ``.annotations``
* ``.letter_annotations``
Args:
path_to_scratch (str): Path to SCRATCH executable
results_dir (str): Path to SCRATCH results folder, which will have the files (scratch.ss, scratch.ss8,
scratch.acc, scratch.acc20)
scratch_basename (str): Basename of the SCRATCH results ('scratch' is default)
num_cores (int): Number of cores to use to parallelize SCRATCH run
exposed_buried_cutoff (int): Cutoff of exposed/buried for the acc20 predictions
custom_gene_mapping (dict): Default parsing of SCRATCH output files is to look for the model gene IDs. If
your output files contain IDs which differ from the model gene IDs, use this dictionary to map model
gene IDs to result file IDs. Dictionary keys must match model genes.
"""
if not self.genome_path:
# Write all sequences as one file
all_seqs = self.write_representative_sequences_file(outname=self.id)
# Runs SCRATCH or loads existing results in results_dir
scratch = SCRATCH(project_name=scratch_basename, seq_file=self.genome_path)
scratch.run_scratch(path_to_scratch=path_to_scratch, num_cores=num_cores, outdir=results_dir)
sspro_summary = scratch.sspro_summary()
sspro8_summary = scratch.sspro8_summary()
sspro_results = scratch.sspro_results()
sspro8_results = scratch.sspro8_results()
accpro_summary = scratch.accpro_summary()
accpro20_summary = scratch.accpro20_summary(exposed_buried_cutoff)
accpro_results = scratch.accpro_results()
accpro20_results = scratch.accpro20_results()
counter = 0
# Adding the scratch annotations to the representative_sequences letter_annotations
for g in tqdm(self.genes_with_a_representative_sequence):
if custom_gene_mapping:
g_id = custom_gene_mapping[g.id]
else:
g_id = g.id
if g_id in sspro_summary:
# Secondary structure
g.protein.representative_sequence.annotations.update(sspro_summary[g_id])
g.protein.representative_sequence.annotations.update(sspro8_summary[g_id])
try:
g.protein.representative_sequence.letter_annotations['SS-sspro'] = sspro_results[g_id]
g.protein.representative_sequence.letter_annotations['SS-sspro8'] = sspro8_results[g_id]
except TypeError:
log.error('Gene {}, SeqProp {}: sequence length mismatch between SCRATCH results and representative '
'sequence, unable to set letter annotation'.format(g_id, g.protein.representative_sequence.id))
# Solvent accessibility
g.protein.representative_sequence.annotations.update(accpro_summary[g_id])
g.protein.representative_sequence.annotations.update(accpro20_summary[g_id])
try:
g.protein.representative_sequence.letter_annotations['RSA-accpro'] = accpro_results[g_id]
g.protein.representative_sequence.letter_annotations['RSA-accpro20'] = accpro20_results[g_id]
except TypeError:
log.error('Gene {}, SeqProp {}: sequence length mismatch between SCRATCH results and representative '
'sequence, unable to set letter annotation'.format(g_id, g.protein.representative_sequence.id))
counter += 1
else:
log.error('{}: missing SCRATCH results'.format(g.id))
log.info('{}/{}: number of genes with SCRATCH predictions loaded'.format(counter, len(self.genes)))
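    # Hypothetical SCRATCH call (the executable and results paths below are placeholders for a local install):
    #
    #   my_gempro.get_scratch_predictions(path_to_scratch='/path/to/scratch/run_SCRATCH-1D_predictors.sh',
    #                                     results_dir='/tmp/gempro/scratch_results', num_cores=4)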
def get_tmhmm_predictions(self, tmhmm_results, custom_gene_mapping=None):
"""Parse TMHMM results and store in the representative sequences.
This is a basic function to parse pre-run TMHMM results. Run TMHMM from the
web service (http://www.cbs.dtu.dk/services/TMHMM/) by doing the following:
1. Write all representative sequences in the GEM-PRO using the function ``write_representative_sequences_file``
2. Upload the file to http://www.cbs.dtu.dk/services/TMHMM/ and choose "Extensive, no graphics" as the output
3. Copy and paste the results (ignoring the top header and above "HELP with output formats") into a file and save it
4. Run this function on that file
Args:
tmhmm_results (str): Path to TMHMM results (long format)
custom_gene_mapping (dict): Default parsing of TMHMM output is to look for the model gene IDs. If
your output file contains IDs which differ from the model gene IDs, use this dictionary to map model
gene IDs to result file IDs. Dictionary keys must match model genes.
"""
# TODO: refactor to Protein class?
tmhmm_dict = ssbio.protein.sequence.properties.tmhmm.parse_tmhmm_long(tmhmm_results)
counter = 0
for g in tqdm(self.genes_with_a_representative_sequence):
if custom_gene_mapping:
g_id = custom_gene_mapping[g.id]
else:
g_id = g.id
if g_id in tmhmm_dict:
log.debug('{}: loading TMHMM results'.format(g.id))
                if not tmhmm_dict[g_id]:
                    log.error("{}: missing TMHMM results".format(g.id))
                    continue
                g.protein.representative_sequence.annotations['num_tm_helix-tmhmm'] = tmhmm_dict[g_id]['num_tm_helices']
try:
g.protein.representative_sequence.letter_annotations['TM-tmhmm'] = tmhmm_dict[g_id]['sequence']
counter += 1
except TypeError:
log.error('Gene {}, SeqProp {}: sequence length mismatch between TMHMM results and representative '
'sequence, unable to set letter annotation'.format(g_id, g.protein.representative_sequence.id))
else:
log.error("{}: missing TMHMM results".format(g.id))
log.info('{}/{}: number of genes with TMHMM predictions loaded'.format(counter, len(self.genes)))
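    # After saving the TMHMM web-server output as described above, loading it might look like this
    # (the results path is a placeholder):
    #
    #   my_gempro.get_tmhmm_predictions(tmhmm_results='/tmp/gempro/tmhmm_long_output.txt')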
### END SEQUENCE RELATED METHODS ###
####################################################################################################################
####################################################################################################################
### STRUCTURE RELATED METHODS ###
def blast_seqs_to_pdb(self, seq_ident_cutoff=0, evalue=0.0001, all_genes=False, display_link=False,
outdir=None, force_rerun=False):
"""BLAST each representative protein sequence to the PDB. Saves raw BLAST results (XML files).
Args:
            seq_ident_cutoff (float, optional): Cutoff for percent sequence identity of hits (in decimal form)
evalue (float, optional): Cutoff for the E-value - filters for significant hits. 0.001 is liberal,
0.0001 is stringent (default).
all_genes (bool): If all genes should be BLASTed, or only those without any structures currently mapped
display_link (bool, optional): Set to True if links to the HTML results should be displayed
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
force_rerun (bool, optional): If existing BLAST results should not be used, set to True. Default is False
"""
counter = 0
for g in tqdm(self.genes_with_a_representative_sequence):
# If all_genes=False, BLAST only genes without a uniprot -> pdb mapping
if g.protein.num_structures_experimental > 0 and not all_genes and not force_rerun:
log.debug('{}: skipping BLAST, {} experimental structures already mapped '
'and all_genes flag is False'.format(g.id,
g.protein.num_structures_experimental))
continue
# BLAST the sequence to the PDB
new_pdbs = g.protein.blast_representative_sequence_to_pdb(seq_ident_cutoff=seq_ident_cutoff,
evalue=evalue,
display_link=display_link,
outdir=outdir,
force_rerun=force_rerun)
if new_pdbs:
counter += 1
log.debug('{}: {} PDBs BLASTed'.format(g.id, len(new_pdbs)))
else:
log.debug('{}: no BLAST results'.format(g.id))
log.info('Completed sequence --> PDB BLAST. See the "df_pdb_blast" attribute for a summary dataframe.')
log.info('{}: number of genes with additional structures added from BLAST'.format(counter))
@property
def df_pdb_blast(self):
"""DataFrame: Get a dataframe of PDB BLAST results"""
df = pd.DataFrame()
for g in self.genes_with_experimental_structures:
protein_df = g.protein.df_pdb_blast.copy().reset_index()
if not protein_df.empty:
protein_df['gene'] = g.id
df = df.append(protein_df)
if df.empty:
log.warning('Empty dataframe')
return df
else:
return ssbio.utils.clean_df(df.set_index('gene'))
def map_uniprot_to_pdb(self, seq_ident_cutoff=0.0, outdir=None, force_rerun=False):
"""Map all representative sequences' UniProt ID to PDB IDs using the PDBe "Best Structures" API.
Will save a JSON file of the results to each protein's ``sequences`` folder.
The "Best structures" API is available at https://www.ebi.ac.uk/pdbe/api/doc/sifts.html
The list of PDB structures mapping to a UniProt accession sorted by coverage of the protein and,
if the same, resolution.
Args:
seq_ident_cutoff (float): Sequence identity cutoff in decimal form
outdir (str): Output directory to cache JSON results of search
force_rerun (bool): Force re-downloading of JSON results if they already exist
Returns:
list: A rank-ordered list of PDBProp objects that map to the UniProt ID
"""
# First get all UniProt IDs and check if they have PDBs
all_representative_uniprots = []
for g in self.genes_with_a_representative_sequence:
uniprot_id = g.protein.representative_sequence.uniprot
if uniprot_id:
# TODO: add warning or something for isoform ids?
if '-' in uniprot_id:
uniprot_id = uniprot_id.split('-')[0]
all_representative_uniprots.append(uniprot_id)
log.info('Mapping UniProt IDs --> PDB IDs...')
uniprots_to_pdbs = bs_unip.mapping(fr='ACC', to='PDB_ID', query=all_representative_uniprots)
counter = 0
# Now run the best_structures API for all genes
for g in tqdm(self.genes_with_a_representative_sequence):
uniprot_id = g.protein.representative_sequence.uniprot
if uniprot_id:
if '-' in uniprot_id:
uniprot_id = uniprot_id.split('-')[0]
if uniprot_id in uniprots_to_pdbs:
best_structures = g.protein.map_uniprot_to_pdb(seq_ident_cutoff=seq_ident_cutoff, outdir=outdir, force_rerun=force_rerun)
if best_structures:
counter += 1
log.debug('{}: {} PDBs mapped'.format(g.id, len(best_structures)))
else:
log.debug('{}, {}: no PDBs available'.format(g.id, uniprot_id))
log.info('{}/{}: number of genes with at least one experimental structure'.format(len(self.genes_with_experimental_structures),
len(self.genes)))
log.info('Completed UniProt --> best PDB mapping. See the "df_pdb_ranking" attribute for a summary dataframe.')
@property
def df_pdb_ranking(self):
"""DataFrame: Get a dataframe of UniProt -> best structure in PDB results"""
df = pd.DataFrame()
for g in self.genes_with_experimental_structures:
protein_df = g.protein.df_pdb_ranking.copy().reset_index()
if not protein_df.empty:
protein_df['gene'] = g.id
df = df.append(protein_df)
if df.empty:
log.warning('Empty dataframe')
return df
else:
return ssbio.utils.clean_df(df.set_index('gene'))
@property
def missing_pdb_structures(self):
"""list: List of genes with no mapping to any experimental PDB structure."""
return [x.id for x in self.genes if not self.genes_with_experimental_structures.has_id(x.id)]
def get_manual_homology_models(self, input_dict, outdir=None, clean=True, force_rerun=False):
"""Copy homology models to the GEM-PRO project.
Requires an input of a dictionary formatted like so::
{
model_gene: {
homology_model_id1: {
'model_file': '/path/to/homology/model.pdb',
'file_type': 'pdb'
'additional_info': info_value
},
homology_model_id2: {
'model_file': '/path/to/homology/model.pdb'
'file_type': 'pdb'
}
}
}
Args:
input_dict (dict): Dictionary of dictionaries of gene names to homology model IDs and other information
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
clean (bool): If homology files should be cleaned and saved as a new PDB file
force_rerun (bool): If homology files should be copied again even if they exist in the GEM-PRO directory
"""
if outdir:
outdir_set = True
else:
outdir_set = False
counter = 0
for g in tqdm(self.genes):
if g.id not in input_dict:
continue
if not outdir_set:
outdir = g.protein.structure_dir
if not outdir:
raise ValueError('Output directory must be specified')
for hid, hdict in input_dict[g.id].items():
if 'model_file' not in hdict or 'file_type' not in hdict:
raise KeyError('"model_file" and "file_type" must be keys in the manual input dictionary.')
new_homology = g.protein.load_pdb(pdb_id=hid, pdb_file=hdict['model_file'],
file_type=hdict['file_type'], is_experimental=False)
if clean:
new_homology.load_structure_path(new_homology.clean_structure(outdir=outdir, force_rerun=force_rerun),
hdict['file_type'])
else:
copy_to = op.join(outdir, op.basename(hdict['model_file']))
if ssbio.utils.force_rerun(force_rerun, copy_to):
# Just copy the file to the structure directory and store the file name
log.debug('{}: copying model from original directory to GEM-PRO directory'.format(op.basename(hdict['model_file'])))
shutil.copy2(hdict['model_file'], outdir)
new_homology.load_structure_path(copy_to, hdict['file_type'])
else:
log.debug('{}: homology model already copied to directory'.format(copy_to))
new_homology.load_structure_path(copy_to, hdict['file_type'])
# TODO: need to better handle other info in the provided dictionary, if any
new_homology.update(hdict)
log.debug('{}: updated homology model information and copied model file.'.format(g.id))
counter += 1
log.info('Updated homology model information for {} genes.'.format(counter))
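    # Sketch of the nested input dictionary (gene ID, model ID, and file path are placeholders):
    #
    #   homology_input = {'geneA': {'geneA_model1': {'model_file': '/tmp/models/geneA_model1.pdb',
    #                                                'file_type': 'pdb'}}}
    #   my_gempro.get_manual_homology_models(homology_input, clean=True)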
def get_itasser_models(self, homology_raw_dir, custom_itasser_name_mapping=None, outdir=None, force_rerun=False):
"""Copy generated I-TASSER models from a directory to the GEM-PRO directory.
Args:
homology_raw_dir (str): Root directory of I-TASSER folders.
custom_itasser_name_mapping (dict): Use this if your I-TASSER folder names differ from your model gene names.
Input a dict of {model_gene: ITASSER_folder}.
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories
were not created initially
force_rerun (bool): If homology files should be copied again even if they exist in the GEM-PRO directory
"""
counter = 0
for g in tqdm(self.genes):
if custom_itasser_name_mapping and g.id in custom_itasser_name_mapping:
hom_id = custom_itasser_name_mapping[g.id]
if not op.exists(op.join(homology_raw_dir, hom_id)):
hom_id = g.id
else:
hom_id = g.id
# The name of the actual pdb file will be $GENEID_model1.pdb
new_itasser_name = hom_id + '_model1'
orig_itasser_dir = op.join(homology_raw_dir, hom_id)
try:
itasser_prop = g.protein.load_itasser_folder(ident=hom_id, itasser_folder=orig_itasser_dir,
organize=True, outdir=outdir,
organize_name=new_itasser_name,
force_rerun=force_rerun)
except OSError:
log.debug('{}: homology model folder unavailable'.format(g.id))
continue
except IOError:
log.debug('{}: homology model unavailable'.format(g.id))
continue
if itasser_prop.structure_file:
counter += 1
else:
log.debug('{}: homology model file unavailable, perhaps modelling did not finish'.format(g.id))
log.info('Completed copying of {} I-TASSER models to GEM-PRO directory. See the "df_homology_models" attribute for a summary dataframe.'.format(counter))
@property
def df_homology_models(self):
"""DataFrame: Get a dataframe of I-TASSER homology model results"""
df = pd.DataFrame()
for g in self.genes_with_homology_models:
protein_df = g.protein.df_homology_models.copy().reset_index()
if not protein_df.empty:
protein_df['gene'] = g.id
df = df.append(protein_df)
if df.empty:
log.warning('Empty dataframe')
return df
else:
return ssbio.utils.clean_df(df.set_index('gene'))
@property
def missing_homology_models(self):
"""list: List of genes with no mapping to any homology models."""
return [x.id for x in self.genes if not self.genes_with_homology_models.has_id(x.id)]
def set_representative_structure(self, seq_outdir=None, struct_outdir=None, pdb_file_type=None,
engine='needle', always_use_homology=False, rez_cutoff=0.0,
seq_ident_cutoff=0.5, allow_missing_on_termini=0.2,
allow_mutants=True, allow_deletions=False,
allow_insertions=False, allow_unresolved=True, skip_large_structures=False,
clean=True, force_rerun=False):
"""Set all representative structure for proteins from a structure in the structures attribute.
Each gene can have a combination of the following, which will be analyzed to set a representative structure.
* Homology model(s)
* Ranked PDBs
* BLASTed PDBs
If the ``always_use_homology`` flag is true, homology models are always set as representative when they exist.
If there are multiple homology models, we rank by the percent sequence coverage.
Args:
seq_outdir (str): Path to output directory of sequence alignment files, must be set if GEM-PRO directories
were not created initially
struct_outdir (str): Path to output directory of structure files, must be set if GEM-PRO directories
were not created initially
pdb_file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
engine (str): ``biopython`` or ``needle`` - which pairwise alignment program to use.
``needle`` is the standard EMBOSS tool to run pairwise alignments.
``biopython`` is Biopython's implementation of needle. Results can differ!
always_use_homology (bool): If homology models should always be set as the representative structure
rez_cutoff (float): Resolution cutoff, in Angstroms (only if experimental structure)
seq_ident_cutoff (float): Percent sequence identity cutoff, in decimal form
allow_missing_on_termini (float): Percentage of the total length of the reference sequence which will be ignored
when checking for modifications. Example: if 0.1, and reference sequence is 100 AA, then only residues
5 to 95 will be checked for modifications.
allow_mutants (bool): If mutations should be allowed or checked for
allow_deletions (bool): If deletions should be allowed or checked for
allow_insertions (bool): If insertions should be allowed or checked for
allow_unresolved (bool): If unresolved residues should be allowed or checked for
skip_large_structures (bool): Default False -- currently, large structures can't be saved as a PDB file even
if you just want to save a single chain, so Biopython will throw an error when trying to do so. As an
alternative, if a large structure is selected as representative, the pipeline will currently point to it
and not clean it. If you don't want this to happen, set this to true.
clean (bool): If structures should be cleaned
force_rerun (bool): If sequence to structure alignment should be rerun
Todo:
- Remedy large structure representative setting
"""
for g in tqdm(self.genes):
repstruct = g.protein.set_representative_structure(seq_outdir=seq_outdir,
struct_outdir=struct_outdir,
pdb_file_type=pdb_file_type,
engine=engine,
rez_cutoff=rez_cutoff,
seq_ident_cutoff=seq_ident_cutoff,
always_use_homology=always_use_homology,
allow_missing_on_termini=allow_missing_on_termini,
allow_mutants=allow_mutants,
allow_deletions=allow_deletions,
allow_insertions=allow_insertions,
allow_unresolved=allow_unresolved,
skip_large_structures=skip_large_structures,
clean=clean,
force_rerun=force_rerun)
log.info('{}/{}: number of genes with a representative structure'.format(len(self.genes_with_a_representative_structure),
len(self.genes)))
log.info('See the "df_representative_structures" attribute for a summary dataframe.')
def set_representative_structure_parallelize(self, sc, seq_outdir=None, struct_outdir=None, pdb_file_type=None,
engine='needle', always_use_homology=False, rez_cutoff=0.0,
seq_ident_cutoff=0.5, allow_missing_on_termini=0.2,
allow_mutants=True, allow_deletions=False,
allow_insertions=False, allow_unresolved=True, skip_large_structures=False,
clean=True, force_rerun=False):
        """Parallelized version of ``set_representative_structure`` that distributes genes over a Spark context ``sc``."""
        def set_repstruct(g, seq_outdir=seq_outdir, struct_outdir=struct_outdir,
pdb_file_type=pdb_file_type, engine=engine,
rez_cutoff=rez_cutoff, seq_ident_cutoff=seq_ident_cutoff,
always_use_homology=always_use_homology,
allow_missing_on_termini=allow_missing_on_termini,
allow_mutants=allow_mutants, allow_deletions=allow_deletions,
allow_insertions=allow_insertions, allow_unresolved=allow_unresolved,
skip_large_structures=skip_large_structures,
clean=clean, force_rerun=force_rerun):
g.protein.set_representative_structure(seq_outdir=seq_outdir, struct_outdir=struct_outdir,
pdb_file_type=pdb_file_type, engine=engine,
rez_cutoff=rez_cutoff, seq_ident_cutoff=seq_ident_cutoff,
always_use_homology=always_use_homology,
allow_missing_on_termini=allow_missing_on_termini,
allow_mutants=allow_mutants, allow_deletions=allow_deletions,
allow_insertions=allow_insertions, allow_unresolved=allow_unresolved,
skip_large_structures=skip_large_structures,
clean=clean, force_rerun=force_rerun)
return g
genes_rdd = sc.parallelize(self.genes)
result = genes_rdd.map(set_repstruct).collect()
        # Copy the results back to the GEM-PRO object's genes using the GenePro function "copy_modified_gene"
for modified_g in result:
original_gene = self.genes.get_by_id(modified_g.id)
original_gene.copy_modified_gene(modified_g)
log.info('{}/{}: number of genes with a representative structure'.format(len(self.genes_with_a_representative_structure),
len(self.genes)))
log.info('See the "df_representative_structures" attribute for a summary dataframe.')
@property
def df_representative_structures(self):
"""DataFrame: Get a dataframe of representative protein structure information."""
rep_struct_pre_df = []
df_cols = ['gene', 'id', 'is_experimental', 'file_type', 'structure_file']
for g in self.genes_with_a_representative_structure:
repdict = g.protein.representative_structure.get_dict(df_format=True, only_attributes=df_cols)
repdict['gene'] = g.id
rep_struct_pre_df.append(repdict)
df = pd.DataFrame.from_records(rep_struct_pre_df, columns=df_cols).set_index('gene')
if df.empty:
log.warning('Empty dataframe')
return df
else:
return ssbio.utils.clean_df(df)
@property
def missing_representative_structure(self):
"""list: List of genes with no mapping to a representative structure."""
return [x.id for x in self.genes if not self.genes_with_a_representative_structure.has_id(x.id)]
def prep_itasser_modeling(self, itasser_installation, itlib_folder, runtype, create_in_dir=None,
execute_from_dir=None, all_genes=False, print_exec=False, **kwargs):
"""Prepare to run I-TASSER homology modeling for genes without structures, or all genes.
Args:
itasser_installation (str): Path to I-TASSER folder, i.e. ``~/software/I-TASSER4.4``
itlib_folder (str): Path to ITLIB folder, i.e. ``~/software/ITLIB``
runtype: How you will be running I-TASSER - local, slurm, or torque
create_in_dir (str): Local directory where folders will be created, if not provided default is the
GEM-PRO's ``data_dir``
execute_from_dir (str): Optional path to execution directory - use this if you are copying the homology
models to another location such as a supercomputer for running
all_genes (bool): If all genes should be prepped, or only those without any mapped structures
print_exec (bool): If the execution statement should be printed to run modelling
Todo:
* Document kwargs - extra options for I-TASSER, SLURM or Torque execution
* Allow modeling of any sequence in sequences attribute, select by ID or provide SeqProp?
"""
if not create_in_dir:
if not self.data_dir:
raise ValueError('Output directory must be specified')
self.homology_models_dir = op.join(self.data_dir, 'homology_models')
else:
self.homology_models_dir = create_in_dir
ssbio.utils.make_dir(self.homology_models_dir)
if not execute_from_dir:
execute_from_dir = self.homology_models_dir
counter = 0
for g in self.genes_with_a_representative_sequence:
repstruct = g.protein.representative_structure
if repstruct and not all_genes:
log.debug('{}: representative structure set, skipping homology modeling'.format(g.id))
continue
g.protein.prep_itasser_modeling(itasser_installation=itasser_installation,
itlib_folder=itlib_folder, runtype=runtype,
create_in_dir=self.homology_models_dir,
execute_from_dir=execute_from_dir,
print_exec=print_exec, **kwargs)
counter += 1
log.info('Prepared I-TASSER modeling folders for {} genes in folder {}'.format(counter,
self.homology_models_dir))
def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):
"""Download ALL mapped experimental structures to each protein's structures directory.
Args:
outdir (str): Path to output directory, if GEM-PRO directories were not set or other output directory is
desired
pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired
force_rerun (bool): If files should be re-downloaded if they already exist
"""
if not pdb_file_type:
pdb_file_type = self.pdb_file_type
counter = 0
for g in tqdm(self.genes):
pdbs = g.protein.pdb_downloader_and_metadata(outdir=outdir, pdb_file_type=pdb_file_type, force_rerun=force_rerun)
if pdbs:
counter += len(pdbs)
log.info('Updated PDB metadata dataframe. See the "df_pdb_metadata" attribute for a summary dataframe.')
log.info('Saved {} structures total'.format(counter))
    def download_all_pdbs(self, outdir=None, pdb_file_type=None, load_metadata=False, force_rerun=False):
        """Download all mapped PDB structures for every protein, optionally loading metadata, and return the combined, de-duplicated list of downloaded structures."""
if not pdb_file_type:
pdb_file_type = self.pdb_file_type
all_structures = []
for g in tqdm(self.genes):
pdbs = g.protein.download_all_pdbs(outdir=outdir, pdb_file_type=pdb_file_type,
load_metadata=load_metadata, force_rerun=force_rerun)
all_structures.extend(pdbs)
return list(set(all_structures))
@property
def df_pdb_metadata(self):
"""DataFrame: Get a dataframe of PDB metadata (PDBs have to be downloaded first)."""
df = pd.DataFrame()
for g in self.genes_with_experimental_structures:
# Get per protein DataFrame
protein_df = g.protein.df_pdb_metadata.copy().reset_index()
protein_df['gene'] = g.id
df = df.append(protein_df)
if df.empty:
log.warning('Empty dataframe')
return df
else:
return ssbio.utils.clean_df(df.set_index('gene'))
@property
def df_proteins(self):
"""DataFrame: Get a summary dataframe of all proteins in the project."""
pre_df = []
df_cols = ['gene', 'id', 'sequences', 'num_sequences', 'representative_sequence',
'repseq_gene_name', 'repseq_uniprot', 'repseq_description',
'num_structures', 'experimental_structures', 'num_experimental_structures',
'homology_models', 'num_homology_models',
'representative_structure', 'representative_chain', 'representative_chain_seq_coverage',
'repstruct_description', 'repstruct_resolution',
'num_sequence_alignments', 'num_structure_alignments']
for g in self.genes:
# Get per protein DataFrame
protein_dict = g.protein.protein_statistics
protein_dict['gene'] = g.id
pre_df.append(protein_dict)
df = pd.DataFrame.from_records(pre_df, columns=df_cols).set_index('gene')
if df.empty:
log.warning('Empty dataframe')
return df
else:
return ssbio.utils.clean_df(df)
def get_dssp_annotations(self, representatives_only=True, force_rerun=False):
"""Run DSSP on structures and store calculations.
Annotations are stored in the protein structure's chain sequence at:
``<chain_prop>.seq_record.letter_annotations['*-dssp']``
Args:
representative_only (bool): If analysis should only be run on the representative structure
force_rerun (bool): If calculations should be rerun even if an output file exists
"""
for g in tqdm(self.genes):
g.protein.get_dssp_annotations(representative_only=representatives_only, force_rerun=force_rerun)
def get_dssp_annotations_parallelize(self, sc, representatives_only=True, force_rerun=False):
"""Run DSSP on structures and store calculations.
Annotations are stored in the protein structure's chain sequence at:
``<chain_prop>.seq_record.letter_annotations['*-dssp']``
Args:
representative_only (bool): If analysis should only be run on the representative structure
force_rerun (bool): If calculations should be rerun even if an output file exists
"""
genes_rdd = sc.parallelize(self.genes)
def get_dssp_annotation(g):
g.protein.get_dssp_annotations(representative_only=representatives_only, force_rerun=force_rerun)
return g
result = genes_rdd.map(get_dssp_annotation).collect()
for modified_g in result:
original_gene = self.genes.get_by_id(modified_g.id)
original_gene.copy_modified_gene(modified_g)
def get_msms_annotations(self, representatives_only=True, force_rerun=False):
"""Run MSMS on structures and store calculations.
Annotations are stored in the protein structure's chain sequence at:
``<chain_prop>.seq_record.letter_annotations['*-msms']``
Args:
representative_only (bool): If analysis should only be run on the representative structure
force_rerun (bool): If calculations should be rerun even if an output file exists
"""
for g in tqdm(self.genes):
g.protein.get_msms_annotations(representative_only=representatives_only, force_rerun=force_rerun)
def get_msms_annotations_parallelize(self, sc, representatives_only=True, force_rerun=False):
"""Run MSMS on structures and store calculations.
Annotations are stored in the protein structure's chain sequence at:
``<chain_prop>.seq_record.letter_annotations['*-msms']``
Args:
representative_only (bool): If analysis should only be run on the representative structure
force_rerun (bool): If calculations should be rerun even if an output file exists
"""
genes_rdd = sc.parallelize(self.genes)
def get_msms_annotation(g):
g.protein.get_msms_annotations(representative_only=representatives_only, force_rerun=force_rerun)
return g
result = genes_rdd.map(get_msms_annotation).collect()
for modified_g in result:
original_gene = self.genes.get_by_id(modified_g.id)
original_gene.copy_modified_gene(modified_g)
def get_freesasa_annotations(self, include_hetatms=False, representatives_only=True, force_rerun=False):
"""Run freesasa on structures and store calculations.
Annotations are stored in the protein structure's chain sequence at:
``<chain_prop>.seq_record.letter_annotations['*-freesasa']``
Args:
include_hetatms (bool): If HETATMs should be included in calculations. Defaults to ``False``.
representative_only (bool): If analysis should only be run on the representative structure
force_rerun (bool): If calculations should be rerun even if an output file exists
"""
for g in tqdm(self.genes):
g.protein.get_freesasa_annotations(include_hetatms=include_hetatms,
representative_only=representatives_only,
force_rerun=force_rerun)
def get_freesasa_annotations_parallelize(self, sc, include_hetatms=False,
representatives_only=True, force_rerun=False):
"""Run freesasa on structures and store calculations.
Annotations are stored in the protein structure's chain sequence at:
``<chain_prop>.seq_record.letter_annotations['*-freesasa']``
Args:
include_hetatms (bool): If HETATMs should be included in calculations. Defaults to ``False``.
representative_only (bool): If analysis should only be run on the representative structure
force_rerun (bool): If calculations should be rerun even if an output file exists
"""
genes_rdd = sc.parallelize(self.genes)
def get_freesasa_annotation(g):
g.protein.get_freesasa_annotations(include_hetatms=include_hetatms,
representative_only=representatives_only,
force_rerun=force_rerun)
return g
result = genes_rdd.map(get_freesasa_annotation).collect()
for modified_g in result:
original_gene = self.genes.get_by_id(modified_g.id)
original_gene.copy_modified_gene(modified_g)
    def get_all_pdbflex_info(self):
        """Retrieve PDBFlex flexibility information for each gene with a representative sequence, silencing warnings during the lookups."""
        counter = 0
logging.disable(logging.WARNING)
for g in tqdm(self.genes_with_a_representative_sequence):
try:
g.protein.get_all_pdbflex_info()
counter+=1
except Exception as e:
# log.exception(e)
continue
logging.disable(logging.NOTSET)
log.info('{}: successful PDB flex mappings'.format(counter))
def find_disulfide_bridges(self, representatives_only=True):
"""Run Biopython's disulfide bridge finder and store found bridges.
Annotations are stored in the protein structure's chain sequence at:
``<chain_prop>.seq_record.annotations['SSBOND-biopython']``
Args:
representative_only (bool): If analysis should only be run on the representative structure
"""
for g in tqdm(self.genes):
g.protein.find_disulfide_bridges(representative_only=representatives_only)
def find_disulfide_bridges_parallelize(self, sc, representatives_only=True):
"""Run Biopython's disulfide bridge finder and store found bridges.
Annotations are stored in the protein structure's chain sequence at:
``<chain_prop>.seq_record.annotations['SSBOND-biopython']``
Args:
representative_only (bool): If analysis should only be run on the representative structure
"""
genes_rdd = sc.parallelize(self.genes)
def find_disulfide_bridges(g):
g.protein.find_disulfide_bridges(representative_only=representatives_only)
return g
result = genes_rdd.map(find_disulfide_bridges).collect()
for modified_g in result:
original_gene = self.genes.get_by_id(modified_g.id)
original_gene.copy_modified_gene(modified_g)
### END STRUCTURE RELATED METHODS ###
####################################################################################################################
def __json_encode__(self):
to_return = {}
# Don't save properties, methods in the JSON
for x in [a for a in dir(self) if not a.startswith('__') and not isinstance(getattr(type(self), a, None), property) and not callable(getattr(self,a))]:
if self.model and x == 'genes':
continue
to_return.update({x: getattr(self, x)})
return to_return
def __json_decode__(self, **attrs):
for k, v in attrs.items():
setattr(self, k, v)
if not self.model:
self.genes = DictList(self.genes)
else:
self.genes = self.model.genes
def save_protein_pickles_and_reset_protein(self):
"""Save all Proteins as pickle files -- currently development code for parallelization purposes. Also clears the
protein attribute in all genes!"""
self.gene_protein_pickles = {}
for g in tqdm(self.genes):
if g.protein.representative_sequence:
initproteinpickle = op.join(g.protein.protein_dir, '{}_protein.pckl'.format(g.id))
g.protein.save_pickle(initproteinpickle)
self.gene_protein_pickles[g.id] = initproteinpickle
g.reset_protein()
else:
g.reset_protein()
def load_protein_pickles(self):
log.info('Loading Protein pickles into GEM-PRO...')
for g_id, protein in tqdm(self.gene_protein_pickles.items()):
g = self.genes.get_by_id(g_id)
g.protein = ssbio.io.load_pickle(protein)
|
nmih/ssbio
|
ssbio/pipeline/gempro.py
|
Python
|
mit
| 78,176
|
[
"BLAST",
"Biopython"
] |
4a62773d0aaa5200b47e2dce40c7e58067ab7ff507b32c064684e34a565169bf
|
from pssh import ParallelSSHClient
import paramiko
import argparse
import pssh.utils
parser = argparse.ArgumentParser(description='Take an SSH Key and Blast it across the network.')
parser.add_argument('-i', '--input', required=True, help='The input file containing hosts')
parser.add_argument('-s', '--sudo', default=False, action='store_true', help='Whether Sudo should be called (Default: False)')
parser.add_argument('-c', '--command', default='id', help='The Command to run (Default: id)')
parser.add_argument('-t', '--timeout', default=120, type=int, help='The timeout in seconds (Default: 120)')
parser.add_argument('-p', '--parallel', default=10, type=int, help='The number of hosts to run (Default: 10)')
parser.add_argument('-r', '--retries', default=1, type=int, help='Amount of times to retry (Default: 1)')
parser.add_argument('-u', '--user', default='root', help='The username (Default: root)')
parser.add_argument('-k', '--key', required=True, help='The Key file to use')
parser.add_argument('-v', '--verbose', default=False, action='store_true', help='Output activity to stdout')
parser.add_argument('-o', '--output', required=True, help='The output file')
args = vars(parser.parse_args())
def Get_Hosts_From_File(input_file):
    hosts = []
    for line in open(input_file, 'r'):
        # Skip blank lines and strip trailing newlines so pssh gets clean hostnames
        line = line.strip()
        if line:
            hosts.append(line)
    return hosts
def Scan_Hosts(args):
if args['verbose']:
pssh.utils.enable_host_logger()
private_key = paramiko.RSAKey.from_private_key_file(args['key'])
client = ParallelSSHClient(Get_Hosts_From_File(args['input']), pkey=private_key, pool_size=args['parallel'], timeout=args['timeout'], num_retries=args['retries'])
output = client.run_command(args['command'], sudo=args['sudo'], stop_on_errors=False)
    # Write one tab-separated row per line of output; a context manager makes sure the file is closed
    with open(args['output'], 'w') as f:
        f.write("Host\tOutput\n")
        for host in output:
            for line in output[host]['stdout']:
                f.write(host + "\t" + line + "\n")
Scan_Hosts(args)
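# Example invocation (hypothetical host list, key, and output file names):
#   python SSHScanner.py -i hosts.txt -k ~/.ssh/id_rsa -u root -c 'id' -o results.tsv -v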
|
sleventyeleven/badkeys_checker
|
SSHScanner.py
|
Python
|
gpl-2.0
| 1,896
|
[
"BLAST"
] |
f17921a8907946dc903e91567c7469890c2bf17b6b38f658a7caf1ac399d9d82
|
#!/usr/bin/env python
""" pure utilities (other)
generally useful functions for CaImAn
See Also
------------
https://docs.python.org/3/library/urllib.request.htm
"""
#\package Caiman/utils
#\version 1.0
#\bug
#\warning
#\copyright GNU General Public License v2.0
#\date Created on Tue Jun 30 21:01:17 2015
#\author: andrea giovannucci
#\namespace utils
#\pre none
import cv2
import h5py
import multiprocessing
import inspect
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import scipy
import subprocess
import tensorflow as tf
from scipy.ndimage.filters import gaussian_filter
from tifffile import TiffFile
from typing import Any, Dict, List, Tuple, Union, Iterable
try:
cv2.setNumThreads(0)
except:
pass
from urllib.request import urlopen
from ..external.cell_magic_wand import cell_magic_wand
from ..source_extraction.cnmf.spatial import threshold_components
from caiman.paths import caiman_datadir
import caiman.utils
#%%
def download_demo(name:str='Sue_2x_3000_40_-46.tif', save_folder:str='') -> str:
"""download a file from the file list with the url of its location
using urllib, you can add you own name and location in this global parameter
Args:
name: str
the path of the file correspondong to a file in the filelist (''Sue_2x_3000_40_-46.tif' or 'demoMovieJ.tif')
save_folder: str
folder inside ./example_movies to which the files will be saved. Will be created if it doesn't exist
Returns:
Path of the saved file
Raise:
WrongFolder Exception
"""
#\bug
#\warning
file_dict = {'Sue_2x_3000_40_-46.tif': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/Sue_2x_3000_40_-46.tif',
'demoMovieJ.tif': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/demoMovieJ.tif',
'demo_behavior.h5': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/demo_behavior.h5',
'Tolias_mesoscope_1.hdf5': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/Tolias_mesoscope_1.hdf5',
'Tolias_mesoscope_2.hdf5': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/Tolias_mesoscope_2.hdf5',
'Tolias_mesoscope_3.hdf5': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/Tolias_mesoscope_3.hdf5',
'data_endoscope.tif': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/data_endoscope.tif',
'gmc_960_30mw_00001_red.tif': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/gmc_960_30mw_00001_red.tif',
'gmc_960_30mw_00001_green.tif': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/gmc_960_30mw_00001_green.tif',
'msCam13.avi': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/msCam13.avi',
'alignment.pickle': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/alignment.pickle',
'data_dendritic.tif': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/2014-04-05-003.tif',
'blood_vessel_10Hz.mat': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/blood_vessel_10Hz.mat',
'online_vs_offline.npz': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/online_vs_offline.npz',
'demo_voltage_imaging_ROIs.hdf5': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/demo_voltage_imaging_ROIs.hdf5',
'demo_voltage_imaging.hdf5': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/demo_voltage_imaging.hdf5'}
# ,['./example_movies/demoMovie.tif','https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/demoMovie.tif']]
base_folder = os.path.join(caiman_datadir(), 'example_movies')
if os.path.exists(base_folder):
if not os.path.isdir(os.path.join(base_folder, save_folder)):
os.makedirs(os.path.join(base_folder, save_folder))
path_movie = os.path.join(base_folder, save_folder, name)
if not os.path.exists(path_movie):
url = file_dict[name]
logging.info(f"downloading {name} with urllib")
logging.info(f"GET {url} HTTP/1.1")
try:
f = urlopen(url)
except:
logging.info(f"Trying to set user agent to download demo")
from urllib.request import Request
req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
f = urlopen(req)
data = f.read()
with open(path_movie, "wb") as code:
code.write(data)
else:
logging.info("File " + str(name) + " already downloaded")
else:
raise Exception('Cannot find the example_movies folder in your caiman_datadir - did you make one with caimanmanager.py?')
return path_movie
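# Minimal usage sketch for download_demo (the save_folder name is an arbitrary example):
#   fname = download_demo('demoMovieJ.tif', save_folder='my_demos')
#   print(fname)  # path inside <caiman_datadir>/example_movies/my_demos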
def download_model(name:str='mask_rcnn', save_folder:str='') -> str:
"""download a NN model from the file list with the url of its location
using urllib, you can add you own name and location in this global parameter
Args:
name: str
the path of the file correspondong to a file in the filelist
save_folder: str
folder inside caiman_data/model to which the files will be saved. Will be created if it doesn't exist
Returns:
Path of the saved file
Raise:
WrongFolder Exception
"""
#\bug
#\warning
file_dict = {'mask_rcnn': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/model/mask_rcnn_neurons_0040.h5'}
base_folder = os.path.join(caiman_datadir(), 'model')
if os.path.exists(base_folder):
if not os.path.isdir(os.path.join(base_folder, save_folder)):
os.makedirs(os.path.join(base_folder, save_folder))
path_movie = os.path.join(base_folder, save_folder, name)
if not os.path.exists(path_movie):
url = file_dict[name]
logging.info(f"downloading {name} with urllib")
logging.info(f"GET {url} HTTP/1.1")
try:
f = urlopen(url)
except:
logging.info(f"Trying to set user agent to download demo")
from urllib.request import Request
req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
f = urlopen(req)
data = f.read()
with open(path_movie, "wb") as code:
code.write(data)
else:
logging.info("File " + str(name) + " already downloaded")
else:
raise Exception('Cannot find the model folder in your caiman_datadir - did you make one with caimanmanager.py?')
return path_movie
def val_parse(v):
"""parse values from si tags into python objects if possible from si parse
Args:
v: si tags
Returns:
v: python object
"""
try:
return eval(v)
except:
if v == 'true':
return True
elif v == 'false':
return False
elif v == 'NaN':
return np.nan
elif v == 'inf' or v == 'Inf':
return np.inf
else:
return v
def si_parse(imd:str) -> Dict:
"""parse image_description field embedded by scanimage from get image description
Args:
imd: image description
Returns:
imd: the parsed description
"""
imddata:Any = imd.split('\n')
imddata = [i for i in imddata if '=' in i]
imddata = [i.split('=') for i in imddata]
imddata = [[ii.strip(' \r') for ii in i] for i in imddata]
imddata = {i[0]: val_parse(i[1]) for i in imddata}
return imddata
def get_image_description_SI(fname:str) -> List:
"""Given a tif file acquired with Scanimage it returns a dictionary containing the information in the image description field
Args:
fname: name of the file
Returns:
image_description: information of the image
"""
image_descriptions = []
tf = TiffFile(fname)
for idx, pag in enumerate(tf.pages):
if idx % 1000 == 0:
logging.debug(idx) # progress report to the user
field = pag.tags['image_description'].value
image_descriptions.append(si_parse(field))
return image_descriptions
#%% Generate data
def gen_data(dims:Tuple[int,int]=(48, 48), N:int=10, sig:Tuple[int,int]=(3, 3), tau:float=1., noise:float=.3, T:int=2000,
framerate:int=30, firerate:float=.5, seed:int=3, cmap:bool=False, truncate:float=np.exp(-2),
difference_of_Gaussians:bool=True, fluctuating_bkgrd:List=[50, 300]) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, Tuple[int, int]]:
bkgrd = 10 # fluorescence baseline
np.random.seed(seed)
boundary = 4
M = int(N * 1.5)
# centers = boundary + (np.array(GeneralizedHalton(2, seed).get(M)) *
# (np.array(dims) - 2 * boundary)).astype('uint16')
centers = boundary + (np.random.rand(M, 2) *
(np.array(dims) - 2 * boundary)).astype('uint16')
trueA = np.zeros(dims + (M,), dtype='float32')
for i in range(M):
trueA[tuple(centers[i]) + (i,)] = 1.
if difference_of_Gaussians:
q = .75
for n in range(M):
s = (.67 + .33 * np.random.rand(2)) * np.array(sig)
tmp = gaussian_filter(trueA[:, :, n], s)
trueA[:, :, n] = np.maximum(tmp - gaussian_filter(trueA[:, :, n], q * s) *
q**2 * (.2 + .6 * np.random.rand()), 0)
else:
for n in range(M):
s = [ss * (.75 + .25 * np.random.rand()) for ss in sig]
trueA[:, :, n] = gaussian_filter(trueA[:, :, n], s)
trueA = trueA.reshape((-1, M), order='F')
trueA *= (trueA >= trueA.max(0) * truncate)
trueA /= np.linalg.norm(trueA, 2, 0)
keep = np.ones(M, dtype=bool)
overlap = trueA.T.dot(trueA) - np.eye(M)
while keep.sum() > N:
keep[np.argmax(overlap * np.outer(keep, keep)) % M] = False
trueA = trueA[:, keep]
trueS = np.random.rand(N, T) < firerate / float(framerate)
trueS[:, 0] = 0
for i in range(N // 2):
trueS[i, :500 + i * T // N * 2 // 3] = 0
trueC = trueS.astype('float32')
for i in range(N):
# * (.9 + .2 * np.random.rand())))
gamma = np.exp(-1. / (tau * framerate))
for t in range(1, T):
trueC[i, t] += gamma * trueC[i, t - 1]
if fluctuating_bkgrd:
K = np.array([[np.exp(-(i - j)**2 / 2. / fluctuating_bkgrd[0]**2)
for i in range(T)] for j in range(T)])
ch = np.linalg.cholesky(K + 1e-10 * np.eye(T))
truef = 1e-2 * ch.dot(np.random.randn(T)).astype('float32') / bkgrd
truef -= truef.mean()
truef += 1
K = np.array([[np.exp(-(i - j)**2 / 2. / fluctuating_bkgrd[1]**2)
for i in range(dims[0])] for j in range(dims[0])])
ch = np.linalg.cholesky(K + 1e-10 * np.eye(dims[0]))
trueb = 3 * 1e-2 * \
np.outer(
*ch.dot(np.random.randn(dims[0], 2)).T).ravel().astype('float32')
trueb -= trueb.mean()
trueb += 1
else:
truef = np.ones(T, dtype='float32')
trueb = np.ones(np.prod(dims), dtype='float32')
trueb *= bkgrd
Yr = np.outer(trueb, truef) + noise * np.random.randn(
* (np.prod(dims), T)).astype('float32') + trueA.dot(trueC)
if cmap:
import caiman as cm
Y = np.reshape(Yr, dims + (T,), order='F')
Cn = cm.local_correlations(Y)
plt.figure(figsize=(20, 3))
plt.plot(trueC.T)
plt.figure(figsize=(20, 3))
plt.plot((trueA.T.dot(Yr - bkgrd) / np.sum(trueA**2, 0).reshape(-1, 1)).T)
plt.figure(figsize=(12, 4))
plt.subplot(131)
plt.scatter(*centers[keep].T[::-1], c='g')
plt.scatter(*centers[~keep].T[::-1], c='r')
plt.imshow(Y[:T // 10 * 10].reshape(dims +
(T // 10, 10)).mean(-1).max(-1), cmap=cmap)
plt.title('Max')
plt.subplot(132)
plt.scatter(*centers[keep].T[::-1], c='g')
plt.scatter(*centers[~keep].T[::-1], c='r')
plt.imshow(Y.mean(-1), cmap=cmap)
plt.title('Mean')
plt.subplot(133)
plt.scatter(*centers[keep].T[::-1], c='g')
plt.scatter(*centers[~keep].T[::-1], c='r')
plt.imshow(Cn, cmap=cmap)
plt.title('Correlation')
plt.show()
return Yr, trueC, trueS, trueA, trueb, truef, centers, dims # XXX dims is always the same as passed into the function?
#%%
def save_object(obj, filename:str) -> None:
with open(filename, 'wb') as output:
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
def load_object(filename:str) -> Any:
with open(filename, 'rb') as input_obj:
obj = pickle.load(input_obj)
return obj
#%%
def apply_magic_wand(A, gSig, dims, A_thr=None, coms=None, dview=None,
min_frac=0.7, max_frac=1.0, roughness=2, zoom_factor=1,
center_range=2) -> np.ndarray:
""" Apply cell magic Wand to results of CNMF to ease matching with labels
Args:
A:
output of CNMF
gSig: tuple
input of CNMF (half neuron size)
A_thr:
thresholded version of A
coms:
centers of the magic wand
dview:
for parallelization
min_frac:
fraction of minimum of gSig to take as minimum size
max_frac:
multiplier of maximum of gSig to take as maximum size
Returns:
masks: ndarray
binary masks
"""
if (A_thr is None) and (coms is None):
import pdb
pdb.set_trace()
A_thr = threshold_components(
A.tocsc()[:], dims, medw=None, thr_method='max',
maxthr=0.2, nrgthr=0.99, extract_cc=True,se=None,
ss=None, dview=dview)>0
coms = [scipy.ndimage.center_of_mass(mm.reshape(dims, order='F')) for
mm in A_thr.T]
if coms is None:
coms = [scipy.ndimage.center_of_mass(mm.reshape(dims, order='F')) for
mm in A_thr.T]
    min_radius = np.round(np.min(gSig)*min_frac).astype(int)
    max_radius = np.round(max_frac*np.max(gSig)).astype(int)
params = []
for idx in range(A.shape[-1]):
params.append([A.tocsc()[:,idx].toarray().reshape(dims, order='F'),
coms[idx], min_radius, max_radius, roughness, zoom_factor, center_range])
logging.debug(len(params))
if dview is not None:
masks = np.array(list(dview.map(cell_magic_wand_wrapper, params)))
else:
masks = np.array(list(map(cell_magic_wand_wrapper, params)))
return masks
def cell_magic_wand_wrapper(params):
a, com, min_radius, max_radius, roughness, zoom_factor, center_range = params
msk = cell_magic_wand(a, com, min_radius, max_radius, roughness,
zoom_factor, center_range)
return msk
#%% From https://codereview.stackexchange.com/questions/120802/recursively-save-python-dictionaries-to-hdf5-files-using-h5py
def save_dict_to_hdf5(dic:Dict, filename:str, subdir:str='/') -> None:
''' Save dictionary to hdf5 file
Args:
dic: dictionary
input (possibly nested) dictionary
filename: str
file name to save the dictionary to (in hdf5 format for now)
'''
with h5py.File(filename, 'w') as h5file:
recursively_save_dict_contents_to_group(h5file, subdir, dic)
def load_dict_from_hdf5(filename:str) -> Dict:
''' Load dictionary from hdf5 file
Args:
filename: str
input file to load
Returns:
dictionary
'''
with h5py.File(filename, 'r') as h5file:
return recursively_load_dict_contents_from_group(h5file, '/')
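# Round-trip sketch for the two helpers above (file path and dictionary contents are arbitrary examples):
#   d = {'a': 1, 'b': np.arange(5), 'nested': {'c': 'hello'}}
#   save_dict_to_hdf5(d, '/tmp/example_dict.hdf5')
#   d2 = load_dict_from_hdf5('/tmp/example_dict.hdf5')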
def recursively_save_dict_contents_to_group(h5file:h5py.File, path:str, dic:Dict) -> None:
'''
Args:
h5file: hdf5 object
hdf5 file where to store the dictionary
path: str
path within the hdf5 file structure
dic: dictionary
dictionary to save
'''
# argument type checking
if not isinstance(dic, dict):
raise ValueError("must provide a dictionary")
if not isinstance(path, str):
raise ValueError("path must be a string")
if not isinstance(h5file, h5py._hl.files.File):
raise ValueError("must be an open h5py file")
# save items to the hdf5 file
for key, item in dic.items():
key = str(key)
if key == 'g':
if item is None:
item = 0
logging.info(key + ' is an object type')
            try:
                item = np.array(list(item))
            except Exception:
                item = np.asarray(item, dtype=float)
        if key == 'g_tot':
            item = np.asarray(item, dtype=float)
if key in ['groups', 'idx_tot', 'ind_A', 'Ab_epoch', 'coordinates',
'loaded_model', 'optional_outputs', 'merged_ROIs', 'tf_in',
'tf_out', 'empty_merged']:
logging.info('Key {} is not saved.'.format(key))
continue
if isinstance(item, list) or isinstance(item, tuple):
if len(item) > 0 and all(isinstance(elem, str) for elem in item):
item = np.string_(item)
else:
item = np.array(item)
if not isinstance(key, str):
raise ValueError("dict keys must be strings to save to hdf5")
# save strings, numpy.int64, numpy.int32, and numpy.float64 types
        if isinstance(item, (np.int64, np.int32, np.float64, str, float, np.float32, int)):
h5file[path + key] = item
logging.debug('Saving {}'.format(key))
if not h5file[path + key][()] == item:
raise ValueError('Error while saving {}.'.format(key))
# save numpy arrays
elif isinstance(item, np.ndarray):
logging.debug('Saving {}'.format(key))
try:
h5file[path + key] = item
            except Exception:
                # fall back to fixed-width byte strings (e.g. object or string arrays)
                item = np.array(item).astype('|S32')
h5file[path + key] = item
if not np.array_equal(h5file[path + key][()], item):
raise ValueError('Error while saving {}.'.format(key))
# save dictionaries
elif isinstance(item, dict):
recursively_save_dict_contents_to_group(h5file, path + key + '/', item)
elif 'sparse' in str(type(item)):
logging.info(key + ' is sparse ****')
h5file[path + key + '/data'] = item.tocsc().data
h5file[path + key + '/indptr'] = item.tocsc().indptr
h5file[path + key + '/indices'] = item.tocsc().indices
h5file[path + key + '/shape'] = item.tocsc().shape
# other types cannot be saved and will result in an error
elif item is None or key == 'dview':
h5file[path + key] = 'NoneType'
elif key in ['dims', 'medw', 'sigma_smooth_snmf', 'dxy', 'max_shifts',
'strides', 'overlaps', 'gSig']:
logging.info(key + ' is a tuple ****')
h5file[path + key] = np.array(item)
elif type(item).__name__ in ['CNMFParams', 'Estimates']: # parameter object
recursively_save_dict_contents_to_group(h5file, path + key + '/', item.__dict__)
else:
raise ValueError("Cannot save %s type for key '%s'." % (type(item), key))
def recursively_load_dict_contents_from_group(h5file:h5py.File, path:str) -> Dict:
'''load dictionary from hdf5 object
Args:
h5file: hdf5 object
object where dictionary is stored
path: str
path within the hdf5 file
'''
ans:Dict = {}
for key, item in h5file[path].items():
if isinstance(item, h5py._hl.dataset.Dataset):
val_set = np.nan
if isinstance(item[()], str):
if item[()] == 'NoneType':
ans[key] = None
else:
ans[key] = item[()]
elif key in ['dims', 'medw', 'sigma_smooth_snmf', 'dxy', 'max_shifts', 'strides', 'overlaps']:
if type(item[()]) == np.ndarray:
ans[key] = tuple(item[()])
else:
ans[key] = item[()]
else:
if type(item[()]) == np.bool_:
ans[key] = bool(item[()])
else:
ans[key] = item[()]
elif isinstance(item, h5py._hl.group.Group):
if key in ('A', 'W', 'Ab', 'downscale_matrix', 'upscale_matrix'):
data = item[path + key + '/data']
indices = item[path + key + '/indices']
indptr = item[path + key + '/indptr']
shape = item[path + key + '/shape']
ans[key] = scipy.sparse.csc_matrix((data[:], indices[:],
indptr[:]), shape[:])
if key in ('W', 'upscale_matrix'):
ans[key] = ans[key].tocsr()
else:
ans[key] = recursively_load_dict_contents_from_group(h5file, path + key + '/')
return ans
def fun(f, q_in, q_out):
    # Worker loop for parmap: pull (index, item) pairs from q_in, apply f, and
    # push (index, result) to q_out until a (None, None) sentinel is received.
while True:
i, x = q_in.get()
if i is None:
break
q_out.put((i, f(x)))
def parmap(f, X, nprocs=multiprocessing.cpu_count()):
q_in = multiprocessing.Queue(1)
q_out = multiprocessing.Queue()
proc = [multiprocessing.Process(target=fun, args=(f, q_in, q_out))
for _ in range(nprocs)]
for p in proc:
p.daemon = True
p.start()
sent = [q_in.put((i, x)) for i, x in enumerate(X)]
[q_in.put((None, None)) for _ in range(nprocs)]
res = [q_out.get() for _ in range(len(sent))]
[p.join() for p in proc]
return [x for i, x in sorted(res)]
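# Hedged usage sketch (illustrative): parmap farms a picklable, module-level function
# out to a small pool of worker processes; a lambda may not survive the 'spawn'
# start method, so a named function is used here.
#
#     def _square(x):
#         return x * x
#
#     parmap(_square, range(8), nprocs=2)   # -> [0, 1, 4, 9, 16, 25, 36, 49]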
def load_graph(frozen_graph_filename):
""" Load a tensorflow .pb model and use it for inference"""
# We load the protobuf file from the disk and parse it to retrieve the
# unserialized graph_def
with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
# Then, we can use again a convenient built-in function to import a
# graph_def into the current default Graph
with tf.Graph().as_default() as graph:
tf.import_graph_def(
graph_def,
input_map=None,
return_elements=None,
name="prefix",
producer_op_list=None
)
return graph
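# Hedged usage sketch (TF1-style inference; the tensor names 'prefix/input:0' and
# 'prefix/output:0' are assumptions that depend on the frozen model):
#
#     graph = load_graph('/path/to/frozen_model.pb')
#     x = graph.get_tensor_by_name('prefix/input:0')
#     y = graph.get_tensor_by_name('prefix/output:0')
#     with tf.Session(graph=graph) as sess:
#         predictions = sess.run(y, feed_dict={x: batch})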
def get_caiman_version() -> Tuple[str, str]:
""" Get the version of CaImAn, as best we can determine"""
# This does its best to determine the version of CaImAn. This uses the first successful
# from these methods:
# 'GITW' ) git rev-parse if caiman is built from "pip install -e ." and we are working
# out of the checkout directory (the user may have since updated without reinstall)
# 'RELF') A release file left in the process to cut a release. Should have a single line
    #        in it which looks like "Version:1.4"
# 'FILE') The date of some frequently changing files, which act as a very rough
# approximation when no other methods are possible
#
# Data is returned as a tuple of method and version, with method being the 4-letter string above
# and version being a format-dependent string
# Attempt 'GITW'.
# TODO:
# A) Find a place to do it that's better than cwd
# B) Hide the output from the terminal
try:
rev = subprocess.check_output(["git", "rev-parse", "HEAD"], stderr=subprocess.DEVNULL).decode("utf-8").split("\n")[0]
except:
rev = None
if rev is not None:
return 'GITW', rev
# Attempt: 'RELF'
relfile = os.path.join(caiman_datadir(), 'RELEASE')
if os.path.isfile(relfile):
with open(relfile, 'r') as sfh:
for line in sfh:
if ':' in line: # expect a line like "Version:1.3"
_, version = line.rstrip().split(':')
return 'RELF', version
# Attempt: 'FILE'
# Right now this samples the utils directory
modpath = os.path.dirname(inspect.getfile(caiman.utils)) # Probably something like /mnt/home/pgunn/miniconda3/envs/caiman/lib/python3.7/site-packages/caiman
newest = 0
for fn in os.listdir(modpath):
last_modified = os.stat(os.path.join(modpath, fn)).st_mtime
if last_modified > newest:
newest = last_modified
return 'FILE', str(int(newest))
|
agiovann/Constrained_NMF
|
caiman/utils/utils.py
|
Python
|
gpl-2.0
| 25,059
|
[
"NEURON"
] |
032f693cb7c40ef625a38ae2e34f256c62a7e7bafa033c8927965623682f936a
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'DataDomain.order'
db.add_column(u'profiles_datadomain', 'order', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'DataDomain.order'
db.delete_column(u'profiles_datadomain', 'order')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 11, 10, 59, 58, 243028)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 11, 10, 59, 58, 242620)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maps.shapefile': {
'Meta': {'object_name': 'ShapeFile'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geo_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geo_meta_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'geom_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'shape_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zoom_threshold': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'profiles.customvalue': {
'Meta': {'object_name': 'CustomValue'},
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_value': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'supress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_operator': ('django.db.models.fields.CharField', [], {'max_length': "'255'"})
},
u'profiles.datadomain': {
'Meta': {'ordering': "['weight']", 'object_name': 'DataDomain'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'order': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'subdomain_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subdomains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.datadomainindex': {
'Meta': {'ordering': "['order']", 'object_name': 'DataDomainIndex'},
'dataDomain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
'groups': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'profiles.datapoint': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'},
'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']", 'null': 'True'})
},
u'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'profiles.denominator': {
'Meta': {'object_name': 'Denominator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.denominatorpart': {
'Meta': {'object_name': 'DenominatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.IndicatorPart']"})
},
u'profiles.flatvalue': {
'Meta': {'object_name': 'FlatValue'},
'display_title': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'f_moe': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_number': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_percent': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'geography': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'geography_name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'geography_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'geometry_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'indicator_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'time_key': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'value_type': ('django.db.models.fields.CharField', [], {'max_length': "'100'"})
},
u'profiles.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataSource']", 'symmetrical': 'False', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'shapefile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.ShapeFile']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}),
'summary_level': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'profiles.georecord': {
'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geo_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
},
u'profiles.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
u'profiles.groupindex': {
'Meta': {'ordering': "['order']", 'object_name': 'GroupIndex'},
'groups': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': u"orm['profiles.Indicator']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'profiles.indicator': {
'Meta': {'object_name': 'Indicator'},
'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_generated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'default': "'U.S. Census Bureau'", 'max_length': '300', 'blank': 'True'}),
'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
u'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']"})
},
u'profiles.legendoption': {
'Meta': {'object_name': 'LegendOption'},
'bin_options': ('django.db.models.fields.TextField', [], {'default': "''"}),
'bin_type': ('django.db.models.fields.CharField', [], {'default': "'jenks'", 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.precalculatedvalue': {
'Meta': {'object_name': 'PrecalculatedValue'},
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'geo_record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'table': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'profiles.taskstatus': {
'Meta': {'object_name': 'TaskStatus'},
'error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
't_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'profiles.time': {
'Meta': {'object_name': 'Time'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
},
u'profiles.value': {
'Meta': {'object_name': 'Value'},
'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataPoint']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
}
}
complete_apps = ['profiles']
|
216software/Profiles
|
communityprofiles/profiles/oldmigrations/0068_auto__add_field_datadomain_order.py
|
Python
|
mit
| 22,873
|
[
"MOE"
] |
127ede2b8dbe5d0c866ecaa6cae1af7b87aa197458d48f5fe1f29eb6ff93a845
|
import unittest
import test_vers
from streamsx.topology.topology import *
from streamsx.topology import context
from streamsx.topology.schema import CommonSchema
from streamsx.topology.tester import Tester
from streamsx.spl import op
import time
import os
import datetime
class Person(object):
def __init__(self, name, birth_year):
self.name = name
self._birth_year = birth_year
def birth_year(self):
return self._birth_year
expected_contents = """8
Punctuation received: WindowMarker
9
Punctuation received: WindowMarker
10
Punctuation received: WindowMarker
11
Punctuation received: WindowMarker
Punctuation received: FinalMarker
"""
class TimeCounter(object):
"""Count up from zero every `period` seconds for a given number of
iterations."""
def __init__(self, period=None, iterations=None):
if period is None:
period = 1.0
self.period = period
self.iterations = iterations
self.count = 0
def __iter__(self):
return self
def __next__(self):
# If the number of iterations has been met, stop iterating.
if self.iterations is not None and self.count >= self.iterations:
raise StopIteration
# Otherwise increment, sleep, and return.
to_return = self.count
self.count += 1
time.sleep(self.period)
return to_return
def next(self):
return self.__next__()
class TriggerDiff(object):
"""Given any input, returns the timespan (in seconds) between now
and the last time the TriggerDiff callable was invoked."""
def __init__(self):
self.last = None
def __call__(self, param):
# Record the last time
_last = self.last
# Set the current time
self.last = time.time()
        # Handle the first invocation
if _last is None:
return None
# Return the time diff
return self.last - _last
class TupleTimespanCheck(object):
"""Checks whether each item in a list passed to the callable has a timestamp marked after
a certain point in time"""
def __init__(self, span):
self.span = span
def __call__(self, items):
mark = time.time() - self.span
return all([mark < item[1] for item in items])
# Given a value, a tolerance, and the expected value, return true iff the value is
# within the margin of error.
within_tolerance = lambda val, tol, exp: val < exp + (tol*exp) and val > exp - (tol*exp)
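# Hedged example (illustrative): within_tolerance(0.55, 0.20, 0.5) is True, since
# 0.55 falls inside the +/-20% band (0.4, 0.6) around the expected value 0.5.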
@unittest.skipIf(not test_vers.tester_supported() , "Tester not supported")
class TestPythonWindowing(unittest.TestCase):
def setUp(self):
Tester.setup_standalone(self)
def test_BasicCountCountWindow(self):
topo = Topology()
s = topo.source([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
# Need a float cast to make the value consistent with Python 2/3
s = s.last(10).trigger(2).aggregate(lambda x: float(sum(x))/float(len(x)))
tester = Tester(topo)
tester.contents(s, [1.5,2.5,3.5,4.5,5.5,7.5,9.5])
tester.test(self.test_ctxtype, self.test_config)
def test_BasicCountTimeWindow(self):
# Aggregate every 0.5 seconds
aggregate_period = 0.5
        # Check that each aggregation is triggered at the right time, with a maximum 20% error
tolerance = 0.20
topo = Topology()
s = topo.source(TimeCounter(iterations = 10))
s = s.last(1).trigger(datetime.timedelta(seconds=aggregate_period)).aggregate(TriggerDiff())
tester = Tester(topo)
tester.tuple_check(s, lambda val: within_tolerance(val, tolerance, aggregate_period))
tester.test(self.test_ctxtype, self.test_config)
def test_BasicTimeCountWindow(self):
# Ensure tuples are evicted no later than (window_span*tolerance + window_span)
tolerance = 0.20
window_span = 2.0
max_evict = window_span*tolerance + window_span
topo = Topology()
s = topo.source(TimeCounter(iterations = 100, period = 0.1))
s = s.map(lambda x: (x, time.time()))
s = s.last(datetime.timedelta(seconds=window_span)).trigger(20).aggregate(TupleTimespanCheck(max_evict))
tester = Tester(topo)
tester.tuple_check(s, lambda val: val)
tester.test(self.test_ctxtype, self.test_config)
def test_JsonInputCountCountWindow(self):
topo = Topology()
s = topo.source([{'a':1},{'b':2,'c':3}, {'d': 4, 'e': 5}])
# Check the averages of the values of the Json objects
s = s.map(lambda x: x, schema = CommonSchema.Json)
s = s.last(3).trigger(1).aggregate(lambda tuples: [[set(tup.keys()), sum(tup.values())] for tup in tuples])
tester = Tester(topo)
tester.contents(s, [ [[{'a'},1]],
[[{'a'},1], [{'c','b'}, 5]],
[[{'a'},1], [{'c','b'}, 5], [{'d','e'}, 9]]
])
tester.test(self.test_ctxtype, self.test_config)
def test_StringInputCountCountWindow(self):
topo = Topology()
s = topo.source(['1','3','5','7'])
s = s.map(lambda x: x, schema = CommonSchema.String)
s = s.last(3).trigger(1).aggregate(lambda tuples: ''.join(tuples))
tester = Tester(topo)
tester.contents(s, ['1','13','135','357'])
tester.test(self.test_ctxtype, self.test_config)
def test_NotByRefWindow(self):
topo = Topology()
s = topo.source(['1','3','5','7'])
# Used to prevent pass by ref for the source
f = s.filter(lambda x: True)
s = s.last(3).trigger(4).aggregate(lambda x: int(sum([int(s) for s in x])/len(x)))
tester = Tester(topo)
tester.contents(s, [5])
tester.test(self.test_ctxtype, self.test_config)
def test_ClassCountCountWindow(self):
topo = Topology()
current_year = time.localtime().tm_year
s = topo.source([
['Wallace', 1962],
['Copernicus', 1473],
['Feynman', 1918],
['Dirac', 1902],
['Pauli', 1900],
['Frenkel', 1968],
['Terence Tao', 1975]
])
s = s.map(lambda x: Person(x[0], x[1]))
s = s.last(3).trigger(1).aggregate(lambda x: int(sum([p.birth_year() for p in x])/len(x)))
tester = Tester(topo)
tester.contents(s, [1962, 1717, 1784, 1764, 1906, 1923, 1947])
tester.test(self.test_ctxtype, self.test_config)
# Windowing doesn't currently support the 'dict' type.
@unittest.expectedFailure
def test_DictInputWindow(self):
topo = Topology()
s = topo.source([1,2,3,4])
s = s.map(lambda x: ('a', x), schema = "tuple<rstring a, int32 b>")
# Canned aggregate
s = s.last(3).trigger(4).aggregate(lambda x: 0),
tester = Tester(topo)
tester.test(self.test_ctxtype, self.test_config)
def test_WindowPunctuation(self):
"""Trigger an aggregation 4 times. Ensure that window punctuations are submitted each time
by writing them to an output file, and then verifying that the file contains the correct
contents."""
topo = Topology()
s = topo.source([1,2,3,4])
# Aggregate and write to file.
s = s.last(1).trigger(1).aggregate(lambda x: x[0]+7)
# Ensure map/flat_map/filter passes window marks through.
s = s.flat_map(lambda x : [x])
s = s.filter(lambda x : True)
s = s.map(lambda x : (x,), schema='tuple<int32 z>')
op_params = {'file' : 'punct_file', 'writePunctuations' : True, 'flushOnPunctuation' : True}
op.Sink("spl.adapter::FileSink", s, params = op_params)
# Copy the config, since it's shared across all tests, and not every test needs a data
# directory.
cfg = self.test_config.copy()
jc = context.JobConfig(data_directory=os.getcwd())
jc.add(cfg)
tester = Tester(topo)
tester.test(self.test_ctxtype, cfg)
path = os.path.join(os.getcwd(), 'punct_file')
# Validate the contents of the file.
with open(path, 'r') as f:
file_contents = f.read()
self.assertEqual(expected_contents, file_contents)
os.remove(path)
if __name__ == '__main__':
unittest.main()
|
wmarshall484/streamsx.topology
|
test/python/topology/test2_python_window.py
|
Python
|
apache-2.0
| 8,492
|
[
"DIRAC"
] |
53f087963cd1df1fd62a93e32e76732351d37401cb29896f486186b0b864203d
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Unit tests for data_ingestion.py"""
__version__ = '$Id: 568a6aaef622d61870862a94f33f8c743cf2f7f3 $'
import os
import unittest
import test_utils
import pywikibot
import data_ingestion
class TestPhoto(unittest.TestCase):
def setUp(self):
self.obj = data_ingestion.Photo(URL='http://upload.wikimedia.org/wikipedia/commons/f/fc/MP_sounds.png',
metadata={'description.en': '"Sounds" icon',
'source': 'http://commons.wikimedia.org/wiki/File:Sound-icon.svg',
'author': 'KDE artists | Silstor',
'license': 'LGPL',
'set': 'Crystal SVG icon set',
'name': 'Sound icon'}
)
def test_downloadPhoto(self):
f = open(os.path.join(os.path.split(__file__)[0], 'data', 'MP_sounds.png'))
self.assertEqual(f.read(), self.obj.downloadPhoto().read())
def test_findDuplicateImages(self):
duplicates = self.obj.findDuplicateImages()
self.assertIn('MP sounds.png', [dup.replace("_", " ") for dup in duplicates])
def test_getTitle(self):
self.assertEqual(self.obj.getTitle("%(name)s - %(set)s.%(_ext)s"), "Sound icon - Crystal SVG icon set.png")
def test_getDescription(self):
self.assertEqual(self.obj.getDescription('CrystalTemplate'),
"""{{CrystalTemplate
|author=KDE artists {{!}} Silstor
|description.en="Sounds" icon
|license=LGPL
|name=Sound icon
|set=Crystal SVG icon set
|source=http://commons.wikimedia.org/wiki/File:Sound-icon.svg
}}""")
class TestCSVReader(unittest.TestCase):
def setUp(self):
fileobj = open(os.path.join(os.path.split(__file__)[0], 'data', 'csv_ingestion.csv'))
self.iterator = data_ingestion.CSVReader(fileobj, 'url')
self.obj = self.iterator.next()
def test_PhotoURL(self):
self.assertEqual(self.obj.URL, 'http://upload.wikimedia.org/wikipedia/commons/f/fc/MP_sounds.png')
def test_getTitle(self):
self.assertEqual(self.obj.getTitle("%(name)s - %(set)s.%(_ext)s"), "Sound icon - Crystal SVG icon set.png")
def test_getDescription(self):
self.assertEqual(self.obj.getDescription('CrystalTemplate'),
"""{{CrystalTemplate
|author=KDE artists {{!}} Silstor
|description.en="Sounds" icon
|license=LGPL
|name=Sound icon
|set=Crystal SVG icon set
|source=http://commons.wikimedia.org/wiki/File:Sound-icon.svg
|url=http://upload.wikimedia.org/wikipedia/commons/f/fc/MP_sounds.png
}}""")
if __name__ == "__main__":
unittest.main()
|
races1986/SafeLanguage
|
CEM/tests/test_data_ingestion.py
|
Python
|
epl-1.0
| 2,763
|
[
"CRYSTAL"
] |
b50b3cd58ad0e75101a1c828524c48546dcc154cbe8d948d92c629c12cf34ed0
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
"""Analysis of results from mdevents_optimize.py and others
@author: Janik Zikovsky
"""
#This line has to be first for some reason.
#from enthought.mayavi import mlab
from pylab import *
import os
import sys
import time
import numpy as np
import pickle
import argparse
from scipy import stats
class Params:
def __init__(self):
self.NumberEvents = 0
self.MaxRecursionDepth = 10
self.MakeTime = 0
self.CoarseBinTime = 0
self.MediumBinTime = 0
self.FineBinTime = 0
self.MemoryUsed = 0
results = []
#========================================================================================================
def plot_results_vs_other(results, x_field, y_field, other_field, extra_title=""):
""" Function to plot Y vs X of anything. It accesses the members of "results" to plot them.
other_field is used to separate by another field, and make separate line plots for each"""
others = set()
for par in results:
others.add( eval('par.%s' % other_field) )
others = list(others)
others.sort()
figure()
for other in others:
data = []
for par in results:
this_other = eval('par.%s' % other_field)
if this_other == other:
x = eval('par.%s' % x_field)
y = eval('par.%s' % y_field)
data.append( (x,y) )
data.sort()
xs = [x for (x,y) in data]
ys = [y for (x,y) in data]
p = plot(xs,ys, marker='.', label="%s = %f" % (other_field, other))
if extra_title != "": extra_title = "\n" + extra_title
title("%s vs %s%s" % (y_field, x_field, extra_title) );
xlabel(x_field)
ylabel(y_field)
legend(loc='best')
savefig("%s_vs_%s.png" % (y_field, x_field));
#========================================================================================================
def plot_results_with_slope(results, x_field, y_field, x_scale=1):
""" Function to plot Y vs X of anything. It accesses the members of "results" to plot them.
other_field is used to separate by another field, and make separate line plots for each
@param x_scale :: multiply x by this amount
"""
figure()
data = []
for par in results:
x = eval('par.%s' % x_field)
y = eval('par.%s' % y_field)
data.append( (x,y) )
data.sort()
xs = [x*x_scale for (x,y) in data]
ys = [y for (x,y) in data]
# Now get the slope
gradient, intercept, r_value, p_value, std_err = stats.linregress(xs,ys)
p = plot(xs,ys, marker='.', label="y = %.3gx + %.3g" % (gradient, intercept))
title("%s vs %s" % (y_field, x_field));
xlabel("%s x %s" % (x_field, x_scale) )
ylabel(y_field)
legend(loc='best')
savefig("%s_vs_%s.png" % (y_field, x_field));
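# Hedged usage sketch (illustrative; the Params attribute values below are made up):
#
#     demo = []
#     for n in (1e6, 2e6, 4e6):
#         p = Params()
#         p.NumberEvents = n
#         p.MakeTime = 3.0e-8 * n
#         demo.append(p)
#     plot_results_with_slope(demo, "NumberEvents", "MakeTime", x_scale=1e-9)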
#========================================================================================================
def do_analysis(file_list, type):
# Load back the results
results = []
for filename in file_list:
        f = open(filename, 'rb')
these_results = pickle.load(f)
results += these_results
f.close()
if type == 1:
plot_results_vs_other(results, "SplitInto", "MakeTime", "SplitThresholdBase")
plot_results_vs_other(results, "SplitInto", "MemoryUsed", "SplitThresholdBase")
plot_results_vs_other(results, "SplitInto", "CoarseBinTime", "SplitThresholdBase")
plot_results_vs_other(results, "SplitInto", "MediumBinTime", "SplitThresholdBase")
plot_results_vs_other(results, "SplitInto", "FineBinTime", "SplitThresholdBase")
elif type == 2:
plot_results_with_slope(results, "NumberEvents", "MakeTime", x_scale=1e-9)
plot_results_with_slope(results, "NumberEvents", "MemoryUsed", x_scale=1e-9)
plot_results_with_slope(results, "NumberEvents", "CoarseBinTime", x_scale=1e-9)
plot_results_with_slope(results, "NumberEvents", "MediumBinTime", x_scale=1e-9)
plot_results_with_slope(results, "NumberEvents", "FineBinTime", x_scale=1e-9)
elif type == 3:
extra_title = "Binary Splitting Method"
plot_results_vs_other(results, "SplitThreshold", "MakeTime", "SplitInto", extra_title)
plot_results_vs_other(results, "SplitThreshold", "MemoryUsed", "SplitInto", extra_title)
plot_results_vs_other(results, "SplitThreshold", "CoarseBinTime", "SplitInto", extra_title)
plot_results_vs_other(results, "SplitThreshold", "MediumBinTime", "SplitInto", extra_title)
plot_results_vs_other(results, "SplitThreshold", "FineBinTime", "SplitInto", extra_title)
show()
#========================================================================================================
if __name__=="__main__":
# parser = argparse.ArgumentParser(description='Analyze results from MDEvents optimization')
# parser.add_argument('files', metavar='FILES', type=str,
# help='The .dat results file')
# parser.add_argument('--force', dest='force', action='store_const',
# const=True, default=False,
# help='Force overwriting existing files. Use with caution!')
# args = parser.parse_args()
file_list = ["optimize_results1.dat"]
do_analysis(file_list, 1)
|
mganeva/mantid
|
Framework/DataObjects/scripts/analysis.py
|
Python
|
gpl-3.0
| 5,578
|
[
"Mayavi"
] |
1d82261ce5c3af97e6b3eb40a61a6f31ae6b531ca645e011042c15947c092bad
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007-2014 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
""" A transfer receipt implementation """
import datetime
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.reporting.report import HTMLReport, ObjectListReport
_ = stoqlib_gettext
class TransferOrderReceipt(HTMLReport):
"""Transfer Order receipt
This class builds the namespace used in template
"""
template_filename = 'transfer/transfer.html'
title = _("Transfer Receipt")
complete_header = False
def __init__(self, filename, order):
self.order = order
HTMLReport.__init__(self, filename)
def get_namespace(self):
total = 0
for item in self.order.get_items():
total += item.quantity
return dict(subtitle="Transfer number: %s" % (self.order.identifier, ),
order=self.order, total=total)
def adjust_for_test(self):
        date = datetime.date(2012, 1, 1)
self.order.open_date = date
self.order.receival_date = date
self.order.identifier = 50
self.logo_data = 'logo.png'
class TransferOrderReport(ObjectListReport):
title = _("Transfer report")
main_object_name = (_("transfer"), _("transfers"))
class TransferItemReport(ObjectListReport):
title = _("Transfer item report")
main_object_name = (_("transfer item"), _("transfer items"))
|
tiagocardosos/stoq
|
stoqlib/reporting/transfer.py
|
Python
|
gpl-2.0
| 2,250
|
[
"VisIt"
] |
5ec5f1a9708e56fcc75f9b033d2238dad2807b8c500b3697ce48d0f11fc3f502
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import re
import math
from decimal import Decimal
from collections import defaultdict
from .exceptions import *
from .pdict import PreservingDict
from . import qcformat
from . import molpro_basissets
from . import options
def harvest_output(outtext):
"""Function to read MRCC output file *outtext* and parse important
quantum chemical information from it in
"""
psivar = PreservingDict()
psivar_coord = None
psivar_grad = None
NUMBER = "((?:[-+]?\\d*\\.\\d+(?:[DdEe][-+]?\\d+)?)|(?:[-+]?\\d+\\.\\d*(?:[DdEe][-+]?\\d+)?))"
# <<< Process NRE >>>
mobj = re.search(
r'^\s*' + r'(?:NUCLEAR REPULSION ENERGY)' + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE)
if mobj:
#print('matched nrc')
psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1)
# <<< Process SCF >>>
#mobj = re.search(
# r'^\s*' + r'(?:Energy of reference determinant (?:\[au\]|/au/):)' + r'\s+' + NUMBER + r'\s*$',
# outtext, re.MULTILINE)
#if mobj:
# print('matched scf')
# psivar['SCF TOTAL ENERGY'] = mobj.group(1)
# <<< Process MP2 >>>
mobj = re.search(
r'^\s*' + r'Reference energy[:]?\s+' + NUMBER + r'\s*' +
r'^\s*' + r'MP2 singlet pair energy[:]?\s+' + NUMBER + r'\s*' +
r'^\s*' + r'MP2 triplet pair energy[:]?\s+' + NUMBER + r'\s*' +
r'^\s*' + r'MP2 correlation energy[:]?\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE)
if mobj:
#print('matched mp2')
psivar['HF TOTAL ENERGY'] = mobj.group(1)
psivar['MP2 CORRELATION ENERGY'] = mobj.group(4)
psivar['MP2 TOTAL ENERGY'] = Decimal(mobj.group(1)) + Decimal(mobj.group(4))
psivar['MP2 SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(3)) * \
Decimal(2) / Decimal(3)
psivar['MP2 OPPOSITE-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(4)) - \
psivar['MP2 SAME-SPIN CORRELATION ENERGY']
# <<< Process SAPT-like >>>
mobj = re.search(
#r'^\s+' + r'E1pol\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r')\s+' + NUMBER + r'\s+' + NUMBER + '\s*'
#r'^\s+' + r'E1exch\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r')\s+' + NUMBER + r'\s+' + NUMBER + '\s*'
#r'^\s+' + r'E1exch\(S2\)\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r')\s+' + NUMBER + r'\s+' + NUMBER + '\s*'
#r'^\s+' + r'E2ind\(unc\)\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r')\s+' + NUMBER + r'\s+' + NUMBER + '\s*'
#r'^\s+' + r'E2ind\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r')\s+' + NUMBER + r'\s+' + NUMBER + '\s*'
#r'^\s+' + r'E2ind-exch\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r')\s+' + NUMBER + r'\s+' + NUMBER + '\s*'
#r'^\s+' + r'E2disp\(unc\)\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r')\s+' + NUMBER + r'\s+' + NUMBER + '\s*'
# r'^\s+' + r'E2disp\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r'\)\s+' + NUMBER + r'\s+' + NUMBER + '\s*',
r'^\s+' + r'E2disp\s+' + NUMBER + r'.*$',
#r'^\s+' + r'E2disp-exch\(unc\)\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r')\s+' + NUMBER + r'\s+' + NUMBER + '\s*'
#r'^\s+' + r'E2disp-exc\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r')\s+' + NUMBER + r'\s+' + NUMBER + '\s*'
outtext, re.MULTILINE)
if mobj:
#print('matched sapt-like')
psivar['MP2C DISP20 ENERGY'] = Decimal(mobj.group(1)) / Decimal(1000)
# <<< Process SCF-F12 >>>
mobj = re.search(
r'^\s+' + r'CABS-singles contribution of\s+' + NUMBER + r'\s+patched into reference energy.\s*' +
r'^\s+' + r'New reference energy\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE)
if mobj:
#print('matched scff12')
psivar['SCF TOTAL ENERGY'] = Decimal(mobj.group(2)) - Decimal(mobj.group(1))
psivar['HF-CABS TOTAL ENERGY'] = mobj.group(2)
# <<< Process MP2-F12 >>>
# DF-MP2-F12 correlation energies:
# --------------------------------
# Approx. Singlet Triplet Ecorr Total Energy
# DF-MP2 -0.261035854033 -0.140514056591 -0.401549910624 -112.843952380305
# DF-MP2-F12/3*C(DX,FIX) -0.367224875485 -0.163178266500 -0.530403141984 -112.972805611666
# DF-MP2-F12/3*C(FIX) -0.358294348708 -0.164988061549 -0.523282410258 -112.965684879939
# DF-MP2-F12/3C(FIX) -0.357375628783 -0.165176490386 -0.522552119169 -112.964954588851
#
# DF-MP2-F12 correlation energies:
# ================================
# Approx. Singlet Triplet Ecorr Total Energy
# DF-MP2 -0.357960885582 -0.185676627667 -0.543637513249 -132.841755020796
# DF-MP2-F12/3*C(DX,FIX) -0.381816069559 -0.188149510095 -0.569965579654 -132.868083087202
# DF-MP2-F12/3*C(FIX) -0.379285470419 -0.187468208608 -0.566753679027 -132.864871186575
# DF-MP2-F12/3C(FIX) -0.379246010149 -0.187531433611 -0.566777443760 -132.864894951307
mobj = re.search(
r'^\s*' + r'DF-MP2-F12 correlation energies:\s*' +
r'^\s*(?:[=-]+)\s*' +
r'^\s+' + r'Approx.\s+Singlet\s+Triplet\s+Ecorr\s+Total Energy\s*' +
r'^\s+' + r'DF-MP2\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'DF-MP2-F12/3\*C\(DX,FIX\)\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'DF-MP2-F12/3\*C\(FIX\)\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'DF-MP2-F12/3C\(FIX\)\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE)
if mobj:
#print('matched mp2f12')
psivar['MP2 CORRELATION ENERGY'] = mobj.group(3)
psivar['MP2 SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(2)) * \
Decimal(2) / Decimal(3)
psivar['MP2 OPPOSITE-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(3)) - \
psivar['MP2 SAME-SPIN CORRELATION ENERGY']
psivar['MP2 TOTAL ENERGY'] = mobj.group(4)
psivar['MP2-F12 CORRELATION ENERGY'] = mobj.group(15)
psivar['MP2-F12 SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(14)) * \
Decimal(2) / Decimal(3)
psivar['MP2-F12 OPPOSITE-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(15)) - \
psivar['MP2-F12 SAME-SPIN CORRELATION ENERGY']
psivar['MP2-F12 TOTAL ENERGY'] = mobj.group(16)
# <<< Process CC >>>
mobj = re.search(
r'^\s*' + r'CCSD triplet pair energy\s+' + NUMBER + '\s*' +
r'^\s*' + r'CCSD correlation energy\s+' + NUMBER + '\s*' +
r'^\s*' + r'Triples \(T\) contribution\s+' + NUMBER + '\s*$',
outtext, re.MULTILINE)
if mobj:
#print('matched ccsd(t)')
psivar['CCSD CORRELATION ENERGY'] = mobj.group(2)
psivar['CCSD SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(1)) * \
Decimal(2) / Decimal(3)
psivar['CCSD OPPOSITE-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(2)) - \
psivar['CCSD SAME-SPIN CORRELATION ENERGY']
psivar['CCSD TOTAL ENERGY'] = Decimal(mobj.group(2)) + psivar['HF TOTAL ENERGY']
psivar['(T) CORRECTION ENERGY'] = mobj.group(3)
psivar['CCSD(T) CORRELATION ENERGY'] = Decimal(mobj.group(2)) + Decimal(mobj.group(3))
psivar['CCSD(T) TOTAL ENERGY'] = psivar['CCSD(T) CORRELATION ENERGY'] + psivar['HF TOTAL ENERGY']
# <<< Process CC-F12 >>>
mobj = re.search(
r'^\s*' + r'CCSD-F12a triplet pair energy\s+' + NUMBER + '\s*' +
r'^\s*' + r'CCSD-F12a correlation energy\s+' + NUMBER + '\s*' +
r'^\s*' + r'Triples \(T\) contribution\s+' + NUMBER + '\s*$',
outtext, re.MULTILINE)
if mobj:
#print('matched ccsd(t)-f12a')
psivar['CCSD-F12A CORRELATION ENERGY'] = mobj.group(2)
psivar['CCSD-F12A SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(1)) * \
Decimal(2) / Decimal(3)
psivar['CCSD-F12A OPPOSITE-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(2)) - \
psivar['CCSD-F12A SAME-SPIN CORRELATION ENERGY']
psivar['CCSD-F12A TOTAL ENERGY'] = Decimal(mobj.group(2)) + psivar['HF-CABS TOTAL ENERGY']
psivar['(T)-F12AB CORRECTION ENERGY'] = mobj.group(3)
psivar['CCSD(T)-F12A CORRELATION ENERGY'] = Decimal(mobj.group(2)) + Decimal(mobj.group(3))
psivar['CCSD(T)-F12A TOTAL ENERGY'] = psivar['CCSD(T)-F12A CORRELATION ENERGY'] + psivar['HF-CABS TOTAL ENERGY']
psivar['(T*)-F12AB CORRECTION ENERGY'] = Decimal(mobj.group(3)) * \
psivar['MP2-F12 CORRELATION ENERGY'] / psivar['MP2 CORRELATION ENERGY']
psivar['CCSD(T*)-F12A CORRELATION ENERGY'] = Decimal(mobj.group(2)) + psivar['(T*)-F12AB CORRECTION ENERGY']
psivar['CCSD(T*)-F12A TOTAL ENERGY'] = psivar['CCSD(T*)-F12A CORRELATION ENERGY'] + psivar['HF-CABS TOTAL ENERGY']
mobj = re.search(
r'^\s*' + r'CCSD-F12b triplet pair energy\s+' + NUMBER + '\s*' +
r'^\s*' + r'CCSD-F12b correlation energy\s+' + NUMBER + '\s*' +
r'^\s*' + r'Triples \(T\) contribution\s+' + NUMBER + '\s*$',
outtext, re.MULTILINE)
if mobj:
#print('matched ccsd(t)-f12b')
psivar['CCSD-F12B CORRELATION ENERGY'] = mobj.group(2)
psivar['CCSD-F12B SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(1)) * \
Decimal(2) / Decimal(3)
psivar['CCSD-F12B OPPOSITE-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(2)) - \
psivar['CCSD-F12B SAME-SPIN CORRELATION ENERGY']
psivar['CCSD-F12B TOTAL ENERGY'] = Decimal(mobj.group(2)) + psivar['HF-CABS TOTAL ENERGY']
psivar['(T)-F12AB CORRECTION ENERGY'] = mobj.group(3)
psivar['CCSD(T)-F12B CORRELATION ENERGY'] = Decimal(mobj.group(2)) + Decimal(mobj.group(3))
psivar['CCSD(T)-F12B TOTAL ENERGY'] = psivar['CCSD(T)-F12B CORRELATION ENERGY'] + psivar['HF-CABS TOTAL ENERGY']
psivar['(T*)-F12AB CORRECTION ENERGY'] = Decimal(mobj.group(3)) * \
psivar['MP2-F12 CORRELATION ENERGY'] / psivar['MP2 CORRELATION ENERGY']
psivar['CCSD(T*)-F12B CORRELATION ENERGY'] = Decimal(mobj.group(2)) + psivar['(T*)-F12AB CORRECTION ENERGY']
psivar['CCSD(T*)-F12B TOTAL ENERGY'] = psivar['CCSD(T*)-F12B CORRELATION ENERGY'] + psivar['HF-CABS TOTAL ENERGY']
mobj = re.search(
r'^\s*' + r'CCSD-F12c triplet pair energy\s+' + NUMBER + '\s*' +
r'^\s*' + r'CCSD-F12c correlation energy\s+' + NUMBER + '\s*' +
r'^\s*' + r'Triples \(T\) contribution\s+' + NUMBER + '\s*$',
outtext, re.MULTILINE)
if mobj:
#print('matched ccsd(t)-f12c')
psivar['CCSD-F12C CORRELATION ENERGY'] = mobj.group(2)
psivar['CCSD-F12C SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(1)) * \
Decimal(2) / Decimal(3)
psivar['CCSD-F12C OPPOSITE-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(2)) - \
psivar['CCSD-F12C SAME-SPIN CORRELATION ENERGY']
psivar['CCSD-F12C TOTAL ENERGY'] = Decimal(mobj.group(2)) + psivar['HF-CABS TOTAL ENERGY']
psivar['(T)-F12C CORRECTION ENERGY'] = mobj.group(3)
psivar['CCSD(T)-F12C CORRELATION ENERGY'] = Decimal(mobj.group(2)) + Decimal(mobj.group(3))
psivar['CCSD(T)-F12C TOTAL ENERGY'] = psivar['CCSD(T)-F12C CORRELATION ENERGY'] + psivar['HF-CABS TOTAL ENERGY']
psivar['(T*)-F12C CORRECTION ENERGY'] = Decimal(mobj.group(3)) * \
psivar['MP2-F12 CORRELATION ENERGY'] / psivar['MP2 CORRELATION ENERGY']
psivar['CCSD(T*)-F12C CORRELATION ENERGY'] = Decimal(mobj.group(2)) + psivar['(T*)-F12C CORRECTION ENERGY']
psivar['CCSD(T*)-F12C TOTAL ENERGY'] = psivar['CCSD(T*)-F12C CORRELATION ENERGY'] + psivar['HF-CABS TOTAL ENERGY']
# Process Completion
mobj = re.search(
r'^\s*' + r'Variable memory released' + r'\s+$',
outtext, re.MULTILINE)
if mobj:
psivar['SUCCESS'] = True
# Process CURRENT energies (TODO: needs better way)
if 'HF TOTAL ENERGY' in psivar:
psivar['CURRENT REFERENCE ENERGY'] = psivar['HF TOTAL ENERGY']
psivar['CURRENT ENERGY'] = psivar['HF TOTAL ENERGY']
if 'HF-CABS TOTAL ENERGY' in psivar:
psivar['CURRENT REFERENCE ENERGY'] = psivar['HF-CABS TOTAL ENERGY']
psivar['CURRENT ENERGY'] = psivar['HF-CABS TOTAL ENERGY']
if 'MP2 TOTAL ENERGY' in psivar and 'MP2 CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['MP2 CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['MP2 TOTAL ENERGY']
if 'MP2-F12 TOTAL ENERGY' in psivar and 'MP2-F12 CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['MP2-F12 CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['MP2-F12 TOTAL ENERGY']
if 'CCSD TOTAL ENERGY' in psivar and 'CCSD CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD TOTAL ENERGY']
if 'CCSD-F12A TOTAL ENERGY' in psivar and 'CCSD-F12A CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD-F12A CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD-F12A TOTAL ENERGY']
if 'CCSD-F12B TOTAL ENERGY' in psivar and 'CCSD-F12B CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD-F12B CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD-F12B TOTAL ENERGY']
if 'CCSD-F12C TOTAL ENERGY' in psivar and 'CCSD-F12C CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD-F12C CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD-F12C TOTAL ENERGY']
if 'CCSD(T) TOTAL ENERGY' in psivar and 'CCSD(T) CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD(T) CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD(T) TOTAL ENERGY']
if 'CCSD(T)-F12A TOTAL ENERGY' in psivar and 'CCSD(T)-F12A CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD(T)-F12A CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD(T)-F12A TOTAL ENERGY']
if 'CCSD(T)-F12B TOTAL ENERGY' in psivar and 'CCSD(T)-F12B CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD(T)-F12B CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD(T)-F12B TOTAL ENERGY']
if 'CCSD(T)-F12C TOTAL ENERGY' in psivar and 'CCSD(T)-F12C CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD(T)-F12C CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD(T)-F12C TOTAL ENERGY']
return psivar, psivar_coord, psivar_grad
class Infile(qcformat.InputFormat2):
def __init__(self, mem, mol, mtd, der, opt):
qcformat.InputFormat2.__init__(self, mem, mol, mtd, der, opt)
#print self.method, self.molecule.nactive_fragments()
if ('sapt' in self.method or 'mp2c' in self.method) and self.molecule.nactive_fragments() != 2:
raise FragmentCountError("""Requested molecule has %d, not 2, fragments.""" % (self.molecule.nactive_fragments()))
# # memory in MB --> MW
# self.memory = int(math.ceil(mem / 8.0))
# auxiliary basis sets
[self.unaugbasis, self.augbasis, self.auxbasis] = self.corresponding_aux_basis()
def muster_basis_options(self):
text = ''
lowername = self.method.lower()
options = defaultdict(lambda: defaultdict(dict))
options['BASIS']['ORBITAL']['value'] = self.basis
# this f12 basis setting may be totally messed up
if self.method in ['ccsd(t)-f12-optri']:
if self.basis == 'cc-pvdz-f12':
options['BASIS']['JKFIT']['value'] = 'aug-cc-pvtz/jkfit'
options['BASIS']['JKFITC']['value'] = self.basis + '/optri'
options['BASIS']['MP2FIT']['value'] = 'aug-cc-pvtz/mp2fit'
elif self.method in ['ccsd(t)-f12-cabsfit']:
if self.unaugbasis and self.auxbasis:
#options['BASIS']['JKFIT']['value'] = self.auxbasis + '/jkfit'
#options['BASIS']['JKFITB']['value'] = self.unaugbasis + '/jkfit'
#options['BASIS']['MP2FIT']['value'] = self.auxbasis + '/mp2fit'
#options['BASIS']['DFLHF']['value'] = self.auxbasis + '/jkfit'
options['BASIS']['JKFITC']['value'] = 'aug-cc-pv5z/mp2fit'
else:
raise ValidationError("""Auxiliary basis not predictable from orbital basis '%s'""" % (self.basis))
elif ('df-' in self.method) or ('f12' in self.method) or (self.method in ['mp2c', 'dft-sapt', 'dft-sapt-pbe0acalda']):
if self.unaugbasis and self.auxbasis:
options['BASIS']['JKFIT']['value'] = self.auxbasis + '/jkfit'
options['BASIS']['JKFITB']['value'] = self.unaugbasis + '/jkfit'
options['BASIS']['MP2FIT']['value'] = self.auxbasis + '/mp2fit'
options['BASIS']['DFLHF']['value'] = self.auxbasis + '/jkfit'
else:
raise ValidationError("""Auxiliary basis not predictable from orbital basis '%s'""" % (self.basis))
return text, options
def prepare_basis_for_molpro(self):
text = ''
for opt, val in self.options['BASIS'].items():
#print opt, val['value']
#print molpro_basissets.altbasis.keys()
if not text:
text += """basis={\n"""
try:
# jaxz, maxz, etc.
for line in molpro_basissets.altbasis[val['value']]:
text += """%s\n""" % (line)
text += '\n'
except KeyError:
# haxz
if val['value'].startswith('heavy-aug-'):
text += """set,%s; default,%s,H=%s\n""" % (opt.lower(), self.augbasis, self.unaugbasis)
# xz, axz, 6-31g*
else:
text += """set,%s; default,%s\n""" % (opt.lower(), val['value'])
if text:
text += """}\n\n"""
return text
def format_infile_string(self):
"""
"""
# Handle memory and comment
memcmd, _memkw = """***, %s\nmemory,%d,m\n""" % (self.molecule.tagline, int(math.ceil(self.memory / 8.0))), {}
# Handle molecule and basis set
molcmd, _molkw = self.molecule.format_molecule_for_molpro(), {}
# format global convergence directions
# text += self.format_global_parameters()
_cdscmd, cdskw = muster_cdsgroup_options(self.method)
# Handle calc type and quantum chemical method
mdccmd, mdckw, mdcls = procedures['energy'][self.method](self.method, self.dertype, self.molecule)
_bascmd, baskw = self.muster_basis_options()
# # format options
# optcmd = qcdb.options.prepare_options_for_psi4(mdckw)
# make options from imdb only user options (currently non-existent). set basis and castup from here.
# Handle driver vs input/default keyword reconciliation
userkw = self.options
# userkw = p4util.prepare_options_for_modules()
#userkw = qcdb.options.reconcile_options(userkw, memkw)
#userkw = qcdb.options.reconcile_options(userkw, molkw)
userkw = options.reconcile_options2(userkw, cdskw)
userkw = options.reconcile_options2(userkw, baskw)
#userkw = qcdb.options.reconcile_options(userkw, psikw)
userkw = options.reconcile_options2(userkw, mdckw)
# Handle conversion of psi4 keyword structure into cfour format
#optcmdB = options.prepare_options_for_psi4(userkw)
optcmd = prepare_options_for_molpro(userkw, mdcls)
bascmd, _baskw = self.prepare_basis_for_molpro(), {} #self.options['BASIS']), {}
# Handle text to be passed untouched
litcmd = """\nshow[1,20f20.12],ee*,ce*,te*\nshow[1,60f20.12],_E*\n\n"""
# Assemble infile pieces
return memcmd + molcmd + bascmd + optcmd + mdccmd + litcmd
def muster_cdsgroup_options(name):
text = ''
lowername = name.lower()
options = defaultdict(lambda: defaultdict(dict))
options['GTHRESH']['ZERO']['value'] = 1.0e-14
options['GTHRESH']['ONEINT']['value'] = 1.0e-14
options['GTHRESH']['TWOINT']['value'] = 1.0e-14
options['GTHRESH']['ENERGY']['value'] = 1.0e-9
if name in ['mp2c', 'dft-sapt-shift', 'dft-sapt', 'dft-sapt-pbe0ac', 'dft-sapt-pbe0acalda']:
options['GTHRESH']['ENERGY']['value'] = 1.0e-8
options['GTHRESH']['ORBITAL']['value'] = 1.0e-8
options['GTHRESH']['GRID']['value'] = 1.0e-8
elif name in ['b3lyp', 'b3lyp-d', 'df-b3lyp', 'df-b3lyp-d']:
options['GTHRESH']['ENERGY']['value'] = 1.0e-8
options['GTHRESH']['ORBITAL']['value'] = 1.0e-7
options['GTHRESH']['GRID']['value'] = 1.0e-8
else:
pass
return text, options
def prepare_options_for_molpro(options, proc):
"""Function to take the full snapshot of the liboptions object
encoded in dictionary *options*, find the options directable toward
Cfour (options['CFOUR']['CFOUR_**']) that aren't default, then write
a CFOUR deck with those options.
Note that unlike the cfour version, this uses complete options deck.
"""
text = ''
if len(options['GTHRESH']) > 0:
text += 'gthresh'
for opt, val in options['GTHRESH'].items():
text += """,%s=%s""" % (opt, val['value'])
text += '\n\n'
for item in proc:
if len(options[item.upper()]) > 0:
text += """{%s%s}\n""" % (item, options[item.upper()]['OPTIONS']['value'])
else:
text += """%s\n""" % (item)
if text:
text += '\n'
return text
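# Illustrative sketch (assumption, not from the original source): for an
# options dict carrying GTHRESH ENERGY=1e-9 and proc == ['rhf', 'ccsd(t)-f12'],
# the returned text is roughly
#   gthresh,ENERGY=1e-09
#
#   rhf
#   {ccsd(t)-f12,df_basis=mp2fit,df_basis_exch=jkfitb,ri_basis=jkfitb}
# i.e. one gthresh card followed by one line per procedure step.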
def muster_modelchem(name, dertype, mol):
"""Transform calculation method *name* and derivative level *dertype*
into options for cfour. While deliberately requested pieces,
generally |cfour__cfour_deriv_level| and |cfour__cfour_calc_level|,
are set to complain if contradicted ('clobber' set to True), other
'recommended' settings, like |cfour__cfour_cc_program|, can be
countermanded by keywords in input file ('clobber' set to False).
Occasionally, want these pieces to actually overcome keywords in
input file ('superclobber' set to True).
"""
text = ''
lowername = name.lower()
options = defaultdict(lambda: defaultdict(dict))
proc = []
if dertype == 0:
pass
else:
raise ValidationError("""Requested Psi4 dertype %d is not available.""" % (dertype))
if lowername == 'mp2':
# Leftover Psi4-style branch; 'mp2' is not registered in the Molpro
# procedures table below, so this block is effectively unused.
options['GLOBALS']['FREEZE_CORE']['value'] = True
options['SCF']['SCF_TYPE']['value'] = 'direct'
options['MP2']['MP2_TYPE']['value'] = 'conv'
text += """mp2\n\n"""
elif lowername == 'ccsd(t)-f12':
proc.append('rhf')
proc.append('ccsd(t)-f12')
options['CCSD(T)-F12']['OPTIONS']['value'] = ',df_basis=mp2fit,df_basis_exch=jkfitb,ri_basis=jkfitb'
elif lowername == 'ccsd(t)-f12c':
proc.append('rhf')
proc.append('ccsd(t)-f12c')
options['CCSD(T)-F12C']['OPTIONS']['value'] = ',df_basis=mp2fit,df_basis_exch=jkfitb,ri_basis=jkfitb'
elif lowername == 'ccsd(t)-f12-optri':
proc.append('rhf')
proc.append('ccsd(t)-f12')
options['CCSD(T)-F12']['OPTIONS']['value'] = ',df_basis=mp2fit,df_basis_exch=jkfit,ri_basis=jkfitc'
elif lowername == 'ccsd(t)-f12-cabsfit':
proc.append('rhf')
proc.append('ccsd(t)-f12')
options['CCSD(T)-F12']['OPTIONS']['value'] = ',df_basis=jkfitc,df_basis_exch=jkfitc,ri_basis=jkfitc'
elif lowername == 'mp2c':
proc.append('gdirect')
proc.append(mol.extract_fragments(1, 2).format_molecule_for_molpro())
proc.append('df-hf,')
proc.append('df-ks,')
proc.append('sapt; monomerA')
options['DF-HF,']['OPTIONS']['value'] = """basis=jkfit,locorb=0; start,atdens; save,1101.2"""
options['DF-KS,']['OPTIONS']['value'] = """lhf,df_basis=dflhf,basis_coul=jkfitb,basis_exch=jkfitb; dftfac,1.0; start,1101.2; save,2101.2"""
proc.append(mol.extract_fragments(2, 1).format_molecule_for_molpro())
proc.append('df-hf')
proc.append('df-ks')
proc.append('sapt; monomerB')
options['DF-HF']['OPTIONS']['value'] = """,basis=jkfit,locorb=0; start,atdens; save,1102.2"""
options['DF-KS']['OPTIONS']['value'] = """,lhf,df_basis=dflhf,basis_coul=jkfitb,basis_exch=jkfitb; dftfac,1.0; start,1102.2; save,2102.2"""
proc.append(mol.format_molecule_for_molpro())
proc.append('sapt; intermol')
options['SAPT; INTERMOL']['OPTIONS']['value'] = """,saptlevel=3,ca=2101.2,cb=2102.2,icpks=0,fitlevel=3,nlexfac=0.0,cfac=0.0; dfit,basis_coul=jkfit,basis_exch=jkfit,cfit_scf=3"""
else:
raise ValidationError("""Requested Cfour computational methods %d is not available.""" % (lowername))
# # Set clobbering
# if 'CFOUR_DERIV_LEVEL' in options['CFOUR']:
# options['CFOUR']['CFOUR_DERIV_LEVEL']['clobber'] = True
# options['CFOUR']['CFOUR_DERIV_LEVEL']['superclobber'] = True
# if 'CFOUR_CALC_LEVEL' in options['CFOUR']:
# options['CFOUR']['CFOUR_CALC_LEVEL']['clobber'] = True
# options['CFOUR']['CFOUR_CALC_LEVEL']['superclobber'] = True
# if 'CFOUR_CC_PROGRAM' in options['CFOUR']:
# options['CFOUR']['CFOUR_CC_PROGRAM']['clobber'] = False
return text, options, proc
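# Illustrative sketch: for name == 'ccsd(t)-f12' and dertype == 0 this returns
# proc == ['rhf', 'ccsd(t)-f12'] plus an options entry whose OPTIONS value is
# ',df_basis=mp2fit,df_basis_exch=jkfitb,ri_basis=jkfitb', which
# prepare_options_for_molpro() folds into a single {ccsd(t)-f12,...} card.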
procedures = {
'energy': {
'mp2c' : muster_modelchem,
'ccsd(t)-f12' : muster_modelchem,
'ccsd(t)-f12c' : muster_modelchem,
'ccsd(t)-f12-optri' : muster_modelchem,
'ccsd(t)-f12-cabsfit' : muster_modelchem,
#'sapt0' : muster_modelchem,
#'sapt2+' : muster_modelchem,
#'sapt2+(3)' : muster_modelchem,
#'sapt2+3(ccd)' : muster_modelchem,
}
}
qcmtdIN = procedures['energy']
def psi4_list():
"""Return an array of Psi4 methods with energies.
"""
return procedures['energy'].keys()
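# Hedged usage sketch (not part of the original driver): shows how the
# option-mustering helpers above compose into a Molpro input fragment. It only
# uses functions defined in this module; 'mp2c' is one of the registered keys.
if __name__ == '__main__':
    _demo_text, _demo_opts = muster_cdsgroup_options('mp2c')
    # With an empty procedure list only the global gthresh card is emitted.
    print(prepare_options_for_molpro(_demo_opts, []))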
|
CDSherrill/psi4
|
psi4/driver/qcdb/molpro2.py
|
Python
|
lgpl-3.0
| 27,439
|
[
"CFOUR",
"Psi4"
] |
5069105b7f7403132b84358e3010cd61cc87972ca008acb6cfbe1194339c3f54
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# settings - back end for the settings page
# Copyright (C) 2003-2015 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Provide all the settings subpages"""
import os
import shared.returnvalues as returnvalues
from shared.base import client_alias, client_id_dir
from shared.defaults import any_vgrid, default_mrsl_filename, \
default_css_filename, profile_img_max_kb, profile_img_extensions
from shared.editing import cm_css, cm_javascript, cm_options, wrap_edit_area
from shared.functional import validate_input_and_cert
from shared.html import themed_styles
from shared.init import initialize_main_variables, find_entry, extract_menu
from shared.settings import load_settings, load_widgets, load_profile, \
load_ssh, load_davs, load_ftps
from shared.profilekeywords import get_profile_specs
from shared.safeinput import html_escape
from shared.settingskeywords import get_settings_specs
from shared.widgetskeywords import get_widgets_specs
from shared.useradm import get_default_mrsl, get_default_css, extract_field, \
create_alias_link
from shared.vgrid import vgrid_list_vgrids
try:
import shared.arcwrapper as arc
except Exception, exc:
# Ignore errors and let it crash if ARC is enabled without the lib
pass
general_edit = cm_options.copy()
ssh_edit = cm_options.copy()
davs_edit = cm_options.copy()
ftps_edit = cm_options.copy()
style_edit = cm_options.copy()
style_edit['mode'] = 'css'
widgets_edit = cm_options.copy()
widgets_edit['mode'] = 'htmlmixed'
profile_edit = cm_options.copy()
profile_edit['mode'] = 'htmlmixed'
def signature():
"""Signature of the main function"""
defaults = {'topic': ['general']}
return ['html_form', defaults]
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id, op_header=False)
client_dir = client_id_dir(client_id)
defaults = signature()[1]
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
# Please note that base_dir must end in slash to avoid access to other
# user dirs when own name is a prefix of another user name
base_dir = os.path.abspath(os.path.join(configuration.user_home,
client_dir)) + os.sep
title_entry = find_entry(output_objects, 'title')
title_entry['text'] = 'Settings'
# prepare support for toggling the views (by css/jquery)
title_entry['style'] = themed_styles(configuration)
title_entry['style']['skin'] += '''
%s
''' % cm_css
title_entry['javascript'] = '''
<script type="text/javascript" src="/images/js/jquery.js"></script>
<script type="text/javascript" src="/images/js/jquery-ui.js"></script>
%s
<script type="text/javascript" >
var toggleHidden = function(classname) {
// classname supposed to have a leading dot
$(classname).toggleClass("hidden");
}
$(document).ready(function() {
}
);
</script>
''' % cm_javascript
valid_topics = ['general', 'style']
active_menu = extract_menu(configuration, title_entry)
if 'submitjob' in active_menu:
valid_topics.append('job')
if 'people' in active_menu:
valid_topics.append('profile')
if configuration.site_script_deps:
valid_topics.append('widgets')
if configuration.arc_clusters:
valid_topics.append('arc')
if configuration.site_enable_sftp:
valid_topics.append('sftp')
if configuration.site_enable_davs:
valid_topics.append('webdavs')
if configuration.site_enable_ftps:
valid_topics.append('ftps')
topics = accepted['topic']
# Backwards compatibility
if topics and topics[0] == 'ssh':
topics[0] = 'sftp'
topics = [i for i in topics if i in valid_topics]
# Default to general if no valid topics given
if not topics:
topics.append(valid_topics[0])
topic_titles = dict([(i, i.title()) for i in valid_topics])
for (key, val) in [('sftp', 'SFTP'), ('webdavs', 'WebDAVS'),
('ftps', 'FTPS')]:
if key in valid_topics:
topic_titles[key] = val
output_objects.append({'object_type': 'header', 'text': 'Settings'})
links = []
for name in valid_topics:
active_menu = ''
if topics[0] == name:
active_menu = 'activebutton'
links.append({'object_type': 'link',
'destination': "settings.py?topic=%s" % name,
'class': '%ssettingslink settingsbutton %s' % \
(name, active_menu),
'title': 'Switch to %s settings' % topic_titles[name],
'text' : '%s' % topic_titles[name],
})
output_objects.append({'object_type': 'multilinkline', 'links': links,
'sep': ' '})
output_objects.append({'object_type': 'text', 'text': ''})
# load current settings
current_settings_dict = load_settings(client_id, configuration)
if not current_settings_dict:
# no current settings found
current_settings_dict = {}
if not topics:
output_objects.append({'object_type': 'error_text', 'text':
'No valid topics!'})
return (output_objects, returnvalues.CLIENT_ERROR)
if 'general' in topics:
html = \
'''
<div id="settings">
<form method="post" action="settingsaction.py">
<table class="settings fixedlayout">
<tr class="title"><td class="centertext">
Select your %s settings
</td></tr>
<tr><td>
</td></tr>
<tr><td>
<input type="hidden" name="topic" value="general" />
Please note that if you want to set multiple values (e.g. addresses)
in the same field, you must write each value on a separate line but
without blank lines.
</td></tr>
<tr><td>
</td></tr>
''' % configuration.short_title
settings_entries = get_settings_specs()
for (keyword, val) in settings_entries:
if 'SUBMITUI' == keyword and \
'job' not in valid_topics:
continue
if 'notify' == val['Context'] and \
keyword.lower() not in configuration.notify_protocols:
continue
entry = \
"""
<tr class='title'><td>
%s
</td></tr>
<tr><td>
%s
</td></tr>
<tr><td>
"""\
% (keyword.replace('_', ' ').title(), val['Description'])
if val['Type'] == 'multiplestrings':
try:
# get valid choices from conf. multiple selections
valid_choices = eval('configuration.%s' % keyword.lower())
current_choice = []
if current_settings_dict.has_key(keyword):
current_choice = current_settings_dict[keyword]
if len(valid_choices) > 0:
entry += '<div class="scrollselect">'
for choice in valid_choices:
selected = ''
if choice in current_choice:
selected = 'checked'
entry += '''
<input type="checkbox" name="%s" %s value="%s">%s<br />''' % \
(keyword, selected, choice, choice)
entry += '</div>'
else:
entry = ''
except:
# failed on evaluating configuration.%s
area = '''
<textarea id="%s" cols=40 rows=1 name="%s">''' % \
(keyword, keyword)
if current_settings_dict.has_key(keyword):
area += '\n'.join(current_settings_dict[keyword])
area += '</textarea>'
entry += wrap_edit_area(keyword, area, general_edit,
'BASIC')
elif val['Type'] == 'string':
# get valid choices from conf
valid_choices = eval('configuration.%s' % keyword.lower())
current_choice = ''
if current_settings_dict.has_key(keyword):
current_choice = current_settings_dict[keyword]
if len(valid_choices) > 0:
entry += '<select name="%s">' % keyword
for choice in valid_choices:
selected = ''
if choice == current_choice:
selected = 'selected'
entry += '<option %s value="%s">%s</option>'\
% (selected, choice, choice)
entry += '</select><br />'
else:
entry = ''
elif val['Type'] == 'boolean':
current_choice = ''
if current_settings_dict.has_key(keyword):
current_choice = current_settings_dict[keyword]
entry += '<select name="%s">' % keyword
for choice in (True, False):
selected = ''
if choice == current_choice:
selected = 'selected'
entry += '<option %s value="%s">%s</option>'\
% (selected, choice, choice)
entry += '</select><br />'
html += """%s
</td></tr>
""" % entry
html += \
"""
<tr><td>
<input type="submit" value="Save General Settings" />
</td></tr>
</table>
</form>
</div>
"""
output_objects.append({'object_type': 'html_form', 'text': html})
if 'job' in topics:
mrsl_path = os.path.join(base_dir, default_mrsl_filename)
default_mrsl = get_default_mrsl(mrsl_path)
html = \
'''
<div id="defaultmrsl">
<form method="post" action="editfile.py">
<table class="defaultjob fixedlayout">
<tr class="title"><td class="centertext">
Default job on submit page
</td></tr>
<tr><td>
</td></tr>
<tr><td>
If you use the same fields and values in many of your jobs, you can save your
preferred job description here to always start out with that description on
your submit job page.
</td></tr>
<tr><td>
</td></tr>
<tr><td>
<input type="hidden" name="path" value="%(mrsl_template)s" />
<input type="hidden" name="newline" value="unix" />
'''
keyword = "defaultjob"
area = '''
<textarea id="%(keyword)s" cols=82 rows=25 name="editarea">
%(default_mrsl)s
</textarea>
'''
html += wrap_edit_area(keyword, area, cm_options, 'BASIC')
html += '''
</td></tr>
<tr><td>
<input type="submit" value="Save Job Template" />
</td></tr>
</table>
</form>
</div>
'''
html = html % {
'default_mrsl': default_mrsl,
'mrsl_template': default_mrsl_filename,
'site': configuration.short_title,
'keyword': keyword
}
output_objects.append({'object_type': 'html_form', 'text': html})
if 'style' in topics:
css_path = os.path.join(base_dir, default_css_filename)
default_css = get_default_css(css_path)
html = \
'''
<div id="defaultcss">
<form method="post" action="editfile.py">
<table class="defaultstyle fixedlayout">
<tr class="title"><td class="centertext">
Default CSS (style) for all pages
</td></tr>
<tr><td>
</td></tr>
<tr><td>
If you want to customize the look and feel of the %(site)s web interfaces you
can override default values here. If you leave the style file blank you will
just use the default style.<br />
You can copy paste from the available style file links below if you want to
override specific parts.<br />
<div class="warningtext">Please note that you can not save an empty style
file, but must at least leave a blank line to use defaults. Additionally some
errors in your style code may potentially cause severe corruption in your page
layouts, so it may be a good idea to keep another browser tab/window ready to
(re)move your .default.css file to restore the defaults while experimenting
here.
</div>
</td></tr>
<tr><td>
<a class="urllink" href="/images/default.css">default</a> ,
<a class="urllink" href="/images/bluesky.css">bluesky</a>
</td></tr>
<tr><td>
</td></tr>
<tr><td>
<input type="hidden" name="path" value="%(css_template)s" />
<input type="hidden" name="newline" value="unix" />
'''
keyword = "defaultstyle"
area = '''
<textarea id="%(keyword)s" cols=82 rows=25 min_len=1 name="editarea">
%(default_css)s
</textarea>
'''
html += wrap_edit_area(keyword, area, style_edit)
html += '''
</td></tr>
<tr><td>
<input type="submit" value="Save Style Settings" />
</td></tr>
</table>
</form>
</div>
'''
html = html % {
'default_css': default_css,
'css_template': default_css_filename,
'site': configuration.short_title,
'keyword': keyword
}
output_objects.append({'object_type': 'html_form', 'text': html})
if 'widgets' in topics:
# load current widgets
current_widgets_dict = load_widgets(client_id, configuration)
if not current_widgets_dict:
# no current widgets found
current_widgets_dict = {}
show_widgets = current_settings_dict.get('ENABLE_WIDGETS', True)
if show_widgets:
edit_widgets = '''You can simply copy/paste from the available
widget file links below if you want to reuse existing widgets.<br />
</td></tr>
<tr><td>
<a class="urllink" href="/images/widgets/hello-grid.app">hello grid</a>,
<a class="urllink" href="/images/widgets/simple-calendar.app">simple calendar</a>,
<a class="urllink" href="/images/widgets/calendar.app">calendar</a>,
<a class="urllink" href="/images/widgets/gcal.app">google calendar</a>,
<a class="urllink" href="/images/widgets/calculator.app">calculator</a>,
<a class="urllink" href="/images/widgets/localrss.app">local rss reader</a>,
<a class="urllink" href="/images/widgets/rss.app">rss reader</a>,
<a class="urllink" href="/images/widgets/clock.app">clock</a>,
<a class="urllink" href="/images/widgets/weather.app">weather</a>,
<a class="urllink" href="/images/widgets/progressbar.app">progress bar</a>,
<a class="urllink" href="/images/widgets/simple-move.app">simple-move</a>,
<a class="urllink" href="/images/widgets/portlets.app">portlets</a>,
<a class="urllink" href="/images/widgets/countdown.app">countdown</a>,
<a class="urllink" href="/images/widgets/sparkline.app">mini chart</a>,
<a class="urllink" href="/images/widgets/piechart.app">pie chart</a>,
<a class="urllink" href="/images/widgets/simple-jobmon.app">simple-jobmon</a>,
<a class="urllink" href="/images/widgets/cert-countdown.app">certificate countdown</a>,
<a class="urllink" href="/images/widgets/disk-use.app">disk use progress bar</a>,
<a class="urllink" href="/images/widgets/jobs-stats.app">jobs stats table</a>,
<a class="urllink" href="/images/widgets/jobs-stats-chart.app">jobs stats chart</a>,
<a class="urllink" href="/images/widgets/daily-wm-comic.app">Daily WulffMorgenthaler comic</a>,
<a class="urllink" href="/images/widgets/kunet-login.app">KUnet login</a>
<a class="urllink" href="/images/widgets/tdchotspot-login.app">TDC Hotspot login</a>
</td></tr>
<tr><td>
<div class="warningtext">Please note that the widgets parser is rather grumpy
so you may have to avoid blank lines in your widget code below. Additionally
any errors in your widgets code may cause severe corruption in your pages, so
it may be a good idea to keep another browser tab/window ready for emergency
disabling of widgets while experimenting here.</div>
</td></tr>
<tr><td>
<input type="hidden" name="topic" value="widgets" />
</td></tr>
<tr><td>
'''
html = \
'''<div id="widgets">
<form method="post" action="settingsaction.py">
<table class="widgets fixedlayout">
<tr class="title"><td class="centertext">
Default user defined widgets for all pages
</td></tr>
<tr><td>
</td></tr>
<tr><td>
If you want to customize the look and feel of the %s web interfaces you can
add your own widgets here. If you leave the widgets blank you will just get
the default empty widget spaces.<br />
''' % configuration.short_title
widgets_entries = get_widgets_specs()
widgets_html = ''
for (keyword, val) in widgets_entries:
widgets_html += \
"""
<tr class=title><td>
%s
</td></tr>
<tr><td>
%s
</td></tr>
<tr><td>
"""\
% (keyword.replace('_', ' ').title(), val['Description'])
if val['Type'] == 'multiplestrings':
try:
# get valid choices from conf. multiple selections
valid_choices = eval('configuration.%s' % keyword.lower())
current_choice = []
if current_widgets_dict.has_key(keyword):
current_choice = current_widgets_dict[keyword]
if len(valid_choices) > 0:
widgets_html += '<div class="scrollselect">'
for choice in valid_choices:
selected = ''
if choice in current_choice:
selected = 'checked'
widgets_html += '''
<input type="checkbox" name="%s" %s value="%s">%s<br />'''\
% (keyword, selected, choice, choice)
widgets_html += '</div>'
except:
area = \
"""<textarea id='%s' cols=78 rows=10 name='%s'>""" % \
(keyword, keyword)
if current_widgets_dict.has_key(keyword):
area += '\n'.join(current_widgets_dict[keyword])
area += '</textarea>'
widgets_html += wrap_edit_area(keyword, area, widgets_edit)
if show_widgets:
edit_widgets += '''
%s
<tr><td>
<input type="submit" value="Save Widgets Settings" />
</td></tr>
''' % widgets_html
else:
edit_widgets = '''
<br/>
<div class="warningtext">
Widgets are disabled on your <em>General</em> settings page. Please enable
them there first if you want to customize your grid pages.
</div>
'''
html += \
'''
%s
</table>
</form>
</div>
''' % edit_widgets
output_objects.append({'object_type': 'html_form', 'text': html})
if 'profile' in topics:
# load current profile
current_profile_dict = load_profile(client_id, configuration)
if not current_profile_dict:
# no current profile found
current_profile_dict = {}
(got_list, all_vgrids) = vgrid_list_vgrids(configuration)
if not got_list:
all_vgrids = []
all_vgrids.append(any_vgrid)
all_vgrids.sort()
configuration.vgrids_allow_email = all_vgrids
configuration.vgrids_allow_im = all_vgrids
images = []
for path in os.listdir(base_dir):
real_path = os.path.join(base_dir, path)
if os.path.splitext(path)[1].strip('.') in profile_img_extensions \
and os.path.getsize(real_path) < profile_img_max_kb*1024:
images.append(path)
configuration.public_image = images
html = \
'''
<div id="profile">
<form method="post" action="settingsaction.py">
<table class="profile fixedlayout">
<tr class="title"><td class="centertext">
Public profile information visible to other users.
</td></tr>
<tr><td>
</td></tr>
<tr><td>
If you want to let other users know more about you can add your own text here.
If you leave the text area blank you will just get the default empty profile
information.<br />
</td></tr>
<tr><td>
<div class="warningtext">Please note that the profile parser is rather grumpy
so you may have to avoid blank lines in your text below.
</div>
</td></tr>
<tr><td>
<input type="hidden" name="topic" value="profile" />
</td></tr>
<tr><td>
'''
profile_entries = get_profile_specs()
for (keyword, val) in profile_entries:
# Mask VGrid name if configured
mask_title = keyword.replace(
'VGRID', configuration.site_vgrid_label.upper())
mask_desc = val['Description'].replace(
'VGrid', configuration.site_vgrid_label)
html += \
"""
<tr class=title><td>
%s
</td></tr>
<tr><td>
%s
</td></tr>
<tr><td>
""" % (mask_title.replace('_', ' ').title(),
html_escape(mask_desc))
if val['Type'] == 'multiplestrings':
try:
# get valid choices from conf. multiple selections
valid_choices = eval('configuration.%s' % keyword.lower())
current_choice = []
if current_profile_dict.has_key(keyword):
current_choice = current_profile_dict[keyword]
if len(valid_choices) > 0:
html += '<div class="scrollselect">'
for choice in valid_choices:
selected = ''
if choice in current_choice:
selected = 'checked'
html += '''
<input type="checkbox" name="%s" %s value="%s">%s<br />''' % \
(keyword, selected, choice, choice)
html += '</div>'
except:
area = \
"""<textarea id='%s' cols=78 rows=10 name='%s'>""" % \
(keyword, keyword)
if current_profile_dict.has_key(keyword):
area += '\n'.join(current_profile_dict[keyword])
area += '</textarea>'
html += wrap_edit_area(keyword, area, profile_edit)
elif val['Type'] == 'boolean':
valid_choices = [True, False]
current_choice = ''
if current_profile_dict.has_key(keyword):
current_choice = current_profile_dict[keyword]
if len(valid_choices) > 0:
html += '<select name="%s">' % keyword
for choice in valid_choices:
selected = ''
if choice == current_choice:
selected = 'selected'
html += '<option %s value="%s">%s</option>'\
% (selected, choice, choice)
html += '</select><br />'
html += '''
<tr><td>
<input type="submit" value="Save Profile Settings" />
</td></tr>
</table>
</form>
</div>
'''
output_objects.append({'object_type': 'html_form', 'text': html})
if 'sftp' in topics:
# load current ssh/sftp
current_ssh_dict = load_ssh(client_id, configuration)
if not current_ssh_dict:
# no current ssh found
current_ssh_dict = {}
default_authkeys = current_ssh_dict.get('authkeys', '')
default_authpassword = current_ssh_dict.get('authpassword', '')
username = client_alias(client_id)
if configuration.user_sftp_alias:
username = extract_field(client_id, configuration.user_sftp_alias)
create_alias_link(username, client_id, configuration.user_home)
sftp_server = configuration.user_sftp_show_address
sftp_port = configuration.user_sftp_show_port
html = \
'''
<div id="sshaccess">
<form method="post" action="settingsaction.py">
<table class="sshsettings fixedlayout">
<tr class="title"><td class="centertext">
SFTP access to your %(site)s account
</td></tr>
<tr><td>
</td></tr>
<tr><td>
You can configure SFTP login to your %(site)s account for efficient file
access. On Linux/UN*X it also allows transparent access through SSHFS.
<br/>
<h3>Login Details</h3>
<ul>
<li>Host <em>%(sftp_server)s</em></li>
<li>Port <em>%(sftp_port)s</em></li>
<li>Username <em>%(username)s</em></li>
<li>%(auth_methods)s <em>as you choose below</em></li>
</ul>
</td></tr>
<tr><td>
<input type="hidden" name="topic" value="sftp" />
<div class="div-sftp-client-notes hidden">
<a href="javascript:toggleHidden('.div-sftp-client-notes');"
class="removeitemlink" title="Toggle view">
Show less SFTP client details...</a>
<h3>Graphical SFTP access</h3>
The FireFTP plugin for Firefox is known to generally work for graphical
access to your %(site)s home over SFTP.
Enter the following values in the FireFTP Account Manager:
<pre>
Host %(sftp_server)s
Login %(username)s
Password YOUR_PASSWORD_HERE (passphrase if you configured public key access)
Security SFTP
Port %(sftp_port)s
Private Key ~/.mig/key.pem (if you configured public key access)
</pre>
Other graphical clients may work as well.
<h3>Command line SFTP/SSHFS access on Linux/UN*X</h3>
Save something like the following lines in your local ~/.ssh/config
to avoid typing the full login details every time:<br />
<pre>
Host %(sftp_server)s
Hostname %(sftp_server)s
User %(username)s
Port %(sftp_port)s
IdentityFile ~/.mig/key.pem
</pre>
From then on you can use sftp and sshfs to access your %(site)s home:
<pre>
sftp %(sftp_server)s
</pre>
<pre>
sshfs %(sftp_server)s: mig-home -o uid=$(id -u) -o gid=$(id -g)
</pre>
You can also integrate with ordinary mounts by adding a line like:
<pre>
sshfs#%(username)s@%(sftp_server)s: /home/USER/mig-home fuse noauto,user,port=%(sftp_port)d 0 0
</pre>
to your /etc/fstab .
</div>
<div class="div-sftp-client-notes">
<a href="javascript:toggleHidden('.div-sftp-client-notes');"
class="additemlink" title="Toggle view">Show more SFTP client details...
</a>
</div>
'''
keyword_keys = "authkeys"
if 'publickey' in configuration.user_sftp_auth:
html += '''
</td></tr>
<tr><td>
<h3>Authorized Public Keys</h3>
You can use any existing RSA key, or create a new one. If you signed up with
an x509 user certificate, you should also have received such a key.pem along with
your user certificate. In any case you need to save the contents of the
corresponding public key (X.pub) in the text area below, to be able to connect
with username and key as described in the Login Details.
<br/>
'''
area = '''
<textarea id="%(keyword_keys)s" cols=82 rows=5 name="publickeys">
%(default_authkeys)s
</textarea>
'''
html += wrap_edit_area(keyword_keys, area, ssh_edit, 'BASIC')
html += '''
(leave empty to disable sftp access with public keys)
</td></tr>
'''
keyword_password = "authpassword"
if 'password' in configuration.user_sftp_auth:
# We only want a single password and a masked input field
html += '''
<tr><td>
<h3>Authorized Password</h3>
Please enter and save your desired password in the text field below, to be able
to connect with username and password as described in the Login Details.
<br/>
<input type=password id="%(keyword_password)s" size=40 name="password"
value="%(default_authpassword)s" />
(leave empty to disable sftp access with password)
</td></tr>
'''
html += '''
<tr><td>
<input type="submit" value="Save SFTP Settings" />
</td></tr>
'''
html += '''
</table>
</form>
</div>
'''
html = html % {
'default_authkeys': default_authkeys,
'default_authpassword': default_authpassword,
'site': configuration.short_title,
'keyword_keys': keyword_keys,
'keyword_password': keyword_password,
'username': username,
'sftp_server': sftp_server,
'sftp_port': sftp_port,
'auth_methods': ' / '.join(configuration.user_sftp_auth).title(),
}
output_objects.append({'object_type': 'html_form', 'text': html})
if 'webdavs' in topics:
# load current davs
current_davs_dict = load_davs(client_id, configuration)
if not current_davs_dict:
# no current davs found
current_davs_dict = {}
default_authkeys = current_davs_dict.get('authkeys', '')
default_authpassword = current_davs_dict.get('authpassword', '')
username = client_alias(client_id)
if configuration.user_davs_alias:
username = extract_field(client_id, configuration.user_davs_alias)
create_alias_link(username, client_id, configuration.user_home)
davs_server = configuration.user_davs_show_address
davs_port = configuration.user_davs_show_port
html = \
'''
<div id="davsaccess">
<form method="post" action="settingsaction.py">
<table class="davssettings fixedlayout">
<tr class="title"><td class="centertext">
WebDAVS access to your %(site)s account
</td></tr>
<tr><td>
</td></tr>
<tr><td>
You can configure WebDAVS login to your %(site)s account for transparent file
access from your PC or workstation.<br/>
<h3>Login Details</h3>
<ul>
<li>Host <em>%(davs_server)s</em></li>
<li>Port <em>%(davs_port)s</em></li>
<li>Username <em>%(username)s</em></li>
<li>%(auth_methods)s <em>as you choose below</em></li>
</ul>
</td></tr>
<tr><td>
<input type="hidden" name="topic" value="webdavs" />
<div class="div-webdavs-client-notes hidden">
<a href="javascript:toggleHidden('.div-webdavs-client-notes');"
class="removeitemlink" title="Toggle view">
Show less WebDAVS client details...</a>
<h3>Graphical WebDAVS access</h3>
Several native file browsers and web browsers are known to generally work for
graphical access to your %(site)s home over WebDAVS.
<br />
Enter the address https://%(davs_server)s:%(davs_port)s and fill in the
login details when prompted:
<pre>
Username %(username)s
Password YOUR_PASSWORD_HERE
</pre>
Other graphical clients should work as well.
<h3>Command line WebDAVS access on Linux/UN*X</h3>
Save something like the following lines in your local ~/.netrc
to avoid typing the full login details every time:<br />
<pre>
machine %(davs_server)s
login %(username)s
password YOUR_PASSWORD_HERE
</pre>
From then on you can use e.g. cadaver or fusedav to access your %(site)s home:
<pre>
cadaver https://%(davs_server)s:%(davs_port)s
</pre>
<pre>
fusedav https://%(davs_server)s:%(davs_port)s mig-home -o uid=$(id -u) -o gid=$(id -g)
</pre>
</div>
<div class="div-webdavs-client-notes">
<a href="javascript:toggleHidden('.div-webdavs-client-notes');"
class="additemlink" title="Toggle view">
Show more WebDAVS client details...</a>
</div>
'''
keyword_keys = "authkeys"
if 'publickey' in configuration.user_davs_auth:
html += '''
</td></tr>
<tr><td>
<h3>Authorized Public Keys</h3>
You can use any existing RSA key, including the key.pem you received along with
your user certificate, or create a new one. In any case you need to save the
contents of the corresponding public key (X.pub) in the text area below, to be
able to connect with username and key as described in the Login Details.
<br/>'''
area = '''
<textarea id="%(keyword_keys)s" cols=82 rows=5 name="publickeys">
%(default_authkeys)s
</textarea>
'''
html += wrap_edit_area(keyword_keys, area, davs_edit, 'BASIC')
html += '''
(leave empty to disable davs access with public keys)
</td></tr>
'''
keyword_password = "authpassword"
if 'password' in configuration.user_davs_auth:
# We only want a single password and a masked input field
html += '''
<tr><td>
<h3>Authorized Password</h3>
Please enter and save your desired password in the text field below, to be able
to connect with username and password as described in the Login Details.
<br/>
<input type=password id="%(keyword_password)s" size=40 name="password"
value="%(default_authpassword)s" />
(leave empty to disable davs access with password)
</td></tr>
'''
html += '''
<tr><td>
<input type="submit" value="Save WebDAVS Settings" />
</td></tr>
'''
html += '''
</table>
</form>
</div>
'''
html = html % {
'default_authkeys': default_authkeys,
'default_authpassword': default_authpassword,
'site': configuration.short_title,
'keyword_keys': keyword_keys,
'keyword_password': keyword_password,
'username': username,
'davs_server': davs_server,
'davs_port': davs_port,
'auth_methods': ' / '.join(configuration.user_davs_auth).title(),
}
output_objects.append({'object_type': 'html_form', 'text': html})
if 'ftps' in topics:
# load current ftps
current_ftps_dict = load_ftps(client_id, configuration)
if not current_ftps_dict:
# no current ftps found
current_ftps_dict = {}
default_authkeys = current_ftps_dict.get('authkeys', '')
default_authpassword = current_ftps_dict.get('authpassword', '')
username = client_alias(client_id)
if configuration.user_ftps_alias:
username = extract_field(client_id, configuration.user_ftps_alias)
create_alias_link(username, client_id, configuration.user_home)
ftps_server = configuration.user_ftps_show_address
ftps_ctrl_port = configuration.user_ftps_show_ctrl_port
html = \
'''
<div id="ftpsaccess">
<form method="post" action="settingsaction.py">
<table class="ftpssettings fixedlayout">
<tr class="title"><td class="centertext">
FTPS access to your %(site)s account
</td></tr>
<tr><td>
</td></tr>
<tr><td>
You can configure FTPS login to your %(site)s account for efficient file
access.<br/>
<h3>Login Details</h3>
<ul>
<li>Host <em>%(ftps_server)s</em></li>
<li>Port <em>%(ftps_ctrl_port)s</em></li>
<li>Username <em>%(username)s</em></li>
<li>%(auth_methods)s <em>as you choose below</em></li>
</ul>
</td></tr>
<tr><td>
<input type="hidden" name="topic" value="ftps" />
<div class="div-ftps-client-notes hidden">
<a href="javascript:toggleHidden('.div-ftps-client-notes');"
class="removeitemlink" title="Toggle view">
Show less FTPS client details...</a>
<h3>Graphical FTPS access</h3>
The FireFTP plugin for Firefox is known to generally work for graphical
access to your %(site)s home over FTPS.
Enter the following values in the FireFTP Account Manager:
<pre>
Host %(ftps_server)s
Login %(username)s
Password YOUR_PASSWORD_HERE
Security FTPS
Port %(ftps_ctrl_port)s
</pre>
Other FTP clients and web browsers may work as well if you enter the address
ftps://%(ftps_server)s:%(ftps_ctrl_port)s
and fill in the login details when prompted:
<pre>
Username %(username)s
Password YOUR_PASSWORD_HERE
</pre>
<h3>Command line FTPS access on Linux/UN*X</h3>
Save something like the following lines in your local ~/.netrc
to avoid typing the full login details every time:<br />
<pre>
machine %(ftps_server)s
login %(username)s
password YOUR_PASSWORD_HERE
</pre>
From then on you can use e.g. lftp or CurlFtpFS to access your %(site)s home:
<!--
TODO: we need to provide the intermediate cert for server cert check like this
set ssl:ca-file sub.class1.server.ca.pem
-->
<pre>
lftp -e "set ssl:verify-certificate no; set ftp:ssl-protect-data on" \\
-p %(ftps_ctrl_port)s %(ftps_server)s
</pre>
<pre>
curlftpfs -o ssl %(ftps_server)s:%(ftps_ctrl_port)s mig-home \\
-o user=%(username)s -ouid=$(id -u) -o gid=$(id -g) -o no_verify_peer
</pre>
</div>
<div class="div-ftps-client-notes">
<a href="javascript:toggleHidden('.div-ftps-client-notes');"
class="additemlink" title="Toggle view">Show more FTPS client details...
</a>
</div>
'''
keyword_keys = "authkeys"
if 'publickey' in configuration.user_ftps_auth:
html += '''
</td></tr>
<tr><td>
<h3>Authorized Public Keys</h3>
You can use any existing RSA key, including the key.pem you received along with
your user certificate, or create a new one. In any case you need to save the
contents of the corresponding public key (X.pub) in the text area below, to be
able to connect with username and key as described in the Login Details.
<br/>
'''
area = '''
<textarea id="%(keyword_keys)s" cols=82 rows=5 name="publickeys">
%(default_authkeys)s
</textarea>
'''
html += wrap_edit_area(keyword_keys, area, ftps_edit, 'BASIC')
html += '''
(leave empty to disable ftps access with public keys)
</td></tr>
'''
keyword_password = "authpassword"
if 'password' in configuration.user_ftps_auth:
# We only want a single password and a masked input field
html += '''
<tr><td>
<h3>Authorized Password</h3>
Please enter and save your desired password in the text field below, to be able
to connect with username and password as described in the Login Details.
<br/>
<input type=password id="%(keyword_password)s" size=40 name="password"
value="%(default_authpassword)s" />
(leave empty to disable ftps access with password)
</td></tr>
'''
html += '''
<tr><td>
<input type="submit" value="Save FTPS Settings" />
</td></tr>
'''
html += '''
</table>
</form>
</div>
'''
html = html % {
'default_authkeys': default_authkeys,
'default_authpassword': default_authpassword,
'site': configuration.short_title,
'keyword_keys': keyword_keys,
'keyword_password': keyword_password,
'username': username,
'ftps_server': ftps_server,
'ftps_ctrl_port': ftps_ctrl_port,
'auth_methods': ' / '.join(configuration.user_ftps_auth).title(),
}
output_objects.append({'object_type': 'html_form', 'text': html})
# if ARC-enabled server:
if 'arc' in topics:
# provide information about the available proxy, offer upload
try:
home_dir = os.path.normpath(base_dir)
session_Ui = arc.Ui(home_dir, require_user_proxy=True)
proxy = session_Ui.getProxy()
if proxy.IsExpired():
# can rarely happen, constructor will throw exception
output_objects.append({'object_type': 'text',
'text': 'Proxy certificate is expired.'})
else:
output_objects.append({'object_type': 'text',
'text': 'Proxy for %s' \
% proxy.GetIdentitySN()})
output_objects.append(
{'object_type': 'text',
'text': 'Proxy certificate will expire on %s (in %s sec.)'
% (proxy.Expires(), proxy.getTimeleft())
})
except arc.NoProxyError, err:
output_objects.append({'object_type':'warning',
'text': 'No proxy certificate to load: %s' \
% err.what()})
output_objects = output_objects + arc.askProxy()
return (output_objects, returnvalues.OK)
|
heromod/migrid
|
mig/shared/functionality/settings.py
|
Python
|
gpl-2.0
| 40,926
|
[
"Brian"
] |
91914e523ba4749724e7b1ead245aa6b7db67304f6f5e6d84ab4a1e23b27e6ff
|
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import PolynomialFeatures
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.neighbors import KNeighborsRegressor
import pickle
# RiBuild Modules
from delphin_6_automation.logging.ribuild_logger import ribuild_logger
# Logger
logger = ribuild_logger(__name__)
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
catch_ratio = np.load(os.path.join(os.path.dirname(__file__), 'catch_ratio.npy'))
height = np.array([0.0, 5.0, 8.0, 8.5, 9.0, 9.25, 9.5, 9.75, 10.0])
horizontal_rain_intensity = np.array(
[0.0, 0.1, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 12.0, 15.0, 20.0, 25.0, 30.0])
width = np.array([0.0, 2.5, 5.0, 7.5, 10.0])
wind_speed = np.array([0, 1, 2, 3, 4, 5, 6, 8, 10])
print('Catch Ratio shape:', catch_ratio.shape)
print('Wind Speed:', len(wind_speed))
print('Rain:', len(horizontal_rain_intensity))
print('Height:', len(height))
print('Width:', len(width))
catch_ratio_list = []
height_list = []
horizontal_rain_intensity_list = []
width_list = []
wind_speed_list = []
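# Flatten the 4D catch-ratio lookup table into parallel 1D lists so that each
# (height, rain intensity, wind speed, width) combination becomes one sample.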
for wind_index in range(len(wind_speed)):
for rain_index in range(len(horizontal_rain_intensity)):
for height_index in range(len(height)):
for width_index in range(len(width)):
catch_ratio_list.append(catch_ratio[wind_index, rain_index, height_index, width_index])
height_list.append(height[height_index])
horizontal_rain_intensity_list.append(horizontal_rain_intensity[rain_index])
wind_speed_list.append(wind_speed[wind_index])
width_list.append(width[width_index])
print('')
print('Catch Ratio:', len(catch_ratio_list))
print('Height:', len(height_list))
print('Rain:', len(horizontal_rain_intensity_list))
print('Wind Speed:', len(wind_speed_list))
print('Width:', len(width_list))
print('')
y_data = np.array(catch_ratio_list)
x_data = np.vstack(np.array([height_list, horizontal_rain_intensity_list, wind_speed_list, width_list]).T)
print('y_data shape:', y_data.shape)
print('x_data shape:', x_data.shape)
print('')
X_train, X_test, y_train, y_test = train_test_split(x_data, y_data, random_state=0)
linreg = linear_model.LinearRegression(normalize=True)
linreg.fit(X_train, y_train)
print('Linear Model')
print('linear model intercept: {}'.format(linreg.intercept_))
print('linear model coeff:\n{}'.format(linreg.coef_))
print('R-squared score (training): {:.3f}'.format(linreg.score(X_train, y_train)))
print('R-squared score (test): {:.3f}'.format(linreg.score(X_test, y_test)))
print('')
linridge = linear_model.Ridge(alpha=20.0).fit(X_train, y_train)
print('Ridge Model')
print('ridge regression linear model intercept: {}'.format(linridge.intercept_))
print('ridge regression linear model coeff:\n{}'.format(linridge.coef_))
print('R-squared score (training): {:.3f}'.format(linridge.score(X_train, y_train)))
print('R-squared score (test): {:.3f}'.format(linridge.score(X_test, y_test)))
print('Number of non-zero features: {}'.format(np.sum(linridge.coef_ != 0)))
print('')
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
linridge_normal = linear_model.Ridge(alpha=20.0).fit(X_train_scaled, y_train)
print('Ridge Model Normalized')
print('ridge regression linear model intercept: {}'.format(linridge_normal.intercept_))
print('ridge regression linear model coeff:\n{}'.format(linridge_normal.coef_))
print('R-squared score (training): {:.3f}'.format(linridge_normal.score(X_train_scaled, y_train)))
print('R-squared score (test): {:.3f}'.format(linridge_normal.score(X_test_scaled, y_test)))
print('Number of non-zero features: {}'.format(np.sum(linridge_normal.coef_ != 0)))
print('')
poly = PolynomialFeatures(degree=2)
X_F1_poly = poly.fit_transform(x_data)
X_train, X_test, y_train, y_test = train_test_split(X_F1_poly, y_data, random_state=0)
linreg_poly = linear_model.LinearRegression().fit(X_train, y_train)
print('(poly deg 2) linear model coeff (w):\n{}'.format(linreg_poly.coef_))
print('(poly deg 2) linear model intercept (b): {:.5f}'.format(linreg_poly.intercept_))
print('(poly deg 2) R-squared score (training): {:.5f}'.format(linreg_poly.score(X_train, y_train)))
print('(poly deg 2) R-squared score (test): {:.5f}\n'.format(linreg_poly.score(X_test, y_test)))
print('\nAddition of many polynomial features often leads to\n\
overfitting, so we often use polynomial features in combination\n\
with regression that has a regularization penalty, like ridge\n\
regression.\n')
linridge_poly = linear_model.Ridge().fit(X_train, y_train)
print('(poly deg 2 + ridge) linear model coeff (w):\n{}'.format(linridge_poly.coef_))
print('(poly deg 2 + ridge) linear model intercept (b): {:.5f}'.format(linridge_poly.intercept_))
print('(poly deg 2 + ridge) R-squared score (training): {:.5f}'.format(linridge_poly.score(X_train, y_train)))
print('(poly deg 2 + ridge) R-squared score (test): {:.5f}'.format(linridge_poly.score(X_test, y_test)))
print('')
poly_3 = PolynomialFeatures(degree=3)
X_poly_3 = poly_3.fit_transform(x_data)
X_train, X_test, y_train, y_test = train_test_split(X_poly_3, y_data, random_state=0)
poly_reg_3 = linear_model.LinearRegression().fit(X_train, y_train)
print('(poly deg 3) linear model coeff (w):\n{}'.format(poly_reg_3.coef_))
print('(poly deg 3) linear model intercept (b): {:.5f}'.format(poly_reg_3.intercept_))
print('(poly deg 3) R-squared score (training): {:.5f}'.format(poly_reg_3.score(X_train, y_train)))
print('(poly deg 3) R-squared score (test): {:.5f}\n'.format(poly_reg_3.score(X_test, y_test)))
ridge_poly_3 = linear_model.Ridge().fit(X_train, y_train)
print('(poly deg 3 + ridge) linear model coeff (w):\n{}'.format(ridge_poly_3.coef_))
print('(poly deg 3 + ridge) linear model intercept (b): {:.5f}'.format(ridge_poly_3.intercept_))
print('(poly deg 3 + ridge) R-squared score (training): {:.5f}'.format(ridge_poly_3.score(X_train, y_train)))
print('(poly deg 3 + ridge) R-squared score (test): {:.5f}'.format(ridge_poly_3.score(X_test, y_test)))
print('')
X_train, X_test, y_train, y_test = train_test_split(x_data, y_data, random_state=0)
knnreg = KNeighborsRegressor(n_neighbors=5).fit(X_train, y_train)
print('K-nearest regression (5 neighbors)')
print(knnreg.predict(X_test))
print('R-squared train score: {:.5f}'.format(knnreg.score(X_train, y_train)))
print('R-squared test score: {:.5f}'.format(knnreg.score(X_test, y_test)))
print('')
knnreg = KNeighborsRegressor(n_neighbors=3).fit(X_train, y_train)
print('K-nearest regression (3 neighbors)')
print(knnreg.predict(X_test))
print('R-squared train score: {:.5f}'.format(knnreg.score(X_train, y_train)))
print('R-squared test score: {:.5f}'.format(knnreg.score(X_test, y_test)))
"""
X_train, X_test, y_train, y_test = train_test_split(x_data, y_data, random_state=0)
# Instanciate a Gaussian Process model
kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X_train, y_train)
#print('(gaussian) model coeff (w):\n{}'.format(gp.coef_))
#print('(gaussian) model intercept (b): {:.5f}'.format(gp.intercept_))
print('(gaussian) R-squared score (training): {:.5f}'.format(gp.score(X_train, y_train)))
print('(gaussian) R-squared score (test): {:.5f}'.format(gp.score(X_test, y_test)))
print('')
"""
# Save final model
filename_kn = 'k_nearest_model.sav'
pickle.dump(knnreg, open(filename_kn, 'wb'))
filename_p2 = 'poly2_model.sav'
pickle.dump(linridge_poly, open(filename_p2, 'wb'))
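# Hedged usage sketch (assumption, not in the original script): reload the
# pickled k-nearest-neighbour model and predict the catch ratio for one
# (height, horizontal rain intensity, wind speed, width) point, matching the
# column order of x_data above.
loaded_knn = pickle.load(open(filename_kn, 'rb'))
print('Example catch ratio prediction:', loaded_knn.predict([[9.5, 2.0, 4.0, 5.0]]))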
|
thp44/delphin_6_automation
|
test_files/driving_rain_model.py
|
Python
|
mit
| 8,199
|
[
"Gaussian"
] |
812d2285abe85e1c12f332513bc691b4a3224bf1c1fd2d600b050a41c3258bca
|
from __future__ import print_function
"""An experimental package for making plots during a simulation.
A PrimiPlotter can plot a list of atoms on one or more output devices.
"""
from numpy import *
from ase.visualize.colortable import color_table
import ase.data
import sys, os, time, weakref
import collections
class PrimiPlotterBase:
"Base class for PrimiPlotter and Povrayplotter."
#def set_dimensions(self, dims):
# "Set the size of the canvas (a 2-tuple)."
# self.dims = dims
def set_rotation(self, rotation):
"Set the rotation angles (in degrees)."
self.angles[:] = array(rotation) * (pi/180)
def set_radii(self, radii):
"""Set the atomic radii. Give an array or a single number."""
self.radius = radii
def set_colors(self, colors):
"""Explicitly set the colors of the atoms.
The colors can either be a dictionary mapping tags to colors
or an array of colors, one per atom.
Each color is specified as a greyscale value from 0.0 to 1.0
or as three RGB values from 0.0 to 1.0.
"""
self.colors = colors
def set_color_function(self, colors):
"""Set a color function, to be used to color the atoms."""
if isinstance(colors, collections.Callable):
self.colorfunction = colors
else:
raise TypeError("The color function is not callable.")
def set_invisible(self, inv):
"""Choose invisible atoms."""
self.invisible = inv
def set_invisibility_function(self, invfunc):
"""Set an invisibility function."""
if isinstance(invfunc, collections.Callable):
self.invisibilityfunction = invfunc
else:
raise TypeError("The invisibility function is not callable.")
def set_cut(self, xmin=None, xmax=None, ymin=None, ymax=None,
zmin=None, zmax=None):
self.cut = {"xmin":xmin, "xmax":xmax, "ymin":ymin, "ymax":ymax,
"zmin":zmin, "zmax":zmax}
def update(self, newatoms = None):
"""Cause a plot (respecting the interval setting).
update causes a plot to be made. If the interval variable was
specified when the plotter was create, it will only produce a
plot with that interval. update takes an optional argument,
newatoms, which can be used to replace the list of atoms with
a new one.
"""
if newatoms is not None:
self.atoms = newatoms
if self.skipnext <= 0:
self.plot()
self.skipnext = self.interval
self.skipnext -= 1
def set_log(self, log):
"""Sets a file for logging.
log may be an open file or a filename.
"""
if hasattr(log, "write"):
self.logfile = log
self.ownlogfile = False
else:
self.logfile = open(log, "w")
self.ownlogfile = True
def log(self, message):
"""logs a message to the file set by set_log."""
if self.logfile is not None:
self.logfile.write(message+"\n")
self.logfile.flush()
self._verb(message)
def _verb(self, txt):
if self.verbose:
sys.stderr.write(txt+"\n")
def _starttimer(self):
self.starttime = time.time()
def _stoptimer(self):
elapsedtime = time.time() - self.starttime
self.totaltime = self.totaltime + elapsedtime
print("plotting time %s sec (total %s sec)" % (elapsedtime,
self.totaltime))
def _getpositions(self):
return self.atoms.get_positions()
def _getradii(self):
if self.radius is not None:
if hasattr(self.radius, "shape"):
return self.radius # User has specified an array
else:
return self.radius * ones(len(self.atoms), float)
# No radii specified. Try getting them from the atoms.
try:
return self.atoms.get_atomic_radii()
except AttributeError:
try:
z = self._getatomicnumbers()
except AttributeError:
pass
else:
return ase.data.covalent_radii[z]
# No radius available. Defaulting to 1.0
return ones(len(self.atoms), float)
def _getatomicnumbers(self):
return self.atoms.get_atomic_numbers()
def _getcolors(self):
# Try any explicitly given colors
if self.colors is not None:
if isinstance(self.colors, type({})):
self.log("Explicit colors dictionary")
return _colorsfromdict(self.colors,
asarray(self.atoms.get_tags(),int))
else:
self.log("Explicit colors")
return self.colors
# Try the color function, if given
if self.colorfunction is not None:
self.log("Calling color function.")
return self.colorfunction(self.atoms)
# Maybe the atoms know their own colors
try:
c = self.atoms.get_colors()
except AttributeError:
c = None
if c is not None:
if isinstance(c, type({})):
self.log("Color dictionary from atoms.get_colors()")
return _colorsfromdict(c, asarray(self.atoms.get_tags(),int))
else:
self.log("Colors from atoms.get_colors()")
return c
# Default to white atoms
self.log("No colors: using white")
return ones(len(self.atoms), float)
def _getinvisible(self):
if self.invisible is not None:
inv = self.invisible
else:
inv = zeros(len(self.atoms))
if self.invisibilityfunction:
inv = logical_or(inv, self.invisibilityfunction(self.atoms))
r = self._getpositions()
if len(r) > len(inv):
# This will happen in parallel simulations due to ghost atoms.
# They are invisible. Hmm, this may cause trouble.
i2 = ones(len(r))
i2[:len(inv)] = inv
inv = i2
del i2
if self.cut["xmin"] is not None:
inv = logical_or(inv, less(r[:,0], self.cut["xmin"]))
if self.cut["xmax"] is not None:
inv = logical_or(inv, greater(r[:,0], self.cut["xmax"]))
if self.cut["ymin"] is not None:
inv = logical_or(inv, less(r[:,1], self.cut["ymin"]))
if self.cut["ymax"] is not None:
inv = logical_or(inv, greater(r[:,1], self.cut["ymax"]))
if self.cut["zmin"] is not None:
inv = logical_or(inv, less(r[:,2], self.cut["zmin"]))
if self.cut["zmax"] is not None:
inv = logical_or(inv, greater(r[:,2], self.cut["zmax"]))
return inv
def __del__(self):
if self.ownlogfile:
self.logfile.close()
class PrimiPlotter(PrimiPlotterBase):
"""Primitive PostScript-based plots during a simulation.
The PrimiPlotter plots atoms during simulations, extracting the
relevant information from the list of atoms. It is created using
the list of atoms as an argument to the constructor. Then one or
more output devices must be attached using set_output(device). The
list of supported output devices is at the end.
The atoms are plotted as circles. The system is first rotated
using the angles specified by set_rotation([vx, vy, vz]). The
rotation is vx degrees around the x axis (positive from the y
toward the z axis), then vy degrees around the y axis (from x
toward z), then vz degrees around the z axis (from x toward y).
The rotation matrix is the same as the one used by RasMol.
Per default, the system is scaled so it fits within the canvas
    (autoscale mode). Autoscale mode is enabled and disabled using
    autoscale("on") or autoscale("off"). A manual scale factor can be
    set with set_scale(scale); this implies autoscale("off"). The
scale factor (from the last autoscale event or from set_scale) can
be obtained with get_scale(). Finally, an explicit autoscaling can
    be triggered with autoscale("now"); this is mainly useful before
calling get_scale or before disabling further autoscaling.
Finally, a relative scaling factor can be set with
SetRelativeScaling(), it is multiplied to the usual scale factor
(from autoscale or from set_scale). This is probably only useful in
connection with autoscaling.
The radii of the atoms are obtained from the first of the following
methods which work:
1. If the radii are specified using PrimiPlotter.set_radii(r),
they are used. Must be an array, or a single number.
    2. If the atoms have a get_atomic_radii() method, it is used. This is
unlikely.
    3. If the atoms have a get_atomic_numbers() method, the
corresponding covalent radii are extracted from the
       ase.data module.
4. If all else fails, the radius is set to 1.0 Angstrom.
The atoms are colored using the first of the following methods
which work.
1. If colors are explicitly set using PrimiPlotter.set_colors(),
they are used.
2. If these colors are specified as a dictionary, the tags
(from atoms.get_tags()) are used as an index into the
dictionary to get the actual colors of the atoms.
3. If a color function has been set using
PrimiPlotter.set_color_function(), it is called with the atoms
as an argument, and is expected to return an array of colors.
4. If the atoms have a get_colors() method, it is used to get the
colors.
5. If these colors are specified as a dictionary, the tags
(from atoms.get_tags()) are used as an index into the
dictionary to get the actual colors of the atoms.
6. If all else fails, the atoms will be white.
The colors are specified as an array of colors, one color per
atom. Each color is either a real number from 0.0 to 1.0,
specifying a grayscale (0.0 = black, 1.0 = white), or an array of
three numbers from 0.0 to 1.0, specifying RGB values. The colors
of all atoms are thus a Numerical Python N-vector or a 3xN matrix.
    In cases 2 and 5 above, the keys of the dictionary are integers,
and the values are either numbers (grayscales) or 3-vectors (RGB
values), or strings with X11 color names, which are then
    translated to RGB values. Only in cases 2 and 5 are strings
recognized as colors.
Some atoms may be invisible, and thus left out of the plot.
Invisible atoms are determined from the following algorithm.
Unlike the radius or the coloring, all points below are tried and
if an atom is invisible by any criterion, it is left out of the plot.
1. All atoms are visible.
2. If PrimiPlotter.set_invisible() has be used to specify invisible
atoms, any atoms for which the value is non-zero becomes invisible.
3. If an invisiblility function has been set with
PrimiPlotter.set_invisibility_function(), it is called with the
atoms as argument. It is expected to return an integer per
atom, any non-zero value makes that atom invisible.
4. If a cut has been specified using set_cut, any atom outside the
cut is made invisible.
Note that invisible atoms are still included in the algorithm for
positioning and scaling the plot.
The following output devices are implemented.
PostScriptFile(prefix): Create PS files names prefix0000.ps etc.
PnmFile(prefix): Similar, but makes PNM files.
GifFile(prefix): Similar, but makes GIF files.
JpegFile(prefix): Similar, but makes JPEG files.
X11Window(): Show the plot in an X11 window using ghostscript.
Output devices writing to files take an extra optional argument to
the constructor, compress, specifying if the output file should be
gzipped. This is not allowed for some (already compressed) file
formats.
Instead of a filename prefix, a filename containing a % can be
used. In that case the filename is expected to expand to a real
filename when used with the Python string formatting operator (%)
with the frame number as argument. Avoid generating spaces in the
file names: use e.g. %03d instead of %3d.
"""
def __init__(self, atoms, verbose=0, timing=0, interval=1, initframe=0):
"""
Parameters to the constructor:
        atoms: The atoms to be plotted.
verbose = 0: Write progress information to stderr.
timing = 0: Collect timing information.
interval = 1: If specified, a plot is only made every
interval'th time update() is called. Deprecated, normally you
should use the interval argument when attaching the plotter to
e.g. the dynamics.
initframe = 0: Initial frame number, i.e. the number of the
first plot.
"""
self.atoms = atoms
self.outputdevice = []
self.angles = zeros(3, float)
self.dims = (512, 512)
self.verbose = verbose
self.timing = timing
self.totaltime = 0.0
self.radius = None
self.colors = None
self.colorfunction = None
self.n = initframe
self.interval = interval
self.skipnext = 0 # Number of calls to update before anything happens.
self.a_scale = 1
self.relativescale = 1.0
self.invisible = None
self.invisibilityfunction = None
self.set_cut() # No cut
self.isparallel = 0
self.logfile = None
self.ownlogfile = False
def set_output(self, device):
"Attach an output device to the plotter."
self.outputdevice.append(device)
device.set_dimensions(self.dims)
device.set_owner(weakref.proxy(self))
def set_dimensions(self, dims):
"Set the size of the canvas (a 2-tuple)."
if self.outputdevice:
raise RuntimeError("Cannot set dimensions after an output device has been specified.")
self.dims = dims
def autoscale(self, mode):
if mode == "on":
self.a_scale = 1
elif mode == "off":
self.a_scale = 0
elif mode == "now":
coords = self._rotate(self.atoms.get_positions())
radii = self._getradii()
self._autoscale(coords, radii)
else:
raise ValueError("Unknown autoscale mode: ").with_traceback(+str(mode))
def set_scale(self, scale):
self.autoscale("off")
self.scale = scale
def get_scale(self):
return self.scale
def set_relative_scale(self, rscale = 1.0):
self.relativescale = rscale
def plot(self):
"""Create a plot now. Does not respect the interval timer.
This method makes a plot unconditionally. It does not look at
the interval variable, nor is this plot taken into account in
the counting done by the update() method if an interval
variable was specified.
"""
if self.timing:
self._starttimer()
self.log("PrimiPlotter: Starting plot at "
+ time.strftime("%a, %d %b %Y %H:%M:%S"))
colors = self._getcolors()
invisible = self._getinvisible()
coords = self._rotate(self._getpositions())
radii = self._getradii()
if self.a_scale:
self._autoscale(coords,radii)
scale = self.scale * self.relativescale
coords = scale * coords
center = self._getcenter(coords)
offset = array(self.dims + (0.0,))/2.0 - center
coords = coords + offset
self.log("Scale is %f and size is (%d, %d)"
% (scale, self.dims[0], self.dims[1]))
self.log("Physical size of plot is %f Angstrom times %f Angstrom"
% (self.dims[0] / scale, self.dims[1] / scale))
self._verb("Sorting.")
order = argsort(coords[:,2])
coords = coords[order] ### take(coords, order)
radii = radii[order] ### take(radii, order)
colors = colors[order] ### take(colors, order)
invisible = invisible[order] ### take(invisible, order)
if self.isparallel:
id = arange(len(coords))[order] ### take(arange(len(coords)), order)
else:
id = None
radii = radii * scale
selector = self._computevisibility(coords, radii, invisible, id)
coords = compress(selector, coords, 0)
radii = compress(selector, radii)
colors = compress(selector, colors, 0)
self._makeoutput(scale, coords, radii, colors)
self.log("PrimiPlotter: Finished plotting at "
+ time.strftime("%a, %d %b %Y %H:%M:%S"))
self.log("\n\n")
if self.timing:
self._stoptimer()
def _computevisibility(self, coords, rad, invisible, id, zoom = 1):
xy = coords[:,:2]
typradius = sum(rad) / len(rad)
if typradius < 4.0:
self.log("Refining visibility check.")
if zoom >= 16:
raise RuntimeError("Cannot check visibility - too deep recursion.")
return self._computevisibility(xy*2, rad*2, invisible, id, zoom*2)
else:
self.log("Visibility(r_typ = %.1f pixels)" % (typradius,))
dims = array(self.dims) * zoom
maxr = int(ceil(max(rad))) + 2
canvas = zeros((dims[0] + 4*maxr, dims[1] + 4*maxr), int8)
# Atoms are only invisible if they are within the canvas, or closer
# to its edge than their radius
visible = (greater(xy[:,0], -rad) * less(xy[:,0], dims[0]+rad)
* greater(xy[:,1], -rad) * less(xy[:,1], dims[1]+rad)
* logical_not(invisible))
# Atoms are visible if not hidden behind other atoms
xy = floor(xy + 2*maxr + 0.5).astype(int)
masks = {}
for i in range(len(rad)-1, -1, -1):
if (i % 100000) == 0 and i:
self._verb(str(i))
if not visible[i]:
continue
x, y = xy[i]
r = rad[i]
try:
mask, invmask, rn = masks[r]
except KeyError:
rn = int(ceil(r))
nmask = 2*rn+1
mask = (arange(nmask) - rn)**2
mask = less(mask[:,newaxis]+mask[newaxis,:], r*r).astype(int8)
invmask = equal(mask, 0).astype(int8)
masks[r] = (mask, invmask, rn)
window = logical_or(canvas[x-rn:x+rn+1, y-rn:y+rn+1], invmask)
hidden = alltrue(window.flat)
if hidden:
visible[i] = 0
else:
canvas[x-rn:x+rn+1, y-rn:y+rn+1] = logical_or(canvas[x-rn:x+rn+1, y-rn:y+rn+1], mask)
self.log("%d visible, %d hidden out of %d" %
(sum(visible), len(visible) - sum(visible), len(visible)))
return visible
def _rotate(self, positions):
self.log("Rotation angles: %f %f %f" % tuple(self.angles))
mat = dot(dot(_rot(self.angles[2], 2),
_rot(self.angles[1], 1)),
_rot(self.angles[0]+pi, 0))
return dot(positions, mat)
def _getcenter(self, coords):
return array((max(coords[:,0]) + min(coords[:,0]),
max(coords[:,1]) + min(coords[:,1]), 0.0)) / 2.0
def _autoscale(self, coords, radii):
x = coords[:,0]
y = coords[:,1]
maxradius = max(radii)
deltax = max(x) - min(x) + 2*maxradius
deltay = max(y) - min(y) + 2*maxradius
scalex = self.dims[0] / deltax
scaley = self.dims[1] / deltay
self.scale = 0.95 * min(scalex, scaley)
self.log("Autoscale: %f" % self.scale)
def _makeoutput(self, scale, coords, radii, colors):
for device in self.outputdevice:
device.inform_about_scale(scale)
device.plot(self.n, coords, radii, colors)
self.n = self.n + 1
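# Illustrative usage sketch (not part of the original module).  It follows the
# workflow described in the class docstring above: attach an output device,
# choose a rotation, optionally set radii, then call update() during the run.
# The function name, the 'dump' file prefix and the particular angles and
# radius below are arbitrary choices made for illustration only.
def _example_primiplotter_usage(atoms, nsteps=5):
    """Write PostScript frames dump0000.ps, dump0001.ps, ... for `atoms`."""
    plotter = PrimiPlotter(atoms, verbose=1)
    plotter.set_output(PostScriptFile("dump"))  # one file per frame
    plotter.set_rotation([10.0, 5.0, 0.0])      # degrees, per the docstring
    plotter.set_radii(1.2)                      # one radius for all atoms
    for _ in range(nsteps):
        plotter.update()                        # honours the interval setting
    return plotter.get_scale()                  # scale chosen by autoscaling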
class ParallelPrimiPlotter(PrimiPlotter):
"""A version of PrimiPlotter for parallel ASAP simulations.
Used like PrimiPlotter, but only the output devices on the master
node are used. Most of the processing is distributed on the
nodes, but the actual output is only done on the master. See the
PrimiPlotter docstring for details.
"""
def __init__(self, *args, **kwargs):
PrimiPlotter.__init__(self, *args, **kwargs)
self.isparallel = 1
import ase.parallel
self.mpi = ase.parallel.world
if self.mpi is None:
raise RuntimeError("MPI is not available.")
self.master = self.mpi.rank == 0
self.mpitag = 42 # Reduce chance of collision with other modules.
def set_output(self, device):
if self.master:
PrimiPlotter.set_output(self, device)
def set_log(self, log):
if self.master:
PrimiPlotter.set_log(self, log)
def _getpositions(self):
realpos = self.atoms.get_positions()
ghostpos = self.atoms.get_ghost_positions()
self.numberofrealatoms = len(realpos)
self.numberofghostatoms = len(ghostpos)
return concatenate((realpos, ghostpos))
def _getatomicnumbers(self):
realz = self.atoms.get_atomic_numbers()
ghostz = self.atoms.get_ghost_atomic_numbers()
return concatenate((realz, ghostz))
    def _getradii(self):
        # Overrides PrimiPlotter._getradii so ghost atoms also get radii.
        r = PrimiPlotter._getradii(self)
if len(r) == self.numberofrealatoms + self.numberofghostatoms:
# Must have calculated radii from atomic numbers
return r
else:
assert len(r) == self.numberofrealatoms
# Heuristic: use minimum r for the ghosts
ghostr = min(r) * ones(self.numberofghostatoms, float)
return concatenate((r, ghostr))
def _getcenter(self, coords):
# max(x) and min(x) only works for rank-1 arrays in Numeric version 17.
maximal = maximum.reduce(coords[:,0:2])
minimal = minimum.reduce(coords[:,0:2])
self.mpi.max(maximal)
self.mpi.min(minimal)
maxx, maxy = maximal
minx, miny = minimal
return array([maxx + minx, maxy + miny, 0.0]) / 2.0
def _computevisibility(self, xy, rad, invisible, id, zoom = 1):
# Find visible atoms, allowing ghost atoms to hide real atoms.
v = PrimiPlotter._computevisibility(self, xy, rad, invisible, id, zoom)
# Then remove ghost atoms
return v * less(id, self.numberofrealatoms)
def _autoscale(self, coords, radii):
self._verb("Autoscale")
n = len(self.atoms)
x = coords[:n,0]
y = coords[:n,1]
assert len(x) == len(self.atoms)
maximal = array([max(x), max(y), max(radii[:n])])
minimal = array([min(x), min(y)])
self.mpi.max(maximal)
self.mpi.min(minimal)
maxx, maxy, maxradius = maximal
minx, miny = minimal
deltax = maxx - minx + 2*maxradius
deltay = maxy - miny + 2*maxradius
scalex = self.dims[0] / deltax
scaley = self.dims[1] / deltay
self.scale = 0.95 * min(scalex, scaley)
self.log("Autoscale: %f" % self.scale)
def _getcolors(self):
col = PrimiPlotter._getcolors(self)
nghost = len(self.atoms.get_ghost_positions())
newcolshape = (nghost + col.shape[0],) + col.shape[1:]
newcol = zeros(newcolshape, col.dtype)
newcol[:len(col)] = col
return newcol
def _makeoutput(self, scale, coords, radii, colors):
if len(colors.shape) == 1:
# Greyscales
ncol = 1
else:
ncol = colors.shape[1] # 1 or 3.
assert ncol == 3 # RGB values
# If one processor says RGB, all must convert
ncolmax = self.mpi.max(ncol)
if ncolmax > ncol:
assert ncol == 1
colors = colors[:,newaxis] + zeros(ncolmax)[newaxis,:]
ncol = ncolmax
assert colors.shape == (len(coords), ncol)
# Now send data from slaves to master
data = zeros((len(coords), 4+ncol), float)
data[:,:3] = coords
data[:,3] = radii
if ncol == 1:
data[:,4] = colors
else:
data[:,4:] = colors
if not self.master:
datashape = array(data.shape)
assert datashape.shape == (2,)
self.mpi.send(datashape, 0, self.mpitag)
self.mpi.send(data, 0, self.mpitag)
else:
total = [data]
n = len(coords)
colsmin = colsmax = 4+ncol
for proc in range(1, self.mpi.size):
self._verb("Receiving from processor "+str(proc))
datashape = zeros(2, int)
self.mpi.receive(datashape, proc, self.mpitag)
fdat = zeros(tuple(datashape))
self.mpi.receive(fdat, proc, self.mpitag)
total.append(fdat)
n = n + len(fdat)
if fdat.shape[1] < colsmin:
colsmin = fdat.shape[1]
if fdat.shape[1] > colsmax:
colsmax = fdat.shape[1]
self._verb("Merging data")
# Some processors may have only greyscales whereas others
# may have RGB. That will cause difficulties.
trouble = colsmax != colsmin
data = zeros((n, colsmax), float)
if trouble:
assert data.shape[1] == 7
else:
assert data.shape[1] == 7 or data.shape[1] == 5
i = 0
for d in total:
if not trouble or d.shape[1] == 7:
data[i:i+len(d)] = d
else:
assert d.shape[1] == 5
data[i:i+len(d), :5] = d
data[i:i+len(d), 5] = d[4]
data[i:i+len(d), 6] = d[4]
i = i + len(d)
assert i == len(data)
# Now all data is on the master
self._verb("Sorting merged data")
order = argsort(data[:,2])
data = data[order] ### take(data, order)
coords = data[:,:3]
radii = data[:,3]
if data.shape[1] == 5:
colors = data[:,4]
else:
colors = data[:,4:]
PrimiPlotter._makeoutput(self, scale, coords, radii, colors)
class _PostScriptDevice:
"""PostScript based output device."""
offset = (0,0) # Will be changed by some classes
def __init__(self):
self.scale = 1
self.linewidth = 1
self.outline = 1
def set_dimensions(self, dims):
self.dims = dims
def set_owner(self, owner):
self.owner = owner
def inform_about_scale(self, scale):
self.linewidth = 0.1 * scale
def set_outline(self, value):
self.outline = value
return self # Can chain these calls in set_output()
def plot(self, *args, **kargs):
self.Doplot(self.PSplot, *args, **kargs)
def plotArray(self, *args, **kargs):
self.Doplot(self.PSplotArray, *args, **kargs)
def PSplot(self, file, n, coords, r, colors, noshowpage=0):
xy = coords[:,:2]
assert(len(xy) == len(r) and len(xy) == len(colors))
if len(colors.shape) == 1:
gray = 1
else:
gray = 0
assert(colors.shape[1] == 3)
file.write("%!PS-Adobe-2.0\n")
file.write("%%Creator: Primiplot\n")
file.write("%%Pages: 1\n")
file.write("%%%%BoundingBox: %d %d %d %d\n" %
(self.offset + (self.offset[0] + self.dims[0],
self.offset[1] + self.dims[1])))
file.write("%%EndComments\n")
file.write("\n")
file.write("% Enforce BoundingBox\n")
file.write("%d %d moveto %d 0 rlineto 0 %d rlineto -%d 0 rlineto\n" %
((self.offset + self.dims + (self.dims[0],))))
file.write("closepath clip newpath\n\n")
file.write("%f %f scale\n" % (2*(1.0/self.scale,)))
file.write("%d %d translate\n" % (self.scale * self.offset[0],
self.scale * self.offset[1]))
file.write("\n")
if gray:
if self.outline:
file.write("/circ { 0 360 arc gsave setgray fill grestore stroke } def\n")
else:
file.write("/circ { 0 360 arc setgray fill } def\n")
else:
if self.outline:
file.write("/circ { 0 360 arc gsave setrgbcolor fill grestore stroke } def\n")
else:
file.write("/circ { 0 360 arc setrgbcolor fill } def\n")
file.write("%f setlinewidth 0.0 setgray\n" %
(self.linewidth * self.scale,))
if gray:
data = zeros((len(xy), 4), float)
data[:,0] = colors
data[:,1:3] = (self.scale * xy)
data[:,3] = (self.scale * r)
for point in data:
file.write("%.3f %.2f %.2f %.2f circ\n" % tuple(point))
else:
data = zeros((len(xy), 6), float)
data[:,0:3] = colors
data[:,3:5] = (self.scale * xy)
data[:,5] = (self.scale * r)
for point in data:
file.write("%.3f %.3f %.3f %.2f %.2f %.2f circ\n" % tuple(point))
if not noshowpage:
file.write("showpage\n")
def PSplotArray(self, file, n, data, noshowpage=0):
assert(len(data.shape) == 3)
assert(data.shape[0] == self.dims[1] and data.shape[1] == self.dims[0])
data = clip((256*data).astype(int), 0, 255)
file.write("%!PS-Adobe-2.0\n")
file.write("%%Creator: Fieldplotter\n")
file.write("%%Pages: 1\n")
file.write("%%%%BoundingBox: %d %d %d %d\n" %
(self.offset + (self.offset[0] + self.dims[0],
self.offset[1] + self.dims[1])))
file.write("%%EndComments\n")
file.write("\n")
file.write("%d %d translate\n" % self.offset)
file.write("%f %f scale\n" % self.dims)
file.write("\n")
file.write("% String holding a single line\n")
file.write("/pictline %d string def\n" %(data.shape[1]*data.shape[2],))
file.write("\n")
file.write("%d %d 8\n" % self.dims)
file.write("[%d 0 0 %d 0 0]\n" % self.dims)
file.write("{currentfile pictline readhexstring pop}\n")
file.write("false %d colorimage\n" % (data.shape[2],))
file.write("\n")
s = ""
for d in data.flat:
s += ("%02X" % d)
if len(s) >= 72:
file.write(s+"\n")
s = ""
file.write(s+"\n")
file.write("\n")
if not noshowpage:
file.write("showpage\n")
class _PostScriptToFile(_PostScriptDevice):
"""Output device for PS files."""
compr_suffix = None
def __init__(self, prefix, compress = 0):
self.compress = compress
if "'" in prefix:
raise ValueError("Filename may not contain a quote ('): "+prefix)
if "%" in prefix:
# Assume the user knows what (s)he is doing
self.filenames = prefix
else:
self.filenames = prefix + "%04d" + self.suffix
if compress:
if self.compr_suffix is None:
raise RuntimeError("Compression not supported.")
self.filenames = self.filenames + self.compr_suffix
_PostScriptDevice.__init__(self)
class PostScriptFile(_PostScriptToFile):
suffix = ".ps"
compr_suffix = ".gz"
offset = (50,50)
# Inherits __init__
def Doplot(self, plotmethod, n, *args, **kargs):
filename = self.filenames % (n,)
self.owner.log("Output to PostScript file "+filename)
if self.compress:
file = os.popen("gzip > '"+filename+"'", "w")
else:
file = open(filename, "w")
plotmethod(*(file, n)+args, **kargs)
file.close()
class _PS_via_PnmFile(_PostScriptToFile):
gscmd = "gs -q -sDEVICE=pnmraw -sOutputFile=- -dDEVICEWIDTH=%d -dDEVICEHEIGHT=%d - "
# Inherits __init__
def Doplot(self, plotmethod, n, *args, **kargs):
filename = self.filenames % (n,)
self.owner.log("Output to bitmapped file " + filename)
cmd = self.gscmd + self.converter
if self.compress:
cmd = cmd + "| gzip "
cmd = (cmd+" > '%s'") % (self.dims[0], self.dims[1], filename)
file = os.popen(cmd, "w")
plotmethod(*(file, n)+args, **kargs)
file.close()
class PnmFile(_PS_via_PnmFile):
suffix = ".pnm"
compr_suffix = ".gz"
converter = ""
class GifFile(_PS_via_PnmFile):
suffix = ".gif"
converter = "| ppmquant -floyd 256 2>/dev/null | ppmtogif 2>/dev/null"
class JpegFile(_PS_via_PnmFile):
suffix = ".jpeg"
converter = "| ppmtojpeg --smooth=5"
class X11Window(_PostScriptDevice):
"""Shows the plot in an X11 window."""
#Inherits __init__
gscmd = "gs -q -sDEVICE=x11 -dDEVICEWIDTH=%d -dDEVICEHEIGHT=%d -r72x72 -"
def Doplot(self, plotmethod, n, *args, **kargs):
self.owner.log("Output to X11 window")
try:
file = self.pipe
self.pipe.write("showpage\n")
except AttributeError:
filename = self.gscmd % tuple(self.dims)
file = os.popen(filename, "w")
self.pipe = file
kargs["noshowpage"] = 1
plotmethod(*(file, n)+args, **kargs)
file.write("flushpage\n")
file.flush()
# Helper functions
def _rot(v, axis):
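    # Rotation matrix about one Cartesian axis (0=x, 1=y, 2=z); ax1 and ax2
    # are the two remaining axes, which carry the cos/sin entries.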
ax1, ax2 = ((1, 2), (0, 2), (0, 1))[axis]
c, s = cos(v), sin(v)
m = zeros((3,3), float)
m[axis,axis] = 1.0
m[ax1,ax1] = c
m[ax2,ax2] = c
m[ax1,ax2] = s
m[ax2,ax1] = -s
return m
def _colorsfromdict(dict, cls):
"""Extract colors from dictionary using cls as key."""
assert(isinstance(dict, type({})))
# Allow local modifications, to replace strings with rgb values.
dict = dict.copy()
isgray, isrgb = 0, 0
for k in dict.keys():
v = dict[k]
if isinstance(v, type("string")):
v = color_table[v]
dict[k] = v
try:
if len(v) == 3:
isrgb = 1 # Assume it is an RGB value
if not hasattr(v, "shape"):
dict[k] = array(v) # Convert to array
else:
raise RuntimeError("Unrecognized color object "+repr(v))
except TypeError:
isgray = 1 # Assume it is a number
if isgray and isrgb:
# Convert all to RGB
for k in dict.keys():
v = dict[k]
if not hasattr(v, "shape"):
dict[k] = v * ones(3, float)
# Now the dictionary is ready
if isrgb:
colors = zeros((len(cls),3), float)
else:
colors = zeros((len(cls),), float)
for i in range(len(cls)):
colors[i] = dict[cls[i]]
return colors
|
suttond/MODOI
|
ase/visualize/primiplotter.py
|
Python
|
lgpl-3.0
| 35,589
|
[
"ASE",
"RasMol"
] |
0e6107e465c5adc24d49133d771615ef134001474f6d26d487bf58c690ab41c2
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGosemsim(RPackage):
"""The semantic comparisons of Gene Ontology (GO) annotations provide
quantitative ways to compute similarities between genes and gene
groups, and have became important basis for many bioinformatics
analysis approaches. GOSemSim is an R package for semantic similarity
computation among GO terms, sets of GO terms, gene products and gene
clusters. GOSemSim implemented five methods proposed by Resnik,
Schlicker, Jiang, Lin and Wang respectively."""
homepage = "https://www.bioconductor.org/packages/GOSemSim/"
git = "https://git.bioconductor.org/packages/GOSemSim.git"
version('2.2.0', commit='247434790e6c8cf99e5643f569390362b8c87c52')
depends_on('r@3.4.0:3.4.9', when='@2.2.0')
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-go-db', type=('build', 'run'))
depends_on('r-rcpp', type=('build', 'run'))
|
krafczyk/spack
|
var/spack/repos/builtin/packages/r-gosemsim/package.py
|
Python
|
lgpl-2.1
| 2,171
|
[
"Bioconductor"
] |
75e4554b5ab8a6f50e506f0006749737bbc7dfb33eb47c5d6a0bea0d48cfd341
|
# coding=utf-8
"""
Unit Tests for sickbeard/numdict.py
"""
# pylint: disable=line-too-long
from __future__ import print_function, unicode_literals
import os.path
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from sickbeard.numdict import NumDict
import six
from six.moves import UserDict
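# Illustrative sketch (not the real sickbeard.numdict implementation): the
# tests below exercise a dict whose keys are normalised to integers, so 3 and
# '3' address the same entry, None is allowed, non-numeric keys raise KeyError
# on lookup and TypeError on assignment.  A hypothetical, minimal version of
# that idea could look like the class below; it exists only to make the tested
# behaviour easier to follow and is not used by the test case.
class _TinyNumDictSketch(dict):
    """Hypothetical stand-in illustrating NumDict-style key handling."""
    @staticmethod
    def _numkey(key):
        if key is None:
            return None
        return int(key)  # int('3') == 3, so numeric strings collide with ints
    def __getitem__(self, key):
        try:
            key = self._numkey(key)
        except (TypeError, ValueError):
            raise KeyError(key)  # non-numeric keys can never be present
        return dict.__getitem__(self, key)
    def __setitem__(self, key, value):
        try:
            key = self._numkey(key)
        except (TypeError, ValueError):
            raise TypeError("keys must be numeric or None")
        dict.__setitem__(self, key, value)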
class NumDictTest(unittest.TestCase):
"""
Test the NumDict class
"""
def test_constructors(self): # pylint: disable=too-many-locals, too-many-statements
"""
Test NumDict constructors
"""
# dicts for testing
dict_0 = {} # Empty dictionary
dict_1 = {1: 'Elephant'} # Single numeric key
dict_2 = {1: 'Elephant', 2: 'Mouse'} # Multiple numeric keys
dict_3 = {'3': 'Aardvark'} # Numeric string key
dict_4 = {'3': 'Aardvark', '4': 'Ant'} # Multiple numeric string keys
dict_5 = {5: 'Cat', '6': 'Dog'} # Mixed numeric and numeric string keys
dict_6 = {1: None, '2': None} # None as values
dict_7 = {None: 'Empty'} # None as key
# Construct NumDicts from dicts
num_dict = NumDict()
num_dict_0 = NumDict(dict_0)
num_dict_1 = NumDict(dict_1)
num_dict_2 = NumDict(dict_2)
num_dict_3 = NumDict(dict_3)
num_dict_4 = NumDict(dict_4)
num_dict_5 = NumDict(dict_5)
num_dict_6 = NumDict(dict_6)
num_dict_7 = NumDict(dict_7)
# Most NumDicts from dicts should compare equal...
self.assertEqual(num_dict, {})
self.assertEqual(num_dict_0, dict_0)
self.assertEqual(num_dict_1, dict_1)
self.assertEqual(num_dict_2, dict_2)
# ...however, numeric keys are not equal to numeric string keys...
self.assertNotEqual(num_dict_3, dict_3)
self.assertNotEqual(num_dict_4, dict_4)
self.assertNotEqual(num_dict_5, dict_5)
self.assertNotEqual(num_dict_6, dict_6)
# ...but None keys work just fine
self.assertEqual(num_dict_7, dict_7)
# Construct dicts from NumDicts
dict_from_num_dict = dict(num_dict)
dict_from_num_dict_1 = dict(num_dict_1)
dict_from_num_dict_2 = dict(num_dict_2)
dict_from_num_dict_3 = dict(num_dict_3)
dict_from_num_dict_4 = dict(num_dict_4)
dict_from_num_dict_5 = dict(num_dict_5)
dict_from_num_dict_6 = dict(num_dict_6)
dict_from_num_dict_7 = dict(num_dict_7)
# All dicts from NumDicts should compare equal
self.assertEqual(num_dict, dict_from_num_dict)
self.assertEqual(num_dict_1, dict_from_num_dict_1)
self.assertEqual(num_dict_2, dict_from_num_dict_2)
self.assertEqual(num_dict_3, dict_from_num_dict_3)
self.assertEqual(num_dict_4, dict_from_num_dict_4)
self.assertEqual(num_dict_5, dict_from_num_dict_5)
self.assertEqual(num_dict_6, dict_from_num_dict_6)
self.assertEqual(num_dict_7, dict_from_num_dict_7)
# Construct NumDicts from NumDicts
num_dict_from_num_dict = NumDict(num_dict)
num_dict_from_num_dict_0 = NumDict(num_dict_0)
num_dict_from_num_dict_1 = NumDict(num_dict_1)
num_dict_from_num_dict_2 = NumDict(num_dict_2)
num_dict_from_num_dict_3 = NumDict(num_dict_3)
num_dict_from_num_dict_4 = NumDict(num_dict_4)
num_dict_from_num_dict_5 = NumDict(num_dict_5)
num_dict_from_num_dict_6 = NumDict(num_dict_6)
num_dict_from_num_dict_7 = NumDict(num_dict_7)
# All NumDicts from NumDicts should compare equal
self.assertEqual(num_dict, num_dict_from_num_dict)
self.assertEqual(num_dict_0, num_dict_from_num_dict_0)
self.assertEqual(num_dict_1, num_dict_from_num_dict_1)
self.assertEqual(num_dict_2, num_dict_from_num_dict_2)
self.assertEqual(num_dict_3, num_dict_from_num_dict_3)
self.assertEqual(num_dict_4, num_dict_from_num_dict_4)
self.assertEqual(num_dict_5, num_dict_from_num_dict_5)
self.assertEqual(num_dict_6, num_dict_from_num_dict_6)
self.assertEqual(num_dict_7, num_dict_from_num_dict_7)
# keyword arg constructor should fail
with self.assertRaises(TypeError):
NumDict(one=1, two=2) # Raise TypeError since we can't have numeric keywords
# item sequence constructors work fine...
self.assertEqual(NumDict([(1, 'Elephant'), (2, 'Mouse')]), dict_from_num_dict_2)
self.assertEqual(NumDict(dict=[(1, 'Elephant'), (2, 'Mouse')]), dict_from_num_dict_2)
self.assertEqual(NumDict([(1, 'Elephant'), ('2', 'Mouse')]), dict_from_num_dict_2)
self.assertEqual(NumDict(dict=[('1', 'Elephant'), (2, 'Mouse')]), dict_from_num_dict_2)
# ...unless you have a non-numeric key
with self.assertRaises(TypeError):
NumDict([('Rat', 11), ('Snake', 12)])
with self.assertRaises(TypeError):
NumDict(dict=[('Rat', 11), ('Snake', 12)])
# combining item sequence constructors with keyword args does not work
with self.assertRaises(TypeError): # Raise TypeError since we can't have numeric keywords
NumDict([(1, 'one'), (2, 'two')], two=3, five=4)
# alternate constructors
dict_8 = {1: 'Echo', 2: 'Echo'}
self.assertEqual(NumDict.fromkeys('1 2'.split()), dict_from_num_dict_6)
self.assertEqual(NumDict().fromkeys('1 2'.split()), dict_from_num_dict_6)
self.assertEqual(NumDict.fromkeys('1 2'.split(), 'Echo'), dict_8)
self.assertEqual(NumDict().fromkeys('1 2'.split(), 'Echo'), dict_8)
self.assertTrue(num_dict_1.fromkeys('1 2'.split()) is not num_dict_1)
self.assertIsInstance(num_dict_1.fromkeys('1 2'.split()), NumDict)
self.assertIsInstance(num_dict_2.fromkeys('1 2'.split()), NumDict)
self.assertIsInstance(num_dict_3.fromkeys('1 2'.split()), NumDict)
self.assertIsInstance(num_dict_4.fromkeys('1 2'.split()), NumDict)
def test_repr(self): # pylint: disable=too-many-locals
"""
Test representation of NumDicts
"""
# dicts for testing
dict_0 = {} # Empty dictionary
dict_1 = {1: 'Elephant'} # Single numeric key
dict_2 = {1: 'Elephant', 2: 'Mouse'} # Multiple numeric keys
dict_3 = {'3': 'Aardvark'} # Numeric string key
dict_4 = {'3': 'Aardvark', '4': 'Ant'} # Multiple numeric string keys
dict_5 = {5: 'Cat', '6': 'Dog'} # Mixed numeric and numeric string keys
dict_6 = {1: None, '2': None} # None as values
dict_7 = {None: 'Empty'} # None as key
# Construct NumDicts from dicts
num_dict = NumDict()
num_dict_0 = NumDict(dict_0)
num_dict_1 = NumDict(dict_1)
num_dict_2 = NumDict(dict_2)
num_dict_3 = NumDict(dict_3)
num_dict_4 = NumDict(dict_4)
num_dict_5 = NumDict(dict_5)
num_dict_6 = NumDict(dict_6)
num_dict_7 = NumDict(dict_7)
reps = (
"{}",
"{1: u'Elephant'}",
"{1: u'Elephant', 2: u'Mouse'}",
"'3': u'Aardvark'",
"{'3': u'Aardvark', '4': u'Ant'}",
"{5: u'Cat', '6': u'Dog'}",
"{1: None, '2': None}",
"{None: u'Empty'}",
)
# Most representations of NumDicts should compare equal to dicts...
self.assertEqual(six.text_type(num_dict), six.text_type({}))
self.assertEqual(repr(num_dict), repr({}))
self.assertIn(repr(num_dict), reps)
self.assertEqual(six.text_type(num_dict_0), six.text_type(dict_0))
self.assertEqual(repr(num_dict_0), repr(dict_0))
self.assertIn(repr(num_dict_0), reps)
self.assertEqual(six.text_type(num_dict_1), six.text_type(dict_1))
self.assertEqual(repr(num_dict_1), repr(dict_1))
self.assertIn(repr(num_dict_1), reps)
self.assertEqual(six.text_type(num_dict_2), six.text_type(dict_2))
self.assertEqual(repr(num_dict_2), repr(dict_2))
self.assertIn(repr(num_dict_2), reps)
# ...however, numeric keys are not equal to numeric string keys...
# ...so the string representations for those are different...
self.assertNotEqual(six.text_type(num_dict_3), six.text_type(dict_3))
self.assertNotEqual(repr(num_dict_3), repr(dict_3))
self.assertNotIn(repr(num_dict_3), reps)
self.assertNotEqual(six.text_type(num_dict_4), six.text_type(dict_4))
self.assertNotEqual(repr(num_dict_4), repr(dict_4))
self.assertNotIn(repr(num_dict_4), reps)
self.assertNotEqual(six.text_type(num_dict_5), six.text_type(dict_5))
self.assertNotEqual(repr(num_dict_5), repr(dict_5))
self.assertNotIn(repr(num_dict_5), reps)
self.assertNotEqual(six.text_type(num_dict_6), six.text_type(dict_6))
self.assertNotEqual(repr(num_dict_6), repr(dict_6))
self.assertNotIn(repr(num_dict_6), reps)
# ...but None keys work just fine
self.assertEqual(six.text_type(num_dict_7), six.text_type(dict_7))
self.assertEqual(repr(num_dict_7), repr(dict_7))
self.assertIn(repr(num_dict_7), reps)
def test_rich_comparison_and_len(self):
"""
Test rich comparison and length
"""
# dicts for testing
dict_0 = {} # Empty dictionary
dict_1 = {1: 'Elephant'} # Single numeric key
dict_2 = {1: 'Elephant', 2: 'Mouse'} # Multiple numeric keys
# Construct NumDicts from dicts
num_dict = NumDict()
num_dict_0 = NumDict(dict_0)
num_dict_1 = NumDict(dict_1)
num_dict_2 = NumDict(dict_2)
# Construct NumDicts from NumDicts
num_dict_from_num_dict = NumDict(num_dict)
num_dict_from_num_dict_0 = NumDict(num_dict_0)
num_dict_from_num_dict_1 = NumDict(num_dict_1)
num_dict_from_num_dict_2 = NumDict(num_dict_2)
all_dicts = [dict_0, dict_1, dict_2, num_dict, num_dict_0, num_dict_1, num_dict_2, num_dict_from_num_dict, num_dict_from_num_dict_0, num_dict_from_num_dict_1, num_dict_from_num_dict_2]
for val_a in all_dicts:
for val_b in all_dicts:
self.assertEqual(val_a == val_b, len(val_a) == len(val_b))
def test_dict_access_and_mod(self): # pylint: disable=too-many-locals, too-many-statements
"""
Test num dict access and modification
"""
# dicts for testing
dict_0 = {} # Empty dictionary
dict_1 = {1: 'Elephant'} # Single numeric key
dict_2 = {1: 'Elephant', 2: 'Mouse'} # Multiple numeric keys
# Construct NumDicts from dicts
num_dict_0 = NumDict()
num_dict_1 = NumDict(dict_1)
num_dict_2 = NumDict(dict_2)
# test __getitem__
self.assertEqual(num_dict_2[1], 'Elephant')
with self.assertRaises(KeyError):
_ = num_dict_1['Mouse'] # key is not numeric
with self.assertRaises(KeyError):
_ = num_dict_1.__getitem__('Mouse') # key is not numeric
with self.assertRaises(KeyError):
_ = num_dict_1[None] # key does not exist
with self.assertRaises(KeyError):
_ = num_dict_1.__getitem__(None) # key does not exist
# Test __setitem__
num_dict_3 = NumDict(num_dict_2)
self.assertEqual(num_dict_2, num_dict_3)
num_dict_3[2] = 'Frog'
self.assertNotEqual(num_dict_2, num_dict_3)
# Check None keys and numeric key conversion
num_dict_3['3'] = 'Armadillo'
num_dict_3[None] = 'Cockroach'
# Check long ints
num_dict_3[12390809518259081208909880312] = 'Squid'
num_dict_3['12390809518259081208909880312'] = 'Octopus'
self.assertEqual(num_dict_3[12390809518259081208909880312], 'Octopus')
with self.assertRaises(TypeError):
num_dict_3.__setitem__('Gorilla', 1) # key is not numeric
with self.assertRaises(TypeError):
num_dict_3['Chimpanzee'] = 1 # key is not numeric
with self.assertRaises(TypeError):
num_dict_3[(4, 1)] = 1 # key is not numeric
with self.assertRaises(TypeError):
num_dict_3[[1, 3, 4]] = 1 # key is not numeric and is not hashable
# Test __delitem__
del num_dict_3[3]
del num_dict_3[None]
with self.assertRaises(KeyError):
del num_dict_3[3] # already deleted
with self.assertRaises(KeyError):
num_dict_3.__delitem__(3) # already deleted
with self.assertRaises(KeyError):
del num_dict_3['Mouse'] # key would not exist, since it is not numeric
# Test clear
num_dict_3.clear()
self.assertEqual(num_dict_3, {})
# Test copy()
num_dict_2a = dict_2.copy()
self.assertEqual(num_dict_2, num_dict_2a)
num_dict_2b = num_dict_2.copy()
self.assertEqual(num_dict_2b, num_dict_2)
num_dict_2c = UserDict({1: 'Elephant', 2: 'Mouse'})
num_dict_2d = num_dict_2c.copy() # making a copy of a UserDict is special cased
self.assertEqual(num_dict_2c, num_dict_2d)
class MyNumDict(NumDict):
"""
subclass Numdict for testing
"""
def display(self):
"""
add a method to subclass to differentiate from superclass
"""
print('MyNumDict:', self)
my_num_dict = MyNumDict(num_dict_2)
my_num_dict_a = my_num_dict.copy()
self.assertEqual(my_num_dict_a, my_num_dict)
my_num_dict[1] = 'Frog'
self.assertNotEqual(my_num_dict_a, my_num_dict)
# Test keys, items, values
self.assertEqual(sorted(six.iterkeys(num_dict_2)), sorted(six.iterkeys(dict_2)))
self.assertEqual(sorted(six.iteritems(num_dict_2)), sorted(six.iteritems(dict_2)))
self.assertEqual(sorted(six.itervalues(num_dict_2)), sorted(six.itervalues(dict_2)))
# Test "in".
for i in num_dict_2:
self.assertIn(i, num_dict_2)
self.assertEqual(i in num_dict_1, i in dict_1)
self.assertEqual(i in num_dict_0, i in dict_0)
self.assertFalse(None in num_dict_2)
self.assertEqual(None in num_dict_2, None in dict_2)
dict_2[None] = 'Cow'
num_dict_2[None] = dict_2[None]
self.assertTrue(None in num_dict_2)
self.assertEqual(None in num_dict_2, None in dict_2)
self.assertFalse('Penguin' in num_dict_2)
# Test update
test = NumDict()
test.update(dict_2)
self.assertEqual(test, num_dict_2)
# Test get
for i in num_dict_2:
self.assertEqual(num_dict_2.get(i), num_dict_2[i])
self.assertEqual(num_dict_1.get(i), dict_1.get(i))
self.assertEqual(num_dict_0.get(i), dict_0.get(i))
for i in ['purple', None, 12312301924091284, 23]:
self.assertEqual(num_dict_2.get(i), dict_2.get(i), i)
with self.assertRaises(AssertionError):
i = '1'
self.assertEqual(num_dict_2.get(i), dict_2.get(i), i) # dict_2 expects string key which does not exist
# Test "in" iteration.
num_dict_2b = num_dict_2
for i in range(20):
num_dict_2[i] = six.text_type(i)
num_dict_2b[six.text_type(i)] = six.text_type(i)
self.assertEqual(num_dict_2, num_dict_2b)
ikeys = []
for k in num_dict_2:
ikeys.append(k)
self.assertEqual(set(ikeys), set(num_dict_2.keys()))
# Test setdefault
val = 1
test = NumDict()
self.assertEqual(test.setdefault(val, 42), 42)
self.assertEqual(test.setdefault(val, '42'), 42)
self.assertNotEqual(test.setdefault(val, 42), '42')
self.assertNotEqual(test.setdefault(val, '42'), '42')
self.assertIn(val, test)
self.assertEqual(test.setdefault(val, 23), 42)
self.assertEqual(test.setdefault(val, '23'), 42)
self.assertNotEqual(test.setdefault(val, 23), '42')
self.assertNotEqual(test.setdefault(val, '23'), '42')
self.assertIn(val, test)
# Test pop
val = 1
test = NumDict({val: 42})
self.assertEqual(test.pop(val), 42)
self.assertRaises(KeyError, test.pop, val)
self.assertEqual(test.pop(val, 1), 1)
test[val] = 42
self.assertEqual(test.pop(val, 1), 42)
# Test popitem
val = 1
test = NumDict({val: 42})
self.assertEqual(test.popitem(), (val, 42))
self.assertRaises(KeyError, test.popitem)
def test_missing(self):
"""
Test missing keys
"""
# Make sure NumDict doesn't have a __missing__ method
self.assertEqual(hasattr(NumDict, "__missing__"), False)
class NumDictD(NumDict):
"""
subclass defines __missing__ method returning a value
"""
def __missing__(self, key): # pylint: disable=no-self-use
key = 42
return key
num_dict_d = NumDictD({1: 2, 3: 4})
self.assertEqual(num_dict_d[1], 2)
self.assertEqual(num_dict_d[3], 4)
self.assertNotIn(2, num_dict_d)
self.assertNotIn(2, num_dict_d.keys())
self.assertEqual(num_dict_d[2], 42)
class NumDictE(NumDict):
"""
subclass defines __missing__ method raising RuntimeError
"""
def __missing__(self, key): # pylint: disable=no-self-use
raise RuntimeError(key)
num_dict_e = NumDictE()
try:
num_dict_e[42]
except RuntimeError as err:
self.assertEqual(err.args, (42,))
else:
self.fail("num_dict_e[42] didn't raise RuntimeError")
class NumDictF(NumDict):
"""
subclass sets __missing__ instance variable (no effect)
"""
def __init__(self):
# An instance variable __missing__ should have no effect
self.__missing__ = lambda key: None
NumDict.__init__(self)
num_dict_f = NumDictF()
try:
num_dict_f[42]
except KeyError as err:
self.assertEqual(err.args, (42,))
else:
self.fail("num_dict_f[42] didn't raise KeyError")
class NumDictG(NumDict):
"""
            subclass doesn't define __missing__ at all
"""
pass
num_dict_g = NumDictG()
try:
num_dict_g[42]
except KeyError as err:
self.assertEqual(err.args, (42,))
else:
self.fail("num_dict_g[42] didn't raise KeyError")
class NumDictH(NumDictD):
"""
            subclass calls the superclass's __missing__ and modifies the value before returning it
"""
def __missing__(self, key): # pylint: disable=arguments-differ
return super(NumDictH, self).__missing__(key) + 1
num_dict_h = NumDictH()
self.assertEqual(num_dict_h[None], num_dict_d[None] + 1)
def test_main():
"""
Run tests when run as main
"""
import logging
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
log.info("=======================")
log.info("STARTING - COMMON TESTS")
log.info("=======================")
log.info("######################################################################")
suite = unittest.TestLoader().loadTestsFromTestCase(NumDictTest)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":
test_main()
|
terbolous/SickRage
|
tests/numdict_tests.py
|
Python
|
gpl-3.0
| 19,849
|
[
"Octopus"
] |
b4abda4ec178ed0d9a715cfbb4bfaedf9223dc65f40579f0323f5223f223b2fb
|
from __future__ import absolute_import
import six
import inspect
from parsimonious.grammar import Grammar, NodeVisitor
from parsimonious.exceptions import ParseError
from sentry.stacktraces.platform import get_behavior_family_for_platform
from sentry.grouping.utils import get_rule_bool
from sentry.utils.safe import get_path
from sentry.utils.glob import glob_match
VERSION = 1
# Grammar is defined in EBNF syntax.
fingerprinting_grammar = Grammar(
r"""
fingerprinting_rules = line+
line = _ (comment / rule / empty) newline?
rule = _ matchers _ follow _ fingerprint
matchers = matcher+
matcher = _ matcher_type sep argument
matcher_type = "path" / "function" / "module" / "family" / "type" / "value" / "message" / "package" / "app"
argument = quoted / unquoted
fingerprint = fp_value+
fp_value = _ fp_argument _ ","?
fp_argument = quoted / unquoted_no_comma
comment = ~r"#[^\r\n]*"
quoted = ~r'"([^"\\]*(?:\\.[^"\\]*)*)"'
unquoted = ~r"\S+"
unquoted_no_comma = ~r"((?:\{\{\s*\S+\s*\}\})|(?:[^\s,]+))"
follow = "->"
sep = ":"
space = " "
empty = ""
newline = ~r"[\r\n]"
_ = space*
"""
)
class InvalidFingerprintingConfig(Exception):
pass
class EventAccess(object):
def __init__(self, event):
self.event = event
self._exceptions = None
self._frames = None
self._messages = None
def get_messages(self):
if self._messages is None:
self._messages = []
message = get_path(self.event, "logentry", "formatted", filter=True)
if message:
self._messages.append(
{
"message": message,
"family": get_behavior_family_for_platform(self.event.get("platform")),
}
)
return self._messages
def get_exceptions(self):
if self._exceptions is None:
self._exceptions = []
for exc in get_path(self.event, "exception", "values", filter=True) or ():
self._exceptions.append(
{
"type": exc.get("type"),
"value": exc.get("value"),
"family": get_behavior_family_for_platform(self.event.get("platform")),
}
)
return self._exceptions
def get_frames(self, with_functions=False):
from sentry.stacktraces.functions import get_function_name_for_frame
if self._frames is None:
self._frames = []
def _push_frame(frame):
platform = frame.get("platform") or self.event.get("platform")
func = get_function_name_for_frame(frame, platform)
self._frames.append(
{
"function": func or "<unknown>",
"path": frame.get("abs_path") or frame.get("filename"),
"module": frame.get("module"),
"family": get_behavior_family_for_platform(platform),
"package": frame.get("package"),
"app": frame.get("in_app"),
}
)
have_errors = False
for exc in get_path(self.event, "exception", "values", filter=True) or ():
for frame in get_path(exc, "stacktrace", "frames", filter=True) or ():
_push_frame(frame)
have_errors = True
if not have_errors:
frames = get_path(self.event, "stacktrace", "frames", filter=True)
if not frames:
threads = get_path(self.event, "threads", "values", filter=True)
if threads and len(threads) == 1:
frames = get_path(threads, 0, "stacktrace", "frames")
for frame in frames or ():
_push_frame(frame)
return self._frames
def get_values(self, interface):
if interface == "message":
return self.get_messages()
elif interface == "exception":
return self.get_exceptions()
elif interface == "frame":
return self.get_frames()
return []
class FingerprintingRules(object):
def __init__(self, rules, changelog=None, version=None):
if version is None:
version = VERSION
self.version = version
self.rules = rules
self.changelog = changelog
def iter_rules(self):
return iter(self.rules)
def get_fingerprint_values_for_event(self, event):
if not self.rules:
return
access = EventAccess(event)
for rule in self.iter_rules():
new_values = rule.get_fingerprint_values_for_event_access(access)
if new_values is not None:
return new_values
@classmethod
def _from_config_structure(cls, data):
version = data["version"]
if version != VERSION:
raise ValueError("Unknown version")
return cls(rules=[Rule._from_config_structure(x) for x in data["rules"]], version=version)
def _to_config_structure(self):
return {"version": self.version, "rules": [x._to_config_structure() for x in self.rules]}
def to_json(self):
return self._to_config_structure()
@classmethod
def from_json(cls, value):
try:
return cls._from_config_structure(value)
except (LookupError, AttributeError, TypeError, ValueError) as e:
raise ValueError("invalid fingerprinting config: %s" % e)
@classmethod
def from_config_string(self, s):
try:
tree = fingerprinting_grammar.parse(s)
except ParseError as e:
context = e.text[e.pos : e.pos + 33]
if len(context) == 33:
context = context[:-1] + "..."
raise InvalidFingerprintingConfig(
'Invalid syntax near "%s" (line %s, column %s)' % (context, e.line(), e.column())
)
return FingerprintingVisitor().visit(tree)
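# Illustrative sketch (not part of the original module): the grammar above
# accepts lines of the form "<matcher>... -> <fingerprint values>".  The
# helper below parses one plausible config string; the matcher values and
# fingerprint names are invented for illustration and not taken from any
# real project's configuration.
def _example_fingerprinting_config():
    """Parse a small example config and return the rules as plain dicts."""
    config = (
        "# lines starting with a hash are comments\n"
        "family:native function:main -> main-crash\n"
        "type:DatabaseUnavailable -> database-unavailable, {{ type }}\n"
    )
    rules = FingerprintingRules.from_config_string(config)
    return [rule._to_config_structure() for rule in rules.iter_rules()]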
class Match(object):
def __init__(self, key, pattern):
self.key = key
self.pattern = pattern
@property
def interface(self):
if self.key == "message":
return "message"
elif self.key in ("type", "value"):
return "exception"
return "frame"
def matches_value(self, value):
if value is None:
return False
if self.key in ("path", "package"):
if glob_match(
value, self.pattern, ignorecase=True, doublestar=True, path_normalize=True
):
return True
if not value.startswith("/") and glob_match(
"/" + value, self.pattern, ignorecase=True, doublestar=True, path_normalize=True
):
return True
elif self.key == "family":
flags = self.pattern.split(",")
if "all" in flags or value in flags:
return True
elif self.key == "app":
ref_val = get_rule_bool(self.pattern)
if ref_val is not None and ref_val == value:
return True
elif glob_match(value, self.pattern, ignorecase=self.key in ("message", "value")):
return True
return False
def _to_config_structure(self):
return [self.key, self.pattern]
@classmethod
def _from_config_structure(cls, obj):
return cls(obj[0], obj[1])
class Rule(object):
def __init__(self, matchers, fingerprint):
self.matchers = matchers
self.fingerprint = fingerprint
def get_fingerprint_values_for_event_access(self, access):
by_interface = {}
for matcher in self.matchers:
by_interface.setdefault(matcher.interface, []).append(matcher)
for interface, matchers in six.iteritems(by_interface):
for values in access.get_values(interface):
if all(x.matches_value(values.get(x.key)) for x in matchers):
break
else:
return
return self.fingerprint
def _to_config_structure(self):
return {
"matchers": [x._to_config_structure() for x in self.matchers],
"fingerprint": self.fingerprint,
}
@classmethod
def _from_config_structure(cls, obj):
return cls([Match._from_config_structure(x) for x in obj["matchers"]], obj["fingerprint"])
class FingerprintingVisitor(NodeVisitor):
visit_comment = visit_empty = lambda *a: None
def visit_comment(self, node, children):
return node.text
def visit_fingerprinting_rules(self, node, children):
changelog = []
rules = []
in_header = True
for child in children:
if isinstance(child, six.string_types):
if in_header and child[:2] == "##":
changelog.append(child[2:].rstrip())
else:
in_header = False
elif child is not None:
rules.append(child)
in_header = False
return FingerprintingRules(rules, inspect.cleandoc("\n".join(changelog)).rstrip() or None)
def visit_line(self, node, children):
_, line, _ = children
comment_or_rule_or_empty = line[0]
if comment_or_rule_or_empty:
return comment_or_rule_or_empty
def visit_rule(self, node, children):
_, matcher, _, _, _, fingerprint = children
return Rule(matcher, fingerprint)
def visit_matcher(self, node, children):
_, ty, _, argument = children
return Match(ty, argument)
def visit_matcher_type(self, node, children):
return node.text
def visit_argument(self, node, children):
return children[0]
visit_fp_argument = visit_argument
def visit_fingerprint(self, node, children):
return children
def visit_fp_value(self, node, children):
_, argument, _, _ = children
return argument
def visit_quoted(self, node, children):
return node.text[1:-1].encode("ascii", "backslashreplace").decode("unicode-escape")
def visit_unquoted(self, node, children):
return node.text
visit_unquoted_no_comma = visit_unquoted
def generic_visit(self, node, children):
return children
|
mvaled/sentry
|
src/sentry/grouping/fingerprinting.py
|
Python
|
bsd-3-clause
| 10,477
|
[
"VisIt"
] |
407d9f79b9398d078360c6f21fb70d2889f7ecdfef2c0bfd980ee5ac57ab9c9b
|
# Copyright (C) 2016 Christopher M. Biwer, Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This modules provides classes for evaluating Gaussian distributions.
"""
import numpy
from scipy.special import erf, erfinv
import scipy.stats
from pycbc.distributions import bounded
class Gaussian(bounded.BoundedDist):
r"""A Gaussian distribution on the given parameters; the parameters are
independent of each other.
Bounds can be provided on each parameter, in which case the distribution
will be a truncated Gaussian distribution. The PDF of a truncated
Gaussian distribution is given by:
.. math::
p(x|a, b, \mu,\sigma) = \frac{1}{\sqrt{2 \pi \sigma^2}}\frac{e^{- \frac{\left( x - \mu \right)^2}{2 \sigma^2}}}{\Phi(b|\mu, \sigma) - \Phi(a|\mu, \sigma)},
where :math:`\mu` is the mean, :math:`\sigma^2` is the variance,
:math:`a,b` are the bounds, and :math:`\Phi` is the cumulative distribution
of an unbounded normal distribution, given by:
.. math::
\Phi(x|\mu, \sigma) = \frac{1}{2}\left[1 + \mathrm{erf}\left(\frac{x-\mu}{\sigma \sqrt{2}}\right)\right].
Note that if :math:`[a,b) = [-\infty, \infty)`, this reduces to a standard
Gaussian distribution.
Instances of this class can be called like a function. By default, logpdf
will be called, but this can be changed by setting the class's __call__
method to its pdf method.
Parameters
----------
\**params :
The keyword arguments should provide the names of parameters and
(optionally) some bounds, as either a tuple or a
`boundaries.Bounds` instance. The mean and variance of each
parameter can be provided by additional keyword arguments that have
    `_mean` and `_var` appended to the parameter name. For example,
`foo=(-2,10), foo_mean=3, foo_var=2` would create a truncated Gaussian
with mean 3 and variance 2, bounded between :math:`[-2, 10)`. If no
mean or variance is provided, the distribution will have 0 mean and
unit variance. If None is provided for the bounds, the distribution
will be a normal, unbounded Gaussian (equivalent to setting the bounds
to `[-inf, inf)`).
Attributes
----------------
    name : 'gaussian'
The name of this distribution.
Examples
--------
Create an unbounded Gaussian distribution with zero mean and unit variance:
>>> dist = distributions.Gaussian(mass1=None)
Create a bounded Gaussian distribution on :math:`[1,10)` with a mean of 3
and a variance of 2:
>>> dist = distributions.Gaussian(mass1=(1,10), mass1_mean=3, mass1_var=2)
Create a bounded Gaussian distribution with the same parameters, but with
cyclic boundary conditions:
>>> dist = distributions.Gaussian(mass1=Bounds(1,10, cyclic=True), mass1_mean=3, mass1_var=2)
"""
name = "gaussian"
def __init__(self, **params):
# save distribution parameters as dict
# calculate the norm and exponential norm ahead of time
# and save to self._norm, self._lognorm, and self._expnorm
self._bounds = {}
self._mean = {}
self._var = {}
self._norm = {}
self._lognorm = {}
self._expnorm = {}
# pull out specified means, variance
mean_args = [p for p in params if p.endswith('_mean')]
var_args = [p for p in params if p.endswith('_var')]
self._mean = dict([[p[:-5], params.pop(p)] for p in mean_args])
self._var = dict([[p[:-4], params.pop(p)] for p in var_args])
# initialize the bounds
super(Gaussian, self).__init__(**params)
# check that there are no params in mean/var that are not in params
missing = set(self._mean.keys()) - set(params.keys())
if any(missing):
raise ValueError("means provided for unknow params {}".format(
', '.join(missing)))
missing = set(self._var.keys()) - set(params.keys())
if any(missing):
raise ValueError("vars provided for unknow params {}".format(
', '.join(missing)))
# set default mean/var for params not specified
self._mean.update(dict([[p, 0.]
for p in params if p not in self._mean]))
self._var.update(dict([[p, 1.]
for p in params if p not in self._var]))
# compute norms
for p,bnds in self._bounds.items():
sigmasq = self._var[p]
mu = self._mean[p]
a,b = bnds
invnorm = scipy.stats.norm.cdf(b, loc=mu, scale=sigmasq**0.5) \
- scipy.stats.norm.cdf(a, loc=mu, scale=sigmasq**0.5)
invnorm *= numpy.sqrt(2*numpy.pi*sigmasq)
self._norm[p] = 1./invnorm
self._lognorm[p] = numpy.log(self._norm[p])
self._expnorm[p] = -1./(2*sigmasq)
@property
def mean(self):
return self._mean
@property
def var(self):
return self._var
def _normalcdf(self, param, value):
"""The CDF of the normal distribution, without bounds."""
mu = self._mean[param]
var = self._var[param]
return 0.5*(1. + erf((value - mu)/(2*var)**0.5))
def cdf(self, param, value):
"""Returns the CDF of the given parameter value."""
a, b = self._bounds[param]
if a != -numpy.inf:
phi_a = self._normalcdf(param, a)
else:
phi_a = 0.
if b != numpy.inf:
phi_b = self._normalcdf(param, b)
else:
phi_b = 1.
phi_x = self._normalcdf(param, value)
return (phi_x - phi_a)/(phi_b - phi_a)
def _normalcdfinv(self, param, p):
"""The inverse CDF of the normal distribution, without bounds."""
mu = self._mean[param]
var = self._var[param]
return mu + (2*var)**0.5 * erfinv(2*p - 1.)
def _cdfinv_param(self, param, p):
"""Return inverse of the CDF.
"""
a, b = self._bounds[param]
if a != -numpy.inf:
phi_a = self._normalcdf(param, a)
else:
phi_a = 0.
if b != numpy.inf:
phi_b = self._normalcdf(param, b)
else:
phi_b = 1.
adjusted_p = phi_a + p * (phi_b - phi_a)
return self._normalcdfinv(param, adjusted_p)
def _pdf(self, **kwargs):
"""Returns the pdf at the given values. The keyword arguments must
contain all of parameters in self's params. Unrecognized arguments are
ignored.
"""
return numpy.exp(self._logpdf(**kwargs))
def _logpdf(self, **kwargs):
"""Returns the log of the pdf at the given values. The keyword
        arguments must contain all of the parameters in self's params. Unrecognized
arguments are ignored.
"""
if kwargs in self:
return sum([self._lognorm[p] +
self._expnorm[p]*(kwargs[p]-self._mean[p])**2.
for p in self._params])
else:
return -numpy.inf
def rvs(self, size=1, param=None):
"""Gives a set of random values drawn from this distribution.
Parameters
----------
size : {1, int}
The number of values to generate; default is 1.
param : {None, string}
If provided, will just return values for the given parameter.
Otherwise, returns random values for each parameter.
Returns
-------
structured array
The random values in a numpy structured array. If a param was
specified, the array will only have an element corresponding to the
given parameter. Otherwise, the array will have an element for each
parameter in self's params.
"""
if param is not None:
dtype = [(param, float)]
else:
dtype = [(p, float) for p in self.params]
arr = numpy.zeros(size, dtype=dtype)
for (p,_) in dtype:
sigma = numpy.sqrt(self._var[p])
mu = self._mean[p]
a,b = self._bounds[p]
arr[p][:] = scipy.stats.truncnorm.rvs((a-mu)/sigma, (b-mu)/sigma,
loc=self._mean[p], scale=sigma, size=size)
return arr
@classmethod
def from_config(cls, cp, section, variable_args):
"""Returns a Gaussian distribution based on a configuration file. The
parameters for the distribution are retrieved from the section titled
"[`section`-`variable_args`]" in the config file.
Boundary arguments should be provided in the same way as described in
`get_param_bounds_from_config`. In addition, the mean and variance of
each parameter can be specified by setting `{param}_mean` and
`{param}_var`, respectively. For example, the following would create a
truncated Gaussian distribution between 0 and 6.28 for a parameter
called `phi` with mean 3.14 and variance 0.5 that is cyclic:
.. code-block:: ini
[{section}-{tag}]
min-phi = 0
max-phi = 6.28
phi_mean = 3.14
phi_var = 0.5
cyclic =
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the distribution
options.
section : str
Name of the section in the configuration file.
variable_args : str
The names of the parameters for this distribution, separated by
`prior.VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
Returns
-------
        Gaussian
A distribution instance from the pycbc.inference.prior module.
"""
return bounded.bounded_from_config(cls, cp, section, variable_args,
bounds_required=False)
__all__ = ['Gaussian']
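# A minimal usage sketch (illustrative only): the parameter name ``mass1`` is
# hypothetical, the bounds/mean/variance syntax follows the class docstring
# above, and the import path assumes this file lives at
# pycbc/distributions/gaussian.py.
#
#   >>> from pycbc.distributions.gaussian import Gaussian
#   >>> dist = Gaussian(mass1=(1, 10), mass1_mean=3, mass1_var=2)
#   >>> draws = dist.rvs(size=100)              # structured array with a 'mass1' field
#   >>> dist.cdf('mass1', draws['mass1'][0])    # CDF of the truncated Gaussian at a draw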
|
ahnitz/pycbc
|
pycbc/distributions/gaussian.py
|
Python
|
gpl-3.0
| 10,663
|
[
"Gaussian"
] |
a39b9b8fadb885919b1e5a29cb1a2009b46349253268fc576d14c20a920dab7f
|
#!/home/psilocaluser/toolchainconda/bin/python
# CDS:
# need to get the right python... we have to update the line above if
# python moves (sigh). /usr/bin/env python3 wouldn't work for me
# when this is called as a crontab (even though it works when called
# as a user). Only thing I found that worked was hardwiring the path
# above.
# this program creates a list of recent github commits to the main
# branch of the main psi4 repo, from
# https://github.com/psi4/psi4/commits/master
# and creates a list of this info suitable to include in a webpage at
# psicode.org
# February 2018, C. David Sherrill
# package bs4: this is installed under the name beautifulsoup4
# it parses HTML elements and is recommended over regular expression
# parsing for HTML files
# package requests: this grabs webpages
import os, sys, requests, bs4
# were we called with an explicit path? If so, write the files there
dirname = os.path.dirname(sys.argv[0])
# pull over the webpage of github commits (should already be sorted with
# most recent ones at the top, and only go back a reasonable number of
# commits)
res = requests.get('https://github.com/psi4/psi4/commits/master')
res.raise_for_status() # error out if something went wrong
# parse the webpage into elements
parsed = bs4.BeautifulSoup(res.text, "html.parser")
# get list of commit titles
xtitles = parsed.select('.commit-title')
# each titles entry has more junk than we need... often it repeats information...
# let's just get the first anchor tag within each of these, and within that anchor
# tag, there is a descriptor with the name 'title', e.g.,
#
# <a class="message" data-pjax="true"
# href="/psi4/psi4/commit/bc46e5fb9841e44fdf4bba118eba6458dea6b342"
# title="Merge pull request #906 from psi4/loriab-patch-1
# Fixes wB97 & wB97X SAD occupations in dft1 test">Merge pull request</a>
titles = []
commit_hrefs = []
commit_hashes_short = []
# merge commits have multi-line stuff... just take first line
for xtitle in xtitles:
first_anchor = xtitle.find('a')
    # read the 'title' and 'href' attributes of the anchor (get_text() would
    # only return the anchor's visible text, not its attributes)
    titles.append((first_anchor.get('title').split('\n'))[0])
    href_string = first_anchor.get('href')
commit_hrefs.append(href_string)
commit_hashes_short.append(((href_string.split('/'))[-1])[0:7])
# get the list of authors
authors = parsed.select('.commit-author')
# get the times committed
commit_times = parsed.select('.commit-author-section relative-time')
# write all articles to commits.txt
f_out = open(dirname + '/commits.txt', 'w')
f_out.write('<p>\n<ul>\n')
first_written = False
for commit_number, title in enumerate(titles):
f_out.write('<li> {} by {}<br />\n'.format(commit_times[commit_number].getText(), authors[commit_number].getText()))
f_out.write('[<a href="https://github.com{}">{}</a>] {}</li><br />\n'.format(commit_hrefs[commit_number], commit_hashes_short[commit_number], title))
# write the top (most recent) commit to its own file, most_recent_commit.txt
# let's skip merge commits because they read as less interesting
if (not(first_written) and not(title[0:18] == "Merge pull request")):
first_out = open(dirname + '/most_recent_commit.txt', 'w')
first_out.write('{} by {}:<br />\n'.format(commit_times[commit_number].getText(), authors[commit_number].getText()))
first_out.write('[<a href="https://github.com{}">{}</a>] {}\n'.format(commit_hrefs[commit_number], commit_hashes_short[commit_number], title))
first_out.close()
first_written = True
f_out.write('</ul>\n</p>\n')
f_out.close()
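# Usage sketch (assumes network access to github.com; filenames match the code
# above, which writes relative to os.path.dirname(sys.argv[0])):
#
#   $ python parse_gh_commits.py
#   # -> writes commits.txt and most_recent_commit.txt next to the script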
|
psi4/psi4meta
|
github-feed/parse_gh_commits.py
|
Python
|
gpl-2.0
| 3,546
|
[
"Psi4"
] |
5dd106ee537302e2a20c30d95f9286a8cef9beaa31060da221e564d5275b2265
|
#
# iqcalc.py -- image quality calculations on FITS data
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) 2011-2012, Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import math
import logging
import numpy
import threading
import scipy.optimize as optimize
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
from ginga.misc import Bunch
class IQCalcError(Exception):
"""Base exception for raising errors in this module."""
pass
class IQCalc(object):
def __init__(self, logger=None):
if not logger:
logger = logging.getLogger('IQCalc')
self.logger = logger
# for mutex around scipy.optimize, which seems to be non-threadsafe
self.lock = threading.RLock()
# for adjustments to background level
self.skylevel_magnification = 1.05
self.skylevel_offset = 40.0
# FWHM CALCULATION
def gaussian(self, x, p):
"""Gaussian fitting function in 1D. Makes a sine function with
amplitude determined by maxv. See calc_fwhm().
p[0]==mean, p[1]==sdev, p[2]=maxv
"""
y = (1.0 / (p[1] * numpy.sqrt(2*numpy.pi)) *
numpy.exp(-(x - p[0])**2 / (2*p[1]**2))) * p[2]
return y
def calc_fwhm(self, arr1d, medv=None, gauss_fn=None):
"""FWHM calculation on a 1D array by using least square fitting of
a gaussian function on the data. arr1d is a 1D array cut in either
X or Y direction on the object.
"""
if not gauss_fn:
gauss_fn = self.gaussian
N = len(arr1d)
X = numpy.array(range(N))
Y = arr1d
# Fitting works more reliably if we do the following
# a. subtract sky background
        if medv is None:
medv = numpy.median(Y)
Y = Y - medv
maxv = Y.max()
# b. clamp to 0..max (of the sky subtracted field)
Y = Y.clip(0, maxv)
# Fit a gaussian
        p0 = [0, N-1, maxv]  # Initial guess
# Distance to the target function
errfunc = lambda p, x, y: gauss_fn(x, p) - y
# Least square fit to the gaussian
with self.lock:
# NOTE: without this mutex, optimize.leastsq causes a fatal error
# sometimes--it appears not to be thread safe.
# The error is:
# "SystemError: null argument to internal routine"
# "Fatal Python error: GC object already tracked"
p1, success = optimize.leastsq(errfunc, p0[:], args=(X, Y))
if not success:
raise IQCalcError("FWHM gaussian fitting failed")
mu, sdev, maxv = p1
self.logger.debug("mu=%f sdev=%f maxv=%f" % (mu, sdev, maxv))
# Now that we have the sdev from fitting, we can calculate FWHM
        # (fwhm = sdev * sqrt(8*log(2)) = 2*sqrt(2*log(2)) * sdev ~= 2.355 * sdev)
fwhm = 2.0 * numpy.sqrt(2.0 * numpy.log(2.0)) * sdev
#return (fwhm, mu, sdev, maxv)
return (float(fwhm), float(mu), float(sdev), maxv)
def get_fwhm(self, x, y, radius, data, medv=None):
"""
"""
        if medv is None:
medv = numpy.median(data)
# Get two cuts of the data, one in X and one in Y
x0, y0, xarr, yarr = self.cut_cross(x, y, radius, data)
# Calculate FWHM in each direction
fwhm_x, cx, sdx, maxx = self.calc_fwhm(xarr, medv=medv)
fwhm_y, cy, sdy, maxy = self.calc_fwhm(yarr, medv=medv)
ctr_x = x0 + cx
ctr_y = y0 + cy
self.logger.debug("fwhm_x,fwhm_y=%f,%f center=%f,%f" % (
fwhm_x, fwhm_y, ctr_x, ctr_y))
return (fwhm_x, fwhm_y, ctr_x, ctr_y, sdx, sdy, maxx, maxy)
def starsize(self, fwhm_x, deg_pix_x, fwhm_y, deg_pix_y):
cdelta1 = math.fabs(deg_pix_x)
cdelta2 = math.fabs(deg_pix_y)
fwhm = (fwhm_x * cdelta1 + fwhm_y * cdelta2) / 2.0
fwhm = fwhm * 3600.0
return fwhm
def centroid(self, data, xc, yc, radius):
        x0, y0, arr = self.cut_region(xc, yc, radius, data)
cy, cx = ndimage.center_of_mass(arr)
return (cx, cy)
# FINDING BRIGHT PEAKS
def get_threshold(self, data, sigma=5.0):
median = numpy.median(data)
# NOTE: for this method a good default sigma is 5.0
dist = numpy.fabs(data - median).mean()
threshold = median + sigma * dist
# NOTE: for this method a good default sigma is 2.0
## std = numpy.std(data - median)
## threshold = median + sigma * std
self.logger.debug("calc threshold=%f" % (threshold))
return threshold
def find_bright_peaks(self, data, threshold=None, sigma=5, radius=5):
"""
Find bright peak candidates in (data). (threshold) specifies a
threshold value below which an object is not considered a candidate.
If threshold is blank, a default is calculated using (sigma).
(radius) defines a pixel radius for determining local maxima--if the
desired objects are larger in size, specify a larger radius.
The routine returns a list of candidate object coordinate tuples
(x, y) in data.
"""
        if threshold is None:
# set threshold to default if none provided
threshold = self.get_threshold(data, sigma=sigma)
self.logger.debug("threshold defaults to %f (sigma=%f)" % (
threshold, sigma))
data_max = filters.maximum_filter(data, radius)
maxima = (data == data_max)
diff = data_max > threshold
maxima[diff == 0] = 0
labeled, num_objects = ndimage.label(maxima)
slices = ndimage.find_objects(labeled)
peaks = []
for dy, dx in slices:
xc = (dx.start + dx.stop - 1)/2.0
yc = (dy.start + dy.stop - 1)/2.0
# This is only an approximate center; use FWHM or centroid
# calculation to refine further
peaks.append((xc, yc))
return peaks
def cut_region(self, x, y, radius, data):
"""Return a cut region (radius) pixels away from (x, y) in (data).
"""
n = radius
ht, wd = data.shape
x0, x1 = max(0, x-n), min(wd-1, x+n)
y0, y1 = max(0, y-n), min(ht-1, y+n)
arr = data[y0:y1+1, x0:x1+1]
return (x0, y0, arr)
def cut_cross(self, x, y, radius, data):
"""Cut two data subarrays that have a center at (x, y) and with
radius (radius) from (data). Returns the starting pixel (x0, y0)
of each cut and the respective arrays (xarr, yarr).
"""
n = radius
ht, wd = data.shape
x0, x1 = max(0, x-n), min(wd-1, x+n)
y0, y1 = max(0, y-n), min(ht-1, y+n)
xarr = data[y, x0:x1+1]
yarr = data[y0:y1+1, x]
return (x0, y0, xarr, yarr)
def brightness(self, x, y, radius, medv, data):
"""Return the brightness value found in a region (radius) pixels away
from (x, y) in (data).
"""
x0, y0, arr = self.cut_region(x, y, radius, data)
arr2 = numpy.sort(arr.flat)
idx = int(len(arr2) * 0.8)
res = arr2[idx] - medv
return float(res)
def fwhm_data(self, x, y, data, radius=15):
return self.get_fwhm(x, y, radius, data)
# EVALUATION ON A FIELD
def evaluate_peaks(self, peaks, data, bright_radius=2, fwhm_radius=15,
fwhm_method=1, cb_fn=None, ev_intr=None):
height, width = data.shape
hh = float(height) / 2.0
ht = float(height)
h4 = float(height) * 4.0
wh = float(width) / 2.0
wd = float(width)
w4 = float(width) * 4.0
# Find the median (sky/background) level
median = float(numpy.median(data))
#skylevel = median
# Old SOSS qualsize() applied this calculation to skylevel
skylevel = median * self.skylevel_magnification + self.skylevel_offset
# Form a list of objects and their characteristics
objlist = []
for x, y in peaks:
if ev_intr and ev_intr.isSet():
raise IQCalcError("Evaluation interrupted!")
# Find the fwhm in x and y
try:
if fwhm_method == 1:
(fwhm_x, fwhm_y, ctr_x, ctr_y,
sdx, sdy, maxx, maxy) = self.fwhm_data(x, y, data,
radius=fwhm_radius)
## # Average the X and Y gaussian fitting near the peak
bx = self.gaussian(round(ctr_x), (ctr_x, sdx, maxx))
by = self.gaussian(round(ctr_y), (ctr_y, sdy, maxy))
## ## bx = self.gaussian(ctr_x, (ctr_x, sdx, maxx))
## ## by = self.gaussian(ctr_y, (ctr_y, sdy, maxy))
bright = float((bx + by)/2.0)
else:
raise IQCalcError("Method (%d) not supported for fwhm calculation!" %(
fwhm_method))
            except Exception as e:
# Error doing FWHM, skip this object
self.logger.debug("Error doing FWHM on object at %.2f,%.2f: %s" % (
x, y, str(e)))
continue
self.logger.debug("orig=%f,%f ctr=%f,%f fwhm=%f,%f bright=%f" % (
x, y, ctr_x, ctr_y, fwhm_x, fwhm_y, bright))
# overall measure of fwhm as a single value
#fwhm = math.sqrt(fwhm_x*fwhm_x + fwhm_y*fwhm_y)
#fwhm = (math.fabs(fwhm_x) + math.fabs(fwhm_y)) / 2.0
fwhm = (math.sqrt(fwhm_x*fwhm_x + fwhm_y*fwhm_y) *
(1.0 / math.sqrt(2.0)) )
# calculate a measure of ellipticity
elipse = math.fabs(min(fwhm_x, fwhm_y) / max(fwhm_x, fwhm_y))
# calculate a measure of distance from center of image
dx = wh - ctr_x
dy = hh - ctr_y
dx2 = dx*dx / wd / w4
dy2 = dy*dy / ht / h4
if dx2 > dy2:
pos = 1.0 - dx2
else:
pos = 1.0 - dy2
obj = Bunch.Bunch(objx=ctr_x, objy=ctr_y, pos=pos,
fwhm_x=fwhm_x, fwhm_y=fwhm_y,
fwhm=fwhm, fwhm_radius=fwhm_radius,
brightness=bright, elipse=elipse,
x=int(x), y=int(y),
skylevel=skylevel, background=median)
objlist.append(obj)
            if cb_fn is not None:
cb_fn(obj)
return objlist
def _compare(self, obj1, obj2):
val1 = obj1.brightness * obj1.pos/math.sqrt(obj1.fwhm)
val2 = obj2.brightness * obj2.pos/math.sqrt(obj2.fwhm)
if val1 > val2:
return -1
elif val2 > val1:
return 1
else:
return 0
def objlist_select(self, objlist, width, height,
minfwhm=2.0, maxfwhm=150.0, minelipse=0.5,
edgew=0.01):
results = []
count = 0
for obj in objlist:
count += 1
self.logger.debug("%d obj x,y=%.2f,%.2f fwhm=%.2f bright=%.2f" % (
count, obj.objx, obj.objy, obj.fwhm, obj.brightness))
# If peak has a minfwhm < fwhm < maxfwhm and the object
# is inside the frame by edgew pct
if ((minfwhm < obj.fwhm) and (obj.fwhm < maxfwhm) and
(minelipse < obj.elipse) and (width*edgew < obj.x) and
(height*edgew < obj.y) and (width*(1.0-edgew) > obj.x) and
(height*(1.0-edgew) > obj.y)):
results.append(obj)
results.sort(self._compare)
return results
def pick_field(self, data, peak_radius=5, bright_radius=2, fwhm_radius=15,
threshold=None,
minfwhm=2.0, maxfwhm=50.0, minelipse=0.5,
edgew=0.01):
height, width = data.shape
# Find the bright peaks in the image
peaks = self.find_bright_peaks(data, radius=peak_radius,
threshold=threshold)
#print "peaks=", peaks
self.logger.info("peaks=%s" % str(peaks))
if len(peaks) == 0:
raise IQCalcError("Cannot find bright peaks")
# Evaluate those peaks
objlist = self.evaluate_peaks(peaks, data,
bright_radius=bright_radius,
fwhm_radius=fwhm_radius)
if len(objlist) == 0:
raise IQCalcError("Error evaluating bright peaks")
results = self.objlist_select(objlist, width, height,
minfwhm=minfwhm, maxfwhm=maxfwhm,
minelipse=minelipse, edgew=edgew)
if len(results) == 0:
raise IQCalcError("No object matches selection criteria")
return results[0]
#END
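# Minimal usage sketch (illustrative only; `data` is assumed to be a 2D numpy
# image array already loaded from a FITS file):
#
#   >>> iq = IQCalc()
#   >>> peaks = iq.find_bright_peaks(data)      # candidate (x, y) positions
#   >>> best = iq.pick_field(data)              # best star-like object in the field
#   >>> best.fwhm, best.objx, best.objy         # FWHM (pixels) and fitted center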
|
astrofrog/ginga
|
ginga/iqcalc.py
|
Python
|
bsd-3-clause
| 13,125
|
[
"Gaussian"
] |
a697333c39f6768dd36d68cd85ac7a53bfbf77e47d578f6921559c9f4de24981
|
"""
Demo by G. Brammer
"""
import numpy as np
from voronoi import bin2d
import matplotlib.pyplot as plt
# Noisy gaussian
yp, xp = np.indices((100,100))
R = np.sqrt((xp-50)**2+(yp-50)**2)
sigma = 10
g = 10*np.exp(-R**2/2/sigma**2)
s = 1
noise = np.random.normal(size=R.shape)*s
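# Voronoi-bin the noisy image: the positional arguments are the pixel x and y
# coordinates, the signal (g + noise), the per-pixel noise, and the target S/N
# per bin (20 here).  The cvt/wvt flags are taken to select the centroidal /
# weighted-Voronoi iteration variants, as in the usual Cappellari & Copin
# binning scheme (assumed, not verified against bin2d's documentation).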
pix_bin, bin_x, bin_y, bin_sn, bin_npix, scale = bin2d.bin2d(xp.flatten(), yp.flatten(), (g+noise).flatten(), g.flatten()*0+s, 20., cvt=True, wvt=False, graphs=False, quiet=False)
# Bin stats
bad = bin_sn < 5
masked = pix_bin*1
mean_bins = pix_bin*0.
median_bins = pix_bin*0.
mea = bin_x*0.
med = bin_x*0.
bx = bin_x*0.
by = bin_y*0.
bin_ids = np.unique(pix_bin)
for i in range(len(bin_ids)):
bin_mask = pix_bin == bin_ids[i]
mea[i] = (g+noise).flatten()[bin_mask].mean()
mean_bins[bin_mask] = mea[i]
med[i] = np.median((g+noise).flatten()[bin_mask])
median_bins[bin_mask] = med[i]
bx[i] = np.sum(xp.flatten()*bin_mask)/bin_mask.sum()
by[i] = np.sum(yp.flatten()*bin_mask)/bin_mask.sum()
for bin in np.where(bad)[0]:
bin_mask = pix_bin == bin
masked[bin_mask] = -99
# Plot
plt.rcParams['image.origin'] = 'lower'
fig = plt.figure(figsize=[9, 2.8])
ax = fig.add_subplot(131)
ax.imshow(pix_bin.reshape(R.shape))
ax.scatter(bin_x, bin_y, marker='.', color='k', alpha=0.1)
ax = fig.add_subplot(132)
ax.imshow(g+noise, vmin=-0.1, vmax=10, cmap='gray_r')
ax = fig.add_subplot(133)
ax.imshow(median_bins.reshape(R.shape), vmin=-0.1, vmax=10, cmap='gray_r')
for ax in fig.axes:
ax.set_xticklabels([]); ax.set_yticklabels([])
fig.tight_layout(pad=0.1)
fig.savefig('test.png')
|
lauralwatkins/voronoi
|
example/test.py
|
Python
|
bsd-2-clause
| 1,615
|
[
"Gaussian"
] |
80c33a160e643949ff26ff863678e1599f99ead64cd307d7ca35e13c330c9419
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
************************************************************
**LiquidGasLB** - class for lattice Boltzmann methods
************************************************************
The LiquidGasLB class is an extension to the integrator class of ESPResSo++.
It creates a simulation box with specified dimensions and allocates necessary
memory for a lattice Boltzmann simulation. By default we use D3Q19 lattice model
(in three dimensions and with 19-velocities on the node model).
LiquidGasLB constructor expects 5 parameters (and a system pointer).
These are: lattice size in 3D Ni, lattice spacing a, lattice timestep tau,
number of dimensions and number of velocity vectors on a lattice node.
The lattice size, Ni, is an obligatory parameter and must be set at the
beginning of the simulation.
The default lattice model is D3Q19 (numDims = 3, numVels = 19) and both lattice
spacing and timestep are set to 1.
Note that at the present stage of development we aim at D3Q19 model.
If you want to use something else, please, feel free to modify the code.
Originally, we had planned this module to operate in 3D only, so if you
need a 2D version, there is a bit more tuning involved. On the other hand,
adding different 3D lattice models (such as D3Q15 or D3Q27) is rather
straightforward.
Example
>>> lb = espressopp.integrator.LiquidGasLB(system, Ni=Int3D(20, 20, 20))
>>> # creates a cubic box of 20^3 nodes with default spacing parameters in D3Q19 model.
Example
>>> lb = espressopp.integrator.LiquidGasLB(system, Ni=Int3D(30, 20, 20), a = 0.5, tau = 0.5)
>>> # creates a box of 30*20*20 nodes with lattice spacing of 0.5 and timestep of 0.5.
>>> # The model of the lattice is D3Q19.
After initialization of the Lattice Boltzmann module, one has a possibility to
set several properties of the system:
gamma_b and gamma_s are bulk and shear gammas (default values are 0.);
gamma_odd and gamma_even are (hey-hey, surprise!) odd and even gammas (defaults 0.);
(if you are unsure what these gammas are, please refer to any lattice Boltzmann review.
In short, they control correspondent viscosities of the liquid.)
lbTemp is the temperature in lb units for setting up fluctuations (default is 0.);
Example
>>> lb = espressopp.integrator.LiquidGasLB(system, Ni=Int3D(20, 20, 20))
>>> lb.lbTemp = 0.0000005
>>> # creates a box of 20^3 nodes with lattice spacing of 1. and timestep of 1. D3Q19 model.
>>> # then the fluctuations with the temperature of 0.0000005 are initialized.
Example
>>> lb = espressopp.integrator.LiquidGasLB(system, Ni=Int3D(20, 20, 20))
>>> lb.gamma_b = 0.5
>>> lb.gamma_s = 0.5
>>> # creates a box of 20^3 nodes with lattice spacing of 1. and timestep of 1. D3Q19 model.
>>> # then the bulk and shear gammas are set to 0.5
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp import Real3D
from espressopp import Int3D
from espressopp.integrator.Extension import *
from _espressopp import integrator_LiquidGasLB
class LiquidGasLBLocal(ExtensionLocal, integrator_LiquidGasLB):
def __init__(self, system, Ni , a = 1., tau = 1., numDims = 3, numVels = 19):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, integrator_LiquidGasLB, system, Ni, a, tau, numDims, numVels)
if pmi.isController :
class LiquidGasLB(Extension):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.integrator.LiquidGasLBLocal',
pmiproperty = [ 'Ni', 'a', 'tau', 'numDims', 'numVels',
'gamma_b', 'gamma_s', 'gamma_odd', 'gamma_even', 'lbTemp']
)
|
capoe/espressopp.soap
|
src/integrator/LiquidGasLB.py
|
Python
|
gpl-3.0
| 4,569
|
[
"ESPResSo"
] |
3b48a40e5cbb3b04c933aa85a4fc09ae328648b4adb5b3d4e35e22bd7c0534f3
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflowcx_v3.services.transition_route_groups import (
TransitionRouteGroupsAsyncClient,
)
from google.cloud.dialogflowcx_v3.services.transition_route_groups import (
TransitionRouteGroupsClient,
)
from google.cloud.dialogflowcx_v3.services.transition_route_groups import pagers
from google.cloud.dialogflowcx_v3.services.transition_route_groups import transports
from google.cloud.dialogflowcx_v3.types import fulfillment
from google.cloud.dialogflowcx_v3.types import page
from google.cloud.dialogflowcx_v3.types import response_message
from google.cloud.dialogflowcx_v3.types import transition_route_group
from google.cloud.dialogflowcx_v3.types import (
transition_route_group as gcdc_transition_route_group,
)
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert TransitionRouteGroupsClient._get_default_mtls_endpoint(None) is None
assert (
TransitionRouteGroupsClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
TransitionRouteGroupsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
TransitionRouteGroupsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
TransitionRouteGroupsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
TransitionRouteGroupsClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class", [TransitionRouteGroupsClient, TransitionRouteGroupsAsyncClient,]
)
def test_transition_route_groups_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.TransitionRouteGroupsGrpcTransport, "grpc"),
(transports.TransitionRouteGroupsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_transition_route_groups_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class", [TransitionRouteGroupsClient, TransitionRouteGroupsAsyncClient,]
)
def test_transition_route_groups_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_transition_route_groups_client_get_transport_class():
transport = TransitionRouteGroupsClient.get_transport_class()
available_transports = [
transports.TransitionRouteGroupsGrpcTransport,
]
assert transport in available_transports
transport = TransitionRouteGroupsClient.get_transport_class("grpc")
assert transport == transports.TransitionRouteGroupsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
TransitionRouteGroupsClient,
transports.TransitionRouteGroupsGrpcTransport,
"grpc",
),
(
TransitionRouteGroupsAsyncClient,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
TransitionRouteGroupsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TransitionRouteGroupsClient),
)
@mock.patch.object(
TransitionRouteGroupsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TransitionRouteGroupsAsyncClient),
)
def test_transition_route_groups_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(TransitionRouteGroupsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(TransitionRouteGroupsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
TransitionRouteGroupsClient,
transports.TransitionRouteGroupsGrpcTransport,
"grpc",
"true",
),
(
TransitionRouteGroupsAsyncClient,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
TransitionRouteGroupsClient,
transports.TransitionRouteGroupsGrpcTransport,
"grpc",
"false",
),
(
TransitionRouteGroupsAsyncClient,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
TransitionRouteGroupsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TransitionRouteGroupsClient),
)
@mock.patch.object(
TransitionRouteGroupsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TransitionRouteGroupsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_transition_route_groups_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class", [TransitionRouteGroupsClient, TransitionRouteGroupsAsyncClient]
)
@mock.patch.object(
TransitionRouteGroupsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TransitionRouteGroupsClient),
)
@mock.patch.object(
TransitionRouteGroupsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TransitionRouteGroupsAsyncClient),
)
def test_transition_route_groups_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
TransitionRouteGroupsClient,
transports.TransitionRouteGroupsGrpcTransport,
"grpc",
),
(
TransitionRouteGroupsAsyncClient,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_transition_route_groups_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
TransitionRouteGroupsClient,
transports.TransitionRouteGroupsGrpcTransport,
"grpc",
grpc_helpers,
),
(
TransitionRouteGroupsAsyncClient,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_transition_route_groups_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_transition_route_groups_client_client_options_from_dict():
with mock.patch(
"google.cloud.dialogflowcx_v3.services.transition_route_groups.transports.TransitionRouteGroupsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = TransitionRouteGroupsClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
TransitionRouteGroupsClient,
transports.TransitionRouteGroupsGrpcTransport,
"grpc",
grpc_helpers,
),
(
TransitionRouteGroupsAsyncClient,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_transition_route_groups_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=None,
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type", [transition_route_group.ListTransitionRouteGroupsRequest, dict,]
)
def test_list_transition_route_groups(request_type, transport: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = transition_route_group.ListTransitionRouteGroupsResponse(
next_page_token="next_page_token_value",
)
response = client.list_transition_route_groups(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.ListTransitionRouteGroupsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTransitionRouteGroupsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_transition_route_groups_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
client.list_transition_route_groups()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.ListTransitionRouteGroupsRequest()
@pytest.mark.asyncio
async def test_list_transition_route_groups_async(
transport: str = "grpc_asyncio",
request_type=transition_route_group.ListTransitionRouteGroupsRequest,
):
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
transition_route_group.ListTransitionRouteGroupsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_transition_route_groups(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.ListTransitionRouteGroupsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTransitionRouteGroupsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_transition_route_groups_async_from_dict():
await test_list_transition_route_groups_async(request_type=dict)
def test_list_transition_route_groups_field_headers():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = transition_route_group.ListTransitionRouteGroupsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
call.return_value = transition_route_group.ListTransitionRouteGroupsResponse()
client.list_transition_route_groups(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_transition_route_groups_field_headers_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = transition_route_group.ListTransitionRouteGroupsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
transition_route_group.ListTransitionRouteGroupsResponse()
)
await client.list_transition_route_groups(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_transition_route_groups_flattened():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = transition_route_group.ListTransitionRouteGroupsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_transition_route_groups(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_transition_route_groups_flattened_error():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_transition_route_groups(
transition_route_group.ListTransitionRouteGroupsRequest(),
parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_transition_route_groups_flattened_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = transition_route_group.ListTransitionRouteGroupsResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
transition_route_group.ListTransitionRouteGroupsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_transition_route_groups(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_transition_route_groups_flattened_error_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_transition_route_groups(
transition_route_group.ListTransitionRouteGroupsRequest(),
parent="parent_value",
)
def test_list_transition_route_groups_pager(transport_name: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
next_page_token="abc",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[], next_page_token="def",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
],
next_page_token="ghi",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_transition_route_groups(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(
isinstance(i, transition_route_group.TransitionRouteGroup) for i in results
)
def test_list_transition_route_groups_pages(transport_name: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
next_page_token="abc",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[], next_page_token="def",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
],
next_page_token="ghi",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
),
RuntimeError,
)
pages = list(client.list_transition_route_groups(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_transition_route_groups_async_pager():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
next_page_token="abc",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[], next_page_token="def",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
],
next_page_token="ghi",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
),
RuntimeError,
)
async_pager = await client.list_transition_route_groups(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, transition_route_group.TransitionRouteGroup)
for i in responses
)
@pytest.mark.asyncio
async def test_list_transition_route_groups_async_pages():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
next_page_token="abc",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[], next_page_token="def",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
],
next_page_token="ghi",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
),
RuntimeError,
)
pages = []
async for page_ in (
await client.list_transition_route_groups(request={})
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [transition_route_group.GetTransitionRouteGroupRequest, dict,]
)
def test_get_transition_route_group(request_type, transport: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = transition_route_group.TransitionRouteGroup(
name="name_value", display_name="display_name_value",
)
response = client.get_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.GetTransitionRouteGroupRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, transition_route_group.TransitionRouteGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
def test_get_transition_route_group_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
client.get_transition_route_group()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.GetTransitionRouteGroupRequest()
@pytest.mark.asyncio
async def test_get_transition_route_group_async(
transport: str = "grpc_asyncio",
request_type=transition_route_group.GetTransitionRouteGroupRequest,
):
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
transition_route_group.TransitionRouteGroup(
name="name_value", display_name="display_name_value",
)
)
response = await client.get_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.GetTransitionRouteGroupRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, transition_route_group.TransitionRouteGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
@pytest.mark.asyncio
async def test_get_transition_route_group_async_from_dict():
await test_get_transition_route_group_async(request_type=dict)
def test_get_transition_route_group_field_headers():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = transition_route_group.GetTransitionRouteGroupRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
call.return_value = transition_route_group.TransitionRouteGroup()
client.get_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_transition_route_group_field_headers_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = transition_route_group.GetTransitionRouteGroupRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
transition_route_group.TransitionRouteGroup()
)
await client.get_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_transition_route_group_flattened():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = transition_route_group.TransitionRouteGroup()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_transition_route_group(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_transition_route_group_flattened_error():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_transition_route_group(
transition_route_group.GetTransitionRouteGroupRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_transition_route_group_flattened_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = transition_route_group.TransitionRouteGroup()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
transition_route_group.TransitionRouteGroup()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_transition_route_group(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_transition_route_group_flattened_error_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_transition_route_group(
transition_route_group.GetTransitionRouteGroupRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type",
[gcdc_transition_route_group.CreateTransitionRouteGroupRequest, dict,],
)
def test_create_transition_route_group(request_type, transport: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_transition_route_group.TransitionRouteGroup(
name="name_value", display_name="display_name_value",
)
response = client.create_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert (
args[0] == gcdc_transition_route_group.CreateTransitionRouteGroupRequest()
)
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_transition_route_group.TransitionRouteGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
def test_create_transition_route_group_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
client.create_transition_route_group()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert (
args[0] == gcdc_transition_route_group.CreateTransitionRouteGroupRequest()
)
@pytest.mark.asyncio
async def test_create_transition_route_group_async(
transport: str = "grpc_asyncio",
request_type=gcdc_transition_route_group.CreateTransitionRouteGroupRequest,
):
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_transition_route_group.TransitionRouteGroup(
name="name_value", display_name="display_name_value",
)
)
response = await client.create_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert (
args[0] == gcdc_transition_route_group.CreateTransitionRouteGroupRequest()
)
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_transition_route_group.TransitionRouteGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
@pytest.mark.asyncio
async def test_create_transition_route_group_async_from_dict():
await test_create_transition_route_group_async(request_type=dict)
def test_create_transition_route_group_field_headers():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_transition_route_group.CreateTransitionRouteGroupRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
call.return_value = gcdc_transition_route_group.TransitionRouteGroup()
client.create_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_transition_route_group_field_headers_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_transition_route_group.CreateTransitionRouteGroupRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_transition_route_group.TransitionRouteGroup()
)
await client.create_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_transition_route_group_flattened():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_transition_route_group.TransitionRouteGroup()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_transition_route_group(
parent="parent_value",
transition_route_group=gcdc_transition_route_group.TransitionRouteGroup(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].transition_route_group
mock_val = gcdc_transition_route_group.TransitionRouteGroup(name="name_value")
assert arg == mock_val
def test_create_transition_route_group_flattened_error():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_transition_route_group(
gcdc_transition_route_group.CreateTransitionRouteGroupRequest(),
parent="parent_value",
transition_route_group=gcdc_transition_route_group.TransitionRouteGroup(
name="name_value"
),
)
@pytest.mark.asyncio
async def test_create_transition_route_group_flattened_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_transition_route_group.TransitionRouteGroup()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_transition_route_group.TransitionRouteGroup()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_transition_route_group(
parent="parent_value",
transition_route_group=gcdc_transition_route_group.TransitionRouteGroup(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].transition_route_group
mock_val = gcdc_transition_route_group.TransitionRouteGroup(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_transition_route_group_flattened_error_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_transition_route_group(
gcdc_transition_route_group.CreateTransitionRouteGroupRequest(),
parent="parent_value",
transition_route_group=gcdc_transition_route_group.TransitionRouteGroup(
name="name_value"
),
)
@pytest.mark.parametrize(
"request_type",
[gcdc_transition_route_group.UpdateTransitionRouteGroupRequest, dict,],
)
def test_update_transition_route_group(request_type, transport: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_transition_route_group.TransitionRouteGroup(
name="name_value", display_name="display_name_value",
)
response = client.update_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert (
args[0] == gcdc_transition_route_group.UpdateTransitionRouteGroupRequest()
)
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_transition_route_group.TransitionRouteGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
def test_update_transition_route_group_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_transition_route_group), "__call__"
) as call:
client.update_transition_route_group()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert (
args[0] == gcdc_transition_route_group.UpdateTransitionRouteGroupRequest()
)
@pytest.mark.asyncio
async def test_update_transition_route_group_async(
transport: str = "grpc_asyncio",
request_type=gcdc_transition_route_group.UpdateTransitionRouteGroupRequest,
):
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_transition_route_group.TransitionRouteGroup(
name="name_value", display_name="display_name_value",
)
)
response = await client.update_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert (
args[0] == gcdc_transition_route_group.UpdateTransitionRouteGroupRequest()
)
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_transition_route_group.TransitionRouteGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
@pytest.mark.asyncio
async def test_update_transition_route_group_async_from_dict():
await test_update_transition_route_group_async(request_type=dict)
def test_update_transition_route_group_field_headers():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_transition_route_group.UpdateTransitionRouteGroupRequest()
request.transition_route_group.name = "transition_route_group.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_transition_route_group), "__call__"
) as call:
call.return_value = gcdc_transition_route_group.TransitionRouteGroup()
client.update_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"transition_route_group.name=transition_route_group.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_transition_route_group_field_headers_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_transition_route_group.UpdateTransitionRouteGroupRequest()
request.transition_route_group.name = "transition_route_group.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_transition_route_group), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_transition_route_group.TransitionRouteGroup()
)
await client.update_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"transition_route_group.name=transition_route_group.name/value",
) in kw["metadata"]
def test_update_transition_route_group_flattened():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_transition_route_group.TransitionRouteGroup()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_transition_route_group(
transition_route_group=gcdc_transition_route_group.TransitionRouteGroup(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].transition_route_group
mock_val = gcdc_transition_route_group.TransitionRouteGroup(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_transition_route_group_flattened_error():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_transition_route_group(
gcdc_transition_route_group.UpdateTransitionRouteGroupRequest(),
transition_route_group=gcdc_transition_route_group.TransitionRouteGroup(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_transition_route_group_flattened_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_transition_route_group.TransitionRouteGroup()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_transition_route_group.TransitionRouteGroup()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_transition_route_group(
transition_route_group=gcdc_transition_route_group.TransitionRouteGroup(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].transition_route_group
mock_val = gcdc_transition_route_group.TransitionRouteGroup(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_transition_route_group_flattened_error_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_transition_route_group(
gcdc_transition_route_group.UpdateTransitionRouteGroupRequest(),
transition_route_group=gcdc_transition_route_group.TransitionRouteGroup(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize(
"request_type", [transition_route_group.DeleteTransitionRouteGroupRequest, dict,]
)
def test_delete_transition_route_group(request_type, transport: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.DeleteTransitionRouteGroupRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_transition_route_group_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_transition_route_group), "__call__"
) as call:
client.delete_transition_route_group()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.DeleteTransitionRouteGroupRequest()
@pytest.mark.asyncio
async def test_delete_transition_route_group_async(
transport: str = "grpc_asyncio",
request_type=transition_route_group.DeleteTransitionRouteGroupRequest,
):
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.DeleteTransitionRouteGroupRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_transition_route_group_async_from_dict():
await test_delete_transition_route_group_async(request_type=dict)
def test_delete_transition_route_group_field_headers():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = transition_route_group.DeleteTransitionRouteGroupRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_transition_route_group), "__call__"
) as call:
call.return_value = None
client.delete_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_transition_route_group_field_headers_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = transition_route_group.DeleteTransitionRouteGroupRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_transition_route_group), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_transition_route_group_flattened():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_transition_route_group(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_transition_route_group_flattened_error():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_transition_route_group(
transition_route_group.DeleteTransitionRouteGroupRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_delete_transition_route_group_flattened_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_transition_route_group(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_transition_route_group_flattened_error_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_transition_route_group(
transition_route_group.DeleteTransitionRouteGroupRequest(),
name="name_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.TransitionRouteGroupsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.TransitionRouteGroupsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TransitionRouteGroupsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.TransitionRouteGroupsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = TransitionRouteGroupsClient(
client_options=options, transport=transport,
)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = TransitionRouteGroupsClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.TransitionRouteGroupsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TransitionRouteGroupsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.TransitionRouteGroupsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = TransitionRouteGroupsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.TransitionRouteGroupsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.TransitionRouteGroupsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.TransitionRouteGroupsGrpcTransport,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(client.transport, transports.TransitionRouteGroupsGrpcTransport,)
def test_transition_route_groups_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.TransitionRouteGroupsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_transition_route_groups_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dialogflowcx_v3.services.transition_route_groups.transports.TransitionRouteGroupsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.TransitionRouteGroupsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_transition_route_groups",
"get_transition_route_group",
"create_transition_route_group",
"update_transition_route_group",
"delete_transition_route_group",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_transition_route_groups_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflowcx_v3.services.transition_route_groups.transports.TransitionRouteGroupsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TransitionRouteGroupsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_transition_route_groups_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.dialogflowcx_v3.services.transition_route_groups.transports.TransitionRouteGroupsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TransitionRouteGroupsTransport()
adc.assert_called_once()
def test_transition_route_groups_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
TransitionRouteGroupsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TransitionRouteGroupsGrpcTransport,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
],
)
def test_transition_route_groups_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.TransitionRouteGroupsGrpcTransport, grpc_helpers),
(transports.TransitionRouteGroupsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_transition_route_groups_transport_create_channel(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TransitionRouteGroupsGrpcTransport,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
],
)
def test_transition_route_groups_grpc_transport_client_cert_source_for_mtls(
transport_class,
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_transition_route_groups_host_no_port():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com"
),
)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_transition_route_groups_host_with_port():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com:8000"
),
)
assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_transition_route_groups_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.TransitionRouteGroupsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
def test_transition_route_groups_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.TransitionRouteGroupsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.TransitionRouteGroupsGrpcTransport,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
],
)
def test_transition_route_groups_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.TransitionRouteGroupsGrpcTransport,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
],
)
def test_transition_route_groups_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_flow_path():
project = "squid"
location = "clam"
agent = "whelk"
flow = "octopus"
expected = "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}".format(
project=project, location=location, agent=agent, flow=flow,
)
actual = TransitionRouteGroupsClient.flow_path(project, location, agent, flow)
assert expected == actual
def test_parse_flow_path():
expected = {
"project": "oyster",
"location": "nudibranch",
"agent": "cuttlefish",
"flow": "mussel",
}
path = TransitionRouteGroupsClient.flow_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_flow_path(path)
assert expected == actual
def test_intent_path():
project = "winkle"
location = "nautilus"
agent = "scallop"
intent = "abalone"
expected = "projects/{project}/locations/{location}/agents/{agent}/intents/{intent}".format(
project=project, location=location, agent=agent, intent=intent,
)
actual = TransitionRouteGroupsClient.intent_path(project, location, agent, intent)
assert expected == actual
def test_parse_intent_path():
expected = {
"project": "squid",
"location": "clam",
"agent": "whelk",
"intent": "octopus",
}
path = TransitionRouteGroupsClient.intent_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_intent_path(path)
assert expected == actual
def test_page_path():
project = "oyster"
location = "nudibranch"
agent = "cuttlefish"
flow = "mussel"
page = "winkle"
expected = "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/pages/{page}".format(
project=project, location=location, agent=agent, flow=flow, page=page,
)
actual = TransitionRouteGroupsClient.page_path(project, location, agent, flow, page)
assert expected == actual
def test_parse_page_path():
expected = {
"project": "nautilus",
"location": "scallop",
"agent": "abalone",
"flow": "squid",
"page": "clam",
}
path = TransitionRouteGroupsClient.page_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_page_path(path)
assert expected == actual
def test_transition_route_group_path():
project = "whelk"
location = "octopus"
agent = "oyster"
flow = "nudibranch"
transition_route_group = "cuttlefish"
expected = "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/transitionRouteGroups/{transition_route_group}".format(
project=project,
location=location,
agent=agent,
flow=flow,
transition_route_group=transition_route_group,
)
actual = TransitionRouteGroupsClient.transition_route_group_path(
project, location, agent, flow, transition_route_group
)
assert expected == actual
def test_parse_transition_route_group_path():
expected = {
"project": "mussel",
"location": "winkle",
"agent": "nautilus",
"flow": "scallop",
"transition_route_group": "abalone",
}
path = TransitionRouteGroupsClient.transition_route_group_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_transition_route_group_path(path)
assert expected == actual
def test_webhook_path():
project = "squid"
location = "clam"
agent = "whelk"
webhook = "octopus"
expected = "projects/{project}/locations/{location}/agents/{agent}/webhooks/{webhook}".format(
project=project, location=location, agent=agent, webhook=webhook,
)
actual = TransitionRouteGroupsClient.webhook_path(project, location, agent, webhook)
assert expected == actual
def test_parse_webhook_path():
expected = {
"project": "oyster",
"location": "nudibranch",
"agent": "cuttlefish",
"webhook": "mussel",
}
path = TransitionRouteGroupsClient.webhook_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_webhook_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "winkle"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = TransitionRouteGroupsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nautilus",
}
path = TransitionRouteGroupsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "scallop"
expected = "folders/{folder}".format(folder=folder,)
actual = TransitionRouteGroupsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "abalone",
}
path = TransitionRouteGroupsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "squid"
expected = "organizations/{organization}".format(organization=organization,)
actual = TransitionRouteGroupsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "clam",
}
path = TransitionRouteGroupsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "whelk"
expected = "projects/{project}".format(project=project,)
actual = TransitionRouteGroupsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "octopus",
}
path = TransitionRouteGroupsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "oyster"
location = "nudibranch"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = TransitionRouteGroupsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
}
path = TransitionRouteGroupsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.TransitionRouteGroupsTransport, "_prep_wrapped_messages"
) as prep:
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.TransitionRouteGroupsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = TransitionRouteGroupsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(TransitionRouteGroupsClient, transports.TransitionRouteGroupsGrpcTransport),
(
TransitionRouteGroupsAsyncClient,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-dialogflow-cx
|
tests/unit/gapic/dialogflowcx_v3/test_transition_route_groups.py
|
Python
|
apache-2.0
| 108,247
|
[
"Octopus"
] |
f7f98474c6e8f546dc235555a0d7042c83f973acafdca83cb763dfdc7fbcb22f
|
#!/usr/bin/env python
"""
The expressions module contains classes to represent an expression. The main
class is ExpressionNode. ExpressionNode's most useful method is py_string(),
which returns a Python string representing that expression.
"""
import sys
import warnings
from .ctypedescs import *
import keyword
# Right now, the objects in this module are all oriented toward evaluation.
# However, they don't have to be, since ctypes objects are mutable. For example,
# shouldn't it be possible to translate the macro:
#
# #define INCREMENT(x) ++x
#
# into Python? The resulting code should be:
#
# def INCREMENT(x):
# x.value+=1
# return x.value
#
# On the other hand, this would be a challenge to write.
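# Illustrative sketch (not part of the original module) of how the node
# classes defined below are typically used; the operator and format string
# here are arbitrary examples:
#
#   left = ConstantExpressionNode(2)
#   right = ConstantExpressionNode(3)
#   node = BinaryExpressionNode("addition", (lambda a, b: a + b),
#                               "(%s + %s)", (True, True), left, right)
#   node.py_string(False)               # -> "(2 + 3)"
#   node.evaluate(EvaluationContext())  # -> 5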
class EvaluationContext(object):
"""Interface for evaluating expression nodes."""
def evaluate_identifier(self, name):
warnings.warn('Attempt to evaluate identifier "%s" failed' % name)
return 0
def evaluate_sizeof(self, type):
warnings.warn('Attempt to evaluate sizeof "%s" failed' % str(type))
return 0
    def evaluate_sizeof_object(self, object):
warnings.warn('Attempt to evaluate sizeof object "%s" failed' % str(object))
return 0
def evaluate_parameter(self, name):
warnings.warn('Attempt to evaluate parameter "%s" failed' % name)
return 0
class ExpressionNode(object):
def __init__(self):
self.errors = []
def error(self, message, cls=None):
self.errors.append((message, cls))
def __repr__(self):
try:
string = repr(self.py_string(True))
except ValueError:
string = "<error in expression node>"
return "<%s: %s>" % (type(self).__name__, string)
def visit(self, visitor):
for error, cls in self.errors:
visitor.visit_error(error, cls)
class ConstantExpressionNode(ExpressionNode):
def __init__(self, value):
ExpressionNode.__init__(self)
self.value = value
def evaluate(self, context):
return self.value
def py_string(self, can_be_ctype):
if sys.platform != "win32" or (sys.platform == "win32" and sys.version_info >= (2, 6)):
# Windows python did not get infinity support until 2.6
if self.value == float("inf"):
return "float('inf')"
elif self.value == float("-inf"):
return "float('-inf')"
return repr(self.value)
class IdentifierExpressionNode(ExpressionNode):
def __init__(self, name):
ExpressionNode.__init__(self)
self.name = name
def evaluate(self, context):
return context.evaluate_identifier(self.name)
def visit(self, visitor):
visitor.visit_identifier(self.name)
ExpressionNode.visit(self, visitor)
def py_string(self, can_be_ctype):
# Errors will be thrown in generated code if identifier evaluates
# to a ctypes object, and can_be_ctype is False.
return self.name
class ParameterExpressionNode(ExpressionNode):
def __init__(self, name):
ExpressionNode.__init__(self)
self.name = name
def evaluate(self, context):
return context.evaluate_parameter(self.name)
def visit(self, visitor):
ExpressionNode.visit(self, visitor)
def py_string(self, can_be_ctype):
# Errors will be thrown in generated code if parameter is
# a ctypes object, and can_be_ctype is False.
return self.name
class UnaryExpressionNode(ExpressionNode):
def __init__(self, name, op, format, child_can_be_ctype, child):
ExpressionNode.__init__(self)
self.name = name
self.op = op
self.format = format
self.child_can_be_ctype = child_can_be_ctype
self.child = child
def visit(self, visitor):
self.child.visit(visitor)
ExpressionNode.visit(self, visitor)
def evaluate(self, context):
if self.op:
return self.op(self.child.evaluate(context))
else:
raise ValueError('The C operator "%s" can\'t be evaluated right ' "now" % self.name)
def py_string(self, can_be_ctype):
return self.format % self.child.py_string(self.child_can_be_ctype and can_be_ctype)
class SizeOfExpressionNode(ExpressionNode):
def __init__(self, child):
ExpressionNode.__init__(self)
self.child = child
def visit(self, visitor):
self.child.visit(visitor)
ExpressionNode.visit(self, visitor)
def evaluate(self, context):
if isinstance(self.child, CtypesType):
return context.evaluate_sizeof(self.child)
else:
return context.evaluate_sizeof_object(self.child)
def py_string(self, can_be_ctype):
if isinstance(self.child, CtypesType):
return "sizeof(%s)" % self.child.py_string()
else:
return "sizeof(%s)" % self.child.py_string(True)
class BinaryExpressionNode(ExpressionNode):
def __init__(self, name, op, format, can_be_ctype, left, right):
ExpressionNode.__init__(self)
self.name = name
self.op = op
self.format = format
self.can_be_ctype = can_be_ctype
self.left = left
self.right = right
def visit(self, visitor):
self.left.visit(visitor)
self.right.visit(visitor)
ExpressionNode.visit(self, visitor)
def evaluate(self, context):
if self.op:
return self.op(self.left.evaluate(context), self.right.evaluate(context))
else:
raise ValueError('The C operator "%s" can\'t be evaluated right ' "now" % self.name)
def py_string(self, can_be_ctype):
return self.format % (
self.left.py_string(self.can_be_ctype[0] and can_be_ctype),
            self.right.py_string(self.can_be_ctype[1] and can_be_ctype),
)
class ConditionalExpressionNode(ExpressionNode):
def __init__(self, cond, yes, no):
ExpressionNode.__init__(self)
self.cond = cond
self.yes = yes
self.no = no
def visit(self, visitor):
self.cond.visit(visitor)
self.yes.visit(visitor)
self.no.visit(visitor)
ExpressionNode.visit(self, visitor)
def evaluate(self, context):
if self.cond.evaluate(context):
return self.yes.evaluate(context)
else:
return self.no.evaluate(context)
def py_string(self, can_be_ctype):
return "%s and %s or %s" % (
self.cond.py_string(True),
self.yes.py_string(can_be_ctype),
self.no.py_string(can_be_ctype),
)
class AttributeExpressionNode(ExpressionNode):
def __init__(self, op, format, base, attribute):
ExpressionNode.__init__(self)
self.op = op
self.format = format
self.base = base
self.attribute = attribute
# Attribute access will raise parse errors if you don't do this.
# Fortunately, the processor module does the same thing to
# the struct member name.
if self.attribute in keyword.kwlist:
self.attribute = "_" + self.attribute
def visit(self, visitor):
self.base.visit(visitor)
ExpressionNode.visit(self, visitor)
def evaluate(self, context):
        return self.op(self.base.evaluate(context), self.attribute)
def py_string(self, can_be_ctype):
if can_be_ctype:
return self.format % (self.base.py_string(can_be_ctype), self.attribute)
else:
return "(%s.value)" % (
self.format % (self.base.py_string(can_be_ctype), self.attribute)
)
class CallExpressionNode(ExpressionNode):
def __init__(self, function, arguments):
ExpressionNode.__init__(self)
self.function = function
self.arguments = arguments
def visit(self, visitor):
self.function.visit(visitor)
for arg in self.arguments:
arg.visit(visitor)
ExpressionNode.visit(self, visitor)
def evaluate(self, context):
arguments = [arg.evaluate(context) for arg in self.arguments]
return self.function.evaluate(context)(*arguments)
def py_string(self, can_be_ctype):
function = self.function.py_string(can_be_ctype)
arguments = [x.py_string(can_be_ctype) for x in self.arguments]
return "(%s (%s))" % (function, ", ".join(arguments))
class TypeCastExpressionNode(ExpressionNode):
"""
Type cast expressions as handled by ctypesgen. There is a strong
possibility that this does not support all types of casts.
"""
def __init__(self, base, ctype):
ExpressionNode.__init__(self)
self.base = base
self.ctype = ctype
def visit(self, visitor):
self.base.visit(visitor)
self.ctype.visit(visitor)
ExpressionNode.visit(self, visitor)
def evaluate(self, context):
return self.base.evaluate(context)
def py_string(self, can_be_ctype):
if isinstance(self.ctype, CtypesPointer):
return "cast({}, {})".format(self.base.py_string(True), self.ctype.py_string())
elif isinstance(self.ctype, CtypesStruct):
raise TypeError(
"conversion to non-scalar type ({}) requested from {}".format(
self.ctype, self.base.py_string(False)
)
)
else:
# In reality, this conversion should only really work if the types
# are scalar types. We won't work really hard to test if the types
# are indeed scalar.
# To be backwards compatible, we always return literals for builtin types.
# We use a function to convert to integer for c_char types since
# c_char can take integer or byte types, but the others can *only*
# take non-char arguments.
# ord_if_char must be provided by preambles
if isinstance(self.ctype, CtypesSimple) and (self.ctype.name, self.ctype.signed) == (
"char",
True,
):
ord_if_char = ""
elif isinstance(self.ctype, CtypesSimple) and self.ctype.name == "void":
# This is a very simple type cast: cast everything to (void)
# At least one macro from mingw does this
return "None"
else:
ord_if_char = "ord_if_char"
return "({to} ({ord_if_char}({frm}))).value".format(
to=self.ctype.py_string(), ord_if_char=ord_if_char, frm=self.base.py_string(False)
)
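    # Summary of the branches above, for illustration only (the exact type
    # spellings come from CtypesType.py_string() and are indicative here):
    #   pointer cast      -> "cast(<base>, <pointer type>)"
    #   cast to a struct  -> raises TypeError
    #   (void) cast       -> "None"
    #   other scalar cast -> "(<type> (ord_if_char(<base>))).value"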
class UnsupportedExpressionNode(ExpressionNode):
def __init__(self, message):
ExpressionNode.__init__(self)
self.message = message
self.error(message, "unsupported-type")
def evaluate(self, context):
raise ValueError("Tried to evaluate an unsupported expression " "node: %s" % self.message)
def __repr__(self):
return "<UnsupportedExpressionNode>"
def py_string(self, can_be_ctype):
raise ValueError("Called py_string() an unsupported expression " "node: %s" % self.message)
|
davidjamesca/ctypesgen
|
ctypesgen/expressions.py
|
Python
|
bsd-2-clause
| 11,176
|
[
"VisIt"
] |
6db17d9efd163e042bc6854ea219ff667776e1278cefd36f67d0a25f3734fd44
|
import numpy as np
colors = 'brgmck'
def sample_clusters(amps, means, varis, N=100, D=2, bounds=None):
'''
Draws samples from a set of clusters.
*amps*: [iterable] cluster weights; fraction of points drawn from each
*means*: [iterable of D-vectors] cluster centers, eg
[ [0,0], [1,2] ] describes one cluster at the origin and one at [1,2].
*varis*: [iterable] variances of the clusters
*N*: number of samples to draw
*D*: dimensionality of the samples
*bounds*: [iterable of (lo,hi) pairs] -- re-draw any samples outside this box
Returns a list of arrays with shape (n,D), where each array is
drawn from one cluster.
'''
K = len(amps)
amps = np.array(amps)
amps /= np.sum(amps)
nk = np.random.multinomial(N, amps)
x = []
def _sample_gaussian(mean, var, N, D):
if np.isscalar(var):
# Assume isotropic
return np.random.normal(loc=mean, scale=np.sqrt(var), size=(N,D))
# Assume covariance matrix
var = np.array(var)
u,s,v = np.linalg.svd(var)
x = np.random.normal(size=(N,D))
return mean + np.dot(u, (x * s).T).T
for n,mean,var in zip(nk, means, varis):
xi = _sample_gaussian(mean, var, n, D)
if bounds is not None:
outofbounds = np.empty(len(xi), bool)
while True:
outofbounds[:] = False
for d,(lo,hi) in enumerate(bounds):
outofbounds |= np.logical_or(xi[:,d] < lo, xi[:,d] > hi)
if not np.any(outofbounds):
break
xi[outofbounds,:] = _sample_gaussian(mean, var,
np.sum(outofbounds), D)
x.append(xi)
# print [xi.shape for xi in x]
return x
def get_clusters_A():
'''
Returns parameters of an example isotropic cluster, for K-means demo.
'''
amps = [ 0.5, 0.25, 0.25 ]
means = [ (3.5,2.5) , (7.5,3.5), (4.5,6.5) ]
varis = [ 1.**2, 0.7**2, 0.7**2 ]
ax = [0, 10, 0, 8.5]
return (amps, means, varis), ax
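# Illustrative usage (not part of the original module): draw 500 points from
# the demo clusters above and stack them into a single (500, 2) array.
#
#   (amps, means, varis), ax = get_clusters_A()
#   xlo, xhi, ylo, yhi = ax
#   samples = sample_clusters(amps, means, varis, N=500, D=2,
#                             bounds=[(xlo, xhi), (ylo, yhi)])
#   X = np.vstack(samples)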
def get_clusters_C():
'''
Returns parameters of an example isotropic cluster, for K-means demo.
This one has 90% of the mass in one component and tends to mess up K-means.
'''
amps = [ 0.9, 0.1 ]
means = [ (3.5, 4.), (6.5, 4.) ]
varis = [ 0.8**2, 0.5**2 ]
ax = [0, 10, 0, 8]
return (amps, means, varis), ax
def get_clusters_D():
'''
Returns parameters of a 2-D general Gaussian mixture model
'''
amps = [0.8, 0.2]
means = [ (3., 4.), (6.5, 4.) ]
covs = [ np.array([[1.,-0.5],[-0.5,1.]]),
np.array([[1.,0.5],[0.5,1.]]),
]
ax = [0, 10, 0, 8]
return (amps,means,covs), ax
def distance_matrix(A, B):
'''
Given two sets of data points, computes the Euclidean distances
between each pair of points.
*A*: (N, D) array of data points
*B*: (M, D) array of data points
Returns: (N, M) array of Euclidean distances between points.
'''
Na,D = A.shape
Nb,Db = B.shape
assert(Db == D)
dists = np.zeros((Na,Nb))
for a in range(Na):
dists[a,:] = np.sqrt(np.sum((A[a] - B)**2, axis=1))
return dists
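# For example (illustrative): distance_matrix(np.zeros((5, 2)), np.ones((3, 2)))
# returns a (5, 3) array in which every entry equals sqrt(2).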
# Copied and very slightly modified from scipy
def voronoi_plot_2d(vor, ax=None):
#ptp_bound = vor.points.ptp(axis=0)
ptp_bound = np.array([1000,1000])
center = vor.points.mean(axis=0)
for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
simplex = np.asarray(simplex)
if np.any(simplex < 0):
i = simplex[simplex >= 0][0] # finite end Voronoi vertex
t = vor.points[pointidx[1]] - vor.points[pointidx[0]] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[pointidx].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
far_point = vor.vertices[i] + direction * ptp_bound.max()
ax.plot([vor.vertices[i,0], far_point[0]],
[vor.vertices[i,1], far_point[1]], 'k--')
def plot_kmeans(i, X, K, centroids, newcentroids, nearest, show=True):
import pylab as plt
plt.clf()
plotsymbol = 'o'
if nearest is None:
distances = distance_matrix(X, centroids)
nearest = np.argmin(distances, axis=1)
for i,c in enumerate(centroids):
I = np.flatnonzero(nearest == i)
plt.plot(X[I,0], X[I,1], plotsymbol, mfc=colors[i], mec='k')
ax = plt.axis()
for i,(oc,nc) in enumerate(zip(centroids, newcentroids)):
plt.plot(oc[0], oc[1], 'kx', mew=2, ms=10)
plt.plot([oc[0], nc[0]], [oc[1], nc[1]], '-', color=colors[i])
plt.plot(nc[0], nc[1], 'x', mew=2, ms=15, color=colors[i])
vor = None
if K > 2:
from scipy.spatial import Voronoi #, voronoi_plot_2d
vor = Voronoi(centroids)
voronoi_plot_2d(vor, plt.gca())
else:
mid = np.mean(centroids, axis=0)
x0,y0 = centroids[0]
x1,y1 = centroids[1]
slope = (y1-y0)/(x1-x0)
slope = -1./slope
run = 1000.
plt.plot([mid[0] - run, mid[0] + run],
[mid[1] - run*slope, mid[1] + run*slope], 'k--')
plt.axis(ax)
if show:
plt.show()
def gaussian_probability(X, mean, cov):
'''
Returns the probability of drawing data points from a Gaussian distribution
*X*: (N,D) array of data points
*mean*: (D,) vector: mean of the Gaussian
*cov*: (D,D) array: covariance of the Gaussian
Returns: (N,) vector of Gaussian probabilities
'''
D,d = cov.shape
assert(D == d)
# I haven't found a beautiful way of writing this in numpy...
mahal = np.sum(np.dot(np.linalg.inv(cov), (X - mean).T).T * (X - mean),
axis=1)
return (1./((2.*np.pi)**(D/2.) * np.sqrt(np.linalg.det(cov)))
* np.exp(-0.5 * mahal))
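# Sanity check (illustrative): at the mean the Mahalanobis term is zero, so
#   gaussian_probability(mean[np.newaxis, :], mean, cov)
# should equal 1. / ((2.*np.pi)**(D/2.) * np.sqrt(np.linalg.det(cov))),
# with D the dimension of cov.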
def plot_ellipse(mean, cov, *args, **kwargs):
import pylab as plt
u,s,v = np.linalg.svd(cov)
angle = np.linspace(0., 2.*np.pi, 200)
u1 = u[0,:]
u2 = u[1,:]
s1,s2 = np.sqrt(s)
xy = (u1[np.newaxis,:] * s1 * np.cos(angle)[:,np.newaxis] +
u2[np.newaxis,:] * s2 * np.sin(angle)[:,np.newaxis])
return plt.plot(mean[0] + xy[:,0], mean[1] + xy[:,1], *args, **kwargs)
def plot_em(step, X, K, amps, means, covs, z,
newamps, newmeans, newcovs, show=True):
import pylab as plt
from matplotlib.colors import ColorConverter
(N,D) = X.shape
if z is None:
z = np.zeros((N,K))
for k,(amp,mean,cov) in enumerate(zip(amps, means, covs)):
z[:,k] = amp * gaussian_probability(X, mean, cov)
z /= np.sum(z, axis=1)[:,np.newaxis]
plt.clf()
# snazzy color coding
cc = np.zeros((N,3))
CC = ColorConverter()
for k in range(K):
rgb = np.array(CC.to_rgb(colors[k]))
cc += z[:,k][:,np.newaxis] * rgb[np.newaxis,:]
plt.scatter(X[:,0], X[:,1], color=cc, s=9, alpha=0.5)
ax = plt.axis()
for k,(amp,mean,cov) in enumerate(zip(amps, means, covs)):
plot_ellipse(mean, cov, 'k-', lw=4)
plot_ellipse(mean, cov, 'k-', color=colors[k], lw=2)
plt.axis(ax)
if show:
plt.show()
def plot_gmm_samples(X, K, params):
import pylab as plt
nwalkers,ndim = params.shape
plt.clf()
plt.scatter(X[:,0], X[:,1], color='k', s=9, alpha=0.5)
N,D = X.shape
for i in range(nwalkers):
        # unpack_gmm_params already returns normalized amplitudes
        amps, means, covs = unpack_gmm_params(params[i,:], K, D)
for k,(amp,mean,cov) in enumerate(zip(amps, means, covs)):
plot_ellipse(mean, cov, '-', color=colors[k], lw=1, alpha=0.2)
def unpack_gmm_params(params, K, D):
logamps = params[:K-1]
# We're going to choose to work in log-amplitudes to avoid
# dealing with negative values. (One could also avoid negative values by
# using a prior that values must be non-negative; in practice, do this by
# returning -np.inf in this function)
amps = np.exp(np.append(0, logamps))
amps /= np.sum(amps)
params = params[K-1:]
means = params[:K*D].reshape((K,D))
params = params[K*D:]
covs = np.zeros((K,D,D))
# we have to unpack the covariances carefully (triangular matrix)
tri = np.tri(D)
I = np.flatnonzero(tri)
for k in range(K):
covs[k,:,:].flat[I] = params[:len(I)]
params = params[len(I):]
# copy lower triangle to upper triangle
covs[k,:,:] += (covs[k,:,:].T * (1 - tri))
return amps, means, covs
def pack_gmm_params(amps, means, covs):
'''
Pack Gaussian Mixture Model parameters into a flat array.
We will put log-amplitudes as the parameter space.
'''
K = len(amps)
k,D = means.shape
assert(k == K)
k,d1,d2 = covs.shape
assert(k == K)
assert(d1 == D)
assert(d2 == D)
# Normalize relative to the first element (which had better not be 0!):
amps = amps / float(amps[0])
logamps = np.log(amps)
pp = [logamps[1:], means.ravel()]
# grab the lower triangular matrix elements;
# 'tri' has ones in the lower diagonal
tri = np.tri(D)
# 'I' gives the flattened matrix elements in the lower diagonal
I = np.flatnonzero(tri)
for k in range(K):
pp.append(covs[k,:,:].flat[I])
return np.hstack(pp)
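# The packed vector therefore has (K-1) + K*D + K*D*(D+1)//2 entries; for
# K=3, D=2 that is 2 + 6 + 9 = 17, matching the __main__ demo below.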
def gaussian_probability_1d(x, mean, vari):
'''
Returns the probability of drawing data points from a Gaussian distribution
*X*: (N,) array of data points
*mean*: scalar: mean of the Gaussian
*vari*: scalar: variance of the Gaussian
Returns: (N,) vector of Gaussian probabilities
'''
# I haven't found a beautiful way of writing this in numpy...
mahal = (x - mean)**2 / vari
return (1./np.sqrt(2.*np.pi * vari)
* np.exp(-0.5 * mahal))
def plot_sinusoid_samples(Xi, xf, params):
import pylab as plt
nwalkers,ndim = params.shape
plt.clf()
for i,X in enumerate(Xi):
plt.scatter(X[:,0], X[:,1], color=colors[i], s=9, alpha=0.5)
for i in range(nwalkers):
fg,offset,amp = params[i,:]
ypred = offset + amp * np.sin(xf)
plt.plot(xf, ypred, '-', color=colors[0], alpha=0.2)
if __name__ == '__main__':
a = np.array([3,2,1])
means = (1+np.arange(6)).reshape(3,2)
covs = (100 + np.arange(12)).reshape(3,2,2)
P = pack_gmm_params(a, means, covs)
print 'Packed params:', P
K = 3
D = 2
a2,m2,c2 = unpack_gmm_params(P, K, D)
print 'a2', a2
print 'm2', m2
print 'c2', c2
|
dstndstn/Unsupervised
|
utils.py
|
Python
|
gpl-2.0
| 10,718
|
[
"Gaussian"
] |
3f37e2537ba325e17a20edbdbd8c7edeab44b304e72802bafab784e14080d2e8
|
# The MIT License (MIT)
#
# Copyright (c) 2015 Philippe Proulx <eeppeliteloop@gmail.com>
# Copyright (c) 2015 Simon Marchi <simon.marchi@polymtl.ca>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from pygdbmi import parser
try:
from termcolor import colored
except ImportError:
def colored(s, c, attrs=[]):
return s
''' TODO: convert to extend BaseVisitor
class GenerateObjectsVisitor:
def visit(self, rr_node):
res_class = rr_node.result_class
if res_class == 'done' or res_class == 'running':
rr = pygdbmi.objects.DoneResultRecord(rr_node.token,
rr_node.results)
elif res_class == 'connected':
rr = pygdbmi.objects.ConnectedResultRecord(rr_node.token)
elif res_class == 'error':
msg = None
code = None
for result in rr_node.results:
if result.variable.value == 'msg':
msg = result.value
elif result.variable.value == 'code':
code = result.value
rr = pygdbmi.objects.ErrorResultRecord(rr_node.token, msg, code)
elif res_class == 'exit':
rr = pygdbmi.objects.ExitResultRecord(rr_node.token)
return rr
'''
class BaseVisitor:
def __init__(self):
self._visit_fns = {
parser.ResultRecord: self.visit_result_record,
parser.OutOfBandRecord: self.visit_out_of_band_record,
parser.AsyncRecord: self.visit_async_record,
parser.NotifyAsyncOutput: self.visit_notify_async_record,
parser.AsyncOutput: self.visit_async_output,
parser.Result: self.visit_result,
parser.Value: self.visit_value,
parser.CString: self.visit_cstring,
parser.List: self.visit_list,
parser.Tuple: self.visit_tuple,
parser.Output: self.visit_output
}
def visit(self, node):
if type(node) in self._visit_fns:
self._visit_fns[type(node)](node)
else:
fmt = 'Visiting type {} is not implemented.'
raise NotImplementedError(fmt.format(type(node)))
def visit_output(self, node):
pass
def visit_result(self, node):
pass
def visit_result_record(self, node):
pass
def visit_out_of_band_record(self, node):
self.visit(node.record)
def visit_async_record(self, node):
self.visit(node.output)
def visit_notify_async_record(self, node):
pass
def visit_async_output(self, node):
pass
def visit_value(self, node):
pass
def visit_cstring(self, node):
pass
def visit_list(self, node):
pass
def visit_tuple(self, node):
pass
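# Minimal subclass sketch (illustrative, not part of the original module):
# override only the hooks you need and visit() dispatches on node type.
#
#   class CStringCollector(BaseVisitor):
#       def __init__(self):
#           super(CStringCollector, self).__init__()
#           self.values = []
#       def visit_cstring(self, node):
#           self.values.append(node.value)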
class PrettyPrintVisitor(BaseVisitor):
class Indenter:
def __init__(self):
self._level = 0
def __enter__(self):
self._level += 1
def __exit__(self, type_, value, traceback):
self._level -= 1
def __call__(self):
return ' ' * self._level
def __init__(self, outfile=sys.stdout, en_colors=False):
super(PrettyPrintVisitor, self).__init__()
self._outfile = outfile
self._indent = PrettyPrintVisitor.Indenter()
if en_colors:
self._gos = PrettyPrintVisitor._get_out_str_colors
else:
self._gos = PrettyPrintVisitor._get_out_str_no_colors
@staticmethod
def _get_out_str_no_colors(s, c, attrs=[]):
return s
@staticmethod
def _get_out_str_colors(s, c, attrs=[]):
return colored(s, c, attrs=attrs)
def visit_output(self, output):
for oob_record in output.oob_records:
self.visit(oob_record)
if output.result_record is not None:
self.visit(output.result_record)
def _print_results(self, results):
with self._indent:
for i, result in enumerate(results):
self._outfile.write(self._indent())
self.visit(result)
if i == len(results) - 1:
self._outfile.write('\n')
else:
self._outfile.write(',\n')
def visit_result_record(self, rr):
maybe_comma = ',' if len(rr.results) > 0 else ''
ctoken = ''
if rr.token is not None:
ctoken = self._gos('{}'.format(rr.token.value), 'red', ['bold'])
cresult_class = self._gos('^' + rr.result_class,
'green', ['bold'])
self._outfile.write('{}{}{}\n'.format(ctoken, cresult_class,
maybe_comma))
self._print_results(rr.results)
def visit_notify_async_record(self, ar):
self._outfile.write(self._gos('=', 'green', ['bold']))
self.visit(ar.output)
def visit_async_output(self, output):
self._outfile.write(self._gos(output.async_class, 'green', ['bold']))
if output.results:
self._outfile.write(',')
self._outfile.write('\n')
self._print_results(output.results)
def visit_result(self, result):
cvariable_name = self._gos(result.variable.name, 'blue')
self._outfile.write('{} = '.format(cvariable_name))
self.visit(result.value)
def visit_value(self, value):
self.visit(value.value)
def visit_cstring(self, cstring):
cquote = self._gos('"', 'yellow')
cvalue = self._gos(cstring.value, 'yellow', ['bold'])
self._outfile.write('{quot}{val}{quot}'.format(quot=cquote, val=cvalue))
def visit_list(self, list_):
if len(list_.elements) == 0:
self._outfile.write('[]')
return
self._outfile.write('[\n')
with self._indent:
for i, element in enumerate(list_.elements):
self._outfile.write(self._indent())
self.visit(element)
if i == len(list_.elements) - 1:
self._outfile.write('\n')
else:
self._outfile.write(',\n')
self._outfile.write(self._indent())
self._outfile.write(']')
def visit_tuple(self, tuple_):
if len(tuple_.elements) == 0:
self._outfile.write('{}')
return
self._outfile.write('{\n')
with self._indent:
for i, element in enumerate(tuple_.elements):
self._outfile.write(self._indent())
self.visit(element)
if i == len(tuple_.elements) - 1:
self._outfile.write('\n')
else:
self._outfile.write(',\n')
self._outfile.write(self._indent())
self._outfile.write('}')
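# Illustrative usage sketch (hedged): this assumes the pygdbmi parser exposes
# an entry point that turns one GDB/MI line into a parser.Output node; the
# exact name of that entry point is not shown in this file.
#
#   output = parser.parse_output(mi_line)   # hypothetical entry point
#   PrettyPrintVisitor(outfile=sys.stdout, en_colors=True).visit(output)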
|
simark/pygdbmi
|
pygdbmi/visitors.py
|
Python
|
mit
| 7,906
|
[
"VisIt"
] |
6d9c10c75dd867b539a4a227ae2b31bc205dffccad34b0a284f69b79bca8d01d
|
# Copyright (c) 2018 Santosh Philip
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
# =======================================================================
"""home of easyopen - to easily open an idf file"""
# ideally this should be in idf_helper
# pytest kept failing since the other routines set the IDD file
# pytest works here in a separate file, like eppy/runner.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import io
from io import IOBase
from six import StringIO
import eppy
import eppy.modeleditor
import eppy.EPlusInterfaceFunctions.parse_idd
import eppy.runner.run_functions
class MissingIDDException(Exception):
pass
def cleanupversion(ver):
"""massage the version number so it matches the format of install folder"""
lst = ver.split(".")
if len(lst) == 1:
lst.extend(['0', '0'])
elif len(lst) == 2:
lst.extend(['0'])
elif len(lst) > 2:
lst = lst[:3]
lst[2] = '0' # ensure the 3rd number is 0
cleanver = '.'.join(lst)
return cleanver
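# For example (behaviour of the helper above):
#   cleanupversion("8.9")     -> "8.9.0"
#   cleanupversion("9.0.1.3") -> "9.0.0"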
def getiddfile(versionid):
"""find the IDD file of the E+ installation"""
vlist = versionid.split('.')
if len(vlist) == 1:
vlist = vlist + ['0', '0']
elif len(vlist) == 2:
vlist = vlist + ['0']
ver_str = '-'.join(vlist)
eplus_exe, _ = eppy.runner.run_functions.install_paths(ver_str)
eplusfolder = os.path.dirname(eplus_exe)
iddfile = '{}/Energy+.idd'.format(eplusfolder, )
return iddfile
def getoldiddfile(versionid):
"""find the IDD file of the E+ installation
E+ version 7 and earlier have the idd in /EnergyPlus-7-2-0/bin/Energy+.idd """
vlist = versionid.split('.')
if len(vlist) == 1:
vlist = vlist + ['0', '0']
elif len(vlist) == 2:
vlist = vlist + ['0']
ver_str = '-'.join(vlist)
eplus_exe, _ = eppy.runner.run_functions.install_paths(ver_str)
eplusfolder = os.path.dirname(eplus_exe)
iddfile = '{}/bin/Energy+.idd'.format(eplusfolder, )
return iddfile
def easyopen(fname, idd=None, epw=None):
"""automatically set idd and open idf file. Uses version from idf to set correct idd
    It will work under the following circumstances:
    - the IDF file has a VERSION object.
    - the EnergyPlus version matching the IDF version is installed.
    - EnergyPlus is installed in the default location.
Parameters
----------
    fname : str, StringIO or IOBase
        Filepath of the IDF file, an open file handle of the IDF file,
        or a StringIO holding the IDF contents.
    idd : str, StringIO or IOBase
        Optional. easyopen will locate the IDD automatically when this
        argument is not given.
        Filepath of the IDD file, an open file handle of the IDD file,
        or a StringIO holding the IDD contents.
    epw : str
        path name to the weather file. This arg is needed to run EnergyPlus from eppy.
"""
if idd:
eppy.modeleditor.IDF.setiddname(idd)
idf = eppy.modeleditor.IDF(fname, epw=epw)
return idf
# the rest of the code runs if idd=None
if isinstance(fname, (IOBase, StringIO)):
fhandle = fname
else:
fhandle = io.open(fname, 'r', encoding='latin-1') # latin-1 seems to read most things
# - get the version number from the idf file
txt = fhandle.read()
# try:
# txt = txt.decode('latin-1') # latin-1 seems to read most things
# except AttributeError:
# pass
ntxt = eppy.EPlusInterfaceFunctions.parse_idd.nocomment(txt, '!')
blocks = ntxt.split(';')
    blocks = [block.strip() for block in blocks]
bblocks = [block.split(',') for block in blocks]
bblocks1 = [[item.strip() for item in block] for block in bblocks]
ver_blocks = [block for block in bblocks1
if block[0].upper() == 'VERSION']
ver_block = ver_blocks[0]
versionid = ver_block[1]
# - get the E+ folder based on version number
iddfile = getiddfile(versionid)
if os.path.exists(iddfile):
pass
# might be an old version of E+
else:
iddfile = getoldiddfile(versionid)
if os.path.exists(iddfile):
# if True:
# - set IDD and open IDF.
eppy.modeleditor.IDF.setiddname(iddfile)
if isinstance(fname, (IOBase, StringIO)):
fhandle.seek(0)
idf = eppy.modeleditor.IDF(fhandle, epw=epw)
else:
idf = eppy.modeleditor.IDF(fname, epw=epw)
return idf
else:
# - can't find IDD -> throw an exception
astr = "input idf file says E+ version {}. easyopen() cannot find the corresponding idd file '{}'"
astr = astr.format(versionid, iddfile)
raise MissingIDDException(astr)
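# Illustrative usage (the file names here are hypothetical):
#   idf = easyopen("building.idf", epw="in.epw")
#   idf.idfobjects["VERSION"]   # standard eppy access on the returned IDF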
|
jamiebull1/eppy
|
eppy/easyopen.py
|
Python
|
mit
| 4,982
|
[
"EPW"
] |
a7116287f9c6bd1bd06f3964fef255b581cd32ed49c68f3338d93ea869b9ed31
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Popularity Contest (popcontest) parser."""
import unittest
from plaso.formatters import popcontest # pylint: disable=unused-import
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import popcontest
from tests import test_lib as shared_test_lib
from tests.parsers import test_lib
__author__ = 'Francesco Picasso (francesco.picasso@gmail.com)'
class PopularityContestUnitTest(test_lib.ParserTestCase):
"""Tests for the popcontest parser."""
@shared_test_lib.skipUnlessHasTestFile([u'popcontest1.log'])
def testParse(self):
"""Tests the Parse function."""
parser_object = popcontest.PopularityContestParser()
storage_writer = self._ParseFile([u'popcontest1.log'], parser_object)
self.assertEqual(len(storage_writer.events), 22)
event_object = storage_writer.events[0]
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.ADDED_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-06-22 05:41:41')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_string = (
u'Session 0 start '
u'ID 12345678901234567890123456789012 [ARCH:i386 POPCONVER:1.38]')
expected_short_string = u'Session 0 start'
self._TestGetMessageStrings(
event_object, expected_string, expected_short_string)
event_object = storage_writer.events[1]
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.ACCESS_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-06-22 07:34:42')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_string = u'mru [/usr/sbin/atd] package [at]'
expected_short_string = u'/usr/sbin/atd'
self._TestGetMessageStrings(
event_object, expected_string, expected_short_string)
event_object = storage_writer.events[3]
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.ACCESS_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-06-22 07:34:43')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_string = (
u'mru [/usr/lib/python2.5/lib-dynload/_struct.so] '
u'package [python2.5-minimal]')
expected_short_string = u'/usr/lib/python2.5/lib-dynload/_struct.so'
self._TestGetMessageStrings(
event_object, expected_string, expected_short_string)
event_object = storage_writer.events[5]
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.ACCESS_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-05-30 05:26:20')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_string = (
u'mru [/usr/bin/empathy] package [empathy] tag [RECENT-CTIME]')
expected_short_string = u'/usr/bin/empathy'
self._TestGetMessageStrings(
event_object, expected_string, expected_short_string)
event_object = storage_writer.events[6]
self.assertEqual(
event_object.timestamp_desc,
eventdata.EventTimestamp.ENTRY_MODIFICATION_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-05-30 05:27:43')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_string = (
u'mru [/usr/bin/empathy] package [empathy] tag [RECENT-CTIME]')
expected_short_string = u'/usr/bin/empathy'
self._TestGetMessageStrings(
event_object, expected_string, expected_short_string)
event_object = storage_writer.events[11]
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.ACCESS_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-05-12 07:58:33')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_string = u'mru [/usr/bin/orca] package [gnome-orca] tag [OLD]'
expected_short_string = u'/usr/bin/orca'
self._TestGetMessageStrings(
event_object, expected_string, expected_short_string)
event_object = storage_writer.events[13]
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.ADDED_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-06-22 05:41:41')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_string = u'Session 0 end'
expected_short_string = expected_string
self._TestGetMessageStrings(
event_object, expected_string, expected_short_string)
event_object = storage_writer.events[14]
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.ADDED_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-06-22 05:41:41')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_string = (
u'Session 1 start '
u'ID 12345678901234567890123456789012 [ARCH:i386 POPCONVER:1.38]')
expected_short_string = u'Session 1 start'
self._TestGetMessageStrings(
event_object, expected_string, expected_short_string)
event_object = storage_writer.events[15]
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.ACCESS_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-06-22 07:34:42')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_string = u'mru [/super/cool/plasuz] package [plaso]'
expected_short_string = u'/super/cool/plasuz'
self._TestGetMessageStrings(
event_object, expected_string, expected_short_string)
event_object = storage_writer.events[18]
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.ACCESS_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-04-06 12:25:42')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_string = u'mru [/super/cool/plasuz] package [miss_ctime]'
expected_short_string = u'/super/cool/plasuz'
self._TestGetMessageStrings(
event_object, expected_string, expected_short_string)
event_object = storage_writer.events[19]
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.ACCESS_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-05-12 07:58:33')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_string = u'mru [/super/cóól] package [plaso] tag [WRONG_TAG]'
expected_short_string = u'/super/cóól'
self._TestGetMessageStrings(
event_object, expected_string, expected_short_string)
event_object = storage_writer.events[21]
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.ADDED_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-06-22 05:41:41')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_string = u'Session 1 end'
expected_short_string = expected_string
self._TestGetMessageStrings(
event_object, expected_string, expected_short_string)
if __name__ == '__main__':
unittest.main()
|
dc3-plaso/plaso
|
tests/parsers/popcontest.py
|
Python
|
apache-2.0
| 7,240
|
[
"ORCA"
] |
652ab09478727eb3a6614c056faf74016ef7d71d31b1ca81fe6de4ac13221cfe
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
MNIST example demonstrating the use of merge layers.
"""
from neon.data import DataIterator, load_mnist
from neon.initializers import Gaussian
from neon.layers import GeneralizedCost, Affine, Sequential, MergeMultistream
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, Logistic, CrossEntropyBinary
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()
# hyperparameters
num_epochs = args.epochs
(X_train, y_train), (X_test, y_test), nclass = load_mnist(path=args.data_dir)
train_set = DataIterator([X_train, X_train], y_train, nclass=nclass, lshape=(1, 28, 28))
valid_set = DataIterator([X_test, X_test], y_test, nclass=nclass, lshape=(1, 28, 28))
# weight initialization
init_norm = Gaussian(loc=0.0, scale=0.01)
# initialize model
path1 = Sequential(layers=[Affine(nout=100, init=init_norm, activation=Rectlin()),
Affine(nout=100, init=init_norm, activation=Rectlin())])
path2 = Sequential(layers=[Affine(nout=100, init=init_norm, activation=Rectlin()),
Affine(nout=100, init=init_norm, activation=Rectlin())])
layers = [MergeMultistream(layers=[path1, path2], merge="stack"),
Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))]
model = Model(layers=layers)
cost = GeneralizedCost(costfunc=CrossEntropyBinary())
# fit and validate
optimizer = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)
# configure callbacks
callbacks = Callbacks(model, train_set, eval_set=valid_set, **args.callback_args)
model.fit(train_set, cost=cost, optimizer=optimizer, num_epochs=num_epochs, callbacks=callbacks)
|
nhynes/neon
|
examples/mnist_merge.py
|
Python
|
apache-2.0
| 2,572
|
[
"Gaussian"
] |
f083e21dad9292405aacfe031312c970b2c082e2e30ab4c0940ade05d1bd9409
|
import pybedtools
import os, difflib, sys
from textwrap import dedent
from nose import with_setup
from nose.tools import assert_raises, raises
from pybedtools.helpers import BEDToolsError
from pybedtools import featurefuncs
from tfuncs import setup, teardown, testdir, test_tempdir, unwriteable
def fix(x):
"""
Replaces spaces with tabs, removes spurious newlines, and lstrip()s each
line. Makes it really easy to create BED files on the fly for testing and
checking.
"""
s = ""
for i in x.splitlines():
i = i.lstrip()
if i.endswith('\t'):
add_tab = '\t'
else:
add_tab = ''
if len(i) == 0:
continue
i = i.split()
i = '\t'.join(i) + add_tab + '\n'
s += i
return s
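# For example (illustrative):
#   fix("""
#       chr1  1    100
#       chr1  100  200
#       """) == "chr1\t1\t100\nchr1\t100\t200\n"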
# ----------------------------------------------------------------------------
# Tabix support tests
# ----------------------------------------------------------------------------
def make_unwriteable():
"""
Make a directory that cannot be written to and set the pybedtools tempdir
to it. This is used to isolate "streaming" tests to ensure they do not
write to disk.
"""
if os.path.exists(unwriteable):
os.system('rm -rf %s' % unwriteable)
os.system('mkdir -p %s' % unwriteable)
os.system('chmod -w %s' % unwriteable)
pybedtools.set_tempdir(unwriteable)
def cleanup_unwriteable():
"""
Reset to normal tempdir operation....
"""
if os.path.exists(unwriteable):
os.system('rm -rf %s' % unwriteable)
pybedtools.set_tempdir(test_tempdir)
def test_interval_index():
"""
supplement to the more general test in test_cbedtools.IntervalTest.testGetItemNegative
"""
iv = pybedtools.create_interval_from_list('chr21 9719768 9721892 ALR/Alpha 1004 +'.split())
assert iv[-1] == '+'
assert iv[2:-1] == ['9721892', 'ALR/Alpha', '1004']
iv = pybedtools.create_interval_from_list(
['chr1', 'ucb', 'gene', '465', '805', '.', '+', '.',
'ID=thaliana_1_465_805;match=scaffold_801404.1;rname=thaliana_1_465_805'])
print iv[4:-3]
assert iv[4:-3] == ['805', '.']
def test_tuple_creation():
# everything as a string
t = [
("chr1", "1", "100", "feature1", "0", "+"),
("chr1", "100", "200", "feature2", "0", "+"),
("chr1", "150", "500", "feature3", "0", "-"),
("chr1", "900", "950", "feature4", "0", "+")
]
x = pybedtools.BedTool(t).saveas()
assert pybedtools.example_bedtool('a.bed') == x
t = [
("chr1", 1, 100, "feature1", 0, "+"),
("chr1", 100, 200, "feature2", 0, "+"),
("chr1", 150, 500, "feature3", 0, "-"),
("chr1", 900, 950, "feature4", 0, "+")
]
x = pybedtools.BedTool(t).saveas()
assert pybedtools.example_bedtool('a.bed') == x
t = [
("chr1", "fake", "gene", "50", "300", ".", "+", ".", "ID=gene1"),
("chr1", "fake", "mRNA", "50", "300", ".", "+", ".", "ID=mRNA1;Parent=gene1;"),
("chr1", "fake", "CDS", "75", "150", ".", "+", ".", "ID=CDS1;Parent=mRNA1;"),
("chr1", "fake", "CDS", "200", "275", ".", "+", ".", "ID=CDS2;Parent=mRNA1;"),
("chr1", "fake", "rRNA", "1200", "1275", ".", "+", ".", "ID=rRNA1;"),]
x = pybedtools.BedTool(t).saveas()
# Make sure that x has actual Intervals and not plain tuples or something
assert isinstance(x[0], pybedtools.Interval)
assert repr(x[0]) == "Interval(chr1:49-300)"
assert x[0]['ID'] == 'gene1'
def test_tabix():
a = pybedtools.example_bedtool('a.bed')
t = a.tabix()
assert t._tabixed()
results = str(t.tabix_intervals('chr1:99-200'))
print results
assert results == fix("""
chr1 1 100 feature1 0 +
chr1 100 200 feature2 0 +
chr1 150 500 feature3 0 -""")
assert str(t.tabix_intervals(a[2])) == fix("""
chr1 100 200 feature2 0 +
chr1 150 500 feature3 0 -""")
# clean up
fns = [
pybedtools.example_filename('a.bed.gz'),
pybedtools.example_filename('a.bed.gz.tbi'),
]
for fn in fns:
if os.path.exists(fn):
os.unlink(fn)
def test_tabix_intervals():
a = pybedtools.BedTool('chr1 25 30', from_string=True).tabix()
assert len(a.tabix_intervals('chr1:30-35')) == 0
assert len(a.tabix_intervals('chr1:29-30')) == 1
# make sure it works OK even if strand was provided
assert len(a.tabix_intervals('chr1:30-35[-]')) == 0
assert len(a.tabix_intervals('chr1:29-30[-]')) == 1
# ----------------------------------------------------------------------------
# Streaming and non-file BedTool tests
# ----------------------------------------------------------------------------
@with_setup(make_unwriteable, cleanup_unwriteable)
def test_stream():
"""
Stream and file-based equality, both whole-file and Interval by
Interval
"""
cleanup_unwriteable()
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('b.bed')
c = a.intersect(b)
# this should really not be written anywhere
d = a.intersect(b, stream=True)
assert_raises(NotImplementedError, c.__eq__, d)
d_contents = d.fn.read()
c_contents = open(c.fn).read()
assert d_contents == c_contents
# reconstruct d and check Interval-by-Interval equality
make_unwriteable()
d = a.intersect(b, stream=True)
for i,j in zip(c, d):
assert str(i) == str(j)
# Now do something similar with GFF files.
a = pybedtools.example_bedtool('a.bed')
f = pybedtools.example_bedtool('d.gff')
# file-based
cleanup_unwriteable()
g1 = f.intersect(a)
# streaming
make_unwriteable()
g2 = f.intersect(a, stream=True)
for i,j in zip(g1, g2):
assert str(i) == str(j)
# this was segfaulting at one point, just run to make sure
g3 = f.intersect(a, stream=True)
for i in iter(g3):
print i
for row in a.cut([0, 1, 2, 5], stream=True):
row[0], row[1], row[2]
assert_raises(IndexError, row.__getitem__, 4)
def test_stream_of_stream():
"""
Second-level streaming using self-intersections
"""
a = pybedtools.example_bedtool('a.bed')
# Ensure non-stream and stream equality of self-intersection
nonstream1 = a.intersect(a, u=True)
stream1 = a.intersect(a, u=True, stream=True)
nonstream1_str = str(nonstream1)
stream1_str = str(stream1)
a_str = str(a)
assert nonstream1_str == stream1_str == a_str
# Have to reconstruct stream1 cause it was consumed in the str() call
nonstream1 = a.intersect(a, u=True)
stream1 = a.intersect(a, u=True, stream=True)
nonstream2 = a.intersect(nonstream1, u=True)
stream2 = a.intersect(stream1, u=True, stream=True)
nonstream2_str = str(nonstream2)
stream2_str = str(stream2)
assert nonstream2_str == stream2_str == nonstream1_str == stream1_str == a_str
def test_generator():
"""
Equality of BedTools created from file, iter(), and generator
"""
# Test creation from file vs
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.BedTool(iter(a))
assert str(a) == str(b)
# Ensure that streams work well too
b1 = a.intersect(a, stream=True)
b2 = pybedtools.BedTool((i for i in a)).intersect(a)
assert str(b1) == str(b2)
def test_stream_of_generator():
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('b.bed')
b1 = a.intersect(a, stream=True)
b2 = pybedtools.BedTool((i for i in a)).intersect(a, stream=True)
sb1 = str(b1)
sb2 = str(b2)
print sb1
print sb2
assert sb1 == sb2
def test_many_files():
"""regression test to make sure many files can be created
"""
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('b.bed')
# Previously, IntervalFile would leak open files and would cause OSError
# (too many open files) at iteration 1010 or so.
for i in xrange(1100):
c = a.intersect(b)
def test_malformed():
"""
Malformed BED lines should raise MalformedBedLineError
"""
a = pybedtools.BedTool("""
chr1 100 200
chr1 100 90
chr1 100 200
chr1 100 200
chr1 100 200
chr1 100 200
""", from_string=True)
a_i = iter(a)
# first feature is OK
print a_i.next()
# but next one is not and should raise exception
assert_raises(pybedtools.MalformedBedLineError, a_i.next)
def test_remove_invalid():
"""
Remove_invalid() removes invalid lines, track lines, and comments
"""
a = pybedtools.BedTool("""
chr1 100 200
chr1 100 90
track name='try to break parser'
chr1 100 200
chr1 100 200
chr1 100 200
#
chr1 100 200
""", from_string=True)
b = a.remove_invalid()
cleaned = pybedtools.BedTool("""
chr1 100 200
chr1 100 200
chr1 100 200
chr1 100 200
chr1 100 200""", from_string=True)
assert_raises(NotImplementedError, b.__eq__, cleaned)
assert str(b) == str(cleaned)
def test_create_from_list_long_features():
"""
Iterator handles extra fields from long features (BED+GFF -wao intersection)
"""
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('c.gff')
c = a.intersect(b, wao=True, stream=False)
d = a.intersect(b, wao=True, stream=True)
print b.closest(a)
for i in d:
print i
def test_iterator():
"""
Iterator should ignore non-BED lines
"""
s = """
track name="test"
browser position chrX:1-100
# comment line
chrX 1 10
# more comments
track name="another"
"""
a = pybedtools.BedTool(s, from_string=True)
results = list(a)
print results[0]
assert str(results[0]) == 'chrX\t1\t10\n', results
def test_indexing():
"""
Indexing into BedTools
"""
a = pybedtools.example_bedtool('a.bed')
# This is the first line
interval = pybedtools.Interval('chr1', 1, 100, 'feature1', '0', '+')
# just to make sure
assert interval == iter(a).next()
# test slice behavior
results = list(a[0:2])
assert len(results) == 2
assert results[0] == interval
# test single-integer indexing
assert a[0] == interval
# only slices and integers allowed....
assert_raises(ValueError, a.__getitem__, 'key')
def test_repr_and_printing():
"""
Missing files and streams should say so in repr()
"""
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('b.bed')
c = a+b
d = a.intersect(b, stream=True)
os.unlink(c.fn)
assert 'a.bed' in repr(a)
assert 'b.bed' in repr(b)
assert 'MISSING FILE' in repr(c)
assert 'stream' in repr(d)
def test_file_type():
"""
Regression test on file_type checks
Previously file_type was creating a new IntervalFile every time it was
called; now it's cached so an IntervalFile is only created once per
BedTool.
"""
a = pybedtools.example_bedtool('a.bed')
for i in range(5000):
a.file_type
# ----------------------------------------------------------------------------
# BEDTools wrapper tests --
# See test_iter.py, which uses YAML test case definitions, for more complete
# tests of BEDTools wrapper methods.
#
# Here, we assert exception raises and more complicated things that can't be
# easily described in YAML
# ----------------------------------------------------------------------------
def test_introns():
a = pybedtools.example_bedtool('mm9.bed12')
b = pybedtools.BedTool((f for f in a if f.name == "Tcea1,uc007afj.1")).saveas()
bfeat = iter(b).next()
bi = b.introns()
    # b[9] is the exonCount from the bed12 file. There should be
    # b[9] - 1 introns, assuming no UTRs.
assert len(bi) == int(bfeat[9]) - 1, (len(bi), len(b))
def test_slop():
"""
Calling slop with no genome should raise ValueError
"""
a = pybedtools.example_bedtool('a.bed')
# Make sure it complains if no genome is set
assert_raises(ValueError, a.slop, **dict(l=100, r=1))
def test_closest():
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('b.bed')
r = a.closest(b)
assert len(r) == len(a)
# TODO: there's enough stuff in here that it's probably worth it to eventually
# make a TestSequenceStuff class
def test_sequence():
"""
From UCSC:
chromStart - The starting position of the feature in the chromosome or
scaffold. The first base in a chromosome is numbered 0.
chromEnd - The ending position of the feature in the chromosome or
scaffold. The chromEnd base is not included in the display of the feature.
For example, the first 100 bases of a chromosome are defined as
chromStart=0, chromEnd=100, and span the bases numbered 0-99. """
fi = os.path.join(testdir, 'test.fasta')
s = """
chrX 9 16 . . +
chrX 9 16 . . -
chrY 1 4 . . +
chrZ 28 31 . . +
"""
fasta = """
>chrX
AAAAAAAAATGCACTGAAAAAAAAAAAAAAA
>chrY
GCTACCCCCCCCCCCCCCCCCCCCCCCCCCC
>chrZ
AAAAAAAAAAAAAAAAAAAAAAAAAAAATCT
"""
a = pybedtools.BedTool(s, from_string=True)
assert_raises(ValueError, a.save_seqs, ('none',))
fout = open(fi,'w')
for line in fasta.splitlines(True):
fout.write(line.lstrip())
fout.close()
# redirect stderr for the call to .sequence(), which reports the creation
# of an index file
tmp = open(a._tmp(),'w')
orig_stderr = sys.stderr
sys.stderr = tmp
f = a.sequence(fi=fi)
sys.stderr = orig_stderr
assert f.fn == f.fn
seqs = open(f.seqfn).read()
print seqs
expected = """>chrX:9-16
TGCACTG
>chrX:9-16
TGCACTG
>chrY:1-4
CTA
>chrZ:28-31
TCT
"""
print ''.join(difflib.ndiff(seqs,expected))
print expected
assert seqs == expected
f = a.sequence(fi=fi,s=True)
seqs = open(f.seqfn).read()
expected = """>chrX:9-16(+)
TGCACTG
>chrX:9-16(-)
CAGTGCA
>chrY:1-4(+)
CTA
>chrZ:28-31(+)
TCT
"""
print seqs
print expected
print ''.join(difflib.ndiff(seqs,expected))
assert seqs == expected
f = f.save_seqs('deleteme.fa')
assert open('deleteme.fa').read() == expected
assert f.print_sequence() == expected
os.unlink('deleteme.fa')
fresh_a = pybedtools.BedTool(s, from_string=True)
assert fresh_a == f
os.unlink(fi)
if os.path.exists(fi+'.fai'):
os.unlink(fi+'.fai')
# ----------------------------------------------------------------------------
# Operator tests
# ----------------------------------------------------------------------------
def test_add_subtract():
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('b.bed')
assert a.intersect(b,u=True) == (a+b)
assert a.intersect(b,v=True) == (a-b)
def test_subset():
a = pybedtools.example_bedtool('a.bed')
import random
random.seed(1)
s = list(a.random_subset(1).features())
assert len(s) == 1
assert isinstance(s[0], pybedtools.Interval)
s2 = list(a.random_subset(len(a)).features())
print len(s2)
assert len(s2) == len(a)
def test_eq():
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('a.bed')
# BedTool to BedTool
assert a == b
# BedTool to string
s= """chr1 1 100 feature1 0 +
chr1 100 200 feature2 0 +
chr1 150 500 feature3 0 -
chr1 900 950 feature4 0 +
"""
assert a == s
    # Test inequality on a BedTool
b = pybedtools.example_bedtool('b.bed')
assert b != a
# and string
assert a != "blah"
# Don't allow testing equality on streams
c = a.intersect(b, stream=True)
d = a.intersect(b)
assert_raises(NotImplementedError, c.__eq__, d)
assert_raises(NotImplementedError, d.__eq__, c)
# Test it on iterator, too....
e = pybedtools.BedTool((i for i in a))
assert_raises(NotImplementedError, e.__eq__, a)
assert_raises(NotImplementedError, a.__eq__, e)
# Make sure that if we force the iterator to be consumed, it is in fact
# equal
s = str(e)
print str(a).splitlines(True)
print s.splitlines(True)
assert a == s
def test_hash():
a = pybedtools.example_bedtool('a.bed')
d = {}
for i in a:
d[i] = 1
# ----------------------------------------------------------------------------
# Other BedTool method tests
# ----------------------------------------------------------------------------
def test_count_bed():
a = pybedtools.example_bedtool('a.bed')
assert a.count() == 4
assert len(a) == 4
def test_feature_centers():
from pybedtools import featurefuncs
a = pybedtools.BedTool("""
chr1 1 100
chr5 3000 4000
""", from_string=True)
b = a.each(featurefuncs.center, 1)
results = list(b.features())
print results
assert results[0].start == 50
assert results[0].stop == 51
assert results[0].chrom == 'chr1'
assert results[1].start == 3500
assert results[1].stop == 3501
assert results[1].chrom == 'chr5'
def test_bedtool_creation():
# make sure we can make a bedtool from a bedtool and that it points to the
# same file
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.BedTool(a)
assert b.fn == a.fn
assert_raises(ValueError, pybedtools.BedTool,'nonexistent.bed')
# note that *s* has both tabs and spaces....
s = """
chr1 1 100 feature1 0 +
chr1 100 200 feature2 0 +
chr1 150 500 feature3 0 -
chr1 900 950 feature4 0 +
"""
from_string = pybedtools.BedTool(s, from_string=True)
# difflib used here to show a bug where a newline was included when using
# from_string
print ''.join(difflib.ndiff(str(from_string), str(a)))
assert str(from_string) == str(a)
def test_special_methods():
# note that *s* has both tabs and spaces....
s = """
chr1 1 100 feature1 0 +
chr1 100 200 feature2 0 +
chr1 150 500 feature3 0 -
chr1 900 950 feature4 0 +
"""
from_string = pybedtools.BedTool(s, from_string=True)
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('b.bed')
assert from_string == a
assert from_string != b
assert not from_string == b
assert not from_string != a
def test_field_count():
a = pybedtools.example_bedtool('a.bed')
assert a.field_count() == 6
def test_repr_and_printing():
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('b.bed')
c = a+b
os.unlink(c.fn)
assert 'a.bed' in repr(a)
assert 'b.bed' in repr(b)
assert 'MISSING FILE' in repr(c)
print a.head(1)
def test_cut():
a = pybedtools.example_bedtool('a.bed')
c = a.cut([0, 1, 2, 4])
assert c.field_count() == 4, c
def test_filter():
a = pybedtools.example_bedtool('a.bed')
b = a.filter(lambda f: f.length < 100 and f.length > 0)
assert len(b) == 2
def test_random_intersection():
# TODO:
return
N = 4
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('b.bed')
li = list(a.randomintersection(b, N))
assert len(li) == N, li
def test_cat():
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('b.bed')
b_fn = pybedtools.example_filename('b.bed')
assert a.cat(b) == a.cat(b_fn)
expected = fix("""
chr1 1 500
chr1 800 950
""")
assert a.cat(b) == expected
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('b.bed')
c = a.cat(b, postmerge=False)
assert len(a) + len(b) == len(c), (len(a), len(b), len(c))
print c
assert c == fix("""
chr1 1 100 feature1 0 +
chr1 100 200 feature2 0 +
chr1 150 500 feature3 0 -
chr1 900 950 feature4 0 +
chr1 155 200 feature5 0 -
chr1 800 901 feature6 0 +
""")
def test_randomstats():
chromsizes = {'chr1':(1,1000)}
a = pybedtools.example_bedtool('a.bed').set_chromsizes(chromsizes)
b = pybedtools.example_bedtool('b.bed')
try:
results = a.randomstats(b, 100, debug=True)
assert results['actual'] == 3
assert results['median randomized'] == 2.0
assert results['percentile'] == 89.5
except ImportError:
# allow doctests to pass if SciPy not installed
sys.stderr.write('SciPy not installed, so not testing '
'BedTool.randomstats().')
# ----------------------------------------------------------------------------
# Interval tests
# ----------------------------------------------------------------------------
def test_gff_stuff():
s = """
chr1 fake gene 1 100 . + . ID=gene1
chr1 fake mRNA 1 100 . + . Name=mRNA1
chr1 fake CDS 50 90 . + . other=nothing
"""
d = pybedtools.BedTool(s, from_string=True)
f1, f2, f3 = d.features()
assert f1.name == 'gene1', f1.name
assert f2.name == 'mRNA1', f2.name
assert f3.name is None, f3.name
def test_name():
c = iter(pybedtools.example_bedtool('c.gff')).next()
assert c.name == "thaliana_1_465_805" , c.name
# ----------------------------------------------------------------------------
# Other non-BedTool tests
# ----------------------------------------------------------------------------
def test_flatten():
from pybedtools.helpers import _flatten_list
result = _flatten_list([[1,2,3,0,[0,5],9],[100]])
print result
assert result == [1, 2, 3, 0, 0, 5, 9, 100]
def test_history_step():
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('b.bed')
c = a.intersect(b)
d = c.subtract(a)
tag = c.history[0].result_tag
assert pybedtools.find_tagged(tag) == c
assert_raises(ValueError, pybedtools.find_tagged, 'nonexistent')
print d.history
d.delete_temporary_history(ask=True, raw_input_func=lambda x: 'n')
assert os.path.exists(a.fn)
assert os.path.exists(b.fn)
assert os.path.exists(c.fn)
assert os.path.exists(d.fn)
d.delete_temporary_history(ask=True, raw_input_func=lambda x: 'Yes')
assert os.path.exists(a.fn)
assert os.path.exists(b.fn)
assert not os.path.exists(c.fn) # this is the only thing that should change
assert os.path.exists(d.fn)
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('b.bed')
c = a.intersect(b)
d = c.subtract(a)
d.delete_temporary_history(ask=False)
assert os.path.exists(a.fn)
assert os.path.exists(b.fn)
assert not os.path.exists(c.fn) # this is the only thing that should change
assert os.path.exists(d.fn)
def test_kwargs():
a = pybedtools.example_bedtool('a.bed')
b = a.intersect(a, s=False)
c = a.intersect(a)
assert str(b) == str(c)
# ----------------------------------------------------------------------------
# gzip support tests
# ----------------------------------------------------------------------------
def test_gzip():
# make new gzipped files on the fly
agz = pybedtools.BedTool._tmp()
bgz = pybedtools.BedTool._tmp()
os.system('gzip -c %s > %s' % (pybedtools.example_filename('a.bed'), agz))
os.system('gzip -c %s > %s' % (pybedtools.example_filename('b.bed'), bgz))
agz = pybedtools.BedTool(agz)
bgz = pybedtools.BedTool(bgz)
assert agz.file_type == bgz.file_type == 'bed'
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('b.bed')
assert a.intersect(b) == agz.intersect(bgz) == a.intersect(bgz) == agz.intersect(b)
# ----------------------------------------------------------------------------
# BAM support tests
# ----------------------------------------------------------------------------
def test_bam_bedtool_creation():
x = pybedtools.example_bedtool('x.bam')
a = pybedtools.example_bedtool('a.bed')
assert x._isbam
assert not a._isbam
def test_print_abam():
x = pybedtools.example_bedtool('gdc.bam')
expected = fix("""
None 0 chr2L 11 255 5M * 0 0 CGACA IIIII NM:i:0 NH:i:1
None 16 chr2L 71 255 5M * 0 0 TTCTC IIIII NM:i:0 NH:i:1
None 16 chr2L 141 255 5M * 0 0 CACCA IIIII NM:i:0 NH:i:1
None 16 chr2L 151 255 5M * 0 0 GTTCA IIIII NM:i:0 NH:i:1
None 0 chr2L 211 255 5M * 0 0 AAATA IIIII NM:i:0 NH:i:1
None 0 chr2L 71 255 5M * 0 0 GAGAA IIIII NM:i:0 NH:i:1
None 0 chr2L 141 255 5M * 0 0 TGGTG IIIII NM:i:0 NH:i:1
None 0 chr2L 161 255 5M * 0 0 GATAA IIIII NM:i:0 NH:i:1""")
print 'x:'
print x
print 'expected:'
print expected
assert x == expected
def test_bam_iter():
x = pybedtools.example_bedtool('gdc.bam')
s = 'None 0 chr2L 11 255 5M * 0 0 CGACA IIIII NM:i:0 NH:i:1\n'
assert str(x[0]) == str(iter(x).next()) == s
def test_bam_stream_bed():
x = pybedtools.example_bedtool('gdc.bam')
b = pybedtools.example_bedtool('gdc.gff')
c = x.intersect(b, u=True, bed=True, stream=True)
str_c = str(c)
expected = fix("""
chr2L 70 75 None 255 - 70 75 0,0,0 1 5, 0,
chr2L 140 145 None 255 - 140 145 0,0,0 1 5, 0,
chr2L 150 155 None 255 - 150 155 0,0,0 1 5, 0,
chr2L 210 215 None 255 + 210 215 0,0,0 1 5, 0,
chr2L 70 75 None 255 + 70 75 0,0,0 1 5, 0,
chr2L 140 145 None 255 + 140 145 0,0,0 1 5, 0,
chr2L 160 165 None 255 + 160 165 0,0,0 1 5, 0,
""")
assert str_c == expected
def test_bam_stream_bam():
x = pybedtools.example_bedtool('gdc.bam')
b = pybedtools.example_bedtool('gdc.gff')
c = x.intersect(b, u=True, stream=True)
expected = fix("""
None 16 chr2L 71 255 5M * 0 0 TTCTC IIIII NM:i:0 NH:i:1
None 16 chr2L 141 255 5M * 0 0 CACCA IIIII NM:i:0 NH:i:1
None 16 chr2L 151 255 5M * 0 0 GTTCA IIIII NM:i:0 NH:i:1
None 0 chr2L 211 255 5M * 0 0 AAATA IIIII NM:i:0 NH:i:1
None 0 chr2L 71 255 5M * 0 0 GAGAA IIIII NM:i:0 NH:i:1
None 0 chr2L 141 255 5M * 0 0 TGGTG IIIII NM:i:0 NH:i:1
None 0 chr2L 161 255 5M * 0 0 GATAA IIIII NM:i:0 NH:i:1""")
assert str(c) == expected
def test_bam_stream_bam_stream():
x = pybedtools.example_bedtool('gdc.bam')
b = pybedtools.example_bedtool('gdc.gff')
c = x.intersect(b, u=True, stream=True)
expected = fix("""
None 16 chr2L 71 255 5M * 0 0 TTCTC IIIII NM:i:0 NH:i:1
None 16 chr2L 141 255 5M * 0 0 CACCA IIIII NM:i:0 NH:i:1
None 16 chr2L 151 255 5M * 0 0 GTTCA IIIII NM:i:0 NH:i:1
None 0 chr2L 211 255 5M * 0 0 AAATA IIIII NM:i:0 NH:i:1
None 0 chr2L 71 255 5M * 0 0 GAGAA IIIII NM:i:0 NH:i:1
None 0 chr2L 141 255 5M * 0 0 TGGTG IIIII NM:i:0 NH:i:1
None 0 chr2L 161 255 5M * 0 0 GATAA IIIII NM:i:0 NH:i:1""")
d = c.intersect(b)
print d
assert str(d) == expected
def test_bam_interval():
x = pybedtools.example_bedtool('x.bam')
assert x[0].chrom == 'chr2L'
assert x[0].start == 9329L
assert x[0][3] == '9330'
assert x[0].stop == 9365L
assert len(x[0][9]) == len(x[0]) == 36
def test_bam_regression():
# Regression test: with extra fields, the first item in x.bam was being
# parsed as GFF (because it did not have exactly 13 fields). This check
# prevents that from happening again.
x = pybedtools.example_bedtool('x.bam')
assert x[0].file_type == 'sam'
assert x[0].chrom == 'chr2L'
def test_sam_filetype():
# file_type was segfaulting because IntervalFile couldn't parse SAM
a = pybedtools.example_bedtool('gdc.bam')
b = pybedtools.BedTool(i for i in a).saveas()
assert b.file_type == 'sam'
def test_bam_to_sam_to_bam():
a = pybedtools.example_bedtool('gdc.bam')
orig = str(a)
assert a.file_type == 'bam'
# saveas should maintain BAM format
b = a.saveas()
assert b.file_type == 'bam'
# Converting to string gets SAM format
assert str(b) == orig
# b is a bam; to_bam should return a bam
c = b.to_bam(genome='dm3')
assert c.file_type == 'bam'
# in fact, it should be the same file:
assert c.fn == b.fn
# In order to get SAM format, need to print to file.
d = open(pybedtools.BedTool._tmp(), 'w')
d.write(str(c))
d.close()
d = pybedtools.BedTool(d.name)
assert d.file_type == 'sam'
e = d.to_bam(genome='dm3')
assert e.file_type == 'bam'
# everybody should be the same
assert a == b == c == d == e
def test_bam_filetype():
# regression test -- this was segfaulting before because IntervalFile
# couldn't parse SAM
a = pybedtools.example_bedtool('gdc.bam')
b = pybedtools.example_bedtool('gdc.gff')
c = a.intersect(b)
assert c.file_type == 'bam'
def test_bam_header():
a = pybedtools.example_bedtool('gdc.bam')
b = pybedtools.example_bedtool('gdc.gff')
c = a.intersect(b)
print c._bam_header
assert c._bam_header == "@SQ SN:chr2L LN:23011544\n"
def test_output_kwarg():
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('b.bed')
c = a.intersect(b)
d = a.intersect(b, output='deleteme.bed')
assert c == d
os.unlink('deleteme.bed')
def test_copy():
a = pybedtools.example_bedtool('a.bed')
x = a[0]
# Before adding the __copy__ method to Interval class, making a copy would
# hang and then segfault
import copy
y = copy.copy(x)
assert y.start == x.start
assert y.stop == x.stop
assert y.chrom == x.chrom
assert y.name == x.name
assert y.fields == x.fields
assert y.file_type == x.file_type == 'bed'
# Make sure it's a real copy (changing something in y doesn't change
# something in x)
y.start += 1
assert y.start == x.start + 1
def test_pickleable():
interval = pybedtools.create_interval_from_list(
['chr1', '1', '100', 'asdf'])
fn = pybedtools.BedTool._tmp()
import pickle
out = open(fn, 'w')
pickle.dump(interval, out)
out.close()
new_interval = pickle.load(open(fn))
assert str(interval) == str(new_interval)
interval = pybedtools.create_interval_from_list(
['chr1', '1', '100'])
fn = pybedtools.BedTool._tmp()
import pickle
out = open(fn, 'w')
pickle.dump(interval, out)
out.close()
new_interval = pickle.load(open(fn))
assert str(interval) == str(new_interval)
interval = pybedtools.create_interval_from_list(
"chr2L . UTR 41 70 0 + . ID=mRNA:xs2:UTR:41-70;Parent=mRNA:xs2;".split('\t'))
fn = pybedtools.BedTool._tmp()
import pickle
out = open(fn, 'w')
pickle.dump(interval, out)
out.close()
new_interval = pickle.load(open(fn))
assert str(interval) == str(new_interval)
def test_split():
a = pybedtools.example_bedtool('a.bed')
def func(x, dist1, dist2):
"shift the features around"
newstart = x.start + dist1
newstop = x.stop + dist1
x.start = newstart
x.stop = newstop
yield x
x.start -= dist2
x.stop -= dist2
yield x
result = str(a.split(func, 1000, 100))
assert result == fix("""
chr1 1001 1100 feature1 0 +
chr1 901 1000 feature1 0 +
chr1 1100 1200 feature2 0 +
chr1 1000 1100 feature2 0 +
chr1 1150 1500 feature3 0 -
chr1 1050 1400 feature3 0 -
chr1 1900 1950 feature4 0 +
chr1 1800 1850 feature4 0 +
""")
def test_additional_args():
a = pybedtools.example_bedtool('a.bed')
expected = fix("""
chr1 1 2 1
chr1 100 101 1
chr1 900 901 1""")
assert a.genome_coverage(bg=True, strand='+', g=dict(chr1=(1, 1000)), additional_args='-5') == expected
def test_tss():
a = pybedtools.example_bedtool('a.bed')
results = str(a.each(featurefuncs.TSS, upstream=3, downstream=5, add_to_name='_TSS'))
print results
assert results == fix("""
chr1 0 6 feature1_TSS 0 +
chr1 97 105 feature2_TSS 0 +
chr1 495 503 feature3_TSS 0 -
chr1 897 905 feature4_TSS 0 +
""")
def test_extend_fields():
a = pybedtools.example_bedtool('a.bed')
results = str(a.each(featurefuncs.extend_fields, 8))
print results
assert results == fix("""
chr1 1 100 feature1 0 + 1 100
chr1 100 200 feature2 0 + 100 200
chr1 150 500 feature3 0 - 150 500
chr1 900 950 feature4 0 + 900 950
""")
def test_gff2bed():
a = pybedtools.example_bedtool('d.gff')
results = str(a.each(featurefuncs.gff2bed, name_field='Parent'))
assert results == fix("""
chr1 49 300 . . +
chr1 49 300 gene1 . +
chr1 74 150 mRNA1 . +
chr1 199 275 mRNA1 . +
chr1 1199 1275 . . +""")
results = str(a.each(featurefuncs.gff2bed))
assert results == fix("""
chr1 49 300 gene1 . +
chr1 49 300 mRNA1 . +
chr1 74 150 CDS1 . +
chr1 199 275 CDS2 . +
chr1 1199 1275 rRNA1 . +
""")
results = str(a.each(featurefuncs.gff2bed, name_field="nonexistent"))
assert results == fix("""
chr1 49 300 . . +
chr1 49 300 . . +
chr1 74 150 . . +
chr1 199 275 . . +
chr1 1199 1275 . . +
""")
results = str(a.each(featurefuncs.gff2bed, name_field=1))
print results
assert results == fix("""
chr1 49 300 fake . +
chr1 49 300 fake . +
chr1 74 150 fake . +
chr1 199 275 fake . +
chr1 1199 1275 fake . +""")
def test_add_color():
try:
from matplotlib import cm
except ImportError:
print "matplotlib not installed; skipping test_add_color"
return
def modify_scores(f):
fields = f.fields
fields[4] = str(f[2])
return pybedtools.create_interval_from_list(fields)
a = pybedtools.example_bedtool('a.bed')
a = a.each(modify_scores).saveas()
cmap = cm.jet
norm = a.colormap_normalize()
results = str(a.each(featurefuncs.add_color, cmap=cmap, norm=norm))
print results
assert results == fix("""
chr1 1 100 feature1 100 + 1 100 0,0,127
chr1 100 200 feature2 200 + 100 200 0,0,255
chr1 150 500 feature3 500 - 150 500 99,255,147
chr1 900 950 feature4 950 + 900 950 127,0,0""")
#------------------------------------------------------------------------------
# Tests for IntervalFile, as accessed by BedTool objects
#------------------------------------------------------------------------------
def test_any_hits():
a = pybedtools.example_bedtool('a.bed')
assert 1 == a.any_hits(pybedtools.create_interval_from_list(
['chr1', '900', '905', '.', '.', '-']))
assert 0 == a.any_hits(pybedtools.create_interval_from_list(
['chr1', '900', '905', '.', '.', '-']), same_strand=True)
assert 0 == a.any_hits(pybedtools.create_interval_from_list(
['chr1', '8000', '9000', '.', '.', '-']))
def test_all_hits():
a = pybedtools.example_bedtool('a.bed')
assert [a[2], a[3]] == a.all_hits(pybedtools.create_interval_from_list(
['chr1', '450', '905', '.', '.', '-']))
assert [a[2]] == a.all_hits(pybedtools.create_interval_from_list(
['chr1', '450', '905', '.', '.', '-']), same_strand=True)
def test_count_hits():
a = pybedtools.example_bedtool('a.bed')
assert len(a.all_hits(pybedtools.create_interval_from_list(
['chr1', '450', '905', '.', '.', '-']))) == 2
assert len(a.all_hits(pybedtools.create_interval_from_list(
['chr1', '450', '905', '.', '.', '-']), same_strand=True)) == 1
def test_multi_intersect():
# Need to test here because "-i" is not a single other-bedtool like other
# "-i" BEDTools programs, and this throws off the iter testing.
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('b.bed')
x = pybedtools.BedTool()
assert x.multi_intersect(i=[a.fn, b.fn]) == fix("""
chr1 1 155 1 1 1 0
chr1 155 200 2 1,2 1 1
chr1 200 500 1 1 1 0
chr1 800 900 1 2 0 1
chr1 900 901 2 1,2 1 1
chr1 901 950 1 1 1 0""")
assert x.multi_intersect(i=[a.fn, b.fn], cluster=True) == fix("""
chr1 155 200 2 1,2 1 1
chr1 900 901 2 1,2 1 1""")
def test_union_bedgraphs():
# from unionBedGraphs -examples...
a = pybedtools.BedTool("""
chr1 1000 1500 10
chr1 2000 2100 20
""", from_string=True)
b = pybedtools.BedTool("""
chr1 900 1600 60
chr1 1700 2050 50
""", from_string=True)
c = pybedtools.BedTool("""
chr1 1980 2070 80
chr1 2090 2100 20
""", from_string=True)
x = pybedtools.BedTool()
result = x.union_bedgraphs(i=[a.fn, b.fn, c.fn])
assert result == fix("""
chr1 900 1000 0 60 0
chr1 1000 1500 10 60 0
chr1 1500 1600 0 60 0
chr1 1700 1980 0 50 0
chr1 1980 2000 0 50 80
chr1 2000 2050 20 50 80
chr1 2050 2070 20 0 80
chr1 2070 2090 20 0 0
chr1 2090 2100 20 0 20
""")
def test_window_maker():
x = pybedtools.BedTool()
a = pybedtools.example_bedtool('a.bed')
result = x.window_maker(b=a.fn, w=50)
print result
assert result == fix("""
chr1 1 51
chr1 51 100
chr1 100 150
chr1 150 200
chr1 150 200
chr1 200 250
chr1 250 300
chr1 300 350
chr1 350 400
chr1 400 450
chr1 450 500
chr1 900 950
""")
x = pybedtools.BedTool()
z = x.window_maker(genome='hg19', w=100000)
assert str(z[0]) == "chr1\t0\t100000\n"
assert str(z[10000]) == 'chr16\t20800000\t20900000\n'
def test_random():
a = pybedtools.BedTool()
result = a.random(l=10, n=10, genome='hg19', seed=1)
assert result == fix("""
chr3 11945098 11945108 1 10 +
chr15 84985693 84985703 2 10 -
chr2 62691196 62691206 3 10 -
chr18 18871346 18871356 4 10 +
chr9 133374407 133374417 5 10 +
chr9 48958184 48958194 6 10 +
chrY 41568406 41568416 7 10 -
chr4 16579517 16579527 8 10 +
chr1 76589882 76589892 9 10 -
chr3 55995799 55995809 10 10 -
""")
def test_links():
# have to be careful about the path, since it is embedded in the HTML
# output -- so make a copy of the example file, and delete when done.
os.system('cp %s a.links.bed' % pybedtools.example_filename('a.bed'))
a = pybedtools.BedTool('a.links.bed')
a = a.links()
exp = open(pybedtools.example_filename('a.links.html')).read()
obs = open(a.links_html).read()
print exp
print obs
assert exp == obs
os.unlink('a.links.bed')
def test_igv():
a = pybedtools.example_bedtool('a.bed')
a = a.igv()
obs = open(a.igv_script).read()
exp = open(pybedtools.example_filename('a.igv_script')).read()
assert obs == exp
def test_bam_to_fastq():
x = pybedtools.example_bedtool('small.bam')
tmpfn = pybedtools.BedTool._tmp()
y = x.bam_to_fastq(fq=tmpfn)
assert open(y.fastq).read() == open(pybedtools.example_filename('small.fastq')).read()
def test_gtf_gff_attrs():
# smoke test.
#
# this has always worked:
gff = ["chr1","fake","mRNA","51", "300",".", "+",".","ID=mRNA1;Parent=gene1;"]
gff = pybedtools.create_interval_from_list(gff)
gff.attrs
# this previously failed because of the "=" in the attr string.
gff = ['scaffold_52', 'Cufflinks', 'exon', '5478', '5568', '.', '+', '.', 'gene_id "XLOC_017766"; transcript_id "TCONS_00033979"; exon_number "6"; gene_name "g18412"; oId "PAC:26897502"; nearest_ref "PAC:26897502"; class_code "="; tss_id "TSS21210"; p_id "P18851";']
gff = pybedtools.create_interval_from_list(gff)
gff.attrs
# TODO: is it necessary to support GFF vs GTF detection in this case:
#
# GFF:
# class_code=" "
#
# GTF:
# class_code "="
def test_jaccard():
x = pybedtools.example_bedtool('a.bed')
results = x.jaccard(pybedtools.example_bedtool('b.bed'))
assert results == {'intersection': 46, 'union-intersection': 649, 'jaccard': 0.0708783, 'n_intersections': 2}, results
results2 = x.jaccard(pybedtools.example_bedtool('b.bed'), stream=True)
assert results == results2, results2
def test_reldist():
x = pybedtools.example_bedtool('a.bed')
results = x.reldist(pybedtools.example_bedtool('b.bed'))
assert results == {'reldist': [0.15, 0.21, 0.28], 'count': [1, 1, 1], 'total': [3, 3, 3], 'fraction': [0.333, 0.333, 0.333]}, results
results2 = x.reldist(pybedtools.example_bedtool('b.bed'), detail=True)
print results2
assert results2 == fix("""
chr1 1 100 feature1 0 + 0.282
chr1 100 200 feature2 0 + 0.153
chr1 150 500 feature3 0 - 0.220""")
def test_remote_bam():
x = pybedtools.BedTool(
('ftp://ftp-trace.ncbi.nih.gov/1000genomes/ftp/data/HG00096/'
'exome_alignment/HG00096.chrom11.ILLUMINA.bwa.GBR.exome.'
'20120522.bam'),
remote=True)
def gen():
for i, f in enumerate(x.bam_to_bed(stream=True)):
yield f
if i == 9:
break
results = pybedtools.BedTool(gen()).saveas()
assert results == fix("""
11 60636 60736 SRR081241.13799221/1 0 +
11 60674 60774 SRR077487.5548889/1 0 +
11 60684 60784 SRR077487.12853301/1 0 +
11 60789 60889 SRR077487.5548889/2 0 -
11 60950 61050 SRR077487.13826494/1 0 +
11 60959 61059 SRR081241.13799221/2 0 -
11 61052 61152 SRR077487.12853301/2 0 -
11 61548 61648 SRR081241.16743804/2 0 +
11 61665 61765 SRR081241.16743804/1 0 -
11 61989 62089 SRR077487.167173/2 0 +"""), results
def test_empty_overloaded_ops():
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.BedTool("", from_string=True)
assert b.file_type == 'empty'
# NOTE: change in semantics. Previously, intersecting a BED file with an
# empty file would return the original BED file.
assert (a + b) == b
assert (b + a) == b
assert (a - b) == a
assert (b - a) == b
assert (b - b) == b
def test_issue_81():
genome = {'chr1': (0, 5000)}
result = pybedtools.BedTool().window_maker(genome=genome, w=1000, s=500)
assert result == fix(
"""
chr1 0 1000
chr1 500 1500
chr1 1000 2000
chr1 1500 2500
chr1 2000 3000
chr1 2500 3500
chr1 3000 4000
chr1 3500 4500
chr1 4000 5000
chr1 4500 5000
"""), result
def test_to_dataframe():
def fix_dataframe(df):
return ''.join(df.splitlines(True)[1:])
try:
import pandas
except ImportError:
print("pandas not installed; skipping test")
return
a = pybedtools.example_bedtool('a.bed')
results = str(a.to_dataframe())
expected = fix_dataframe("""
chrom start end name score strand
0 chr1 1 100 feature1 0 +
1 chr1 100 200 feature2 0 +
2 chr1 150 500 feature3 0 -
3 chr1 900 950 feature4 0 +""")
assert results == expected
d = pybedtools.example_bedtool('d.gff')
results = str(d.to_dataframe())
expected = fix_dataframe("""
seqname source feature start end score strand frame \\
0 chr1 fake gene 50 300 . + .
1 chr1 fake mRNA 50 300 . + .
2 chr1 fake CDS 75 150 . + .
3 chr1 fake CDS 200 275 . + .
4 chr1 fake rRNA 1200 1275 . + .
attributes
0 ID=gene1
1 ID=mRNA1;Parent=gene1;
2 ID=CDS1;Parent=mRNA1;
3 ID=CDS2;Parent=mRNA1;
4 ID=rRNA1; """)
assert results == expected
# get a gff file with too many fields...
x = pybedtools.example_bedtool('c.gff')
x = x.intersect(x, c=True)
assert_raises(ValueError, x.to_dataframe)
names = ['seqname', 'source', 'feature', 'start', 'end', 'score', 'strand',
'frame', 'attributes', 'count']
results = str(x.to_dataframe(names=names))
expected = fix_dataframe("""
seqname source feature start end score strand frame \\
0 chr1 ucb gene 465 805 . + .
1 chr1 ucb gene 631 899 . + .
2 chr1 ucb mRNA 631 913 . + .
3 chr1 ucb CDS 760 913 . + .
4 chr1 ucb CDS 486 605 . + .
5 chr1 ucb CDS 706 1095 . + .
6 chr1 ucb CDS 174 326 . + .
7 chr1 ucb CDS 439 630 . + .
8 chr1 ucb mRNA 496 576 . + .
9 chr1 ucb mRNA 486 605 . + .
10 chr1 ucb mRNA 706 895 . + .
11 chr1 ucb mRNA 174 326 . + .
12 chr1 ucb mRNA 439 899 . + .
13 chr1 ucb gene 60 269 . - .
attributes count
0 ID=thaliana_1_465_805;match=scaffold_801404.1;... 11
1 ID=AT1G01010;Name=AT1G01010;rname=AT1G01010 7
2 ID=AT1G01010.mRNA;Parent=AT1G01010;rname=AT1G0... 7
3 Parent=AT1G01010.mRNA;rname=AT1G01010 7
4 Parent=AT1G01010.mRNA;rname=AT1G01010 6
5 Parent=AT1G01010.mRNA;rname=AT1G01010 7
6 Parent=AT1G01010.mRNA;rname=AT1G01010 3
7 Parent=AT1G01010.mRNA;rname=AT1G01010 6
8 ID=AT1G01010.mRNA;Parent=AT1G01010;rname=AT1G0... 6
9 ID=AT1G01010.mRNA;Parent=AT1G01010;rname=AT1G0... 6
10 ID=AT1G01010.mRNA;Parent=AT1G01010;rname=AT1G0... 7
11 ID=AT1G01010.mRNA;Parent=AT1G01010;rname=AT1G0... 3
12 ID=AT1G01010.mRNA;Parent=AT1G01010;rname=AT1G0... 11
13 ID=thaliana_1_6160_6269;match=fgenesh1_pg.C_sc... 3 """)
assert results == expected
def test_tail():
a = pybedtools.example_bedtool('rmsk.hg18.chr21.small.bed')
observed = a.tail(as_string=True)
expected = fix(
"""
chr21 13355834 13356047 MER58A 892 -
chr21 13356250 13356290 AT_rich 26 +
chr21 13356358 13356381 AT_rich 23 +
chr21 13356571 13356910 L2 333 -
chr21 13357179 13357987 L1MEc 1264 -
chr21 13358003 13358300 L1MEc 379 -
chr21 13358304 13358952 L1MEc 1271 -
chr21 13358960 13359288 L2 336 +
chr21 13359444 13359751 AluY 2337 +
chr21 13360044 13360225 L1M5 284 -""")
assert observed == expected
# only ask for 3 lines
observed = a.tail(3, as_string=True)
expected = fix(
"""
chr21 13358960 13359288 L2 336 +
chr21 13359444 13359751 AluY 2337 +
chr21 13360044 13360225 L1M5 284 -""")
assert observed == expected
# For short files, whole thing should be returned
a = pybedtools.example_bedtool('a.bed')
expected = str(a)
obs = a.tail(as_string=True)
assert obs == expected
def test_fisher():
a = pybedtools.example_bedtool('a.bed')
b = pybedtools.example_bedtool('b.bed')
c = a.fisher(b, genome='hg19')
assert str(c) == \
"""# Contingency Table
#_________________________________________
# | not in -b | in -b |
# not in -a | 3137160615 | 503 |
# in -a | 100 | 46 |
#_________________________________________
# p-values for fisher's exact test
left right two-tail ratio
1.00000 0.00000 0.00000 2868973.922
""", c
|
jos4uke/getSeqFlankBlatHit
|
lib/python2.7/site-packages/pybedtools/test/test1.py
|
Python
|
gpl-2.0
| 47,373
|
[
"BWA"
] |
4d93774960541edb28d138b33635e5885bea463e6786c1ee720fbea8a3b34777
|
"""
Module simplifying manipulation of XML described at
http://libvirt.org/formatdomain.html
"""
import logging
from autotest.client.shared import error
from virttest import virsh, xml_utils
from virttest.libvirt_xml import base, accessors, xcepts
from virttest.libvirt_xml.devices import librarian
class VMXMLDevices(list):
"""
List of device instances from classes handed out by librarian.get()
"""
def __type_check__(self, other):
try:
# Raise error if object isn't dict-like or doesn't have key
device_tag = other['device_tag']
# Check that we have support for this type
librarian.get(device_tag)
except (AttributeError, TypeError, xcepts.LibvirtXMLError):
# Required to always raise TypeError for list API in VMXML class
raise TypeError("Unsupported item type: %s" % str(type(other)))
def __setitem__(self, key, value):
self.__type_check__(value)
super(VMXMLDevices, self).__setitem__(key, value)
return self
def append(self, value):
self.__type_check__(value)
super(VMXMLDevices, self).append(value)
return self
def extend(self, iterable):
# Make sure __type_check__ happens
for item in iterable:
self.append(item)
return self
def by_device_tag(self, tag):
result = VMXMLDevices()
for device in self:
if device.device_tag == tag:
result.append(device)
return result
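# Hedged usage sketch (illustrative only, not part of the original module):
# pulling devices out of a defined guest and filtering them with
# VMXMLDevices.by_device_tag(). The guest name 'demo-vm' is a made-up
# placeholder; any defined domain name would do.
def _example_list_disk_devices():
    vmxml = VMXML.new_from_dumpxml('demo-vm')
    devices = vmxml.get_devices()          # VMXMLDevices instance
    disks = devices.by_device_tag('disk')  # only the <disk> elements
    return disks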
class VMXMLBase(base.LibvirtXMLBase):
"""
Accessor methods for VMXML class properties (items in __slots__)
Properties:
hypervisor_type: string, hypervisor type name
get: return domain's type attribute value
set: change domain type attribute value
del: raise xcepts.LibvirtXMLError
vm_name: string, name of the vm
get: return text value of name tag
set: set text value of name tag
del: raise xcepts.LibvirtXMLError
uuid: string, uuid string for vm
get: return text value of uuid tag
set: set text value for (new) uuid tag (unvalidated)
del: remove uuid tag
vcpu, max_mem, current_mem: integers
get: returns integer
set: set integer
del: removes tag
numa: dictionary
get: return dictionary of numatune/memory attributes
set: set numatune/memory attributes from dictionary
del: remove numatune/memory tag
devices: VMXMLDevices (list-like)
get: returns VMXMLDevices instance for all devices
set: Define all devices from VMXMLDevices instance
del: remove all devices
"""
# Additional names of attributes and dictionary-keys instances may contain
__slots__ = base.LibvirtXMLBase.__slots__ + ('hypervisor_type', 'vm_name',
'uuid', 'vcpu', 'max_mem',
'current_mem', 'numa',
'devices', 'seclabel')
__uncompareable__ = base.LibvirtXMLBase.__uncompareable__
__schema_name__ = "domain"
def __init__(self, virsh_instance=base.virsh):
accessors.XMLAttribute(property_name="hypervisor_type",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='domain',
attribute='type')
accessors.XMLElementText(property_name="vm_name",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='name')
accessors.XMLElementText(property_name="uuid",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='uuid')
accessors.XMLElementInt(property_name="vcpu",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='vcpu')
accessors.XMLElementInt(property_name="max_mem",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='memory')
accessors.XMLElementInt(property_name="current_mem",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='currentMemory')
accessors.XMLElementDict(property_name="numa",
libvirtxml=self,
forbidden=None,
parent_xpath='numatune',
tag_name='memory')
super(VMXMLBase, self).__init__(virsh_instance=virsh_instance)
def get_devices(self, device_type=None):
"""
Put all nodes of devices into a VMXMLDevices instance.
"""
devices = VMXMLDevices()
all_devices = self.xmltreefile.find('devices')
if device_type is not None:
device_nodes = all_devices.findall(device_type)
else:
device_nodes = all_devices
for node in device_nodes:
device_tag = node.tag
device_class = librarian.get(device_tag)
new_one = device_class.new_from_element(node)
devices.append(new_one)
return devices
def set_devices(self, value):
"""
Define devices based on contents of VMXMLDevices instance
"""
value_type = type(value)
if not issubclass(value_type, VMXMLDevices):
raise xcepts.LibvirtXMLError("Value %s Must be a VMXMLDevices or "
"subclass not a %s"
% (str(value), str(value_type)))
# Start with clean slate
self.del_devices()
if len(value) > 0:
devices_element = xml_utils.ElementTree.SubElement(
self.xmltreefile.getroot(), 'devices')
for device in value:
# Separate the element from the tree
device_element = device.xmltreefile.getroot()
devices_element.append(device_element)
self.xmltreefile.write()
def del_devices(self):
"""
Remove all devices
"""
self.xmltreefile.remove_by_xpath('/devices')
self.xmltreefile.write()
def get_seclabel(self):
"""
Return a dict of the seclabel's attributes and children, or raise a
LibvirtXMLError if the domain has no seclabel tag.
@return: dict of the seclabel's attributes and children.
"""
__children_list__ = ['label', 'baselabel', 'imagelabel']
seclabel_node = self.xmltreefile.find("seclabel")
# No seclabel tag found in xml.
if seclabel_node is None:
raise xcepts.LibvirtXMLError("Seclabel for this domain does not "
"exist")
seclabel = dict(seclabel_node.items())
for child_name in __children_list__:
child_node = seclabel_node.find(child_name)
if child_node is not None:
seclabel[child_name] = child_node.text
return seclabel
def set_seclabel(self, seclabel_dict):
"""
Set seclabel of vm. Modify the attributes and children if a seclabel
exists, or create a new seclabel if one is not found in the
xmltreefile.
"""
__attributs_list__ = ['type', 'model', 'relabel']
__children_list__ = ['label', 'baselabel', 'imagelabel']
# Check the type of seclabel_dict.
if not isinstance(seclabel_dict, dict):
raise xcepts.LibvirtXMLError("seclabel_dict should be an instance "
"of dict, not a %s.\n"
% type(seclabel_dict))
seclabel_node = self.xmltreefile.find("seclabel")
if seclabel_node is None:
seclabel_attr = {}
seclabel_node = xml_utils.ElementTree.SubElement(
self.xmltreefile.getroot(),
"seclabel")
for key, value in seclabel_dict.items():
if key in __children_list__:
child_node = seclabel_node.find(key)
if child_node is None:
child_node = xml_utils.ElementTree.SubElement(seclabel_node,
key)
child_node.text = value
elif key in __attributs_list__:
seclabel_node.set(key, value)
else:
continue
self.xmltreefile.write()
def del_seclabel(self):
"""
Remove the seclabel tag from a domain
"""
try:
self.xmltreefile.remove_by_xpath("/seclabel")
except (AttributeError, TypeError):
pass # Element already doesn't exist
self.xmltreefile.write()
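# Hedged usage sketch (illustrative only, not part of the original module):
# round-tripping a seclabel through the accessors above. The attribute and
# child values shown are examples of libvirt seclabel syntax, not values
# required by this module.
def _example_seclabel_roundtrip(vmxml):
    vmxml.set_seclabel({'type': 'dynamic', 'model': 'selinux',
                        'relabel': 'yes',
                        'baselabel': 'system_u:system_r:svirt_t:s0'})
    current = vmxml.get_seclabel()   # dict of attributes + children
    vmxml.del_seclabel()             # remove the tag again
    return current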
class VMXML(VMXMLBase):
"""
Higher-level manipulations related to VM's XML or guest/host state
"""
# Must copy these here or there will be descriptor problems
__slots__ = VMXMLBase.__slots__
def __init__(self, hypervisor_type='kvm', virsh_instance=base.virsh):
"""
Create new VM XML instance
"""
super(VMXML, self).__init__(virsh_instance=virsh_instance)
# Setup some bare-bones XML to build upon
self.xml = u"<domain type='%s'></domain>" % hypervisor_type
@staticmethod # static method (no self) needed b/c calls VMXML.__new__
def new_from_dumpxml(vm_name, virsh_instance=virsh):
"""
Return new VMXML instance from virsh dumpxml command
@param: vm_name: Name of VM to dumpxml
@param: virsh_instance: virsh module or instance to use
@return: New initialized VMXML instance
"""
# TODO: Look up hypervisor_type on incoming XML
vmxml = VMXML(virsh_instance=virsh_instance)
vmxml['xml'] = virsh_instance.dumpxml(vm_name)
return vmxml
@staticmethod
def get_device_class(type_name):
"""
Return class that handles type_name devices, or raise exception.
"""
return librarian.get(type_name)
def undefine(self):
"""Undefine this VM with libvirt retaining XML in instance"""
# Allow any exceptions to propagate up
self.virsh.remove_domain(self.vm_name)
def define(self):
"""Define VM with virsh from this instance"""
# Allow any exceptions to propagate up
self.virsh.define(self.xml)
@staticmethod
def vm_rename(vm, new_name, uuid=None, virsh_instance=base.virsh):
"""
Rename a vm from its XML.
@param vm: VM class type instance
@param new_name: new name of vm
@param uuid: new_vm's uuid, if None libvirt will generate.
@return: a new VM instance
"""
if vm.is_alive():
vm.destroy(gracefully=True)
vmxml = VMXML.new_from_dumpxml(vm_name=vm.name,
virsh_instance=virsh_instance)
backup = vmxml.copy()
# can't do in-place rename, must operate on XML
try:
vmxml.undefine()
# All failures trip a single exception
except error.CmdError, detail:
del vmxml # clean up temporary files
raise xcepts.LibvirtXMLError("Error reported while undefining VM:\n"
"%s" % detail)
# Alter the XML
vmxml.vm_name = new_name
if uuid is None:
# invalidate uuid so libvirt will regenerate
del vmxml.uuid
vm.uuid = None
else:
vmxml.uuid = uuid
vm.uuid = uuid
# Re-define XML to libvirt
logging.debug("Rename %s to %s.", vm.name, new_name)
try:
vmxml.define()
except error.CmdError, detail:
del vmxml # clean up temporary files
# Allow exceptions thrown here since state will be undefined
backup.define()
raise xcepts.LibvirtXMLError("Error reported while defining VM:\n%s"
% detail)
# Keep names uniform
vm.name = new_name
return vm
@staticmethod
def set_vm_vcpus(vm_name, value, virsh_instance=base.virsh):
"""
Convenience method for updating 'vcpu' property of a defined VM
@param: vm_name: Name of defined vm to change vcpu element data
@param: value: New data value, None to delete.
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance)
if value is not None:
vmxml['vcpu'] = value # call accessor method to change XML
else: # value == None
del vmxml.vcpu
vmxml.undefine()
vmxml.define()
# Temporary files for vmxml cleaned up automatically
# when it goes out of scope here.
def check_cpu_mode(self, mode):
"""
Check whether the input cpu mode is valid.
@param mode: the mode of cpu:'host-model'...
"""
# Possible values for the mode attribute are:
# "custom", "host-model", "host-passthrough"
cpu_mode = ["custom", "host-model", "host-passthrough"]
if mode.strip() not in cpu_mode:
raise xcepts.LibvirtXMLError("The cpu mode '%s' is invalid!" % mode)
def get_disk_all(self):
"""
Return VM's disk from XML definition, None if not set
"""
disk_nodes = self.xmltreefile.find('devices').findall('disk')
disks = {}
for node in disk_nodes:
dev = node.find('target').get('dev')
disks[dev] = node
return disks
@staticmethod
def get_disk_source(vm_name, virsh_instance=base.virsh):
"""
Get the disk XML nodes of a defined VM's disks.
@param: vm_name: Name of defined vm.
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
disks = vmxml.get_disk_all()
return disks.values()
@staticmethod
def get_disk_blk(vm_name, virsh_instance=base.virsh):
"""
Get block device of a defined VM's disks.
@param: vm_name: Name of defined vm.
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
disks = vmxml.get_disk_all()
return disks.keys()
@staticmethod
def get_disk_count(vm_name, virsh_instance=base.virsh):
"""
Get count of VM's disks.
@param: vm_name: Name of defined vm.
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
disks = vmxml.get_disk_all()
if disks is not None:
return len(disks)
return 0
@staticmethod
def get_numa_params(vm_name, virsh_instance=base.virsh):
"""
Return VM's numa setting from XML definition
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance)
return vmxml.numa
def get_primary_serial(self):
"""
Get a dict with primary serial features.
"""
xmltreefile = self.dict_get('xml')
primary_serial = xmltreefile.find('devices').find('serial')
serial_features = {}
serial_type = primary_serial.get('type')
serial_port = primary_serial.find('target').get('port')
# Support node here for more features
serial_features['serial'] = primary_serial
# Necessary features
serial_features['type'] = serial_type
serial_features['port'] = serial_port
return serial_features
@staticmethod
def set_primary_serial(vm_name, dev_type, port, path=None,
virsh_instance=base.virsh):
"""
Set primary serial's features of vm_name.
@param vm_name: Name of defined vm to set primary serial.
@param dev_type: the type of serial:pty,file...
@param port: the port of serial
@param path: the path of serial, it is not necessary for pty
# TODO: More features
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
xmltreefile = vmxml.dict_get('xml')
try:
serial = vmxml.get_primary_serial()['serial']
except AttributeError:
logging.debug("Can not find any serial, now create one.")
# Create serial tree, default is pty
serial = xml_utils.ElementTree.SubElement(
xmltreefile.find('devices'),
'serial', {'type': 'pty'})
# Create elements of serial target, default port is 0
xml_utils.ElementTree.SubElement(serial, 'target', {'port': '0'})
serial.set('type', dev_type)
serial.find('target').set('port', port)
# path may not exist.
if path is not None:
serial.find('source').set('path', path)
else:
try:
source = serial.find('source')
serial.remove(source)
except AssertionError:
pass # Element not found, already removed.
xmltreefile.write()
vmxml.set_xml(xmltreefile.name)
vmxml.undefine()
vmxml.define()
def get_iface_all(self):
"""
Get a dict with interface's mac and node.
"""
iface_nodes = self.xmltreefile.find('devices').findall('interface')
interfaces = {}
for node in iface_nodes:
mac_addr = node.find('mac').get('address')
interfaces[mac_addr] = node
return interfaces
@staticmethod
def get_iface_by_mac(vm_name, mac, virsh_instance=base.virsh):
"""
Get the interface if mac is matched.
@param vm_name: Name of defined vm.
@param mac: a mac address.
@return: return a dict include main interface's features
"""
vmxml = VMXML.new_from_dumpxml(vm_name, virsh_instance=virsh_instance)
interfaces = vmxml.get_iface_all()
try:
interface = interfaces[mac]
except KeyError:
interface = None
if interface is not None: # matched mac exists.
iface_type = interface.get('type')
source = interface.find('source').get(iface_type)
features = {}
features['type'] = iface_type
features['mac'] = mac
features['source'] = source
return features
else:
return None
def get_net_all(self):
"""
Return VM's net from XML definition, None if not set
"""
xmltreefile = self.dict_get('xml')
net_nodes = xmltreefile.find('devices').findall('interface')
nets = {}
for node in net_nodes:
dev = node.find('target').get('dev')
nets[dev] = node
return nets
#TODO re-visit this method after the libvirt_xml.devices.interface module is implemented
@staticmethod
def get_net_dev(vm_name):
"""
Get net device of a defined VM's nets.
@param: vm_name: Name of defined vm.
"""
vmxml = VMXML.new_from_dumpxml(vm_name)
nets = vmxml.get_net_all()
if nets is not None:
return nets.keys()
return None
@staticmethod
def set_cpu_mode(vm_name, mode='host-model'):
"""
Set cpu's mode of VM.
@param vm_name: Name of defined vm to set primary serial.
@param mode: the mode of cpu:'host-model'...
"""
vmxml = VMXML.new_from_dumpxml(vm_name)
vmxml.check_cpu_mode(mode)
xmltreefile = vmxml.dict_get('xml')
try:
cpu = xmltreefile.find('/cpu')
logging.debug("Current cpu mode is '%s'!", cpu.get('mode'))
cpu.set('mode', mode)
except AttributeError:
logging.debug("Can not find any cpu, now create one.")
cpu = xml_utils.ElementTree.SubElement(xmltreefile.getroot(),
'cpu', {'mode': mode})
xmltreefile.write()
vmxml.undefine()
vmxml.define()
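# Hedged usage sketch (illustrative only, not part of the original module):
# the static helpers above can tweak a defined guest without touching the
# instance API. 'demo-vm' is a made-up domain name.
def _example_tune_guest():
    VMXML.set_vm_vcpus('demo-vm', 2)             # redefine with 2 vcpus
    VMXML.set_cpu_mode('demo-vm', 'host-model')  # validated by check_cpu_mode()
    return VMXML.get_disk_count('demo-vm')       # count of <disk> devices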
class VMCPUXML(VMXML):
"""
Higher-level manipulations related to VM's XML(CPU)
"""
# Must copy these here or there will be descriptor problems
__slots__ = VMXML.__slots__ + ('model', 'vendor', 'feature_list',)
def __init__(self, virsh_instance=virsh, vm_name='', mode='host-model'):
"""
Create new VMCPU XML instance
"""
# The set action is for test.
accessors.XMLElementText(property_name="model",
libvirtxml=self,
forbidden=['del'],
parent_xpath='/cpu',
tag_name='model')
accessors.XMLElementText(property_name="vendor",
libvirtxml=self,
forbidden=['del'],
parent_xpath='/cpu',
tag_name='vendor')
# This will skip self.get_feature_list() defined below
accessors.AllForbidden(property_name="feature_list",
libvirtxml=self)
super(VMCPUXML, self).__init__(virsh_instance=virsh_instance)
# Setup some bare-bones XML to build upon
self.set_cpu_mode(vm_name, mode)
self['xml'] = self.dict_get('virsh').dumpxml(vm_name,
extra="--update-cpu")
def get_feature_list(self):
"""
Accessor method for feature_list property (in __slots__)
"""
feature_list = []
xmltreefile = self.dict_get('xml')
for feature_node in xmltreefile.findall('/cpu/feature'):
feature_list.append(feature_node)
return feature_list
def get_feature_name(self, num):
"""
Get assigned feature name
@param: num: Assigned feature number
@return: Assigned feature name
"""
count = len(self.feature_list)
if num >= count:
raise xcepts.LibvirtXMLError("Get %d from %d features"
% (num, count))
feature_name = self.feature_list[num].get('name')
return feature_name
def remove_feature(self, num):
"""
Remove an assigned feature from the xml
@param: num: Assigned feature number
"""
xmltreefile = self.dict_get('xml')
count = len(self.feature_list)
if num >= count:
raise xcepts.LibvirtXMLError("Remove %d from %d features"
% (num, count))
feature_remove_node = self.feature_list[num]
cpu_node = xmltreefile.find('/cpu')
cpu_node.remove(feature_remove_node)
def check_feature_name(self, value):
"""
Check whether the feature name is valid.
@param: value: The feature name
@return: True if check pass
"""
sys_feature = []
cpu_xml_file = open('/proc/cpuinfo', 'r')
# readlines() (not readline()) so we iterate over lines, not characters
for line in cpu_xml_file.readlines():
if line.find('flags') != -1:
feature_names = line.split(':')[1].strip()
sys_sub_feature = feature_names.split(' ')
sys_feature = list(set(sys_feature + sys_sub_feature))
return (value in sys_feature)
def set_feature(self, num, value):
"""
Set an assigned feature value in the xml
@param: num: Assigned feature number
@param: value: The feature name modified to
"""
count = len(self.feature_list)
if num >= count:
raise xcepts.LibvirtXMLError("Set %d from %d features"
% (num, count))
feature_set_node = self.feature_list[num]
feature_set_node.set('name', value)
def add_feature(self, value):
"""
Add a feature Element to the xml
@param: value: The feature name to add
"""
xmltreefile = self.dict_get('xml')
cpu_node = xmltreefile.find('/cpu')
xml_utils.ElementTree.SubElement(cpu_node, 'feature', {'name': value})
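# Hedged usage sketch (illustrative only, not part of the original module):
# inspecting and editing cpu features through VMCPUXML. 'demo-vm' is a
# made-up domain name, and the available feature names depend on the host;
# a defined guest and working virsh are assumed.
def _example_cpu_features():
    cpuxml = VMCPUXML(vm_name='demo-vm')
    names = [cpuxml.get_feature_name(i)
             for i in range(len(cpuxml.get_feature_list()))]
    if cpuxml.check_feature_name('sse2'):
        cpuxml.add_feature('sse2')
    return names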
|
sathnaga/virt-test
|
virttest/libvirt_xml/vm_xml.py
|
Python
|
gpl-2.0
| 24,993
|
[
"VisIt"
] |
3ea305c24492227774a4e14dca88c057a2184f92fb220205235e263bdede147e
|
import numpy as np
import scipy as sp
import scipy.interpolate
import scipy.spatial
import pickle
import vtk
import vtk.util.colors
import math
import warnings
warnings.filterwarnings("error")
class Plotter:
COLOR_BG = vtk.util.colors.light_grey
COLOR_BG_PLOT = vtk.util.colors.ghost_white
#vtk.util.colors.ivory
COLOR_OBSTACLE = vtk.util.colors.banana
COLOR_SITES = vtk.util.colors.cobalt
COLOR_PATH = vtk.util.colors.brick
COLOR_CONTROL_POINTS = vtk.util.colors.tomato
COLOR_CONTROL_POLIG = vtk.util.colors.mint
COLOR_GRAPH = vtk.util.colors.sepia
COLOR_PLOT_CURV = vtk.util.colors.blue
COLOR_PLOT_TORS = vtk.util.colors.red
COLOR_LABELS = vtk.util.colors.blue
COLOR_LENGTH = vtk.util.colors.red
_DEFAULT_LINE_THICKNESS = 0.0005
_DEFAULT_POINT_THICKNESS = 0.002
_DEFAULT_BSPLINE_THICKNESS = 0.001
class KeyPressInteractorStyle(vtk.vtkInteractorStyleUnicam):
_screenshotFile = "/tmp/screenshot.png"
_cameraFile = "/tmp/cameraData.dat"
_cameraFile2 = "/tmp/cameraData2.dat"
def __init__(self, parent=None):
self.AddObserver("KeyPressEvent", self._keyPressEvent)
self.AddObserver("RightButtonPressEvent", self._mousePressEvent)
#super(KeyPressInteractorStyle, self).__init__()
def SetCamera(self, camera):
self._camera = camera
def SetRenderer(self, renderer):
self._renderer = renderer
def SetRenderWindow(self, renderWindow):
self._renderWindow = renderWindow
def _keyPressEvent(self, obj, event):
if obj.GetInteractor().GetKeySym() == "l":
print("Scene screenshot in "+self._screenshotFile)
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(self._renderWindow)
w2if.Update()
writer = vtk.vtkPNGWriter()
writer.SetFileName(self._screenshotFile)
writer.SetInputData(w2if.GetOutput())
writer.Write()
elif obj.GetInteractor().GetKeySym() == "c":
print("Save camera data in "+self._cameraFile)
record = {}
record['position'] = self._camera.GetPosition()
record['focalPoint'] = self._camera.GetFocalPoint()
record['viewAngle'] = self._camera.GetViewAngle()
record['viewUp'] = self._camera.GetViewUp()
record['clippingRange'] = self._camera.GetClippingRange()
with open(self._cameraFile, 'wb') as f:
pickle.dump(record, f)
elif obj.GetInteractor().GetKeySym() == "v":
print("Restore camera data from "+self._cameraFile)
with open(self._cameraFile, 'rb') as f:
record = pickle.load(f)
self._camera.SetPosition(record['position'])
self._camera.SetFocalPoint(record['focalPoint'])
self._camera.SetViewAngle(record['viewAngle'])
self._camera.SetViewUp(record['viewUp'])
self._camera.SetClippingRange(record['clippingRange'])
self._renderWindow.Render()
elif obj.GetInteractor().GetKeySym() == "b":
print("Restore camera data from "+self._cameraFile2)
with open(self._cameraFile2, 'rb') as f:
record = pickle.load(f)
self._camera.SetPosition(record['position'])
self._camera.SetFocalPoint(record['focalPoint'])
self._camera.SetViewAngle(record['viewAngle'])
self._camera.SetViewUp(record['viewUp'])
self._camera.SetClippingRange(record['clippingRange'])
self._renderWindow.Render()
self.OnKeyPress()
def _mousePressEvent(self, obj, event):
clickPos = obj.GetInteractor().GetEventPosition()
picker =vtk.vtkPropPicker()
picker.Pick(clickPos[0], clickPos[1], 0, self._renderer)
pos = picker.GetPickPosition()
print(pos)
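# Key-binding summary (comment added for readability, inferred from the
# handlers above): 'l' writes a PNG screenshot to _screenshotFile, 'c'
# pickles the camera state to _cameraFile, 'v' and 'b' restore the camera
# from _cameraFile and _cameraFile2 respectively; a right click prints the
# picked 3D position.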
class KeyPressContextInteractorStyle(vtk.vtkContextInteractorStyle):
_screenshotFile = "/tmp/screenshot.png"
def __init__(self, parent=None):
self.AddObserver("KeyPressEvent",self._keyPressEvent)
def SetRenderWindow(self, renderWindow):
self._renderWindow = renderWindow
def _keyPressEvent(self, obj, event):
if obj.GetInteractor().GetKeySym() == "l":
print("Plot screenshot in "+self._screenshotFile)
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(self._renderWindow)
w2if.Update()
writer = vtk.vtkPNGWriter()
writer.SetFileName(self._screenshotFile)
writer.SetInputData(w2if.GetOutput())
writer.Write()
def __init__(self):
self._rendererScene = vtk.vtkRenderer()
self._rendererScene.SetBackground(self.COLOR_BG)
self._renderWindowScene = vtk.vtkRenderWindow()
self._renderWindowScene.AddRenderer(self._rendererScene)
self._renderWindowInteractor = vtk.vtkRenderWindowInteractor()
self._renderWindowInteractor.SetRenderWindow(self._renderWindowScene)
#self._interactorStyle = vtk.vtkInteractorStyleUnicam()
self._interactorStyle = self.KeyPressInteractorStyle()
self._interactorStyle.SetCamera(self._rendererScene.GetActiveCamera())
self._interactorStyle.SetRenderer(self._rendererScene)
self._interactorStyle.SetRenderWindow(self._renderWindowScene)
self._contextViewPlotCurv = vtk.vtkContextView()
self._contextViewPlotCurv.GetRenderer().SetBackground(self.COLOR_BG_PLOT)
self._contextInteractorStyleCurv = self.KeyPressContextInteractorStyle()
self._contextInteractorStyleCurv.SetRenderWindow(self._contextViewPlotCurv.GetRenderWindow())
self._chartXYCurv = vtk.vtkChartXY()
self._contextViewPlotCurv.GetScene().AddItem(self._chartXYCurv)
self._chartXYCurv.SetShowLegend(True)
self._chartXYCurv.GetAxis(vtk.vtkAxis.LEFT).SetTitle("")
self._chartXYCurv.GetAxis(vtk.vtkAxis.BOTTOM).SetTitle("")
self._contextViewPlotTors = vtk.vtkContextView()
self._contextViewPlotTors.GetRenderer().SetBackground(self.COLOR_BG_PLOT)
self._contextInteractorStyleTors = self.KeyPressContextInteractorStyle()
self._contextInteractorStyleTors.SetRenderWindow(self._contextViewPlotTors.GetRenderWindow())
self._chartXYTors = vtk.vtkChartXY()
self._contextViewPlotTors.GetScene().AddItem(self._chartXYTors)
self._chartXYTors.SetShowLegend(True)
self._chartXYTors.GetAxis(vtk.vtkAxis.LEFT).SetTitle("")
self._chartXYTors.GetAxis(vtk.vtkAxis.BOTTOM).SetTitle("")
self._textActor = vtk.vtkTextActor()
self._textActor.GetTextProperty().SetColor(self.COLOR_LENGTH)
self._addedBSpline = False
def draw(self):
self._renderWindowInteractor.Initialize()
self._renderWindowInteractor.SetInteractorStyle(self._interactorStyle)
axes = vtk.vtkAxesActor()
widget = vtk.vtkOrientationMarkerWidget()
widget.SetOutlineColor(0.9300, 0.5700, 0.1300)
widget.SetOrientationMarker(axes)
widget.SetInteractor(self._renderWindowInteractor)
#widget.SetViewport(0.0, 0.0, 0.1, 0.1)
widget.SetViewport(0.0, 0.0, 0.2, 0.4)
widget.SetEnabled(True)
widget.InteractiveOn()
textWidget = vtk.vtkTextWidget()
textRepresentation = vtk.vtkTextRepresentation()
textRepresentation.GetPositionCoordinate().SetValue(.0,.0 )
textRepresentation.GetPosition2Coordinate().SetValue(.3,.04 )
textWidget.SetRepresentation(textRepresentation)
textWidget.SetInteractor(self._renderWindowInteractor)
textWidget.SetTextActor(self._textActor)
textWidget.SelectableOff()
textWidget.On()
self._rendererScene.ResetCamera()
camPos = self._rendererScene.GetActiveCamera().GetPosition()
self._rendererScene.GetActiveCamera().SetPosition((camPos[2],camPos[1],camPos[0]))
self._rendererScene.GetActiveCamera().SetViewUp((0.0,0.0,1.0))
self._rendererScene.GetActiveCamera().Zoom(1.4)
self._renderWindowScene.Render()
if self._addedBSpline:
self._contextViewPlotCurv.GetRenderWindow().SetMultiSamples(0)
self._contextViewPlotCurv.GetInteractor().Initialize()
self._contextViewPlotCurv.GetInteractor().SetInteractorStyle(self._contextInteractorStyleCurv)
#self._contextViewPlotCurv.GetInteractor().Start()
self._contextViewPlotTors.GetRenderWindow().SetMultiSamples(0)
self._contextViewPlotTors.GetInteractor().Initialize()
self._contextViewPlotTors.GetInteractor().SetInteractorStyle(self._contextInteractorStyleTors)
self._contextViewPlotTors.GetInteractor().Start()
else:
self._renderWindowInteractor.Start()
def addTetrahedron(self, vertexes, color):
vtkPoints = vtk.vtkPoints()
vtkPoints.InsertNextPoint(vertexes[0][0], vertexes[0][1], vertexes[0][2])
vtkPoints.InsertNextPoint(vertexes[1][0], vertexes[1][1], vertexes[1][2])
vtkPoints.InsertNextPoint(vertexes[2][0], vertexes[2][1], vertexes[2][2])
vtkPoints.InsertNextPoint(vertexes[3][0], vertexes[3][1], vertexes[3][2])
unstructuredGrid = vtk.vtkUnstructuredGrid()
unstructuredGrid.SetPoints(vtkPoints)
unstructuredGrid.InsertNextCell(vtk.VTK_TETRA, 4, range(4))
mapper = vtk.vtkDataSetMapper()
mapper.SetInputData(unstructuredGrid)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
self._rendererScene.AddActor(actor)
def addTriangles(self, triangles, color):
vtkPoints = vtk.vtkPoints()
idPoint = 0
allIdsTriangle = []
for triangle in triangles:
idsTriangle = []
for point in triangle:
vtkPoints.InsertNextPoint(point[0], point[1], point[2])
idsTriangle.append(idPoint)
idPoint += 1
allIdsTriangle.append(idsTriangle)
unstructuredGrid = vtk.vtkUnstructuredGrid()
unstructuredGrid.SetPoints(vtkPoints)
for idsTriangle in allIdsTriangle:
unstructuredGrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, idsTriangle)
mapper = vtk.vtkDataSetMapper()
mapper.SetInputData(unstructuredGrid)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
self._rendererScene.AddActor(actor)
def addPolyLine(self, points, color, thick=False, thickness=_DEFAULT_LINE_THICKNESS):
vtkPoints = vtk.vtkPoints()
for point in points:
vtkPoints.InsertNextPoint(point[0], point[1], point[2])
if thick:
cellArray = vtk.vtkCellArray()
cellArray.InsertNextCell(len(points))
for i in range(len(points)):
cellArray.InsertCellPoint(i)
polyData = vtk.vtkPolyData()
polyData.SetPoints(vtkPoints)
polyData.SetLines(cellArray)
tubeFilter = vtk.vtkTubeFilter()
tubeFilter.SetNumberOfSides(8)
tubeFilter.SetInputData(polyData)
tubeFilter.SetRadius(thickness)
tubeFilter.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tubeFilter.GetOutputPort())
else:
unstructuredGrid = vtk.vtkUnstructuredGrid()
unstructuredGrid.SetPoints(vtkPoints)
for i in range(1, len(points)):
unstructuredGrid.InsertNextCell(vtk.VTK_LINE, 2, [i-1, i])
mapper = vtk.vtkDataSetMapper()
mapper.SetInputData(unstructuredGrid)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
self._rendererScene.AddActor(actor)
def addPoints(self, points, color, thick=False, thickness=_DEFAULT_POINT_THICKNESS):
vtkPoints = vtk.vtkPoints()
for point in points:
vtkPoints.InsertNextPoint(point[0], point[1], point[2])
pointsPolyData = vtk.vtkPolyData()
pointsPolyData.SetPoints(vtkPoints)
if thick:
sphereSource = vtk.vtkSphereSource()
sphereSource.SetRadius(thickness)
glyph3D = vtk.vtkGlyph3D()
glyph3D.SetSourceConnection(sphereSource.GetOutputPort())
glyph3D.SetInputData(pointsPolyData)
glyph3D.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(glyph3D.GetOutputPort())
else:
vertexFilter = vtk.vtkVertexGlyphFilter()
vertexFilter.SetInputData(pointsPolyData)
vertexFilter.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(vertexFilter.GetOutput())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
self._rendererScene.AddActor(actor)
def addBSpline(self, path, degree, color, thick=False, thickness=_DEFAULT_BSPLINE_THICKNESS):
self._addedBSpline = True
tau, u, spline, splineD1, splineD2, splineD3, curv, tors, arcLength, polLength = path.splinePoints()
self._textActor.SetInput("Length: "+str(arcLength))
numIntervals = len(tau)-1
curvPlotActor = vtk.vtkXYPlotActor()
curvPlotActor.SetTitle("Curvature")
curvPlotActor.SetXTitle("")
curvPlotActor.SetYTitle("")
curvPlotActor.SetXValuesToIndex()
torsPlotActor = vtk.vtkXYPlotActor()
torsPlotActor.SetTitle("Torsion")
torsPlotActor.SetXTitle("")
torsPlotActor.SetYTitle("")
torsPlotActor.SetXValuesToIndex()
uArrays = []
curvArrays = []
torsArrays = []
for i in range(numIntervals):
uArrays.append(vtk.vtkDoubleArray())
uArrays[i].SetName("t")
curvArrays.append(vtk.vtkDoubleArray())
curvArrays[i].SetName("Curvature")
torsArrays.append(vtk.vtkDoubleArray())
torsArrays[i].SetName("Torsion")
curvTorsArray = vtk.vtkDoubleArray()
#minCurv = minTors = minNd1Xd2 = float("inf")
#maxCurv = maxTors = float("-inf")
for i in range(len(u)):
for j in range(numIntervals):
if u[i] >= tau[j] and u[i] < tau[j+1]:
break
uArrays[j].InsertNextValue(u[i])
curvArrays[j].InsertNextValue(curv[i])
torsArrays[j].InsertNextValue(tors[i])
curvTorsArray.InsertNextValue(curv[i])# + abs(tors[i]))
#print("minCurv: {:e}; maxCurv: {:e}; minTors: {:e}; maxTors: {:e}; minNd1Xd2: {:e}".format(minCurv, maxCurv, minTors, maxTors, minNd1Xd2))
for inter in range(numIntervals):
plotTable = vtk.vtkTable()
plotTable.AddColumn(uArrays[inter])
plotTable.AddColumn(curvArrays[inter])
plotTable.AddColumn(torsArrays[inter])
points = self._chartXYCurv.AddPlot(vtk.vtkChart.LINE)
points.SetInputData(plotTable, 0, 1)
points.SetColor(self.COLOR_PLOT_CURV[0], self.COLOR_PLOT_CURV[1], self.COLOR_PLOT_CURV[2])
points.SetWidth(1.0)
if inter > 0:
points.SetLegendVisibility(False)
points = self._chartXYTors.AddPlot(vtk.vtkChart.LINE)
points.SetInputData(plotTable, 0, 2)
points.SetColor(self.COLOR_PLOT_TORS[0], self.COLOR_PLOT_TORS[1], self.COLOR_PLOT_TORS[2])
points.SetWidth(1.0)
if inter > 0:
points.SetLegendVisibility(False)
vtkPoints = vtk.vtkPoints()
for point in spline:
vtkPoints.InsertNextPoint(point[0], point[1], point[2])
polyDataLabelP = vtk.vtkPolyData()
polyDataLabelP.SetPoints(vtkPoints)
labels = vtk.vtkStringArray()
labels.SetNumberOfValues(len(spline))
labels.SetName("labels")
for i in range(len(spline)):
if i == 0:
labels.SetValue(i, "S")
elif i == len(spline)-1:
labels.SetValue(i, "E")
else:
labels.SetValue(i, "")
polyDataLabelP.GetPointData().AddArray(labels)
sizes = vtk.vtkIntArray()
sizes.SetNumberOfValues(len(spline))
sizes.SetName("sizes")
for i in range(len(spline)):
if i == 0 or i == len(spline)-1:
sizes.SetValue(i, 10)
else:
sizes.SetValue(i,1)
polyDataLabelP.GetPointData().AddArray(sizes)
pointMapper = vtk.vtkPolyDataMapper()
pointMapper.SetInputData(polyDataLabelP)
pointActor = vtk.vtkActor()
pointActor.SetMapper(pointMapper)
pointSetToLabelHierarchyFilter = vtk.vtkPointSetToLabelHierarchy()
pointSetToLabelHierarchyFilter.SetInputData(polyDataLabelP)
pointSetToLabelHierarchyFilter.SetLabelArrayName("labels")
pointSetToLabelHierarchyFilter.SetPriorityArrayName("sizes")
pointSetToLabelHierarchyFilter.GetTextProperty().SetColor(self.COLOR_LABELS)
pointSetToLabelHierarchyFilter.GetTextProperty().SetFontSize(15)
pointSetToLabelHierarchyFilter.GetTextProperty().SetBold(True)
pointSetToLabelHierarchyFilter.Update()
labelMapper = vtk.vtkLabelPlacementMapper()
labelMapper.SetInputConnection(pointSetToLabelHierarchyFilter.GetOutputPort())
labelActor = vtk.vtkActor2D()
labelActor.SetMapper(labelMapper)
self._rendererScene.AddActor(labelActor)
if thick:
cellArray = vtk.vtkCellArray()
cellArray.InsertNextCell(len(spline))
for i in range(len(spline)):
cellArray.InsertCellPoint(i)
polyData = vtk.vtkPolyData()
polyData.SetPoints(vtkPoints)
polyData.SetLines(cellArray)
polyData.GetPointData().SetScalars(curvTorsArray)
tubeFilter = vtk.vtkTubeFilter()
tubeFilter.SetNumberOfSides(8)
tubeFilter.SetInputData(polyData)
tubeFilter.SetRadius(thickness)
tubeFilter.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tubeFilter.GetOutputPort())
else:
unstructuredGrid = vtk.vtkUnstructuredGrid()
unstructuredGrid.SetPoints(vtkPoints)
for i in range(1, len(spline)):
unstructuredGrid.InsertNextCell(vtk.VTK_LINE, 2, [i-1, i])
unstructuredGrid.GetPointData().SetScalars(curvTorsArray)
mapper = vtk.vtkDataSetMapper()
mapper.SetInputData(unstructuredGrid)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
self._rendererScene.AddActor(actor)
#self.addPolyLine(list(zip(out[0], out[1], out[2])), color, thick, thickness)
def addBSplineDEPRECATED(self, controlPolygon, degree, color, thick=False, thickness=_DEFAULT_BSPLINE_THICKNESS):
x = controlPolygon[:,0]
y = controlPolygon[:,1]
z = controlPolygon[:,2]
polLen = 0.
for i in range(1, len(controlPolygon)):
polLen += sp.spatial.distance.euclidean(controlPolygon[i-1], controlPolygon[i])
t = range(len(controlPolygon))
ipl_t = np.linspace(0.0, len(controlPolygon) - 1, max(polLen*100,100))
x_tup = sp.interpolate.splrep(t, x, k = degree)
y_tup = sp.interpolate.splrep(t, y, k = degree)
z_tup = sp.interpolate.splrep(t, z, k = degree)
x_list = list(x_tup)
xl = x.tolist()
x_list[1] = xl + [0.0, 0.0, 0.0, 0.0]
y_list = list(y_tup)
yl = y.tolist()
y_list[1] = yl + [0.0, 0.0, 0.0, 0.0]
z_list = list(z_tup)
zl = z.tolist()
z_list[1] = zl + [0.0, 0.0, 0.0, 0.0]
x_i = sp.interpolate.splev(ipl_t, x_list)
y_i = sp.interpolate.splev(ipl_t, y_list)
z_i = sp.interpolate.splev(ipl_t, z_list)
self.addPolyLine(list(zip(x_i, y_i, z_i)), color, thick, thickness)
def addGraph(self, graph, color):
vtkPoints = vtk.vtkPoints()
vtkId = 0
graph2VtkId = {}
for node in graph.nodes():
vtkPoints.InsertNextPoint(graph.node[node]['coord'][0], graph.node[node]['coord'][1], graph.node[node]['coord'][2])
graph2VtkId[node] = vtkId
vtkId += 1
unstructuredGrid = vtk.vtkUnstructuredGrid()
unstructuredGrid.SetPoints(vtkPoints)
for edge in graph.edges():
unstructuredGrid.InsertNextCell(vtk.VTK_LINE, 2, [graph2VtkId[edge[0]], graph2VtkId[edge[1]]])
mapper = vtk.vtkDataSetMapper()
mapper.SetInputData(unstructuredGrid)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
self._rendererScene.AddActor(actor)
def addGraphNodes(self, graph, color):
nodes = []
for node in graph.nodes():
nodes.append((graph.node[node]['coord'][0], graph.node[node]['coord'][1], graph.node[node]['coord'][2]))
self.addPoints(nodes, color, thick=True)
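# --- Illustrative usage sketch (not part of the original module) ---
# addGraph/addGraphNodes expect a networkx-style graph whose nodes carry a
# 'coord' attribute; the instance name `plotter` and the coordinates below are
# made up for demonstration:
#   import networkx as nx
#   g = nx.Graph()
#   g.add_node('a', coord=(0.0, 0.0, 0.0))
#   g.add_node('b', coord=(1.0, 0.0, 0.0))
#   g.add_node('c', coord=(1.0, 1.0, 0.0))
#   g.add_edge('a', 'b')
#   g.add_edge('b', 'c')
#   plotter.addGraph(g, color=(0.2, 0.6, 1.0))       # edges drawn as VTK_LINE cells
#   plotter.addGraphNodes(g, color=(1.0, 0.2, 0.2))  # vertices drawn as points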
|
trianam/voropath
|
plotter.py
|
Python
|
gpl-2.0
| 22,000
|
[
"VTK"
] |
430365ddca2f9ec4fde18b1c9836632ac2f359fe59d1ea5ac1e04ea2391047b2
|
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
from __future__ import print_function
from clawpack.geoclaw import topotools
import pylab
import glob
from numpy import loadtxt
# --------------------------
def setplot(plotdata):
# --------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of clawpack.visclaw.data.ClawPlotData.
Output: a modified version of plotdata.
"""
from clawpack.visclaw import colormaps, geoplot
plotdata.clearfigures() # clear any old figures, axes, items data
plotdata.format = 'binary' # 'ascii', 'binary', 'netcdf'
try:
tsudata = open(plotdata.outdir+'/geoclaw.data').readlines()
for line in tsudata:
if 'sea_level' in line:
sea_level = float(line.split()[0])
print("sea_level = ",sea_level)
except:
print("Could not read sea_level, setting to 0.")
sea_level = 0.
clim_ocean = 0.0001
clim_CC = 0.0001
cmax_ocean = clim_ocean + sea_level
cmin_ocean = -clim_ocean + sea_level
cmax_CC = clim_CC + sea_level
cmin_CC = -clim_CC + sea_level
def timeformat(t):
from numpy import mod
hours = int(t/3600.)
tmin = mod(t,3600.)
min = int(tmin/60.)
sec = int(mod(tmin,60.))
timestr = '%s:%s:%s' % (hours,str(min).zfill(2),str(sec).zfill(2))
return timestr
def title_hours(current_data):
from pylab import title
t = current_data.t
timestr = timeformat(t)
title('Adjoint time %s before time of interest' % timestr)
def title(current_data):
from pylab import title
title('Surface Height',fontsize=15)
def plotcc(current_data):
from pylab import plot,text
plot([235.8162], [41.745616],'wo')
text(235.8,41.9,'Cr.City',color='w',fontsize=10)
#-----------------------------------------
# Figure for big area
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Pacific', figno=0)
plotfigure.kwargs = {'figsize': (5.5,5)}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.axescmd = 'axes([0.12,0.12,0.79,0.79])'
plotaxes.title = 'Pacific'
plotaxes.scaled = True
def aa(current_data):
from pylab import ticklabel_format, xticks, gca, cos, pi, yticks, grid
plotcc(current_data)
title_hours(current_data)
ticklabel_format(format='plain',useOffset=False)
xticks(rotation=20, fontsize = 10)
yticks(fontsize = 10)
a = gca()
a.set_aspect(1./cos(41.75*pi/180.))
grid(True)
plotaxes.afteraxes = aa
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')
plotitem.plot_var = geoplot.surface_or_depth
my_cmap = colormaps.make_colormap({-1.0: [0.0,0.0,1.0], \
-0.5: [0.5,0.5,1.0], \
0.0: [1.0,1.0,1.0], \
0.5: [1.0,0.5,0.5], \
1.0: [1.0,0.0,0.0]})
plotitem.imshow_cmap = my_cmap
plotitem.imshow_cmin = cmin_ocean
plotitem.imshow_cmax = cmax_ocean
plotitem.add_colorbar = False
plotitem.colorbar_shrink = 0.7
plotitem.amr_celledges_show = [0,0,0]
plotitem.amr_patchedges_show = [0]
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')
plotitem.plot_var = geoplot.land
plotitem.imshow_cmap = geoplot.land_colors
plotitem.imshow_cmin = 0.0
plotitem.imshow_cmax = 100.0
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [0,0,0]
plotitem.amr_patchedges_show = [0]
plotaxes.xlimits = [-240,-100]
plotaxes.ylimits = [-41,65]
# Add contour lines of bathymetry:
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.show = False
plotitem.plot_var = geoplot.topo
from numpy import arange, linspace
plotitem.contour_levels = linspace(-6000,0,7)
plotitem.amr_contour_colors = ['g'] # color on each level
plotitem.kwargs = {'linestyles':'solid'}
plotitem.amr_contour_show = [0,0,1,0] # show contours only on finest level
plotitem.celledges_show = 0
plotitem.patchedges_show = 0
# Add contour lines of topography:
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.show = False
plotitem.plot_var = geoplot.topo
from numpy import arange, linspace
plotitem.contour_levels = arange(0., 11., 1.)
plotitem.amr_contour_colors = ['g'] # color on each level
plotitem.kwargs = {'linestyles':'solid'}
plotitem.amr_contour_show = [0,0,0,1] # show contours only on finest level
plotitem.celledges_show = 0
plotitem.patchedges_show = 0
# Parameters used only when creating html and/or latex hardcopy
# e.g., via clawpack.visclaw.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.print_gaugenos = 'all' # list of gauges to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
plotdata.html_movie = 'JSAnimation' # new style, or "4.x" for old style
return plotdata
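# --- Illustrative usage sketch (not part of the original file) ---
# setplot is normally invoked by the visclaw plotting machinery, but it can be
# driven by hand roughly as below; the output directory name is an assumption:
#   from clawpack.visclaw.data import ClawPlotData
#   plotdata = ClawPlotData()
#   plotdata.outdir = '_output'   # directory holding the GeoClaw output
#   plotdata = setplot(plotdata)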
|
clawpack/adjoint
|
examples/adjoint_CrescentCity/setplot.py
|
Python
|
bsd-2-clause
| 5,998
|
[
"NetCDF"
] |
58398ac7fdd4c9d56a7dd64998c9370e65ffddb318ab304cf9155740141a5783
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import time
import numpy as np
from pair_correlation import *
import omero.scripts as scripts
import omero.util.script_utils as script_util
from omero.gateway import BlitzGateway
import omero
from omero.rtypes import *
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.Utils import formatdate
import smtplib
ADMIN_EMAIL = 'admin@omerocloud.qbi.uq.edu.au'
def list_image_names(conn, ids, file_anns):
"""
Builds a list of the image names
@param conn: The BlitzGateway connection
@param ids: Python list of image ids
"""
image_names = []
for i,image_id in enumerate(ids):
img = conn.getObject('Image', image_id)
if not img:
continue
ds = img.getParent()
if ds:
pr = ds.getParent()
else:
pr = None
image_names.append("[%s][%s] Image %d : %s : %s" % (
pr and pr.getName() or '-',
ds and ds.getName() or '-',
image_id, os.path.basename(img.getName()),
file_anns[i].getFile().getName()))
return image_names
def validate_email(conn, params):
"""
Checks that a valid email address is present for the user_id
@param conn: The BlitzGateway connection
@param params: The script parameters
"""
userEmail = ''
if params['Email_address']:
userEmail = params['Email_address']
else:
user = conn.getUser()
user.getName() # Initialises the proxy object for simpleMarshal
dic = user.simpleMarshal()
if 'email' in dic and dic['email']:
userEmail = dic['email']
params['Email_address'] = userEmail
print userEmail
# Validate with a regular expression. Not perfect but it will do
return re.match("^[a-zA-Z0-9._%-]+@[a-zA-Z0-9._%-]+\.[a-zA-Z]{2,6}$",
userEmail)
def email_results(conn,params,image_ids,file_anns):
"""
E-mail the result to the user.
@param conn: The BlitzGateway connection
@param params: The script parameters
@param image_ids: A python list of the new image omero ids
"""
if not params['Email_Results']:
return
image_names = list_image_names(conn, image_ids, file_anns)
msg = MIMEMultipart()
msg['From'] = ADMIN_EMAIL
msg['To'] = params['Email_address']
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = '[OMERO Job] Pair correlation'
msg.attach(MIMEText("""
New pair correlation results files created:
Format:
[parent project/datset] image id : image name : result filename
------------------------------------------------------------------------
%s""" % ("\n".join(image_names))))
smtpObj = smtplib.SMTP('localhost')
smtpObj.sendmail(ADMIN_EMAIL, [params['Email_address']], msg.as_string())
smtpObj.quit()
def get_rectangles(conn, imageId):
"""
Returns a list of (x, y, width, height, zStart, zStop, tStart, tStop)
of each rectange ROI in the image scaled to nm
@param conn: the BlitzGateWay connection
@param imageId: the id on the server of the image that holds the ROIs
"""
rois = []
roiService = conn.getRoiService()
result = roiService.findByImage(imageId, None)
physX = 1.0
physY = 1.0
for roi in result.rois:
zStart = None
zEnd = 0
tStart = None
tEnd = 0
x = None
for shape in roi.copyShapes():
if type(shape) == omero.model.RectI:
# check t range and z range for every rectangle
t = shape.getTheT().getValue()
z = shape.getTheZ().getValue()
if tStart is None:
tStart = t
if zStart is None:
zStart = z
tStart = min(t, tStart)
tEnd = max(t, tEnd)
zStart = min(z, zStart)
zEnd = max(z, zEnd)
if x is None: # get x, y, width, height for first rect only
x = int(shape.getX().getValue())
y = int(shape.getY().getValue())
width = int(shape.getWidth().getValue())
height = int(shape.getHeight().getValue())
# if we have found any rectangles at all...
if zStart is not None:
rois.append((x*physX, y*physY, width*physX, height*physY, zStart, zEnd, tStart, tEnd))
return rois
def do_fit(data,rmax,solver,func,guess):
try:
fit = np.zeros(data.shape)
result = np.zeros((data.shape[1],len(guess)))
for c in range(data.shape[1]):
curve,params = fit_correlation(data[:,c],rmax,solver,func,guess)
fit[:,c] = curve
result[c,:] = params
except:
fit = None
result = None
return fit,result
def fit_paircorrelation(data,rmax,expo_fit,expo_guess,\
expogauss_fit,expogauss_guess):
"""
Launches the pair correlation calculation
see 'pair_correlation.py'
@param data: the image being processed as a multipage array
@param rmax: the maximum radius used in the correlation
calculation
@param expo_fit: boolean indicating we are fitting an exponential
@param expo_guess: a list of parameters passed to the model
@param expogauss_fit: boolean indicating we are fitting an exponential+gaussian
@param expogauss_guess: a list of parameters passed to the model
"""
if data.any():
efit = eresult = egfit = egresult = None
if expo_fit:
efit,eresult = do_fit(data,rmax,fit_exponential,exponential,expo_guess)
if expogauss_fit:
egfit,egresult = do_fit(data,rmax,fit_exponential_gaussian,\
exponential_gaussian,expogauss_guess)
return efit,eresult,egfit,egresult
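# --- Illustrative sketch (not part of the original script) ---
# fit_correlation, fit_exponential and exponential come from the pair_correlation
# module (not shown here); the stand-in below only illustrates the assumed form of
# the exponential model (baseline + amplitude * exp(-r/decay)) using the script's
# default guesses, with hypothetical radius_nm / g_of_r arrays:
#   import numpy as np
#   from scipy.optimize import curve_fit
#   def expo(r, baseline, amplitude, decay):
#       return baseline + amplitude * np.exp(-r / decay)
#   params, cov = curve_fit(expo, radius_nm, g_of_r, p0=[1.0, 30.0, 80.0])
#   fit_curve = expo(radius_nm, *params)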
def cross_correlate(image_array,sizeC,roi,rmax):
"""
Calculates the pair correlation function for coordinates in
user-defined rectangular region of interest
@param image_array: a generator of image planes
@param sizeC: how many channels in the image (1 or 2)
@param roi: the region of interest being processed
@param rmax: the maximum radius used in the correlation
calculation
"""
sizeZ = 1
sizeT = 1
data = []
r = np.arange(rmax+1)
g = np.zeros((r.shape[0],1))
for z in range(sizeZ):
for c in range(sizeC):
for t in range(sizeT):
data.append(image_array.next())
# roi will be [xmin,xmax,ymin,ymax]
r = [roi[1],roi[1]+roi[3],roi[0],roi[0]+roi[2]]
corr,radius = pc_corr(data[0],data[1],r,rmax)
g[:,0] = corr
return g, radius
def auto_correlate(image_array,sizeC,roi,rmax):
"""
Calculates the pair correlation function for coordinates in
user-defined rectangular region of interest
@param image_array: a generator of image planes
@param sizeC: how many channels in the image (1 or 2)
@param roi: the region of interest being processed
@param rmax: the maximum radius used in the correlation
calculation
"""
sizeZ = 1
sizeT = 1
r = np.arange(rmax+1)
print 'size C',sizeC
for z in range(sizeZ):
g = np.zeros((r.shape[0],sizeC))
for c in range(sizeC):
for t in range(sizeT):
data = image_array.next()
# roi will be [xmin,xmax,ymin,ymax]
r = [roi[1],roi[1]+roi[3],roi[0],roi[0]+roi[2]]
corr,radius = pc_corr(data[:,:],data[:,:],r,rmax)
g[:,c] = corr
return g, radius
def process_data(conn,image,corr_func,rmax,expo_fit,expo_params,expogauss_fit,\
expogauss_params):
"""
Run the processing on each region of interest in the image
@param conn: the BlitzGateWay connection
@param image: the image being processed
@param rmax: the maximum radius used in the pair correlation
calculation
@param model: if we are fitting what model is being used
@param params: a list of parameters passed to the model
"""
imageId = image.getId()
pixels = image.getPrimaryPixels()
imgW = image.getSizeX()
imgH = image.getSizeY()
rois = get_rectangles(conn,imageId)
try:
pix_size = pixels.getPhysicalSizeX()*1000
except:
message = 'No pixel size set on image!'
return message,None
rmax = int(rmax)
for index, r in enumerate(rois):
x, y, w, h, z1, z2, t1, t2 = r
# Bounding box
X = max(x, 0)
Y = max(y, 0)
X2 = min(x + w, imgW)
Y2 = min(y + h, imgH)
W = X2 - X
H = Y2 - Y
if (x, y, w, h) != (X, Y, W, H):
print "\nCropping ROI (x, y, w, h) %s to be within image."\
" New ROI: %s" % ((x, y, w, h), (X, Y, W, H))
rois[index] = (X, Y, W, H, z1, z2, t1, t2)
print "rois"
print rois
if len(rois) == 0:
print "No rectangular ROIs found for image ID: %s" % imageId
return
output = []
for r in rois:
x, y, w, h, z1, z2, t1, t2 = r
print " ROI x: %s y: %s w: %s h: %s z1: %s z2: %s t1: %s t2: %s"\
% (x, y, w, h, z1, z2, t1, t2)
sizeC = image.getSizeC()
z, t = 0, 0
zctList = [(z, c, t) for c in range(sizeC)]
planes = pixels.getPlanes(zctList)
def plane_gen():
for i,p in enumerate(planes):
yield p
pair_corr = {}
if 'auto' in corr_func:
g, radius = auto_correlate(plane_gen(), sizeC, r, rmax)
elif 'cross' in corr_func:
g, radius = cross_correlate(plane_gen(), sizeC, r, rmax)
pair_corr['correlation'] = g
pair_corr['radius'] = np.reshape(radius,(radius.shape[0],1))*pix_size
if expo_fit or expogauss_fit:
ecurve,eresults,egcurve,egresults = fit_paircorrelation(g,radius*pix_size,\
expo_fit,expo_params,\
expogauss_fit,expogauss_params)
pair_corr['exponential_fit'] = ecurve
pair_corr['exponential_params'] = eresults
pair_corr['exponential+gaussian_fit'] = egcurve
pair_corr['exponential+gaussian_params'] = egresults
else:
pair_corr['exponential_fit'] = None
pair_corr['exponential_params'] = None
pair_corr['exponential+gaussian_fit'] = None
pair_corr['exponential+gaussian_params'] = None
output.append(pair_corr)
message = 'Successfully ran pair-correlation. '
return message,output
def run_processing(conn,script_params):
"""
Collects params and starts the processing
@param conn: the BlitzGateWay connection
@param script_params: the parameters collected from the script input
"""
file_anns = []
message = ""
rmax = script_params['Max_radius']
expo_fit = script_params['Fit_exponential_model']
expo_params = [script_params['Exponential_baseline'],\
script_params['Exponential_amplitude'],\
script_params['Exponential_decay']]
expogauss_fit = script_params['Fit_exponential+gaussian_model']
expogauss_params = [script_params['Density'],script_params['PSF'],\
script_params['Amplitude'],script_params['Decay'],\
script_params['Baseline']]
image_ids = script_params['IDs']
for image in conn.getObjects("Image",image_ids):
if not image:
message = 'Could not find specified image'
return message
image_id = image.getId()
sizeC = image.getSizeC()
corr_func = script_params['Pair_correlation']
if ('cross' in corr_func) and (sizeC != 2):
return 'image should have two channels to cross-correlate'
message,output = process_data(conn,image,corr_func,rmax,expo_fit,expo_params,\
expogauss_fit,expogauss_params)
if output:
file_name = "image%s_%s_correlation.csv" % (image_id,corr_func)
with file(file_name, 'w') as outfile:
outfile.write('# auto correlation data for %s ROIs and %s channels: \n' %\
(len(output), output[0]['correlation'].shape[1] ))
for r, pair_corr in enumerate(output):
header = 'radius,'
data = np.concatenate((pair_corr['radius'],pair_corr['correlation']),axis=1)
for i in range(pair_corr['correlation'].shape[1]):
header += 'correlation,'
outfile.write('# Region of interest %s\n' % r)
if pair_corr['exponential_fit'] is not None:
outfile.write('exponential fit params for ROI %s: \n' % r)
outfile.write('Baseline: %s \n' % pair_corr['exponential_params'][:,0])
outfile.write('Amplitude: %s \n' % pair_corr['exponential_params'][:,1])
outfile.write('Decay: %s \n' % pair_corr['exponential_params'][:,2])
for i in range(pair_corr['exponential_fit'].shape[1]):
header += 'fit,'
data = np.concatenate((data,pair_corr['exponential_fit']),axis=1)
if pair_corr['exponential+gaussian_fit'] is not None:
outfile.write('exponential+gaussian fit params for ROI %s: \n' % r)
outfile.write('Density: %s \n' % pair_corr['exponential+gaussian_params'][:,0])
outfile.write('PSF: %s \n' % pair_corr['exponential+gaussian_params'][:,1])
outfile.write('Amplitude: %s \n' % pair_corr['exponential+gaussian_params'][:,2])
outfile.write('Decay: %s \n' % pair_corr['exponential+gaussian_params'][:,3])
outfile.write('Baseline: %s \n' % pair_corr['exponential+gaussian_params'][:,4])
for i in range(pair_corr['exponential+gaussian_fit'].shape[1]):
header += 'fit,'
data = np.concatenate((data,pair_corr['exponential+gaussian_fit']),axis=1)
outfile.write(header[:-1] + '\n')
np.savetxt(outfile, data, fmt='%-7.2f', delimiter=',', newline='\n')
new_file_ann, faMessage = script_util.createLinkFileAnnotation(
conn, file_name, image, output="Pair correlation csv file",
mimetype="text/csv", desc=None)
if new_file_ann:
file_anns.append(new_file_ann)
if not file_anns:
faMessage = "No Analysis files created. See 'Info' or 'Error' for"\
" more details"
elif len(file_anns) > 1:
faMessage = "Created %s csv (Excel) files" % len(file_anns)
message += faMessage
if script_params['Email_Results'] and file_anns:
email_results(conn,script_params,image_ids,file_anns)
return message
def run_as_script():
"""
The main entry point of the script, as called by the client via the scripting service,
passing the required parameters.
"""
dataTypes = [rstring('Image')]
model = [rstring('Exponential')]#,rstring('Gaussian+Exponential',rstring('Cosine+Exponential')]
pc_corr = [rstring('auto'),rstring('cross')]
client = scripts.client('Pair_Correlation_Function.py', """This script calculates the
pair (auto or cross) correlation function for ROIs on PALM/STORM images.
This script should only be run on super resolved images where the
physical pixel size has been set (e.g. CZI or OME-TIFF files).
This script uses code, translated to Python, that was provided in:
"Correlation Functions Quantify Super-Resolution Images
and Estimate Apparent Clustering Due to Over-Counting"
Veatch et al, PlosONE, DOI: 10.1371/journal.pone.0031457
This paper should be referenced in any publication that
results from the use of this script.""",
scripts.String("Data_Type", optional=False, grouping="01",
description="Choose source of images (only Image supported)", values=dataTypes, default="Image"),
scripts.List("IDs", optional=False, grouping="02",
description="ID of super resolved image to process"),
scripts.String("Pair_correlation", optional=False, grouping="03",
description="Choose the type of pair correlation to perform", values=pc_corr, default="auto"),
scripts.Int("Max_radius", optional=False, grouping="04",
description="Maximum distance scale for calculation (in pixels)", default=50),
scripts.Bool("Fit_exponential_model", grouping="05", default=True,
description="Choose model to fit to correlation data"),
scripts.Int("Exponential_amplitude", optional=False, grouping="05.1",
description="Amplitude of exponential", default=30),
scripts.Int("Exponential_decay", optional=False, grouping="05.2",
description="Decay length of exponential (in nm)",default=80),
scripts.Int("Exponential_baseline", optional=False, grouping="05.3",
description="Baseline of exponential",default=1),
scripts.Bool("Fit_exponential+gaussian_model", grouping="06", default=False,
description="Choose model to fit to correlation data"),
scripts.Int("Density", optional=False, grouping="06.1",
description="Surface density of probe (1/um^2)", default=1),
scripts.Int("PSF", optional=False, grouping="06.2",
description="sqrt(2)*PSF of the image (nm)", default=30),
scripts.Int("Amplitude", optional=False, grouping="06.3",
description="Amplitude of exponential", default=30),
scripts.Int("Decay", optional=False, grouping="06.4",
description="Decay length of exponential (in nm)",default=80),
scripts.Int("Baseline", optional=False, grouping="06.5",
description="Baseline of exponential",default=1),
scripts.Bool(
"Email_Results", grouping="07", default=False,
description="E-mail the results"),
scripts.String("Email_address", grouping="07.1",
description="Specify e-mail address"),
authors = ["Daniel Matthews", "QBI"],
institutions = ["University of Queensland"],
contact = "d.matthews1@uq.edu.au",
)
try:
# process the list of args above.
scriptParams = {}
for key in client.getInputKeys():
if client.getInput(key):
scriptParams[key] = client.getInput(key, unwrap=True)
print scriptParams
# wrap client to use the Blitz Gateway
conn = BlitzGateway(client_obj=client)
if scriptParams['Email_Results'] and not validate_email(conn, scriptParams):
client.setOutput("Message", rstring("No valid email address"))
return
# process images in Datasets
message = run_processing(conn, scriptParams)
client.setOutput("Message", rstring(message))
#client.setOutput("Message", rstring("No plates created. See 'Error' or 'Info' for details"))
finally:
client.closeSession()
if __name__ == "__main__":
run_as_script()
|
QBI-Microscopy/omero-user-scripts
|
PALM-STORM/Pair_Correlation_Function.py
|
Python
|
gpl-2.0
| 20,571
|
[
"Gaussian"
] |
b66af0fb6d54e61edc8abbf5cf5903d08bbade6053b1c8b3b2050bca69b6a197
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# restartfe - restart a resource frontend
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Restart frontend"""
import shared.returnvalues as returnvalues
from shared.findtype import is_owner
from shared.functional import validate_input_and_cert, REJECT_UNSET
from shared.handlers import correct_handler
from shared.init import initialize_main_variables
from shared.resadm import stop_resource, start_resource
def signature():
"""Signature of the main function"""
defaults = {'unique_resource_name': REJECT_UNSET}
return ['text', defaults]
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id)
output_objects.append({'object_type': 'text', 'text'
: '--------- Trying to RESTART frontend ----------'
})
defaults = signature()[1]
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
if not correct_handler('POST'):
output_objects.append(
{'object_type': 'error_text', 'text'
: 'Only accepting POST requests to prevent unintended updates'})
return (output_objects, returnvalues.CLIENT_ERROR)
unique_resource_name = accepted['unique_resource_name'][-1]
logger.info('%s attempts to restart frontend at %s', client_id,
unique_resource_name)
if not is_owner(client_id, unique_resource_name,
configuration.resource_home, logger):
output_objects.append({'object_type': 'error_text', 'text'
: 'You must be an owner of '
+ unique_resource_name
+ ' to restart the resource frontend!'})
return (output_objects, returnvalues.CLIENT_ERROR)
(status, msg) = stop_resource(unique_resource_name,
configuration.resource_home, logger)
if not status:
output_objects.append({'object_type': 'error_text', 'text'
: '%s. Error stopping resource' % msg})
return (output_objects, returnvalues.CLIENT_ERROR)
(status, msg2) = start_resource(unique_resource_name,
configuration.resource_home,
configuration.migserver_https_sid_url,
logger)
if not status:
output_objects.append({'object_type': 'error_text', 'text'
: '%s. Error starting resource' % msg2})
return (output_objects, returnvalues.CLIENT_ERROR)
# everything ok
output_objects.append({'object_type': 'text', 'text'
: 'Stop output: %s, Start output %s' % (msg,
msg2)})
return (output_objects, returnvalues.OK)
|
heromod/migrid
|
mig/shared/functionality/restartfe.py
|
Python
|
gpl-2.0
| 3,973
|
[
"Brian"
] |
45d5df440cce7bd5cd436c4a2a536ae0b14c3c0221d72c0e465ffbe80875dbcd
|
#!/usr/local/bin/env python
#=============================================================================================
# Analyze datafiles produced by YANK.
#=============================================================================================
#=============================================================================================
# REQUIREMENTS
#
# The netcdf4-python module is now used to provide netCDF v4 support:
# http://code.google.com/p/netcdf4-python/
#
# This requires NetCDF with version 4 and multithreading support, as well as HDF5.
#=============================================================================================
#=============================================================================================
# TODO
#=============================================================================================
#=============================================================================================
# CHANGELOG
#=============================================================================================
#=============================================================================================
# VERSION CONTROL INFORMATION
#=============================================================================================
#=============================================================================================
# IMPORTS
#=============================================================================================
import os
import os.path
import sys
import math
import numpy
import netCDF4 as netcdf # netcdf4-python
from pymbar import MBAR # multistate Bennett acceptance ratio
import timeseries # for statistical inefficiency analysis
import simtk.unit as units
#=============================================================================================
# PARAMETERS
#=============================================================================================
kB = units.BOLTZMANN_CONSTANT_kB * units.AVOGADRO_CONSTANT_NA
#=============================================================================================
# SUBROUTINES
#=============================================================================================
def write_file(filename, contents):
"""Write the specified contents to a file.
ARGUMENTS
filename (string) - the file to be written
contents (string) - the contents of the file to be written
"""
outfile = open(filename, 'w')
if type(contents) == list:
for line in contents:
outfile.write(line)
elif type(contents) == str:
outfile.write(contents)
else:
raise "Type for 'contents' not supported: " + repr(type(contents))
outfile.close()
return
def read_file(filename):
"""Read contents of the specified file.
ARGUMENTS
filename (string) - the name of the file to be read
RETURNS
lines (list of strings) - the contents of the file, split by line
"""
infile = open(filename, 'r')
lines = infile.readlines()
infile.close()
return lines
def initialize_netcdf(netcdf_file, title, natoms, is_periodic = False, has_velocities = False):
"""Initialize the given NetCDF file according to the AMBER NetCDF Convention Version 1.0, Revision B.
ARGUMENTS
netcdf_file (NetCDFFile object) - the file to initialize global attributes, dimensions, and variables for
title (string) - the title for the netCDF file
natoms (integer) - the number of atoms in the trajectories to be written
is_periodic (boolean) - if True, box coordinates will also be stored
has_velocities (boolean) - if True, the velocity trajectory variables will also be created
NOTES
The AMBER NetCDF convention is defined here:
http://amber.scripps.edu/netcdf/nctraj.html
"""
# Create dimensions.
netcdf_file.createDimension('frame', 0) # unlimited number of frames in trajectory
netcdf_file.createDimension('spatial', 3) # number of spatial coordinates
netcdf_file.createDimension('atom', natoms) # number of atoms in the trajectory
netcdf_file.createDimension('label', 5) # label lengths for cell dimensions
netcdf_file.createDimension('cell_spatial', 3) # naming conventions for cell spatial dimensions
netcdf_file.createDimension('cell_angular', 3) # naming conventions for cell angular dimensions
# Set attributes.
setattr(netcdf_file, 'title', title)
setattr(netcdf_file, 'application', 'AMBER')
setattr(netcdf_file, 'program', 'sander')
setattr(netcdf_file, 'programVersion', '8')
setattr(netcdf_file, 'Conventions', 'AMBER')
setattr(netcdf_file, 'ConventionVersion', '1.0')
# Define variables to store unit cell data, if specified.
if is_periodic:
cell_spatial = netcdf_file.createVariable('cell_spatial', 'c', ('cell_spatial',))
cell_angular = netcdf_file.createVariable('cell_angular', 'c', ('cell_spatial', 'label'))
cell_lengths = netcdf_file.createVariable('cell_lengths', 'd', ('frame', 'cell_spatial'))
setattr(cell_lengths, 'units', 'angstrom')
cell_angles = netcdf_file.createVariable('cell_angles', 'd', ('frame', 'cell_angular'))
setattr(cell_angles, 'units', 'degree')
netcdf_file.variables['cell_spatial'][0] = 'x'
netcdf_file.variables['cell_spatial'][1] = 'y'
netcdf_file.variables['cell_spatial'][2] = 'z'
netcdf_file.variables['cell_angular'][0] = 'alpha'
netcdf_file.variables['cell_angular'][1] = 'beta '
netcdf_file.variables['cell_angular'][2] = 'gamma'
# Define variables to store velocity data, if specified.
if has_velocities:
velocities = netcdf_file.createVariable('velocities', 'd', ('frame', 'atom', 'spatial'))
setattr(velocities, 'units', 'angstrom/picosecond')
setattr(velocities, 'scale_factor', 20.455)
# Define coordinates and snapshot times.
frame_times = netcdf_file.createVariable('time', 'f', ('frame',))
setattr(frame_times, 'units', 'picosecond')
frame_coordinates = netcdf_file.createVariable('coordinates', 'f', ('frame', 'atom', 'spatial'))
setattr(frame_coordinates, 'units', 'angstrom')
# Define optional data not specified in the AMBER NetCDF Convention that we will make use of.
frame_energies = netcdf_file.createVariable('total_energy', 'f', ('frame',))
setattr(frame_energies, 'units', 'kilocalorie/mole')
frame_energies = netcdf_file.createVariable('potential_energy', 'f', ('frame',))
setattr(frame_energies, 'units', 'kilocalorie/mole')
return
def write_netcdf_frame(netcdf_file, frame_index, time = None, coordinates = None, cell_lengths = None, cell_angles = None, total_energy = None, potential_energy = None):
"""Write a NetCDF frame.
ARGUMENTS
netcdf_file (NetCDFFile) - the file to write a frame to
frame_index (integer) - the frame to be written
OPTIONAL ARGUMENTS
time (float) - time of frame (in picoseconds)
coordinates (natom x nspatial NumPy array) - atomic coordinates (in Angstroms)
cell_lengths (nspatial NumPy array) - cell lengths (Angstroms)
cell_angles (nspatial NumPy array) - cell angles (degrees)
total_energy (float) - total energy (kcal/mol)
potential_energy (float) - potential energy (kcal/mol)
"""
if time != None: netcdf_file.variables['time'][frame_index] = time
if coordinates != None: netcdf_file.variables['coordinates'][frame_index,:,:] = coordinates
if cell_lengths != None: netcdf_file.variables['cell_lengths'][frame_index,:] = cell_lengths
if cell_angles != None: netcdf_file.variables['cell_angles'][frame_index,:] = cell_angles
if total_energy != None: netcdf_file.variables['total_energy'][frame_index] = total_energy
if potential_energy != None: netcdf_file.variables['potential_energy'][frame_index] = potential_energy
return
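# --- Illustrative usage sketch (not part of the original module) ---
# Minimal composition of the two helpers above; the file name and coordinates are
# made up, and units follow the AMBER convention (angstroms, picoseconds):
#   ncout = netcdf.Dataset('example-trajectory.nc', 'w')
#   initialize_netcdf(ncout, 'example trajectory', natoms=3)
#   coords = numpy.zeros([3, 3], numpy.float32)  # natoms x 3, in angstroms
#   write_netcdf_frame(ncout, 0, time=1.0, coordinates=coords)
#   ncout.close()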
def read_amber_energy_frame(infile):
"""Read a frame of energy components from the AMBER energy file.
ARGUMENTS
infile (Python file handle) - the file to read from
RETURNS
energies (Python dict) -- energies[keyword] contains the energy for the corresponding keyword
"""
# number of lines per .ene block
ene_lines_per_block = 10
# energy keys
energy_keys = [
'Nsteps', 'time', 'Etot', 'EKinetic', # L0
'Temp', 'T_solute', 'T_solv', 'Pres_scal_solu', # L1
'Pres_scal_solv', 'BoxX', 'BoxY', 'BoxZ', # L2
'volume', 'pres_X', 'pres_Y', 'pres_Z',
'Pressure', 'EKCoM_x', 'EKCoM_y', 'EKCoM_z',
'EKComTot', 'VIRIAL_x', 'VIRIAL_y', 'VIRIAL_z',
'VIRIAL_tot', 'E_pot', 'E_vdw', 'E_el',
'E_hbon', 'E_bon', 'E_angle', 'E_dih',
'E_14vdw', 'E_14el', 'E_const', 'E_pol',
'AV_permMoment', 'AV_indMoment', 'AV_totMoment', 'Density', 'dV/dlambda'
]
# Read energy block.
energies = dict()
key_index = 0
for line_counter in range(ene_lines_per_block):
line = infile.readline() # read the line
elements = line.split() # split into elements
elements.pop(0) # drop the 'L#' initial element
for element in elements:
key = energy_keys[key_index] # get the key
energies[key] = float(element) # store the energy
key_index += 1 # increment index
return energies
def write_netcdf_replica_trajectories(directory, prefix, title, ncfile):
"""Write out replica trajectories in AMBER NetCDF format.
ARGUMENTS
directory (string) - the directory to write files to
prefix (string) - prefix for replica trajectory files
title (string) - the title to give each NetCDF file
ncfile (NetCDF) - NetCDF file object for input file
"""
# Get current dimensions.
niterations = ncfile.variables['positions'].shape[0]
nstates = ncfile.variables['positions'].shape[1]
natoms = ncfile.variables['positions'].shape[2]
# Write out each replica to a separate file.
for replica in range(nstates):
# Create a new replica file.
output_filename = os.path.join(directory, '%s-%03d.nc' % (prefix, replica))
ncoutfile = netcdf.Dataset(output_filename, 'w')
initialize_netcdf(ncoutfile, title + " (replica %d)" % replica, natoms)
for iteration in range(niterations):
coordinates = numpy.array(ncfile.variables['positions'][iteration,replica,:,:])
coordinates *= 10.0 # convert nm to angstroms
write_netcdf_frame(ncoutfile, iteration, time = 1.0 * iteration, coordinates = coordinates)
ncoutfile.close()
return
def compute_torsion_trajectories(ncfile, filename):
"""Write out torsion trajectories for Val 111.
ARGUMENTS
ncfile (NetCDF) - NetCDF file object for input file
filename (string) - name of file to be written
"""
atoms = [1735, 1737, 1739, 1741] # N-CA-CB-CG1 of Val 111
# Get current dimensions.
niterations = ncfile.variables['positions'].shape[0]
nstates = ncfile.variables['positions'].shape[1]
natoms = ncfile.variables['positions'].shape[2]
# Compute torsion angle
def compute_torsion(positions, atoms):
# Compute vectors from cross products
vBA = positions[atoms[0],:] - positions[atoms[1],:]
vBC = positions[atoms[2],:] - positions[atoms[1],:]
vCB = positions[atoms[1],:] - positions[atoms[2],:]
vCD = positions[atoms[3],:] - positions[atoms[2],:]
v1 = numpy.cross(vBA,vBC)
v2 = numpy.cross(vCB,vCD)
cos_theta = numpy.dot(v1,v2) / numpy.sqrt(numpy.dot(v1,v1) * numpy.dot(v2,v2))
theta = numpy.arccos(cos_theta) * 180.0 / math.pi
return theta
# Compute torsion angles for each replica
contents = ""
for iteration in range(niterations):
for replica in range(nstates):
# Compute torsion
torsion = compute_torsion(numpy.array(ncfile.variables['positions'][iteration,replica,:,:]), atoms)
# Write torsion
contents += "%8.1f" % torsion
contents += "\n"
# Write contents.
write_file(filename, contents)
return
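# --- Illustrative sanity check (not part of the original module) ---
# The nested compute_torsion above builds the dihedral from two cross products; a
# quick standalone check with made-up planar coordinates gives the expected limits
# (~0 degrees for a cis arrangement, ~180 degrees for trans):
#   A = numpy.array([0., 1., 0.]); B = numpy.array([0., 0., 0.])
#   C = numpy.array([1., 0., 0.]); D = numpy.array([1., 1., 0.])   # cis
#   v1 = numpy.cross(A - B, C - B)
#   v2 = numpy.cross(B - C, D - C)
#   cos_theta = numpy.dot(v1, v2) / numpy.sqrt(numpy.dot(v1, v1) * numpy.dot(v2, v2))
#   numpy.arccos(cos_theta) * 180.0 / math.pi   # ~0.0 here; ~180.0 with D = [1., -1., 0.]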
def write_pdb_replica_trajectories(basepdb, directory, prefix, title, ncfile, trajectory_by_state=True):
"""Write out replica trajectories as multi-model PDB files.
ARGUMENTS
basepdb (string) - name of PDB file to read atom names and residue information from
directory (string) - the directory to write files to
prefix (string) - prefix for replica trajectory files
title (string) - the title to give each PDB file
ncfile (NetCDF) - NetCDF file object for input file
"""
niterations = ncfile.variables['positions'].shape[0]
nstates = ncfile.variables['positions'].shape[1]
natoms = ncfile.variables['positions'].shape[2]
atom_list=read_pdb(basepdb)
if (len(atom_list) != natoms):
print ("Number of atoms in trajectory (%d) differs from number of atoms in reference PDB (%d)." % (natoms, len(atom_list)))
raise Exception
if trajectory_by_state:
for state_index in range(0,nstates):
print "Working on state %d / %d" % (state_index,nstates)
file_name= "%s-%03d.pdb" % (prefix,state_index)
full_filename=directory+'/'+file_name
outfile = open(full_filename, 'w')
for iteration in range(niterations):
state_indices = ncfile.variables['states'][iteration,:]
replica_index = list(state_indices).index(state_index)
outfile.write('MODEL %4d\n' % (iteration+1))
write_pdb(atom_list,outfile,iteration,replica_index,title,ncfile,trajectory_by_state=True)
outfile.write('ENDMDL\n')
outfile.close()
else:
for replica_index in range(nstates):
print "Working on replica %d / %d" % (replica_index,nstates)
file_name="R-%s-%03d.pdb" % (prefix,replica_index)
full_filename=directory+'/'+file_name
outfile = open(full_filename, 'w')
for iteration in range(niterations):
outfile.write('MODEL %4d\n' % (iteration+1))
write_pdb(atom_list,outfile,iteration,replica_index,title,ncfile,trajectory_by_state=False)
outfile.write('ENDMDL\n')
outfile.close()
return
def read_pdb(filename):
"""
Read the contents of a PDB file.
ARGUMENTS
filename (string) - name of the file to be read
RETURNS
atoms (list of dict) - atoms[index] is a dict of fields for the ATOM residue
"""
# Read the PDB file into memory.
pdbfile = open(filename, 'r')
# Extract the ATOM entries.
# Format described here: http://bmerc-www.bu.edu/needle-doc/latest/atom-format.html
atoms = list()
for line in pdbfile:
if line[0:6] == "ATOM ":
# Parse line into fields.
atom = dict()
atom["serial"] = line[6:11]
atom["atom"] = line[12:16]
atom["altLoc"] = line[16:17]
atom["resName"] = line[17:20]
atom["chainID"] = line[21:22]
atom["Seqno"] = line[22:26]
atom["iCode"] = line[26:27]
atom["x"] = line[30:38]
atom["y"] = line[38:46]
atom["z"] = line[46:54]
atom["occupancy"] = line[54:60]
atom["tempFactor"] = line[60:66]
atoms.append(atom)
# Close PDB file.
pdbfile.close()
# Return dictionary of present residues.
return atoms
def write_pdb(atoms, filename, iteration, replica, title, ncfile,trajectory_by_state=True):
"""Write out replica trajectories as multi-model PDB files.
ARGUMENTS
atoms (list of dict) - parsed PDB file ATOM entries from read_pdb() - WILL BE CHANGED
filename (string) - name of PDB file to be written
title (string) - the title to give each PDB file
ncfile (NetCDF) - NetCDF file object for input file
"""
# Extract coordinates to be written.
coordinates = numpy.array(ncfile.variables['positions'][iteration,replica,:,:])
coordinates *= 10.0 # convert nm to angstroms
# Create file.
#outfile = open(filename, 'w')
# Write ATOM records.
for (index, atom) in enumerate(atoms):
atom["x"] = "%8.3f" % coordinates[index,0]
atom["y"] = "%8.3f" % coordinates[index,1]
atom["z"] = "%8.3f" % coordinates[index,2]
filename.write('ATOM %(serial)5s %(atom)4s%(altLoc)c%(resName)3s %(chainID)c%(Seqno)5s %(x)8s%(y)8s%(z)8s\n' % atom)
# Close file.
#outfile.close()
return
def write_crd(filename, iteration, replica, title, ncfile):
"""
Write out AMBER format CRD file.
"""
# Extract coordinates to be written.
coordinates = numpy.array(ncfile.variables['positions'][iteration,replica,:,:])
coordinates *= 10.0 # convert nm to angstroms
# Create file.
outfile = open(filename, 'w')
# Write title.
outfile.write(title + '\n')
# Write number of atoms.
natoms = ncfile.variables['positions'].shape[2]
outfile.write('%6d\n' % natoms)
# Write coordinates.
for index in range(natoms):
outfile.write('%12.7f%12.7f%12.7f' % (coordinates[index,0], coordinates[index,1], coordinates[index,2]))
if ((index+1) % 2 == 0): outfile.write('\n')
# Close file.
outfile.close()
def show_mixing_statistics(ncfile, cutoff=0.05, nequil=0):
"""
Print summary of mixing statistics.
ARGUMENTS
ncfile (netCDF4.Dataset) - NetCDF file
OPTIONAL ARGUMENTS
cutoff (float) - only transition probabilities above 'cutoff' will be printed (default: 0.05)
nequil (int) - if specified, only samples nequil:end will be used in analysis (default: 0)
"""
# Get dimensions.
niterations = ncfile.variables['states'].shape[0]
nstates = ncfile.variables['states'].shape[1]
# Compute statistics of transitions.
Nij = numpy.zeros([nstates,nstates], numpy.float64)
for iteration in range(nequil, niterations-1):
for ireplica in range(nstates):
istate = ncfile.variables['states'][iteration,ireplica]
jstate = ncfile.variables['states'][iteration+1,ireplica]
Nij[istate,jstate] += 0.5
Nij[jstate,istate] += 0.5
Tij = numpy.zeros([nstates,nstates], numpy.float64)
for istate in range(nstates):
Tij[istate,:] = Nij[istate,:] / Nij[istate,:].sum()
# Print observed transition probabilities.
print "Cumulative symmetrized state mixing transition matrix:"
print "%6s" % "",
for jstate in range(nstates):
print "%6d" % jstate,
print ""
for istate in range(nstates):
print "%-6d" % istate,
for jstate in range(nstates):
P = Tij[istate,jstate]
if (P >= cutoff):
print "%6.3f" % P,
else:
print "%6s" % "",
print ""
# Estimate second eigenvalue and equilibration time.
mu = numpy.linalg.eigvals(Tij)
mu = -numpy.sort(-mu) # sort in descending order
if (mu[1] >= 1):
print "Perron eigenvalue is unity; Markov chain is decomposable."
else:
print "Perron eigenvalue is %9.5f; state equilibration timescale is ~ %.1f iterations" % (mu[1], 1.0 / (1.0 - mu[1]))
return
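# --- Illustrative sketch (not part of the original module) ---
# The timescale printed above is 1 / (1 - mu_2), where mu_2 is the subdominant
# eigenvalue of the transition matrix; for a made-up symmetric two-state matrix
# this is easy to verify by hand:
#   Tij = numpy.array([[0.9, 0.1], [0.1, 0.9]])
#   mu = -numpy.sort(-numpy.linalg.eigvals(Tij))   # [1.0, 0.8]
#   1.0 / (1.0 - mu[1])                            # 5.0 iterations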
def analyze_acceptance_probabilities(ncfile, cutoff = 0.4):
"""Analyze acceptance probabilities.
ARGUMENTS
ncfile (NetCDF) - NetCDF file to be analyzed.
OPTIONAL ARGUMENTS
cutoff (float) - cutoff for showing acceptance probabilities as blank (default: 0.4)
"""
# Get current dimensions.
niterations = ncfile.variables['mixing'].shape[0]
nstates = ncfile.variables['mixing'].shape[1]
# Compute mean.
mixing = ncfile.variables['mixing'][:,:,:]
Pij = numpy.mean(mixing, 0)
# Write title.
print "Average state-to-state acceptance probabilities"
print "(Probabilities less than %(cutoff)f shown as blank.)" % vars()
print ""
# Write header.
print "%4s" % "",
for j in range(nstates):
print "%6d" % j,
print ""
# Write rows.
for i in range(nstates):
print "%4d" % i,
for j in range(nstates):
if Pij[i,j] > cutoff:
print "%6.3f" % Pij[i,j],
else:
print "%6s" % "",
print ""
return
def check_energies(ncfile, atoms):
"""
Examine energy history for signs of instability (nans).
ARGUMENTS
ncfile (NetCDF) - input YANK netcdf file
"""
# Get current dimensions.
niterations = ncfile.variables['energies'].shape[0]
nstates = ncfile.variables['energies'].shape[1]
# Extract energies.
print "Reading energies..."
energies = ncfile.variables['energies']
u_kln_replica = numpy.zeros([nstates, nstates, niterations], numpy.float64)
for n in range(niterations):
u_kln_replica[:,:,n] = energies[n,:,:]
print "Done."
# Deconvolute replicas
print "Deconvoluting replicas..."
u_kln = numpy.zeros([nstates, nstates, niterations], numpy.float64)
for iteration in range(niterations):
state_indices = ncfile.variables['states'][iteration,:]
u_kln[state_indices,:,iteration] = energies[iteration,:,:]
print "Done."
# Show all self-energies
show_self_energies = False
if (show_self_energies):
print 'all self-energies for all replicas'
for iteration in range(niterations):
for replica in range(nstates):
state = int(ncfile.variables['states'][iteration,replica])
print '%12.1f' % energies[iteration, replica, state],
print ''
# If no energies are 'nan', we're clean.
if not numpy.any(numpy.isnan(energies[:,:,:])):
return
# There are some energies that are 'nan', so check if the first iteration has nans in their *own* energies:
u_k = numpy.diag(energies[0,:,:])
if numpy.any(numpy.isnan(u_k)):
print "First iteration has exploded replicas. Check to make sure structures are minimized before dynamics"
print "Energies for all replicas after equilibration:"
print u_k
sys.exit(1)
# There are some energies that are 'nan' past the first iteration. Find the first instances for each replica and write PDB files.
first_nan_k = numpy.zeros([nstates], numpy.int32)
for iteration in range(niterations):
for k in range(nstates):
if numpy.isnan(energies[iteration,k,k]) and first_nan_k[k]==0:
first_nan_k[k] = iteration
if not all(first_nan_k == 0):
print "Some replicas exploded during the simulation."
print "Iterations where explosions were detected for each replica:"
print first_nan_k
print "Writing PDB files immediately before explosions were detected..."
for replica in range(nstates):
if (first_nan_k[replica] > 0):
iteration = first_nan_k[replica] - 1
state = ncfile.variables['states'][iteration,replica]
filename = 'replica-%d-before-explosion.pdb' % replica
title = 'replica %d state %d iteration %d' % (replica, state, iteration)
write_pdb(atoms, filename, iteration, replica, title, ncfile)
filename = 'replica-%d-before-explosion.crd' % replica
write_crd(filename, iteration, replica, title, ncfile)
sys.exit(1)
# There are some energies that are 'nan', but these are energies at foreign lambdas. We'll just have to be careful with MBAR.
# Raise a warning.
print "WARNING: Some energies at foreign lambdas are 'nan'. This is recoverable."
return
def check_positions(ncfile):
"""Make sure no positions have gone 'nan'.
ARGUMENTS
ncfile (NetCDF) - NetCDF file object for input file
"""
# Get current dimensions.
niterations = ncfile.variables['positions'].shape[0]
nstates = ncfile.variables['positions'].shape[1]
natoms = ncfile.variables['positions'].shape[2]
# Compute torsion angles for each replica
for iteration in range(niterations):
for replica in range(nstates):
# Extract positions
positions = numpy.array(ncfile.variables['positions'][iteration,replica,:,:])
# Check for nan
if numpy.any(numpy.isnan(positions)):
# Nan found -- raise error
print "Iteration %d, state %d - nan found in positions." % (iteration, replica)
# Report coordinates
for atom_index in range(natoms):
print "%16.3f %16.3f %16.3f" % (positions[atom_index,0], positions[atom_index,1], positions[atom_index,2])
if numpy.any(numpy.isnan(positions[atom_index,:])):
raise "nan detected in positions"
return
def estimate_free_energies(ncfile, ndiscard = 0, nuse = None):
"""Estimate free energies of all alchemical states.
ARGUMENTS
ncfile (NetCDF) - input YANK netcdf file
OPTIONAL ARGUMENTS
ndiscard (int) - number of iterations to discard to equilibration
nuse (int) - maximum number of iterations to use (after discarding)
TODO: Automatically determine 'ndiscard'.
"""
# Get current dimensions.
niterations = ncfile.variables['energies'].shape[0]
nstates = ncfile.variables['energies'].shape[1]
natoms = ncfile.variables['energies'].shape[2]
# Extract energies.
print "Reading energies..."
energies = ncfile.variables['energies']
u_kln_replica = numpy.zeros([nstates, nstates, niterations], numpy.float64)
for n in range(niterations):
u_kln_replica[:,:,n] = energies[n,:,:]
print "Done."
# Deconvolute replicas
print "Deconvoluting replicas..."
u_kln = numpy.zeros([nstates, nstates, niterations], numpy.float64)
for iteration in range(niterations):
state_indices = ncfile.variables['states'][iteration,:]
u_kln[state_indices,:,iteration] = energies[iteration,:,:]
print "Done."
# Compute total negative log probability over all iterations.
u_n = numpy.zeros([niterations], numpy.float64)
for iteration in range(niterations):
u_n[iteration] = numpy.sum(numpy.diagonal(u_kln[:,:,iteration]))
#print u_n
# DEBUG
outfile = open('u_n.out', 'w')
for iteration in range(niterations):
outfile.write("%8d %24.3f\n" % (iteration, u_n[iteration]))
outfile.close()
# Discard initial data to equilibration.
u_kln_replica = u_kln_replica[:,:,ndiscard:]
u_kln = u_kln[:,:,ndiscard:]
u_n = u_n[ndiscard:]
# Truncate to number of specified conformations to use
if (nuse):
u_kln_replica = u_kln_replica[:,:,0:nuse]
u_kln = u_kln[:,:,0:nuse]
u_n = u_n[0:nuse]
# Subsample data to obtain uncorrelated samples
N_k = numpy.zeros(nstates, numpy.int32)
indices = timeseries.subsampleCorrelatedData(u_n) # indices of uncorrelated samples
#print u_n # DEBUG
#indices = range(0,u_n.size) # DEBUG - assume samples are uncorrelated
N = len(indices) # number of uncorrelated samples
N_k[:] = N
u_kln[:,:,0:N] = u_kln[:,:,indices]
print "number of uncorrelated samples:"
print N_k
print ""
#===================================================================================================
# Estimate free energy difference with MBAR.
#===================================================================================================
# Initialize MBAR (computing free energy estimates, which may take a while)
print "Computing free energy differences..."
mbar = MBAR(u_kln, N_k, verbose = False, method = 'self-consistent-iteration', maximum_iterations = 50000) # use slow self-consistent-iteration (the default)
#mbar = MBAR(u_kln, N_k, verbose = True, method = 'Newton-Raphson') # use faster Newton-Raphson solver
# Get matrix of dimensionless free energy differences and uncertainty estimate.
print "Computing covariance matrix..."
(Deltaf_ij, dDeltaf_ij) = mbar.getFreeEnergyDifferences(uncertainty_method='svd-ew')
# # Matrix of free energy differences
print "Deltaf_ij:"
for i in range(nstates):
for j in range(nstates):
print "%8.3f" % Deltaf_ij[i,j],
print ""
# print Deltaf_ij
# # Matrix of uncertainties in free energy difference (expectations standard deviations of the estimator about the true free energy)
print "dDeltaf_ij:"
for i in range(nstates):
for j in range(nstates):
print "%8.3f" % dDeltaf_ij[i,j],
print ""
# Return free energy differences and an estimate of the covariance.
return (Deltaf_ij, dDeltaf_ij)
def estimate_enthalpies(ncfile, ndiscard = 0, nuse = None):
"""Estimate enthalpies of all alchemical states.
ARGUMENTS
ncfile (NetCDF) - input YANK netcdf file
OPTIONAL ARGUMENTS
ndiscard (int) - number of iterations to discard to equilibration
nuse (int) - number of iterations to use (after discarding)
TODO: Automatically determine 'ndiscard'.
TODO: Combine some functions with estimate_free_energies.
"""
# Get current dimensions.
niterations = ncfile.variables['energies'].shape[0]
nstates = ncfile.variables['energies'].shape[1]
natoms = ncfile.variables['energies'].shape[2]
# Extract energies.
print "Reading energies..."
energies = ncfile.variables['energies']
u_kln_replica = numpy.zeros([nstates, nstates, niterations], numpy.float64)
for n in range(niterations):
u_kln_replica[:,:,n] = energies[n,:,:]
print "Done."
# Deconvolute replicas
print "Deconvoluting replicas..."
u_kln = numpy.zeros([nstates, nstates, niterations], numpy.float64)
for iteration in range(niterations):
state_indices = ncfile.variables['states'][iteration,:]
u_kln[state_indices,:,iteration] = energies[iteration,:,:]
print "Done."
# Compute total negative log probability over all iterations.
u_n = numpy.zeros([niterations], numpy.float64)
for iteration in range(niterations):
u_n[iteration] = numpy.sum(numpy.diagonal(u_kln[:,:,iteration]))
#print u_n
# DEBUG
outfile = open('u_n.out', 'w')
for iteration in range(niterations):
outfile.write("%8d %24.3f\n" % (iteration, u_n[iteration]))
outfile.close()
# Discard initial data to equilibration.
u_kln_replica = u_kln_replica[:,:,ndiscard:]
u_kln = u_kln[:,:,ndiscard:]
u_n = u_n[ndiscard:]
# Truncate to number of specified conformations to use
if (nuse):
u_kln_replica = u_kln_replica[:,:,0:nuse]
u_kln = u_kln[:,:,0:nuse]
u_n = u_n[0:nuse]
# Subsample data to obtain uncorrelated samples
N_k = numpy.zeros(nstates, numpy.int32)
indices = timeseries.subsampleCorrelatedData(u_n) # indices of uncorrelated samples
#print u_n # DEBUG
#indices = range(0,u_n.size) # DEBUG - assume samples are uncorrelated
N = len(indices) # number of uncorrelated samples
N_k[:] = N
u_kln[:,:,0:N] = u_kln[:,:,indices]
print "number of uncorrelated samples:"
print N_k
print ""
# Compute average enthalpies.
H_k = numpy.zeros([nstates], numpy.float64) # H_i[i] is estimated enthalpy of state i
dH_k = numpy.zeros([nstates], numpy.float64)
for k in range(nstates):
H_k[k] = u_kln[k,k,:].mean()
dH_k[k] = u_kln[k,k,:].std() / numpy.sqrt(N)
return (H_k, dH_k)
def extract_u_n(ncfile):
"""
Extract timeseries of u_n = - log q(x_n)
"""
# Get current dimensions.
niterations = ncfile.variables['energies'].shape[0]
nstates = ncfile.variables['energies'].shape[1]
natoms = ncfile.variables['energies'].shape[2]
# Extract energies.
print "Reading energies..."
energies = ncfile.variables['energies']
u_kln_replica = numpy.zeros([nstates, nstates, niterations], numpy.float64)
for n in range(niterations):
u_kln_replica[:,:,n] = energies[n,:,:]
print "Done."
# Deconvolute replicas
print "Deconvoluting replicas..."
u_kln = numpy.zeros([nstates, nstates, niterations], numpy.float64)
for iteration in range(niterations):
state_indices = ncfile.variables['states'][iteration,:]
u_kln[state_indices,:,iteration] = energies[iteration,:,:]
print "Done."
# Compute total negative log probability over all iterations.
u_n = numpy.zeros([niterations], numpy.float64)
for iteration in range(niterations):
u_n[iteration] = numpy.sum(numpy.diagonal(u_kln[:,:,iteration]))
return u_n
def detect_equilibration(A_t):
"""
Automatically detect equilibrated region.
ARGUMENTS
A_t (numpy.array) - timeseries
RETURNS
t (int) - start of equilibrated data
g (float) - statistical inefficiency of equilibrated data
Neff_max (float) - number of uncorrelated samples
"""
T = A_t.size
# Special case if timeseries is constant.
if A_t.std() == 0.0:
return (0, 1, T)
g_t = numpy.ones([T-1], numpy.float32)
Neff_t = numpy.ones([T-1], numpy.float32)
for t in range(T-1):
g_t[t] = timeseries.statisticalInefficiency(A_t[t:T])
Neff_t[t] = (T-t+1) / g_t[t]
Neff_max = Neff_t.max()
t = Neff_t.argmax()
g = g_t[t]
return (t, g, Neff_max)
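# --- Illustrative usage sketch (not part of the original module) ---
# Typical composition of the helpers above: extract the u_n timeseries from a YANK
# NetCDF file, locate the equilibrated region, and discard the burn-in before the
# MBAR estimate. The file name is made up.
#   ncfile = netcdf.Dataset('repex.nc', 'r')
#   u_n = extract_u_n(ncfile)
#   (nequil, g, Neff_max) = detect_equilibration(u_n)
#   (Deltaf_ij, dDeltaf_ij) = estimate_free_energies(ncfile, ndiscard=nequil)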
#=============================================================================================
# MAIN AND TESTS
#=============================================================================================
if __name__ == '__main__':
import doctest
doctest.testmod()
|
choderalab/brokenyank
|
src/yank/analysis.py
|
Python
|
lgpl-3.0
| 33,936
|
[
"Amber",
"NetCDF"
] |
858f38099a74df20ebf8aad7f48db26af6229e02ac087d690965835101dc5558
|
skills = [
{
"id" : "0001",
"name" : "Liver of Steel",
"type" : "Passive",
"isPermable" : False,
"effects" :
{
"maximumInebriety" : "+5",
},
},
{
"id" : "0002",
"name" : "Chronic Indigestion",
"type" : "Combat",
"mpCost" : 5,
},
{
"id" : "0003",
"name" : "The Smile of Mr. A.",
"type" : "Buff",
"mpCost" : 5,
"isPermable" : False,
},
{
"id" : "0004",
"name" : "Arse Shoot",
"type" : "Buff",
"mpCost" : 5,
"isPermable" : False,
},
{
"id" : "0005",
"name" : "Stomach of Steel",
"type" : "Passive",
"isPermable" : False,
"effects" :
{
"maximumFullness" : "+5",
},
},
{
"id" : "0006",
"name" : "Spleen of Steel",
"type" : "Passive",
"isPermable" : False,
"effects" :
{
"maximumSpleen" : "+5",
},
},
{
"id" : "0010",
"name" : "Powers of Observatiogn",
"type" : "Passive",
"effects" :
{
"itemDrop" : "+10%",
},
},
{
"id" : "0011",
"name" : "Gnefarious Pickpocketing",
"type" : "Passive",
"effects" :
{
"meatDrop" : "+10%",
},
},
{
"id" : "0012",
"name" : "Torso Awaregness",
"type" : "Passive",
},
{
"id" : "0013",
"name" : "Gnomish Hardigness",
"type" : "Passive",
"effects" :
{
"maximumHP" : "+5%",
},
},
{
"id" : "0014",
"name" : "Cosmic Ugnderstanding",
"type" : "Passive",
"effects" :
{
"maximumMP" : "+5%",
},
},
{
"id" : "0015",
"name" : "CLEESH",
"type" : "Combat",
"mpCost" : 10,
},
{
"id" : "0019",
"name" : "Transcendent Olfaction",
"type" : "Combat",
"mpCost" : 40,
"isAutomaticallyPermed" : True,
},
{
"id" : "0020",
"name" : "Really Expensive Jewelrycrafting",
"type" : "Passive",
"isPermable" : False,
},
{
"id" : "0021",
"name" : "Lust",
"type" : "Passive",
"isPermable" : False,
"effects" :
{
"combatInitiative" : "+50%",
"spellDamage" : "-5",
"meleeDamage" : "-5",
},
},
{
"id" : "0022",
"name" : "Gluttony",
"type" : "Passive",
"isPermable" : False,
"effects" :
{
"strengthensFood" : True,
"statsPerFight" : "-2",
},
},
{
"id" : "0023",
"name" : "Greed",
"type" : "Passive",
"isPermable" : False,
"effects" :
{
"meatDrop" : "+50%",
"itemDrop" : "-15%",
},
},
{
"id" : "0024",
"name" : "Sloth",
"type" : "Passive",
"isPermable" : False,
"effects" :
{
"damageReduction" : "+8",
"combatInitiative" : "-25%",
},
},
{
"id" : "0025",
"name" : "Wrath",
"type" : "Passive",
"isPermable" : False,
"effects" :
{
"spellDamage" : "+10",
"meleeDamage" : "+10",
"damageReduction" : "-4",
},
},
{
"id" : "0026",
"name" : "Envy",
"type" : "Passive",
"isPermable" : False,
"effects" :
{
"itemDrop" : "+30%",
"meatDrop" : "-25%",
},
},
{
"id" : "0027",
"name" : "Pride",
"type" : "Passive",
"isPermable" : False,
"effects" :
{
"statsPerFight" : "+4",
"weakensFood" : True,
},
},
{
"id" : "0028",
"name" : "Awesome Balls of Fire",
"type" : "Combat",
"mpCost" : 120,
},
{
"id" : "0029",
"name" : "Conjure Relaxing Campfire",
"type" : "Combat",
"mpCost" : 30,
},
{
"id" : "0030",
"name" : "Snowclone",
"type" : "Combat",
"mpCost" : 120,
},
{
"id" : "0031",
"name" : "Maximum Chill",
"type" : "Combat",
"mpCost" : 30,
},
{
"id" : "0032",
"name" : "Eggsplosion",
"type" : "Combat",
"mpCost" : 120,
},
{
"id" : "0033",
"name" : "Mudbath",
"type" : "Combat",
"mpCost" : 30,
},
{
"id" : "0036",
"name" : "Grease Lightning",
"type" : "Combat",
"mpCost" : 120,
},
{
"id" : "0037",
"name" : "Inappropriate Backrub",
"type" : "Combat",
"mpCost" : 30,
},
{
"id" : "0038",
"name" : "Natural Born Scrabbler",
"type" : "Passive",
"effects" :
{
"itemDrop" : "+5%",
},
},
{
"id" : "0039",
"name" : "Thrift and Grift",
"type" : "Passive",
"effects" :
{
"meatDrop" : "+10%",
},
},
{
"id" : "0040",
"name" : "Abs of Tin",
"type" : "Passive",
"effects" :
{
"maximumHP" : "+10%",
},
},
{
"id" : "0041",
"name" : "Marginally Insane",
"type" : "Passive",
"effects" :
{
"maximumMP" : "+10%",
},
},
{
"id" : "0042",
"name" : "Raise Backup Dancer",
"type" : "Combat",
"mpCost" : 120,
},
{
"id" : "0043",
"name" : "Creepy Lullaby",
"type" : "Combat",
"mpCost" : 30,
},
{
"id" : "0044",
"name" : "Rainbow Gravitation",
"type" : "Noncombat",
"mpCost" : 30,
},
{
"id" : "1000",
"name" : "Seal Clubbing Frenzy",
"type" : "Noncombat",
"mpCost" : 1,
},
{
"id" : "1003",
"name" : "Thrust-Smack",
"type" : "Combat",
"mpCost" : 3,
},
{
"id" : "1004",
"name" : "Lunge-Smack",
"type" : "Combat",
"mpCost" : 5,
},
{
"id" : "1005",
"name" : "Lunging Thrust-Smack",
"type" : "Combat",
"mpCost" : 8,
},
{
"id" : "1006",
"name" : "Super-Advanced Meatsmithing",
"type" : "Passive",
},
{
"id" : "1007",
"name" : "Tongue of the Otter",
"type" : "Noncombat",
"mpCost" : 7,
},
{
"id" : "1008",
"name" : "Hide of the Otter",
"type" : "Passive",
},
{
"id" : "1009",
"name" : "Claws of the Otter",
"type" : "Passive",
},
{
"id" : "1010",
"name" : "Tongue of the Walrus",
"type" : "Noncombat",
"mpCost" : 10,
},
{
"id" : "1011",
"name" : "Hide of the Walrus",
"type" : "Passive",
},
{
"id" : "1012",
"name" : "Claws of the Walrus",
"type" : "Passive",
},
{
"id" : "1014",
"name" : "Eye of the Stoat",
"type" : "Passive",
},
{
"id" : "1015",
"name" : "Rage of the Reindeer",
"type" : "Noncombat",
"mpCost" : 10,
},
{
"id" : "1016",
"name" : "Pulverize",
"type" : "Passive",
},
{
"id" : "1017",
"name" : "Double-Fisted Skull Smashing",
"type" : "Passive",
},
{
"id" : "1018",
"name" : "Northern Exposure",
"type" : "Passive",
},
{
"id" : "1019",
"name" : "Musk of the Moose",
"type" : "Noncombat",
"mpCost" : 10,
},
{
"id" : "1020",
"name" : "Snarl of the Timberwolf",
"type" : "Noncombat",
"mpCost" : 10,
},
{
"id" : "2000",
"name" : "Patience of the Tortoise",
"type" : "Noncombat",
"mpCost" : 1,
},
{
"id" : "2003",
"name" : "Headbutt",
"type" : "Combat",
"mpCost" : 3,
},
{
"id" : "2004",
"name" : "Skin of the Leatherback",
"type" : "Passive",
},
{
"id" : "2005",
"name" : "Shieldbutt",
"type" : "Combat",
"mpCost" : 5,
},
{
"id" : "2006",
"name" : "Armorcraftiness",
"type" : "Passive",
},
{
"id" : "2007",
"name" : "Ghostly Shell",
"type" : "Buff",
"mpCost" : 6,
},
{
"id" : "2008",
"name" : "Reptilian Fortitude",
"type" : "Buff",
"mpCost" : 10,
},
{
"id" : "2009",
"name" : "Empathy of the Newt",
"type" : "Buff",
"mpCost" : 15,
},
{
"id" : "2010",
"name" : "Tenacity of the Snapper",
"type" : "Buff",
"mpCost" : 8,
},
{
"id" : "2011",
"name" : "Wisdom of the Elder Tortoises",
"type" : "Passive",
},
{
"id" : "2012",
"name" : "Astral Shell",
"type" : "Buff",
"mpCost" : 10,
},
{
"id" : "2014",
"name" : "Amphibian Sympathy",
"type" : "Passive",
},
{
"id" : "2015",
"name" : "Kneebutt",
"type" : "Combat",
"mpCost" : 4,
},
{
"id" : "2016",
"name" : "Cold-Blooded Fearlessness",
"type" : "Passive",
},
{
"id" : "2020",
"name" : "Hero of the Half-Shell",
"type" : "Passive",
},
{
"id" : "2021",
"name" : "Tao of the Terrapin",
"type" : "Passive",
},
{
"id" : "2022",
"name" : "Spectral Snapper",
"type" : "Combat",
"mpCost" : 20,
},
{
"id" : "2103",
"name" : "Head + Knee Combo",
"type" : "Combat",
"mpCost" : 8,
},
{
"id" : "2105",
"name" : "Head + Shield Combo",
"type" : "Combat",
"mpCost" : 9,
},
{
"id" : "2106",
"name" : "Knee + Shield Combo",
"type" : "Combat",
"mpCost" : 10,
},
{
"id" : "2107",
"name" : "Head + Knee + Shield Combo",
"type" : "Combat",
"mpCost" : 13,
},
{
"id" : "3000",
"name" : "Manicotti Meditation",
"type" : "Noncombat",
"mpCost" : 1,
},
{
"id" : "3003",
"name" : "Ravioli Shurikens",
"type" : "Combat",
"mpCost" : 4,
},
{
"id" : "3004",
"name" : "Entangling Noodles",
"type" : "Combat",
"mpCost" : 3,
},
{
"id" : "3005",
"name" : "Cannelloni Cannon",
"type" : "Combat",
"mpCost" : 7,
},
{
"id" : "3006",
"name" : "Pastamastery",
"type" : "Noncombat",
"mpCost" : 10,
},
{
"id" : "3007",
"name" : "Stuffed Mortar Shell",
"type" : "Combat",
"mpCost" : 19,
},
{
"id" : "3008",
"name" : "Weapon of the Pastalord",
"type" : "Combat",
"mpCost" : 35,
},
{
"id" : "3009",
"name" : "Lasagna Bandages",
"type" : "Combat / Noncombat",
"mpCost" : 6,
},
{
"id" : "3010",
"name" : "Leash of Linguini",
"type" : "Noncombat",
"mpCost" : 12,
},
{
"id" : "3011",
"name" : "Spirit of Rigatoni",
"type" : "Passive",
},
{
"id" : "3012",
"name" : "Cannelloni Cocoon",
"type" : "Noncombat",
"mpCost" : 20,
},
{
"id" : "3014",
"name" : "Spirit of Ravioli",
"type" : "Passive",
},
{
"id" : "3015",
"name" : "Springy Fusilli",
"type" : "Noncombat",
"mpCost" : 10,
},
{
"id" : "3016",
"name" : "Tolerance of the Kitchen",
"type" : "Passive",
},
{
"id" : "3017",
"name" : "Flavour of Magic",
"type" : "Noncombat",
"mpCost" : 10,
},
{
"id" : "3018",
"name" : "Transcendental Noodlecraft",
"type" : "Passive",
},
{
"id" : "3019",
"name" : "Fearful Fettucini",
"type" : "Combat",
"mpCost" : 35,
},
{
"id" : "3020",
"name" : "Spaghetti Spear",
"type" : "Combat",
"mpCost" : 1,
},
{
"id" : "3101",
"name" : "Spirit of Cayenne",
"type" : "Noncombat",
"mpCost" : 10,
},
{
"id" : "3102",
"name" : "Spirit of Peppermint",
"type" : "Noncombat",
"mpCost" : 10,
},
{
"id" : "3103",
"name" : "Spirit of Garlic",
"type" : "Noncombat",
"mpCost" : 10,
},
{
"id" : "3104",
"name" : "Spirit of Wormwood",
"type" : "Noncombat",
"mpCost" : 10,
},
{
"id" : "3105",
"name" : "Spirit of Bacon Grease",
"type" : "Noncombat",
"mpCost" : 10,
},
{
"id" : "4000",
"name" : "Sauce Contemplation",
"type" : "Noncombat",
"mpCost" : 1,
},
{
"id" : "4003",
"name" : "Stream of Sauce",
"type" : "Combat",
"mpCost" : 3,
},
{
"id" : "4004",
"name" : "Expert Panhandling",
"type" : "Passive",
},
{
"id" : "4005",
"name" : "Saucestorm",
"type" : "Combat",
"mpCost" : 12,
},
{
"id" : "4006",
"name" : "Advanced Saucecrafting",
"type" : "Noncombat",
"mpCost" : 10,
},
{
"id" : "4007",
"name" : "Elemental Saucesphere",
"type" : "Buff",
"mpCost" : 10,
},
{
"id" : "4008",
"name" : "Jalapeno Saucesphere",
"type" : "Buff",
"mpCost" : 5,
},
{
"id" : "4009",
"name" : "Wave of Sauce",
"type" : "Combat",
"mpCost" : 23,
},
{
"id" : "4010",
"name" : "Intrinsic Spiciness",
"type" : "Passive",
},
{
"id" : "4011",
"name" : "Jabanero Saucesphere",
"type" : "Buff",
"mpCost" : 10,
},
{
"id" : "4012",
"name" : "Saucegeyser",
"type" : "Combat",
"mpCost" : 40,
},
{
"id" : "4014",
"name" : "Saucy Salve",
"type" : "Combat",
"mpCost" : 4,
},
{
"id" : "4015",
"name" : "Impetuous Sauciness",
"type" : "Passive",
},
{
"id" : "4016",
"name" : "Diminished Gag Reflex",
"type" : "Passive",
},
{
"id" : "4017",
"name" : "Immaculate Seasoning",
"type" : "Passive",
},
{
"id" : "4018",
"name" : "The Way of Sauce",
"type" : "Passive",
},
{
"id" : "4019",
"name" : "Scarysauce",
"type" : "Buff",
"mpCost" : 10,
},
{
"id" : "4020",
"name" : "Salsaball",
"type" : "Combat",
"mpCost" : 1,
},
{
"id" : "5000",
"name" : "Disco Aerobics",
"type" : "Noncombat",
"mpCost" : 1,
},
{
"id" : "5003",
"name" : "Disco Eye-Poke",
"type" : "Combat",
"mpCost" : 3,
},
{
"id" : "5004",
"name" : "Nimble Fingers",
"type" : "Passive",
},
{
"id" : "5005",
"name" : "Disco Dance of Doom",
"type" : "Combat",
"mpCost" : 5,
},
{
"id" : "5006",
"name" : "Mad Looting Skillz",
"type" : "Passive",
},
{
"id" : "5007",
"name" : "Disco Nap",
"type" : "Noncombat",
"mpCost" : 8,
},
{
"id" : "5008",
"name" : "Disco Dance II: Electric Boogaloo",
"type" : "Combat",
"mpCost" : 7,
},
{
"id" : "5009",
"name" : "Disco Fever",
"type" : "Passive",
},
{
"id" : "5010",
"name" : "Overdeveloped Sense of Self Preservation",
"type" : "Passive",
},
{
"id" : "5011",
"name" : "Disco Power Nap",
"type" : "Noncombat",
"mpCost" : 12,
},
{
"id" : "5012",
"name" : "Disco Face Stab",
"type" : "Combat",
"mpCost" : 10,
},
{
"id" : "5014",
"name" : "Advanced Cocktailcrafting",
"type" : "Noncombat",
"mpCost" : 10,
},
{
"id" : "5015",
"name" : "Ambidextrous Funkslinging",
"type" : "Passive",
},
{
"id" : "5016",
"name" : "Heart of Polyester",
"type" : "Passive",
},
{
"id" : "5017",
"name" : "Smooth Movement",
"type" : "Noncombat",
"mpCost" : 10,
},
{
"id" : "5018",
"name" : "Superhuman Cocktailcrafting",
"type" : "Passive",
},
{
"id" : "5019",
"name" : "Tango of Terror",
"type" : "Combat",
"mpCost" : 8,
},
{
"id" : "6000",
"name" : "Moxie of the Mariachi",
"type" : "Noncombat",
"mpCost" : 1,
},
{
"id" : "6003",
"name" : "Aloysius' Antiphon of Aptitude",
"type" : "Buff",
"mpCost" : 40,
},
{
"id" : "6004",
"name" : "The Moxious Madrigal",
"type" : "Buff",
"mpCost" : 2,
},
{
"id" : "6005",
"name" : "Cletus's Canticle of Celerity",
"type" : "Buff",
"mpCost" : 4,
},
{
"id" : "6006",
"name" : "The Polka of Plenty",
"type" : "Buff",
"mpCost" : 7,
},
{
"id" : "6007",
"name" : "The Magical Mojomuscular Melody",
"type" : "Buff",
"mpCost" : 3,
},
{
"id" : "6008",
"name" : "The Power Ballad of the Arrowsmith",
"type" : "Buff",
"mpCost" : 5,
},
{
"id" : "6009",
"name" : "Brawnee's Anthem of Absorption",
"type" : "Buff",
"mpCost" : 13,
},
{
"id" : "6010",
"name" : "Fat Leon's Phat Loot Lyric",
"type" : "Buff",
"mpCost" : 11,
},
{
"id" : "6011",
"name" : "The Psalm of Pointiness",
"type" : "Buff",
"mpCost" : 15,
},
{
"id" : "6012",
"name" : "Jackasses' Symphony of Destruction",
"type" : "Buff",
"mpCost" : 9,
},
{
"id" : "6013",
"name" : "Stevedave's Shanty of Superiority",
"type" : "Buff",
"mpCost" : 30,
},
{
"id" : "6014",
"name" : "The Ode to Booze",
"type" : "Buff",
"mpCost" : 50,
},
{
"id" : "6015",
"name" : "The Sonata of Sneakiness",
"type" : "Buff",
"mpCost" : 20,
},
{
"id" : "6016",
"name" : "Carlweather's Cantata of Confrontation",
"type" : "Buff",
"mpCost" : 20,
},
{
"id" : "6017",
"name" : "Ur-Kel's Aria of Annoyance",
"type" : "Buff",
"mpCost" : 30,
},
{
"id" : "6018",
"name" : "Dirge of Dreadfulness",
"type" : "Buff",
"mpCost" : 9,
},
{
"id" : "6020",
"name" : "The Ballad of Richie Thingfinder",
"type" : "Buff",
"mpCost" : 50,
},
{
"id" : "6021",
"name" : "Benetton's Medley of Diversity",
"type" : "Buff",
"mpCost" : 50,
},
{
"id" : "6022",
"name" : "Elron's Explosive Etude",
"type" : "Buff",
"mpCost" : 50,
},
{
"id" : "6023",
"name" : "Chorale of Companionship",
"type" : "Buff",
"mpCost" : 50,
},
{
"id" : "6024",
"name" : "Prelude of Precision",
"type" : "Buff",
"mpCost" : 50,
},
{
"id": "6026",
"name": "Donho's Bubbly Ballad",
"type": "Buff",
"mpCost": 75
},
{
"id" : "6028",
"name" : "Inigo's Incantation of Inspiration",
"type" : "Buff",
"mpCost" : 100,
},
{
"id" : "7001",
"name" : "Give In To Your Vampiric Urges",
"type" : "Combat",
"mpCost" : 0,
},
{
"id" : "7002",
"name" : "Shake Hands",
"type" : "Combat",
"mpCost" : 0,
},
{
"id" : "7003",
"name" : "Hot Breath",
"type" : "Combat",
"mpCost" : 5,
},
{
"id" : "7004",
"name" : "Cold Breath",
"type" : "Combat",
"mpCost" : 5,
},
{
"id" : "7005",
"name" : "Spooky Breath",
"type" : "Combat",
"mpCost" : 5,
},
{
"id" : "7006",
"name" : "Stinky Breath",
"type" : "Combat",
"mpCost" : 5,
},
{
"id" : "7007",
"name" : "Sleazy Breath",
"type" : "Combat",
"mpCost" : 5,
},
{
"id" : "7008",
"name" : "Moxious Maneuver",
"type" : "Combat",
},
{
"id" : "7009",
"name" : "Magic Missile",
"type" : "Combat",
"mpCost" : 2,
},
{
"id" : "7010",
"name" : "Fire Red Bottle-Rocket",
"type" : "Combat",
"mpCost" : 5,
},
{
"id" : "7011",
"name" : "Fire Blue Bottle-Rocket",
"type" : "Combat",
"mpCost" : 5,
},
{
"id" : "7012",
"name" : "Fire Orange Bottle-Rocket",
"type" : "Combat",
"mpCost" : 5,
},
{
"id" : "7013",
"name" : "Fire Purple Bottle-Rocket",
"type" : "Combat",
"mpCost" : 5,
},
{
"id" : "7014",
"name" : "Fire Black Bottle-Rocket",
"type" : "Combat",
"mpCost" : 5,
},
{
"id" : "7015",
"name" : "Creepy Grin",
"type" : "Combat",
"mpCost" : 30,
},
{
"id" : "7016",
"name" : "Start Trash Fire",
"type" : "Combat",
"mpCost" : 100,
},
{
"id" : "7017",
"name" : "Overload Discarded Refrigerator",
"type" : "Combat",
"mpCost" : 100,
},
{
"id" : "7018",
"name" : "Trashquake",
"type" : "Combat",
"mpCost" : 100,
},
{
"id" : "7019",
"name" : "Zombo's Visage",
"type" : "Combat",
"mpCost" : 100,
},
{
"id" : "7020",
"name" : "Hypnotize Hobo",
"type" : "Combat",
"mpCost" : 100,
},
{
"id" : "7021",
"name" : "Ask Richard for a Bandage",
"type" : "Combat",
},
{
"id" : "7022",
"name" : "Ask Richard for a Grenade",
"type" : "Combat",
},
{
"id" : "7023",
"name" : "Ask Richard to Rough the Hobo Up a Bit",
"type" : "Combat",
},
{
"id" : "7024",
"name" : "Summon Mayfly Swarm",
"type" : "Combat",
"mpCost" : 0,
},
{
"id" : "7025",
"name" : "Get a You-Eye View",
"type" : "Combat",
"mpCost" : 30,
},
{
"id" : "7038",
"name" : "Vicious Talon Slash",
"type" : "Combat",
"mpCost" : 5,
},
{
"id" : "7039",
"name" : "All-You-Can-Beat Wing Buffet",
"type" : "Combat",
"mpCost" : 10,
},
{
"id" : "7040",
"name" : "Tunnel Upwards",
"type" : "Combat",
"mpCost" : 0,
},
{
"id" : "7041",
"name" : "Tunnel Downwards",
"type" : "Combat",
"mpCost" : 0,
},
{
"id" : "7042",
"name" : "Rise From Your Ashes",
"type" : "Combat",
"mpCost" : 20,
},
{
"id" : "7043",
"name" : "Antarctic Flap",
"type" : "Combat",
"mpCost" : 10,
},
{
"id" : "7044",
"name" : "The Statue Treatment",
"type" : "Combat",
"mpCost" : 20,
},
{
"id" : "7045",
"name" : "Feast on Carrion",
"type" : "Combat",
"mpCost" : 20,
},
{
"id" : "7046",
"name" : "Give Your Opponent \"The Bird\"",
"type" : "Combat",
"mpCost" : 20,
},
{
"id" : "7047",
"name" : "Ask the hobo for a drink",
"type" : "Combat",
"mpCost" : 0,
},
{
"id" : "7048",
"name" : "Ask the hobo for something to eat",
"type" : "Combat",
"mpCost" : 0,
},
{
"id" : "7049",
"name" : "Ask the hobo for some violence",
"type" : "Combat",
"mpCost" : 0,
},
{
"id" : "7050",
"name" : "Ask the hobo to tell you a joke",
"type" : "Combat",
"mpCost" : 0,
},
{
"id" : "7051",
"name" : "Ask the hobo to dance for you",
"type" : "Combat",
"mpCost" : 0,
},
{
"id" : "7052",
"name" : "Summon hobo underling",
"type" : "Combat",
"mpCost" : 0,
},
{
"id" : "7053",
"name" : "Rouse Sapling",
"type" : "Combat",
"mpCost" : 15,
},
{
"id" : "7054",
"name" : "Spray Sap",
"type" : "Combat",
"mpCost" : 15,
},
{
"id" : "7055",
"name" : "Put Down Roots",
"type" : "Combat",
"mpCost" : 15,
},
{
"id" : "7056",
"name" : "Fire off a Roman Candle",
"type" : "Combat",
"mpCost" : 10,
},
{
"id" : "7061",
"name" : "Spring Raindrop Attack",
"type" : "Combat",
"mpCost" : 0,
},
{
"id" : "7062",
"name" : "Summer Siesta",
"type" : "Combat",
"mpCost" : 10,
},
{
"id" : "7063",
"name" : "Falling Leaf Whirlwind",
"type" : "Combat",
"mpCost" : 10,
},
{
"id" : "7064",
"name" : "Winter's Bite Technique",
"type" : "Combat",
"mpCost" : 10,
},
{
"id" : "7065",
"name" : "The 17 Cuts",
"type" : "Combat",
"mpCost" : 10,
},
{
"id" : "8000",
"name" : "Summon Snowcones",
"type" : "Mystical Bookshelf",
"mpCost" : 5,
},
{
"id" : "8100",
"name" : "Summon Candy Heart",
"type" : "Mystical Bookshelf",
},
{
"id" : "8101",
"name" : "Summon Party Favor",
"type" : "Mystical Bookshelf",
},
{
"id" : "8200",
"name" : "Summon Hilarious Objects",
"type" : "Mystical Bookshelf",
"mpCost" : 5,
},
{
"id" : "8201",
"name" : "Summon \"Tasteful\" Gifts",
"type" : "Mystical Bookshelf",
"mpCost" : 5,
},
]
|
KevZho/buffbot
|
kol/data/Skills.py
|
Python
|
mit
| 27,396
|
[
"MOOSE",
"SIESTA"
] |
1c78c2042432896d542f2987209bd10a6429ae5a62040ff0e8d4d4dd5edb1b12
|
"""Fetch the NASA POWER Dataset.
For now, this is run from RUN_2AM.sh each Monday for the current year.
"""
import sys
import time
import datetime
import subprocess
from tqdm import tqdm
import requests
import numpy as np
from pyiem import iemre
from pyiem.util import ncopen, logger, exponential_backoff
LOG = logger()
def main(argv):
"""Go Main Go."""
year = int(argv[1])
sts = datetime.date(year, 1, 1)
ets = min([datetime.date(year, 12, 31), datetime.date.today()])
current = {}
now = ets
while now >= sts:
ds = iemre.get_grids(now, varnames="power_swdn")
maxval = ds["power_swdn"].values.max()
if np.isnan(maxval) or maxval < 0:
LOG.debug("adding %s as currently empty", now)
current[now] = {"data": ds, "dirty": False}
now -= datetime.timedelta(days=1)
if not current:
LOG.info("Nothing to be done...")
return
sts = min(list(current.keys()))
ets = max(list(current.keys()))
LOG.debug("running between %s and %s", sts, ets)
queue = []
# 10x10 degree chunk is the max request size...
for x0 in np.arange(iemre.WEST, iemre.EAST, 10.0):
for y0 in np.arange(iemre.SOUTH, iemre.NORTH, 10.0):
queue.append([x0, y0])
for x0, y0 in tqdm(queue, disable=not sys.stdout.isatty()):
url = (
"https://power.larc.nasa.gov/api/temporal/daily/regional?"
"latitude-min=%s&latitude-max=%s&longitude-min=%s&"
"longitude-max=%s¶meters=ALLSKY_SFC_SW_DWN&community=SB&"
"start=%s&end=%s&format=NETCDF"
) % (
y0,
y0 + 9.9,
x0,
x0 + 9.9,
sts.strftime("%Y%m%d"),
ets.strftime("%Y%m%d"),
)
req = exponential_backoff(requests.get, url, timeout=60)
# Can't find docs on how many requests/sec are allowed...
if req is not None and req.status_code == 429:
LOG.debug("Got 429 (too-many-requests), sleeping 60")
time.sleep(60)
req = exponential_backoff(requests.get, url, timeout=60)
if req is None or req.status_code != 200:
LOG.info(
"failed to download %s with %s %s",
url,
"req is none" if req is None else req.status_code,
"req is none" if req is None else req.text,
)
continue
ncfn = f"/tmp/power{year}.nc"
with open(ncfn, "wb") as fh:
for chunk in req.iter_content(chunk_size=1024):
if chunk:
fh.write(chunk)
fh.close()
with ncopen(ncfn) as nc:
for day, _ in enumerate(nc.variables["time"][:]):
date = sts + datetime.timedelta(days=day)
if date not in current:
continue
# W/m2 to MJ/d 86400 / 1e6
data = nc.variables["ALLSKY_SFC_SW_DWN"][day, :, :] * 0.0864
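# (1 W/m^2 sustained over a day = 86400 J/m^2 = 0.0864 MJ/m^2, hence the factor)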
# Sometimes there are missing values?
if np.ma.is_masked(data):
data[data.mask] = np.mean(data)
i, j = iemre.find_ij(x0, y0)
# resample data is 0.5, iemre is 0.125
data = np.repeat(np.repeat(data, 4, axis=0), 4, axis=1)
data = np.where(data < 0, np.nan, data)
shp = np.shape(data)
jslice = slice(j, min([j + shp[0], iemre.NY]))
islice = slice(i, min([i + shp[1], iemre.NX]))
# LOG.debug("islice %s jslice: %s", islice, jslice)
# align grids
data = data[
slice(0, jslice.stop - jslice.start),
slice(0, islice.stop - islice.start),
]
# get currentdata
present = current[date]["data"]["power_swdn"].values[
jslice, islice
]
if present.mean() == data.mean():
continue
current[date]["data"]["power_swdn"].values[
jslice, islice
] = data
current[date]["dirty"] = True
for date in current:
if not current[date]["dirty"]:
continue
LOG.debug("saving %s", date)
iemre.set_grids(date, current[date]["data"])
subprocess.call(
"python ../iemre/db_to_netcdf.py %s"
% (date.strftime("%Y %m %d"),),
shell=True,
)
if __name__ == "__main__":
main(sys.argv)
|
akrherz/iem
|
scripts/dl/fetch_power.py
|
Python
|
mit
| 4,562
|
[
"NetCDF"
] |
93bdb68d9684e3f41e9d1e55ee0f6db61236e533d736df5d819cb71e0717a2d5
|
"""Testing out vtk
"""
import os
import string
import time
import pdb
import numpy as np
import vtk
from vtk.util import numpy_support
from vtk.util.colors import tomato
from vtk.util.misc import vtkGetDataRoot
from point_cloud import wavefront, raycaster
from visualization import graphics
def vtk_example():
# generate a polygon data for a cube
cube = vtk.vtkCubeSource()
# create a mapper for the cube data
cube_mapper = vtk.vtkPolyDataMapper()
cube_mapper.SetInputData(cube.GetOutput())
# connect the mapper to an actor
cube_actor = vtk.vtkActor()
cube_actor.SetMapper(cube_mapper)
cube_actor.GetProperty().SetColor(1.0, 0.0, 0.0)
# render the cube actor
renderer = vtk.vtkRenderer()
renderer.SetBackground(0.0, 0.0, 0.0)
renderer.AddActor(cube_actor)
# create a render window
render_window = vtk.vtkRenderWindow()
render_window.SetWindowName("Simple VTK Scene")
render_window.SetSize(400, 400)
render_window.AddRenderer(renderer)
# create the interactor
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(render_window)
# initialize the interactor
interactor.Initialize()
render_window.Render()
interactor.Start()
def vtk_cylinder():
# create a polygon cylinder
cylinder = vtk.vtkCylinderSource()
cylinder.SetResolution(8)
# the mapper pushes the geometry into the graphics library
cylinderMapper = vtk.vtkPolyDataMapper()
cylinderMapper.SetInputConnection(cylinder.GetOutputPort())
# the actor is a grouping mechanism
cylinderActor = vtk.vtkActor()
cylinderActor.SetMapper(cylinderMapper)
cylinderActor.GetProperty().SetColor(tomato)
cylinderActor.RotateX(30.0)
cylinderActor.RotateY(-45.0)
# create the graphics structure.
# create the render window
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set background and size
ren.AddActor(cylinderActor)
ren.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(200, 200)
# initialize the interactor
iren.Initialize()
# zoom in the window
ren.ResetCamera()
ren.GetActiveCamera().Zoom(1.5)
renWin.Render()
# start the event loop
iren.Start()
def vtk_distance():
p0 = (0, 0, 0)
p1 = (1, 1, 1)
distSquared = vtk.vtkMath.Distance2BetweenPoints(p0, p1)
dist = np.sqrt(distSquared)
print("p0 = {}".format(p0))
print("p1 = {}".format(p1))
print("distance squared = {}".format(distSquared))
print("distance = {}".format(dist))
def vtk_plywriter():
"""Write to ply writer
"""
filename = '/tmp/writeply.ply'
sphereSource = vtk.vtkSphereSource()
sphereSource.Update()
plyWriter = vtk.vtkPLYWriter()
plyWriter.SetFileName(filename)
plyWriter.SetInputConnection(sphereSource.GetOutputPort())
plyWriter.Write()
# read and display for verification
reader = vtk.vtkPLYReader()
reader.SetFileName(filename)
reader.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(reader.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderer.AddActor(actor)
renderer.SetBackground(0.3, 0.6, 0.3)
renderWindow.Render()
renderWindowInteractor.Start()
def vtk_stlwriter():
filename = '/tmp/test.stl'
sphereSource = vtk.vtkSphereSource()
sphereSource.Update()
# write the stl file to a disk
stlWriter = vtk.vtkSTLWriter()
stlWriter.SetFileName(filename)
stlWriter.SetInputConnection(sphereSource.GetOutputPort())
stlWriter.Write()
# read and display for verification
reader = vtk.vtkSTLReader()
reader.SetFileName(filename)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInput(reader.GetOutput())
else:
mapper.SetInputConnection(reader.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# create a rendering window and renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
# create a renderwindowinteractor
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# assign actor to the renderer
ren.AddActor(actor)
# enable user interface interactor
iren.Initialize()
renWin.Render()
iren.Start()
def vtk_stlreader(filename='/tmp/test.stl'):
reader = vtk.vtkSTLReader()
reader.SetFileName(filename)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInput(reader.GetOutput())
else:
mapper.SetInputConnection(reader.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.AddActor(actor)
iren.Initialize()
renWin.Render()
iren.Start()
def reconstruct_surface():
"""Example of constructing a surface from a point cloud
https://github.com/Kitware/VTK/blob/a1a94d0ca96854fe72480cf2ec031a533b129b04/Examples/Modelling/Python/reconstructSurface.py
"""
pointSource = vtk.vtkProgrammableSource()
def readPoints():
output = pointSource.GetPolyDataOutput()
points = vtk.vtkPoints()
output.SetPoints(points)
fname = open('./data/point_clouds/cactus.3337.pts')
line = fname.readline()
while line:
data = line.split()
if data and data[0] == 'p':
x, y, z = float(data[1]), float(data[2]), float(data[3])
points.InsertNextPoint(x, y, z)
line = fname.readline()
pointSource.SetExecuteMethod(readPoints)
surf = vtk.vtkSurfaceReconstructionFilter()
surf.SetInputConnection(pointSource.GetOutputPort())
cf = vtk.vtkContourFilter()
cf.SetInputConnection(surf.GetOutputPort())
cf.SetValue(0, 0.0)
reverse = vtk.vtkReverseSense()
reverse.SetInputConnection(cf.GetOutputPort())
reverse.ReverseCellsOn()
reverse.ReverseNormalsOn()
map = vtk.vtkPolyDataMapper()
map.SetInputConnection(reverse.GetOutputPort())
map.ScalarVisibilityOff()
surfaceActor = vtk.vtkActor()
surfaceActor.SetMapper(map)
surfaceActor.GetProperty().SetDiffuseColor(1, 0.3882, 0.2784)
surfaceActor.GetProperty().SetSpecularColor(1, 1, 1)
surfaceActor.GetProperty().SetSpecular(0.4)
surfaceActor.GetProperty().SetSpecularPower(50)
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.AddActor(surfaceActor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(400, 400)
ren.GetActiveCamera().SetFocalPoint(0, 0, 0)
ren.GetActiveCamera().SetPosition(1, 0, 0)
ren.GetActiveCamera().SetViewUp(0, 0, 1)
ren.ResetCamera()
ren.GetActiveCamera().Azimuth(20)
ren.GetActiveCamera().Elevation(30)
ren.GetActiveCamera().Dolly(1.2)
ren.ResetCameraClippingRange()
iren.Initialize()
renWin.Render()
iren.Start()
def step1_cone():
"""Example of dealing with VTK
https://github.com/Kitware/VTK/blob/master/Examples/Tutorial/Step1/Python/Cone.py
"""
# Create a source object - returns vtkPolyData type
cone = vtk.vtkConeSource()
cone.SetHeight(3.0)
cone.SetRadius(1.0)
cone.SetResolution(10)
# no filters here - connect the source to the mapper to convert the data to a graphics primitive
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
# create the actor to represent the cone - this is what handles transforming the graphics primitives into something that can be rendered
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
# create the renderer - this is the figure/viewport where we create the image
ren1 = vtk.vtkRenderer()
ren1.AddActor(coneActor)
ren1.SetBackground(0.1, 0.2, 0.4)
# create the render window - this is what shows up on the screen
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.SetSize(300, 300)
# now animate
for i in range(0, 360):
time.sleep(0.03)
renWin.Render()
ren1.GetActiveCamera().Azimuth(1)
def step2_observer():
"""
This is an example of using a callback via an observer on the renderer.
Every time a render starts, the observer's callback function is run.
"""
def myCallback(obj, string):
print("Starting a render")
# create a basic pipeline
cone = vtk.vtkConeSource()
cone.SetHeight(3.0)
cone.SetRadius(1.0)
cone.SetResolution(10)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
ren1 = vtk.vtkRenderer()
ren1.AddActor(coneActor)
ren1.SetBackground(0.1, 0.2, 0.4)
# add observer
ren1.AddObserver("StartEvent", myCallback)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.SetSize(300, 300)
for i in range(0, 360):
time.sleep(0.03)
renWin.Render()
ren1.GetActiveCamera().Azimuth(1)
def step3_multiple_renderer():
"""Create multiple renderers inside a render window
"""
cone = vtk.vtkConeSource()
cone.SetHeight(3.0)
cone.SetRadius(1.0)
cone.SetResolution(100)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
ren1 = vtk.vtkRenderer()
ren1.AddActor(coneActor)
ren1.SetBackground(0.1, 0.2, 0.4)
ren1.SetViewport(0.0, 0.0, 0.5, 1.0)
ren2 = vtk.vtkRenderer()
ren2.AddActor(coneActor)
ren2.SetBackground(0.1, 0.2, 0.4)
ren2.SetViewport(0.5, 0.0, 1.0, 1.0)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.AddRenderer(ren2)
renWin.SetSize(600, 300)
ren1.ResetCamera()
ren1.GetActiveCamera().Azimuth(90)
for i in range(0, 360):
time.sleep(0.03)
renWin.Render()
ren1.GetActiveCamera().Azimuth(1)
ren2.GetActiveCamera().Azimuth(1)
def read_obj(filename):
"""Test to read and display a wavefront OBJ
"""
reader = vtk.vtkOBJReader()
reader.SetFileName(filename)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInput(reader.GetOutput())
else:
mapper.SetInputConnection(reader.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.AddActor(actor)
iren.Initialize()
renWin.Render()
iren.Start()
def write_obj(infile='./data/itokawa_low.obj', outfile='./data/point_clouds/itokawa_low_vtkwriter'):
"""Read from a OBJ and then write to a different one
"""
reader = vtk.vtkOBJReader()
reader.SetFileName(infile)
# write the stl file to a disk
objWriter = vtk.vtkOBJExporter()
objWriter.SetFilePrefix(outfile)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInput(reader.GetOutput())
else:
mapper.SetInputConnection(reader.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.AddActor(actor)
iren.Initialize()
renWin.Render()
iren.Start()
objWriter.SetRenderWindow(renWin)
objWriter.Write()
def obj_to_numpy(filename):
"""Convert OBJ to a numpy array
https://stackoverflow.com/questions/23138112/vtk-to-maplotlib-using-numpy
"""
reader = vtk.vtkOBJReader()
reader.SetFileName(filename)
reader.Update()
output = reader.GetOutput()
points = output.GetPoints()
verts = numpy_support.vtk_to_numpy(points.GetData())
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInput(reader.GetOutput())
else:
mapper.SetInputConnection(reader.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.AddActor(actor)
iren.Initialize()
renWin.Render()
iren.Start()
return verts
def mesh_decimate(filename):
"""Input a OBJ shape model and then reduce the number of vertices
then output to a numpy array
https://stackoverflow.com/questions/38197212/mesh-decimation-in-python
https://www.vtk.org/gitweb?p=VTK.git;a=blob;f=Examples/VisualizationAlgorithms/Python/deciFran.py
"""
reader = vtk.vtkOBJReader()
reader.SetFileName(filename)
reader.Update()
# decimate the obj file object
inputPoly = vtk.vtkPolyData()
inputPoly.ShallowCopy(reader.GetOutput())
print("Before decimation\n\n {} vertices \n {} faces".format(inputPoly.GetNumberOfPoints(), inputPoly.GetNumberOfPolys()))
# now decimate
decimate = vtk.vtkDecimatePro()
decimate.SetInputData(inputPoly)
decimate.SetTargetReduction(0.5)
decimate.Update()
pdb.set_trace()
decimatedPoly = vtk.vtkPolyData()
decimatedPoly.ShallowCopy(decimate.GetOutput())
print("After decimation\n\n {} vertices \n {} faces".format(decimatedPoly.GetNumberOfPoints(),decimatedPoly.GetNumberOfPolys()))
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInput(reader.GetOutput())
else:
mapper.SetInputConnection(reader.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.AddActor(actor)
iren.Initialize()
renWin.Render()
iren.Start()
def build_triangle():
"""Build a triangle mesh using the raw points and faces
https://lorensen.github.io/VTKExamples/site/Python/PolyData/ColoredTriangle/
"""
Points = vtk.vtkPoints()
Triangles = vtk.vtkCellArray()
Points.InsertNextPoint(1.0, 0.0, 0.0)
Points.InsertNextPoint(0.0, 0.0, 0.0)
Points.InsertNextPoint(0.0, 1.0, 0.0)
Triangle = vtk.vtkTriangle()
Triangle.GetPointIds().SetId(0, 0)
Triangle.GetPointIds().SetId(1, 1)
Triangle.GetPointIds().SetId(2, 2)
Triangles.InsertNextCell(Triangle)
# setup colors
Colors = vtk.vtkUnsignedCharArray()
Colors.SetNumberOfComponents(3)
Colors.SetName("Colors")
Colors.InsertNextTuple3(255, 0, 0)
Colors.InsertNextTuple3(0, 255, 0)
Colors.InsertNextTuple3(0, 0, 255)
polydata = vtk.vtkPolyData()
polydata.SetPoints(Points)
polydata.SetPolys(Triangles)
polydata.GetPointData().SetScalars(Colors)
polydata.Modified()
if vtk.VTK_MAJOR_VERSION <= 5:
polydata.Update()
# now display it
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(polydata)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# now render stuff
camera = vtk.vtkCamera()
camera.SetPosition(1, 1, 1)
camera.SetFocalPoint(0, 0, 0)
renderer = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(renderer)
iren = vtk.vtkXRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renderer.AddActor(actor)
renderer.SetActiveCamera(camera)
renderer.ResetCamera()
renderer.SetBackground(0, 0, 0)
renWin.Render()
iren.Start()
def cube():
"""Construct a cube manually
https://www.vtk.org/Wiki/VTK/Examples/Python/DataManipulation/Cube.py
"""
verts = [(0.0, 0.0, 0.0),
(1.0, 0.0, 0.0),
(1.0, 1.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(1.0, 1.0, 1.0),
(0.0, 1.0, 1.0)]
faces = [(0, 1, 2, 3),
(4, 5, 6, 7),
(0, 1, 5, 4),
(1, 2, 6, 5),
(2, 3, 7, 6),
(3, 0, 4, 7)]
# cube = vtk.vtkPolyData()
# points = vtk.vtkPoints()
# polys = vtk.vtkCellArray() # insert cell as a number and a tuple of the vertices in the face
# scalars = vtk.vtkFloatArray()
# # load the data
# for i in range(8):
# points.InsertPoint(i, verts[i])
# for i in range(6):
# polys.InsertNextCell(vtk_mesh.make_vtk_idlist(faces[i]))
# for i in range(8):
# scalars.InsertTuple1(i, i)
# # now assign everything to the polydata object
# cube.SetPoints(points)
# cube.SetPolys(polys)
# cube.GetPointData().SetScalars(scalars)
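# NOTE: vtk_mesh is assumed to provide numpy_to_vtk_poly(verts, faces),
# equivalent to the commented-out block above; it is not imported in this file.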
cube = vtk_mesh.numpy_to_vtk_poly(np.array(verts), np.array(faces))
# now visualize
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(cube)
mapper.SetScalarRange(0, 7)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# now render
camera = vtk.vtkCamera()
camera.SetPosition(1, 1, 1)
camera.SetFocalPoint(0, 0, 0)
renderer = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(renderer)
iren = vtk.vtkXRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renderer.AddActor(actor)
renderer.SetActiveCamera(camera)
renderer.ResetCamera()
renderer.SetBackground(0, 0, 0)
renWin.SetSize(300, 300)
renWin.Render()
iren.Start()
def vtk_render_polydata(poly):
"""Helper function to turn VTK poly data to a rendered window
"""
# now visualize
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(poly)
mapper.SetScalarRange(0, 7)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# now render
camera = vtk.vtkCamera()
camera.SetPosition(1, 1, 1)
camera.SetFocalPoint(0, 0, 0)
renderer = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(renderer)
iren = vtk.vtkXRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renderer.AddActor(actor)
renderer.SetActiveCamera(camera)
renderer.ResetCamera()
renderer.SetBackground(0, 0, 0)
renWin.SetSize(300, 300)
renWin.Render()
iren.Start()
def vtk_raycasting_obbtree():
"""Testing out raycasting inside of VTK using vtkOBBTree
"""
radius = 0.01
# read and turn OBJ to polydata
polydata = wavefront.read_obj_to_polydata('./data/shape_model/ITOKAWA/itokawa_high.obj')
renderer = vtk.vtkRenderer()
pSource = np.array([1, 0, 0])
pTarget = np.array([0.00372, -0.0609, -0.0609])
# oriented bounding box for the polydata
obbTree = vtk.vtkOBBTree()
obbTree.SetDataSet(polydata)
obbTree.BuildLocator()
pointsVTKintersection = vtk.vtkPoints()
code = obbTree.IntersectWithLine(pSource, pTarget, pointsVTKintersection, None)
if code:
points_int = numpy_support.vtk_to_numpy(pointsVTKintersection.GetData())
for p in points_int:
graphics.vtk_addPoint(renderer, p , color=[0, 0, 1], radius=radius)
print('{} intersections'.format(points_int.shape[0]))
graphics.vtk_addPoly(renderer, polydata)
graphics.vtk_addPoint(renderer, pSource, color=[0, 1.0, 0], radius=radius)
graphics.vtk_addPoint(renderer, pTarget, color=[1.0, 0, 0], radius=radius)
graphics.vtk_addLine(renderer, pSource, pTarget)
graphics.vtk_show(renderer)
def vtk_raycasting_bsptree():
"""Testing out raycasting inside of VTK using vtkModifiedBSPTree
"""
radius = 0.01
# read and turn OBJ to polydata
polydata = wavefront.read_obj_to_polydata('./data/shape_model/ITOKAWA/itokawa_high.obj')
renderer = vtk.vtkRenderer()
pSource = np.array([1, 0, 0])
pTarget = np.array([0.00372, -0.0609, -0.0609])
# oriented bounding box for the polydata
bspTree = vtk.vtkModifiedBSPTree()
bspTree.SetDataSet(polydata)
bspTree.BuildLocator()
pointsVTKintersection = vtk.vtkPoints()
code = bspTree.IntersectWithLine(pSource, pTarget, 1e-9, pointsVTKintersection, None)
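# note: unlike vtkOBBTree.IntersectWithLine above, the BSP-tree variant takes
# an explicit tolerance argument (1e-9 here) before the output points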
if code:
points_int = numpy_support.vtk_to_numpy(pointsVTKintersection.GetData())
for p in points_int:
graphics.vtk_addPoint(renderer, p , color=[0, 0, 1], radius=radius)
print('{} intersections'.format(points_int.shape[0]))
graphics.vtk_addPoly(renderer, polydata)
graphics.vtk_addPoint(renderer, pSource, color=[0, 1.0, 0], radius=radius)
graphics.vtk_addPoint(renderer, pTarget, color=[1.0, 0, 0], radius=radius)
graphics.vtk_addLine(renderer, pSource, pTarget)
graphics.vtk_show(renderer)
def raycasting_visualization():
v, f = wavefront.read_obj('./data/shape_model/ITOKAWA/itokawa_low.obj')
caster = raycaster.RayCaster.loadmesh(v, f)
renderer = vtk.vtkRenderer()
pSource = np.array([-2.0, 0, 0])
pTarget = np.array([2, 0, 0])
intersections = caster.castray(pSource, pTarget)
for p in intersections:
graphics.vtk_addPoint(renderer, p, color=[0, 0, 1], radius=0.01)
graphics.vtk_addPoly(renderer, caster.polydata)
graphics.vtk_show(renderer)
def vtk_mesh_subdivision():
"""Subdivide a mesh into more triangles
Each subdivision pass multiplies the number of faces by 4 (so n passes yields 4**n times as many faces).
"""
# get the polydata for a mesh
v, f = wavefront.ellipsoid_mesh(1, 2, 3, density=20)
polydata = wavefront.meshtopolydata(v, f)
# subdivide using appropriate filter
smooth_loop = vtk.vtkLoopSubdivisionFilter()
smooth_loop.SetNumberOfSubdivisions(1) # can define the number of subdivisions
smooth_loop.SetInputData(polydata)
smooth_loop.Update()
poly_loop = vtk.vtkPolyData()
poly_loop.ShallowCopy(smooth_loop.GetOutput())
smooth_butterfly = vtk.vtkButterflySubdivisionFilter()
smooth_butterfly.SetNumberOfSubdivisions(3)
smooth_butterfly.SetInputData(polydata)
smooth_butterfly.Update()
poly_butterfly = vtk.vtkPolyData()
poly_butterfly.ShallowCopy(smooth_butterfly.GetOutput())
# Create a mapper and actor for initial dataset
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(polydata)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Create a mapper and actor for smoothed dataset (vtkLoopSubdivisionFilter)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(smooth_loop.GetOutputPort())
actor_loop = vtk.vtkActor()
actor_loop.SetMapper(mapper)
actor_loop.SetPosition(3, 0, 0)
# Create a mapper and actor for smoothed dataset (vtkButterflySubdivisionFilter)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(smooth_butterfly.GetOutputPort())
actor_butterfly = vtk.vtkActor()
actor_butterfly.SetMapper(mapper)
actor_butterfly.SetPosition(6, 0, 0)
# Visualise
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
# Add actors and render
renderer.AddActor(actor)
renderer.AddActor(actor_loop)
renderer.AddActor(actor_butterfly)
renderer.SetBackground(1, 1, 1) # Background color white
renderWindow.SetSize(800, 800)
renderWindow.Render()
renderWindowInteractor.Start()
# output to a new v, f array
# convert polydata to numpy
v_loop, f_loop = wavefront.polydatatomesh(poly_loop)
v_butterfly, f_butterfly = wavefront.polydatatomesh(poly_butterfly)
print('Original #V : {} #F : {}'.format(v.shape[0], f.shape[0]))
print('Loop #V : {} #F : {}'.format(v_loop.shape[0], f_loop.shape[0]))
print('BFly #V : {} #F : {}'.format(v_butterfly.shape[0], f_butterfly.shape[0]))
# output some statistics
if __name__ == '__main__':
vtk_cylinder()
vtk_distance()
|
skulumani/asteroid_dumbbell
|
integration/vtk_examples.py
|
Python
|
gpl-3.0
| 24,601
|
[
"VTK"
] |
c33b96742ab0592db7a0ccee939c97cbcef53262a5b070a93768942afaf2dfaa
|
"""Test correctness of vdW-DF potential."""
import os
from math import pi
from gpaw.grid_descriptor import GridDescriptor
import numpy as np
from gpaw.test import equal
from gpaw.xc import XC
from gpaw.mpi import world
N = 8
a = 2.0
gd = GridDescriptor((N, N, N), (a, a, a))
# Spin paired:
def paired():
xc = XC('vdW-DF')
n = 0.3 * np.ones((1, N, N, N))
n += 0.01 * np.cos(np.arange(N) * 2 * pi / N)
v = 0.0 * n
E = xc.calculate(gd, n, v)
n2 = 1.0 * n
i = 1
n2[0, i, i, i] += 0.00002
x = v[0, i, i, i] * gd.dv
E2 = xc.calculate(gd, n2, v)
n2[0, i, i, i] -= 0.00004
E2 -= xc.calculate(gd, n2, v)
x2 = E2 / 0.00004
print i, x, x2, x - x2, x / x2
equal(x, x2, 5e-12)
# Spin polarized:
def polarized():
xc = XC('vdW-DF')
n = 0.04 * np.ones((2, N, N, N))
n[1] = 0.3
n[0] += 0.02 * np.sin(np.arange(N) * 2 * pi / N)
n[1] += 0.2 * np.cos(np.arange(N) * 2 * pi / N)
v = 0.0 * n
E = xc.calculate(gd, n, v)
n2 = 1.0 * n
i = 1
n2[0, i, i, i] += 0.00002
x = v[0, i, i, i] * gd.dv
E2 = xc.calculate(gd, n2, v)
n2[0, i, i, i] -= 0.00004
E2 -= xc.calculate(gd, n2, v)
x2 = E2 / 0.00004
print i, x, x2, x - x2, x / x2
equal(x, x2, 1e-10)
if world.size == 1:
polarized()
paired()
|
qsnake/gpaw
|
gpaw/test/vdw/potential.py
|
Python
|
gpl-3.0
| 1,303
|
[
"GPAW"
] |
73c7a5d727bce3794fb78f50a3d6288393289ec3d9ddeeea15a8f47f99dba6a6
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import numpy as np
import scipy.constants as const
from monty.json import MSONable
from pymatgen.analysis.structure_matcher import StructureMatcher, \
OrderDisorderElementComparator
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.util.coord_utils import pbc_diff
"""
A module to perform diffusion analyses (e.g. calculating diffusivity from
mean square displacements etc.). If you use this module, please consider
citing the following papers::
Ong, S. P., Mo, Y., Richards, W. D., Miara, L., Lee, H. S., & Ceder, G.
(2013). Phase stability, electrochemical stability and ionic conductivity
of the Li10+-1MP2X12 (M = Ge, Si, Sn, Al or P, and X = O, S or Se) family
of superionic conductors. Energy & Environmental Science, 6(1), 148.
doi:10.1039/c2ee23355j
Mo, Y., Ong, S. P., & Ceder, G. (2012). First Principles Study of the
Li10GeP2S12 Lithium Super Ionic Conductor Material. Chemistry of Materials,
24(1), 15-17. doi:10.1021/cm203303y
"""
__author__ = "Will Richards, Shyue Ping Ong"
__version__ = "0.2"
__maintainer__ = "Will Richards"
__email__ = "wrichard@mit.edu"
__status__ = "Beta"
__date__ = "5/2/13"
class DiffusionAnalyzer(MSONable):
"""
Class for performing diffusion analysis.
.. attribute: diffusivity
Diffusivity in cm^2 / s
.. attribute: conductivity
Conductivity in mS / cm
.. attribute: diffusivity_components
A vector with diffusivity in the a, b and c directions in cm^2 / s
.. attribute: conductivity_components
A vector with conductivity in the a, b and c directions in mS / cm
.. attribute: diffusivity_sigma
Std dev in diffusivity in cm^2 / s. Note that this makes sense only
for non-smoothed analyses.
.. attribute: conductivity_sigma
Std dev in conductivity in mS / cm. Note that this makes sense only
for non-smoothed analyses.
.. attribute: diffusivity_components_sigma
A vector with std dev. in diffusivity in the a, b and c directions in
cm^2 / s. Note that this makes sense only for non-smoothed analyses.
.. attribute: conductivity_components_sigma
A vector with std dev. in conductivity in the a, b and c directions
in mS / cm. Note that this makes sense only for non-smoothed analyses.
.. attribute: max_framework_displacement
The maximum (drift adjusted) distance of any framework atom from its
starting location in A.
.. attribute: max_ion_displacements
nions x 1 array of the maximum displacement of each individual ion.
.. attribute: msd
nsteps x 1 array of the mean square displacement of specie.
.. attribute: msd_components
nsteps x 3 array of the MSD in each lattice direction of specie.
.. attribute: sq_disp_ions
The square displacement of all ion (both specie and other ions) as a
nions x nsteps array.
.. attribute: dt
Time coordinate array.
"""
def __init__(self, structure, displacements, specie, temperature,
time_step, step_skip, smoothed="max", min_obs=30,
avg_nsteps=1000):
"""
This constructor is meant to be used with pre-processed data.
Other convenient constructors are provided as class methods (see
from_vaspruns and from_files).
Given a matrix of displacements (see arguments below for expected
format), the diffusivity is given by::
D = 1 / 2dt * <mean square displacement>
where d is the dimensionality and t is the time. To obtain a reliable
diffusion estimate, a least squares regression of the MSD against
time is performed to obtain the slope, which is then related to the diffusivity.
For traditional analysis, use smoothed=False and weighted=False.
Args:
structure (Structure): Initial structure.
displacements (array): Numpy array of with shape [site,
time step, axis]
specie (Element/Specie): Specie to calculate diffusivity for as a
String. E.g., "Li".
temperature (float): Temperature of the diffusion run in Kelvin.
time_step (int): Time step between measurements.
step_skip (int): Sampling frequency of the displacements (
time_step is multiplied by this number to get the real time
between measurements)
smoothed (str): Whether to smooth the MSD, and what mode to smooth.
Supported modes are:
i. "max", which tries to use the maximum #
of data points for each time origin, subject to a
minimum # of observations given by min_obs, and then
weights the observations based on the variance
accordingly. This is the default.
ii. "constant", in which each timestep is averaged over
the number of time_steps given by min_steps.
iii. None / False / any other false-like quantity. No
smoothing.
min_obs (int): Used with smoothed="max". Minimum number of
observations to have before including in the MSD vs dt
calculation. E.g. If a structure has 10 diffusing atoms,
and min_obs = 30, the MSD vs dt will be
calculated up to dt = total_run_time / 3, so that each
diffusing atom is measured at least 3 uncorrelated times.
Only applies in smoothed="max".
avg_nsteps (int): Used with smoothed="constant". Determines the
number of time steps to average over to get the msd for each
timestep. Default of 1000 is usually pretty good.
"""
self.structure = structure
self.disp = displacements
self.specie = specie
self.temperature = temperature
self.time_step = time_step
self.step_skip = step_skip
self.min_obs = min_obs
self.smoothed = smoothed
self.avg_nsteps = avg_nsteps
indices = []
framework_indices = []
for i, site in enumerate(structure):
if site.specie.symbol == specie:
indices.append(i)
else:
framework_indices.append(i)
if self.disp.shape[1] < 2:
self.diffusivity = 0.
self.conductivity = 0.
self.diffusivity_components = np.array([0., 0., 0.])
self.conductivity_components = np.array([0., 0., 0.])
self.max_framework_displacement = 0
else:
framework_disp = self.disp[framework_indices]
drift = np.average(framework_disp, axis=0)[None, :, :]
#drift corrected position
dc = self.disp - drift
df = structure.lattice.get_fractional_coords(dc)
nions, nsteps, dim = dc.shape
if not smoothed:
timesteps = np.arange(0, nsteps)
elif smoothed == "constant":
if nsteps <= avg_nsteps:
raise ValueError('Not enough data to calculate diffusivity')
timesteps = np.arange(0, nsteps - avg_nsteps)
else:
#limit the number of sampled timesteps to 200
min_dt = int(1000 / (self.step_skip * self.time_step))
max_dt = min(len(indices) * nsteps // self.min_obs, nsteps)
if min_dt >= max_dt:
raise ValueError('Not enough data to calculate diffusivity')
timesteps = np.arange(min_dt, max_dt,
max(int((max_dt - min_dt) / 200), 1))
dt = timesteps * self.time_step * self.step_skip
# calculate the smoothed msd values
msd = np.zeros_like(dt, dtype=np.double)
sq_disp_ions = np.zeros((len(dc), len(dt)), dtype=np.double)
msd_components = np.zeros(dt.shape + (3,))
lengths = np.array(self.structure.lattice.abc)[None, None, :]
for i, n in enumerate(timesteps):
if not smoothed:
dx = dc[:, i:i + 1, :]
dcomponents = df[:, i:i + 1, :] * lengths
elif smoothed == "constant":
dx = dc[:, i:i + avg_nsteps, :] - dc[:, 0:avg_nsteps, :]
dcomponents = (df[:, i:i + avg_nsteps, :]
- df[:, 0:avg_nsteps, :]) * lengths
else:
dx = dc[:, n:, :] - dc[:, :-n, :]
dcomponents = (df[:, n:, :] - df[:, :-n, :]) * lengths
sq_disp = dx ** 2
sq_disp_ions[:, i] = np.average(np.sum(sq_disp, axis=2), axis=1)
msd[i] = np.average(sq_disp_ions[:, i][indices])
msd_components[i] = np.average(dcomponents[indices] ** 2,
axis=(0, 1))
def weighted_lstsq(a, b):
if smoothed == "max":
# For max smoothing, we need to weight by variance.
w_root = (1 / dt) ** 0.5
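# i.e. weights proportional to 1/dt -- an interpretation consistent with the
# variance of the smoothed MSD growing roughly linearly with dt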
return np.linalg.lstsq(a * w_root[:, None], b * w_root)
else:
return np.linalg.lstsq(a, b)
m_components = np.zeros(3)
m_components_res = np.zeros(3)
a = np.ones((len(dt), 2))
a[:, 0] = dt
for i in range(3):
(m, c), res, rank, s = weighted_lstsq(a, msd_components[:, i])
m_components[i] = max(m, 1e-15)
m_components_res[i] = res[0]
(m, c), res, rank, s = weighted_lstsq(a, msd)
#m shouldn't be negative
m = max(m, 1e-15)
#factor of 10 is to convert from A^2/fs to cm^2/s
#factor of 6 is for dimensionality
conv_factor = get_conversion_factor(self.structure, self.specie,
self.temperature)
self.diffusivity = m / 60
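# Worked out: the slope m is in A^2/fs; 1 A^2/fs = 1e-16 cm^2 / 1e-15 s
# = 0.1 cm^2/s, and dividing by 2*d = 6 (d = 3) gives the combined /60.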
# Calculate the error in the diffusivity using the error in the
# slope from the lst sq.
# Variance in slope = n * Sum Squared Residuals / (n * Sxx - Sx
# ** 2) / (n-2).
n = len(dt)
# Pre-compute the denominator since we will use it later.
# We divide dt by 1000 to avoid overflow errors in some systems (
# e.g., win). This is subsequently corrected where denom is used.
denom = (n * np.sum((dt/1000) ** 2) - np.sum(dt/1000) ** 2) * (n
- 2)
self.diffusivity_std_dev = np.sqrt(n * res[0] / denom) / 60 / 1000
self.conductivity = self.diffusivity * conv_factor
self.conductivity_std_dev = self.diffusivity_std_dev * conv_factor
self.diffusivity_components = m_components / 20
self.diffusivity_components_std_dev = np.sqrt(
n * m_components_res / denom) / 20 / 1000
self.conductivity_components = self.diffusivity_components * \
conv_factor
self.conductivity_components_std_dev = \
self.diffusivity_components_std_dev * conv_factor
# Drift and displacement information.
self.drift = drift
self.corrected_displacements = dc
self.max_ion_displacements = np.max(np.sum(
dc ** 2, axis=-1) ** 0.5, axis=1)
self.max_framework_displacement = \
np.max(self.max_ion_displacements[framework_indices])
self.msd = msd
self.sq_disp_ions = sq_disp_ions
self.msd_components = msd_components
self.dt = dt
self.indices = indices
self.framework_indices = framework_indices
def get_drift_corrected_structures(self, start=None, stop=None, step=None):
"""
Returns an iterator for the drift-corrected structures. Use of
iterator is to reduce memory usage as # of structures in MD can be
huge. You don't often need all the structures all at once.
Args:
start, stop, step (int): applies a start/stop/step to the iterator.
Faster than applying it after generation, as it reduces the
number of structures created.
"""
coords = np.array(self.structure.cart_coords)
species = self.structure.species_and_occu
latt = self.structure.lattice
nsites, nsteps, dim = self.corrected_displacements.shape
for i in range(start or 0, stop or nsteps, step or 1):
yield Structure(
latt, species,
coords + self.corrected_displacements[:, i, :],
coords_are_cartesian=True)
def get_summary_dict(self, include_msd_t=False):
"""
Provides a summary of diffusion information.
Args:
include_msd_t (bool): Whether to include mean square displace and
time data with the data.
Returns:
(dict) of diffusion and conductivity data.
"""
d = {
"D": self.diffusivity,
"D_sigma": self.diffusivity_std_dev,
"S": self.conductivity,
"S_sigma": self.conductivity_std_dev,
"D_components": self.diffusivity_components.tolist(),
"S_components": self.conductivity_components.tolist(),
"D_components_sigma": self.diffusivity_components_std_dev.tolist(),
"S_components_sigma": self.conductivity_components_std_dev.tolist(),
"specie": str(self.specie),
"step_skip": self.step_skip,
"time_step": self.time_step,
"temperature": self.temperature,
"max_framework_displacement": self.max_framework_displacement
}
if include_msd_t:
d["msd"] = self.msd.tolist()
d["msd_components"] = self.msd_components.tolist()
d["dt"] = self.dt.tolist()
return d
def get_framework_rms_plot(self, plt=None, granularity=200, matching_s=None):
"""
Get the plot of rms framework displacement vs time. Useful for checking
for melting, especially if framework atoms can move via paddle-wheel
or similar mechanism (which would show up in max framework displacement
but doesn't constitute melting).
Args:
granularity (int): Number of structures to match
matching_s (Structure): Optionally match to a disordered structure
instead of the first structure in the analyzer. Required when
a secondary mobile ion is present.
"""
from pymatgen.util.plotting_utils import get_publication_quality_plot
plt = get_publication_quality_plot(12, 8, plt=plt)
step = (self.corrected_displacements.shape[1] - 1) // (granularity - 1)
f = (matching_s or self.structure).copy()
f.remove_species([self.specie])
sm = StructureMatcher(primitive_cell=False, stol=0.6,
comparator=OrderDisorderElementComparator(),
allow_subset=True)
rms = []
for s in self.get_drift_corrected_structures(step=step):
s.remove_species([self.specie])
d = sm.get_rms_dist(f, s)
if d:
rms.append(d)
else:
rms.append((1, 1))
max_dt = (len(rms) - 1) * step * self.step_skip * self.time_step
if max_dt > 100000:
plot_dt = np.linspace(0, max_dt/1000, len(rms))
unit = 'ps'
else:
plot_dt = np.linspace(0, max_dt, len(rms))
unit = 'fs'
rms = np.array(rms)
plt.plot(plot_dt, rms[:, 0], label='RMS')
plt.plot(plot_dt, rms[:, 1], label='max')
plt.legend(loc='best')
plt.xlabel("Timestep ({})".format(unit))
plt.ylabel("normalized distance")
plt.tight_layout()
return plt
def get_msd_plot(self, plt=None, mode="specie"):
"""
Get the plot of the smoothed msd vs time graph. Useful for
checking convergence. This can be written to an image file.
Args:
plt: A plot object. Defaults to None, which means one will be
generated.
mode (str): Determines type of msd plot. Supported modes are
"species" (msd averaged per species), "sites" (msd for each
individual site), or anything else for the overall msd and its
a, b, c components (default).
"""
from pymatgen.util.plotting_utils import get_publication_quality_plot
plt = get_publication_quality_plot(12, 8, plt=plt)
if np.max(self.dt) > 100000:
plot_dt = self.dt / 1000
unit = 'ps'
else:
plot_dt = self.dt
unit = 'fs'
if mode == "species":
for sp in sorted(self.structure.composition.keys()):
indices = [i for i, site in enumerate(self.structure) if
site.specie == sp]
sd = np.average(self.sq_disp_ions[indices, :], axis=0)
plt.plot(plot_dt, sd, label=sp.__str__())
plt.legend(loc=2, prop={"size": 20})
elif mode == "sites":
for i, site in enumerate(self.structure):
sd = self.sq_disp_ions[i, :]
plt.plot(plot_dt, sd, label="%s - %d" % (
site.specie.__str__(), i))
plt.legend(loc=2, prop={"size": 20})
else:
# Handle default / invalid mode case
plt.plot(plot_dt, self.msd, 'k')
plt.plot(plot_dt, self.msd_components[:, 0], 'r')
plt.plot(plot_dt, self.msd_components[:, 1], 'g')
plt.plot(plot_dt, self.msd_components[:, 2], 'b')
plt.legend(["Overall", "a", "b", "c"], loc=2, prop={"size": 20})
plt.xlabel("Timestep ({})".format(unit))
plt.ylabel("MSD ($\AA^2$)")
plt.tight_layout()
return plt
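# Example (illustrative sketch): saving the MSD plot to a file instead of
# displaying it. "analyzer" is an assumed DiffusionAnalyzer instance; the
# filename is arbitrary.
#
#     plt = analyzer.get_msd_plot(mode="species")
#     plt.savefig("msd_by_species.png")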
def plot_msd(self, mode="default"):
"""
Plot the smoothed msd vs time graph. Useful for checking convergence.
Args:
mode (str): Either "default" (the default, shows only the MSD for
the diffusing specie and its components), "sites" (individual
square displacements of all sites), or "species" (mean square
displacement by specie).
"""
self.get_msd_plot(mode=mode).show()
def export_msdt(self, filename):
"""
Writes MSD data to a csv file that can be easily plotted in other
software.
Args:
filename (str): Filename. Supported formats are csv and dat. If
the extension is csv, a csv file is written. Otherwise,
a dat format is assumed.
"""
fmt = "csv" if filename.lower().endswith(".csv") else "dat"
delimiter = ", " if fmt == "csv" else " "
with open(filename, "wt") as f:
if fmt == "dat":
f.write("# ")
f.write(delimiter.join(["t", "MSD", "MSD_a", "MSD_b", "MSD_c"]))
f.write("\n")
for dt, msd, msdc in zip(self.dt, self.msd, self.msd_components):
f.write(delimiter.join(["%s" % v for v in [dt, msd] + list(
msdc)]))
f.write("\n")
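# Example (illustrative sketch): exporting the MSD data and re-reading it with
# numpy for custom plotting. The filename is arbitrary; skiprows=1 skips the
# header line written above.
#
#     analyzer.export_msdt("msd.csv")
#     import numpy as np
#     t, msd, msd_a, msd_b, msd_c = np.loadtxt(
#         "msd.csv", delimiter=",", skiprows=1, unpack=True)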
@classmethod
def from_structures(cls, structures, specie, temperature,
time_step, step_skip, smoothed="max", min_obs=30,
avg_nsteps=1000, initial_disp=None,
initial_structure=None):
"""
Convenient constructor that takes in a list of Structure objects to
perform diffusion analysis.
Args:
structures ([Structure]): list of Structure objects (must be
ordered in sequence of run). E.g., you may have performed
sequential VASP runs to obtain sufficient statistics.
specie (Element/Specie): Specie to calculate diffusivity for as a
String. E.g., "Li".
temperature (float): Temperature of the diffusion run in Kelvin.
time_step (int): Time step between measurements, in femtoseconds.
step_skip (int): Sampling frequency of the displacements (
time_step is multiplied by this number to get the real time
between measurements)
smoothed (str): Whether to smooth the MSD, and what mode to smooth.
Supported modes are:
i. "max", which tries to use the maximum #
of data points for each time origin, subject to a
minimum # of observations given by min_obs, and then
weights the observations based on the variance
accordingly. This is the default.
ii. "constant", in which each timestep is averaged over
the same number of observations given by min_obs.
iii. None / False / any other false-like quantity. No
smoothing.
min_obs (int): Used with smoothed="max". Minimum number of
observations to have before including in the MSD vs dt
calculation. E.g. If a structure has 10 diffusing atoms,
and min_obs = 30, the MSD vs dt will be
calculated up to dt = total_run_time / 3, so that each
diffusing atom is measured at least 3 uncorrelated times.
Only applies in smoothed="max".
avg_nsteps (int): Used with smoothed="constant". Determines the
number of time steps to average over to get the msd for each
timestep. Default of 1000 is usually pretty good.
initial_disp (np.ndarray): Sometimes, you need to iteratively
compute estimates of the diffusivity. This supplies an
initial displacement that will be added on to the initial
displacements. Note that this makes sense only when
smoothed=False.
initial_structure (Structure): Like initial_disp, this is used
for iterative computations of estimates of the diffusivity. You
typically need to supply both variables. This stipulates the
initial structure from which the current set of displacements
are computed.
"""
p = []
for i, s in enumerate(structures):
if i == 0:
structure = s
p.append(np.array(s.frac_coords)[:, None])
if initial_structure is not None:
p.insert(0, np.array(initial_structure.frac_coords)[:, None])
else:
p.insert(0, p[0])
p = np.concatenate(p, axis=1)
dp = p[:, 1:] - p[:, :-1]
dp = dp - np.round(dp)
f_disp = np.cumsum(dp, axis=1)
if initial_disp is not None:
f_disp += structure.lattice.get_fractional_coords(initial_disp)[:,
None, :]
disp = structure.lattice.get_cartesian_coords(f_disp)
return cls(structure, disp, specie, temperature,
time_step, step_skip=step_skip, smoothed=smoothed,
min_obs=min_obs, avg_nsteps=avg_nsteps)
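# Example (illustrative sketch): building an analyzer from an in-memory list of
# Structure objects, e.g. collected from a custom MD driver. The variable names
# and the temperature/time_step/step_skip values are assumptions.
#
#     analyzer = DiffusionAnalyzer.from_structures(
#         structures, specie="Li", temperature=1000,
#         time_step=2, step_skip=10, smoothed="max")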
@classmethod
def from_vaspruns(cls, vaspruns, specie, smoothed="max", min_obs=30,
avg_nsteps=1000, initial_disp=None,
initial_structure=None):
"""
Convenient constructor that takes in a list of Vasprun objects to
perform diffusion analysis.
Args:
vaspruns ([Vasprun]): List of Vaspruns (must be ordered in
sequence of MD simulation). E.g., you may have performed
sequential VASP runs to obtain sufficient statistics.
specie (Element/Specie): Specie to calculate diffusivity for as a
String. E.g., "Li".
smoothed (str): Whether to smooth the MSD, and what mode to smooth.
Supported modes are:
i. "max", which tries to use the maximum #
of data points for each time origin, subject to a
minimum # of observations given by min_obs, and then
weights the observations based on the variance
accordingly. This is the default.
ii. "constant", in which each timestep is averaged over
the same number of observations given by min_obs.
iii. None / False / any other false-like quantity. No
smoothing.
min_obs (int): Used with smoothed="max". Minimum number of
observations to have before including in the MSD vs dt
calculation. E.g. If a structure has 10 diffusing atoms,
and min_obs = 30, the MSD vs dt will be
calculated up to dt = total_run_time / 3, so that each
diffusing atom is measured at least 3 uncorrelated times.
Only applies in smoothed="max".
avg_nsteps (int): Used with smoothed="constant". Determines the
number of time steps to average over to get the msd for each
timestep. Default of 1000 is usually pretty good.
initial_disp (np.ndarray): Sometimes, you need to iteratively
compute estimates of the diffusivity. This supplies an
initial displacement that will be added on to the initial
displacements. Note that this makes sense only when
smoothed=False.
initial_structure (Structure): Like initial_disp, this is used
for iterative computations of estimates of the diffusivity. You
typically need to supply both variables. This stipulates the
initial structure from which the current set of displacements
are computed.
"""
def get_structures(vaspruns):
for i, vr in enumerate(vaspruns):
if i == 0:
step_skip = vr.ionic_step_skip or 1
final_structure = vr.initial_structure
temperature = vr.parameters['TEEND']
time_step = vr.parameters['POTIM']
yield step_skip, temperature, time_step
# check that the runs are continuous
fdist = pbc_diff(vr.initial_structure.frac_coords,
final_structure.frac_coords)
if np.any(fdist > 0.001):
raise ValueError('initial and final structures do not '
'match.')
final_structure = vr.final_structure
assert (vr.ionic_step_skip or 1) == step_skip
for s in vr.ionic_steps:
yield s['structure']
s = get_structures(vaspruns)
step_skip, temperature, time_step = next(s)
return cls.from_structures(structures=s, specie=specie,
temperature=temperature, time_step=time_step, step_skip=step_skip,
smoothed=smoothed, min_obs=min_obs, avg_nsteps=avg_nsteps,
initial_disp=initial_disp, initial_structure=initial_structure)
@classmethod
def from_files(cls, filepaths, specie, step_skip=10, smoothed="max",
min_obs=30, avg_nsteps=1000, ncores=None, initial_disp=None,
initial_structure=None):
"""
Convenient constructor that takes in a list of vasprun.xml paths to
perform diffusion analysis.
Args:
filepaths ([str]): List of paths to vasprun.xml files of runs. (
must be ordered in sequence of MD simulation). For example,
you may have done sequential VASP runs and they are in run1,
run2, run3, etc. You should then pass in
["run1/vasprun.xml", "run2/vasprun.xml", ...].
specie (Element/Specie): Specie to calculate diffusivity for as a
String. E.g., "Li".
step_skip (int): Sampling frequency of the displacements (
time_step is multiplied by this number to get the real time
between measurements)
smoothed (str): Whether to smooth the MSD, and what mode to smooth.
Supported modes are:
i. "max", which tries to use the maximum #
of data points for each time origin, subject to a
minimum # of observations given by min_obs, and then
weights the observations based on the variance
accordingly. This is the default.
ii. "constant", in which each timestep is averaged over
the same number of observations given by min_obs.
iii. None / False / any other false-like quantity. No
smoothing.
min_obs (int): Used with smoothed="max". Minimum number of
observations to have before including in the MSD vs dt
calculation. E.g. If a structure has 10 diffusing atoms,
and min_obs = 30, the MSD vs dt will be
calculated up to dt = total_run_time / 3, so that each
diffusing atom is measured at least 3 uncorrelated times.
Only applies in smoothed="max".
avg_nsteps (int): Used with smoothed="constant". Determines the
number of time steps to average over to get the msd for each
timestep. Default of 1000 is usually pretty good.
ncores (int): Numbers of cores to use for multiprocessing. Can
speed up vasprun parsing considerably. Defaults to None,
which means serial. It should be noted that if you want to
use multiprocessing, the number of ionic steps in all vasprun
.xml files should be a multiple of the ionic_step_skip.
Otherwise, inconsistent results may arise. Serial mode has no
such restrictions.
initial_disp (np.ndarray): Sometimes, you need to iteratively
compute estimates of the diffusivity. This supplies an
initial displacement that will be added on to the initial
displacements. Note that this makes sense only when
smoothed=False.
initial_structure (Structure): Like initial_disp, this is used
for iterative computations of estimates of the diffusivity. You
typically need to supply both variables. This stipulates the
initial structure from which the current set of displacements
are computed.
"""
if ncores is not None and len(filepaths) > 1:
import multiprocessing
p = multiprocessing.Pool(ncores)
vaspruns = p.imap(_get_vasprun,
[(fp, step_skip) for fp in filepaths])
analyzer = cls.from_vaspruns(vaspruns, min_obs=min_obs,
smoothed=smoothed, specie=specie, initial_disp=initial_disp,
initial_structure=initial_structure, avg_nsteps=avg_nsteps)
p.close()
p.join()
return analyzer
else:
def vr(filepaths):
offset = 0
for p in filepaths:
v = Vasprun(p, ionic_step_offset=offset,
ionic_step_skip=step_skip)
yield v
# Recompute offset.
offset = (-(v.nionic_steps - offset)) % step_skip
return cls.from_vaspruns(vr(filepaths), min_obs=min_obs,
smoothed=smoothed, specie=specie, initial_disp=initial_disp,
initial_structure=initial_structure, avg_nsteps=avg_nsteps)
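# Example (illustrative sketch): the typical entry point for sequential AIMD
# runs, using the directory layout mentioned in the docstring above. ncores=4
# enables parallel vasprun parsing; all values are assumptions.
#
#     analyzer = DiffusionAnalyzer.from_files(
#         ["run1/vasprun.xml", "run2/vasprun.xml", "run3/vasprun.xml"],
#         specie="Li", step_skip=10, smoothed="max", ncores=4)
#     print(analyzer.get_summary_dict())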
def as_dict(self):
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"displacements": self.disp.tolist(),
"specie": self.specie,
"temperature": self.temperature,
"time_step": self.time_step,
"step_skip": self.step_skip,
"min_obs": self.min_obs,
"smoothed": self.smoothed,
"avg_nsteps": self.avg_nsteps
}
@classmethod
def from_dict(cls, d):
structure = Structure.from_dict(d["structure"])
return cls(structure, np.array(d["displacements"]), specie=d["specie"],
temperature=d["temperature"], time_step=d["time_step"],
step_skip=d["step_skip"], min_obs=d["min_obs"],
smoothed=d.get("smoothed", "max"),
avg_nsteps=d.get("avg_nsteps", 1000))
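# Example (illustrative sketch): as_dict/from_dict round trip, e.g. for caching
# results between sessions. "analyzer" is an assumed DiffusionAnalyzer
# instance.
#
#     d = analyzer.as_dict()
#     analyzer2 = DiffusionAnalyzer.from_dict(d)
#     assert abs(analyzer2.diffusivity - analyzer.diffusivity) < 1e-10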
def get_conversion_factor(structure, species, temperature):
"""
Conversion factor to convert between cm^2/s diffusivity measurements and
mS/cm conductivity measurements based on number of atoms of diffusing
species. Note that the charge is based on the oxidation state of the
species (where available), or else the number of valence electrons
(usually a good guess, especially for main group ions).
Args:
structure (Structure): Input structure.
species (Element/Specie): Diffusing species.
temperature (float): Temperature of the diffusion run in Kelvin.
Returns:
Conversion factor.
Conductivity (in mS/cm) = Conversion Factor * Diffusivity (in cm^2/s)
"""
df_sp = get_el_sp(species)
if hasattr(df_sp, "oxi_state"):
z = df_sp.oxi_state
else:
z = df_sp.full_electronic_structure[-1][2]
n = structure.composition[species]
vol = structure.volume * 1e-24 # units cm^3
return 1000 * n / (vol * const.N_A) * z ** 2 * (const.N_A * const.e) ** 2\
/ (const.R * temperature)
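# Example (illustrative sketch): applying the documented relation
# conductivity (mS/cm) = factor * diffusivity (cm^2/s) by hand. "structure" and
# the 1e-6 cm^2/s diffusivity are assumptions.
#
#     factor = get_conversion_factor(structure, "Li", 600)
#     sigma_mS_cm = factor * 1.0e-6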
def _get_vasprun(args):
"""
Internal method to support multiprocessing.
"""
return Vasprun(args[0], ionic_step_skip=args[1],
parse_dos=False, parse_eigen=False)
def fit_arrhenius(temps, diffusivities):
"""
Returns Ea, c, standard error of Ea from the Arrhenius fit:
D = c * exp(-Ea/kT)
Args:
temps ([float]): A sequence of temperatures. units: K
diffusivities ([float]): A sequence of diffusivities (e.g.,
from DiffusionAnalyzer.diffusivity). units: cm^2/s
"""
t_1 = 1 / np.array(temps)
logd = np.log(diffusivities)
# Do a least squares regression of log(D) vs 1/T
a = np.array([t_1, np.ones(len(temps))]).T
w, res, _, _ = np.linalg.lstsq(a, logd)
w = np.array(w)
n = len(temps)
if n > 2:
std_Ea = (res[0] / (n - 2) / (n * np.var(t_1))) ** 0.5 * const.k / const.e
else:
std_Ea = None
return -w[0] * const.k / const.e, np.exp(w[1]), std_Ea
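# Example (illustrative sketch): fitting an activation energy from a few
# (temperature, diffusivity) pairs. The numbers are made up; Ea is returned in
# eV (the plotting helper below reports it in meV).
#
#     temps = [600, 800, 1000]
#     diffusivities = [2.2e-6, 1.0e-5, 2.9e-5]
#     Ea, c, Ea_std = fit_arrhenius(temps, diffusivities)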
def get_extrapolated_diffusivity(temps, diffusivities, new_temp):
"""
Returns (Arrhenius) extrapolated diffusivity at new_temp
Args:
temps ([float]): A sequence of temperatures. units: K
diffusivities ([float]): A sequence of diffusivities (e.g.,
from DiffusionAnalyzer.diffusivity). units: cm^2/s
new_temp (float): desired temperature. units: K
Returns:
(float) Diffusivity at extrapolated temp in cm^2/s.
"""
Ea, c, _ = fit_arrhenius(temps, diffusivities)
return c * np.exp(-Ea / (const.k / const.e * new_temp))
def get_extrapolated_conductivity(temps, diffusivities, new_temp, structure,
species):
"""
Returns extrapolated mS/cm conductivity.
Args:
temps ([float]): A sequence of temperatures. units: K
diffusivities ([float]): A sequence of diffusivities (e.g.,
from DiffusionAnalyzer.diffusivity). units: cm^2/s
new_temp (float): desired temperature. units: K
structure (structure): Structure used for the diffusivity calculation
species (string/Specie): conducting species
Returns:
(float) Conductivity at extrapolated temp in mS/cm.
"""
return get_extrapolated_diffusivity(temps, diffusivities, new_temp) \
* get_conversion_factor(structure, species, new_temp)
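# Example (illustrative sketch): extrapolating high-temperature AIMD results
# down to 300 K. "temps", "diffusivities" and "structure" are assumed to come
# from earlier analyses.
#
#     D_300 = get_extrapolated_diffusivity(temps, diffusivities, 300)
#     sigma_300 = get_extrapolated_conductivity(
#         temps, diffusivities, 300, structure, "Li")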
def get_arrhenius_plot(temps, diffusivities, diffusivity_errors=None,
**kwargs):
"""
Returns an Arrhenius plot.
Args:
temps ([float]): A sequence of temperatures.
diffusivities ([float]): A sequence of diffusivities (e.g.,
from DiffusionAnalyzer.diffusivity).
diffusivity_errors ([float]): A sequence of errors for the
diffusivities. If None, no error bar is plotted.
\*\*kwargs:
Any keyword args supported by matplotlib.pyplot.plot.
Returns:
A matplotlib.pyplot object. Do plt.show() to show the plot.
"""
Ea, c, _ = fit_arrhenius(temps, diffusivities)
from pymatgen.util.plotting_utils import get_publication_quality_plot
plt = get_publication_quality_plot(12, 8)
# Arrhenius fit evaluated at the input temperatures (plotted on a log y-scale below)
arr = c * np.exp(-Ea / (const.k / const.e * np.array(temps)))
t_1 = 1000 / np.array(temps)
plt.plot(t_1, diffusivities, 'ko', t_1, arr, 'k--', markersize=10,
**kwargs)
if diffusivity_errors is not None:
n = len(diffusivity_errors)
plt.errorbar(t_1[0:n], diffusivities[0:n], yerr=diffusivity_errors,
fmt='ko', ecolor='k', capthick=2, linewidth=2)
ax = plt.axes()
ax.set_yscale('log')
plt.text(0.6, 0.85, "E$_a$ = {:.0f} meV".format(Ea * 1000),
fontsize=30, transform=plt.axes().transAxes)
plt.ylabel("D (cm$^2$/s)")
plt.xlabel("1000/T (K$^{-1}$)")
plt.tight_layout()
return plt
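# Example (illustrative sketch): generating and saving an Arrhenius plot. The
# error bars are optional and the filename is arbitrary.
#
#     plt = get_arrhenius_plot(temps, diffusivities,
#                              diffusivity_errors=errors)
#     plt.savefig("arrhenius.png")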
|
aykol/pymatgen
|
pymatgen/analysis/diffusion_analyzer.py
|
Python
|
mit
| 38730
|
[
"VASP",
"pymatgen"
] |
245cf3d76e72b7bf9e081680874e79b2cbb0f065a21a6826da81a9b48e2721ea
|