# Source file: Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/
#   tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
# Repository: JustinACoder/H22-GR3-UnrealAI (language: python)
# Selected methods of the GradientBoostedDecisionTreeModel class.
def __init__(self, is_chief, num_ps_replicas, ensemble_handle, center_bias, examples_per_layer, learner_config, features, logits_dimension, loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS, feature_columns=None, use_core_columns=False, output_leaf_index=False, output_leaf_index_modes=None, num_quantiles=100):
"Construct a new GradientBoostedDecisionTreeModel function.\n\n Args:\n is_chief: Whether to build the chief graph.\n num_ps_replicas: Number of parameter server replicas, can be 0.\n ensemble_handle: A handle to the ensemble variable.\n center_bias: Whether to center the bias before growing trees.\n examples_per_layer: Number of examples to accumulate before growing a tree\n layer. It can also be a function that computes the number of examples\n based on the depth of the layer that's being built.\n learner_config: A learner config.\n features: `dict` of `Tensor` objects.\n logits_dimension: An int, the dimension of logits.\n loss_reduction: Either `SUM_OVER_NONZERO_WEIGHTS` (mean) or `SUM`.\n feature_columns: A list of feature columns.\n use_core_columns: A boolean specifying whether core feature columns are\n used.\n output_leaf_index: A boolean variable indicating whether to output leaf\n index into predictions dictionary.\n output_leaf_index_modes: A list of modes from (TRAIN, EVAL, INFER) which\n dictates when leaf indices will be outputted. By default, leaf indices\n are only outputted in INFER mode.\n num_quantiles: Number of quantiles to build for numeric feature values.\n\n Raises:\n ValueError: if inputs are not valid.\n "
if (ensemble_handle is None):
raise ValueError('ensemble_handle must be specified.')
if (learner_config is None):
raise ValueError('learner_config must be specified.')
if (learner_config.num_classes < 2):
raise ValueError('Number of classes must be >=2')
self._logits_dimension = logits_dimension
self._is_chief = is_chief
self._num_ps_replicas = num_ps_replicas
self._ensemble_handle = ensemble_handle
self._center_bias = center_bias
self._examples_per_layer = examples_per_layer
if ((loss_reduction != losses.Reduction.SUM) and (loss_reduction != losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)):
raise ValueError(('Invalid loss reduction is provided: %s.' % loss_reduction))
self._loss_reduction = loss_reduction
if (learner_config.multi_class_strategy == learner_pb2.LearnerConfig.MULTI_CLASS_STRATEGY_UNSPECIFIED):
if (logits_dimension == 1):
learner_config.multi_class_strategy = learner_pb2.LearnerConfig.TREE_PER_CLASS
else:
learner_config.multi_class_strategy = learner_pb2.LearnerConfig.DIAGONAL_HESSIAN
if ((logits_dimension == 1) or (learner_config.multi_class_strategy == learner_pb2.LearnerConfig.TREE_PER_CLASS)):
self._gradient_shape = tensor_shape.scalar()
self._hessian_shape = tensor_shape.scalar()
else:
if center_bias:
raise ValueError('Center bias should be False for multiclass.')
self._gradient_shape = tensor_shape.TensorShape([logits_dimension])
if (learner_config.multi_class_strategy == learner_pb2.LearnerConfig.FULL_HESSIAN):
self._hessian_shape = tensor_shape.TensorShape([logits_dimension, logits_dimension])
else:
self._hessian_shape = tensor_shape.TensorShape([logits_dimension])
if (learner_config.growing_mode == learner_pb2.LearnerConfig.GROWING_MODE_UNSPECIFIED):
learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
if (learner_config.pruning_mode == learner_pb2.LearnerConfig.PRUNING_MODE_UNSPECIFIED):
learner_config.pruning_mode = learner_pb2.LearnerConfig.POST_PRUNE
if (learner_config.constraints.max_tree_depth == 0):
learner_config.constraints.max_tree_depth = 6
tuner = learner_config.learning_rate_tuner.WhichOneof('tuner')
if (not tuner):
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
self._learner_config = learner_config
self._feature_columns = feature_columns
self._learner_config_serialized = learner_config.SerializeToString()
self._num_quantiles = num_quantiles
self._max_tree_depth = variables.VariableV1(initial_value=self._learner_config.constraints.max_tree_depth)
self._attempted_trees = variables.VariableV1(initial_value=array_ops.zeros([], dtypes.int64), trainable=False, name='attempted_trees')
self._finalized_trees = variables.VariableV1(initial_value=array_ops.zeros([], dtypes.int64), trainable=False, name='finalized_trees')
if (not features):
raise ValueError('Features dictionary must be specified.')
(fc_names, dense_floats, sparse_float_indices, sparse_float_values, sparse_float_shapes, sparse_int_indices, sparse_int_values, sparse_int_shapes) = extract_features(features, self._feature_columns, use_core_columns)
logging.info(('Active Feature Columns: ' + str(fc_names)))
logging.info(('Learner config: ' + str(learner_config)))
self._fc_names = fc_names
self._dense_floats = dense_floats
self._sparse_float_indices = sparse_float_indices
self._sparse_float_values = sparse_float_values
self._sparse_float_shapes = sparse_float_shapes
self._sparse_int_indices = sparse_int_indices
self._sparse_int_values = sparse_int_values
self._sparse_int_shapes = sparse_int_shapes
self._reduce_dim = ((self._learner_config.multi_class_strategy == learner_pb2.LearnerConfig.TREE_PER_CLASS) and (learner_config.num_classes == 2))
if (output_leaf_index_modes is None):
output_leaf_index_modes = [learn.ModeKeys.INFER]
elif (not all(((mode in (learn.ModeKeys.TRAIN, learn.ModeKeys.EVAL, learn.ModeKeys.INFER)) for mode in output_leaf_index_modes))):
raise ValueError('output_leaf_index_modes should only contain ModeKeys.')
self._output_leaf_index = output_leaf_index
self._output_leaf_index_modes = output_leaf_index_modes
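
# Illustrative sketch (not part of the original module): the constructor's
# defaulting rules for multi-class strategy and gradient/hessian shapes,
# restated as a pure-Python helper. The strategy strings below are stand-ins
# for the learner_pb2.LearnerConfig enum values used above.
def _default_strategy_and_shapes(logits_dimension, strategy=None):
    """Returns (strategy, gradient_shape, hessian_shape) as plain tuples."""
    if strategy is None:  # MULTI_CLASS_STRATEGY_UNSPECIFIED
        strategy = 'TREE_PER_CLASS' if logits_dimension == 1 else 'DIAGONAL_HESSIAN'
    if logits_dimension == 1 or strategy == 'TREE_PER_CLASS':
        return strategy, (), ()  # scalar gradient and hessian per example
    if strategy == 'FULL_HESSIAN':
        return strategy, (logits_dimension,), (logits_dimension, logits_dimension)
    return strategy, (logits_dimension,), (logits_dimension,)

assert _default_strategy_and_shapes(1) == ('TREE_PER_CLASS', (), ())
assert _default_strategy_and_shapes(3) == ('DIAGONAL_HESSIAN', (3,), (3,))
assert _default_strategy_and_shapes(3, 'FULL_HESSIAN') == ('FULL_HESSIAN', (3,), (3, 3))
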
def _predict_and_return_dict(self, ensemble_handle, ensemble_stamp, mode):
    """Runs prediction and returns a dictionary of the prediction results.

    Args:
      ensemble_handle: ensemble resource handle.
      ensemble_stamp: stamp of ensemble resource.
      mode: learn.ModeKeys.TRAIN or EVAL or INFER.

    Returns:
      A dictionary of prediction results -
      ENSEMBLE_STAMP, PREDICTIONS, PARTITION_IDS,
      NUM_LAYERS_ATTEMPTED, NUM_TREES_ATTEMPTED.
    """
ensemble_stats = training_ops.tree_ensemble_stats(ensemble_handle, ensemble_stamp)
num_handlers = ((len(self._dense_floats) + len(self._sparse_float_shapes)) + len(self._sparse_int_shapes))
used_handlers = model_ops.tree_ensemble_used_handlers(ensemble_handle, ensemble_stamp, num_all_handlers=num_handlers)
(apply_dropout, seed) = _dropout_params(mode, ensemble_stats)
with ops.control_dependencies(ensemble_stats):
leaf_index = None
if (self._output_leaf_index and (mode in self._output_leaf_index_modes)):
(predictions, _, leaf_index) = prediction_ops.gradient_trees_prediction_verbose(ensemble_handle, seed, self._dense_floats, self._sparse_float_indices, self._sparse_float_values, self._sparse_float_shapes, self._sparse_int_indices, self._sparse_int_values, self._sparse_int_shapes, learner_config=self._learner_config_serialized, apply_dropout=apply_dropout, apply_averaging=(mode != learn.ModeKeys.TRAIN), use_locking=True, center_bias=self._center_bias, reduce_dim=self._reduce_dim)
else:
(predictions, _) = prediction_ops.gradient_trees_prediction(ensemble_handle, seed, self._dense_floats, self._sparse_float_indices, self._sparse_float_values, self._sparse_float_shapes, self._sparse_int_indices, self._sparse_int_values, self._sparse_int_shapes, learner_config=self._learner_config_serialized, apply_dropout=apply_dropout, apply_averaging=(mode != learn.ModeKeys.TRAIN), use_locking=True, center_bias=self._center_bias, reduce_dim=self._reduce_dim)
partition_ids = prediction_ops.gradient_trees_partition_examples(ensemble_handle, self._dense_floats, self._sparse_float_indices, self._sparse_float_values, self._sparse_float_shapes, self._sparse_int_indices, self._sparse_int_values, self._sparse_int_shapes, use_locking=True)
return _make_predictions_dict(ensemble_stamp, predictions, partition_ids, ensemble_stats, used_handlers, leaf_index)
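
# Illustrative sketch (not part of the original module): the mode-dependent
# dispatch above, restated over plain strings standing in for learn.ModeKeys.
# The verbose prediction op is chosen only when leaf indices were requested
# for this mode, and ensemble averaging is skipped only during training.
def _prediction_dispatch(mode, output_leaf_index, output_leaf_index_modes):
    """Returns (emit_leaf_index, apply_averaging) for the given mode."""
    emit_leaf_index = output_leaf_index and mode in output_leaf_index_modes
    apply_averaging = mode != 'train'
    return emit_leaf_index, apply_averaging

# By default leaf indices are emitted only in INFER mode.
assert _prediction_dispatch('infer', True, ['infer']) == (True, True)
assert _prediction_dispatch('train', True, ['infer']) == (False, False)
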
def predict(self, mode):
    """Returns predictions given the features and mode.

    Args:
      mode: Mode the graph is running in (train|predict|eval).

    Returns:
      A dict of predictions tensors.

    Raises:
      ValueError: if features is not valid.
    """
input_deps = ((self._dense_floats + self._sparse_float_indices) + self._sparse_int_indices)
if (not input_deps):
raise ValueError('No input tensors for prediction.')
ensemble_stamp = model_ops.tree_ensemble_stamp_token(self._ensemble_handle)
if (self._ensemble_handle.device != input_deps[0].device):
with ops.name_scope('local_ensemble', 'TreeEnsembleVariable') as name:
local_ensemble_handle = gen_model_ops.decision_tree_ensemble_resource_handle_op(name=name)
create_op = gen_model_ops.create_tree_ensemble_variable(local_ensemble_handle, stamp_token=(- 1), tree_ensemble_config='')
with ops.control_dependencies([create_op]):
local_stamp = model_ops.tree_ensemble_stamp_token(local_ensemble_handle)
def _refresh_local_ensemble_fn():
with ops.control_dependencies([input_deps[0]]):
(ensemble_stamp, serialized_model) = model_ops.tree_ensemble_serialize(self._ensemble_handle)
with ops.control_dependencies([create_op]):
return (model_ops.tree_ensemble_deserialize(local_ensemble_handle, stamp_token=ensemble_stamp, tree_ensemble_config=serialized_model), ensemble_stamp)
(refresh_local_ensemble, ensemble_stamp) = control_flow_ops.cond(math_ops.not_equal(ensemble_stamp, local_stamp), _refresh_local_ensemble_fn, (lambda : (control_flow_ops.no_op(), ensemble_stamp)))
with ops.control_dependencies([refresh_local_ensemble]):
return self._predict_and_return_dict(local_ensemble_handle, ensemble_stamp, mode)
else:
with ops.device(self._ensemble_handle.device):
return self._predict_and_return_dict(self._ensemble_handle, ensemble_stamp, mode)
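
# Illustrative sketch (not part of the original module): the stamp-checked
# local-replica pattern above, restated without TensorFlow. A worker keeps a
# local copy of the ensemble and re-fetches the authoritative copy only when
# the stamp tokens no longer match; fetch_master is a hypothetical callable
# standing in for tree_ensemble_serialize/tree_ensemble_deserialize.
class _LocalReplica(object):
    def __init__(self):
        self.stamp = -1   # mirrors create_tree_ensemble_variable(stamp_token=-1)
        self.model = None

    def refresh_if_stale(self, master_stamp, fetch_master):
        if self.stamp != master_stamp:  # cond(not_equal(ensemble_stamp, local_stamp))
            self.model = fetch_master()
            self.stamp = master_stamp
        return self.model

replica = _LocalReplica()
assert replica.refresh_if_stale(3, lambda: 'ensemble@3') == 'ensemble@3'
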
def update_stats(self, loss, predictions_dict):
    """Update the accumulators with stats from this batch.

    Args:
      loss: A scalar tensor representing average loss of examples.
      predictions_dict: Dictionary of Rank 2 `Tensor` representing information
        about predictions per example.

    Returns:
      Three values:
        - A list of ops that update the stats accumulators for this batch.
        - An op that increments the stamp but removes all the trees and resets
          the handlers. This can be used to reset the state of the ensemble.
        - The training state, as a `GBDTTrainingState` named tuple.

    Raises:
      ValueError: if inputs are not valid.
    """
input_deps = ((self._dense_floats + self._sparse_float_indices) + self._sparse_int_indices)
worker_device = input_deps[0].device
predictions = predictions_dict[PREDICTIONS]
partition_ids = predictions_dict[PARTITION_IDS]
ensemble_stamp = predictions_dict[ENSEMBLE_STAMP]
gradients = gradients_impl.gradients(loss, predictions, name='Gradients', colocate_gradients_with_ops=False, gate_gradients=0, aggregation_method=None)[0]
strategy = self._learner_config.multi_class_strategy
class_id = self._get_class_id(predictions_dict)
if (strategy == learner_pb2.LearnerConfig.TREE_PER_CLASS):
if (self._logits_dimension == 1):
hessians = gradients_impl.gradients(gradients, predictions, name='Hessian', colocate_gradients_with_ops=False, gate_gradients=0, aggregation_method=None)[0]
squeezed_gradients = array_ops.squeeze(gradients, axis=[1])
squeezed_hessians = array_ops.squeeze(hessians, axis=[1])
else:
hessian_list = self._diagonal_hessian(gradients, predictions)
hessians = array_ops.stack(hessian_list, axis=1)
squeezed_gradients = array_ops.squeeze(_get_column_by_index(gradients, class_id))
squeezed_hessians = array_ops.squeeze(_get_column_by_index(hessians, class_id))
else:
if (strategy == learner_pb2.LearnerConfig.FULL_HESSIAN):
hessian_list = self._full_hessian(gradients, predictions)
else:
hessian_list = self._diagonal_hessian(gradients, predictions)
squeezed_gradients = gradients
hessians = array_ops.stack(hessian_list, axis=1)
squeezed_hessians = hessians
weights = self._get_weights(self._hessian_shape, squeezed_hessians)
fc_name_idx = 0
handlers = []
init_stamp_token = constant_op.constant(0, dtype=dtypes.int64)
l1_regularization = constant_op.constant(self._learner_config.regularization.l1, dtypes.float32)
l2_regularization = constant_op.constant(self._learner_config.regularization.l2, dtypes.float32)
tree_complexity_regularization = constant_op.constant(self._learner_config.regularization.tree_complexity, dtypes.float32)
min_node_weight = constant_op.constant(self._learner_config.constraints.min_node_weight, dtypes.float32)
loss_uses_sum_reduction = (self._loss_reduction == losses.Reduction.SUM)
loss_uses_sum_reduction = constant_op.constant(loss_uses_sum_reduction)
weak_learner_type = constant_op.constant(self._learner_config.weak_learner_type)
num_quantiles = self._num_quantiles
epsilon = (1.0 / num_quantiles)
strategy_tensor = constant_op.constant(strategy)
with ops.device(self._get_replica_device_setter(worker_device)):
for dense_float_column_idx in range(len(self._dense_floats)):
fc_name = self._fc_names[fc_name_idx]
handlers.append(ordinal_split_handler.DenseSplitHandler(l1_regularization=l1_regularization, l2_regularization=l2_regularization, tree_complexity_regularization=tree_complexity_regularization, min_node_weight=min_node_weight, feature_column_group_id=constant_op.constant(dense_float_column_idx), epsilon=epsilon, num_quantiles=num_quantiles, dense_float_column=self._dense_floats[dense_float_column_idx], name=fc_name, gradient_shape=self._gradient_shape, hessian_shape=self._hessian_shape, multiclass_strategy=strategy_tensor, init_stamp_token=init_stamp_token, loss_uses_sum_reduction=loss_uses_sum_reduction, weak_learner_type=weak_learner_type))
fc_name_idx += 1
for sparse_float_column_idx in range(len(self._sparse_float_indices)):
fc_name = self._fc_names[fc_name_idx]
handlers.append(ordinal_split_handler.SparseSplitHandler(l1_regularization=l1_regularization, l2_regularization=l2_regularization, tree_complexity_regularization=tree_complexity_regularization, min_node_weight=min_node_weight, feature_column_group_id=constant_op.constant(sparse_float_column_idx), epsilon=epsilon, num_quantiles=num_quantiles, sparse_float_column=sparse_tensor.SparseTensor(self._sparse_float_indices[sparse_float_column_idx], self._sparse_float_values[sparse_float_column_idx], self._sparse_float_shapes[sparse_float_column_idx]), name=fc_name, gradient_shape=self._gradient_shape, hessian_shape=self._hessian_shape, multiclass_strategy=strategy_tensor, init_stamp_token=init_stamp_token, loss_uses_sum_reduction=loss_uses_sum_reduction))
fc_name_idx += 1
for sparse_int_column_idx in range(len(self._sparse_int_indices)):
fc_name = self._fc_names[fc_name_idx]
handlers.append(categorical_split_handler.EqualitySplitHandler(l1_regularization=l1_regularization, l2_regularization=l2_regularization, tree_complexity_regularization=tree_complexity_regularization, min_node_weight=min_node_weight, feature_column_group_id=constant_op.constant(sparse_int_column_idx), sparse_int_column=sparse_tensor.SparseTensor(self._sparse_int_indices[sparse_int_column_idx], self._sparse_int_values[sparse_int_column_idx], self._sparse_int_shapes[sparse_int_column_idx]), name=fc_name, gradient_shape=self._gradient_shape, hessian_shape=self._hessian_shape, multiclass_strategy=strategy_tensor, init_stamp_token=init_stamp_token, loss_uses_sum_reduction=loss_uses_sum_reduction, weak_learner_type=weak_learner_type))
fc_name_idx += 1
num_layer_examples = variables.VariableV1(initial_value=array_ops.zeros([], dtypes.int64), name='num_layer_examples', trainable=False)
num_layer_steps = variables.VariableV1(initial_value=array_ops.zeros([], dtypes.int64), name='num_layer_steps', trainable=False)
num_layers = variables.VariableV1(initial_value=array_ops.zeros([], dtypes.int64), name='num_layers', trainable=False)
active_tree = variables.VariableV1(initial_value=array_ops.zeros([], dtypes.int64), name='active_tree', trainable=False)
active_layer = variables.VariableV1(initial_value=array_ops.zeros([], dtypes.int64), name='active_layer', trainable=False)
continue_centering = variables.VariableV1(initial_value=self._center_bias, name='continue_centering', trainable=False)
bias_stats_accumulator = stats_accumulator_ops.StatsAccumulator(stamp_token=0, gradient_shape=self._gradient_shape, hessian_shape=self._hessian_shape, name='BiasAccumulator')
steps_accumulator = stats_accumulator_ops.StatsAccumulator(stamp_token=0, gradient_shape=tensor_shape.scalar(), hessian_shape=tensor_shape.scalar(), name='StepsAccumulator')
summary.scalar('layer_stats/num_examples', num_layer_examples)
summary.scalar('layer_stats/num_steps', num_layer_steps)
summary.scalar('ensemble_stats/active_tree', active_tree)
summary.scalar('ensemble_stats/active_layer', active_layer)
stats_update_ops = []
stats_update_ops.append(control_flow_ops.cond(continue_centering, self._make_update_bias_stats_fn(ensemble_stamp, predictions, gradients, bias_stats_accumulator), control_flow_ops.no_op))
handler_reads = collections.OrderedDict()
for handler in handlers:
handler_reads[handler] = handler.scheduled_reads()
handler_results = batch_ops_utils.run_handler_scheduled_ops(handler_reads, ensemble_stamp, worker_device)
per_handler_updates = collections.OrderedDict()
subsampling_type = self._learner_config.WhichOneof('feature_fraction')
if (subsampling_type == 'feature_fraction_per_level'):
seed = predictions_dict[NUM_LAYERS_ATTEMPTED]
active_handlers_current_layer = stateless.stateless_random_uniform(shape=[len(handlers)], seed=[seed, 1])
active_handlers_next_layer = stateless.stateless_random_uniform(shape=[len(handlers)], seed=[(seed + 1), 1])
active_handlers = array_ops.stack([active_handlers_current_layer, active_handlers_next_layer], axis=1)
active_handlers = (active_handlers < self._learner_config.feature_fraction_per_level)
elif (subsampling_type == 'feature_fraction_per_tree'):
seed = predictions_dict[NUM_TREES_ATTEMPTED]
active_handlers_current_layer = stateless.stateless_random_uniform(shape=[len(handlers)], seed=[seed, 2])
active_handlers_current_layer = (active_handlers_current_layer < self._learner_config.feature_fraction_per_tree)
active_handlers = array_ops.stack([active_handlers_current_layer, array_ops.ones([len(handlers)], dtype=dtypes.bool)], axis=1)
else:
active_handlers = array_ops.ones([len(handlers), 2], dtype=dtypes.bool)
if self._learner_config.constraints.max_number_of_unique_feature_columns:
target = self._learner_config.constraints.max_number_of_unique_feature_columns
def _feature_selection_active_handlers():
used_handlers = array_ops.reshape(predictions_dict[USED_HANDLERS_MASK], [(- 1), 1])
used_handlers = array_ops.concat([used_handlers, used_handlers], axis=1)
return math_ops.logical_and(used_handlers, active_handlers)
active_handlers = control_flow_ops.cond((predictions_dict[NUM_USED_HANDLERS] >= target), _feature_selection_active_handlers, (lambda : active_handlers))
empty_hess_shape = ([1] + self._hessian_shape.as_list())
empty_grad_shape = ([1] + self._gradient_shape.as_list())
empty_gradients = constant_op.constant([], dtype=dtypes.float32, shape=empty_grad_shape)
empty_hessians = constant_op.constant([], dtype=dtypes.float32, shape=empty_hess_shape)
active_handlers = array_ops.unstack(active_handlers, axis=0)
for handler_idx in range(len(handlers)):
handler = handlers[handler_idx]
is_active = active_handlers[handler_idx]
(updates, scheduled_updates) = handler.update_stats(ensemble_stamp, partition_ids, squeezed_gradients, squeezed_hessians, empty_gradients, empty_hessians, weights, is_active, handler_results[handler])
stats_update_ops.append(updates)
per_handler_updates[handler] = scheduled_updates
update_results = batch_ops_utils.run_handler_scheduled_ops(per_handler_updates, ensemble_stamp, worker_device)
for update in update_results.values():
stats_update_ops += update
training_state = GBDTTrainingState(num_layer_examples=num_layer_examples, num_layer_steps=num_layer_steps, num_layers=num_layers, active_tree=active_tree, active_layer=active_layer, continue_centering=continue_centering, bias_stats_accumulator=bias_stats_accumulator, steps_accumulator=steps_accumulator, handlers=handlers)
reset_op = control_flow_ops.no_op()
if self._is_chief:
(stamp_token, _) = model_ops.tree_ensemble_serialize(self._ensemble_handle)
next_stamp_token = (stamp_token + 1)
reset_ops = []
for handler in handlers:
reset_ops.append(handler.reset(stamp_token, next_stamp_token))
if self._center_bias:
reset_ops.append(bias_stats_accumulator.flush(stamp_token, next_stamp_token))
reset_ops.append(steps_accumulator.flush(stamp_token, next_stamp_token))
reset_ops.append(self._finalized_trees.assign(0).op)
reset_ops.append(self._attempted_trees.assign(0).op)
reset_ops.append(model_ops.tree_ensemble_deserialize(self._ensemble_handle, stamp_token=next_stamp_token, tree_ensemble_config='', name='reset_gbdt'))
reset_op = control_flow_ops.group([reset_ops])
return (stats_update_ops, reset_op, training_state)
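
# Illustrative sketch (not part of the original module): the
# feature_fraction_per_level subsampling above, restated with the standard
# library. One uniform draw per handler is made from a seed derived from the
# layer counter; handlers whose draw falls below the fraction stay active.
# Because the seed is deterministic, every worker that sees the same counter
# activates the same handlers (the role of stateless_random_uniform above).
import random

def _active_handlers_for_layer(num_handlers, layer_seed, fraction):
    rng = random.Random(layer_seed)  # stand-in for the stateless op
    return [rng.random() < fraction for _ in range(num_handlers)]

# Identical seeds yield identical masks across workers.
assert (_active_handlers_for_layer(5, layer_seed=7, fraction=0.5) ==
        _active_handlers_for_layer(5, layer_seed=7, fraction=0.5))
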
def increment_step_counter_and_maybe_update_ensemble(self, predictions_dict, training_state):
    """Increments number of visited examples and grows the ensemble.

    If the number of visited examples reaches the target examples_per_layer,
    the ensemble is updated.

    Args:
      predictions_dict: Dictionary of Rank 2 `Tensor` representing information
        about predictions per example.
      training_state: `GBDTTrainingState` returned by update_stats.

    Returns:
      An op that updates the counters and potentially grows the ensemble.
    """
batch_size = math_ops.cast(array_ops.shape(predictions_dict[PREDICTIONS])[0], dtypes.float32)
ensemble_stamp = predictions_dict[ENSEMBLE_STAMP]
steps_accumulator = training_state.steps_accumulator
num_layer_examples = training_state.num_layer_examples
num_layer_steps = training_state.num_layer_steps
active_layer = training_state.active_layer
add_step_op = steps_accumulator.add(ensemble_stamp, [0], [[0, 0]], [batch_size], [1.0])
ensemble_update_ops = [add_step_op]
class_id = self._get_class_id(predictions_dict)
with ops.control_dependencies([add_step_op]):
if self._is_chief:
dropout_seed = predictions_dict[NUM_TREES_ATTEMPTED]
(_, _, _, _, acc_examples, acc_steps) = steps_accumulator.serialize()
acc_examples = math_ops.cast(acc_examples[0], dtypes.int64)
acc_steps = math_ops.cast(acc_steps[0], dtypes.int64)
ensemble_update_ops.append(num_layer_examples.assign(acc_examples))
ensemble_update_ops.append(num_layer_steps.assign(acc_steps))
examples_per_layer = self._examples_per_layer
if callable(examples_per_layer):
examples_per_layer = examples_per_layer(active_layer)
ensemble_update_ops.append(control_flow_ops.cond((acc_examples >= examples_per_layer), self.make_update_ensemble_fn(ensemble_stamp, training_state, dropout_seed, class_id), control_flow_ops.no_op))
return control_flow_ops.group(*ensemble_update_ops)
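
# Illustrative sketch (not part of the original module): the accumulate-then-
# grow policy above, restated as a plain loop. Each batch only adds to the
# example counter; a layer is grown once examples_per_layer is reached, after
# which the counter is flushed (as the steps accumulator is on ensemble
# update).
def _train_layers(batch_sizes, examples_per_layer):
    layers_grown, acc_examples = 0, 0
    for batch_size in batch_sizes:
        acc_examples += batch_size
        if acc_examples >= examples_per_layer:
            layers_grown += 1
            acc_examples = 0
    return layers_grown

assert _train_layers([100, 100, 100, 100], examples_per_layer=250) == 1
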
def make_update_ensemble_fn(self, ensemble_stamp, training_state, dropout_seed, class_id):
'A method to create the function which updates the tree ensemble.'
learning_rate_tuner = self._learner_config.learning_rate_tuner.WhichOneof('tuner')
if ((learning_rate_tuner == 'fixed') or (learning_rate_tuner == 'dropout')):
tuner = getattr(self._learner_config.learning_rate_tuner, learning_rate_tuner)
learning_rate = tuner.learning_rate
else:
raise ValueError('Line search learning rate is not yet supported.')
def _update_ensemble():
'A method to update the tree ensemble.'
next_ensemble_stamp = (ensemble_stamp + 1)
(_, _, _, bias_grads, bias_hess) = training_state.bias_stats_accumulator.flush(ensemble_stamp, next_ensemble_stamp)
are_splits_ready_list = []
partition_ids_list = []
gains_list = []
split_info_list = []
for handler in training_state.handlers:
(are_splits_ready, partition_ids, gains, split_info) = handler.make_splits(ensemble_stamp, next_ensemble_stamp, class_id)
are_splits_ready_list.append(are_splits_ready)
partition_ids_list.append(partition_ids)
gains_list.append(gains)
split_info_list.append(split_info)
split_sizes = array_ops.reshape(array_ops.shape_n(partition_ids_list), [len(partition_ids_list)])
partition_ids = array_ops.concat(partition_ids_list, axis=0)
gains = array_ops.concat(gains_list, axis=0)
split_infos = array_ops.concat(split_info_list, axis=0)
are_all_splits_ready = math_ops.reduce_all(array_ops.stack(are_splits_ready_list, axis=0, name='stack_handler_readiness'))
def _center_bias_fn():
delta_updates = array_ops.where((bias_hess > 0), ((- bias_grads) / bias_hess), array_ops.zeros_like(bias_grads))
center_bias = training_ops.center_tree_ensemble_bias(tree_ensemble_handle=self._ensemble_handle, stamp_token=ensemble_stamp, next_stamp_token=next_ensemble_stamp, delta_updates=delta_updates, learner_config=self._learner_config_serialized)
return training_state.continue_centering.assign(center_bias)
def _grow_ensemble_ready_fn():
sizes = array_ops.unstack(split_sizes)
partition_ids_list = list(array_ops.split(partition_ids, sizes, axis=0))
if (self._learner_config.weak_learner_type == learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE):
sizes = len(training_state.handlers)
gains_list = list(array_ops.split(gains, sizes, axis=0))
split_info_list = list(array_ops.split(split_infos, sizes, axis=0))
return training_ops.grow_tree_ensemble(tree_ensemble_handle=self._ensemble_handle, stamp_token=ensemble_stamp, next_stamp_token=next_ensemble_stamp, learning_rate=learning_rate, partition_ids=partition_ids_list, gains=gains_list, splits=split_info_list, learner_config=self._learner_config_serialized, dropout_seed=dropout_seed, center_bias=self._center_bias, max_tree_depth=self._max_tree_depth, weak_learner_type=self._learner_config.weak_learner_type)
def _grow_ensemble_not_ready_fn():
return training_ops.grow_tree_ensemble(tree_ensemble_handle=self._ensemble_handle, stamp_token=ensemble_stamp, next_stamp_token=next_ensemble_stamp, learning_rate=0, partition_ids=[], gains=[], splits=[], learner_config=self._learner_config_serialized, dropout_seed=dropout_seed, center_bias=self._center_bias, max_tree_depth=self._max_tree_depth, weak_learner_type=self._learner_config.weak_learner_type)
def _grow_ensemble_fn():
return control_flow_ops.cond(are_all_splits_ready, _grow_ensemble_ready_fn, _grow_ensemble_not_ready_fn)
update_ops = [are_all_splits_ready]
if self._center_bias:
update_model = control_flow_ops.cond(training_state.continue_centering, _center_bias_fn, _grow_ensemble_fn)
else:
update_model = _grow_ensemble_fn()
update_ops.append(update_model)
with ops.control_dependencies([update_model]):
stats = training_ops.tree_ensemble_stats(self._ensemble_handle, stamp_token=next_ensemble_stamp)
update_ops.append(self._finalized_trees.assign(stats.num_trees))
update_ops.append(self._attempted_trees.assign(stats.attempted_trees))
update_ops.append(training_state.num_layers.assign(stats.num_layers))
update_ops.append(training_state.active_tree.assign(stats.active_tree))
update_ops.append(training_state.active_layer.assign(stats.active_layer))
update_ops.extend(training_state.steps_accumulator.flush(ensemble_stamp, next_ensemble_stamp))
return control_flow_ops.group(*update_ops, name='update_ensemble')
return _update_ensemble
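
# Illustrative sketch (not part of the original module): the bias-centering
# update computed in _center_bias_fn above. Each logit's bias moves by a
# Newton step -g/h, with a zero update wherever the accumulated hessian is
# not positive (mirroring the array_ops.where guard).
def _bias_delta_updates(bias_grads, bias_hess):
    return [(-g / h) if h > 0 else 0.0 for g, h in zip(bias_grads, bias_hess)]

assert _bias_delta_updates([0.5, -1.0], [2.0, 0.0]) == [-0.25, 0.0]
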
| -8,148,777,286,915,548,000
|
A method to create the function which updates the tree ensemble.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
|
make_update_ensemble_fn
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def make_update_ensemble_fn(self, ensemble_stamp, training_state, dropout_seed, class_id):
learning_rate_tuner = self._learner_config.learning_rate_tuner.WhichOneof('tuner')
if ((learning_rate_tuner == 'fixed') or (learning_rate_tuner == 'dropout')):
tuner = getattr(self._learner_config.learning_rate_tuner, learning_rate_tuner)
learning_rate = tuner.learning_rate
else:
raise ValueError('Line search learning rate is not yet supported.')
def _update_ensemble():
'A method to update the tree ensemble.'
next_ensemble_stamp = (ensemble_stamp + 1)
(_, _, _, bias_grads, bias_hess) = training_state.bias_stats_accumulator.flush(ensemble_stamp, next_ensemble_stamp)
are_splits_ready_list = []
partition_ids_list = []
gains_list = []
split_info_list = []
for handler in training_state.handlers:
(are_splits_ready, partition_ids, gains, split_info) = handler.make_splits(ensemble_stamp, next_ensemble_stamp, class_id)
are_splits_ready_list.append(are_splits_ready)
partition_ids_list.append(partition_ids)
gains_list.append(gains)
split_info_list.append(split_info)
split_sizes = array_ops.reshape(array_ops.shape_n(partition_ids_list), [len(partition_ids_list)])
partition_ids = array_ops.concat(partition_ids_list, axis=0)
gains = array_ops.concat(gains_list, axis=0)
split_infos = array_ops.concat(split_info_list, axis=0)
are_all_splits_ready = math_ops.reduce_all(array_ops.stack(are_splits_ready_list, axis=0, name='stack_handler_readiness'))
def _center_bias_fn():
delta_updates = array_ops.where((bias_hess > 0), ((- bias_grads) / bias_hess), array_ops.zeros_like(bias_grads))
center_bias = training_ops.center_tree_ensemble_bias(tree_ensemble_handle=self._ensemble_handle, stamp_token=ensemble_stamp, next_stamp_token=next_ensemble_stamp, delta_updates=delta_updates, learner_config=self._learner_config_serialized)
return training_state.continue_centering.assign(center_bias)
def _grow_ensemble_ready_fn():
sizes = array_ops.unstack(split_sizes)
partition_ids_list = list(array_ops.split(partition_ids, sizes, axis=0))
if (self._learner_config.weak_learner_type == learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE):
sizes = len(training_state.handlers)
gains_list = list(array_ops.split(gains, sizes, axis=0))
split_info_list = list(array_ops.split(split_infos, sizes, axis=0))
return training_ops.grow_tree_ensemble(tree_ensemble_handle=self._ensemble_handle, stamp_token=ensemble_stamp, next_stamp_token=next_ensemble_stamp, learning_rate=learning_rate, partition_ids=partition_ids_list, gains=gains_list, splits=split_info_list, learner_config=self._learner_config_serialized, dropout_seed=dropout_seed, center_bias=self._center_bias, max_tree_depth=self._max_tree_depth, weak_learner_type=self._learner_config.weak_learner_type)
def _grow_ensemble_not_ready_fn():
return training_ops.grow_tree_ensemble(tree_ensemble_handle=self._ensemble_handle, stamp_token=ensemble_stamp, next_stamp_token=next_ensemble_stamp, learning_rate=0, partition_ids=[], gains=[], splits=[], learner_config=self._learner_config_serialized, dropout_seed=dropout_seed, center_bias=self._center_bias, max_tree_depth=self._max_tree_depth, weak_learner_type=self._learner_config.weak_learner_type)
def _grow_ensemble_fn():
return control_flow_ops.cond(are_all_splits_ready, _grow_ensemble_ready_fn, _grow_ensemble_not_ready_fn)
update_ops = [are_all_splits_ready]
if self._center_bias:
update_model = control_flow_ops.cond(training_state.continue_centering, _center_bias_fn, _grow_ensemble_fn)
else:
update_model = _grow_ensemble_fn()
update_ops.append(update_model)
with ops.control_dependencies([update_model]):
stats = training_ops.tree_ensemble_stats(self._ensemble_handle, stamp_token=next_ensemble_stamp)
update_ops.append(self._finalized_trees.assign(stats.num_trees))
update_ops.append(self._attempted_trees.assign(stats.attempted_trees))
update_ops.append(training_state.num_layers.assign(stats.num_layers))
update_ops.append(training_state.active_tree.assign(stats.active_tree))
update_ops.append(training_state.active_layer.assign(stats.active_layer))
update_ops.extend(training_state.steps_accumulator.flush(ensemble_stamp, next_ensemble_stamp))
return control_flow_ops.group(*update_ops, name='update_ensemble')
return _update_ensemble
|
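The update function above only grows a tree layer once every split handler has flushed enough statistics; otherwise it still calls grow_tree_ensemble, but with learning_rate=0 and empty splits so only the ensemble stamp advances. A minimal sketch of that readiness gate, assuming TensorFlow 1.x graph mode (handler_ready_flags, grow_fn and noop_fn are hypothetical stand-ins for the accumulator readiness tensors and the two grow_tree_ensemble calls):

import tensorflow as tf

def gated_update(handler_ready_flags, grow_fn, noop_fn):
    # Every handler must be ready before a layer is grown.
    are_all_ready = tf.reduce_all(tf.stack(handler_ready_flags))
    return tf.cond(are_all_ready, grow_fn, noop_fn)

ready = [tf.constant(True), tf.constant(False)]
update = gated_update(ready,
                      grow_fn=lambda: tf.constant(1),  # stands in for the real grow op
                      noop_fn=lambda: tf.constant(0))  # stands in for the stamp-only update
with tf.Session() as sess:
    print(sess.run(update))  # 0, because one handler is not ready yet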
def train(self, loss, predictions_dict, labels):
'Updates the accumulator stats and grows the ensemble.\n\n    Args:\n      loss: A scalar tensor representing average loss of examples.\n      predictions_dict: Dictionary of Rank 2 `Tensor` representing information\n        about predictions per example.\n      labels: Rank 2 `Tensor` representing labels per example. Has no effect\n        on the training and is only kept for backward compatibility.\n\n    Returns:\n      An op that adds a new tree to the ensemble.\n\n    Raises:\n      ValueError: if inputs are not valid.\n    '
del labels
(update_op, _, training_state) = self.update_stats(loss, predictions_dict)
with ops.control_dependencies(update_op):
return self.increment_step_counter_and_maybe_update_ensemble(predictions_dict, training_state)
| -7,892,417,426,828,169,000
|
Updates the accumulator stats and grows the ensemble.
Args:
loss: A scalar tensor representing average loss of examples.
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
labels: Rank 2 `Tensor` representing labels per example. Has no effect
on the training and is only kept for backward compatibility.
Returns:
An op that adds a new tree to the ensemble.
Raises:
ValueError: if inputs are not valid.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
|
train
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def train(self, loss, predictions_dict, labels):
'Updates the accumulator stats and grows the ensemble.\n\n    Args:\n      loss: A scalar tensor representing average loss of examples.\n      predictions_dict: Dictionary of Rank 2 `Tensor` representing information\n        about predictions per example.\n      labels: Rank 2 `Tensor` representing labels per example. Has no effect\n        on the training and is only kept for backward compatibility.\n\n    Returns:\n      An op that adds a new tree to the ensemble.\n\n    Raises:\n      ValueError: if inputs are not valid.\n    '
del labels
(update_op, _, training_state) = self.update_stats(loss, predictions_dict)
with ops.control_dependencies(update_op):
return self.increment_step_counter_and_maybe_update_ensemble(predictions_dict, training_state)
|
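train() relies on graph-level ordering rather than Python ordering: the stats-update ops are attached as control dependencies, so they are guaranteed to execute before the step counter and ensemble update are evaluated. A minimal sketch of that contract, assuming TensorFlow 1.x graph mode (the two variables are hypothetical stand-ins for the accumulators):

import tensorflow as tf

stats = tf.Variable(0.0, name='accumulated_stats')
counter = tf.Variable(0, name='step_counter')

update_stats_op = stats.assign_add(1.0)
with tf.control_dependencies([update_stats_op]):
    # Created inside the context, so it cannot run before update_stats_op.
    train_op = counter.assign_add(1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
    print(sess.run([counter, stats]))  # [1, 1.0]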
def _get_weights(self, hessian_shape, hessians):
'Derives weights to be used based on hessians and multiclass strategy.'
if (hessian_shape == tensor_shape.scalar()):
weights = hessians
elif (len(hessian_shape.dims) == 1):
weights = math_ops.reduce_sum(hessians, axis=1)
else:
weights = math_ops.trace(hessians)
return weights
| 4,915,859,988,237,479,000
|
Derives weights to be used based on hessians and multiclass strategy.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
|
_get_weights
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def _get_weights(self, hessian_shape, hessians):
if (hessian_shape == tensor_shape.scalar()):
weights = hessians
elif (len(hessian_shape.dims) == 1):
weights = math_ops.reduce_sum(hessians, axis=1)
else:
weights = math_ops.trace(hessians)
return weights
|
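_get_weights collapses each example's hessian information to a single scalar weight, and the collapse depends on the multiclass strategy. A NumPy illustration of the three cases (shapes assumed: (batch,) for scalar, (batch, k) for diagonal, and (batch, k, k) for full hessians):

import numpy as np

scalar_hess = np.array([0.5, 0.25])                  # scalar strategy: pass through
diag_hess = np.array([[0.5, 0.1], [0.2, 0.3]])       # diagonal strategy: row sums
full_hess = np.array([np.eye(2) * 0.5,
                      np.eye(2) * 0.2])              # full strategy: per-example trace

print(scalar_hess)                             # [0.5  0.25]
print(diag_hess.sum(axis=1))                   # [0.6  0.5]
print(np.trace(full_hess, axis1=1, axis2=2))   # [1.0  0.4]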
def _full_hessian(self, grads, predictions):
'Prepares hessians for full-hessian multiclass strategy.'
gradients_list = array_ops.unstack(grads, num=self._logits_dimension, axis=1)
hessian_rows = []
for row in range(self._logits_dimension):
hessian_row = gradients_impl.gradients(gradients_list[row], predictions, name=('Hessian_%d' % row), colocate_gradients_with_ops=False, gate_gradients=0, aggregation_method=None)
hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0])
hessian_rows.append(hessian_row)
return hessian_rows
| -7,186,014,364,833,011,000
|
Prepares hessians for full-hessian multiclass strategy.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
|
_full_hessian
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def _full_hessian(self, grads, predictions):
gradients_list = array_ops.unstack(grads, num=self._logits_dimension, axis=1)
hessian_rows = []
for row in range(self._logits_dimension):
hessian_row = gradients_impl.gradients(gradients_list[row], predictions, name=('Hessian_%d' % row), colocate_gradients_with_ops=False, gate_gradients=0, aggregation_method=None)
hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0])
hessian_rows.append(hessian_row)
return hessian_rows
|
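_full_hessian builds the hessian one row at a time by differentiating each component of the gradient with respect to the predictions again. A hedged sketch of the same double-differentiation trick, assuming TensorFlow 1.x and using a tiny quadratic loss whose hessian (2*I) is easy to verify; this is an illustration, not the contrib code path:

import tensorflow as tf

preds = tf.constant([[1.0, 2.0]])          # (batch=1, logits=2)
loss = tf.reduce_sum(tf.square(preds))     # hessian w.r.t. preds is 2*I
grads = tf.gradients(loss, preds)[0]       # shape (1, 2)

rows = []
for k in range(2):
    # Differentiating the k-th gradient component yields the k-th hessian row.
    rows.append(tf.gradients(grads[:, k], preds)[0])
hessian = tf.stack(rows, axis=1)           # shape (batch, 2, 2)

with tf.Session() as sess:
    print(sess.run(hessian))               # [[[2. 0.] [0. 2.]]]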
def _diagonal_hessian(self, grads, predictions):
'Prepares hessians for diagonal-hessian multiclass mode.'
diag_hessian_list = []
gradients_list = array_ops.unstack(grads, num=self._logits_dimension, axis=1)
for (row, row_grads) in enumerate(gradients_list):
hessian_row = gradients_impl.gradients(row_grads, predictions, name=('Hessian_%d' % row), colocate_gradients_with_ops=False, gate_gradients=0, aggregation_method=None)
hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0])
elem = array_ops.transpose(hessian_row)[row]
diag_hessian_list.append(elem)
return diag_hessian_list
| 4,202,547,455,968,546,300
|
Prepares hessians for diagonal-hessian multiclass mode.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
|
_diagonal_hessian
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def _diagonal_hessian(self, grads, predictions):
diag_hessian_list = []
gradients_list = array_ops.unstack(grads, num=self._logits_dimension, axis=1)
for (row, row_grads) in enumerate(gradients_list):
hessian_row = gradients_impl.gradients(row_grads, predictions, name=('Hessian_%d' % row), colocate_gradients_with_ops=False, gate_gradients=0, aggregation_method=None)
hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0])
elem = array_ops.transpose(hessian_row)[row]
diag_hessian_list.append(elem)
return diag_hessian_list
|
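The only difference from the full-hessian case is the final indexing step: array_ops.transpose(hessian_row)[row] picks out the diagonal entry for every example in the batch at once. In NumPy terms:

import numpy as np

batch, num_classes = 3, 2
hessian_row = np.arange(batch * num_classes, dtype=float).reshape(batch, num_classes)
row = 1
# Transposing and indexing a row is the same as selecting a column:
print(hessian_row.T[row])    # [1. 3. 5.]
print(hessian_row[:, row])   # identical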
def _get_replica_device_setter(self, worker_device):
'Creates a replica device setter.'
ps_tasks = self._num_ps_replicas
ps_ops = ['Variable', 'VariableV2', 'DecisionTreeEnsembleResourceHandleOp', 'StatsAccumulatorScalarResourceHandleOp', 'StatsAccumulatorTensorResourceHandleOp']
ps_strategy = _OpRoundRobinStrategy(ps_ops, ps_tasks)
return device_setter.replica_device_setter(worker_device=worker_device, ps_tasks=ps_tasks, merge_devices=True, ps_ops=ps_ops, ps_strategy=ps_strategy)
| 501,583,640,530,627,840
|
Creates a replica device setter.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
|
_get_replica_device_setter
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def _get_replica_device_setter(self, worker_device):
ps_tasks = self._num_ps_replicas
ps_ops = ['Variable', 'VariableV2', 'DecisionTreeEnsembleResourceHandleOp', 'StatsAccumulatorScalarResourceHandleOp', 'StatsAccumulatorTensorResourceHandleOp']
ps_strategy = _OpRoundRobinStrategy(ps_ops, ps_tasks)
return device_setter.replica_device_setter(worker_device=worker_device, ps_tasks=ps_tasks, merge_devices=True, ps_ops=ps_ops, ps_strategy=ps_strategy)
|
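A device setter is consumed as an argument to tf.device: variables created inside the scope land on parameter servers while compute ops stay on the worker. A minimal usage sketch, assuming TensorFlow 1.x (the contrib-specific round-robin strategy over tree-ensemble resource ops is omitted here):

import tensorflow as tf

setter = tf.train.replica_device_setter(
    ps_tasks=2, worker_device='/job:worker/task:0', merge_devices=True)
with tf.device(setter):
    v = tf.Variable(tf.zeros([10]), name='v')  # assigned to a ps task
print(v.device)  # e.g. /job:ps/task:0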
def _make_update_bias_stats_fn(self, ensemble_stamp, predictions, gradients, bias_stats_accumulator):
'A method to create the function which updates the bias stats.'
def _update_bias_stats():
'A method to update the bias stats.'
grads_sum = math_ops.reduce_sum(gradients, 0)
hess = gradients_impl.gradients(grads_sum, predictions, name='Hessians', colocate_gradients_with_ops=False, gate_gradients=0, aggregation_method=None)[0]
hess_sum = math_ops.reduce_sum(hess, 0)
partition_ids = math_ops.range(self._logits_dimension)
feature_ids = array_ops.zeros([self._logits_dimension, 2], dtype=dtypes.int64)
add_stats_op = bias_stats_accumulator.add(ensemble_stamp, partition_ids, feature_ids, grads_sum, hess_sum)
return control_flow_ops.group(*[add_stats_op], name='update_bias_stats')
return _update_bias_stats
| -705,706,515,454,461,800
|
A method to create the function which updates the bias stats.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
|
_make_update_bias_stats_fn
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def _make_update_bias_stats_fn(self, ensemble_stamp, predictions, gradients, bias_stats_accumulator):
def _update_bias_stats():
'A method to update the bias stats.'
grads_sum = math_ops.reduce_sum(gradients, 0)
hess = gradients_impl.gradients(grads_sum, predictions, name='Hessians', colocate_gradients_with_ops=False, gate_gradients=0, aggregation_method=None)[0]
hess_sum = math_ops.reduce_sum(hess, 0)
partition_ids = math_ops.range(self._logits_dimension)
feature_ids = array_ops.zeros([self._logits_dimension, 2], dtype=dtypes.int64)
add_stats_op = bias_stats_accumulator.add(ensemble_stamp, partition_ids, feature_ids, grads_sum, hess_sum)
return control_flow_ops.group(*[add_stats_op], name='update_bias_stats')
return _update_bias_stats
|
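The accumulated per-logit gradient and hessian sums feed the bias-centering step seen earlier in _center_bias_fn: a Newton update delta = -g / h, guarded so that dimensions with non-positive curvature receive no update. A NumPy sketch of that downstream computation (the values are made up):

import numpy as np

grads_sum = np.array([4.0, -2.0, 1.0])   # summed gradients per logit dimension
hess_sum = np.array([2.0, 4.0, 0.0])     # summed hessians per logit dimension

# Guard the division so the h == 0 lane stays finite before where() masks it.
delta = np.where(hess_sum > 0, -grads_sum / np.maximum(hess_sum, 1e-12), 0.0)
print(delta)  # [-2.   0.5  0. ]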
def _update_ensemble():
'A method to update the tree ensemble.'
next_ensemble_stamp = (ensemble_stamp + 1)
(_, _, _, bias_grads, bias_hess) = training_state.bias_stats_accumulator.flush(ensemble_stamp, next_ensemble_stamp)
are_splits_ready_list = []
partition_ids_list = []
gains_list = []
split_info_list = []
for handler in training_state.handlers:
(are_splits_ready, partition_ids, gains, split_info) = handler.make_splits(ensemble_stamp, next_ensemble_stamp, class_id)
are_splits_ready_list.append(are_splits_ready)
partition_ids_list.append(partition_ids)
gains_list.append(gains)
split_info_list.append(split_info)
split_sizes = array_ops.reshape(array_ops.shape_n(partition_ids_list), [len(partition_ids_list)])
partition_ids = array_ops.concat(partition_ids_list, axis=0)
gains = array_ops.concat(gains_list, axis=0)
split_infos = array_ops.concat(split_info_list, axis=0)
are_all_splits_ready = math_ops.reduce_all(array_ops.stack(are_splits_ready_list, axis=0, name='stack_handler_readiness'))
def _center_bias_fn():
delta_updates = array_ops.where((bias_hess > 0), ((- bias_grads) / bias_hess), array_ops.zeros_like(bias_grads))
center_bias = training_ops.center_tree_ensemble_bias(tree_ensemble_handle=self._ensemble_handle, stamp_token=ensemble_stamp, next_stamp_token=next_ensemble_stamp, delta_updates=delta_updates, learner_config=self._learner_config_serialized)
return training_state.continue_centering.assign(center_bias)
def _grow_ensemble_ready_fn():
sizes = array_ops.unstack(split_sizes)
partition_ids_list = list(array_ops.split(partition_ids, sizes, axis=0))
if (self._learner_config.weak_learner_type == learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE):
sizes = len(training_state.handlers)
gains_list = list(array_ops.split(gains, sizes, axis=0))
split_info_list = list(array_ops.split(split_infos, sizes, axis=0))
return training_ops.grow_tree_ensemble(tree_ensemble_handle=self._ensemble_handle, stamp_token=ensemble_stamp, next_stamp_token=next_ensemble_stamp, learning_rate=learning_rate, partition_ids=partition_ids_list, gains=gains_list, splits=split_info_list, learner_config=self._learner_config_serialized, dropout_seed=dropout_seed, center_bias=self._center_bias, max_tree_depth=self._max_tree_depth, weak_learner_type=self._learner_config.weak_learner_type)
def _grow_ensemble_not_ready_fn():
return training_ops.grow_tree_ensemble(tree_ensemble_handle=self._ensemble_handle, stamp_token=ensemble_stamp, next_stamp_token=next_ensemble_stamp, learning_rate=0, partition_ids=[], gains=[], splits=[], learner_config=self._learner_config_serialized, dropout_seed=dropout_seed, center_bias=self._center_bias, max_tree_depth=self._max_tree_depth, weak_learner_type=self._learner_config.weak_learner_type)
def _grow_ensemble_fn():
return control_flow_ops.cond(are_all_splits_ready, _grow_ensemble_ready_fn, _grow_ensemble_not_ready_fn)
update_ops = [are_all_splits_ready]
if self._center_bias:
update_model = control_flow_ops.cond(training_state.continue_centering, _center_bias_fn, _grow_ensemble_fn)
else:
update_model = _grow_ensemble_fn()
update_ops.append(update_model)
with ops.control_dependencies([update_model]):
stats = training_ops.tree_ensemble_stats(self._ensemble_handle, stamp_token=next_ensemble_stamp)
update_ops.append(self._finalized_trees.assign(stats.num_trees))
update_ops.append(self._attempted_trees.assign(stats.attempted_trees))
update_ops.append(training_state.num_layers.assign(stats.num_layers))
update_ops.append(training_state.active_tree.assign(stats.active_tree))
update_ops.append(training_state.active_layer.assign(stats.active_layer))
update_ops.extend(training_state.steps_accumulator.flush(ensemble_stamp, next_ensemble_stamp))
return control_flow_ops.group(*update_ops, name='update_ensemble')
| -781,230,400,113,602,400
|
A method to update the tree ensemble.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
|
_update_ensemble
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def _update_ensemble():
next_ensemble_stamp = (ensemble_stamp + 1)
(_, _, _, bias_grads, bias_hess) = training_state.bias_stats_accumulator.flush(ensemble_stamp, next_ensemble_stamp)
are_splits_ready_list = []
partition_ids_list = []
gains_list = []
split_info_list = []
for handler in training_state.handlers:
(are_splits_ready, partition_ids, gains, split_info) = handler.make_splits(ensemble_stamp, next_ensemble_stamp, class_id)
are_splits_ready_list.append(are_splits_ready)
partition_ids_list.append(partition_ids)
gains_list.append(gains)
split_info_list.append(split_info)
split_sizes = array_ops.reshape(array_ops.shape_n(partition_ids_list), [len(partition_ids_list)])
partition_ids = array_ops.concat(partition_ids_list, axis=0)
gains = array_ops.concat(gains_list, axis=0)
split_infos = array_ops.concat(split_info_list, axis=0)
are_all_splits_ready = math_ops.reduce_all(array_ops.stack(are_splits_ready_list, axis=0, name='stack_handler_readiness'))
def _center_bias_fn():
delta_updates = array_ops.where((bias_hess > 0), ((- bias_grads) / bias_hess), array_ops.zeros_like(bias_grads))
center_bias = training_ops.center_tree_ensemble_bias(tree_ensemble_handle=self._ensemble_handle, stamp_token=ensemble_stamp, next_stamp_token=next_ensemble_stamp, delta_updates=delta_updates, learner_config=self._learner_config_serialized)
return training_state.continue_centering.assign(center_bias)
def _grow_ensemble_ready_fn():
sizes = array_ops.unstack(split_sizes)
partition_ids_list = list(array_ops.split(partition_ids, sizes, axis=0))
if (self._learner_config.weak_learner_type == learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE):
sizes = len(training_state.handlers)
gains_list = list(array_ops.split(gains, sizes, axis=0))
split_info_list = list(array_ops.split(split_infos, sizes, axis=0))
return training_ops.grow_tree_ensemble(tree_ensemble_handle=self._ensemble_handle, stamp_token=ensemble_stamp, next_stamp_token=next_ensemble_stamp, learning_rate=learning_rate, partition_ids=partition_ids_list, gains=gains_list, splits=split_info_list, learner_config=self._learner_config_serialized, dropout_seed=dropout_seed, center_bias=self._center_bias, max_tree_depth=self._max_tree_depth, weak_learner_type=self._learner_config.weak_learner_type)
def _grow_ensemble_not_ready_fn():
return training_ops.grow_tree_ensemble(tree_ensemble_handle=self._ensemble_handle, stamp_token=ensemble_stamp, next_stamp_token=next_ensemble_stamp, learning_rate=0, partition_ids=[], gains=[], splits=[], learner_config=self._learner_config_serialized, dropout_seed=dropout_seed, center_bias=self._center_bias, max_tree_depth=self._max_tree_depth, weak_learner_type=self._learner_config.weak_learner_type)
def _grow_ensemble_fn():
return control_flow_ops.cond(are_all_splits_ready, _grow_ensemble_ready_fn, _grow_ensemble_not_ready_fn)
update_ops = [are_all_splits_ready]
if self._center_bias:
update_model = control_flow_ops.cond(training_state.continue_centering, _center_bias_fn, _grow_ensemble_fn)
else:
update_model = _grow_ensemble_fn()
update_ops.append(update_model)
with ops.control_dependencies([update_model]):
stats = training_ops.tree_ensemble_stats(self._ensemble_handle, stamp_token=next_ensemble_stamp)
update_ops.append(self._finalized_trees.assign(stats.num_trees))
update_ops.append(self._attempted_trees.assign(stats.attempted_trees))
update_ops.append(training_state.num_layers.assign(stats.num_layers))
update_ops.append(training_state.active_tree.assign(stats.active_tree))
update_ops.append(training_state.active_layer.assign(stats.active_layer))
update_ops.extend(training_state.steps_accumulator.flush(ensemble_stamp, next_ensemble_stamp))
return control_flow_ops.group(*update_ops, name='update_ensemble')
|
def _update_bias_stats():
'A method to update the bias stats.'
grads_sum = math_ops.reduce_sum(gradients, 0)
hess = gradients_impl.gradients(grads_sum, predictions, name='Hessians', colocate_gradients_with_ops=False, gate_gradients=0, aggregation_method=None)[0]
hess_sum = math_ops.reduce_sum(hess, 0)
partition_ids = math_ops.range(self._logits_dimension)
feature_ids = array_ops.zeros([self._logits_dimension, 2], dtype=dtypes.int64)
add_stats_op = bias_stats_accumulator.add(ensemble_stamp, partition_ids, feature_ids, grads_sum, hess_sum)
return control_flow_ops.group(*[add_stats_op], name='update_bias_stats')
| -5,184,848,394,841,910,000
|
A method to update the bias stats.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
|
_update_bias_stats
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def _update_bias_stats():
grads_sum = math_ops.reduce_sum(gradients, 0)
hess = gradients_impl.gradients(grads_sum, predictions, name='Hessians', colocate_gradients_with_ops=False, gate_gradients=0, aggregation_method=None)[0]
hess_sum = math_ops.reduce_sum(hess, 0)
partition_ids = math_ops.range(self._logits_dimension)
feature_ids = array_ops.zeros([self._logits_dimension, 2], dtype=dtypes.int64)
add_stats_op = bias_stats_accumulator.add(ensemble_stamp, partition_ids, feature_ids, grads_sum, hess_sum)
return control_flow_ops.group(*[add_stats_op], name='update_bias_stats')
|
def __init__(self, transfer_id=None):
'UpdateIndirectPartnerAccountResponse - a model defined in huaweicloud sdk'
super().__init__()
self._transfer_id = None
self.discriminator = None
if (transfer_id is not None):
self.transfer_id = transfer_id
| 8,037,283,255,066,327,000
|
UpdateIndirectPartnerAccountResponse - a model defined in huaweicloud sdk
|
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/update_indirect_partner_account_response.py
|
__init__
|
Lencof/huaweicloud-sdk-python-v3
|
python
|
def __init__(self, transfer_id=None):
super().__init__()
self._transfer_id = None
self.discriminator = None
if (transfer_id is not None):
self.transfer_id = transfer_id
|
@property
def transfer_id(self):
'Gets the transfer_id of this UpdateIndirectPartnerAccountResponse.\n\n Transaction ID; only returned on a successful response.\n\n :return: The transfer_id of this UpdateIndirectPartnerAccountResponse.\n :rtype: str\n '
return self._transfer_id
| -3,009,237,835,561,159,700
|
Gets the transfer_id of this UpdateIndirectPartnerAccountResponse.
Transaction ID; only returned on a successful response.
:return: The transfer_id of this UpdateIndirectPartnerAccountResponse.
:rtype: str
|
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/update_indirect_partner_account_response.py
|
transfer_id
|
Lencof/huaweicloud-sdk-python-v3
|
python
|
@property
def transfer_id(self):
'Gets the transfer_id of this UpdateIndirectPartnerAccountResponse.\n\n Transaction ID; only returned on a successful response.\n\n :return: The transfer_id of this UpdateIndirectPartnerAccountResponse.\n :rtype: str\n '
return self._transfer_id
|
@transfer_id.setter
def transfer_id(self, transfer_id):
'Sets the transfer_id of this UpdateIndirectPartnerAccountResponse.\n\n Transaction ID; only returned on a successful response.\n\n :param transfer_id: The transfer_id of this UpdateIndirectPartnerAccountResponse.\n :type: str\n '
self._transfer_id = transfer_id
| 3,366,307,169,122,716,000
|
Sets the transfer_id of this UpdateIndirectPartnerAccountResponse.
Transaction ID; only returned on a successful response.
:param transfer_id: The transfer_id of this UpdateIndirectPartnerAccountResponse.
:type: str
|
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/update_indirect_partner_account_response.py
|
transfer_id
|
Lencof/huaweicloud-sdk-python-v3
|
python
|
@transfer_id.setter
def transfer_id(self, transfer_id):
'Sets the transfer_id of this UpdateIndirectPartnerAccountResponse.\n\n Transaction ID; only returned on a successful response.\n\n :param transfer_id: The transfer_id of this UpdateIndirectPartnerAccountResponse.\n :type: str\n '
self._transfer_id = transfer_id
|
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result
| 2,594,216,033,120,720,000
|
Returns the model properties as a dict
|
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/update_indirect_partner_account_response.py
|
to_dict
|
Lencof/huaweicloud-sdk-python-v3
|
python
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result
|
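to_dict walks openapi_types, recursing into any value that itself exposes to_dict and masking attributes named in sensitive_list. A self-contained sketch of that behaviour (Inner and Outer are hypothetical stand-ins for generated SDK models, not classes from the SDK):

import pprint

class Inner:
    def to_dict(self):
        return {'id': 42}

class Outer:
    openapi_types = {'child': 'Inner', 'token': 'str'}
    sensitive_list = ['token']

    def __init__(self):
        self.child = Inner()
        self.token = 'secret'

    def to_dict(self):
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()  # recurse into nested models
            elif attr in self.sensitive_list:
                result[attr] = '****'           # mask sensitive fields
            else:
                result[attr] = value
        return result

pprint.pprint(Outer().to_dict())  # {'child': {'id': 42}, 'token': '****'}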
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict())
| 5,849,158,643,760,736,000
|
Returns the string representation of the model
|
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/update_indirect_partner_account_response.py
|
to_str
|
Lencof/huaweicloud-sdk-python-v3
|
python
|
def to_str(self):
return pprint.pformat(self.to_dict())
|
def __repr__(self):
'For `print` and `pprint`'
return self.to_str()
| -8,960,031,694,814,905,000
|
For `print` and `pprint`
|
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/update_indirect_partner_account_response.py
|
__repr__
|
Lencof/huaweicloud-sdk-python-v3
|
python
|
def __repr__(self):
return self.to_str()
|
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, UpdateIndirectPartnerAccountResponse)):
return False
return (self.__dict__ == other.__dict__)
| -5,450,993,162,929,922,000
|
Returns true if both objects are equal
|
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/update_indirect_partner_account_response.py
|
__eq__
|
Lencof/huaweicloud-sdk-python-v3
|
python
|
def __eq__(self, other):
if (not isinstance(other, UpdateIndirectPartnerAccountResponse)):
return False
return (self.__dict__ == other.__dict__)
|
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other))
| 7,764,124,047,908,058,000
|
Returns true if both objects are not equal
|
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/update_indirect_partner_account_response.py
|
__ne__
|
Lencof/huaweicloud-sdk-python-v3
|
python
|
def __ne__(self, other):
return (not (self == other))
|
def set_segment_redundancy(alarm):
'\n Set lost_redundancy to segment when redundant object is down\n :param alarm:\n :return:\n '
if alarm.root:
return
mo = alarm.managed_object
seg = mo.segment
if (seg.is_redundant and (not seg.lost_redundancy)):
u = mo.data.uplinks
if (len(u) > 1):
logger.info('[%s] Redundancy lost for %s', alarm.id, seg.name)
seg.set_lost_redundancy(True)
| 4,618,587,306,445,554,000
|
Set lost_redundancy to segment when redundant object is down
:param alarm:
:return:
|
fm/handlers/alarm/segment.py
|
set_segment_redundancy
|
sbworth/getnoc
|
python
|
def set_segment_redundancy(alarm):
'\n Set lost_redundancy to segment when redundant object is down\n :param alarm:\n :return:\n '
if alarm.root:
return
mo = alarm.managed_object
seg = mo.segment
if (seg.is_redundant and (not seg.lost_redundancy)):
u = mo.data.uplinks
if (len(u) > 1):
logger.info('[%s] Redundancy lost for %s', alarm.id, seg.name)
seg.set_lost_redundancy(True)
|
def check_segment_redundancy(alarm):
'\n Reset lost_redundancy from segment when all redundant objects\n are up\n :param alarm:\n :return:\n '
mo = alarm.managed_object
seg = mo.segment
if ((not seg.is_redundant) or (not seg.lost_redundancy)):
return
u = mo.data.uplinks
if (len(u) < 2):
return
seg_objects = list(seg.managed_objects.values_list('id', flat=True))
alarms = [d['managed_object'] for d in ActiveAlarm._get_collection().find({'managed_object': {'$in': seg_objects}}, {'_id': 0, 'managed_object': 1}) if (d['managed_object'] != mo.id)]
uplinks = ManagedObject.uplinks_for_objects(alarms)
if (not any((x for x in uplinks.values() if (len(x) > 1)))):
logger.info('[%s] Redundancy recovered for %s', alarm.id, seg.name)
seg.set_lost_redundancy(False)
| 5,985,817,990,810,562,000
|
Reset lost_redundancy from segment when all redundant objects
are up
:param alarm:
:return:
|
fm/handlers/alarm/segment.py
|
check_segment_redundancy
|
sbworth/getnoc
|
python
|
def check_segment_redundancy(alarm):
'\n Reset lost_redundancy from segment when all redundant objects\n are up\n :param alarm:\n :return:\n '
mo = alarm.managed_object
seg = mo.segment
if ((not seg.is_redundant) or (not seg.lost_redundancy)):
return
u = mo.data.uplinks
if (len(u) < 2):
return
seg_objects = list(seg.managed_objects.values_list('id', flat=True))
alarms = [d['managed_object'] for d in ActiveAlarm._get_collection().find({'managed_object': {'$in': seg_objects}}, {'_id': 0, 'managed_object': 1}) if (d['managed_object'] != mo.id)]
uplinks = ManagedObject.uplinks_for_objects(alarms)
if (not any((x for x in uplinks.values() if (len(x) > 1)))):
logger.info('[%s] Redundancy recovered for %s', alarm.id, seg.name)
seg.set_lost_redundancy(False)
|
def decompose(S, n_components=None, transformer=None, sort=False, fit=True, **kwargs):
"Decompose a feature matrix.\n\n Given a spectrogram `S`, produce a decomposition into `components`\n and `activations` such that `S ~= components.dot(activations)`.\n\n By default, this is done with with non-negative matrix factorization (NMF),\n but any `sklearn.decomposition`-type object will work.\n\n\n Parameters\n ----------\n S : np.ndarray [shape=(n_features, n_samples), dtype=float]\n The input feature matrix (e.g., magnitude spectrogram)\n\n n_components : int > 0 [scalar] or None\n number of desired components\n\n if None, then `n_features` components are used\n\n transformer : None or object\n If None, use `sklearn.decomposition.NMF`\n\n Otherwise, any object with a similar interface to NMF should work.\n `transformer` must follow the scikit-learn convention, where\n input data is `(n_samples, n_features)`.\n\n `transformer.fit_transform()` will be run on `S.T` (not `S`),\n the return value of which is stored (transposed) as `activations`\n\n The components will be retrieved as `transformer.components_.T`\n\n `S ~= np.dot(activations, transformer.components_).T`\n\n or equivalently:\n `S ~= np.dot(transformer.components_.T, activations.T)`\n\n sort : bool\n If `True`, components are sorted by ascending peak frequency.\n\n .. note:: If used with `transformer`, sorting is applied to copies\n of the decomposition parameters, and not to `transformer`'s\n internal parameters.\n\n fit : bool\n If `True`, components are estimated from the input ``S``.\n\n If `False`, components are assumed to be pre-computed and stored\n in ``transformer``, and are not changed.\n\n kwargs : Additional keyword arguments to the default transformer\n `sklearn.decomposition.NMF`\n\n\n Returns\n -------\n components: np.ndarray [shape=(n_features, n_components)]\n matrix of components (basis elements).\n\n activations: np.ndarray [shape=(n_components, n_samples)]\n transformed matrix/activation matrix\n\n\n Raises\n ------\n ParameterError\n if `fit` is False and no `transformer` object is provided.\n\n\n See Also\n --------\n sklearn.decomposition : SciKit-Learn matrix decomposition modules\n\n\n Examples\n --------\n Decompose a magnitude spectrogram into 32 components with NMF\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> S = np.abs(librosa.stft(y))\n >>> comps, acts = librosa.decompose.decompose(S, n_components=8)\n >>> comps\n array([[ 1.876e-01, 5.559e-02, ..., 1.687e-01, 4.907e-02],\n [ 3.148e-01, 1.719e-01, ..., 2.314e-01, 9.493e-02],\n ...,\n [ 1.561e-07, 8.564e-08, ..., 7.167e-08, 4.997e-08],\n [ 1.531e-07, 7.880e-08, ..., 5.632e-08, 4.028e-08]])\n >>> acts\n array([[ 4.197e-05, 8.512e-03, ..., 3.056e-05, 9.159e-06],\n [ 9.568e-06, 1.718e-02, ..., 3.322e-05, 7.869e-06],\n ...,\n [ 5.982e-05, 1.311e-02, ..., -0.000e+00, 6.323e-06],\n [ 3.782e-05, 7.056e-03, ..., 3.290e-05, -0.000e+00]])\n\n\n Sort components by ascending peak frequency\n\n >>> comps, acts = librosa.decompose.decompose(S, n_components=16,\n ... sort=True)\n\n\n Or with sparse dictionary learning\n\n >>> import sklearn.decomposition\n >>> T = sklearn.decomposition.MiniBatchDictionaryLearning(n_components=16)\n >>> scomps, sacts = librosa.decompose.decompose(S, transformer=T, sort=True)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure(figsize=(10,8))\n >>> plt.subplot(3, 1, 1)\n >>> librosa.display.specshow(librosa.amplitude_to_db(S,\n ... ref=np.max),\n ... 
y_axis='log', x_axis='time')\n >>> plt.title('Input spectrogram')\n >>> plt.colorbar(format='%+2.0f dB')\n >>> plt.subplot(3, 2, 3)\n >>> librosa.display.specshow(librosa.amplitude_to_db(comps,\n ... ref=np.max),\n ... y_axis='log')\n >>> plt.colorbar(format='%+2.0f dB')\n >>> plt.title('Components')\n >>> plt.subplot(3, 2, 4)\n >>> librosa.display.specshow(acts, x_axis='time')\n >>> plt.ylabel('Components')\n >>> plt.title('Activations')\n >>> plt.colorbar()\n >>> plt.subplot(3, 1, 3)\n >>> S_approx = comps.dot(acts)\n >>> librosa.display.specshow(librosa.amplitude_to_db(S_approx,\n ... ref=np.max),\n ... y_axis='log', x_axis='time')\n >>> plt.colorbar(format='%+2.0f dB')\n >>> plt.title('Reconstructed spectrogram')\n >>> plt.tight_layout()\n "
if (transformer is None):
if (fit is False):
raise ParameterError('fit must be True if transformer is None')
transformer = sklearn.decomposition.NMF(n_components=n_components, **kwargs)
if (n_components is None):
n_components = S.shape[0]
if fit:
activations = transformer.fit_transform(S.T).T
else:
activations = transformer.transform(S.T).T
components = transformer.components_.T
if sort:
(components, idx) = util.axis_sort(components, index=True)
activations = activations[idx]
return (components, activations)
| -4,230,850,838,342,372,400
|
Decompose a feature matrix.
Given a spectrogram `S`, produce a decomposition into `components`
and `activations` such that `S ~= components.dot(activations)`.
By default, this is done with non-negative matrix factorization (NMF),
but any `sklearn.decomposition`-type object will work.
Parameters
----------
S : np.ndarray [shape=(n_features, n_samples), dtype=float]
The input feature matrix (e.g., magnitude spectrogram)
n_components : int > 0 [scalar] or None
number of desired components
if None, then `n_features` components are used
transformer : None or object
If None, use `sklearn.decomposition.NMF`
Otherwise, any object with a similar interface to NMF should work.
`transformer` must follow the scikit-learn convention, where
input data is `(n_samples, n_features)`.
`transformer.fit_transform()` will be run on `S.T` (not `S`),
the return value of which is stored (transposed) as `activations`
The components will be retrieved as `transformer.components_.T`
`S ~= np.dot(activations, transformer.components_).T`
or equivalently:
`S ~= np.dot(transformer.components_.T, activations.T)`
sort : bool
If `True`, components are sorted by ascending peak frequency.
.. note:: If used with `transformer`, sorting is applied to copies
of the decomposition parameters, and not to `transformer`'s
internal parameters.
fit : bool
If `True`, components are estimated from the input ``S``.
If `False`, components are assumed to be pre-computed and stored
in ``transformer``, and are not changed.
kwargs : Additional keyword arguments to the default transformer
`sklearn.decomposition.NMF`
Returns
-------
components: np.ndarray [shape=(n_features, n_components)]
matrix of components (basis elements).
activations: np.ndarray [shape=(n_components, n_samples)]
transformed matrix/activation matrix
Raises
------
ParameterError
if `fit` is False and no `transformer` object is provided.
See Also
--------
sklearn.decomposition : SciKit-Learn matrix decomposition modules
Examples
--------
Decompose a magnitude spectrogram into 8 components with NMF
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> comps, acts = librosa.decompose.decompose(S, n_components=8)
>>> comps
array([[ 1.876e-01, 5.559e-02, ..., 1.687e-01, 4.907e-02],
[ 3.148e-01, 1.719e-01, ..., 2.314e-01, 9.493e-02],
...,
[ 1.561e-07, 8.564e-08, ..., 7.167e-08, 4.997e-08],
[ 1.531e-07, 7.880e-08, ..., 5.632e-08, 4.028e-08]])
>>> acts
array([[ 4.197e-05, 8.512e-03, ..., 3.056e-05, 9.159e-06],
[ 9.568e-06, 1.718e-02, ..., 3.322e-05, 7.869e-06],
...,
[ 5.982e-05, 1.311e-02, ..., -0.000e+00, 6.323e-06],
[ 3.782e-05, 7.056e-03, ..., 3.290e-05, -0.000e+00]])
Sort components by ascending peak frequency
>>> comps, acts = librosa.decompose.decompose(S, n_components=16,
... sort=True)
Or with sparse dictionary learning
>>> import sklearn.decomposition
>>> T = sklearn.decomposition.MiniBatchDictionaryLearning(n_components=16)
>>> scomps, sacts = librosa.decompose.decompose(S, transformer=T, sort=True)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10,8))
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(S,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('Input spectrogram')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.subplot(3, 2, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(comps,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Components')
>>> plt.subplot(3, 2, 4)
>>> librosa.display.specshow(acts, x_axis='time')
>>> plt.ylabel('Components')
>>> plt.title('Activations')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> S_approx = comps.dot(acts)
>>> librosa.display.specshow(librosa.amplitude_to_db(S_approx,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Reconstructed spectrogram')
>>> plt.tight_layout()
|
librosa/decompose.py
|
decompose
|
ElisaIzrailova/librosa
|
python
|
def decompose(S, n_components=None, transformer=None, sort=False, fit=True, **kwargs):
"Decompose a feature matrix.\n\n Given a spectrogram `S`, produce a decomposition into `components`\n and `activations` such that `S ~= components.dot(activations)`.\n\n By default, this is done with with non-negative matrix factorization (NMF),\n but any `sklearn.decomposition`-type object will work.\n\n\n Parameters\n ----------\n S : np.ndarray [shape=(n_features, n_samples), dtype=float]\n The input feature matrix (e.g., magnitude spectrogram)\n\n n_components : int > 0 [scalar] or None\n number of desired components\n\n if None, then `n_features` components are used\n\n transformer : None or object\n If None, use `sklearn.decomposition.NMF`\n\n Otherwise, any object with a similar interface to NMF should work.\n `transformer` must follow the scikit-learn convention, where\n input data is `(n_samples, n_features)`.\n\n `transformer.fit_transform()` will be run on `S.T` (not `S`),\n the return value of which is stored (transposed) as `activations`\n\n The components will be retrieved as `transformer.components_.T`\n\n `S ~= np.dot(activations, transformer.components_).T`\n\n or equivalently:\n `S ~= np.dot(transformer.components_.T, activations.T)`\n\n sort : bool\n If `True`, components are sorted by ascending peak frequency.\n\n .. note:: If used with `transformer`, sorting is applied to copies\n of the decomposition parameters, and not to `transformer`'s\n internal parameters.\n\n fit : bool\n If `True`, components are estimated from the input ``S``.\n\n If `False`, components are assumed to be pre-computed and stored\n in ``transformer``, and are not changed.\n\n kwargs : Additional keyword arguments to the default transformer\n `sklearn.decomposition.NMF`\n\n\n Returns\n -------\n components: np.ndarray [shape=(n_features, n_components)]\n matrix of components (basis elements).\n\n activations: np.ndarray [shape=(n_components, n_samples)]\n transformed matrix/activation matrix\n\n\n Raises\n ------\n ParameterError\n if `fit` is False and no `transformer` object is provided.\n\n\n See Also\n --------\n sklearn.decomposition : SciKit-Learn matrix decomposition modules\n\n\n Examples\n --------\n Decompose a magnitude spectrogram into 32 components with NMF\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file())\n >>> S = np.abs(librosa.stft(y))\n >>> comps, acts = librosa.decompose.decompose(S, n_components=8)\n >>> comps\n array([[ 1.876e-01, 5.559e-02, ..., 1.687e-01, 4.907e-02],\n [ 3.148e-01, 1.719e-01, ..., 2.314e-01, 9.493e-02],\n ...,\n [ 1.561e-07, 8.564e-08, ..., 7.167e-08, 4.997e-08],\n [ 1.531e-07, 7.880e-08, ..., 5.632e-08, 4.028e-08]])\n >>> acts\n array([[ 4.197e-05, 8.512e-03, ..., 3.056e-05, 9.159e-06],\n [ 9.568e-06, 1.718e-02, ..., 3.322e-05, 7.869e-06],\n ...,\n [ 5.982e-05, 1.311e-02, ..., -0.000e+00, 6.323e-06],\n [ 3.782e-05, 7.056e-03, ..., 3.290e-05, -0.000e+00]])\n\n\n Sort components by ascending peak frequency\n\n >>> comps, acts = librosa.decompose.decompose(S, n_components=16,\n ... sort=True)\n\n\n Or with sparse dictionary learning\n\n >>> import sklearn.decomposition\n >>> T = sklearn.decomposition.MiniBatchDictionaryLearning(n_components=16)\n >>> scomps, sacts = librosa.decompose.decompose(S, transformer=T, sort=True)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure(figsize=(10,8))\n >>> plt.subplot(3, 1, 1)\n >>> librosa.display.specshow(librosa.amplitude_to_db(S,\n ... ref=np.max),\n ... 
y_axis='log', x_axis='time')\n >>> plt.title('Input spectrogram')\n >>> plt.colorbar(format='%+2.0f dB')\n >>> plt.subplot(3, 2, 3)\n >>> librosa.display.specshow(librosa.amplitude_to_db(comps,\n ... ref=np.max),\n ... y_axis='log')\n >>> plt.colorbar(format='%+2.0f dB')\n >>> plt.title('Components')\n >>> plt.subplot(3, 2, 4)\n >>> librosa.display.specshow(acts, x_axis='time')\n >>> plt.ylabel('Components')\n >>> plt.title('Activations')\n >>> plt.colorbar()\n >>> plt.subplot(3, 1, 3)\n >>> S_approx = comps.dot(acts)\n >>> librosa.display.specshow(librosa.amplitude_to_db(S_approx,\n ... ref=np.max),\n ... y_axis='log', x_axis='time')\n >>> plt.colorbar(format='%+2.0f dB')\n >>> plt.title('Reconstructed spectrogram')\n >>> plt.tight_layout()\n "
if (transformer is None):
if (fit is False):
raise ParameterError('fit must be True if transformer is None')
transformer = sklearn.decomposition.NMF(n_components=n_components, **kwargs)
if (n_components is None):
n_components = S.shape[0]
if fit:
activations = transformer.fit_transform(S.T).T
else:
activations = transformer.transform(S.T).T
components = transformer.components_.T
if sort:
(components, idx) = util.axis_sort(components, index=True)
activations = activations[idx]
return (components, activations)
|
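One documented path the examples above do not exercise is fit=False: components learned on one call can be reused without re-estimation. A hedged usage sketch, assuming librosa and scikit-learn are installed (and that the deprecated example_audio_file helper used throughout this docstring is still available):

import numpy as np
import sklearn.decomposition
import librosa

y, sr = librosa.load(librosa.util.example_audio_file(), duration=5)
S = np.abs(librosa.stft(y))

nmf = sklearn.decomposition.NMF(n_components=8)
comps, acts = librosa.decompose.decompose(S, transformer=nmf)   # fits nmf
comps2, acts2 = librosa.decompose.decompose(S, transformer=nmf,
                                            fit=False)          # reuses components
print(np.allclose(comps, comps2))  # True: components were not re-estimated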
@cache(level=30)
def hpss(S, kernel_size=31, power=2.0, mask=False, margin=1.0):
'Median-filtering harmonic percussive source separation (HPSS).\n\n If `margin = 1.0`, decomposes an input spectrogram `S = H + P`\n where `H` contains the harmonic components,\n and `P` contains the percussive components.\n\n If `margin > 1.0`, decomposes an input spectrogram `S = H + P + R`\n where `R` contains residual components not included in `H` or `P`.\n\n This implementation is based upon the algorithm described by [1]_ and [2]_.\n\n .. [1] Fitzgerald, Derry.\n "Harmonic/percussive separation using median filtering."\n 13th International Conference on Digital Audio Effects (DAFX10),\n Graz, Austria, 2010.\n\n .. [2] Driedger, Müller, Disch.\n "Extending harmonic-percussive separation of audio."\n 15th International Society for Music Information Retrieval Conference (ISMIR 2014),\n Taipei, Taiwan, 2014.\n\n Parameters\n ----------\n S : np.ndarray [shape=(d, n)]\n input spectrogram. May be real (magnitude) or complex.\n\n kernel_size : int or tuple (kernel_harmonic, kernel_percussive)\n kernel size(s) for the median filters.\n\n - If scalar, the same size is used for both harmonic and percussive.\n - If tuple, the first value specifies the width of the\n harmonic filter, and the second value specifies the width\n of the percussive filter.\n\n power : float > 0 [scalar]\n Exponent for the Wiener filter when constructing soft mask matrices.\n\n mask : bool\n Return the masking matrices instead of components.\n\n Masking matrices contain non-negative real values that\n can be used to measure the assignment of energy from `S`\n into harmonic or percussive components.\n\n Components can be recovered by multiplying `S * mask_H`\n or `S * mask_P`.\n\n\n margin : float or tuple (margin_harmonic, margin_percussive)\n margin size(s) for the masks (as described in [2]_)\n\n - If scalar, the same size is used for both harmonic and percussive.\n - If tuple, the first value specifies the margin of the\n harmonic mask, and the second value specifies the margin\n of the percussive mask.\n\n Returns\n -------\n harmonic : np.ndarray [shape=(d, n)]\n harmonic component (or mask)\n\n percussive : np.ndarray [shape=(d, n)]\n percussive component (or mask)\n\n\n See Also\n --------\n util.softmask\n\n Notes\n -----\n This function caches at level 30.\n\n Examples\n --------\n Separate into harmonic and percussive\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)\n >>> D = librosa.stft(y)\n >>> H, P = librosa.decompose.hpss(D)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure()\n >>> plt.subplot(3, 1, 1)\n >>> librosa.display.specshow(librosa.amplitude_to_db(D,\n ... ref=np.max),\n ... y_axis=\'log\')\n >>> plt.colorbar(format=\'%+2.0f dB\')\n >>> plt.title(\'Full power spectrogram\')\n >>> plt.subplot(3, 1, 2)\n >>> librosa.display.specshow(librosa.amplitude_to_db(H,\n ... ref=np.max),\n ... y_axis=\'log\')\n >>> plt.colorbar(format=\'%+2.0f dB\')\n >>> plt.title(\'Harmonic power spectrogram\')\n >>> plt.subplot(3, 1, 3)\n >>> librosa.display.specshow(librosa.amplitude_to_db(P,\n ... ref=np.max),\n ... y_axis=\'log\')\n >>> plt.colorbar(format=\'%+2.0f dB\')\n >>> plt.title(\'Percussive power spectrogram\')\n >>> plt.tight_layout()\n\n\n Or with a narrower horizontal filter\n\n >>> H, P = librosa.decompose.hpss(D, kernel_size=(13, 31))\n\n Just get harmonic/percussive masks, not the spectra\n\n >>> mask_H, mask_P = librosa.decompose.hpss(D, mask=True)\n >>> mask_H\n array([[ 1.000e+00, 1.469e-01, ..., 2.648e-03, 2.164e-03],\n [ 1.000e+00, 2.368e-01, ..., 9.413e-03, 7.703e-03],\n ...,\n [ 8.869e-01, 5.673e-02, ..., 4.603e-02, 1.247e-05],\n [ 7.068e-01, 2.194e-02, ..., 4.453e-02, 1.205e-05]], dtype=float32)\n >>> mask_P\n array([[ 2.858e-05, 8.531e-01, ..., 9.974e-01, 9.978e-01],\n [ 1.586e-05, 7.632e-01, ..., 9.906e-01, 9.923e-01],\n ...,\n [ 1.131e-01, 9.433e-01, ..., 9.540e-01, 1.000e+00],\n [ 2.932e-01, 9.781e-01, ..., 9.555e-01, 1.000e+00]], dtype=float32)\n\n Separate into harmonic/percussive/residual components by using a margin > 1.0\n\n >>> H, P = librosa.decompose.hpss(D, margin=3.0)\n >>> R = D - (H+P)\n >>> y_harm = librosa.core.istft(H)\n >>> y_perc = librosa.core.istft(P)\n >>> y_resi = librosa.core.istft(R)\n\n\n Get a more isolated percussive component by widening its margin\n\n >>> H, P = librosa.decompose.hpss(D, margin=(1.0,5.0))\n\n '
if np.iscomplexobj(S):
(S, phase) = core.magphase(S)
else:
phase = 1
if np.isscalar(kernel_size):
win_harm = kernel_size
win_perc = kernel_size
else:
win_harm = kernel_size[0]
win_perc = kernel_size[1]
if np.isscalar(margin):
margin_harm = margin
margin_perc = margin
else:
margin_harm = margin[0]
margin_perc = margin[1]
if ((margin_harm < 1) or (margin_perc < 1)):
raise ParameterError('Margins must be >= 1.0. A typical range is between 1 and 10.')
harm = np.empty_like(S)
harm[:] = median_filter(S, size=(1, win_harm), mode='reflect')
perc = np.empty_like(S)
perc[:] = median_filter(S, size=(win_perc, 1), mode='reflect')
split_zeros = ((margin_harm == 1) and (margin_perc == 1))
mask_harm = util.softmask(harm, (perc * margin_harm), power=power, split_zeros=split_zeros)
mask_perc = util.softmask(perc, (harm * margin_perc), power=power, split_zeros=split_zeros)
if mask:
return (mask_harm, mask_perc)
return (((S * mask_harm) * phase), ((S * mask_perc) * phase))
| -601,029,398,121,857,800
|
Median-filtering harmonic percussive source separation (HPSS).
If `margin = 1.0`, decomposes an input spectrogram `S = H + P`
where `H` contains the harmonic components,
and `P` contains the percussive components.
If `margin > 1.0`, decomposes an input spectrogram `S = H + P + R`
where `R` contains residual components not included in `H` or `P`.
This implementation is based upon the algorithm described by [1]_ and [2]_.
.. [1] Fitzgerald, Derry.
"Harmonic/percussive separation using median filtering."
13th International Conference on Digital Audio Effects (DAFX10),
Graz, Austria, 2010.
.. [2] Driedger, Müller, Disch.
"Extending harmonic-percussive separation of audio."
15th International Society for Music Information Retrieval Conference (ISMIR 2014),
Taipei, Taiwan, 2014.
Parameters
----------
S : np.ndarray [shape=(d, n)]
input spectrogram. May be real (magnitude) or complex.
kernel_size : int or tuple (kernel_harmonic, kernel_percussive)
kernel size(s) for the median filters.
- If scalar, the same size is used for both harmonic and percussive.
- If tuple, the first value specifies the width of the
harmonic filter, and the second value specifies the width
of the percussive filter.
power : float > 0 [scalar]
Exponent for the Wiener filter when constructing soft mask matrices.
mask : bool
Return the masking matrices instead of components.
Masking matrices contain non-negative real values that
can be used to measure the assignment of energy from `S`
into harmonic or percussive components.
Components can be recovered by multiplying `S * mask_H`
or `S * mask_P`.
margin : float or tuple (margin_harmonic, margin_percussive)
margin size(s) for the masks (as described in [2]_)
- If scalar, the same size is used for both harmonic and percussive.
- If tuple, the first value specifies the margin of the
harmonic mask, and the second value specifies the margin
of the percussive mask.
Returns
-------
harmonic : np.ndarray [shape=(d, n)]
harmonic component (or mask)
percussive : np.ndarray [shape=(d, n)]
percussive component (or mask)
See Also
--------
util.softmask
Notes
-----
This function caches at level 30.
Examples
--------
Separate into harmonic and percussive
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
>>> D = librosa.stft(y)
>>> H, P = librosa.decompose.hpss(D)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(D,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Full power spectrogram')
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(librosa.amplitude_to_db(H,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Harmonic power spectrogram')
>>> plt.subplot(3, 1, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(P,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Percussive power spectrogram')
>>> plt.tight_layout()
Or with a narrower horizontal filter
>>> H, P = librosa.decompose.hpss(D, kernel_size=(13, 31))
Just get harmonic/percussive masks, not the spectra
>>> mask_H, mask_P = librosa.decompose.hpss(D, mask=True)
>>> mask_H
array([[ 1.000e+00, 1.469e-01, ..., 2.648e-03, 2.164e-03],
[ 1.000e+00, 2.368e-01, ..., 9.413e-03, 7.703e-03],
...,
[ 8.869e-01, 5.673e-02, ..., 4.603e-02, 1.247e-05],
[ 7.068e-01, 2.194e-02, ..., 4.453e-02, 1.205e-05]], dtype=float32)
>>> mask_P
array([[ 2.858e-05, 8.531e-01, ..., 9.974e-01, 9.978e-01],
[ 1.586e-05, 7.632e-01, ..., 9.906e-01, 9.923e-01],
...,
[ 1.131e-01, 9.433e-01, ..., 9.540e-01, 1.000e+00],
[ 2.932e-01, 9.781e-01, ..., 9.555e-01, 1.000e+00]], dtype=float32)
Separate into harmonic/percussive/residual components by using a margin > 1.0
>>> H, P = librosa.decompose.hpss(D, margin=3.0)
>>> R = D - (H+P)
>>> y_harm = librosa.core.istft(H)
>>> y_perc = librosa.core.istft(P)
>>> y_resi = librosa.core.istft(R)
Get a more isolated percussive component by widening its margin
>>> H, P = librosa.decompose.hpss(D, margin=(1.0,5.0))
|
librosa/decompose.py
|
hpss
|
ElisaIzrailova/librosa
|
python
|
@cache(level=30)
def hpss(S, kernel_size=31, power=2.0, mask=False, margin=1.0):
'Median-filtering harmonic percussive source separation (HPSS).\n\n If `margin = 1.0`, decomposes an input spectrogram `S = H + P`\n where `H` contains the harmonic components,\n and `P` contains the percussive components.\n\n If `margin > 1.0`, decomposes an input spectrogram `S = H + P + R`\n where `R` contains residual components not included in `H` or `P`.\n\n This implementation is based upon the algorithm described by [1]_ and [2]_.\n\n .. [1] Fitzgerald, Derry.\n "Harmonic/percussive separation using median filtering."\n 13th International Conference on Digital Audio Effects (DAFX10),\n Graz, Austria, 2010.\n\n .. [2] Driedger, Müller, Disch.\n "Extending harmonic-percussive separation of audio."\n 15th International Society for Music Information Retrieval Conference (ISMIR 2014),\n Taipei, Taiwan, 2014.\n\n Parameters\n ----------\n S : np.ndarray [shape=(d, n)]\n input spectrogram. May be real (magnitude) or complex.\n\n kernel_size : int or tuple (kernel_harmonic, kernel_percussive)\n kernel size(s) for the median filters.\n\n - If scalar, the same size is used for both harmonic and percussive.\n - If tuple, the first value specifies the width of the\n harmonic filter, and the second value specifies the width\n of the percussive filter.\n\n power : float > 0 [scalar]\n Exponent for the Wiener filter when constructing soft mask matrices.\n\n mask : bool\n Return the masking matrices instead of components.\n\n Masking matrices contain non-negative real values that\n can be used to measure the assignment of energy from `S`\n into harmonic or percussive components.\n\n Components can be recovered by multiplying `S * mask_H`\n or `S * mask_P`.\n\n\n margin : float or tuple (margin_harmonic, margin_percussive)\n margin size(s) for the masks (as described in [2]_)\n\n - If scalar, the same size is used for both harmonic and percussive.\n - If tuple, the first value specifies the margin of the\n harmonic mask, and the second value specifies the margin\n of the percussive mask.\n\n Returns\n -------\n harmonic : np.ndarray [shape=(d, n)]\n harmonic component (or mask)\n\n percussive : np.ndarray [shape=(d, n)]\n percussive component (or mask)\n\n\n See Also\n --------\n util.softmask\n\n Notes\n -----\n This function caches at level 30.\n\n Examples\n --------\n Separate into harmonic and percussive\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)\n >>> D = librosa.stft(y)\n >>> H, P = librosa.decompose.hpss(D)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure()\n >>> plt.subplot(3, 1, 1)\n >>> librosa.display.specshow(librosa.amplitude_to_db(D,\n ... ref=np.max),\n ... y_axis=\'log\')\n >>> plt.colorbar(format=\'%+2.0f dB\')\n >>> plt.title(\'Full power spectrogram\')\n >>> plt.subplot(3, 1, 2)\n >>> librosa.display.specshow(librosa.amplitude_to_db(H,\n ... ref=np.max),\n ... y_axis=\'log\')\n >>> plt.colorbar(format=\'%+2.0f dB\')\n >>> plt.title(\'Harmonic power spectrogram\')\n >>> plt.subplot(3, 1, 3)\n >>> librosa.display.specshow(librosa.amplitude_to_db(P,\n ... ref=np.max),\n ... y_axis=\'log\')\n >>> plt.colorbar(format=\'%+2.0f dB\')\n >>> plt.title(\'Percussive power spectrogram\')\n >>> plt.tight_layout()\n\n\n Or with a narrower horizontal filter\n\n >>> H, P = librosa.decompose.hpss(D, kernel_size=(13, 31))\n\n Just get harmonic/percussive masks, not the spectra\n\n >>> mask_H, mask_P = librosa.decompose.hpss(D, mask=True)\n >>> mask_H\n array([[ 1.000e+00, 1.469e-01, ..., 2.648e-03, 2.164e-03],\n [ 1.000e+00, 2.368e-01, ..., 9.413e-03, 7.703e-03],\n ...,\n [ 8.869e-01, 5.673e-02, ..., 4.603e-02, 1.247e-05],\n [ 7.068e-01, 2.194e-02, ..., 4.453e-02, 1.205e-05]], dtype=float32)\n >>> mask_P\n array([[ 2.858e-05, 8.531e-01, ..., 9.974e-01, 9.978e-01],\n [ 1.586e-05, 7.632e-01, ..., 9.906e-01, 9.923e-01],\n ...,\n [ 1.131e-01, 9.433e-01, ..., 9.540e-01, 1.000e+00],\n [ 2.932e-01, 9.781e-01, ..., 9.555e-01, 1.000e+00]], dtype=float32)\n\n Separate into harmonic/percussive/residual components by using a margin > 1.0\n\n >>> H, P = librosa.decompose.hpss(D, margin=3.0)\n >>> R = D - (H+P)\n >>> y_harm = librosa.core.istft(H)\n >>> y_perc = librosa.core.istft(P)\n >>> y_resi = librosa.core.istft(R)\n\n\n Get a more isolated percussive component by widening its margin\n\n >>> H, P = librosa.decompose.hpss(D, margin=(1.0,5.0))\n\n '
if np.iscomplexobj(S):
(S, phase) = core.magphase(S)
else:
phase = 1
if np.isscalar(kernel_size):
win_harm = kernel_size
win_perc = kernel_size
else:
win_harm = kernel_size[0]
win_perc = kernel_size[1]
if np.isscalar(margin):
margin_harm = margin
margin_perc = margin
else:
margin_harm = margin[0]
margin_perc = margin[1]
if ((margin_harm < 1) or (margin_perc < 1)):
raise ParameterError('Margins must be >= 1.0. A typical range is between 1 and 10.')
harm = np.empty_like(S)
harm[:] = median_filter(S, size=(1, win_harm), mode='reflect')
perc = np.empty_like(S)
perc[:] = median_filter(S, size=(win_perc, 1), mode='reflect')
split_zeros = ((margin_harm == 1) and (margin_perc == 1))
mask_harm = util.softmask(harm, (perc * margin_harm), power=power, split_zeros=split_zeros)
mask_perc = util.softmask(perc, (harm * margin_perc), power=power, split_zeros=split_zeros)
if mask:
return (mask_harm, mask_perc)
return (((S * mask_harm) * phase), ((S * mask_perc) * phase))
|
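The core of hpss is the pair of margin-scaled soft masks: each median-filtered estimate competes against the other, inflated by its margin, in a Wiener-style ratio. A NumPy sketch of that arithmetic for the non-degenerate case (it mirrors, but does not replace, util.softmask, which also handles zero denominators and normalization):

import numpy as np

harm = np.array([[4.0, 1.0]])   # median-filtered along time (harmonic-enhanced)
perc = np.array([[1.0, 3.0]])   # median-filtered along frequency (percussive-enhanced)
power, margin_harm, margin_perc = 2.0, 1.0, 1.0

mask_h = harm**power / (harm**power + (margin_harm * perc)**power)
mask_p = perc**power / (perc**power + (margin_perc * harm)**power)
print(mask_h + mask_p)  # [[1. 1.]]: the masks partition the energy when both margins are 1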
@cache(level=30)
def nn_filter(S, rec=None, aggregate=None, axis=(- 1), **kwargs):
'Filtering by nearest-neighbors.\n\n Each data point (e.g, spectrogram column) is replaced\n by aggregating its nearest neighbors in feature space.\n\n This can be useful for de-noising a spectrogram or feature matrix.\n\n The non-local means method [1]_ can be recovered by providing a\n weighted recurrence matrix as input and specifying `aggregate=np.average`.\n\n Similarly, setting `aggregate=np.median` produces sparse de-noising\n as in REPET-SIM [2]_.\n\n .. [1] Buades, A., Coll, B., & Morel, J. M.\n (2005, June). A non-local algorithm for image denoising.\n In Computer Vision and Pattern Recognition, 2005.\n CVPR 2005. IEEE Computer Society Conference on (Vol. 2, pp. 60-65). IEEE.\n\n .. [2] Rafii, Z., & Pardo, B.\n (2012, October). "Music/Voice Separation Using the Similarity Matrix."\n International Society for Music Information Retrieval Conference, 2012.\n\n Parameters\n ----------\n S : np.ndarray\n The input data (spectrogram) to filter\n\n rec : (optional) scipy.sparse.spmatrix or np.ndarray\n Optionally, a pre-computed nearest-neighbor matrix\n as provided by `librosa.segment.recurrence_matrix`\n\n aggregate : function\n aggregation function (default: `np.mean`)\n\n If `aggregate=np.average`, then a weighted average is\n computed according to the (per-row) weights in `rec`.\n\n For all other aggregation functions, all neighbors\n are treated equally.\n\n\n axis : int\n The axis along which to filter (by default, columns)\n\n kwargs\n Additional keyword arguments provided to\n `librosa.segment.recurrence_matrix` if `rec` is not provided\n\n Returns\n -------\n S_filtered : np.ndarray\n The filtered data\n\n Raises\n ------\n ParameterError\n if `rec` is provided and its shape is incompatible with `S`.\n\n See also\n --------\n decompose\n hpss\n librosa.segment.recurrence_matrix\n\n\n Notes\n -----\n This function caches at level 30.\n\n\n Examples\n --------\n\n De-noise a chromagram by non-local median filtering.\n By default this would use euclidean distance to select neighbors,\n but this can be overridden directly by setting the `metric` parameter.\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file(),\n ... offset=30, duration=10)\n >>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)\n >>> chroma_med = librosa.decompose.nn_filter(chroma,\n ... aggregate=np.median,\n ... metric=\'cosine\')\n\n To use non-local means, provide an affinity matrix and `aggregate=np.average`.\n\n >>> rec = librosa.segment.recurrence_matrix(chroma, mode=\'affinity\',\n ... metric=\'cosine\', sparse=True)\n >>> chroma_nlm = librosa.decompose.nn_filter(chroma, rec=rec,\n ... aggregate=np.average)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure(figsize=(10, 8))\n >>> plt.subplot(5, 1, 1)\n >>> librosa.display.specshow(chroma, y_axis=\'chroma\')\n >>> plt.colorbar()\n >>> plt.title(\'Unfiltered\')\n >>> plt.subplot(5, 1, 2)\n >>> librosa.display.specshow(chroma_med, y_axis=\'chroma\')\n >>> plt.colorbar()\n >>> plt.title(\'Median-filtered\')\n >>> plt.subplot(5, 1, 3)\n >>> librosa.display.specshow(chroma_nlm, y_axis=\'chroma\')\n >>> plt.colorbar()\n >>> plt.title(\'Non-local means\')\n >>> plt.subplot(5, 1, 4)\n >>> librosa.display.specshow(chroma - chroma_med,\n ... y_axis=\'chroma\')\n >>> plt.colorbar()\n >>> plt.title(\'Original - median\')\n >>> plt.subplot(5, 1, 5)\n >>> librosa.display.specshow(chroma - chroma_nlm,\n ... y_axis=\'chroma\', x_axis=\'time\')\n >>> plt.colorbar()\n >>> plt.title(\'Original - NLM\')\n >>> plt.tight_layout()\n '
if (aggregate is None):
aggregate = np.mean
if (rec is None):
kwargs = dict(kwargs)
kwargs['sparse'] = True
rec = segment.recurrence_matrix(S, axis=axis, **kwargs)
elif (not scipy.sparse.issparse(rec)):
rec = scipy.sparse.csr_matrix(rec)
if ((rec.shape[0] != S.shape[axis]) or (rec.shape[0] != rec.shape[1])):
raise ParameterError('Invalid self-similarity matrix shape rec.shape={} for S.shape={}'.format(rec.shape, S.shape))
return __nn_filter_helper(rec.data, rec.indices, rec.indptr, S.swapaxes(0, axis), aggregate).swapaxes(0, axis)
| 6,552,659,050,913,915,000
|
Filtering by nearest-neighbors.
Each data point (e.g., spectrogram column) is replaced
by aggregating its nearest neighbors in feature space.
This can be useful for de-noising a spectrogram or feature matrix.
The non-local means method [1]_ can be recovered by providing a
weighted recurrence matrix as input and specifying `aggregate=np.average`.
Similarly, setting `aggregate=np.median` produces sparse de-noising
as in REPET-SIM [2]_.
.. [1] Buades, A., Coll, B., & Morel, J. M.
(2005, June). A non-local algorithm for image denoising.
In Computer Vision and Pattern Recognition, 2005.
CVPR 2005. IEEE Computer Society Conference on (Vol. 2, pp. 60-65). IEEE.
.. [2] Rafii, Z., & Pardo, B.
(2012, October). "Music/Voice Separation Using the Similarity Matrix."
International Society for Music Information Retrieval Conference, 2012.
Parameters
----------
S : np.ndarray
The input data (spectrogram) to filter
rec : (optional) scipy.sparse.spmatrix or np.ndarray
Optionally, a pre-computed nearest-neighbor matrix
as provided by `librosa.segment.recurrence_matrix`
aggregate : function
aggregation function (default: `np.mean`)
If `aggregate=np.average`, then a weighted average is
computed according to the (per-row) weights in `rec`.
For all other aggregation functions, all neighbors
are treated equally.
axis : int
The axis along which to filter (by default, columns)
kwargs
Additional keyword arguments provided to
`librosa.segment.recurrence_matrix` if `rec` is not provided
Returns
-------
S_filtered : np.ndarray
The filtered data
Raises
------
ParameterError
if `rec` is provided and its shape is incompatible with `S`.
See also
--------
decompose
hpss
librosa.segment.recurrence_matrix
Notes
-----
This function caches at level 30.
Examples
--------
De-noise a chromagram by non-local median filtering.
By default this would use Euclidean distance to select neighbors,
but this can be overridden directly by setting the `metric` parameter.
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=30, duration=10)
>>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
>>> chroma_med = librosa.decompose.nn_filter(chroma,
... aggregate=np.median,
... metric='cosine')
To use non-local means, provide an affinity matrix and `aggregate=np.average`.
>>> rec = librosa.segment.recurrence_matrix(chroma, mode='affinity',
... metric='cosine', sparse=True)
>>> chroma_nlm = librosa.decompose.nn_filter(chroma, rec=rec,
... aggregate=np.average)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10, 8))
>>> plt.subplot(5, 1, 1)
>>> librosa.display.specshow(chroma, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Unfiltered')
>>> plt.subplot(5, 1, 2)
>>> librosa.display.specshow(chroma_med, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Median-filtered')
>>> plt.subplot(5, 1, 3)
>>> librosa.display.specshow(chroma_nlm, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Non-local means')
>>> plt.subplot(5, 1, 4)
>>> librosa.display.specshow(chroma - chroma_med,
... y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Original - median')
>>> plt.subplot(5, 1, 5)
>>> librosa.display.specshow(chroma - chroma_nlm,
... y_axis='chroma', x_axis='time')
>>> plt.colorbar()
>>> plt.title('Original - NLM')
>>> plt.tight_layout()
|
librosa/decompose.py
|
nn_filter
|
ElisaIzrailova/librosa
|
python
|
@cache(level=30)
def nn_filter(S, rec=None, aggregate=None, axis=(- 1), **kwargs):
'Filtering by nearest-neighbors.\n\n Each data point (e.g, spectrogram column) is replaced\n by aggregating its nearest neighbors in feature space.\n\n This can be useful for de-noising a spectrogram or feature matrix.\n\n The non-local means method [1]_ can be recovered by providing a\n weighted recurrence matrix as input and specifying `aggregate=np.average`.\n\n Similarly, setting `aggregate=np.median` produces sparse de-noising\n as in REPET-SIM [2]_.\n\n .. [1] Buades, A., Coll, B., & Morel, J. M.\n (2005, June). A non-local algorithm for image denoising.\n In Computer Vision and Pattern Recognition, 2005.\n CVPR 2005. IEEE Computer Society Conference on (Vol. 2, pp. 60-65). IEEE.\n\n .. [2] Rafii, Z., & Pardo, B.\n (2012, October). "Music/Voice Separation Using the Similarity Matrix."\n International Society for Music Information Retrieval Conference, 2012.\n\n Parameters\n ----------\n S : np.ndarray\n The input data (spectrogram) to filter\n\n rec : (optional) scipy.sparse.spmatrix or np.ndarray\n Optionally, a pre-computed nearest-neighbor matrix\n as provided by `librosa.segment.recurrence_matrix`\n\n aggregate : function\n aggregation function (default: `np.mean`)\n\n If `aggregate=np.average`, then a weighted average is\n computed according to the (per-row) weights in `rec`.\n\n For all other aggregation functions, all neighbors\n are treated equally.\n\n\n axis : int\n The axis along which to filter (by default, columns)\n\n kwargs\n Additional keyword arguments provided to\n `librosa.segment.recurrence_matrix` if `rec` is not provided\n\n Returns\n -------\n S_filtered : np.ndarray\n The filtered data\n\n Raises\n ------\n ParameterError\n if `rec` is provided and its shape is incompatible with `S`.\n\n See also\n --------\n decompose\n hpss\n librosa.segment.recurrence_matrix\n\n\n Notes\n -----\n This function caches at level 30.\n\n\n Examples\n --------\n\n De-noise a chromagram by non-local median filtering.\n By default this would use euclidean distance to select neighbors,\n but this can be overridden directly by setting the `metric` parameter.\n\n >>> y, sr = librosa.load(librosa.util.example_audio_file(),\n ... offset=30, duration=10)\n >>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)\n >>> chroma_med = librosa.decompose.nn_filter(chroma,\n ... aggregate=np.median,\n ... metric=\'cosine\')\n\n To use non-local means, provide an affinity matrix and `aggregate=np.average`.\n\n >>> rec = librosa.segment.recurrence_matrix(chroma, mode=\'affinity\',\n ... metric=\'cosine\', sparse=True)\n >>> chroma_nlm = librosa.decompose.nn_filter(chroma, rec=rec,\n ... aggregate=np.average)\n\n >>> import matplotlib.pyplot as plt\n >>> plt.figure(figsize=(10, 8))\n >>> plt.subplot(5, 1, 1)\n >>> librosa.display.specshow(chroma, y_axis=\'chroma\')\n >>> plt.colorbar()\n >>> plt.title(\'Unfiltered\')\n >>> plt.subplot(5, 1, 2)\n >>> librosa.display.specshow(chroma_med, y_axis=\'chroma\')\n >>> plt.colorbar()\n >>> plt.title(\'Median-filtered\')\n >>> plt.subplot(5, 1, 3)\n >>> librosa.display.specshow(chroma_nlm, y_axis=\'chroma\')\n >>> plt.colorbar()\n >>> plt.title(\'Non-local means\')\n >>> plt.subplot(5, 1, 4)\n >>> librosa.display.specshow(chroma - chroma_med,\n ... y_axis=\'chroma\')\n >>> plt.colorbar()\n >>> plt.title(\'Original - median\')\n >>> plt.subplot(5, 1, 5)\n >>> librosa.display.specshow(chroma - chroma_nlm,\n ... y_axis=\'chroma\', x_axis=\'time\')\n >>> plt.colorbar()\n >>> plt.title(\'Original - NLM\')\n >>> plt.tight_layout()\n '
if (aggregate is None):
aggregate = np.mean
if (rec is None):
kwargs = dict(kwargs)
kwargs['sparse'] = True
rec = segment.recurrence_matrix(S, axis=axis, **kwargs)
elif (not scipy.sparse.issparse(rec)):
rec = scipy.sparse.csr_matrix(rec)
if ((rec.shape[0] != S.shape[axis]) or (rec.shape[0] != rec.shape[1])):
raise ParameterError('Invalid self-similarity matrix shape rec.shape={} for S.shape={}'.format(rec.shape, S.shape))
return __nn_filter_helper(rec.data, rec.indices, rec.indptr, S.swapaxes(0, axis), aggregate).swapaxes(0, axis)
|
def __nn_filter_helper(R_data, R_indices, R_ptr, S, aggregate):
'Nearest-neighbor filter helper function.\n\n This is an internal function, not for use outside of the decompose module.\n\n It applies the nearest-neighbor filter to S, assuming that the first index\n corresponds to observations.\n\n Parameters\n ----------\n R_data, R_indices, R_ptr : np.ndarrays\n The `data`, `indices`, and `indptr` of a scipy.sparse matrix\n\n S : np.ndarray\n The observation data to filter\n\n aggregate : callable\n The aggregation operator\n\n\n Returns\n -------\n S_out : np.ndarray like S\n The filtered data array\n '
s_out = np.empty_like(S)
for i in range((len(R_ptr) - 1)):
targets = R_indices[R_ptr[i]:R_ptr[(i + 1)]]
if (not len(targets)):
s_out[i] = S[i]
continue
neighbors = np.take(S, targets, axis=0)
if (aggregate is np.average):
weights = R_data[R_ptr[i]:R_ptr[(i + 1)]]
s_out[i] = aggregate(neighbors, axis=0, weights=weights)
else:
s_out[i] = aggregate(neighbors, axis=0)
return s_out
| 8,874,733,337,976,750,000
|
Nearest-neighbor filter helper function.
This is an internal function, not for use outside of the decompose module.
It applies the nearest-neighbor filter to S, assuming that the first index
corresponds to observations.
Parameters
----------
R_data, R_indices, R_ptr : np.ndarrays
The `data`, `indices`, and `indptr` of a scipy.sparse matrix
S : np.ndarray
The observation data to filter
aggregate : callable
The aggregation operator
Returns
-------
S_out : np.ndarray like S
The filtered data array
|
librosa/decompose.py
|
__nn_filter_helper
|
ElisaIzrailova/librosa
|
python
|
def __nn_filter_helper(R_data, R_indices, R_ptr, S, aggregate):
'Nearest-neighbor filter helper function.\n\n This is an internal function, not for use outside of the decompose module.\n\n It applies the nearest-neighbor filter to S, assuming that the first index\n corresponds to observations.\n\n Parameters\n ----------\n R_data, R_indices, R_ptr : np.ndarrays\n The `data`, `indices`, and `indptr` of a scipy.sparse matrix\n\n S : np.ndarray\n The observation data to filter\n\n aggregate : callable\n The aggregation operator\n\n\n Returns\n -------\n S_out : np.ndarray like S\n The filtered data array\n '
s_out = np.empty_like(S)
for i in range((len(R_ptr) - 1)):
targets = R_indices[R_ptr[i]:R_ptr[(i + 1)]]
if (not len(targets)):
s_out[i] = S[i]
continue
neighbors = np.take(S, targets, axis=0)
if (aggregate is np.average):
weights = R_data[R_ptr[i]:R_ptr[(i + 1)]]
s_out[i] = aggregate(neighbors, axis=0, weights=weights)
else:
s_out[i] = aggregate(neighbors, axis=0)
return s_out
|
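The helper above leans on the raw CSR layout of `rec`: for row `i`, `indices[indptr[i]:indptr[i+1]]` holds the neighbor columns and the same slice of `data` holds their weights. A minimal stand-alone sketch of that slicing, using a made-up matrix (not from the original source); an empty slice corresponds to the `if (not len(targets))` fallback in the helper:

import numpy as np
import scipy.sparse

# Hypothetical 3x3 recurrence matrix; only the CSR mechanics matter here.
rec = scipy.sparse.csr_matrix(np.array([[0.0, 1.0, 0.0],
                                        [1.0, 0.0, 1.0],
                                        [0.0, 0.0, 0.0]]))
for i in range(len(rec.indptr) - 1):
    cols = rec.indices[rec.indptr[i]:rec.indptr[i + 1]]  # neighbors of row i
    vals = rec.data[rec.indptr[i]:rec.indptr[i + 1]]     # their weights
    print(i, cols, vals)                                 # row 2 prints empty arrays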
def __init__(self, *args, **kwds):
    '\n Constructor. Any message fields that are implicitly/explicitly\n set to None will be assigned a default value. The recommended\n use is keyword arguments as this is more robust to future message\n changes. You cannot mix in-order arguments and keyword arguments.\n\n The available fields are:\n model_name,joint_trajectory,model_pose,set_model_pose,disable_physics_updates\n\n :param args: complete set of field values, in .msg order\n :param kwds: use keyword arguments corresponding to message field names\n to set specific fields.\n '
if (args or kwds):
super(SetJointTrajectoryRequest, self).__init__(*args, **kwds)
if (self.model_name is None):
self.model_name = ''
if (self.joint_trajectory is None):
self.joint_trajectory = trajectory_msgs.msg.JointTrajectory()
if (self.model_pose is None):
self.model_pose = geometry_msgs.msg.Pose()
if (self.set_model_pose is None):
self.set_model_pose = False
if (self.disable_physics_updates is None):
self.disable_physics_updates = False
else:
self.model_name = ''
self.joint_trajectory = trajectory_msgs.msg.JointTrajectory()
self.model_pose = geometry_msgs.msg.Pose()
self.set_model_pose = False
self.disable_physics_updates = False
| 3,792,384,007,490,749,000
|
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
model_name,joint_trajectory,model_pose,set_model_pose,disable_physics_updates
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
|
files/catkin_ws/devel/lib/python2.7/dist-packages/gazebo_msgs/srv/_SetJointTrajectory.py
|
__init__
|
Filipe-Douglas-Slam/slam_lidar_kinect
|
python
|
def __init__(self, *args, **kwds):
    '\n Constructor. Any message fields that are implicitly/explicitly\n set to None will be assigned a default value. The recommended\n use is keyword arguments as this is more robust to future message\n changes. You cannot mix in-order arguments and keyword arguments.\n\n The available fields are:\n model_name,joint_trajectory,model_pose,set_model_pose,disable_physics_updates\n\n :param args: complete set of field values, in .msg order\n :param kwds: use keyword arguments corresponding to message field names\n to set specific fields.\n '
if (args or kwds):
super(SetJointTrajectoryRequest, self).__init__(*args, **kwds)
if (self.model_name is None):
            self.model_name = ''
if (self.joint_trajectory is None):
self.joint_trajectory = trajectory_msgs.msg.JointTrajectory()
if (self.model_pose is None):
self.model_pose = geometry_msgs.msg.Pose()
if (self.set_model_pose is None):
self.set_model_pose = False
if (self.disable_physics_updates is None):
self.disable_physics_updates = False
else:
        self.model_name = ''
self.joint_trajectory = trajectory_msgs.msg.JointTrajectory()
self.model_pose = geometry_msgs.msg.Pose()
self.set_model_pose = False
self.disable_physics_updates = False
|
def _get_types(self):
'\n internal API method\n '
return self._slot_types
| 840,424,092,067,405,300
|
internal API method
|
files/catkin_ws/devel/lib/python2.7/dist-packages/gazebo_msgs/srv/_SetJointTrajectory.py
|
_get_types
|
Filipe-Douglas-Slam/slam_lidar_kinect
|
python
|
def _get_types(self):
'\n \n '
return self._slot_types
|
def serialize(self, buff):
'\n serialize message into buffer\n :param buff: buffer, ``StringIO``\n '
try:
_x = self.model_name
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
_x = self
buff.write(_get_struct_3I().pack(_x.joint_trajectory.header.seq, _x.joint_trajectory.header.stamp.secs, _x.joint_trajectory.header.stamp.nsecs))
_x = self.joint_trajectory.header.frame_id
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
length = len(self.joint_trajectory.joint_names)
buff.write(_struct_I.pack(length))
for val1 in self.joint_trajectory.joint_names:
length = len(val1)
if (python3 or (type(val1) == unicode)):
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack(('<I%ss' % length), length, val1))
length = len(self.joint_trajectory.points)
buff.write(_struct_I.pack(length))
for val1 in self.joint_trajectory.points:
length = len(val1.positions)
buff.write(_struct_I.pack(length))
pattern = ('<%sd' % length)
buff.write(struct.pack(pattern, *val1.positions))
length = len(val1.velocities)
buff.write(_struct_I.pack(length))
pattern = ('<%sd' % length)
buff.write(struct.pack(pattern, *val1.velocities))
length = len(val1.accelerations)
buff.write(_struct_I.pack(length))
pattern = ('<%sd' % length)
buff.write(struct.pack(pattern, *val1.accelerations))
length = len(val1.effort)
buff.write(_struct_I.pack(length))
pattern = ('<%sd' % length)
buff.write(struct.pack(pattern, *val1.effort))
_v1 = val1.time_from_start
_x = _v1
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
_x = self
buff.write(_get_struct_7d2B().pack(_x.model_pose.position.x, _x.model_pose.position.y, _x.model_pose.position.z, _x.model_pose.orientation.x, _x.model_pose.orientation.y, _x.model_pose.orientation.z, _x.model_pose.orientation.w, _x.set_model_pose, _x.disable_physics_updates))
except struct.error as se:
self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))))
except TypeError as te:
self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))))
| 8,435,202,572,428,503,000
|
serialize message into buffer
:param buff: buffer, ``StringIO``
|
files/catkin_ws/devel/lib/python2.7/dist-packages/gazebo_msgs/srv/_SetJointTrajectory.py
|
serialize
|
Filipe-Douglas-Slam/slam_lidar_kinect
|
python
|
def serialize(self, buff):
'\n serialize message into buffer\n :param buff: buffer, ``StringIO``\n '
try:
_x = self.model_name
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
_x = self
buff.write(_get_struct_3I().pack(_x.joint_trajectory.header.seq, _x.joint_trajectory.header.stamp.secs, _x.joint_trajectory.header.stamp.nsecs))
_x = self.joint_trajectory.header.frame_id
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
length = len(self.joint_trajectory.joint_names)
buff.write(_struct_I.pack(length))
for val1 in self.joint_trajectory.joint_names:
length = len(val1)
if (python3 or (type(val1) == unicode)):
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack(('<I%ss' % length), length, val1))
length = len(self.joint_trajectory.points)
buff.write(_struct_I.pack(length))
for val1 in self.joint_trajectory.points:
length = len(val1.positions)
buff.write(_struct_I.pack(length))
pattern = ('<%sd' % length)
buff.write(struct.pack(pattern, *val1.positions))
length = len(val1.velocities)
buff.write(_struct_I.pack(length))
pattern = ('<%sd' % length)
buff.write(struct.pack(pattern, *val1.velocities))
length = len(val1.accelerations)
buff.write(_struct_I.pack(length))
pattern = ('<%sd' % length)
buff.write(struct.pack(pattern, *val1.accelerations))
length = len(val1.effort)
buff.write(_struct_I.pack(length))
pattern = ('<%sd' % length)
buff.write(struct.pack(pattern, *val1.effort))
_v1 = val1.time_from_start
_x = _v1
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
_x = self
buff.write(_get_struct_7d2B().pack(_x.model_pose.position.x, _x.model_pose.position.y, _x.model_pose.position.z, _x.model_pose.orientation.x, _x.model_pose.orientation.y, _x.model_pose.orientation.z, _x.model_pose.orientation.w, _x.set_model_pose, _x.disable_physics_updates))
except struct.error as se:
self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))))
except TypeError as te:
self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))))
|
def deserialize(self, str):
'\n unpack serialized message in str into this message instance\n :param str: byte array of serialized message, ``str``\n '
try:
if (self.joint_trajectory is None):
self.joint_trajectory = trajectory_msgs.msg.JointTrajectory()
if (self.model_pose is None):
self.model_pose = geometry_msgs.msg.Pose()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.model_name = str[start:end].decode('utf-8')
else:
self.model_name = str[start:end]
_x = self
start = end
end += 12
(_x.joint_trajectory.header.seq, _x.joint_trajectory.header.stamp.secs, _x.joint_trajectory.header.stamp.nsecs) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.joint_trajectory.header.frame_id = str[start:end].decode('utf-8')
else:
self.joint_trajectory.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.joint_trajectory.joint_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.joint_trajectory.joint_names.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.joint_trajectory.points = []
for i in range(0, length):
val1 = trajectory_msgs.msg.JointTrajectoryPoint()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = ('<%sd' % length)
start = end
end += struct.calcsize(pattern)
val1.positions = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = ('<%sd' % length)
start = end
end += struct.calcsize(pattern)
val1.velocities = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = ('<%sd' % length)
start = end
end += struct.calcsize(pattern)
val1.accelerations = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = ('<%sd' % length)
start = end
end += struct.calcsize(pattern)
val1.effort = struct.unpack(pattern, str[start:end])
_v2 = val1.time_from_start
_x = _v2
start = end
end += 8
(_x.secs, _x.nsecs) = _get_struct_2i().unpack(str[start:end])
self.joint_trajectory.points.append(val1)
_x = self
start = end
end += 58
(_x.model_pose.position.x, _x.model_pose.position.y, _x.model_pose.position.z, _x.model_pose.orientation.x, _x.model_pose.orientation.y, _x.model_pose.orientation.z, _x.model_pose.orientation.w, _x.set_model_pose, _x.disable_physics_updates) = _get_struct_7d2B().unpack(str[start:end])
self.set_model_pose = bool(self.set_model_pose)
self.disable_physics_updates = bool(self.disable_physics_updates)
return self
except struct.error as e:
raise genpy.DeserializationError(e)
| 71,315,805,887,924,400
|
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
|
files/catkin_ws/devel/lib/python2.7/dist-packages/gazebo_msgs/srv/_SetJointTrajectory.py
|
deserialize
|
Filipe-Douglas-Slam/slam_lidar_kinect
|
python
|
def deserialize(self, str):
'\n unpack serialized message in str into this message instance\n :param str: byte array of serialized message, ``str``\n '
try:
if (self.joint_trajectory is None):
self.joint_trajectory = trajectory_msgs.msg.JointTrajectory()
if (self.model_pose is None):
self.model_pose = geometry_msgs.msg.Pose()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.model_name = str[start:end].decode('utf-8')
else:
self.model_name = str[start:end]
_x = self
start = end
end += 12
(_x.joint_trajectory.header.seq, _x.joint_trajectory.header.stamp.secs, _x.joint_trajectory.header.stamp.nsecs) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.joint_trajectory.header.frame_id = str[start:end].decode('utf-8')
else:
self.joint_trajectory.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.joint_trajectory.joint_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.joint_trajectory.joint_names.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.joint_trajectory.points = []
for i in range(0, length):
val1 = trajectory_msgs.msg.JointTrajectoryPoint()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = ('<%sd' % length)
start = end
end += struct.calcsize(pattern)
val1.positions = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = ('<%sd' % length)
start = end
end += struct.calcsize(pattern)
val1.velocities = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = ('<%sd' % length)
start = end
end += struct.calcsize(pattern)
val1.accelerations = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = ('<%sd' % length)
start = end
end += struct.calcsize(pattern)
val1.effort = struct.unpack(pattern, str[start:end])
_v2 = val1.time_from_start
_x = _v2
start = end
end += 8
(_x.secs, _x.nsecs) = _get_struct_2i().unpack(str[start:end])
self.joint_trajectory.points.append(val1)
_x = self
start = end
end += 58
(_x.model_pose.position.x, _x.model_pose.position.y, _x.model_pose.position.z, _x.model_pose.orientation.x, _x.model_pose.orientation.y, _x.model_pose.orientation.z, _x.model_pose.orientation.w, _x.set_model_pose, _x.disable_physics_updates) = _get_struct_7d2B().unpack(str[start:end])
self.set_model_pose = bool(self.set_model_pose)
self.disable_physics_updates = bool(self.disable_physics_updates)
return self
except struct.error as e:
raise genpy.DeserializationError(e)
|
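Every string field in the serialize/deserialize pair above follows the same wire pattern: a little-endian uint32 length prefix, then the raw UTF-8 bytes. A minimal round-trip sketch of that pattern (the helper names here are illustrative, not from the generated file):

import struct
from io import BytesIO

def pack_string(buff, s):
    data = s.encode('utf-8')
    # '<I%ss': 4-byte little-endian length prefix, then the payload
    buff.write(struct.pack('<I%ss' % len(data), len(data), data))

def unpack_string(data, offset):
    (length,) = struct.unpack_from('<I', data, offset)
    offset += 4
    return data[offset:offset + length].decode('utf-8'), offset + length

buff = BytesIO()
pack_string(buff, 'joint_1')
value, _ = unpack_string(buff.getvalue(), 0)
assert value == 'joint_1'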
def serialize_numpy(self, buff, numpy):
'\n serialize message with numpy array types into buffer\n :param buff: buffer, ``StringIO``\n :param numpy: numpy python module\n '
try:
_x = self.model_name
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
_x = self
buff.write(_get_struct_3I().pack(_x.joint_trajectory.header.seq, _x.joint_trajectory.header.stamp.secs, _x.joint_trajectory.header.stamp.nsecs))
_x = self.joint_trajectory.header.frame_id
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
length = len(self.joint_trajectory.joint_names)
buff.write(_struct_I.pack(length))
for val1 in self.joint_trajectory.joint_names:
length = len(val1)
if (python3 or (type(val1) == unicode)):
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack(('<I%ss' % length), length, val1))
length = len(self.joint_trajectory.points)
buff.write(_struct_I.pack(length))
for val1 in self.joint_trajectory.points:
length = len(val1.positions)
buff.write(_struct_I.pack(length))
pattern = ('<%sd' % length)
buff.write(val1.positions.tostring())
length = len(val1.velocities)
buff.write(_struct_I.pack(length))
pattern = ('<%sd' % length)
buff.write(val1.velocities.tostring())
length = len(val1.accelerations)
buff.write(_struct_I.pack(length))
pattern = ('<%sd' % length)
buff.write(val1.accelerations.tostring())
length = len(val1.effort)
buff.write(_struct_I.pack(length))
pattern = ('<%sd' % length)
buff.write(val1.effort.tostring())
_v3 = val1.time_from_start
_x = _v3
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
_x = self
buff.write(_get_struct_7d2B().pack(_x.model_pose.position.x, _x.model_pose.position.y, _x.model_pose.position.z, _x.model_pose.orientation.x, _x.model_pose.orientation.y, _x.model_pose.orientation.z, _x.model_pose.orientation.w, _x.set_model_pose, _x.disable_physics_updates))
except struct.error as se:
self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))))
except TypeError as te:
self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))))
| -8,851,787,001,122,039,000
|
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
|
files/catkin_ws/devel/lib/python2.7/dist-packages/gazebo_msgs/srv/_SetJointTrajectory.py
|
serialize_numpy
|
Filipe-Douglas-Slam/slam_lidar_kinect
|
python
|
def serialize_numpy(self, buff, numpy):
'\n serialize message with numpy array types into buffer\n :param buff: buffer, ``StringIO``\n :param numpy: numpy python module\n '
try:
_x = self.model_name
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
_x = self
buff.write(_get_struct_3I().pack(_x.joint_trajectory.header.seq, _x.joint_trajectory.header.stamp.secs, _x.joint_trajectory.header.stamp.nsecs))
_x = self.joint_trajectory.header.frame_id
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
length = len(self.joint_trajectory.joint_names)
buff.write(_struct_I.pack(length))
for val1 in self.joint_trajectory.joint_names:
length = len(val1)
if (python3 or (type(val1) == unicode)):
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack(('<I%ss' % length), length, val1))
length = len(self.joint_trajectory.points)
buff.write(_struct_I.pack(length))
for val1 in self.joint_trajectory.points:
length = len(val1.positions)
buff.write(_struct_I.pack(length))
pattern = ('<%sd' % length)
buff.write(val1.positions.tostring())
length = len(val1.velocities)
buff.write(_struct_I.pack(length))
pattern = ('<%sd' % length)
buff.write(val1.velocities.tostring())
length = len(val1.accelerations)
buff.write(_struct_I.pack(length))
pattern = ('<%sd' % length)
buff.write(val1.accelerations.tostring())
length = len(val1.effort)
buff.write(_struct_I.pack(length))
pattern = ('<%sd' % length)
buff.write(val1.effort.tostring())
_v3 = val1.time_from_start
_x = _v3
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
_x = self
buff.write(_get_struct_7d2B().pack(_x.model_pose.position.x, _x.model_pose.position.y, _x.model_pose.position.z, _x.model_pose.orientation.x, _x.model_pose.orientation.y, _x.model_pose.orientation.z, _x.model_pose.orientation.w, _x.set_model_pose, _x.disable_physics_updates))
except struct.error as se:
self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))))
except TypeError as te:
self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))))
|
def deserialize_numpy(self, str, numpy):
'\n unpack serialized message in str into this message instance using numpy for array types\n :param str: byte array of serialized message, ``str``\n :param numpy: numpy python module\n '
try:
if (self.joint_trajectory is None):
self.joint_trajectory = trajectory_msgs.msg.JointTrajectory()
if (self.model_pose is None):
self.model_pose = geometry_msgs.msg.Pose()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.model_name = str[start:end].decode('utf-8')
else:
self.model_name = str[start:end]
_x = self
start = end
end += 12
(_x.joint_trajectory.header.seq, _x.joint_trajectory.header.stamp.secs, _x.joint_trajectory.header.stamp.nsecs) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.joint_trajectory.header.frame_id = str[start:end].decode('utf-8')
else:
self.joint_trajectory.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.joint_trajectory.joint_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.joint_trajectory.joint_names.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.joint_trajectory.points = []
for i in range(0, length):
val1 = trajectory_msgs.msg.JointTrajectoryPoint()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = ('<%sd' % length)
start = end
end += struct.calcsize(pattern)
val1.positions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = ('<%sd' % length)
start = end
end += struct.calcsize(pattern)
val1.velocities = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = ('<%sd' % length)
start = end
end += struct.calcsize(pattern)
val1.accelerations = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = ('<%sd' % length)
start = end
end += struct.calcsize(pattern)
val1.effort = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
_v4 = val1.time_from_start
_x = _v4
start = end
end += 8
(_x.secs, _x.nsecs) = _get_struct_2i().unpack(str[start:end])
self.joint_trajectory.points.append(val1)
_x = self
start = end
end += 58
(_x.model_pose.position.x, _x.model_pose.position.y, _x.model_pose.position.z, _x.model_pose.orientation.x, _x.model_pose.orientation.y, _x.model_pose.orientation.z, _x.model_pose.orientation.w, _x.set_model_pose, _x.disable_physics_updates) = _get_struct_7d2B().unpack(str[start:end])
self.set_model_pose = bool(self.set_model_pose)
self.disable_physics_updates = bool(self.disable_physics_updates)
return self
except struct.error as e:
raise genpy.DeserializationError(e)
| 1,277,172,783,877,668,600
|
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
|
files/catkin_ws/devel/lib/python2.7/dist-packages/gazebo_msgs/srv/_SetJointTrajectory.py
|
deserialize_numpy
|
Filipe-Douglas-Slam/slam_lidar_kinect
|
python
|
def deserialize_numpy(self, str, numpy):
'\n unpack serialized message in str into this message instance using numpy for array types\n :param str: byte array of serialized message, ``str``\n :param numpy: numpy python module\n '
try:
if (self.joint_trajectory is None):
self.joint_trajectory = trajectory_msgs.msg.JointTrajectory()
if (self.model_pose is None):
self.model_pose = geometry_msgs.msg.Pose()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.model_name = str[start:end].decode('utf-8')
else:
self.model_name = str[start:end]
_x = self
start = end
end += 12
(_x.joint_trajectory.header.seq, _x.joint_trajectory.header.stamp.secs, _x.joint_trajectory.header.stamp.nsecs) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.joint_trajectory.header.frame_id = str[start:end].decode('utf-8')
else:
self.joint_trajectory.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.joint_trajectory.joint_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.joint_trajectory.joint_names.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.joint_trajectory.points = []
for i in range(0, length):
val1 = trajectory_msgs.msg.JointTrajectoryPoint()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = ('<%sd' % length)
start = end
end += struct.calcsize(pattern)
val1.positions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = ('<%sd' % length)
start = end
end += struct.calcsize(pattern)
val1.velocities = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = ('<%sd' % length)
start = end
end += struct.calcsize(pattern)
val1.accelerations = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = ('<%sd' % length)
start = end
end += struct.calcsize(pattern)
val1.effort = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
_v4 = val1.time_from_start
_x = _v4
start = end
end += 8
(_x.secs, _x.nsecs) = _get_struct_2i().unpack(str[start:end])
self.joint_trajectory.points.append(val1)
_x = self
start = end
end += 58
(_x.model_pose.position.x, _x.model_pose.position.y, _x.model_pose.position.z, _x.model_pose.orientation.x, _x.model_pose.orientation.y, _x.model_pose.orientation.z, _x.model_pose.orientation.w, _x.set_model_pose, _x.disable_physics_updates) = _get_struct_7d2B().unpack(str[start:end])
self.set_model_pose = bool(self.set_model_pose)
self.disable_physics_updates = bool(self.disable_physics_updates)
return self
except struct.error as e:
raise genpy.DeserializationError(e)
|
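The `_numpy` variants swap the per-element struct calls for raw buffer copies: `tostring()` on write (the historical alias of `tobytes()`) and `numpy.frombuffer` on read. A round-trip sketch of that exchange:

import numpy as np

arr = np.array([0.5, 1.5, 2.5], dtype=np.float64)
raw = arr.tobytes()  # equivalent to the tostring() calls used above
back = np.frombuffer(raw, dtype=np.float64, count=len(arr))
assert np.array_equal(arr, back)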
def __init__(self, *args, **kwds):
    '\n Constructor. Any message fields that are implicitly/explicitly\n set to None will be assigned a default value. The recommended\n use is keyword arguments as this is more robust to future message\n changes. You cannot mix in-order arguments and keyword arguments.\n\n The available fields are:\n success,status_message\n\n :param args: complete set of field values, in .msg order\n :param kwds: use keyword arguments corresponding to message field names\n to set specific fields.\n '
if (args or kwds):
super(SetJointTrajectoryResponse, self).__init__(*args, **kwds)
if (self.success is None):
self.success = False
if (self.status_message is None):
self.status_message = ''
else:
self.success = False
self.status_message = ''
| -2,537,933,800,810,868,700
|
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
success,status_message
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
|
files/catkin_ws/devel/lib/python2.7/dist-packages/gazebo_msgs/srv/_SetJointTrajectory.py
|
__init__
|
Filipe-Douglas-Slam/slam_lidar_kinect
|
python
|
def __init__(self, *args, **kwds):
    '\n Constructor. Any message fields that are implicitly/explicitly\n set to None will be assigned a default value. The recommended\n use is keyword arguments as this is more robust to future message\n changes. You cannot mix in-order arguments and keyword arguments.\n\n The available fields are:\n success,status_message\n\n :param args: complete set of field values, in .msg order\n :param kwds: use keyword arguments corresponding to message field names\n to set specific fields.\n '
if (args or kwds):
super(SetJointTrajectoryResponse, self).__init__(*args, **kwds)
if (self.success is None):
self.success = False
if (self.status_message is None):
            self.status_message = ''
else:
self.success = False
        self.status_message = ''
|
def _get_types(self):
'\n internal API method\n '
return self._slot_types
| 840,424,092,067,405,300
|
internal API method
|
files/catkin_ws/devel/lib/python2.7/dist-packages/gazebo_msgs/srv/_SetJointTrajectory.py
|
_get_types
|
Filipe-Douglas-Slam/slam_lidar_kinect
|
python
|
def _get_types(self):
'\n \n '
return self._slot_types
|
def serialize(self, buff):
'\n serialize message into buffer\n :param buff: buffer, ``StringIO``\n '
try:
buff.write(_get_struct_B().pack(self.success))
_x = self.status_message
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
except struct.error as se:
self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))))
except TypeError as te:
self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))))
| 4,799,889,647,904,635,000
|
serialize message into buffer
:param buff: buffer, ``StringIO``
|
files/catkin_ws/devel/lib/python2.7/dist-packages/gazebo_msgs/srv/_SetJointTrajectory.py
|
serialize
|
Filipe-Douglas-Slam/slam_lidar_kinect
|
python
|
def serialize(self, buff):
'\n serialize message into buffer\n :param buff: buffer, ``StringIO``\n '
try:
buff.write(_get_struct_B().pack(self.success))
_x = self.status_message
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
except struct.error as se:
self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))))
except TypeError as te:
self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))))
|
def deserialize(self, str):
'\n unpack serialized message in str into this message instance\n :param str: byte array of serialized message, ``str``\n '
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status_message = str[start:end].decode('utf-8')
else:
self.status_message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e)
| -5,420,559,687,702,289,000
|
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
|
files/catkin_ws/devel/lib/python2.7/dist-packages/gazebo_msgs/srv/_SetJointTrajectory.py
|
deserialize
|
Filipe-Douglas-Slam/slam_lidar_kinect
|
python
|
def deserialize(self, str):
'\n unpack serialized message in str into this message instance\n :param str: byte array of serialized message, ``str``\n '
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status_message = str[start:end].decode('utf-8')
else:
self.status_message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e)
|
def serialize_numpy(self, buff, numpy):
'\n serialize message with numpy array types into buffer\n :param buff: buffer, ``StringIO``\n :param numpy: numpy python module\n '
try:
buff.write(_get_struct_B().pack(self.success))
_x = self.status_message
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
except struct.error as se:
self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))))
except TypeError as te:
self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))))
| -4,722,480,347,249,835,000
|
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
|
files/catkin_ws/devel/lib/python2.7/dist-packages/gazebo_msgs/srv/_SetJointTrajectory.py
|
serialize_numpy
|
Filipe-Douglas-Slam/slam_lidar_kinect
|
python
|
def serialize_numpy(self, buff, numpy):
'\n serialize message with numpy array types into buffer\n :param buff: buffer, ``StringIO``\n :param numpy: numpy python module\n '
try:
buff.write(_get_struct_B().pack(self.success))
_x = self.status_message
length = len(_x)
if (python3 or (type(_x) == unicode)):
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack(('<I%ss' % length), length, _x))
except struct.error as se:
self._check_types(struct.error(("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))))
except TypeError as te:
self._check_types(ValueError(("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))))
|
def deserialize_numpy(self, str, numpy):
'\n unpack serialized message in str into this message instance using numpy for array types\n :param str: byte array of serialized message, ``str``\n :param numpy: numpy python module\n '
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status_message = str[start:end].decode('utf-8')
else:
self.status_message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e)
| -8,541,184,696,130,646,000
|
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
|
files/catkin_ws/devel/lib/python2.7/dist-packages/gazebo_msgs/srv/_SetJointTrajectory.py
|
deserialize_numpy
|
Filipe-Douglas-Slam/slam_lidar_kinect
|
python
|
def deserialize_numpy(self, str, numpy):
'\n unpack serialized message in str into this message instance using numpy for array types\n :param str: byte array of serialized message, ``str``\n :param numpy: numpy python module\n '
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status_message = str[start:end].decode('utf-8')
else:
self.status_message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e)
|
def onehot_encode_seq(sequence, m=0, padding=False):
'Converts a given IUPAC DNA sequence to a one-hot \n encoded DNA sequence. \n '
    import sys  # needed for the sys.exit call below
    import numpy as np
    import torch
valid_keys = ['a', 'c', 'g', 't', 'u', 'n', 'r', 'y', 's', 'w', 'k', 'm']
nucs = {'a': 0, 'c': 1, 'g': 2, 't': 3, 'u': 3}
if padding:
assert (m != 0), 'If using padding, m should be bigger than 0'
padding_mat = np.tile(0.25, ((m - 1), 4))
onehot = np.tile(0.0, (len(sequence), 4))
for (i, char) in enumerate(sequence.lower()):
if (char not in valid_keys):
sys.exit('invalid char in sequence (choose from acgt and nryswkm)')
elif (char == 'n'):
onehot[i, :] = 0.25
elif (char == 'r'):
onehot[(i, (0, 2))] = 0.5
elif (char == 'y'):
onehot[(i, (1, 3))] = 0.5
elif (char == 's'):
onehot[(i, (1, 2))] = 0.5
elif (char == 'w'):
onehot[(i, (0, 3))] = 0.5
elif (char == 'k'):
onehot[(i, (2, 3))] = 0.5
elif (char == 'm'):
onehot[(i, (0, 1))] = 0.5
else:
onehot[(i, nucs[char])] = 1
if padding:
onehot = np.concatenate((padding_mat, onehot, padding_mat))
return onehot
| 4,279,557,635,030,340,600
|
Converts a given IUPAC DNA sequence to a one-hot
encoded DNA sequence.
|
bin/scover_utils.py
|
onehot_encode_seq
|
jacobhepkema/scover
|
python
|
def onehot_encode_seq(sequence, m=0, padding=False):
'Converts a given IUPAC DNA sequence to a one-hot \n encoded DNA sequence. \n '
    import sys  # needed for the sys.exit call below
    import numpy as np
    import torch
valid_keys = ['a', 'c', 'g', 't', 'u', 'n', 'r', 'y', 's', 'w', 'k', 'm']
nucs = {'a': 0, 'c': 1, 'g': 2, 't': 3, 'u': 3}
if padding:
assert (m != 0), 'If using padding, m should be bigger than 0'
padding_mat = np.tile(0.25, ((m - 1), 4))
onehot = np.tile(0.0, (len(sequence), 4))
for (i, char) in enumerate(sequence.lower()):
if (char not in valid_keys):
sys.exit('invalid char in sequence (choose from acgt and nryswkm)')
elif (char == 'n'):
onehot[i, :] = 0.25
elif (char == 'r'):
onehot[(i, (0, 2))] = 0.5
elif (char == 'y'):
onehot[(i, (1, 3))] = 0.5
elif (char == 's'):
onehot[(i, (1, 2))] = 0.5
elif (char == 'w'):
onehot[(i, (0, 3))] = 0.5
elif (char == 'k'):
onehot[(i, (2, 3))] = 0.5
elif (char == 'm'):
onehot[(i, (0, 1))] = 0.5
else:
onehot[(i, nucs[char])] = 1
if padding:
onehot = np.concatenate((padding_mat, onehot, padding_mat))
return onehot
|
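A quick usage sketch for `onehot_encode_seq`, assuming the function is available as defined above: an `n` maps to a uniform 0.25 row, and `padding=True` with `m=3` adds `m - 1 = 2` uniform rows on each side:

onehot = onehot_encode_seq('acgn', m=3, padding=True)
print(onehot.shape)  # (8, 4): 4 bases plus 2 padding rows on each side
print(onehot[0])     # [0.25 0.25 0.25 0.25]  padding row
print(onehot[2])     # [1. 0. 0. 0.]          one-hot 'a'
print(onehot[5])     # [0.25 0.25 0.25 0.25]  ambiguous 'n'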
def save_meme(motifs_ppm_dict, output_file='found_motifs.meme'):
"Saves the found PPMs (given as dictionary) to a file that's\n compatible with MEME suite applications.\n "
import pandas as pd
meme_string = ['MEME version 4', '', 'ALPHABET= ACGT', '', 'strands: + -', '']
for (idx, key) in enumerate(motifs_ppm_dict.keys()):
curr_motif = pd.DataFrame(motifs_ppm_dict[key])
s1 = ('MOTIF ' + str(key))
s2 = ((('letter-probability matrix: alength= ' + str(curr_motif.shape[1])) + ' w= ') + str(curr_motif.shape[0]))
s3 = curr_motif.to_csv(sep='\t', index=False, header=False)
meme_string = (meme_string + [s1, s2, s3])
meme_string = '\n'.join(meme_string)
with open(output_file, 'w') as the_file:
the_file.write(meme_string)
print('wrote meme list')
| -4,665,332,980,607,600,000
|
Saves the found PPMs (given as dictionary) to a file that's
compatible with MEME suite applications.
|
bin/scover_utils.py
|
save_meme
|
jacobhepkema/scover
|
python
|
def save_meme(motifs_ppm_dict, output_file='found_motifs.meme'):
"Saves the found PPMs (given as dictionary) to a file that's\n compatible with MEME suite applications.\n "
import pandas as pd
    meme_string = ['MEME version 4', '', 'ALPHABET= ACGT', '', 'strands: + -', '']
for (idx, key) in enumerate(motifs_ppm_dict.keys()):
curr_motif = pd.DataFrame(motifs_ppm_dict[key])
s1 = ('MOTIF ' + str(key))
s2 = ((('letter-probability matrix: alength= ' + str(curr_motif.shape[1])) + ' w= ') + str(curr_motif.shape[0]))
s3 = curr_motif.to_csv(sep='\t', index=False, header=False)
meme_string = (meme_string + [s1, s2, s3])
meme_string = '\n'.join(meme_string)
with open(output_file, 'w') as the_file:
the_file.write(meme_string)
print('wrote meme list')
|
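A small usage sketch for `save_meme` (the PPM below is made up): each dictionary entry becomes a `MOTIF` header followed by a `letter-probability matrix` block, which MEME suite tools such as Tomtom accept:

import numpy as np

ppms = {'filter_0': np.array([[0.7, 0.1, 0.1, 0.1],
                              [0.1, 0.1, 0.7, 0.1]])}  # 2 positions x ACGT
save_meme(ppms, output_file='example_motifs.meme')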
def align_conv_filters(model, input_seqs, m, train_ind):
'Aligns the convolutional filters of a given scover model back\n to the given input sequences at the given indices. \n '
import numpy as np
import torch
from tqdm import trange
activation_seqs = input_seqs[train_ind]
with torch.no_grad():
model.eval()
activations = model.conv_1(activation_seqs).cpu().detach().numpy().squeeze()
n_seq = activation_seqs.shape[0]
activation_seqs = activation_seqs.squeeze()
seq_len = activation_seqs.shape[1]
d = activations.shape[1]
motifs_pfm_dict = dict()
motifs_ppm_dict = dict()
for filter_num in trange(d):
curr_activation = activations[:, filter_num, :]
seq_has_pos_vals = np.argwhere((np.amax(curr_activation, axis=1) > 0))[:, 0]
if (seq_has_pos_vals.shape[0] > 10):
per_seq_where_max_pos = np.argmax(curr_activation[seq_has_pos_vals], axis=1)
curr_activation_seqs = activation_seqs[seq_has_pos_vals]
curr_str_list = []
for i in range(seq_has_pos_vals.shape[0]):
curr_max = per_seq_where_max_pos[i]
curr_str_list.append(curr_activation_seqs[i][curr_max:(curr_max + m)])
sequence_array = np.stack(curr_str_list)
sequence_array_summed = np.sum(sequence_array, axis=0)
motifs_pfm_dict[str(filter_num)] = sequence_array_summed
row_sums = np.sum(sequence_array_summed, axis=1)
sequence_array_summed = np.nan_to_num((sequence_array_summed / row_sums[:, np.newaxis]))
motifs_ppm_dict[str(filter_num)] = sequence_array_summed
return (motifs_pfm_dict, motifs_ppm_dict)
| 611,642,633,021,770,500
|
Aligns the convolutional filters of a given scover model back
to the given input sequences at the given indices.
|
bin/scover_utils.py
|
align_conv_filters
|
jacobhepkema/scover
|
python
|
def align_conv_filters(model, input_seqs, m, train_ind):
'Aligns the convolutional filters of a given scover model back\n to the given input sequences at the given indices. \n '
import numpy as np
import torch
from tqdm import trange
activation_seqs = input_seqs[train_ind]
with torch.no_grad():
model.eval()
activations = model.conv_1(activation_seqs).cpu().detach().numpy().squeeze()
n_seq = activation_seqs.shape[0]
activation_seqs = activation_seqs.squeeze()
seq_len = activation_seqs.shape[1]
d = activations.shape[1]
motifs_pfm_dict = dict()
motifs_ppm_dict = dict()
for filter_num in trange(d):
curr_activation = activations[:, filter_num, :]
seq_has_pos_vals = np.argwhere((np.amax(curr_activation, axis=1) > 0))[:, 0]
if (seq_has_pos_vals.shape[0] > 10):
per_seq_where_max_pos = np.argmax(curr_activation[seq_has_pos_vals], axis=1)
curr_activation_seqs = activation_seqs[seq_has_pos_vals]
curr_str_list = []
for i in range(seq_has_pos_vals.shape[0]):
curr_max = per_seq_where_max_pos[i]
curr_str_list.append(curr_activation_seqs[i][curr_max:(curr_max + m)])
sequence_array = np.stack(curr_str_list)
sequence_array_summed = np.sum(sequence_array, axis=0)
motifs_pfm_dict[str(filter_num)] = sequence_array_summed
row_sums = np.sum(sequence_array_summed, axis=1)
sequence_array_summed = np.nan_to_num((sequence_array_summed / row_sums[:, np.newaxis]))
motifs_ppm_dict[str(filter_num)] = sequence_array_summed
return (motifs_pfm_dict, motifs_ppm_dict)
|
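The closing lines of `align_conv_filters` convert a position frequency matrix (per-position base counts) into a position probability matrix by dividing each row by its sum, with `np.nan_to_num` guarding against all-zero rows. A stand-alone sketch of that step with made-up counts:

import numpy as np

pfm = np.array([[8.0, 1.0, 1.0, 0.0],
                [0.0, 5.0, 5.0, 0.0]])  # counts per position (ACGT)
row_sums = np.sum(pfm, axis=1)
ppm = np.nan_to_num(pfm / row_sums[:, np.newaxis])
print(ppm)  # each row now sums to 1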
def randomize_sequences(sequences):
'Randomly permutes a set of DNA sequences.\n '
import random
shuffled_seqs = []
for seq in sequences:
shuffled_seqs.append(''.join(random.sample(seq, len(seq))))
return shuffled_seqs
| 5,263,260,572,859,589,000
|
Randomly permutes a set of DNA sequences.
|
bin/scover_utils.py
|
randomize_sequences
|
jacobhepkema/scover
|
python
|
def randomize_sequences(sequences):
'\n '
import random
shuffled_seqs = []
for seq in sequences:
        shuffled_seqs.append(''.join(random.sample(seq, len(seq))))
return shuffled_seqs
|
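Because `random.sample(seq, len(seq))` draws every character exactly once, each shuffled string is a permutation of the original, so base composition is preserved. A quick check, assuming the function above is available:

from collections import Counter

shuffled = randomize_sequences(['ACGTACGT'])
assert Counter(shuffled[0]) == Counter('ACGTACGT')  # same bases, new order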
def check_response_errors(response, session):
    '\n Checks the status of the response and throws the appropriate error.\n '
if (response['status'] != 'ok'):
from wi.utils.auth import logout
error_code = response['status']
error_msg = get_error(error_code)
raise RestErrorException(error_msg)
return response
| -1,008,439,569,765,546,400
|
Checks the status of the response and throws the appropriate error.
|
src/wi/utils/__init__.py
|
check_response_errors
|
cc1-cloud/cc1
|
python
|
def check_response_errors(response, session):
'\n \n '
if (response['status'] != 'ok'):
from wi.utils.auth import logout
error_code = response['status']
error_msg = get_error(error_code)
raise RestErrorException(error_msg)
return response
|
def get_dict_from_list(list_of_dicts, key_value, key='id'):
'\n Returns dictionary with key: @prm{key} equal to @prm{key_value} from a\n list of dictionaries: @prm{list_of_dicts}.\n '
for dictionary in list_of_dicts:
        if (dictionary.get(key) is None):
raise Exception((('No key: ' + key) + ' in dictionary.'))
if (dictionary.get(key) == key_value):
return dictionary
return None
| -5,721,012,959,760,796,000
|
Returns dictionary with key: @prm{key} equal to @prm{key_value} from a
list of dictionaries: @prm{list_of_dicts}.
|
src/wi/utils/__init__.py
|
get_dict_from_list
|
cc1-cloud/cc1
|
python
|
def get_dict_from_list(list_of_dicts, key_value, key='id'):
'\n Returns dictionary with key: @prm{key} equal to @prm{key_value} from a\n list of dictionaries: @prm{list_of_dicts}.\n '
for dictionary in list_of_dicts:
        if (dictionary.get(key) is None):
raise Exception((('No key: ' + key) + ' in dictionary.'))
if (dictionary.get(key) == key_value):
return dictionary
return None
|
def get_dicts_from_list(list_of_dicts, list_of_key_values, key='id'):
'\n Returns list of dictionaries with keys: @prm{key} equal to one from list\n @prm{list_of_key_values} from a list of dictionaries: @prm{list_of_dicts}.\n '
ret = []
for dictionary in list_of_dicts:
        if (dictionary.get(key) is None):
raise Exception((('No key: ' + key) + ' in dictionary.'))
if (dictionary.get(key) in list_of_key_values):
ret.append(dictionary)
return ret
| -8,346,077,972,838,045,000
|
Returns list of dictionaries with keys: @prm{key} equal to one from list
@prm{list_of_key_values} from a list of dictionaries: @prm{list_of_dicts}.
|
src/wi/utils/__init__.py
|
get_dicts_from_list
|
cc1-cloud/cc1
|
python
|
def get_dicts_from_list(list_of_dicts, list_of_key_values, key='id'):
'\n Returns list of dictionaries with keys: @prm{key} equal to one from list\n @prm{list_of_key_values} from a list of dictionaries: @prm{list_of_dicts}.\n '
ret = []
for dictionary in list_of_dicts:
        if (dictionary.get(key) is None):
raise Exception((('No key: ' + key) + ' in dictionary.'))
if (dictionary.get(key) in list_of_key_values):
ret.append(dictionary)
return ret
|
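A usage sketch for the two lookup helpers above (data made up): `get_dict_from_list` returns the first match or `None`, while `get_dicts_from_list` collects every match:

rows = [{'id': 1, 'name': 'vm-a'}, {'id': 2, 'name': 'vm-b'}]
get_dict_from_list(rows, 2)        # {'id': 2, 'name': 'vm-b'}
get_dict_from_list(rows, 99)       # None
get_dicts_from_list(rows, [1, 2])  # both dictionaries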
def ParseOption():
'Parse command line options.'
parser = optparse.OptionParser()
parser.add_option('--qtdir', dest='qtdir')
parser.add_option('--target', dest='target')
(opts, _) = parser.parse_args()
return opts
| 4,984,111,987,636,987,000
|
Parse command line options.
|
src/build_tools/change_reference_mac.py
|
ParseOption
|
dancerj/mozc
|
python
|
def ParseOption():
parser = optparse.OptionParser()
parser.add_option('--qtdir', dest='qtdir')
parser.add_option('--target', dest='target')
(opts, _) = parser.parse_args()
return opts
|
def fix(val):
'strip off _: from nodeIDs... as they are not valid NCNames'
if val.startswith('_:'):
return val[2:]
else:
return val
| -7,506,203,427,036,628,000
|
strip off _: from nodeIDs... as they are not valid NCNames
|
rdflib/plugins/serializers/rdfxml.py
|
fix
|
GreenfishK/rdflib
|
python
|
def fix(val):
if val.startswith('_:'):
return val[2:]
else:
return val
|
def dmsp_map_interpolate_NN_smooth_great_circle(lat_dmsp, lon_dmsp, lat_map, lon_map, Obs_map, k=5, tol=1.5):
    '\n generic function to spatially interpolate the SSJ data using nearest neighbors with an arbitrary distance tolerance\n '
tol = np.deg2rad(tol)
dmsp_points = np.deg2rad(np.hstack((lat_dmsp.flatten().reshape((- 1), 1), lon_dmsp.flatten().reshape((- 1), 1))))
map_points = np.deg2rad(np.hstack((lat_map.flatten().reshape((- 1), 1), lon_map.flatten().reshape((- 1), 1))))
N_points = dmsp_points.shape[0]
obs_val = Obs_map.flatten()
model = sklearn.neighbors.NearestNeighbors(n_neighbors=k, radius=tol, metric='haversine')
model.fit(map_points)
neighbors = model.kneighbors(dmsp_points, return_distance=True)
obs_interp = np.empty(N_points)
for i in range(N_points):
distances = neighbors[0][i]
inds = neighbors[1][i]
weights = (distances / np.nansum(distances))
obs_interp[i] = np.nansum((obs_val[inds] * weights))
return obs_interp
| -6,599,027,326,757,895,000
|
generic function to spatially interpolate the SSJ data using nearest neighbors with an arbitrary distance tolerance
|
LBH_to_eflux/helper_funcs.py
|
dmsp_map_interpolate_NN_smooth_great_circle
|
jali7001/LBH_to_E_flux
|
python
|
def dmsp_map_interpolate_NN_smooth_great_circle(lat_dmsp, lon_dmsp, lat_map, lon_map, Obs_map, k=5, tol=1.5):
'\n \n '
tol = np.deg2rad(tol)
dmsp_points = np.deg2rad(np.hstack((lat_dmsp.flatten().reshape((- 1), 1), lon_dmsp.flatten().reshape((- 1), 1))))
map_points = np.deg2rad(np.hstack((lat_map.flatten().reshape((- 1), 1), lon_map.flatten().reshape((- 1), 1))))
N_points = dmsp_points.shape[0]
obs_val = Obs_map.flatten()
model = sklearn.neighbors.NearestNeighbors(n_neighbors=k, radius=tol, metric='haversine')
model.fit(map_points)
neighbors = model.kneighbors(dmsp_points, return_distance=True)
obs_interp = np.empty(N_points)
for i in range(N_points):
distances = neighbors[0][i]
inds = neighbors[1][i]
weights = (distances / np.nansum(distances))
obs_interp[i] = np.nansum((obs_val[inds] * weights))
return obs_interp
|
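A self-contained sketch of the same haversine nearest-neighbor query. Note that the loop above weights each neighbor by distance / sum(distances), which gives farther neighbors more weight; conventional inverse-distance weighting (shown below, and possibly what was intended) does the opposite. The grid, query point, and observations are invented:

import numpy as np
from sklearn.neighbors import NearestNeighbors

# Invented lat/lon grid (degrees) and a single query point.
lat_map, lon_map = np.meshgrid(np.linspace(50, 80, 4), np.linspace(0, 350, 8))
map_points = np.deg2rad(np.column_stack([lat_map.ravel(), lon_map.ravel()]))
obs_val = np.random.default_rng(0).random(len(map_points))  # invented observations

model = NearestNeighbors(n_neighbors=3, metric='haversine')
model.fit(map_points)
dists, inds = model.kneighbors(np.deg2rad([[65.0, 120.0]]))  # angular distances in radians

inv = 1.0 / np.maximum(dists[0], 1e-12)   # guard against an exact-hit zero distance
weights = inv / inv.sum()                 # inverse-distance weights, sum to 1
estimate = (obs_val[inds[0]] * weights).sum()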
def latlt2polar(lat, lt, hemisphere):
    '\n Converts an array of latitude and lt points to polar for a top-down dialplot (latitude in degrees, LT in hours)\n i.e. makes latitude the radial quantity and MLT the azimuthal \n\n get the radial displacement (referenced to down from northern pole if we want to do a top down on the north, \n or up from south pole if vice versa)\n '
from numpy import pi
if (hemisphere == 'N'):
r = (90.0 - lat)
elif (hemisphere == 'S'):
r = (90.0 - ((- 1) * lat))
else:
raise ValueError(('%s is not a valid hemisphere, N or S, please!' % hemisphere))
theta = (((lt / 24.0) * 2) * pi)
return (r, theta)
| -7,718,299,923,529,573,000
|
Converts an array of latitude and lt points to polar for a top-down dialplot (latitude in degrees, LT in hours)
i.e. makes latitude the radial quantity and MLT the azimuthal
get the radial displacement (referenced to down from northern pole if we want to do a top down on the north,
or up from south pole if vice versa)
|
LBH_to_eflux/helper_funcs.py
|
latlt2polar
|
jali7001/LBH_to_E_flux
|
python
|
def latlt2polar(lat, lt, hemisphere):
    '\n Converts an array of latitude and lt points to polar for a top-down dialplot (latitude in degrees, LT in hours)\n i.e. makes latitude the radial quantity and MLT the azimuthal \n\n get the radial displacement (referenced to down from northern pole if we want to do a top down on the north, \n or up from south pole if vice versa)\n '
from numpy import pi
if (hemisphere == 'N'):
r = (90.0 - lat)
elif (hemisphere == 'S'):
r = (90.0 - ((- 1) * lat))
else:
raise ValueError(('%s is not a valid hemisphere, N or S, please!' % hemisphere))
theta = (((lt / 24.0) * 2) * pi)
return (r, theta)
|
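A worked example for latlt2polar: 70 degrees latitude at 06 LT in the northern hemisphere maps to radius 20 and azimuth pi/2:

r, theta = latlt2polar(70.0, 6.0, 'N')
# r     = 90.0 - 70.0         = 20.0
# theta = (6.0 / 24.0) * 2*pi = pi / 2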
def polar2dial(ax):
'\n Turns a matplotlib axes polar plot into a dial plot\n '
ax.set_theta_zero_location('S')
theta_label_values = ((np.array([0.0, 3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0]) * 180.0) / 12)
theta_labels = [('%d:00' % int(((th / 180.0) * 12))) for th in theta_label_values.flatten().tolist()]
ax.set_thetagrids(theta_label_values, labels=theta_labels)
r_label_values = (90.0 - np.array([80.0, 70.0, 60.0, 50.0, 40.0]))
r_labels = [('$%d^{o}$' % int((90.0 - rv))) for rv in r_label_values.flatten().tolist()]
ax.set_rgrids(r_label_values, labels=r_labels)
ax.set_rlim([0.0, 40.0])
| 4,135,101,090,055,548,400
|
Turns a matplotlib axes polar plot into a dial plot
|
LBH_to_eflux/helper_funcs.py
|
polar2dial
|
jali7001/LBH_to_E_flux
|
python
|
def polar2dial(ax):
'\n \n '
ax.set_theta_zero_location('S')
theta_label_values = ((np.array([0.0, 3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0]) * 180.0) / 12)
theta_labels = [('%d:00' % int(((th / 180.0) * 12))) for th in theta_label_values.flatten().tolist()]
ax.set_thetagrids(theta_label_values, labels=theta_labels)
r_label_values = (90.0 - np.array([80.0, 70.0, 60.0, 50.0, 40.0]))
r_labels = [('$%d^{o}$' % int((90.0 - rv))) for rv in r_label_values.flatten().tolist()]
ax.set_rgrids(r_label_values, labels=r_labels)
ax.set_rlim([0.0, 40.0])
|
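A minimal sketch tying the two plotting helpers together; assumes matplotlib is available and numpy is imported as np, as elsewhere in the module:

import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111, projection='polar')
polar2dial(ax)  # relabel the polar axes as an MLT/latitude dial
r, theta = latlt2polar(np.array([70.0]), np.array([6.0]), 'N')
ax.scatter(theta, r)  # matplotlib polar axes take (theta, r)
plt.show()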
def dmsp_map_interpolate(X_dmsp, Y_dmsp, X_map, Y_map, tolerance=0.5):
    '\n generic function to spatially interpolate the SSJ data using nearest neighbors with an arbitrary distance tolerance\n '
indices = scipy.interpolate.griddata((X_map, Y_map), np.arange(len(X_map.flatten())), (X_dmsp, Y_dmsp), method='nearest')
mask = ((abs((X_map[indices] - X_dmsp)) < tolerance) & (abs((Y_map[indices] - Y_dmsp)) < tolerance))
return (indices, mask)
| 2,021,138,743,614,363,400
|
generic function to spatially interpolate the SSJ data using nearest neighbors with an arbitrary distance tolerance
|
LBH_to_eflux/helper_funcs.py
|
dmsp_map_interpolate
|
jali7001/LBH_to_E_flux
|
python
|
def dmsp_map_interpolate(X_dmsp, Y_dmsp, X_map, Y_map, tolerance=0.5):
'\n \n '
indices = scipy.interpolate.griddata((X_map, Y_map), np.arange(len(X_map.flatten())), (X_dmsp, Y_dmsp), method='nearest')
mask = ((abs((X_map[indices] - X_dmsp)) < tolerance) & (abs((Y_map[indices] - Y_dmsp)) < tolerance))
return (indices, mask)
|
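A small sketch of dmsp_map_interpolate on invented 1-D grids. The nearest-grid-index trick works because the "values" handed to griddata are the flattened grid indices themselves, so nearest-neighbor interpolation returns the index of the closest grid point:

import numpy as np
import scipy.interpolate

X_map = np.linspace(0.0, 10.0, 21)   # invented map grid
Y_map = np.linspace(0.0, 5.0, 21)
X_dmsp = np.array([2.04, 7.9])       # invented satellite track points
Y_dmsp = np.array([1.1, 3.9])

indices, mask = dmsp_map_interpolate(X_dmsp, Y_dmsp, X_map, Y_map)
# indices: flat index of the nearest grid point for each track point
# mask:    True where both |dX| and |dY| fall within the 0.5 tolerance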
def dmsp_map_interpolate_NN_smooth(X_dmsp, Y_dmsp, X_map, Y_map, Obs_map, k=5, tol=3):
    '\n generic function to spatially interpolate the SSJ data using nearest neighbors with an arbitrary distance tolerance\n '
dmsp_points = np.hstack((X_dmsp.flatten().reshape((- 1), 1), Y_dmsp.flatten().reshape((- 1), 1)))
map_points = np.hstack((X_map.flatten().reshape((- 1), 1), Y_map.flatten().reshape((- 1), 1)))
N_points = dmsp_points.shape[0]
obs_val = Obs_map.flatten()
model = sklearn.neighbors.BallTree(map_points, leaf_size=40)
(dists, inds) = model.query(dmsp_points, k=k)
obs_interp = np.empty(N_points)
for i in range(N_points):
norm = LA.norm(dists[i])
if (norm > tol):
obs_interp[i] = np.nan
else:
weights = (dists[i] / np.nansum(dists[i]))
obs_interp[i] = np.nansum((obs_val[inds[i]] * weights))
return obs_interp
| 9,011,506,937,454,772,000
|
generic function to spatially interpolate the SSJ data using nearest neighbors with an arbitrary distance tolerance
|
LBH_to_eflux/helper_funcs.py
|
dmsp_map_interpolate_NN_smooth
|
jali7001/LBH_to_E_flux
|
python
|
def dmsp_map_interpolate_NN_smooth(X_dmsp, Y_dmsp, X_map, Y_map, Obs_map, k=5, tol=3):
'\n \n '
dmsp_points = np.hstack((X_dmsp.flatten().reshape((- 1), 1), Y_dmsp.flatten().reshape((- 1), 1)))
map_points = np.hstack((X_map.flatten().reshape((- 1), 1), Y_map.flatten().reshape((- 1), 1)))
N_points = dmsp_points.shape[0]
obs_val = Obs_map.flatten()
model = sklearn.neighbors.BallTree(map_points, leaf_size=40)
(dists, inds) = model.query(dmsp_points, k=k)
obs_interp = np.empty(N_points)
for i in range(N_points):
norm = LA.norm(dists[i])
if (norm > tol):
obs_interp[i] = np.nan
else:
weights = (dists[i] / np.nansum(dists[i]))
obs_interp[i] = np.nansum((obs_val[inds[i]] * weights))
return obs_interp
|
def create_host_template(resource_root, name, cluster_name):
'\n Create a host template.\n @param resource_root: The root Resource object.\n @param name: Host template name\n @param cluster_name: Cluster name\n @return: An ApiHostTemplate object for the created host template.\n @since: API v3\n '
apitemplate = ApiHostTemplate(resource_root, name, [])
return call(resource_root.post, (HOST_TEMPLATES_PATH % (cluster_name,)), ApiHostTemplate, True, data=[apitemplate], api_version=3)[0]
| 7,939,236,683,972,800,000
|
Create a host template.
@param resource_root: The root Resource object.
@param name: Host template name
@param cluster_name: Cluster name
@return: An ApiHostTemplate object for the created host template.
@since: API v3
|
python/src/cm_api/endpoints/host_templates.py
|
create_host_template
|
AnniDu/cm_api
|
python
|
def create_host_template(resource_root, name, cluster_name):
'\n Create a host template.\n @param resource_root: The root Resource object.\n @param name: Host template name\n @param cluster_name: Cluster name\n @return: An ApiHostTemplate object for the created host template.\n @since: API v3\n '
apitemplate = ApiHostTemplate(resource_root, name, [])
return call(resource_root.post, (HOST_TEMPLATES_PATH % (cluster_name,)), ApiHostTemplate, True, data=[apitemplate], api_version=3)[0]
|
def get_host_template(resource_root, name, cluster_name):
    '\n Look up a host template by name in the specified cluster.\n @param resource_root: The root Resource object.\n @param name: Host template name.\n @param cluster_name: Cluster name.\n @return: An ApiHostTemplate object.\n @since: API v3\n '
return call(resource_root.get, (HOST_TEMPLATE_PATH % (cluster_name, name)), ApiHostTemplate, api_version=3)
| 8,255,752,173,159,167,000
|
Look up a host template by name in the specified cluster.
@param resource_root: The root Resource object.
@param name: Host template name.
@param cluster_name: Cluster name.
@return: An ApiHostTemplate object.
@since: API v3
|
python/src/cm_api/endpoints/host_templates.py
|
get_host_template
|
AnniDu/cm_api
|
python
|
def get_host_template(resource_root, name, cluster_name):
    '\n Look up a host template by name in the specified cluster.\n @param resource_root: The root Resource object.\n @param name: Host template name.\n @param cluster_name: Cluster name.\n @return: An ApiHostTemplate object.\n @since: API v3\n '
return call(resource_root.get, (HOST_TEMPLATE_PATH % (cluster_name, name)), ApiHostTemplate, api_version=3)
|
def get_all_host_templates(resource_root, cluster_name='default'):
'\n Get all host templates in a cluster.\n @param cluster_name: Cluster name.\n @return: ApiList of ApiHostTemplate objects for all host templates in a cluster.\n @since: API v3\n '
return call(resource_root.get, (HOST_TEMPLATES_PATH % (cluster_name,)), ApiHostTemplate, True, api_version=3)
| -7,250,237,421,090,976,000
|
Get all host templates in a cluster.
@param cluster_name: Cluster name.
@return: ApiList of ApiHostTemplate objects for all host templates in a cluster.
@since: API v3
|
python/src/cm_api/endpoints/host_templates.py
|
get_all_host_templates
|
AnniDu/cm_api
|
python
|
def get_all_host_templates(resource_root, cluster_name='default'):
'\n Get all host templates in a cluster.\n @param cluster_name: Cluster name.\n @return: ApiList of ApiHostTemplate objects for all host templates in a cluster.\n @since: API v3\n '
return call(resource_root.get, (HOST_TEMPLATES_PATH % (cluster_name,)), ApiHostTemplate, True, api_version=3)
|
def delete_host_template(resource_root, name, cluster_name):
'\n Delete a host template identified by name in the specified cluster.\n @param resource_root: The root Resource object.\n @param name: Host template name.\n @param cluster_name: Cluster name.\n @return: The deleted ApiHostTemplate object.\n @since: API v3\n '
return call(resource_root.delete, (HOST_TEMPLATE_PATH % (cluster_name, name)), ApiHostTemplate, api_version=3)
| 6,140,958,390,686,519,000
|
Delete a host template identified by name in the specified cluster.
@param resource_root: The root Resource object.
@param name: Host template name.
@param cluster_name: Cluster name.
@return: The deleted ApiHostTemplate object.
@since: API v3
|
python/src/cm_api/endpoints/host_templates.py
|
delete_host_template
|
AnniDu/cm_api
|
python
|
def delete_host_template(resource_root, name, cluster_name):
'\n Delete a host template identified by name in the specified cluster.\n @param resource_root: The root Resource object.\n @param name: Host template name.\n @param cluster_name: Cluster name.\n @return: The deleted ApiHostTemplate object.\n @since: API v3\n '
return call(resource_root.delete, (HOST_TEMPLATE_PATH % (cluster_name, name)), ApiHostTemplate, api_version=3)
|
def update_host_template(resource_root, name, cluster_name, api_host_template):
'\n Update a host template identified by name in the specified cluster.\n @param resource_root: The root Resource object.\n @param name: Host template name.\n @param cluster_name: Cluster name.\n @param api_host_template: The updated host template.\n @return: The updated ApiHostTemplate.\n @since: API v3\n '
return call(resource_root.put, (HOST_TEMPLATE_PATH % (cluster_name, name)), ApiHostTemplate, data=api_host_template, api_version=3)
| -5,353,111,303,949,854,000
|
Update a host template identified by name in the specified cluster.
@param resource_root: The root Resource object.
@param name: Host template name.
@param cluster_name: Cluster name.
@param api_host_template: The updated host template.
@return: The updated ApiHostTemplate.
@since: API v3
|
python/src/cm_api/endpoints/host_templates.py
|
update_host_template
|
AnniDu/cm_api
|
python
|
def update_host_template(resource_root, name, cluster_name, api_host_template):
'\n Update a host template identified by name in the specified cluster.\n @param resource_root: The root Resource object.\n @param name: Host template name.\n @param cluster_name: Cluster name.\n @param api_host_template: The updated host template.\n @return: The updated ApiHostTemplate.\n @since: API v3\n '
return call(resource_root.put, (HOST_TEMPLATE_PATH % (cluster_name, name)), ApiHostTemplate, data=api_host_template, api_version=3)
|
def apply_host_template(resource_root, name, cluster_name, host_ids, start_roles):
'\n Apply a host template identified by name on the specified hosts and\n optionally start them.\n @param resource_root: The root Resource object.\n @param name: Host template name.\n @param cluster_name: Cluster name.\n @param host_ids: List of host ids.\n @param start_roles: Whether to start the created roles or not.\n @return: An ApiCommand object.\n @since: API v3\n '
host_refs = []
for host_id in host_ids:
host_refs.append(ApiHostRef(resource_root, host_id))
params = {'startRoles': start_roles}
return call(resource_root.post, (APPLY_HOST_TEMPLATE_PATH % (cluster_name, name)), ApiCommand, data=host_refs, params=params, api_version=3)
| 7,267,557,606,698,993,000
|
Apply a host template identified by name on the specified hosts and
optionally start them.
@param resource_root: The root Resource object.
@param name: Host template name.
@param cluster_name: Cluster name.
@param host_ids: List of host ids.
@param start_roles: Whether to start the created roles or not.
@return: An ApiCommand object.
@since: API v3
|
python/src/cm_api/endpoints/host_templates.py
|
apply_host_template
|
AnniDu/cm_api
|
python
|
def apply_host_template(resource_root, name, cluster_name, host_ids, start_roles):
'\n Apply a host template identified by name on the specified hosts and\n optionally start them.\n @param resource_root: The root Resource object.\n @param name: Host template name.\n @param cluster_name: Cluster name.\n @param host_ids: List of host ids.\n @param start_roles: Whether to start the created roles or not.\n @return: An ApiCommand object.\n @since: API v3\n '
host_refs = []
for host_id in host_ids:
host_refs.append(ApiHostRef(resource_root, host_id))
params = {'startRoles': start_roles}
return call(resource_root.post, (APPLY_HOST_TEMPLATE_PATH % (cluster_name, name)), ApiCommand, data=host_refs, params=params, api_version=3)
|
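A hedged end-to-end sketch of the module-level host-template endpoints above; the server address, credentials, and names are invented, and the assumed entry point is cm_api.api_client.ApiResource:

from cm_api.api_client import ApiResource  # assumed client entry point

api = ApiResource('cm.example.com', username='admin', password='admin')
tmpl = create_host_template(api, 'gateway-template', 'cluster1')
tmpl = get_host_template(api, 'gateway-template', 'cluster1')
templates = get_all_host_templates(api, 'cluster1')
cmd = apply_host_template(api, 'gateway-template', 'cluster1',
                          ['host-id-1', 'host-id-2'], start_roles=True)
delete_host_template(api, 'gateway-template', 'cluster1')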
def rename(self, new_name):
'\n Rename a host template.\n @param new_name: New host template name.\n @return: An ApiHostTemplate object.\n '
update = copy.copy(self)
update.name = new_name
return self._do_update(update)
| 6,840,494,761,294,532,000
|
Rename a host template.
@param new_name: New host template name.
@return: An ApiHostTemplate object.
|
python/src/cm_api/endpoints/host_templates.py
|
rename
|
AnniDu/cm_api
|
python
|
def rename(self, new_name):
'\n Rename a host template.\n @param new_name: New host template name.\n @return: An ApiHostTemplate object.\n '
update = copy.copy(self)
update.name = new_name
return self._do_update(update)
|
def set_role_config_groups(self, role_config_group_refs):
'\n Updates the role config groups in a host template.\n @param role_config_group_refs: List of role config group refs.\n @return: An ApiHostTemplate object.\n '
update = copy.copy(self)
update.roleConfigGroupRefs = role_config_group_refs
return self._do_update(update)
| 3,588,990,677,251,294,700
|
Updates the role config groups in a host template.
@param role_config_group_refs: List of role config group refs.
@return: An ApiHostTemplate object.
|
python/src/cm_api/endpoints/host_templates.py
|
set_role_config_groups
|
AnniDu/cm_api
|
python
|
def set_role_config_groups(self, role_config_group_refs):
'\n Updates the role config groups in a host template.\n @param role_config_group_refs: List of role config group refs.\n @return: An ApiHostTemplate object.\n '
update = copy.copy(self)
update.roleConfigGroupRefs = role_config_group_refs
return self._do_update(update)
|
def apply_host_template(self, host_ids, start_roles):
'\n Apply a host template identified by name on the specified hosts and\n optionally start them.\n @param host_ids: List of host ids.\n @param start_roles: Whether to start the created roles or not.\n @return: An ApiCommand object.\n '
return apply_host_template(self._get_resource_root(), self.name, self.clusterRef.clusterName, host_ids, start_roles)
| 4,150,363,049,570,062,300
|
Apply a host template identified by name on the specified hosts and
optionally start them.
@param host_ids: List of host ids.
@param start_roles: Whether to start the created roles or not.
@return: An ApiCommand object.
|
python/src/cm_api/endpoints/host_templates.py
|
apply_host_template
|
AnniDu/cm_api
|
python
|
def apply_host_template(self, host_ids, start_roles):
'\n Apply a host template identified by name on the specified hosts and\n optionally start them.\n @param host_ids: List of host ids.\n @param start_roles: Whether to start the created roles or not.\n @return: An ApiCommand object.\n '
return apply_host_template(self._get_resource_root(), self.name, self.clusterRef.clusterName, host_ids, start_roles)
|
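The same operations are also exposed as methods on the ApiHostTemplate object itself; continuing the sketch above with invented names:

tmpl = get_host_template(api, 'gateway-template', 'cluster1')
tmpl = tmpl.rename('edge-template')  # returns the updated ApiHostTemplate
tmpl = tmpl.set_role_config_groups(tmpl.roleConfigGroupRefs)  # shape-only update shown here
cmd = tmpl.apply_host_template(['host-id-1'], start_roles=False)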
async def begin_delete(self, resource_group_name: str, network_interface_name: str, **kwargs) -> AsyncLROPoller[None]:
"Deletes the specified network interface.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_interface_name: The name of the network interface.\n :type network_interface_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n "
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = (await self._delete_initial(resource_group_name=resource_group_name, network_interface_name=network_interface_name, cls=(lambda x, y, z: x), **kwargs))
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'networkInterfaceName': self._serialize.url('network_interface_name', network_interface_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
if (polling is True):
polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
| 8,094,789,127,094,718,000
|
Deletes the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interfaces_operations.py
|
begin_delete
|
AriZavala2/azure-sdk-for-python
|
python
|
async def begin_delete(self, resource_group_name: str, network_interface_name: str, **kwargs) -> AsyncLROPoller[None]:
"Deletes the specified network interface.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_interface_name: The name of the network interface.\n :type network_interface_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n "
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = (await self._delete_initial(resource_group_name=resource_group_name, network_interface_name=network_interface_name, cls=(lambda x, y, z: x), **kwargs))
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'networkInterfaceName': self._serialize.url('network_interface_name', network_interface_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
if (polling is True):
polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
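A minimal async usage sketch for begin_delete and get; the subscription, group, and interface names are invented, and the client/credential classes assumed are the multi-API azure-mgmt-network and azure-identity packages matching this record's path:

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.v2018_07_01.aio import NetworkManagementClient

async def delete_nic():
    async with NetworkManagementClient(DefaultAzureCredential(), '<subscription-id>') as client:
        nic = await client.network_interfaces.get('my-rg', 'my-nic')  # inspect before deleting
        poller = await client.network_interfaces.begin_delete('my-rg', 'my-nic')
        await poller.result()  # block until the long-running delete completes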
async def get(self, resource_group_name: str, network_interface_name: str, expand: Optional[str]=None, **kwargs) -> '_models.NetworkInterface':
'Gets information about the specified network interface.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_interface_name: The name of the network interface.\n :type network_interface_name: str\n :param expand: Expands referenced resources.\n :type expand: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: NetworkInterface, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkInterface\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-07-01'
accept = 'application/json'
url = self.get.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'networkInterfaceName': self._serialize.url('network_interface_name', network_interface_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
if (expand is not None):
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
| -970,889,739,739,060,600
|
Gets information about the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterface, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkInterface
:raises: ~azure.core.exceptions.HttpResponseError
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interfaces_operations.py
|
get
|
AriZavala2/azure-sdk-for-python
|
python
|
async def get(self, resource_group_name: str, network_interface_name: str, expand: Optional[str]=None, **kwargs) -> '_models.NetworkInterface':
'Gets information about the specified network interface.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_interface_name: The name of the network interface.\n :type network_interface_name: str\n :param expand: Expands referenced resources.\n :type expand: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: NetworkInterface, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkInterface\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-07-01'
accept = 'application/json'
url = self.get.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'networkInterfaceName': self._serialize.url('network_interface_name', network_interface_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
if (expand is not None):
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
|
async def begin_create_or_update(self, resource_group_name: str, network_interface_name: str, parameters: '_models.NetworkInterface', **kwargs) -> AsyncLROPoller['_models.NetworkInterface']:
"Creates or updates a network interface.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_interface_name: The name of the network interface.\n :type network_interface_name: str\n :param parameters: Parameters supplied to the create or update network interface operation.\n :type parameters: ~azure.mgmt.network.v2018_07_01.models.NetworkInterface\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either NetworkInterface or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.NetworkInterface]\n :raises ~azure.core.exceptions.HttpResponseError:\n "
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = (await self._create_or_update_initial(resource_group_name=resource_group_name, network_interface_name=network_interface_name, parameters=parameters, cls=(lambda x, y, z: x), **kwargs))
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'networkInterfaceName': self._serialize.url('network_interface_name', network_interface_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
if (polling is True):
polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
| 6,187,002,099,064,935,000
|
Creates or updates a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param parameters: Parameters supplied to the create or update network interface operation.
:type parameters: ~azure.mgmt.network.v2018_07_01.models.NetworkInterface
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NetworkInterface or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.NetworkInterface]
:raises ~azure.core.exceptions.HttpResponseError:
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interfaces_operations.py
|
begin_create_or_update
|
AriZavala2/azure-sdk-for-python
|
python
|
async def begin_create_or_update(self, resource_group_name: str, network_interface_name: str, parameters: '_models.NetworkInterface', **kwargs) -> AsyncLROPoller['_models.NetworkInterface']:
"Creates or updates a network interface.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_interface_name: The name of the network interface.\n :type network_interface_name: str\n :param parameters: Parameters supplied to the create or update network interface operation.\n :type parameters: ~azure.mgmt.network.v2018_07_01.models.NetworkInterface\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either NetworkInterface or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.NetworkInterface]\n :raises ~azure.core.exceptions.HttpResponseError:\n "
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = (await self._create_or_update_initial(resource_group_name=resource_group_name, network_interface_name=network_interface_name, parameters=parameters, cls=(lambda x, y, z: x), **kwargs))
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'networkInterfaceName': self._serialize.url('network_interface_name', network_interface_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
if (polling is True):
polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
async def begin_update_tags(self, resource_group_name: str, network_interface_name: str, parameters: '_models.TagsObject', **kwargs) -> AsyncLROPoller['_models.NetworkInterface']:
"Updates a network interface tags.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_interface_name: The name of the network interface.\n :type network_interface_name: str\n :param parameters: Parameters supplied to update network interface tags.\n :type parameters: ~azure.mgmt.network.v2018_07_01.models.TagsObject\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either NetworkInterface or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.NetworkInterface]\n :raises ~azure.core.exceptions.HttpResponseError:\n "
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = (await self._update_tags_initial(resource_group_name=resource_group_name, network_interface_name=network_interface_name, parameters=parameters, cls=(lambda x, y, z: x), **kwargs))
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'networkInterfaceName': self._serialize.url('network_interface_name', network_interface_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
if (polling is True):
polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
| 3,031,527,886,410,133,500
|
Updates network interface tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param parameters: Parameters supplied to update network interface tags.
:type parameters: ~azure.mgmt.network.v2018_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NetworkInterface or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.NetworkInterface]
:raises ~azure.core.exceptions.HttpResponseError:
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interfaces_operations.py
|
begin_update_tags
|
AriZavala2/azure-sdk-for-python
|
python
|
async def begin_update_tags(self, resource_group_name: str, network_interface_name: str, parameters: '_models.TagsObject', **kwargs) -> AsyncLROPoller['_models.NetworkInterface']:
"Updates a network interface tags.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_interface_name: The name of the network interface.\n :type network_interface_name: str\n :param parameters: Parameters supplied to update network interface tags.\n :type parameters: ~azure.mgmt.network.v2018_07_01.models.TagsObject\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either NetworkInterface or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.NetworkInterface]\n :raises ~azure.core.exceptions.HttpResponseError:\n "
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = (await self._update_tags_initial(resource_group_name=resource_group_name, network_interface_name=network_interface_name, parameters=parameters, cls=(lambda x, y, z: x), **kwargs))
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'networkInterfaceName': self._serialize.url('network_interface_name', network_interface_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
if (polling is True):
polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
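A short sketch of the tag-update poller; this assumes the generated operation accepts a plain dict in place of a TagsObject model, and reuses the client from the earlier sketch (tag values invented):

# inside an async function, with client as constructed above
poller = await client.network_interfaces.begin_update_tags(
    'my-rg', 'my-nic', {'tags': {'env': 'dev', 'owner': 'net-team'}})
nic = await poller.result()  # the updated NetworkInterface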
def list_all(self, **kwargs) -> AsyncIterable['_models.NetworkInterfaceListResult']:
'Gets all network interfaces in a subscription.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-07-01'
accept = 'application/json'
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
if (not next_link):
url = self.list_all.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), AsyncList(list_of_elem))
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
| -2,660,559,911,661,629,400
|
Gets all network interfaces in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interfaces_operations.py
|
list_all
|
AriZavala2/azure-sdk-for-python
|
python
|
def list_all(self, **kwargs) -> AsyncIterable['_models.NetworkInterfaceListResult']:
'Gets all network interfaces in a subscription.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-07-01'
accept = 'application/json'
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
if (not next_link):
url = self.list_all.metadata['url']
path_format_arguments = {'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), AsyncList(list_of_elem))
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
|
def list(self, resource_group_name: str, **kwargs) -> AsyncIterable['_models.NetworkInterfaceListResult']:
'Gets all network interfaces in a resource group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-07-01'
accept = 'application/json'
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
if (not next_link):
url = self.list.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), AsyncList(list_of_elem))
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
| -4,022,185,835,828,412,000
|
Gets all network interfaces in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interfaces_operations.py
|
list
|
AriZavala2/azure-sdk-for-python
|
python
|
def list(self, resource_group_name: str, **kwargs) -> AsyncIterable['_models.NetworkInterfaceListResult']:
'Gets all network interfaces in a resource group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2018-07-01'
accept = 'application/json'
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
if (not next_link):
url = self.list.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), AsyncList(list_of_elem))
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
|
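Both pagers return an AsyncItemPaged that is consumed with async for and follows next_link transparently; a short sketch reusing the client from the earlier sketch:

async def print_nics(client):
    async for nic in client.network_interfaces.list('my-rg'):  # one resource group
        print(nic.name)
    async for nic in client.network_interfaces.list_all():     # whole subscription
        print(nic.id)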
async def begin_get_effective_route_table(self, resource_group_name: str, network_interface_name: str, **kwargs) -> AsyncLROPoller['_models.EffectiveRouteListResult']:
"Gets all route tables applied to a network interface.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_interface_name: The name of the network interface.\n :type network_interface_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either EffectiveRouteListResult or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.EffectiveRouteListResult]\n :raises ~azure.core.exceptions.HttpResponseError:\n "
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = (await self._get_effective_route_table_initial(resource_group_name=resource_group_name, network_interface_name=network_interface_name, cls=(lambda x, y, z: x), **kwargs))
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'networkInterfaceName': self._serialize.url('network_interface_name', network_interface_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
if (polling is True):
polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
| 8,000,999,916,194,996,000
|
Gets all route tables applied to a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either EffectiveRouteListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.EffectiveRouteListResult]
:raises ~azure.core.exceptions.HttpResponseError:
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interfaces_operations.py
|
begin_get_effective_route_table
|
AriZavala2/azure-sdk-for-python
|
python
|
async def begin_get_effective_route_table(self, resource_group_name: str, network_interface_name: str, **kwargs) -> AsyncLROPoller['_models.EffectiveRouteListResult']:
"Gets all route tables applied to a network interface.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_interface_name: The name of the network interface.\n :type network_interface_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either EffectiveRouteListResult or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.EffectiveRouteListResult]\n :raises ~azure.core.exceptions.HttpResponseError:\n "
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = (await self._get_effective_route_table_initial(resource_group_name=resource_group_name, network_interface_name=network_interface_name, cls=(lambda x, y, z: x), **kwargs))
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'networkInterfaceName': self._serialize.url('network_interface_name', network_interface_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
if (polling is True):
polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
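begin_get_effective_route_table returns an AsyncLROPoller rather than the result itself; awaiting result() polls the long-running operation to completion and then runs get_long_running_output to deserialize. A sketch reusing the hypothetical client from the paging example above (attribute names per the v2018_07_01 models):

poller = await client.network_interfaces.begin_get_effective_route_table(
    "my-resource-group", "my-nic"  # placeholder names
)
routes = await poller.result()  # EffectiveRouteListResult
for route in routes.value:
    print(route.next_hop_type, route.address_prefix)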
async def begin_list_effective_network_security_groups(self, resource_group_name: str, network_interface_name: str, **kwargs) -> AsyncLROPoller['_models.EffectiveNetworkSecurityGroupListResult']:
"Gets all network security groups applied to a network interface.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_interface_name: The name of the network interface.\n :type network_interface_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either EffectiveNetworkSecurityGroupListResult or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.EffectiveNetworkSecurityGroupListResult]\n :raises ~azure.core.exceptions.HttpResponseError:\n "
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = (await self._list_effective_network_security_groups_initial(resource_group_name=resource_group_name, network_interface_name=network_interface_name, cls=(lambda x, y, z: x), **kwargs))
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'networkInterfaceName': self._serialize.url('network_interface_name', network_interface_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
if (polling is True):
polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
| 8,632,860,345,684,696,000
|
Gets all network security groups applied to a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either EffectiveNetworkSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.EffectiveNetworkSecurityGroupListResult]
:raises ~azure.core.exceptions.HttpResponseError:
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interfaces_operations.py
|
begin_list_effective_network_security_groups
|
AriZavala2/azure-sdk-for-python
|
python
|
async def begin_list_effective_network_security_groups(self, resource_group_name: str, network_interface_name: str, **kwargs) -> AsyncLROPoller['_models.EffectiveNetworkSecurityGroupListResult']:
"Gets all network security groups applied to a network interface.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param network_interface_name: The name of the network interface.\n :type network_interface_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,\n False for no polling, or your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either EffectiveNetworkSecurityGroupListResult or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.EffectiveNetworkSecurityGroupListResult]\n :raises ~azure.core.exceptions.HttpResponseError:\n "
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = (await self._list_effective_network_security_groups_initial(resource_group_name=resource_group_name, network_interface_name=network_interface_name, cls=(lambda x, y, z: x), **kwargs))
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'networkInterfaceName': self._serialize.url('network_interface_name', network_interface_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
if (polling is True):
polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
def list_virtual_machine_scale_set_vm_network_interfaces(self, resource_group_name: str, virtual_machine_scale_set_name: str, virtualmachine_index: str, **kwargs) -> AsyncIterable['_models.NetworkInterfaceListResult']:
'Gets information about all network interfaces in a virtual machine in a virtual machine scale\n set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param virtual_machine_scale_set_name: The name of the virtual machine scale set.\n :type virtual_machine_scale_set_name: str\n :param virtualmachine_index: The virtual machine index.\n :type virtualmachine_index: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2017-03-30'
accept = 'application/json'
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
if (not next_link):
url = self.list_virtual_machine_scale_set_vm_network_interfaces.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'virtualMachineScaleSetName': self._serialize.url('virtual_machine_scale_set_name', virtual_machine_scale_set_name, 'str'), 'virtualmachineIndex': self._serialize.url('virtualmachine_index', virtualmachine_index, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), AsyncList(list_of_elem))
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
| -555,309,031,031,284,400
|
Gets information about all network interfaces in a virtual machine in a virtual machine scale
set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interfaces_operations.py
|
list_virtual_machine_scale_set_vm_network_interfaces
|
AriZavala2/azure-sdk-for-python
|
python
|
def list_virtual_machine_scale_set_vm_network_interfaces(self, resource_group_name: str, virtual_machine_scale_set_name: str, virtualmachine_index: str, **kwargs) -> AsyncIterable['_models.NetworkInterfaceListResult']:
'Gets information about all network interfaces in a virtual machine in a virtual machine scale\n set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param virtual_machine_scale_set_name: The name of the virtual machine scale set.\n :type virtual_machine_scale_set_name: str\n :param virtualmachine_index: The virtual machine index.\n :type virtualmachine_index: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2017-03-30'
accept = 'application/json'
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
if (not next_link):
url = self.list_virtual_machine_scale_set_vm_network_interfaces.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'virtualMachineScaleSetName': self._serialize.url('virtual_machine_scale_set_name', virtual_machine_scale_set_name, 'str'), 'virtualmachineIndex': self._serialize.url('virtualmachine_index', virtualmachine_index, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), AsyncList(list_of_elem))
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
|
def list_virtual_machine_scale_set_network_interfaces(self, resource_group_name: str, virtual_machine_scale_set_name: str, **kwargs) -> AsyncIterable['_models.NetworkInterfaceListResult']:
'Gets all network interfaces in a virtual machine scale set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param virtual_machine_scale_set_name: The name of the virtual machine scale set.\n :type virtual_machine_scale_set_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2017-03-30'
accept = 'application/json'
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
if (not next_link):
url = self.list_virtual_machine_scale_set_network_interfaces.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'virtualMachineScaleSetName': self._serialize.url('virtual_machine_scale_set_name', virtual_machine_scale_set_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), AsyncList(list_of_elem))
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
| -4,041,946,283,772,427,000
|
Gets all network interfaces in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interfaces_operations.py
|
list_virtual_machine_scale_set_network_interfaces
|
AriZavala2/azure-sdk-for-python
|
python
|
def list_virtual_machine_scale_set_network_interfaces(self, resource_group_name: str, virtual_machine_scale_set_name: str, **kwargs) -> AsyncIterable['_models.NetworkInterfaceListResult']:
'Gets all network interfaces in a virtual machine scale set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param virtual_machine_scale_set_name: The name of the virtual machine scale set.\n :type virtual_machine_scale_set_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2017-03-30'
accept = 'application/json'
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
if (not next_link):
url = self.list_virtual_machine_scale_set_network_interfaces.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'virtualMachineScaleSetName': self._serialize.url('virtual_machine_scale_set_name', virtual_machine_scale_set_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), AsyncList(list_of_elem))
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
|
async def get_virtual_machine_scale_set_network_interface(self, resource_group_name: str, virtual_machine_scale_set_name: str, virtualmachine_index: str, network_interface_name: str, expand: Optional[str]=None, **kwargs) -> '_models.NetworkInterface':
'Get the specified network interface in a virtual machine scale set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param virtual_machine_scale_set_name: The name of the virtual machine scale set.\n :type virtual_machine_scale_set_name: str\n :param virtualmachine_index: The virtual machine index.\n :type virtualmachine_index: str\n :param network_interface_name: The name of the network interface.\n :type network_interface_name: str\n :param expand: Expands referenced resources.\n :type expand: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: NetworkInterface, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkInterface\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2017-03-30'
accept = 'application/json'
url = self.get_virtual_machine_scale_set_network_interface.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'virtualMachineScaleSetName': self._serialize.url('virtual_machine_scale_set_name', virtual_machine_scale_set_name, 'str'), 'virtualmachineIndex': self._serialize.url('virtualmachine_index', virtualmachine_index, 'str'), 'networkInterfaceName': self._serialize.url('network_interface_name', network_interface_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
if (expand is not None):
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
| -1,541,610,260,864,576,800
|
Get the specified network interface in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterface, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkInterface
:raises: ~azure.core.exceptions.HttpResponseError
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interfaces_operations.py
|
get_virtual_machine_scale_set_network_interface
|
AriZavala2/azure-sdk-for-python
|
python
|
async def get_virtual_machine_scale_set_network_interface(self, resource_group_name: str, virtual_machine_scale_set_name: str, virtualmachine_index: str, network_interface_name: str, expand: Optional[str]=None, **kwargs) -> '_models.NetworkInterface':
'Get the specified network interface in a virtual machine scale set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param virtual_machine_scale_set_name: The name of the virtual machine scale set.\n :type virtual_machine_scale_set_name: str\n :param virtualmachine_index: The virtual machine index.\n :type virtualmachine_index: str\n :param network_interface_name: The name of the network interface.\n :type network_interface_name: str\n :param expand: Expands referenced resources.\n :type expand: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: NetworkInterface, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkInterface\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2017-03-30'
accept = 'application/json'
url = self.get_virtual_machine_scale_set_network_interface.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'virtualMachineScaleSetName': self._serialize.url('virtual_machine_scale_set_name', virtual_machine_scale_set_name, 'str'), 'virtualmachineIndex': self._serialize.url('virtualmachine_index', virtualmachine_index, 'str'), 'networkInterfaceName': self._serialize.url('network_interface_name', network_interface_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
if (expand is not None):
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
|
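get_virtual_machine_scale_set_network_interface is a plain async call: no paging and no poller, just a single GET deserialized into a NetworkInterface model. A sketch with the same hypothetical client (all resource names, and the expand string, are illustrative):

nic = await client.network_interfaces.get_virtual_machine_scale_set_network_interface(
    "my-resource-group", "my-vmss", "0", "my-nic", expand="ipConfigurations"
)
print(nic.mac_address, [ip.name for ip in nic.ip_configurations])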
def list_virtual_machine_scale_set_ip_configurations(self, resource_group_name: str, virtual_machine_scale_set_name: str, virtualmachine_index: str, network_interface_name: str, expand: Optional[str]=None, **kwargs) -> AsyncIterable['_models.NetworkInterfaceIPConfigurationListResult']:
'Get the specified network interface ip configuration in a virtual machine scale set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param virtual_machine_scale_set_name: The name of the virtual machine scale set.\n :type virtual_machine_scale_set_name: str\n :param virtualmachine_index: The virtual machine index.\n :type virtualmachine_index: str\n :param network_interface_name: The name of the network interface.\n :type network_interface_name: str\n :param expand: Expands referenced resources.\n :type expand: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either NetworkInterfaceIPConfigurationListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceIPConfigurationListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2017-03-30'
accept = 'application/json'
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
if (not next_link):
url = self.list_virtual_machine_scale_set_ip_configurations.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'virtualMachineScaleSetName': self._serialize.url('virtual_machine_scale_set_name', virtual_machine_scale_set_name, 'str'), 'virtualmachineIndex': self._serialize.url('virtualmachine_index', virtualmachine_index, 'str'), 'networkInterfaceName': self._serialize.url('network_interface_name', network_interface_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
if (expand is not None):
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), AsyncList(list_of_elem))
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
| -2,000,844,739,091,031,600
|
Get the specified network interface ip configuration in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interfaces_operations.py
|
list_virtual_machine_scale_set_ip_configurations
|
AriZavala2/azure-sdk-for-python
|
python
|
def list_virtual_machine_scale_set_ip_configurations(self, resource_group_name: str, virtual_machine_scale_set_name: str, virtualmachine_index: str, network_interface_name: str, expand: Optional[str]=None, **kwargs) -> AsyncIterable['_models.NetworkInterfaceIPConfigurationListResult']:
'Get the specified network interface ip configuration in a virtual machine scale set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param virtual_machine_scale_set_name: The name of the virtual machine scale set.\n :type virtual_machine_scale_set_name: str\n :param virtualmachine_index: The virtual machine index.\n :type virtualmachine_index: str\n :param network_interface_name: The name of the network interface.\n :type network_interface_name: str\n :param expand: Expands referenced resources.\n :type expand: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either NetworkInterfaceIPConfigurationListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceIPConfigurationListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2017-03-30'
accept = 'application/json'
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
if (not next_link):
url = self.list_virtual_machine_scale_set_ip_configurations.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'virtualMachineScaleSetName': self._serialize.url('virtual_machine_scale_set_name', virtual_machine_scale_set_name, 'str'), 'virtualmachineIndex': self._serialize.url('virtualmachine_index', virtualmachine_index, 'str'), 'networkInterfaceName': self._serialize.url('network_interface_name', network_interface_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
if (expand is not None):
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), AsyncList(list_of_elem))
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
|
async def get_virtual_machine_scale_set_ip_configuration(self, resource_group_name: str, virtual_machine_scale_set_name: str, virtualmachine_index: str, network_interface_name: str, ip_configuration_name: str, expand: Optional[str]=None, **kwargs) -> '_models.NetworkInterfaceIPConfiguration':
'Get the specified network interface ip configuration in a virtual machine scale set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param virtual_machine_scale_set_name: The name of the virtual machine scale set.\n :type virtual_machine_scale_set_name: str\n :param virtualmachine_index: The virtual machine index.\n :type virtualmachine_index: str\n :param network_interface_name: The name of the network interface.\n :type network_interface_name: str\n :param ip_configuration_name: The name of the ip configuration.\n :type ip_configuration_name: str\n :param expand: Expands referenced resources.\n :type expand: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: NetworkInterfaceIPConfiguration, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceIPConfiguration\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2017-03-30'
accept = 'application/json'
url = self.get_virtual_machine_scale_set_ip_configuration.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'virtualMachineScaleSetName': self._serialize.url('virtual_machine_scale_set_name', virtual_machine_scale_set_name, 'str'), 'virtualmachineIndex': self._serialize.url('virtualmachine_index', virtualmachine_index, 'str'), 'networkInterfaceName': self._serialize.url('network_interface_name', network_interface_name, 'str'), 'ipConfigurationName': self._serialize.url('ip_configuration_name', ip_configuration_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
if (expand is not None):
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
| 4,947,045,084,881,894,000
|
Get the specified network interface ip configuration in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the ip configuration.
:type ip_configuration_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterfaceIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interfaces_operations.py
|
get_virtual_machine_scale_set_ip_configuration
|
AriZavala2/azure-sdk-for-python
|
python
|
async def get_virtual_machine_scale_set_ip_configuration(self, resource_group_name: str, virtual_machine_scale_set_name: str, virtualmachine_index: str, network_interface_name: str, ip_configuration_name: str, expand: Optional[str]=None, **kwargs) -> '_models.NetworkInterfaceIPConfiguration':
'Get the specified network interface ip configuration in a virtual machine scale set.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param virtual_machine_scale_set_name: The name of the virtual machine scale set.\n :type virtual_machine_scale_set_name: str\n :param virtualmachine_index: The virtual machine index.\n :type virtualmachine_index: str\n :param network_interface_name: The name of the network interface.\n :type network_interface_name: str\n :param ip_configuration_name: The name of the ip configuration.\n :type ip_configuration_name: str\n :param expand: Expands referenced resources.\n :type expand: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: NetworkInterfaceIPConfiguration, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceIPConfiguration\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2017-03-30'
accept = 'application/json'
url = self.get_virtual_machine_scale_set_ip_configuration.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'virtualMachineScaleSetName': self._serialize.url('virtual_machine_scale_set_name', virtual_machine_scale_set_name, 'str'), 'virtualmachineIndex': self._serialize.url('virtualmachine_index', virtualmachine_index, 'str'), 'networkInterfaceName': self._serialize.url('network_interface_name', network_interface_name, 'str'), 'ipConfigurationName': self._serialize.url('ip_configuration_name', ip_configuration_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
if (expand is not None):
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs))
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
|
def supersample(clip, d, n_frames):
'Replaces each frame at time t by the mean of `n_frames` equally spaced frames\n taken in the interval [t-d, t+d]. This results in motion blur.\n '
def filter(get_frame, t):
timings = np.linspace((t - d), (t + d), n_frames)
frame_average = np.mean((1.0 * np.array([get_frame(t_) for t_ in timings], dtype='uint16')), axis=0)
return frame_average.astype('uint8')
return clip.transform(filter)
| -8,634,872,168,947,782,000
|
Replaces each frame at time t by the mean of `n_frames` equally spaced frames
taken in the interval [t-d, t+d]. This results in motion blur.
|
moviepy/video/fx/supersample.py
|
supersample
|
va6996/moviepy
|
python
|
def supersample(clip, d, n_frames):
'Replaces each frame at time t by the mean of `n_frames` equally spaced frames\n taken in the interval [t-d, t+d]. This results in motion blur.\n '
def filter(get_frame, t):
timings = np.linspace((t - d), (t + d), n_frames)
frame_average = np.mean((1.0 * np.array([get_frame(t_) for t_ in timings], dtype='uint16')), axis=0)
return frame_average.astype('uint8')
return clip.transform(filter)
|
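supersample builds a filter closure over get_frame and hands it to clip.transform, so each output frame is the mean of n_frames samples accumulated in uint16. A usage sketch (file names are hypothetical; the VideoFileClip import path varies across moviepy versions, so treat the imports as assumptions):

from moviepy import VideoFileClip  # older releases: from moviepy.editor import VideoFileClip
from moviepy.video.fx.supersample import supersample

clip = VideoFileClip("input.mp4")
# Average 5 frames sampled evenly in [t - 0.05, t + 0.05] around each t;
# the uint16 accumulation inside supersample avoids uint8 overflow.
blurred = supersample(clip, d=0.05, n_frames=5)
blurred.write_videofile("blurred.mp4")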
def _get_filesystem_space_available(self) -> int:
'\n Determine the space available on the device filesystem\n\n Args:\n N/A\n\n Returns:\n int: filesystem bytes available\n\n Raises:\n FailedToDetermineDeviceState: if unable to fetch filesystem bytes available\n\n '
filesystem_size_result = self.conn.send_command(command=f'dir {self.filesystem} | i bytes')
if filesystem_size_result.failed:
raise FailedToDetermineDeviceState('failed to determine space available on filesystem')
return self._post_get_filesystem_space_available(output=filesystem_size_result.result)
| 4,290,810,500,748,125,000
|
Determine the space available on the device filesystem
Args:
N/A
Returns:
int: filesystem bytes available
Raises:
FailedToDetermineDeviceState: if unable to fetch filesystem bytes available
|
scrapli_cfg/platform/core/cisco_iosxe/sync_platform.py
|
_get_filesystem_space_available
|
m1009d/scrapli_cfg
|
python
|
def _get_filesystem_space_available(self) -> int:
'\n Determine the space available on the device filesystem\n\n Args:\n N/A\n\n Returns:\n int: filesystem bytes available\n\n Raises:\n FailedToDetermineDeviceState: if unable to fetch filesystem bytes available\n\n '
filesystem_size_result = self.conn.send_command(command=f'dir {self.filesystem} | i bytes')
if filesystem_size_result.failed:
raise FailedToDetermineDeviceState('failed to determine space available on filesystem')
return self._post_get_filesystem_space_available(output=filesystem_size_result.result)
|
def _determine_file_prompt_mode(self) -> FilePromptMode:
'\n Determine the device file prompt mode\n\n Args:\n N/A\n\n Returns:\n FilePromptMode: enum representing file prompt mode\n\n Raises:\n FailedToDetermineDeviceState: if unable to fetch file prompt mode\n\n '
file_prompt_mode_result = self.conn.send_command(command='show run | i file prompt')
if file_prompt_mode_result.failed:
raise FailedToDetermineDeviceState('failed to determine file prompt mode')
return self._post_determine_file_prompt_mode(output=file_prompt_mode_result.result)
| 4,494,592,894,372,750,000
|
Determine the device file prompt mode
Args:
N/A
Returns:
FilePromptMode: enum representing file prompt mode
Raises:
FailedToDetermineDeviceState: if unable to fetch file prompt mode
|
scrapli_cfg/platform/core/cisco_iosxe/sync_platform.py
|
_determine_file_prompt_mode
|
m1009d/scrapli_cfg
|
python
|
def _determine_file_prompt_mode(self) -> FilePromptMode:
'\n Determine the device file prompt mode\n\n Args:\n N/A\n\n Returns:\n FilePromptMode: enum representing file prompt mode\n\n Raises:\n FailedToDetermineDeviceState: if unable to fetch file prompt mode\n\n '
file_prompt_mode_result = self.conn.send_command(command='show run | i file prompt')
if file_prompt_mode_result.failed:
raise FailedToDetermineDeviceState('failed to determine file prompt mode')
return self._post_determine_file_prompt_mode(output=file_prompt_mode_result.result)
|
def _delete_candidate_config(self) -> Response:
'\n Delete candidate config from the filesystem\n\n Args:\n N/A\n\n Returns:\n Response: response from deleting the candidate config\n\n Raises:\n N/A\n\n '
file_prompt_mode = self._determine_file_prompt_mode()
if (file_prompt_mode in (FilePromptMode.ALERT, FilePromptMode.NOISY)):
delete_events = [(f'delete {self.filesystem}{self.candidate_config_filename}', 'Delete filename'), ('', '[confirm]'), ('', '')]
else:
delete_events = [(f'delete {self.filesystem}{self.candidate_config_filename}', '[confirm]'), ('', '')]
delete_result = self.conn.send_interactive(interact_events=delete_events)
return delete_result
| -3,679,154,330,024,867,300
|
Delete candidate config from the filesystem
Args:
N/A
Returns:
Response: response from deleting the candidate config
Raises:
N/A
|
scrapli_cfg/platform/core/cisco_iosxe/sync_platform.py
|
_delete_candidate_config
|
m1009d/scrapli_cfg
|
python
|
def _delete_candidate_config(self) -> Response:
'\n Delete candidate config from the filesystem\n\n Args:\n N/A\n\n Returns:\n Response: response from deleting the candidate config\n\n Raises:\n N/A\n\n '
file_prompt_mode = self._determine_file_prompt_mode()
if (file_prompt_mode in (FilePromptMode.ALERT, FilePromptMode.NOISY)):
delete_events = [(f'delete {self.filesystem}{self.candidate_config_filename}', 'Delete filename'), ('', '[confirm]'), ('', '')]
else:
delete_events = [(f'delete {self.filesystem}{self.candidate_config_filename}', '[confirm]'), ('', '')]
delete_result = self.conn.send_interactive(interact_events=delete_events)
return delete_result
|
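_delete_candidate_config illustrates the general scrapli send_interactive pattern: a list of (input, expected_prompt) pairs, where an empty input simply sends a return to acknowledge a confirmation prompt. A standalone sketch of the same pattern, assuming an already-open scrapli connection conn and a hypothetical filename:

events = [
    ("delete flash:scrapli_cfg_candidate", "Delete filename"),  # hypothetical file
    ("", "[confirm]"),  # accept the suggested filename
    ("", ""),           # confirm the delete
]
result = conn.send_interactive(interact_events=events)
print(result.failed)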
def load_config(self, config: str, replace: bool=False, **kwargs: Any) -> ScrapliCfgResponse:
'\n Load configuration to a device\n\n Supported kwargs:\n auto_clean: automatically "clean" any data that would be in a configuration from a\n "get_config" operation that would prevent loading a config -- for example, things\n like the "Building Configuration" lines in IOSXE output, etc.. Defaults to `True`\n\n Args:\n config: string of the configuration to load\n replace: replace the configuration or not, if false configuration will be loaded as a\n merge operation\n kwargs: additional kwargs that the implementing classes may need for their platform,\n see above for iosxe supported kwargs\n\n Returns:\n ScrapliCfgResponse: response object\n\n Raises:\n N/A\n\n '
if (kwargs.get('auto_clean', True) is True):
config = self.clean_config(config=config)
response = self._pre_load_config(config=config)
config = self._prepare_load_config(config=config, replace=replace)
filesystem_bytes_available = self._get_filesystem_space_available()
self._space_available(filesystem_bytes_available=filesystem_bytes_available)
original_return_char = self.conn.comms_return_char
tcl_comms_return_char = '\r'
self.conn.acquire_priv(desired_priv='tclsh')
self.conn.comms_return_char = tcl_comms_return_char
config_result = self.conn.send_config(config=config, privilege_level='tclsh')
self.conn.acquire_priv(desired_priv=self.conn.default_desired_privilege_level)
self.conn.comms_return_char = original_return_char
return self._post_load_config(response=response, scrapli_responses=[config_result])
| -8,690,298,349,246,840,000
|
Load configuration to a device
Supported kwargs:
auto_clean: automatically "clean" any data that would be in a configuration from a
"get_config" operation that would prevent loading a config -- for example, things
like the "Building Configuration" lines in IOSXE output, etc.. Defaults to `True`
Args:
config: string of the configuration to load
replace: replace the configuration or not, if false configuration will be loaded as a
merge operation
kwargs: additional kwargs that the implementing classes may need for their platform,
see above for iosxe supported kwargs
Returns:
ScrapliCfgResponse: response object
Raises:
N/A
|
scrapli_cfg/platform/core/cisco_iosxe/sync_platform.py
|
load_config
|
m1009d/scrapli_cfg
|
python
|
def load_config(self, config: str, replace: bool=False, **kwargs: Any) -> ScrapliCfgResponse:
'\n Load configuration to a device\n\n Supported kwargs:\n auto_clean: automatically "clean" any data that would be in a configuration from a\n "get_config" operation that would prevent loading a config -- for example, things\n like the "Building Configuration" lines in IOSXE output, etc.. Defaults to `True`\n\n Args:\n config: string of the configuration to load\n replace: replace the configuration or not, if false configuration will be loaded as a\n merge operation\n kwargs: additional kwargs that the implementing classes may need for their platform,\n see above for iosxe supported kwargs\n\n Returns:\n ScrapliCfgResponse: response object\n\n Raises:\n N/A\n\n '
if (kwargs.get('auto_clean', True) is True):
config = self.clean_config(config=config)
response = self._pre_load_config(config=config)
config = self._prepare_load_config(config=config, replace=replace)
filesystem_bytes_available = self._get_filesystem_space_available()
self._space_available(filesystem_bytes_available=filesystem_bytes_available)
original_return_char = self.conn.comms_return_char
tcl_comms_return_char = '\r'
self.conn.acquire_priv(desired_priv='tclsh')
self.conn.comms_return_char = tcl_comms_return_char
config_result = self.conn.send_config(config=config, privilege_level='tclsh')
self.conn.acquire_priv(desired_priv=self.conn.default_desired_privilege_level)
self.conn.comms_return_char = original_return_char
return self._post_load_config(response=response, scrapli_responses=[config_result])
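The tclsh detour above exists because IOS-XE has no native way to write an arbitrary file from the CLI; the candidate config is written to the filesystem through tcl, which is driven with `\r` as the return character. Callers normally reach this method through scrapli_cfg's public factory rather than directly; a hedged end-to-end sketch (method names per scrapli_cfg's documented API, connection details and filenames placeholders):

from scrapli.driver.core import IOSXEDriver
from scrapli_cfg import ScrapliCfg

conn = IOSXEDriver(
    host="192.0.2.1",          # placeholder address
    auth_username="admin",
    auth_password="admin",
    auth_strict_key=False,
)
conn.open()

cfg = ScrapliCfg(conn=conn)
cfg.prepare()                      # gathers version info, sets up the platform

with open("candidate.cfg") as f:   # placeholder filename
    candidate = f.read()

cfg.load_config(config=candidate, replace=True)  # auto_clean defaults to True
cfg.commit_config()
cfg.cleanup()
conn.close()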
|
def save_config(self) -> Response:
'\n Save the config -- "copy run start"!\n\n Args:\n N/A\n\n Returns:\n Response: scrapli response object\n\n Raises:\n N/A\n\n '
file_prompt_mode = self._determine_file_prompt_mode()
if (file_prompt_mode == FilePromptMode.ALERT):
save_events = [('copy running-config startup-config', 'Destination filename'), ('', '')]
elif (file_prompt_mode == FilePromptMode.NOISY):
save_events = [('copy running-config startup-config', 'Source filename'), ('', 'Destination filename'), ('', '')]
else:
save_events = [('copy running-config startup-config', '')]
save_result = self.conn.send_interactive(interact_events=save_events)
return save_result
| 2,636,294,837,735,312,000
|
Save the config -- "copy run start"!
Args:
N/A
Returns:
Response: scrapli response object
Raises:
N/A
|
scrapli_cfg/platform/core/cisco_iosxe/sync_platform.py
|
save_config
|
m1009d/scrapli_cfg
|
python
|
def save_config(self) -> Response:
'\n Save the config -- "copy run start"!\n\n Args:\n N/A\n\n Returns:\n Response: scrapli response object\n\n Raises:\n N/A\n\n '
file_prompt_mode = self._determine_file_prompt_mode()
if (file_prompt_mode == FilePromptMode.ALERT):
save_events = [('copy running-config startup-config', 'Destination filename'), ('', '')]
elif (file_prompt_mode == FilePromptMode.NOISY):
save_events = [('copy running-config startup-config', 'Source filename'), ('', 'Destination filename'), ('', '')]
else:
save_events = [('copy running-config startup-config', '')]
save_result = self.conn.send_interactive(interact_events=save_events)
return save_result
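The three branches differ only in how chatty the copy is: `noisy` prompts for source and destination filenames, `alert` for destination only, and `quiet` for nothing. A hedged restatement of that dispatch as a lookup table (same event tuples as above; `FilePromptMode` is the platform's enum, assumed to carry a QUIET member):

SAVE_EVENTS = {
    FilePromptMode.ALERT: [
        ("copy running-config startup-config", "Destination filename"),
        ("", ""),
    ],
    FilePromptMode.NOISY: [
        ("copy running-config startup-config", "Source filename"),
        ("", "Destination filename"),
        ("", ""),
    ],
    FilePromptMode.QUIET: [
        ("copy running-config startup-config", ""),
    ],
}

events = SAVE_EVENTS[file_prompt_mode]

A table keeps the prompt sequences in one place, though with only three cases the explicit if/elif in the source reads just as clearly.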
|
def _commit_config_merge(self, file_prompt_mode: Optional[FilePromptMode]=None) -> Response:
'\n Commit the configuration in merge mode\n\n Args:\n file_prompt_mode: optionally provide the file prompt mode, if it is None we will fetch it\n to decide if we need to use interactive mode or not\n\n Returns:\n Response: scrapli response object\n\n Raises:\n N/A\n\n '
if (file_prompt_mode is None):
file_prompt_mode = self._determine_file_prompt_mode()
if (file_prompt_mode == FilePromptMode.ALERT):
merge_events = [(f'copy {self.filesystem}{self.candidate_config_filename} running-config', 'Destination filename'), ('', '')]
elif (file_prompt_mode == FilePromptMode.NOISY):
merge_events = [(f'copy {self.filesystem}{self.candidate_config_filename} running-config', 'Source filename'), ('', 'Destination filename'), ('', '')]
else:
merge_events = [(f'copy {self.filesystem}{self.candidate_config_filename} running-config', '')]
commit_result = self.conn.send_interactive(interact_events=merge_events)
return commit_result
| 8,629,980,584,191,336,000
|
Commit the configuration in merge mode
Args:
file_prompt_mode: optionally provide the file prompt mode, if it is None we will fetch it
to decide if we need to use interactive mode or not
Returns:
Response: scrapli response object
Raises:
N/A
|
scrapli_cfg/platform/core/cisco_iosxe/sync_platform.py
|
_commit_config_merge
|
m1009d/scrapli_cfg
|
python
|
def _commit_config_merge(self, file_prompt_mode: Optional[FilePromptMode]=None) -> Response:
'\n Commit the configuration in merge mode\n\n Args:\n file_prompt_mode: optionally provide the file prompt mode, if it is None we will fetch it\n to decide if we need to use interactive mode or not\n\n Returns:\n Response: scrapli response object\n\n Raises:\n N/A\n\n '
if (file_prompt_mode is None):
file_prompt_mode = self._determine_file_prompt_mode()
if (file_prompt_mode == FilePromptMode.ALERT):
merge_events = [(f'copy {self.filesystem}{self.candidate_config_filename} running-config', 'Destination filename'), ('', '')]
elif (file_prompt_mode == FilePromptMode.NOISY):
merge_events = [(f'copy {self.filesystem}{self.candidate_config_filename} running-config', 'Source filename'), ('', 'Destination filename'), ('', '')]
else:
merge_events = [(f'copy {self.filesystem}{self.candidate_config_filename} running-config', '')]
commit_result = self.conn.send_interactive(interact_events=merge_events)
return commit_result
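For contrast with the merge path above, a replace-style commit on IOS-XE typically goes through `configure replace` rather than `copy`; the helper below is a sketch assumed from standard IOS-XE behavior, not taken from this row:

def commit_config_replace(conn, filesystem: str, filename: str):
    # "configure replace" swaps the running config wholesale; "force"
    # suppresses the interactive confirmation prompt.
    return conn.send_command(
        command=f"configure replace {filesystem}{filename} force"
    )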
|
@Metadata.property
def name(self):
'\n Returns:\n str?: package name\n '
return self.get('name')
| 5,820,657,966,021,142,000
|
Returns:
str?: package name
|
frictionless/package.py
|
name
|
augusto-herrmann/frictionless-py
|
python
|
@Metadata.property
def name(self):
'\n Returns:\n str?: package name\n '
return self.get('name')
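The remaining rows in this file are thin `Metadata.property` getters over the package descriptor; a minimal usage sketch covering several of them (the descriptor path is a placeholder):

from frictionless import Package

package = Package("datapackage.json")  # placeholder descriptor path
print(package.name)       # e.g. "my-dataset"
print(package.title)      # None when the descriptor omits a title
print(package.keywords)   # list of strings, or None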
|
@Metadata.property
def id(self):
'\n Returns:\n str?: package id\n '
return self.get('id')
| 8,748,339,723,644,212,000
|
Returns:
str?: package id
|
frictionless/package.py
|
id
|
augusto-herrmann/frictionless-py
|
python
|
@Metadata.property
def id(self):
'\n Returns:\n str?: package id\n '
return self.get('id')
|
@Metadata.property
def licenses(self):
'\n Returns:\n dict?: package licenses\n '
return self.get('licenses')
| -8,123,081,130,484,642,000
|
Returns:
dict?: package licenses
|
frictionless/package.py
|
licenses
|
augusto-herrmann/frictionless-py
|
python
|
@Metadata.property
def licenses(self):
'\n Returns:\n dict?: package licenses\n '
return self.get('licenses')
|
@Metadata.property
def profile(self):
'\n Returns:\n str: package profile\n '
return self.get('profile', config.DEFAULT_PACKAGE_PROFILE)
| 1,270,557,999,987,451,100
|
Returns:
str: package profile
|
frictionless/package.py
|
profile
|
augusto-herrmann/frictionless-py
|
python
|
@Metadata.property
def profile(self):
'\n Returns:\n str: package profile\n '
return self.get('profile', config.DEFAULT_PACKAGE_PROFILE)
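`profile` is the one getter here with a fallback: when the descriptor has no `profile` key it returns `config.DEFAULT_PACKAGE_PROFILE`, which is "data-package" in current frictionless releases (treat the literal as an assumption):

from frictionless import Package

package = Package({"name": "example"})  # no explicit profile in the descriptor
print(package.profile)                  # expected: "data-package" (library default)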
|
@Metadata.property
def title(self):
'\n Returns:\n str?: package title\n '
return self.get('title')
| -277,341,525,452,260,380
|
Returns:
str?: package title
|
frictionless/package.py
|
title
|
augusto-herrmann/frictionless-py
|
python
|
@Metadata.property
def title(self):
'\n Returns:\n str?: package title\n '
return self.get('title')
|
@Metadata.property
def description(self):
'\n Returns:\n str?: package description\n '
return self.get('description')
| 3,847,154,689,001,268,000
|
Returns:
str?: package description
|
frictionless/package.py
|
description
|
augusto-herrmann/frictionless-py
|
python
|
@Metadata.property
def description(self):
'\n Returns:\n str?: package description\n '
return self.get('description')
|
@Metadata.property
def homepage(self):
'\n Returns:\n str?: package homepage\n '
return self.get('homepage')
| 7,836,316,663,488,343,000
|
Returns:
str?: package homepage
|
frictionless/package.py
|
homepage
|
augusto-herrmann/frictionless-py
|
python
|
@Metadata.property
def homepage(self):
'\n Returns:\n str?: package homepage\n '
return self.get('homepage')
|
@Metadata.property
def version(self):
'\n Returns:\n str?: package version\n '
return self.get('version')
| 7,422,947,098,319,725,000
|
Returns:
str?: package version
|
frictionless/package.py
|
version
|
augusto-herrmann/frictionless-py
|
python
|
@Metadata.property
def version(self):
'\n Returns:\n str?: package version\n '
return self.get('version')
|
@Metadata.property
def sources(self):
'\n Returns:\n dict[]?: package sources\n '
return self.get('sources')
| 1,385,391,418,121,966,300
|
Returns:
dict[]?: package sources
|
frictionless/package.py
|
sources
|
augusto-herrmann/frictionless-py
|
python
|
@Metadata.property
def sources(self):
'\n Returns:\n dict[]?: package sources\n '
return self.get('sources')
|
@Metadata.property
def contributors(self):
'\n Returns:\n dict[]?: package contributors\n '
return self.get('contributors')
| -3,563,835,736,333,918,000
|
Returns:
dict[]?: package contributors
|
frictionless/package.py
|
contributors
|
augusto-herrmann/frictionless-py
|
python
|
@Metadata.property
def contributors(self):
'\n Returns:\n dict[]?: package contributors\n '
return self.get('contributors')
|
@Metadata.property
def keywords(self):
'\n Returns:\n str[]?: package keywords\n '
return self.get('keywords')
| -7,563,408,634,650,146,000
|
Returns:
str[]?: package keywords
|
frictionless/package.py
|
keywords
|
augusto-herrmann/frictionless-py
|
python
|
@Metadata.property
def keywords(self):
'\n Returns:\n str[]?: package keywords\n '
return self.get('keywords')
|
@Metadata.property
def image(self):
'\n Returns:\n str?: package image\n '
return self.get('image')
| -6,807,587,785,999,051,000
|
Returns:
str?: package image
|
frictionless/package.py
|
image
|
augusto-herrmann/frictionless-py
|
python
|
@Metadata.property
def image(self):
'\n Returns:\n str?: package image\n '
return self.get('image')
|
@Metadata.property
def created(self):
'\n Returns:\n str?: package created\n '
return self.get('created')
| 329,250,756,673,019,840
|
Returns:
str?: package created
|
frictionless/package.py
|
created
|
augusto-herrmann/frictionless-py
|
python
|
@Metadata.property
def created(self):
'\n Returns:\n str?: package created\n '
return self.get('created')
|
@Metadata.property(cache=False, write=False)
def hashing(self):
'\n Returns:\n str: package hashing\n '
return self.__hashing
| 9,121,979,823,706,777,000
|
Returns:
str: package hashing
|
frictionless/package.py
|
hashing
|
augusto-herrmann/frictionless-py
|
python
|
@Metadata.property(cache=False, write=False)
def hashing(self):
'\n Returns:\n str: package hashing\n '
return self.__hashing
|
@Metadata.property(cache=False, write=False)
def basepath(self):
'\n Returns:\n str: package basepath\n '
return self.__basepath
| -6,273,207,310,273,945,000
|
Returns:
str: package basepath
|
frictionless/package.py
|
basepath
|
augusto-herrmann/frictionless-py
|
python
|
@Metadata.property(cache=False, write=False)
def basepath(self):
'\n Returns:\n str: package basepath\n '
return self.__basepath
|
@Metadata.property(cache=False, write=False)
def onerror(self):
'\n Returns:\n ignore|warn|raise: on error behaviour\n '
return self.__onerror
| -2,288,389,652,372,572,400
|
Returns:
ignore|warn|raise: on error behaviour
|
frictionless/package.py
|
onerror
|
augusto-herrmann/frictionless-py
|
python
|
@Metadata.property(cache=False, write=False)
def onerror(self):
'\n Returns:\n ignore|warn|raise: on error behaviour\n '
return self.__onerror
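`hashing`, `basepath`, and `onerror` are all `cache=False, write=False` properties, i.e. read-only values fixed when the Package is constructed rather than keys of the descriptor. A short hedged sketch of setting and reading `onerror` (a keyword accepted by the Package constructor per the frictionless docs):

from frictionless import Package

package = Package("datapackage.json", onerror="raise")  # placeholder path
print(package.onerror)  # "raise"; "ignore" and "warn" are the other modes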
|