gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
# -*- coding: utf-8 -*-
""" Organisation Registry - Controllers
@author: Fran Boon
@author: Michael Howden
"""
# Controller setup: web2py executes this file on every request to the module,
# with request/session/response/db/T injected as globals by the framework.
prefix = request.controller        # module prefix, e.g. "org"
resourcename = request.function    # the controller function being invoked
# Block access when this module has been disabled in deployment settings.
if prefix not in deployment_settings.modules:
    session.error = T("Module disabled!")
    redirect(URL(r=request, c="default", f="index"))
# Options Menu (available in all Functions' Views)
response.menu_options = org_menu
#==============================================================================
def index():
    """ Module's Home Page: shows the module's human-readable name """
    nice_name = deployment_settings.modules[prefix].name_nice
    response.title = nice_name
    return dict(module_name=nice_name)
#==============================================================================
def cluster():
    """ RESTful CRUD controller for clusters """
    # Touch the table so the model is loaded before the REST controller runs.
    table = db["%s_%s" % (prefix, resourcename)]
    return s3_rest_controller(prefix, resourcename)
#==============================================================================
def cluster_subsector():
    """ RESTful CRUD controller for cluster subsectors """
    # Touch the table so the model is loaded before the REST controller runs.
    table = db["%s_%s" % (prefix, resourcename)]
    return s3_rest_controller(prefix, resourcename)
#==============================================================================
def organisation():
    """ RESTful CRUD controller for organisations """

    def org_rheader(r):
        # Tab layout for the organisation resource header
        tabs = [(T("Basic Details"), None),
                (T("Staff"), "staff"),
                (T("Offices"), "office"),
                (T("Warehouses"), "store"),
                (T("Assessments"), "assess"),
                (T("Projects"), "project"),
                (T("Activities"), "activity"),
                #(T("Tasks"), "task"),
                #(T("Donors"), "organisation"),
                #(T("Sites"), "site"), # Ticket 195
               ]
        return shn_org_rheader(r, tabs=tabs)

    return s3_rest_controller(prefix, resourcename, rheader=org_rheader)
#==============================================================================
def office():
    """ RESTful CRUD controller for offices

        Handles popup creation from the organisation form by defaulting
        organisation_id, and narrows the organisation dropdown on create.
    """
    tablename = "%s_%s" % (prefix, resourcename)
    table = db[tablename]
    # request.vars.organisation_id can arrive as a list when the var is
    # repeated in the query string; collapse it to a single value.
    if isinstance(request.vars.organisation_id, list):
        request.vars.organisation_id = request.vars.organisation_id[0]
    # Pre-processor
    def prep(r):
        if r.representation == "popup":
            # Popup from an organisation page: default to that organisation
            # (falling back to the one stored in the session, if any).
            organisation = request.vars.organisation_id or session.s3.organisation_id or ""
            if organisation:
                table.organisation_id.default = organisation
        # No point in downloading large dropdowns which we hide, so provide a smaller represent
        # the update forms are not ready. when they will - uncomment this and comment the next one
        #if r.method in ("create", "update"):
        if r.method == "create":
            table.organisation_id.requires = IS_NULL_OR(IS_ONE_OF_EMPTY(db, "org_organisation.id"))
            # "None" (string) is what an empty dropdown submits - ignore it.
            if request.vars.organisation_id and request.vars.organisation_id != "None":
                table.organisation_id.default = request.vars.organisation_id
        return True
    response.s3.prep = prep
    rheader = lambda r: shn_org_rheader(r,
                                        tabs = [(T("Basic Details"), None),
                                                (T("Contact Data"), "pe_contact"),
                                                (T("Staff"), "staff"),
                                               ])
    return s3_rest_controller(prefix, resourcename, rheader=rheader)
#==============================================================================
def staff():
    """ RESTful CRUD controller for staff

        On create, replaces the organisation dropdown with an autocomplete
        widget that also reloads the office list client-side.
    """
    tablename = "%s_%s" % (prefix, resourcename)
    table = db[tablename]
    # Pre-processor
    def prep(r):
        # No point in downloading large dropdowns which we hide, so provide a smaller represent
        # the update forms are not ready. when they will - uncomment this and comment the next one
        #if r.method in ("create", "update"):
        if r.method == "create":
            # person_id mandatory for a staff? We should allow room for vacant positions
            #table.person_id.requires = IS_ONE_OF_EMPTY(db, "pr_person.id")
            table.organisation_id.widget = S3AutocompleteWidget(request, "org", "organisation", post_process="load_offices(false);")
        return True
    response.s3.prep = prep
    return s3_rest_controller(prefix, resourcename)
#==============================================================================
def donor():
    """ RESTful CRUD controller for donors """
    table = db["%s_%s" % (prefix, resourcename)]
    # Donors are created via the full create form, not the inline list-add.
    s3xrc.model.configure(table, listadd=False)
    return s3_rest_controller(prefix, resourcename)
#==============================================================================
# Component Resources need these settings to be visible where they are linked from
# - so we put them outside their controller function
# CRUD strings for the donor resource (ADD_DONOR is defined in the model).
tablename = "%s_%s" % (prefix, "donor")
s3.crud_strings[tablename] = Storage(
    title_create = ADD_DONOR,
    title_display = T("Donor Details"),
    title_list = T("Donors Report"),
    title_update = T("Edit Donor"),
    title_search = T("Search Donors"),
    subtitle_create = T("Add New Donor"),
    subtitle_list = T("Donors"),
    label_list_button = T("List Donors"),
    label_create_button = ADD_DONOR,
    label_delete_button = T("Delete Donor"),
    msg_record_created = T("Donor added"),
    msg_record_modified = T("Donor updated"),
    msg_record_deleted = T("Donor deleted"),
    msg_list_empty = T("No Donors currently registered"))
#==============================================================================
def shn_org_rheader(r, tabs=[]):
    """ Organisation Registry page headers

        Builds the resource header (summary table + tab row) shown above
        organisation and office pages. Returns None for non-HTML
        representations and for list/create views (no record yet).

        @param r: the S3 request object
        @param tabs: list of (label, component) tuples for the tab row
                     (mutable default is safe here: it is only read)
    """
    if r.representation == "html":
        if r.record is None:
            # List or Create form: rheader makes no sense here
            return None
        rheader_tabs = shn_rheader_tabs(r, tabs)
        if r.name == "organisation":
            #_next = r.here()
            #_same = r.same()
            organisation = r.record
            if organisation:
                # Cluster memberships, rendered as a readable string
                if organisation.cluster_id:
                    _sectors = shn_org_cluster_represent(organisation.cluster_id)
                else:
                    _sectors = None
                # Organisation type may be absent from the options map
                try:
                    _type = org_organisation_type_opts[organisation.type]
                except KeyError:
                    _type = None
                rheader = DIV(TABLE(
                    TR(
                        TH(T("Organization") + ": "),
                        organisation.name,
                        TH(T("Cluster(s)") + ": "),
                        _sectors
                        ),
                    TR(
                        #TH(A(T("Edit Organization"),
                        #     _href=URL(r=request, c="org", f="organisation", args=[r.id, "update"], vars={"_next": _next})))
                        TH(T("Type") + ": "),
                        _type,
                        )
                    ), rheader_tabs)
                return rheader
        elif r.name == "office":
            office = r.record
            if office:
                # Look up the parent organisation's name for display
                organisation = db(db.org_organisation.id == office.organisation_id).select(db.org_organisation.name, limitby=(0, 1)).first()
                if organisation:
                    org_name = organisation.name
                else:
                    org_name = None
                rheader = DIV(TABLE(
                    TR(
                        TH(T("Name") + ": "),
                        office.name,
                        TH(T("Type") + ": "),
                        org_office_type_opts.get(office.type, UNKNOWN_OPT),
                        ),
                    TR(
                        TH(T("Organization") + ": "),
                        org_name,
                        TH(T("Location") + ": "),
                        shn_gis_location_represent(office.location_id),
                        ),
                    TR(
                        #TH(A(T("Edit Office"),
                        #     _href=URL(r=request, c="org", f="office", args=[r.id, "update"], vars={"_next": _next})))
                        )
                    ), rheader_tabs)
                return rheader
    return None
#==============================================================================
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstractions for the head(s) of a model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import six
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export_output
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import nn
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.summary import summary
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
# The above default is defined by TF Serving, but these next three are just
# a local convention without any special meaning.
_CLASSIFY_SERVING_KEY = 'classification'
_REGRESS_SERVING_KEY = 'regression'
_PREDICT_SERVING_KEY = 'predict'
# Return type of _Head.create_loss: the per-example (unreduced) loss tensor
# plus the labels after any preprocessing (vocabulary lookup, shape fixes).
LossAndLabels = collections.namedtuple('LossAndLabels',
                                       ['unweighted_loss', 'processed_labels'])
def _summary_key(head_name, val):
return '%s/%s' % (val, head_name) if head_name else val
class _Head(object):
  """Interface for the head/top of a model.

  Given logits (or output of a hidden layer), a Head knows how to compute
  predictions, loss, train_op, metrics and export outputs. It is meant to:

  1. Simplify writing model_fn and to make model_fn more configurable
  2. Support wide range of machine learning models. Since most heads can work
     with logits, they can support DNN, RNN, Wide, Wide&Deep,
     Global objectives, Gradient boosted trees and many other types
     of machine learning models.

  Common usage:
  Here is simplified model_fn to build a DNN regression model.
    ```python
    def _my_dnn_model_fn(features, labels, mode, params, config=None):
      # Optionally your callers can pass head to model_fn as a param.
      head = tf.contrib.learn.regression_head(...)
      input = tf.contrib.layers.input_from_feature_columns(features, ...)
      last_hidden_layer_out = tf.contrib.layers.stack(
          input, tf.contrib.layers.fully_connected, [1000, 500])
      logits = tf.contrib.layers.fully_connected(
          last_hidden_layer_out, head.logits_dimension, activation_fn=None)

      def _train_op_fn(loss):
        return optimizer.minimize(loss)

      return head.create_estimator_spec(
          features=features,
          labels=labels,
          mode=mode,
          logits=logits,
          train_op_fn=_train_op_fn)
    ```

  There are cases where computing and applying gradients can not be
  meaningfully captured with train_op_fn we support (for example, with sync
  optimizer). In such case, you can take the responsibility on your own. Here
  is a common use case,
    ```python
    estimator_spec = head.create_estimator_spec(
        features=features,
        labels=labels,
        mode=mode,
        logits=logits,
        train_op_fn=tf.contrib.learn.no_op_train_fn)
    if mode == model_fn.ModeKeys.TRAIN:
      optimizer = ...
      sync = tf.train.SyncReplicasOptimizer(opt=optimizer, ...)
      update_op = tf.contrib.layers.optimize_loss(optimizer=sync,
                                                  loss=estimator_spec.loss, ...)
      hooks = [sync.make_session_run_hook(is_chief)]
      ... update train_op and hooks in EstimatorSpec and return
    ```
  """
  __metaclass__ = abc.ABCMeta

  @abc.abstractproperty
  def name(self):
    """The name of this head.

    Returns:
      A string.
    """
    raise NotImplementedError('Calling an abstract method.')

  @abc.abstractproperty
  def logits_dimension(self):
    """Size of the last dimension of the logits `Tensor`.

    Typically, logits is of shape `[batch_size, logits_dimension]`.

    Returns:
      The expected size of the `logits` tensor.
    """
    raise NotImplementedError('Calling an abstract method.')

  @abc.abstractmethod
  def create_loss(self, features, mode, logits, labels):
    """Returns a loss Tensor from provided logits.

    This function is designed to be used by framework developers. Almost all
    users should use create_estimator_spec(), which calls this internally.
    `mode` and `features` are most likely not used, but some Head
    implementations may require them.

    Args:
      features: Input `dict` of `Tensor` objects.
      mode: Estimator's `ModeKeys`.
      logits: logits `Tensor` to be used for loss construction.
      labels: Labels `Tensor`, or `dict` of same.

    Returns:
      A LossAndLabels that contains the `Tensor` representing the loss and
      possibly processed labels (e.g. vocabulary lookup, shape manipulation,
      etc.), to be extendable in the future.
    """
    raise NotImplementedError('Calling an abstract method.')

  @abc.abstractmethod
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """Returns `EstimatorSpec` that a model_fn can return.

    Please note that,
    + All args must be passed via name.

    Args:
      features: Input `dict` of `Tensor` objects.
      mode: Estimator's `ModeKeys`.
      logits: logits `Tensor` to be used by the head.
      labels: Labels `Tensor`, or `dict` of same.
      train_op_fn: Function that takes a scalar loss `Tensor` and returns an op
        to optimize the model with the loss. This is used in TRAIN mode and
        must not be None. None is allowed in other modes. If you want to
        optimize loss yourself you can pass `no_op_train_fn` and then use
        EstimatorSpec.loss to compute and apply gradients.

    Returns:
      `EstimatorSpec`.
    """
    raise NotImplementedError('Calling an abstract method.')
def _maybe_expand_dim(tensor):
  """Append a size-1 trailing dim when `tensor` has static rank 1.

  Tensors of any other (or unknown) rank are returned unchanged.
  """
  with ops.name_scope(None, 'maybe_expand_dim', (tensor,)):
    tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)
    if isinstance(tensor, sparse_tensor.SparseTensor):
      raise ValueError('SparseTensor labels are not supported.')
    static_shape = tensor.shape
    if static_shape is None:
      return tensor
    if static_shape.ndims == 1:
      return array_ops.expand_dims(tensor, -1)
    return tensor
def _check_labels(labels, expected_labels_dimension):
  """Check labels type and shape.

  Validates statically (raising ValueError at graph-build time when the
  static shape is known) and dynamically (via assert ops that run with the
  graph) that labels are dense, rank 2, and have the expected last dimension.
  Returns the (identity of the) validated labels tensor.
  """
  with ops.name_scope(None, 'labels', (labels,)) as scope:
    labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
    if isinstance(labels, sparse_tensor.SparseTensor):
      raise ValueError('SparseTensor labels are not supported.')
    labels_shape = array_ops.shape(labels)
    err_msg = 'labels shape must be [batch_size, {}]'.format(
        expected_labels_dimension)
    assert_rank = check_ops.assert_rank(labels, 2, message=err_msg)
    # The returned identity must depend on the assert ops, so the checks
    # actually execute; hence the nested control_dependencies scopes.
    with ops.control_dependencies([assert_rank]):
      static_shape = labels.shape
      if static_shape is not None:
        dim1 = static_shape[1]
        if (dim1 is not None) and (dim1 != expected_labels_dimension):
          raise ValueError(
              'Mismatched label shape. '
              'Classifier configured with n_classes=%s.  Received %s. '
              'Suggested Fix: check your n_classes argument to the estimator '
              'and/or the shape of your label.' %
              (expected_labels_dimension, dim1))
      assert_dimension = check_ops.assert_equal(
          expected_labels_dimension, labels_shape[1], message=err_msg)
      with ops.control_dependencies([assert_dimension]):
        return array_ops.identity(labels, name=scope)
def _check_logits(logits, expected_logits_dimension):
  """Check logits type and shape.

  Casts logits to float and validates — statically where possible, otherwise
  with runtime assert ops — that they are rank 2 with the expected last
  dimension. Returns the (identity of the) validated logits tensor.
  """
  with ops.name_scope(None, 'logits', (logits,)) as scope:
    logits = math_ops.to_float(logits)
    logits_shape = array_ops.shape(logits)
    assert_rank = check_ops.assert_rank(
        logits, 2, data=[logits_shape],
        message='logits shape must be [batch_size, logits_dimension]')
    # The returned identity must depend on the assert ops so they execute.
    with ops.control_dependencies([assert_rank]):
      static_shape = logits.shape
      if static_shape is not None:
        dim1 = static_shape[1]
        if (dim1 is not None) and (dim1 != expected_logits_dimension):
          raise ValueError(
              'logits shape must be [batch_size, logits_dimension], got %s.' %
              (static_shape,))
      assert_dimension = check_ops.assert_equal(
          expected_logits_dimension, logits_shape[1], data=[logits_shape],
          message='logits shape must be [batch_size, logits_dimension]')
      with ops.control_dependencies([assert_dimension]):
        return array_ops.identity(logits, name=scope)
def _indicator_labels_mean(labels, weights=None, name=None):
  """(Weighted) mean of 0/1 indicator labels, as a metric value/update pair."""
  with ops.name_scope(name, 'labels_mean', (labels, weights)) as scope:
    float_labels = math_ops.to_float(labels, name='labels')
    metric_weights = weights
    if metric_weights is not None:
      metric_weights = weights_broadcast_ops.broadcast_weights(
          metric_weights, float_labels)
    return metrics_lib.mean(float_labels, weights=metric_weights, name=scope)
def _accuracy_baseline(labels_mean):
  """Return accuracy baseline based on labels mean.

  This is the best the model could do by always predicting one class:
  max(p, 1 - p) where p is the mean of the indicator labels.

  Args:
    labels_mean: Tuple of value and update op.

  Returns:
    Tuple of value and update op.
  """
  with ops.name_scope(None, 'accuracy_baseline', labels_mean):
    value, update_op = labels_mean
    baseline_value = math_ops.maximum(value, 1. - value, name='value')
    baseline_update = math_ops.maximum(update_op, 1 - update_op,
                                       name='update_op')
    return (baseline_value, baseline_update)
def _predictions_mean(predictions, weights=None, name=None):
  """(Weighted) mean of predictions, as a metric value/update pair."""
  with ops.name_scope(
      name, 'predictions_mean', (predictions, weights)) as scope:
    float_preds = math_ops.to_float(predictions, name='predictions')
    metric_weights = weights
    if metric_weights is not None:
      metric_weights = weights_broadcast_ops.broadcast_weights(
          metric_weights, float_preds)
    return metrics_lib.mean(float_preds, weights=metric_weights, name=scope)
def _auc(labels, predictions, weights=None, curve='ROC', name=None):
  """AUC metric over `predictions`, optionally weighted.

  Non-boolean labels are cast to bool (with a warning), as required by
  `metrics_lib.auc`. `curve` selects 'ROC' or 'PR'.
  """
  with ops.name_scope(name, 'auc', (predictions, labels, weights)) as scope:
    float_preds = math_ops.to_float(predictions, name='predictions')
    bool_labels = labels
    if bool_labels.dtype.base_dtype != dtypes.bool:
      logging.warning('Casting %s labels to bool.', bool_labels.dtype)
      bool_labels = math_ops.cast(bool_labels, dtypes.bool)
    metric_weights = weights
    if metric_weights is not None:
      metric_weights = weights_broadcast_ops.broadcast_weights(
          metric_weights, float_preds)
    return metrics_lib.auc(labels=bool_labels,
                           predictions=float_preds,
                           weights=metric_weights,
                           curve=curve,
                           name=scope)
def _accuracy_at_threshold(labels, predictions, weights, threshold, name=None):
  """Accuracy after binarizing `predictions` at `threshold`."""
  with ops.name_scope(
      name, 'accuracy_at_%s' % threshold,
      (predictions, labels, weights, threshold)) as scope:
    # A prediction counts as positive when it reaches the threshold.
    binarized = math_ops.greater_equal(predictions, threshold)
    threshold_predictions = math_ops.to_float(binarized)
    return metrics_lib.accuracy(labels=labels,
                                predictions=threshold_predictions,
                                weights=weights,
                                name=scope)
def _precision_at_threshold(labels, predictions, weights, threshold, name=None):
  """Precision for the positive class at a single `threshold`."""
  with ops.name_scope(
      name, 'precision_at_%s' % threshold,
      (predictions, labels, weights, threshold)) as scope:
    # precision_at_thresholds returns one entry per threshold; squeeze away
    # the singleton dimension since exactly one threshold is passed.
    values, updates = metrics_lib.precision_at_thresholds(
        labels=labels, predictions=predictions, thresholds=(threshold,),
        weights=weights, name=scope)
    return array_ops.squeeze(values), array_ops.squeeze(updates)
def _recall_at_threshold(labels, predictions, weights, threshold, name=None):
  """Recall for the positive class at a single `threshold`.

  Args:
    labels: Ground-truth labels tensor.
    predictions: Probability predictions tensor.
    weights: Optional example weights.
    threshold: Float in (0, 1); predictions >= threshold count as positive.
    name: Optional name scope.

  Returns:
    Tuple of (value, update_op) scalar tensors.
  """
  with ops.name_scope(
      name, 'recall_at_%s' % threshold,
      (predictions, labels, weights, threshold)) as scope:
    # Fixed: result tensor was previously misnamed `precision_tensor`
    # (copy-paste from _precision_at_threshold); it holds recall values.
    recall_tensor, update_op = metrics_lib.recall_at_thresholds(
        labels=labels, predictions=predictions, thresholds=(threshold,),
        weights=weights, name=scope)
    # Squeeze the per-threshold dimension: exactly one threshold is passed.
    return array_ops.squeeze(recall_tensor), array_ops.squeeze(update_op)
def _multi_class_head_with_softmax_cross_entropy_loss(n_classes,
                                                      weight_column=None,
                                                      label_vocabulary=None,
                                                      name=None):
  """Creates a '_Head' for multi class classification.

  This head expects to be fed integer labels specifying the class index.

  Args:
    n_classes: Number of classes, must be greater than 2 (for 2 classes, use
      `_BinaryLogisticHeadWithSigmoidCrossEntropyLoss`).
    weight_column: A string or a `_NumericColumn` created by
      `tf.feature_column.numeric_column` defining feature column representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.
    label_vocabulary: A list of strings represents possible label values. If it
      is not given, that means labels are already encoded as integer within
      [0, n_classes). If given, labels must be string type and have any value
      in `label_vocabulary`. Also there will be errors if vocabulary is not
      provided and labels are string.
    name: name of the head. If provided, summary and metrics keys will be
      suffixed by `"/" + name`.

  Returns:
    An instance of `_Head` for multi class classification.

  Raises:
    ValueError: if `n_classes`, `metric_class_ids` or `label_keys` is invalid.
  """
  if label_vocabulary is not None:
    if not isinstance(label_vocabulary, (list, tuple)):
      raise ValueError(
          'label_vocabulary should be a list. Given type: {}'.format(
              type(label_vocabulary)))
  return _MultiClassHeadWithSoftmaxCrossEntropyLoss(n_classes, weight_column,
                                                    label_vocabulary, name)
class _MultiClassHeadWithSoftmaxCrossEntropyLoss(_Head):
  """See `_multi_class_head_with_softmax_cross_entropy_loss`."""

  def __init__(self,
               n_classes,
               weight_column=None,
               label_vocabulary=None,
               name=None):
    # Multi-class means strictly more than 2 classes; for 2 classes use the
    # binary logistic head instead.
    if (n_classes is None) or (n_classes <= 2):
      raise ValueError('n_classes must be > 2: %s.' % n_classes)
    self._n_classes = n_classes
    self._weight_column = weight_column
    self._label_vocabulary = label_vocabulary
    self._name = name

  @property
  def name(self):
    """See `_Head`."""
    return self._name

  @property
  def logits_dimension(self):
    """See `_Head`: one logit per class."""
    return self._n_classes

  def _eval_metric_ops(self, labels, probabilities, logits,
                       class_ids, weights, unweighted_loss):
    """Returns the Eval metric ops."""
    with ops.name_scope(
        None, 'metrics',
        (labels, probabilities, logits, class_ids, weights, unweighted_loss)):
      keys = metric_keys.MetricKeys
      metric_ops = {
          # Estimator already adds a metric for loss.
          # TODO(xiejw): Any other metrics?
          _summary_key(self._name, keys.LOSS_MEAN):
              metrics_lib.mean(
                  unweighted_loss, weights=weights, name=keys.LOSS_MEAN),
          _summary_key(self._name, keys.ACCURACY):
              metrics_lib.accuracy(
                  labels=labels,
                  predictions=class_ids,
                  weights=weights,
                  name=keys.ACCURACY),
      }
    return metric_ops

  def _label_ids(self, labels):
    """Converts labels to integer id space."""
    if self._label_vocabulary is None:
      # Without a vocabulary, labels must already be integer class ids.
      if not labels.dtype.is_integer:
        raise ValueError('Labels dtype should be integer '
                         'Instead got %s.' % labels.dtype)
      label_ids = labels
    else:
      # With a vocabulary, labels must be strings mapped through a lookup
      # table into [0, n_classes).
      if labels.dtype != dtypes.string:
        raise ValueError('Labels dtype should be string if there is a '
                         'vocabulary. Instead got {}'.format(labels.dtype))
      label_ids = lookup_ops.index_table_from_tensor(
          vocabulary_list=tuple(self._label_vocabulary),
          name='class_id_lookup').lookup(labels)
    return _assert_range(label_ids, self._n_classes)

  def create_loss(self, features, mode, logits, labels):
    """See `Head`."""
    del mode, features  # Unused for this head.
    label_ids = self._label_ids(_check_labels(_maybe_expand_dim(labels), 1))
    unweighted_loss = losses.sparse_softmax_cross_entropy(
        labels=label_ids, logits=logits, reduction=losses.Reduction.NONE)
    # Restore the squeezed dim, so unweighted_loss matches the weights shape.
    return LossAndLabels(
        unweighted_loss=array_ops.expand_dims(unweighted_loss, axis=(1,)),
        processed_labels=label_ids)

  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    with ops.name_scope('head'):
      logits = _check_logits(logits, self.logits_dimension)

      # Predict.
      pred_keys = prediction_keys.PredictionKeys
      with ops.name_scope(None, 'predictions', (logits,)):
        # class_ids's shape is [batch_size]
        class_ids = math_ops.argmax(logits, 1, name=pred_keys.CLASS_IDS)
        class_ids = array_ops.expand_dims(class_ids, axis=(1,))
        if self._label_vocabulary:
          # Map integer ids back to the vocabulary strings for export.
          table = lookup_ops.index_to_string_table_from_tensor(
              vocabulary_list=self._label_vocabulary,
              name='class_string_lookup')
          classes = table.lookup(class_ids)
        else:
          classes = string_ops.as_string(class_ids, name='str_classes')
        probabilities = nn.softmax(logits, name=pred_keys.PROBABILITIES)
        predictions = {
            pred_keys.LOGITS: logits,
            pred_keys.PROBABILITIES: probabilities,
            # Expand to [batch_size, 1]
            pred_keys.CLASS_IDS: class_ids,
            pred_keys.CLASSES: classes,
        }
      if mode == model_fn.ModeKeys.PREDICT:
        # ClassificationOutput needs a [batch_size, n_classes] string tensor
        # of class names, so tile the class list across the batch.
        batch_size = array_ops.shape(probabilities)[0]
        export_class_list = self._label_vocabulary
        if not export_class_list:
          export_class_list = string_ops.as_string(
              math_ops.range(self._n_classes))
        export_output_classes = array_ops.tile(
            input=array_ops.expand_dims(input=export_class_list, axis=0),
            multiples=[batch_size, 1])
        classifier_output = export_output.ClassificationOutput(
            scores=probabilities,
            # `ClassificationOutput` requires string classes.
            classes=export_output_classes)
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                _DEFAULT_SERVING_KEY: classifier_output,
                _CLASSIFY_SERVING_KEY: classifier_output,
                _PREDICT_SERVING_KEY: export_output.PredictOutput(predictions)
            })

      # Eval.
      unweighted_loss, label_ids = self.create_loss(
          features=features, mode=mode, logits=logits, labels=labels)
      weights = _weights(features, self._weight_column)
      training_loss = losses.compute_weighted_loss(
          unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=self._eval_metric_ops(
                labels=label_ids,
                probabilities=probabilities,
                logits=logits,
                class_ids=class_ids,
                unweighted_loss=unweighted_loss,
                weights=weights))

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
      with ops.name_scope(''):
        # Summaries are written at the root scope so they are not nested
        # under 'head' in TensorBoard.
        summary.scalar(
            _summary_key(self._name, metric_keys.MetricKeys.LOSS),
            training_loss)
        summary.scalar(
            _summary_key(self._name, metric_keys.MetricKeys.LOSS_MEAN),
            losses.compute_weighted_loss(
                unweighted_loss, weights=weights,
                reduction=losses.Reduction.MEAN))
      return model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.TRAIN,
          predictions=predictions,
          loss=training_loss,
          train_op=train_op_fn(training_loss))
def _binary_logistic_head_with_sigmoid_cross_entropy_loss(
    weight_column=None, thresholds=None, label_vocabulary=None, name=None):
  """Creates a `Head` for single label binary classification.

  This head uses `sigmoid_cross_entropy_with_logits` loss.
  This head expects to be fed float labels of shape `(batch_size, 1)`.

  Args:
    weight_column: A string or a `_NumericColumn` created by
      `tf.feature_column.numeric_column` defining feature column representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.
    thresholds: Iterable of floats in the range `(0, 1)`. For binary
      classification metrics such as precision and recall, an eval metric is
      generated for each threshold value. This threshold is applied to the
      logistic values to determine the binary classification (i.e., above the
      threshold is `true`, below is `false`).
    label_vocabulary: A list of strings represents possible label values. If it
      is not given, that means labels are already encoded within [0, 1]. If
      given, labels must be string type and have any value in
      `label_vocabulary`. Also there will be errors if vocabulary is not
      provided and labels are string.
    name: name of the head. If provided, summary and metrics keys will be
      suffixed by `"/" + name`.

  Returns:
    An instance of `Head` for binary classification.

  Raises:
    ValueError: if `thresholds` contains a value outside of `(0, 1)`.
  """
  if thresholds:
    thresholds = tuple(thresholds)
  else:
    thresholds = tuple()
  if label_vocabulary is not None:
    if not isinstance(label_vocabulary, (list, tuple)):
      raise ValueError(
          'label_vocabulary should be a list. Given type: {}'.format(
              type(label_vocabulary)))
  if any((t <= 0.0) or (t >= 1.0) for t in thresholds):
    raise ValueError('thresholds not in (0, 1): %s.' % (thresholds,))
  return _BinaryLogisticHeadWithSigmoidCrossEntropyLoss(
      weight_column=weight_column,
      thresholds=thresholds,
      label_vocabulary=label_vocabulary,
      name=name)
class _BinaryLogisticHeadWithSigmoidCrossEntropyLoss(_Head):
  """See `_binary_logistic_head_with_sigmoid_cross_entropy_loss`."""

  def __init__(self,
               weight_column=None,
               thresholds=None,
               label_vocabulary=None,
               name=None):
    # Arguments are assumed pre-validated by the factory function
    # `_binary_logistic_head_with_sigmoid_cross_entropy_loss`.
    self._weight_column = weight_column
    self._thresholds = thresholds
    self._label_vocabulary = label_vocabulary
    self._name = name

  @property
  def name(self):
    """See `_Head`."""
    return self._name

  @property
  def logits_dimension(self):
    """See `_Head`: binary classification uses a single logit."""
    return 1

  def _eval_metric_ops(self,
                       labels,
                       logits,
                       logistic,
                       scores,
                       class_ids,
                       unweighted_loss,
                       weights=None):
    # Builds the full eval metric dict: loss mean, accuracy, prediction/label
    # means, accuracy baseline, ROC and PR AUC, plus per-threshold
    # accuracy/precision/recall for each configured threshold.
    with ops.name_scope(None, 'metrics', (labels, logits, logistic, scores,
                                          class_ids, unweighted_loss, weights)):
      keys = metric_keys.MetricKeys
      labels_mean = _indicator_labels_mean(
          labels=labels, weights=weights, name=keys.LABEL_MEAN)
      metric_ops = {
          # Estimator already adds a metric for loss.
          _summary_key(self._name, keys.LOSS_MEAN):
              metrics_lib.mean(
                  unweighted_loss, weights=weights, name=keys.LOSS_MEAN),
          _summary_key(self._name, keys.ACCURACY):
              metrics_lib.accuracy(
                  labels=labels,
                  predictions=class_ids,
                  weights=weights,
                  name=keys.ACCURACY),
          _summary_key(self._name, keys.PREDICTION_MEAN):
              _predictions_mean(
                  predictions=logistic,
                  weights=weights,
                  name=keys.PREDICTION_MEAN),
          _summary_key(self._name, keys.LABEL_MEAN):
              labels_mean,
          _summary_key(self._name, keys.ACCURACY_BASELINE):
              _accuracy_baseline(labels_mean),
          _summary_key(self._name, keys.AUC):
              _auc(
                  labels=labels,
                  predictions=logistic,
                  weights=weights,
                  name=keys.AUC),
          _summary_key(self._name, keys.AUC_PR):
              _auc(
                  labels=labels,
                  predictions=logistic,
                  weights=weights,
                  curve='PR',
                  name=keys.AUC_PR)
      }
      for threshold in self._thresholds:
        accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
        metric_ops[_summary_key(self._name,
                                accuracy_key)] = _accuracy_at_threshold(
                                    labels=labels,
                                    predictions=logistic,
                                    weights=weights,
                                    threshold=threshold,
                                    name=accuracy_key)
        # Precision for positive examples.
        precision_key = keys.PRECISION_AT_THRESHOLD % threshold
        metric_ops[_summary_key(self._name,
                                precision_key)] = _precision_at_threshold(
                                    labels=labels,
                                    predictions=logistic,
                                    weights=weights,
                                    threshold=threshold,
                                    name=precision_key)
        # Recall for positive examples.
        recall_key = keys.RECALL_AT_THRESHOLD % threshold
        metric_ops[_summary_key(self._name,
                                recall_key)] = _recall_at_threshold(
                                    labels=labels,
                                    predictions=logistic,
                                    weights=weights,
                                    threshold=threshold,
                                    name=recall_key)
      return metric_ops

  def create_loss(self, features, mode, logits, labels):
    """See `Head`."""
    del mode, features  # Unused for this head.
    labels = _check_labels(_maybe_expand_dim(labels), self.logits_dimension)
    if self._label_vocabulary is not None:
      # Map string labels through the vocabulary into {0, 1} ids.
      labels = lookup_ops.index_table_from_tensor(
          vocabulary_list=tuple(self._label_vocabulary),
          name='class_id_lookup').lookup(labels)
    labels = math_ops.to_float(labels)
    labels = _assert_range(labels, 2)
    return LossAndLabels(
        unweighted_loss=nn.sigmoid_cross_entropy_with_logits(
            labels=labels, logits=logits),
        processed_labels=labels)

  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    # Predict.
    with ops.name_scope('head'):
      with ops.name_scope(None, 'predictions', (logits,)):
        pred_keys = prediction_keys.PredictionKeys
        logits = _check_logits(logits, self.logits_dimension)
        logistic = math_ops.sigmoid(logits, name=pred_keys.LOGISTIC)
        # Build two-class logits [0, logit] so softmax yields
        # [1 - logistic, logistic] as class probabilities.
        two_class_logits = array_ops.concat(
            (array_ops.zeros_like(logits), logits), 1, name='two_class_logits')
        scores = nn.softmax(two_class_logits, name=pred_keys.PROBABILITIES)
        class_ids = array_ops.reshape(
            math_ops.argmax(two_class_logits, axis=1), (-1, 1), name='classes')
        if self._label_vocabulary:
          # Map integer ids back to the vocabulary strings for export.
          table = lookup_ops.index_to_string_table_from_tensor(
              vocabulary_list=self._label_vocabulary,
              name='class_string_lookup')
          classes = table.lookup(class_ids)
        else:
          classes = string_ops.as_string(class_ids, name='str_classes')
        predictions = {
            pred_keys.LOGITS: logits,
            pred_keys.LOGISTIC: logistic,
            pred_keys.PROBABILITIES: scores,
            pred_keys.CLASS_IDS: class_ids,
            pred_keys.CLASSES: classes,
        }
      if mode == model_fn.ModeKeys.PREDICT:
        # ClassificationOutput needs a [batch_size, 2] string tensor of class
        # names, so tile the class list across the batch.
        batch_size = array_ops.shape(logistic)[0]
        export_class_list = self._label_vocabulary
        if not export_class_list:
          export_class_list = string_ops.as_string([0, 1])
        export_output_classes = array_ops.tile(
            input=array_ops.expand_dims(input=export_class_list, axis=0),
            multiples=[batch_size, 1])
        classifier_output = export_output.ClassificationOutput(
            scores=scores,
            # `ClassificationOutput` requires string classes.
            classes=export_output_classes)
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                _DEFAULT_SERVING_KEY: classifier_output,
                _CLASSIFY_SERVING_KEY: classifier_output,
                _REGRESS_SERVING_KEY: export_output.RegressionOutput(
                    value=logistic),
                _PREDICT_SERVING_KEY: export_output.PredictOutput(predictions)
            })

      # Eval.
      unweighted_loss, processed_labels = self.create_loss(
          features=features, mode=mode, logits=logits, labels=labels)
      weights = _weights(features, self._weight_column)
      training_loss = losses.compute_weighted_loss(
          unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=self._eval_metric_ops(
                labels=processed_labels,
                logits=logits,
                logistic=logistic,
                scores=scores,
                class_ids=class_ids,
                unweighted_loss=unweighted_loss,
                weights=weights))

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
      with ops.name_scope(''):
        # Summaries are written at the root scope so they are not nested
        # under 'head' in TensorBoard.
        summary.scalar(
            _summary_key(self._name, metric_keys.MetricKeys.LOSS),
            training_loss)
        summary.scalar(
            _summary_key(self._name, metric_keys.MetricKeys.LOSS_MEAN),
            losses.compute_weighted_loss(
                unweighted_loss, weights=weights,
                reduction=losses.Reduction.MEAN))
      return model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.TRAIN,
          predictions=predictions,
          loss=training_loss,
          train_op=train_op_fn(training_loss))
def _regression_head_with_mean_squared_error_loss(weight_column=None,
                                                  label_dimension=1,
                                                  name=None):
    """Creates a `_Head` for regression using the mean squared loss.

    Args:
      weight_column: A string or a `_NumericColumn` created by
        `tf.feature_column.numeric_column` defining feature column representing
        weights. It is used to down weight or boost examples during training;
        it is multiplied by the per-example loss.
      label_dimension: Number of regression labels per example, i.e. the size
        of the last dimension of the labels `Tensor` (typically shaped
        `[batch_size, label_dimension]`).
      name: Name of the head. If provided, summary and metrics keys are
        suffixed by `"/" + name`.

    Returns:
      An instance of `_Head` for linear regression.
    """
    return _RegressionHeadWithMeanSquaredErrorLoss(
        label_dimension=label_dimension,
        weight_column=weight_column,
        name=name)
class _RegressionHeadWithMeanSquaredErrorLoss(_Head):
    """`Head` for regression using the mean squared loss."""

    def __init__(self, label_dimension, weight_column=None, name=None):
        """`Head` for regression."""
        # The head must predict at least one value per example.
        if label_dimension < 1:
            raise ValueError('Invalid label_dimension %s.' % label_dimension)
        self._logits_dimension = label_dimension
        self._weight_column = weight_column
        self._name = name

    @property
    def name(self):
        return self._name

    @property
    def logits_dimension(self):
        return self._logits_dimension

    def create_loss(self, features, mode, logits, labels):
        """See `Head`."""
        del mode, features  # Unused for this head.
        # Labels are cast to float and shape-checked against the logits rank.
        checked_labels = _check_labels(
            _maybe_expand_dim(math_ops.to_float(labels)),
            self._logits_dimension)
        per_example_loss = losses.mean_squared_error(
            labels=checked_labels,
            predictions=logits,
            reduction=losses.Reduction.NONE)
        return LossAndLabels(
            unweighted_loss=per_example_loss,
            processed_labels=checked_labels)

    def create_estimator_spec(
        self, features, mode, logits, labels=None, train_op_fn=None):
        """See `Head`."""
        with ops.name_scope('head'):
            logits = _check_logits(logits, self._logits_dimension)
            predictions = {prediction_keys.PredictionKeys.PREDICTIONS: logits}

            # Predict: expose regression and generic serving signatures.
            if mode == model_fn.ModeKeys.PREDICT:
                regression_output = export_output.RegressionOutput(value=logits)
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        _DEFAULT_SERVING_KEY: regression_output,
                        _REGRESS_SERVING_KEY: regression_output,
                        _PREDICT_SERVING_KEY: export_output.PredictOutput(predictions)
                    })

            # Shared by Eval and Train: weighted sum of per-example losses.
            per_example_loss, _ = self.create_loss(
                features=features, mode=mode, logits=logits, labels=labels)
            example_weights = _weights(features, self._weight_column)
            summed_loss = losses.compute_weighted_loss(
                per_example_loss, weights=example_weights,
                reduction=losses.Reduction.SUM)

            if mode == model_fn.ModeKeys.EVAL:
                # Estimator already adds a metric for loss.
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=summed_loss,
                    eval_metric_ops={
                        metric_keys.MetricKeys.LOSS_MEAN: metrics_lib.mean(
                            per_example_loss, weights=example_weights)
                    })

            # Train.
            if train_op_fn is None:
                raise ValueError('train_op_fn can not be None.')
            # Summaries live in the root name scope so keys are stable.
            with ops.name_scope(''):
                summary.scalar(
                    _summary_key(self._name, metric_keys.MetricKeys.LOSS),
                    summed_loss)
                summary.scalar(
                    _summary_key(self._name, metric_keys.MetricKeys.LOSS_MEAN),
                    losses.compute_weighted_loss(
                        per_example_loss, weights=example_weights,
                        reduction=losses.Reduction.MEAN))
            return model_fn.EstimatorSpec(
                mode=model_fn.ModeKeys.TRAIN,
                predictions=predictions,
                loss=summed_loss,
                train_op=train_op_fn(summed_loss))
def _assert_range(labels, n_classes):
    """Returns `labels` with runtime assertions that ids lie in [0, n_classes)."""
    with ops.name_scope(None, 'assert_range', (labels,)):
        upper_bound = ops.convert_to_tensor(n_classes, dtype=labels.dtype)
        assert_less = check_ops.assert_less(
            labels, upper_bound, message='Label IDs must < n_classes')
        assert_non_negative = check_ops.assert_non_negative(
            labels, message='Label IDs must >= 0')
        # The identity op carries the control dependencies on both checks.
        with ops.control_dependencies((assert_less, assert_non_negative)):
            return array_ops.identity(labels)
def _weights(features, weight_column):
    """Fetches weights from features."""
    with ops.name_scope(None, 'weights', values=features.values()):
        # No weight column: every example carries unit weight.
        if weight_column is None:
            return 1.
        # A string names a numeric feature column holding the weights.
        if isinstance(weight_column, six.string_types):
            weight_column = feature_column_lib.numeric_column(key=weight_column)
        if not isinstance(weight_column, feature_column_lib._NumericColumn):  # pylint: disable=protected-access
            raise TypeError('Weight column must be either a string or _NumericColumn.'
                            ' Given type: {}.'.format(type(weight_column)))
        weights = weight_column._get_dense_tensor(  # pylint: disable=protected-access
            feature_column_lib._LazyBuilder(features))  # pylint: disable=protected-access
        if not (weights.dtype.is_floating or weights.dtype.is_integer):
            raise ValueError('Weight column should be castable to float. '
                             'Given dtype: {}'.format(weights.dtype))
        return _maybe_expand_dim(math_ops.to_float(weights, name='weights'))
| |
"""Base option parser setup"""
import sys
import optparse
import pkg_resources
import os
from distutils.util import strtobool
from pip.backwardcompat import ConfigParser, string_types
from pip.locations import default_config_file, default_log_file
class UpdatingDefaultsHelpFormatter(optparse.IndentedHelpFormatter):
    """Help formatter for ConfigOptionParser that refreshes the parser's
    defaults (from config files and the environment) before expanding them,
    so that %default placeholders show the effective values."""

    def expand_default(self, option):
        parser = self.parser
        if parser is not None:
            # Pull config-file/environment overrides into the defaults first.
            parser.update_defaults(parser.defaults)
        return optparse.IndentedHelpFormatter.expand_default(self, option)
class ConfigOptionParser(optparse.OptionParser):
    """Custom option parser which updates its defaults by checking the
    configuration files and environmental variables."""

    def __init__(self, *args, **kwargs):
        self.config = ConfigParser.RawConfigParser()
        self.name = kwargs.pop('name')
        self.files = self.get_config_files()
        self.config.read(self.files)
        assert self.name
        optparse.OptionParser.__init__(self, *args, **kwargs)

    def get_config_files(self):
        """Returns the config files to read; PIP_CONFIG_FILE overrides the
        default location when it points at an existing file."""
        config_file = os.environ.get('PIP_CONFIG_FILE', False)
        if config_file and os.path.exists(config_file):
            return [config_file]
        return [default_config_file]

    def update_defaults(self, defaults):
        """Updates the given defaults with values from the config files and
        the environ. Does a little special handling for certain types of
        options (lists)."""
        # Then go and look for the other sources of configuration:
        config = {}
        # 1. config files
        for section in ('global', self.name):
            config.update(dict(self.get_config_section(section)))
        # 2. environmental variables
        config.update(dict(self.get_environ_vars()))
        # Then set the options with those values
        for key, val in config.items():
            key = key.replace('_', '-')
            if not key.startswith('--'):
                key = '--%s' % key  # only prefer long opts
            option = self.get_option(key)
            if option is not None:
                # ignore empty values
                if not val:
                    continue
                # handle multiline configs
                if option.action == 'append':
                    val = val.split()
                else:
                    option.nargs = 1
                if option.action in ('store_true', 'store_false', 'count'):
                    val = strtobool(val)
                try:
                    val = option.convert_value(key, val)
                except optparse.OptionValueError:
                    e = sys.exc_info()[1]
                    # FIX: corrected spelling of "occurred" in this message.
                    print("An error occurred during configuration: %s" % e)
                    sys.exit(3)
                defaults[option.dest] = val
        return defaults

    def get_config_section(self, name):
        """Get a section of a configuration as (key, value) items."""
        if self.config.has_section(name):
            return self.config.items(name)
        return []

    def get_environ_vars(self, prefix='PIP_'):
        """Yields (name, value) for all environment vars with the prefix,
        with the prefix stripped and the name lowercased."""
        for key, val in os.environ.items():
            if key.startswith(prefix):
                # FIX: strip only the *leading* prefix. The previous
                # key.replace(prefix, '') removed every occurrence, so e.g.
                # PIP_PIP_X collapsed to 'x' instead of 'pip_x'.
                yield (key[len(prefix):].lower(), val)

    def get_default_values(self):
        """Overriding to make updating the defaults after instantiation of
        the option parser possible, update_defaults() does the dirty work."""
        if not self.process_default_values:
            # Old, pre-Optik 1.5 behaviour.
            return optparse.Values(self.defaults)
        defaults = self.update_defaults(self.defaults.copy())  # ours
        for option in self._get_all_options():
            default = defaults.get(option.dest)
            if isinstance(default, string_types):
                opt_str = option.get_opt_string()
                defaults[option.dest] = option.check_value(opt_str, default)
        return optparse.Values(defaults)
# Determine pip's own version string (shown by --version); left as None when
# pip is being run straight from a checkout without being installed.
try:
    pip_dist = pkg_resources.get_distribution('pip')
    version = '{0} from {1} (python {2})'.format(
        pip_dist, pip_dist.location, sys.version[:3])
except pkg_resources.DistributionNotFound:
    # when running pip.py without installing
    version = None
# Module-level parser shared by pip's entry points. The options below are the
# global flags accepted in front of any subcommand.
parser = ConfigOptionParser(
    usage='%prog COMMAND [OPTIONS]',
    version=version,
    add_help_option=False,
    formatter=UpdatingDefaultsHelpFormatter(),
    name='global')
parser.add_option(
    '-h', '--help',
    dest='help',
    action='store_true',
    help='Show help')
# --- virtualenv-related options ---
parser.add_option(
    '-E', '--environment',
    dest='venv',
    metavar='DIR',
    help='virtualenv environment to run pip in (either give the '
    'interpreter or the environment base directory)')
parser.add_option(
    '-s', '--enable-site-packages',
    dest='site_packages',
    action='store_true',
    help='Include site-packages in virtualenv if one is to be '
    'created. Ignored if --environment is not used or '
    'the virtualenv already exists.')
parser.add_option(
    # Defines a default root directory for virtualenvs, relative
    # virtualenvs names/paths are considered relative to it.
    '--virtualenv-base',
    dest='venv_base',
    type='str',
    default='',
    help=optparse.SUPPRESS_HELP)
parser.add_option(
    # Run only if inside a virtualenv, bail if not.
    '--require-virtualenv', '--require-venv',
    dest='require_venv',
    action='store_true',
    default=False,
    help=optparse.SUPPRESS_HELP)
parser.add_option(
    # Use automatically an activated virtualenv instead of installing
    # globally. -E will be ignored if used.
    '--respect-virtualenv', '--respect-venv',
    dest='respect_venv',
    action='store_true',
    default=False,
    help=optparse.SUPPRESS_HELP)
# --- verbosity and logging options ---
parser.add_option(
    '-v', '--verbose',
    dest='verbose',
    action='count',
    default=0,
    help='Give more output')
parser.add_option(
    '-q', '--quiet',
    dest='quiet',
    action='count',
    default=0,
    help='Give less output')
parser.add_option(
    '--log',
    dest='log',
    metavar='FILENAME',
    help='Log file where a complete (maximum verbosity) record will be kept')
parser.add_option(
    # Write the log levels explicitly to the log.
    '--log-explicit-levels',
    dest='log_explicit_levels',
    action='store_true',
    default=False,
    help=optparse.SUPPRESS_HELP)
parser.add_option(
    # The default log file
    '--local-log', '--log-file',
    dest='log_file',
    metavar='FILENAME',
    default=default_log_file,
    help=optparse.SUPPRESS_HELP)
# --- interactivity and network options ---
parser.add_option(
    # Don't ask for input
    '--no-input',
    dest='no_input',
    action='store_true',
    default=False,
    help=optparse.SUPPRESS_HELP)
parser.add_option(
    '--proxy',
    dest='proxy',
    type='str',
    default='',
    help="Specify a proxy in the form user:passwd@proxy.server:port. "
    "Note that the user:password@ is optional and required only if you "
    "are behind an authenticated proxy. If you provide "
    "user@proxy.server:port then you will be prompted for a password.")
parser.add_option(
    '--timeout', '--default-timeout',
    metavar='SECONDS',
    dest='timeout',
    type='float',
    default=15,
    help='Set the socket timeout (default %default seconds)')
parser.add_option(
    # The default version control system for editables, e.g. 'svn'
    '--default-vcs',
    dest='default_vcs',
    type='str',
    default='',
    help=optparse.SUPPRESS_HELP)
parser.add_option(
    # A regex to be used to skip requirements
    '--skip-requirements-regex',
    dest='skip_requirements_regex',
    type='str',
    default='',
    help=optparse.SUPPRESS_HELP)
# Stop parsing global options at the first positional arg, so that flags after
# the subcommand name are handled by the subcommand's own parser.
parser.disable_interspersed_args()
| |
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
#
# progressbar - Text progressbar library for python.
# Copyright (c) 2005 Nilton Volpato
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Text progressbar library for python.
This library provides a text mode progressbar. This is tipically used
to display the progress of a long running operation, providing a
visual clue that processing is underway.
The ProgressBar class manages the progress, and the format of the line
is given by a number of widgets. A widget is an object that may
display diferently depending on the state of the progress. There are
three types of widget:
- a string, which always shows itself;
- a ProgressBarWidget, which may return a diferent value every time
it's update method is called; and
- a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it
expands to fill the remaining width of the line.
The progressbar module is very easy to use, yet very powerful. And
automatically supports features like auto-resizing when available.
"""
# Package metadata consumed by documentation and packaging tools.
__author__ = "Nilton Volpato"
__author_email__ = "first-name dot last-name @ gmail.com"
__date__ = "2006-05-07"
__version__ = "2.2"
# Changelog
#
# 2006-05-07: v2.2 fixed bug in windows
# 2005-12-04: v2.1 autodetect terminal width, added start method
# 2005-12-04: v2.0 everything is now a widget (wow!)
# 2005-12-03: v1.0 rewrite using widgets
# 2005-06-02: v0.5 rewrite
# 2004-??-??: v0.1 first version
import sys, time
from array import array
try:
from fcntl import ioctl
import termios
except ImportError:
pass
import signal
class ProgressBarWidget(object):
    """An element of ProgressBar formatting.

    The ProgressBar object calls its update value when an update is needed.
    Its size may change between calls, but the results will not be good if
    the size changes drastically and repeatedly.
    """

    def update(self, pbar):
        """Returns the string representing the widget.

        The parameter pbar is a reference to the calling ProgressBar, where
        one can access attributes of the class for knowing how the update
        must be made. At least this function must be overridden.
        """
        pass


class ProgressBarWidgetHFill(object):
    """A variable-width element of ProgressBar formatting.

    The ProgressBar object calls its update value, informing the width this
    object must be made. This is like TeX \\hfill: it expands to fill the
    line. More than one can be used on the same line; they all get the same
    width and together fill the line.
    """

    def update(self, pbar, width):
        """Returns the string representing the widget.

        The parameter pbar is a reference to the calling ProgressBar; the
        parameter width is the total horizontal width the widget must have.
        At least this function must be overridden.
        """
        pass


class ETA(ProgressBarWidget):
    "Widget for the Estimated Time of Arrival"

    def format_time(self, seconds):
        # Render a duration as HH:MM:SS.
        return time.strftime('%H:%M:%S', time.gmtime(seconds))

    def update(self, pbar):
        if pbar.currval == 0:
            # Nothing to extrapolate from yet.
            return 'ETA: --:--:--'
        if pbar.finished:
            return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
        # Linear extrapolation: total_time = elapsed * maxval / currval.
        elapsed = pbar.seconds_elapsed
        remaining = elapsed * pbar.maxval / pbar.currval - elapsed
        return 'ETA: %s' % self.format_time(remaining)


class FileTransferSpeed(ProgressBarWidget):
    "Widget for showing the transfer speed (useful for file transfers)."

    def __init__(self):
        self.fmt = '%6.2f %s'
        self.units = ['B', 'K', 'M', 'G', 'T', 'P']

    def update(self, pbar):
        # Guard against division by (effectively) zero elapsed time.
        if pbar.seconds_elapsed < 2e-6:
            speed = 0.0
        else:
            speed = float(pbar.currval) / pbar.seconds_elapsed
        # Scale down by 1000 until the value fits the current unit.
        for unit in self.units:
            if speed < 1000:
                break
            speed /= 1000
        return self.fmt % (speed, unit + '/s')


class RotatingMarker(ProgressBarWidget):
    "A rotating marker for filling the bar of progress."

    def __init__(self, markers='|/-\\'):
        self.markers = markers
        self.curmark = -1

    def update(self, pbar):
        if pbar.finished:
            # Settle on the first marker once the bar completes.
            return self.markers[0]
        self.curmark = (self.curmark + 1) % len(self.markers)
        return self.markers[self.curmark]


class Percentage(ProgressBarWidget):
    "Just the percentage done."

    def update(self, pbar):
        return '%3d%%' % pbar.percentage()


class Bar(ProgressBarWidgetHFill):
    "The bar of progress. It will stretch to fill the line."

    def __init__(self, marker='#', left='|', right='|'):
        # marker may be a plain string or a widget (e.g. RotatingMarker).
        self.marker = marker
        self.left = left
        self.right = right

    def _format_marker(self, pbar):
        # BUG FIX: the original tested isinstance(marker, (str, unicode)),
        # which raises NameError on Python 3 where `unicode` is undefined.
        # Duck-typing on `update` keeps str (and py2 unicode) markers working
        # on both Python versions: only widget markers have an update method.
        if hasattr(self.marker, 'update'):
            return self.marker.update(pbar)
        return self.marker

    def update(self, pbar, width):
        percent = pbar.percentage()
        cwidth = width - len(self.left) - len(self.right)
        marked_width = int(percent * cwidth / 100)
        m = self._format_marker(pbar)
        return self.left + (m * marked_width).ljust(cwidth) + self.right


class ReverseBar(Bar):
    "The reverse bar of progress, or bar of regress. :)"

    def update(self, pbar, width):
        percent = pbar.percentage()
        cwidth = width - len(self.left) - len(self.right)
        marked_width = int(percent * cwidth / 100)
        m = self._format_marker(pbar)
        return self.left + (m * marked_width).rjust(cwidth) + self.right


# Widgets used when ProgressBar is constructed without an explicit list.
default_widgets = [Percentage(), ' ', Bar()]
class ProgressBar(object):
    """This is the ProgressBar class, it updates and prints the bar.

    The term_width parameter may be an integer, or None, in which case it
    will try to guess it; if that fails it defaults to 80 columns.

    The simple use is like this:
    >>> pbar = ProgressBar().start()
    >>> for i in range(100):
    ...    # do something
    ...    pbar.update(i+1)
    ...
    >>> pbar.finish()

    You can supply different widgets of any type in any order, and even
    write your own widgets. When implementing a widget's update method you
    may access any attribute or function of the calling ProgressBar. The
    most useful ones are:
    - currval: current value of the progress, 0 <= currval <= maxval
    - maxval: maximum (and final) value of the progress
    - finished: True if the bar has finished (reached 100%), False o/w
    - start_time: first time update() method of ProgressBar was called
    - seconds_elapsed: seconds elapsed since start_time
    - percentage(): percentage of the progress (this is a method)
    """

    def __init__(self, maxval=100, widgets=default_widgets, term_width=None,
                 fd=sys.stderr):
        assert maxval > 0
        self.maxval = maxval
        self.widgets = widgets
        self.fd = fd
        self.signal_set = False
        if term_width is None:
            try:
                # Probe the terminal once, then track resizes via SIGWINCH.
                self.handle_resize(None, None)
                signal.signal(signal.SIGWINCH, self.handle_resize)
                self.signal_set = True
            except Exception:
                # FIX: was a bare `except:`. Not a tty / no SIGWINCH / no
                # ioctl support — fall back to a fixed width.
                self.term_width = 79
        else:
            self.term_width = term_width
        self.currval = 0
        self.finished = False
        self.prev_percentage = -1
        self.start_time = None
        self.seconds_elapsed = 0

    def handle_resize(self, signum, frame):
        # TIOCGWINSZ returns (rows, cols, xpixel, ypixel) as 4 shorts.
        # FIX: the ioctl buffer must be bytes on Python 3 (was '\0'*8).
        h, w = array('h', ioctl(self.fd, termios.TIOCGWINSZ, b'\0' * 8))[:2]
        self.term_width = w

    def percentage(self):
        "Returns the percentage of the progress."
        return self.currval * 100.0 / self.maxval

    def _format_widgets(self):
        # First pass: render fixed widgets and tally their width; defer
        # hfill widgets so the leftover width can be split among them.
        r = []
        hfill_inds = []
        num_hfill = 0
        currwidth = 0
        for i, w in enumerate(self.widgets):
            if isinstance(w, ProgressBarWidgetHFill):
                r.append(w)
                hfill_inds.append(i)
                num_hfill += 1
            elif hasattr(w, 'update'):
                # FIX: the original distinguished strings from widgets with
                # isinstance(w, (str, unicode)), a NameError on Python 3.
                # Widget-like objects are the ones with an update method.
                weval = w.update(self)
                currwidth += len(weval)
                r.append(weval)
            else:
                # Plain string widget: shown verbatim.
                r.append(w)
                currwidth += len(w)
        for iw in hfill_inds:
            # FIX: floor division keeps the width an int on Python 3
            # (identical result to py2's int `/`).
            r[iw] = r[iw].update(
                self, (self.term_width - currwidth) // num_hfill)
        return r

    def _format_line(self):
        return ''.join(self._format_widgets()).ljust(self.term_width)

    def _need_update(self):
        # Only redraw when the integer percentage actually changed.
        return int(self.percentage()) != int(self.prev_percentage)

    def update(self, value):
        "Updates the progress bar to a new value."
        assert 0 <= value <= self.maxval
        self.currval = value
        if not self._need_update() or self.finished:
            return
        if not self.start_time:
            self.start_time = time.time()
        self.seconds_elapsed = time.time() - self.start_time
        self.prev_percentage = self.percentage()
        if value != self.maxval:
            # Carriage return keeps the bar on one terminal line.
            self.fd.write(self._format_line() + '\r')
        else:
            self.finished = True
            self.fd.write(self._format_line() + '\n')

    def start(self):
        """Start measuring time, and prints the bar at 0%.

        It returns self so you can use it like this:
        >>> pbar = ProgressBar().start()
        >>> for i in range(100):
        ...    # do something
        ...    pbar.update(i+1)
        ...
        >>> pbar.finish()
        """
        self.update(0)
        return self

    def finish(self):
        """Used to tell the progress is finished."""
        self.update(self.maxval)
        if self.signal_set:
            # Restore the default SIGWINCH handler we replaced in __init__.
            signal.signal(signal.SIGWINCH, signal.SIG_DFL)
# Self-test / demo harness: renders several bar configurations.
# NOTE(review): written for Python 2 — the bare `print` statements below
# printed a newline on py2; on py3 they merely evaluate the print function.
if __name__ == '__main__':
    import os

    def example1():
        # Full-featured bar: rotating marker, ETA and transfer speed.
        widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
                   ' ', ETA(), ' ', FileTransferSpeed()]
        pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
        for i in range(1000000):
            # do something
            pbar.update(10*i+1)
        pbar.finish()
        print

    def example2():
        class CrazyFileTransferSpeed(FileTransferSpeed):
            "It's bigger between 45 and 80 percent"
            def update(self, pbar):
                if 45 < pbar.percentage() < 80:
                    return 'Bigger Now ' + FileTransferSpeed.update(self, pbar)
                else:
                    return FileTransferSpeed.update(self, pbar)
        widgets = [CrazyFileTransferSpeed(), ' <<<', Bar(), '>>> ',
                   Percentage(), ' ', ETA()]
        pbar = ProgressBar(widgets=widgets, maxval=10000000)
        # maybe do something
        pbar.start()
        for i in range(2000000):
            # do something
            pbar.update(5*i+1)
        pbar.finish()
        print

    def example3():
        # Opposed bars growing toward each other.
        widgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')]
        pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
        for i in range(1000000):
            # do something
            pbar.update(10*i+1)
        pbar.finish()
        print

    def example4():
        # Custom marker and brackets, driven by real sleeps.
        widgets = ['Test: ', Percentage(), ' ',
                   Bar(marker='0', left='[', right=']'),
                   ' ', ETA(), ' ', FileTransferSpeed()]
        pbar = ProgressBar(widgets=widgets, maxval=500)
        pbar.start()
        for i in range(100, 500+1, 50):
            time.sleep(0.2)
            pbar.update(i)
        pbar.finish()
        print

    example1()
    example2()
    example3()
    example4()

# Retained (commented-out) timed-bar sketch from a previous user:
# def timedProgressBar(time_in_secs):
#    widgets = ['Running TRex: ', Percentage(), ' ',
#                Bar(marker='>',left='[',right=']'),
#                ' ', ETA()]
#    pbar = ProgressBar(widgets=widgets, maxval=time_in_secs*2)
#    pbar.start()
#    for i in range(0, time_in_secs*2 + 1):
#        time.sleep(0.5)
#        pbar.update(i)
#    pbar.finish()
#    print
# timedProgressBar(20)
| |
# Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Cloud Keras Tuner."""
import os
import time
from keras_tuner.engine import hypermodel as hypermodel_module
from keras_tuner.engine import hyperparameters as hp_module
from keras_tuner.engine import oracle as oracle_module
from keras_tuner.engine import trial as trial_module
from keras_tuner.engine import tuner as super_tuner
import mock
import tensorflow as tf
from tensorboard.plugins.hparams import api as hparams_api
from tensorflow_cloud.core import deploy
from tensorflow_cloud.core import machine_config
from tensorflow_cloud.core import validate
from tensorflow_cloud.tuner import cloud_fit_client
from tensorflow_cloud.tuner import tuner
from tensorflow_cloud.tuner.tuner import vizier_client
from tensorflow_cloud.utils import google_api_client
from tensorflow_cloud.utils import tf_utils
def build_model(hp):
    """Builds a minimal flatten+softmax classifier with a tunable learning rate."""
    learning_rate = hp.Choice("learning_rate", [0.0001, 0.001, 0.01])
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Flatten(input_shape=(28, 28)))
    model.add(tf.keras.layers.Dense(10, activation="softmax"))
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate),
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"],
    )
    return model
class CloudTunerTest(tf.test.TestCase):
def setUp(self):
    """Creates shared fixtures and patches the Vizier client module."""
    super(CloudTunerTest, self).setUp()
    self.addCleanup(mock.patch.stopall)
    self._study_id = "study-a"
    self._region = "us-central1"
    self._remote_dir = "gs://remote_dir"
    self._project_id = "project-a"
    self._trial_parent = (
        f"projects/{self._project_id}/locations/{self._region}/studies/"
        f"{self._study_id}")
    # BUG FIX: the original line ended with a stray trailing comma, which
    # made _container_uri a 1-tuple ("test_container_uri",) instead of the
    # intended string.
    self._container_uri = "test_container_uri"
    hps = hp_module.HyperParameters()
    hps.Choice("learning_rate", [1e-4, 1e-3, 1e-2])
    self._test_hyperparameters = hps
    # Study config equivalent to the hyperparameters declared above.
    self._study_config = {
        "algorithm": "ALGORITHM_UNSPECIFIED",
        "metrics": [{"metric": "val_acc", "goal": "MAXIMIZE"}],
        "parameters": [
            {
                "parameter": "learning_rate",
                "discrete_value_spec": {"values": [0.0001, 0.001, 0.01]},
                "type": "DISCRETE",
            }
        ],
        "automatedStoppingConfig": {
            "decayCurveStoppingConfig": {"useElapsedTime": True}
        },
    }
    self._test_trial = trial_module.Trial(
        hyperparameters=self._test_hyperparameters,
        trial_id="1",
        status=trial_module.TrialStatus.RUNNING,
    )
    self._job_id = f"{self._study_id}_{self._test_trial.trial_id}"
    # Patch the vizier_client module used by the tuner, and hand every
    # created study the same autospec'd client mock.
    self.mock_vizier_client_module = mock.patch.object(
        tuner, "vizier_client", autospec=True
    ).start()
    self.mock_client = mock.create_autospec(
        vizier_client._VizierClient)
    self.mock_vizier_client_module.create_or_load_study.return_value = (
        self.mock_client
    )
def _tuner_with_hparams(self):
    """Creates self.tuner from hyperparameters (no explicit study config)."""
    self.tuner = self._tuner(
        oracle_module.Objective("val_acc", "max"),
        self._test_hyperparameters,
        None,
    )
def _tuner(self, objective, hyperparameters, study_config, max_trials=None):
    """Builds a CloudTuner wired to the mocked Vizier backend."""
    tuner_kwargs = dict(
        hypermodel=build_model,
        objective=objective,
        study_config=study_config,
        hyperparameters=hyperparameters,
        max_trials=max_trials,
        project_id=self._project_id,
        region=self._region,
        study_id=self._study_id,
        directory=self.get_temp_dir(),
    )
    return tuner.CloudTuner(**tuner_kwargs)
def _remote_tuner(self, objective, hyperparameters, study_config,
                  directory=None, max_trials=None):
    """Builds a DistributingCloudTuner; directory falls back to the GCS dir."""
    if not directory:
        directory = self._remote_dir
    remote_kwargs = dict(
        hypermodel=build_model,
        objective=objective,
        study_config=study_config,
        hyperparameters=hyperparameters,
        max_trials=max_trials,
        project_id=self._project_id,
        region=self._region,
        directory=directory,
        study_id=self._study_id,
        container_uri=self._container_uri,
    )
    return tuner.DistributingCloudTuner(**remote_kwargs)
def test_tuner_initialization_with_hparams(self):
    """Hparams-based init translates the hparams into the study config."""
    self._tuner_with_hparams()
    create_study = self.mock_vizier_client_module.create_or_load_study
    create_study.assert_called_with(
        self._project_id, self._region, self._study_id, self._study_config)
def test_tuner_initialization_with_study_config(self):
    """Study-config-based init passes the config through unchanged."""
    self.tuner = self._tuner(None, None, self._study_config)
    create_study = self.mock_vizier_client_module.create_or_load_study
    create_study.assert_called_with(
        self._project_id, self._region, self._study_id, self._study_config)
@mock.patch.object(super_tuner.Tuner, "__init__", autospec=True)
def test_remote_tuner_initialization_with_study_config(self, mock_super):
    """DistributingCloudTuner init creates/loads the same study."""
    self._remote_tuner(None, None, self._study_config)
    create_study = self.mock_vizier_client_module.create_or_load_study
    create_study.assert_called_with(
        self._project_id, self._region, self._study_id, self._study_config)
def test_tuner_initialization_neither_hparam_nor_study_config(self):
    """With no search space at all, construction must fail."""
    self.assertRaises(ValueError, self._tuner, None, None, None)
def test_tuner_initialization_with_hparams_missing_objectives(self):
    """Hyperparameters without an objective must be rejected."""
    self.assertRaises(
        ValueError, self._tuner, None, self._test_hyperparameters, None)
def test_tuner_initialization_both_hparam_and_study_config(self):
    """Supplying both hparams and a study config is ambiguous; must fail."""
    self.assertRaises(
        ValueError,
        self._tuner,
        oracle_module.Objective("value_acc", "max"),
        self._test_hyperparameters,
        self._study_config,
    )
def test_tuner_initialization_with_study_config_and_max_trials(self):
    """max_trials must not change the study config sent to Vizier."""
    self.tuner = self._tuner(None, None, self._study_config, max_trials=100)
    create_study = self.mock_vizier_client_module.create_or_load_study
    create_study.assert_called_with(
        self._project_id, self._region, self._study_id, self._study_config)
def test_create_trial_initially(self):
    """With no prior trials, the first trial comes from a service suggestion."""
    self._tuner_with_hparams()
    self.mock_client.list_trials.return_value = []
    suggestion = {
        "name": "1",
        "state": "ACTIVE",
        "parameters":
            [{"parameter": "learning_rate", "floatValue": 0.001}],
    }
    self.mock_client.get_suggestions.return_value = [suggestion]
    trial = self.tuner.oracle.create_trial("tuner_0")
    self.mock_client.list_trials.assert_called_once()
    self.mock_client.get_suggestions.assert_called_with("tuner_0")
    self.assertEqual(trial.hyperparameters.values, {"learning_rate": 0.001})
def test_create_trial_before_reaching_max_trials(self):
    """Below max_trials the oracle keeps asking Vizier for suggestions."""
    self.tuner = self._tuner(None, None, self._study_config, max_trials=100)
    self.mock_client.list_trials.return_value = 50 * [
        {"name": "a", "state": "ACTIVE"}]
    suggestion = {
        "name": "1",
        "state": "ACTIVE",
        "parameters":
            [{"parameter": "learning_rate", "floatValue": 0.001}],
    }
    self.mock_client.get_suggestions.return_value = [suggestion]
    trial = self.tuner.oracle.create_trial("tuner_0")
    self.mock_client.list_trials.assert_called_once()
    self.mock_client.get_suggestions.assert_called_with("tuner_0")
    self.assertEqual(trial.hyperparameters.values, {"learning_rate": 0.001})
def test_create_trial_reaching_max_trials(self):
    """Once max_trials trials exist, the oracle returns a stopped trial."""
    self.tuner = self._tuner(None, None, self._study_config, max_trials=100)
    self.mock_client.list_trials.return_value = 100 * [
        {"name": "a", "state": "ACTIVE"}]
    trial = self.tuner.oracle.create_trial("tuner_0")
    self.mock_client.list_trials.assert_called_once()
    self.assertEqual(trial.hyperparameters.values, {})
    self.assertEqual(trial.status, trial_module.TrialStatus.STOPPED)
def test_create_trial_after_early_stopping(self):
    """A study in STOPPING state yields a stopped, empty trial."""
    self._tuner_with_hparams()
    self.mock_client.list_trials.return_value = [
        {"name": "a", "state": "STOPPING"}]
    trial = self.tuner.oracle.create_trial("tuner_0")
    self.mock_client.list_trials.assert_called_once()
    self.assertEqual(trial.hyperparameters.values, {})
    self.assertEqual(trial.status, trial_module.TrialStatus.STOPPED)
@mock.patch.object(oracle_module.Oracle, "update_trial", autospec=True)
def test_update_trial(self, mock_super_update_trial):
    """Intermediate metrics are reported and early stopping is honored."""
    self._tuner_with_hparams()
    self.mock_client.should_trial_stop.return_value = True
    # Freeze time so elapsed_secs is deterministic (1000 - 10 = 990).
    frozen_time = mock.patch.object(time, "time", autospec=True).start()
    frozen_time.return_value = 1000
    self.tuner.oracle._start_time = 10
    self.tuner.oracle.trials = {"1": self._test_trial}
    status = self.tuner.oracle.update_trial(
        trial_id="1", metrics={"val_acc": 0.8}, step=3)
    report = self.mock_client.report_intermediate_objective_value
    report.assert_called_once_with(
        3,  # step
        990,  # elapsed_secs
        [{"metric": "val_acc", "value": 0.8}],  # metrics_list
        "1",  # trial_id,
    )
    self.mock_client.should_trial_stop.assert_called_once_with("1")
    self.assertEqual(status, trial_module.TrialStatus.STOPPED)
def test_end_trial_success(self):
    """Completing a trial saves a COMPLETED trial with its final measurement."""
    self._tuner_with_hparams()
    self.mock_client.complete_trial.return_value = {
        "name": "1",
        "state": "COMPLETED",
        "parameters": [{"parameter": "learning_rate", "floatValue": 0.01}],
        "finalMeasurement": {
            "stepCount": "3",
            "metrics": [{"metric": "val_acc", "value": 0.7}],
        },
        "trial_infeasible": False,
        "infeasible_reason": None,
    }
    save_trial = mock.Mock()
    self.tuner.oracle._save_trial = save_trial
    self.tuner.oracle.ongoing_trials = {"tuner_0": self._test_trial}
    expected_trial = trial_module.Trial(
        hyperparameters=self._test_hyperparameters,
        trial_id="1",
        status=trial_module.TrialStatus.COMPLETED,
    )
    expected_trial.best_step = 3
    expected_trial.score = 0.7
    self.tuner.oracle.end_trial(trial_id="1")
    self.mock_client.complete_trial.assert_called_once_with(
        "1", False, None)
    # Compare via repr since trial states are plain nested structures.
    saved_state = save_trial.call_args[0][0].get_state()
    self.assertEqual(repr(saved_state), repr(expected_trial.get_state()))
def test_end_trial_infeasible_trial(self):
    """An INVALID status is forwarded to Vizier as an infeasible trial."""
    self._tuner_with_hparams()
    # Return value from complete_trial is irrelevant to this test case.
    self.mock_client.complete_trial.return_value = {"dummy": "trial"}
    self.tuner.oracle.ongoing_trials = {"tuner_0": self._test_trial}
    self.tuner.oracle.end_trial(trial_id="1", status="INVALID")
    self.mock_client.complete_trial.assert_called_once_with(
        "1", True, "INVALID")
def test_end_trial_invalid_trial(self):
    """Ending a trial id that is not in ongoing_trials raises ValueError."""
    self._tuner_with_hparams()
    self.tuner.oracle.ongoing_trials = {"tuner_0": self._test_trial}
    with self.assertRaises(ValueError):
        self.tuner.oracle.end_trial(trial_id="2")
def test_end_trial_invalid_status(self):
    """Ending a trial with an unrecognized status raises ValueError."""
    self._tuner_with_hparams()
    self.tuner.oracle.ongoing_trials = {"tuner_0": self._test_trial}
    with self.assertRaises(ValueError):
        self.tuner.oracle.end_trial(trial_id="1", status="FOO")
def test_get_best_trials(self):
    """get_best_trials sorts service-listed trials by the objective
    (val_acc, maximized) and returns the top num_trials."""
    self._tuner_with_hparams()
    self.mock_client.list_trials.return_value = [
        {
            "name": "1",
            "state": "COMPLETED",
            "parameters":
                [{"parameter": "learning_rate", "floatValue": 0.01}],
            "finalMeasurement": {
                "stepCount": "3",
                "metrics": [{"metric": "val_acc", "value": 0.7}],
            },
            "trial_infeasible": False,
            "infeasible_reason": None,
        },
        {
            "name": "2",
            "state": "COMPLETED",
            "parameters":
                [{"parameter": "learning_rate", "floatValue": 0.001}],
            "finalMeasurement": {
                "stepCount": "3",
                "metrics": [{"metric": "val_acc", "value": 0.9}],
            },
            "trial_infeasible": False,
            "infeasible_reason": None,
        },
    ]
    trials = self.tuner.oracle.get_best_trials(num_trials=2)
    self.mock_client.list_trials.assert_called_once()
    self.assertEqual(len(trials), 2)
    # Trial "2" has the higher val_acc, so it must come first.
    self.assertEqual(trials[0].trial_id, "2")
    self.assertEqual(trials[1].trial_id, "1")
    self.assertEqual(trials[0].score, 0.9)
    self.assertEqual(trials[0].best_step, 3)
def test_get_best_trials_multi_tuners(self):
    """Two tuner workers that each ran different trials still agree on the
    globally best trials, since ranking is backed by the shared service."""
    # Instantiate tuner_1
    tuner_1 = self._tuner(
        objective=oracle_module.Objective("val_acc", "max"),
        hyperparameters=self._test_hyperparameters,
        study_config=None,
    )
    tuner_1.tuner_id = "tuner_1"
    # tuner_1 has a completed trial
    trial_1 = trial_module.Trial(
        hyperparameters=self._test_hyperparameters,
        trial_id="1",
        status=trial_module.TrialStatus.COMPLETED,
    )
    tuner_1.oracle.trials = {"1": trial_1}
    # Instantiate tuner_2
    tuner_2 = self._tuner(
        objective=oracle_module.Objective("val_acc", "max"),
        hyperparameters=self._test_hyperparameters,
        study_config=None,
    )
    tuner_2.tuner_id = "tuner_2"
    # tuner_2 has a completed trial
    trial_2 = trial_module.Trial(
        hyperparameters=self._test_hyperparameters,
        trial_id="2",
        status=trial_module.TrialStatus.COMPLETED,
    )
    tuner_2.oracle.trials = {"2": trial_2}
    # The service knows about both trials regardless of which worker ran them.
    self.mock_client.list_trials.return_value = [
        {
            "name": "1",
            "state": "COMPLETED",
            "parameters":
                [{"parameter": "learning_rate", "floatValue": 0.01}],
            "finalMeasurement": {
                "stepCount": "3",
                "metrics": [{"metric": "val_acc", "value": 0.7}],
            },
            "trial_infeasible": False,
            "infeasible_reason": None,
        },
        {
            "name": "2",
            "state": "COMPLETED",
            "parameters":
                [{"parameter": "learning_rate", "floatValue": 0.001}],
            "finalMeasurement": {
                "stepCount": "3",
                "metrics": [{"metric": "val_acc", "value": 0.9}],
            },
            "trial_infeasible": False,
            "infeasible_reason": None,
        },
    ]
    # For any tuner worker who tries to get the best trials, all the top N
    # sorted trials will be returned.
    best_trials_1 = tuner_1.oracle.get_best_trials(num_trials=2)
    self.mock_client.list_trials.assert_called_once()
    best_trials_2 = tuner_2.oracle.get_best_trials(num_trials=2)
    self.assertEqual(len(best_trials_1), 2)
    self.assertEqual(best_trials_1[0].trial_id, best_trials_2[0].trial_id)
    self.assertEqual(best_trials_1[1].trial_id, best_trials_2[1].trial_id)
    self.assertEqual(best_trials_1[0].score, 0.9)
    self.assertEqual(best_trials_1[0].best_step, 3)
def test_get_single_objective(self):
    """_get_objective wraps a single oracle objective in a list."""
    self._tuner_with_hparams()
    expected_objectives = [self.tuner.oracle.objective]
    actual_objectives = self.tuner.oracle._get_objective()
    self.assertEqual(expected_objectives, actual_objectives)
@mock.patch.object(super_tuner.Tuner, "__init__", autospec=True)
@mock.patch.object(tf.summary, "create_file_writer", autospec=True)
@mock.patch.object(hparams_api, "hparams", autospec=True)
def test_add_logging_user_specified(
        self, mock_hparams, mock_create_file_writer, mock_super_tuner):
    """_add_logging reuses a user-supplied TensorBoard callback: it keeps
    the callback's options (write_images) but redirects its log_dir."""
    remote_tuner = self._remote_tuner(None, None, self._study_config)
    callbacks = [tf.keras.callbacks.TensorBoard(
        log_dir=remote_tuner.directory,
        write_images=True)]
    remote_tuner._add_logging(callbacks, self._test_trial)
    expected_logdir = os.path.join(
        remote_tuner.directory, self._test_trial.trial_id, "logs")
    expected_hparams = {hparams_api.HParam(
        "learning_rate", hparams_api.Discrete([1e-4, 1e-3, 1e-2])): 1e-4}
    # No extra callback is appended; the existing one is rewired.
    self.assertLen(callbacks, 1)
    self.assertEqual(callbacks[0].log_dir, expected_logdir)
    self.assertEqual(callbacks[0].write_images, True)
    mock_create_file_writer.assert_called_once_with(expected_logdir)
    self.assertEqual(mock_hparams.call_count, 1)
    # HParam objects have no __eq__; compare via repr.
    self.assertEqual(
        repr(mock_hparams.call_args[0][0]), repr(expected_hparams))
@mock.patch.object(super_tuner.Tuner, "__init__", autospec=True)
@mock.patch.object(tf.summary, "create_file_writer", autospec=True)
@mock.patch.object(hparams_api, "hparams", autospec=True)
def test_add_logging_not_specified(
        self, mock_hparams, mock_create_file_writer, mock_super_tuner):
    """_add_logging creates a TensorBoard callback when none is given."""
    remote_tuner = self._remote_tuner(None, None, self._study_config)
    callbacks = []
    remote_tuner._add_logging(callbacks, self._test_trial)
    expected_logdir = os.path.join(
        remote_tuner.directory, self._test_trial.trial_id, "logs")
    self.assertLen(callbacks, 1)
    self.assertEqual(callbacks[0].log_dir, expected_logdir)
@mock.patch.object(super_tuner.Tuner, "__init__", autospec=True)
@mock.patch.object(tf.summary, "create_file_writer", autospec=True)
@mock.patch.object(hparams_api, "hparams", autospec=True)
def test_add_logging_mismatched_dir(
        self, mock_hparams, mock_create_file_writer, mock_super_tuner):
    """A user TensorBoard callback whose log_dir differs from the tuner
    directory is rejected with a descriptive ValueError."""
    remote_tuner = self._remote_tuner(None, None, self._study_config)
    callbacks = [tf.keras.callbacks.TensorBoard(
        log_dir=os.path.join(remote_tuner.directory, "logs"))]
    with self.assertRaisesRegex(
        ValueError, "log_dir in TensorBoard callback should be "
        "gs://remote_dir, but was gs://remote_dir/logs"):
        remote_tuner._add_logging(callbacks, self._test_trial)
@mock.patch.object(super_tuner.Tuner, "__init__", autospec=True)
def test_add_model_checkpoint_callback(self, mock_super_tuner):
    """_add_model_checkpoint_callback appends a checkpoint callback whose
    filepath lives under <directory>/<trial_id>/checkpoint."""
    remote_tuner = self._remote_tuner(None, None, self._study_config)
    callbacks = []
    trial_id = "test_trial_id"
    remote_tuner._add_model_checkpoint_callback(callbacks, trial_id)
    self.assertLen(callbacks, 1)
    self.assertEqual(
        callbacks[0].filepath,
        os.path.join(remote_tuner.directory, trial_id, "checkpoint"))
# TODO(b/175906531): Set autospec=True once correct args are passed.
@mock.patch.object(cloud_fit_client, "cloud_fit", autospec=False)
@mock.patch.object(google_api_client,
                   "wait_for_aip_training_job_completion", autospec=True)
@mock.patch.object(super_tuner.Tuner, "__init__", autospec=True)
@mock.patch.object(google_api_client, "is_aip_training_job_running",
                   autospec=True)
@mock.patch.object(tf_utils, "get_tensorboard_log_watcher_from_path",
                   autospec=True)
@mock.patch.object(tf.io.gfile, "makedirs", autospec=True)
def test_remote_run_trial_with_successful_job(
        self, mock_tf_io, mock_log_watcher, mock_is_running, mock_super_tuner,
        mock_job_status, mock_cloud_fit):
    """run_trial launches a cloud_fit job, polls its metrics while running,
    and reports them to the oracle until the job completes successfully."""
    remote_tuner = self._remote_tuner(
        None, None, self._study_config, max_trials=10)
    # Job is "running" on the first poll, finished on the second.
    mock_is_running.side_effect = [True, False]
    remote_dir = os.path.join(
        remote_tuner.directory, str(self._test_trial.trial_id))
    mock_job_status.return_value = True
    remote_tuner._get_remote_training_metrics = mock.Mock()
    remote_tuner._get_remote_training_metrics.return_value = (
        tuner._TrainingMetrics([{"loss": 0.001}], {}))
    remote_tuner.oracle = mock.Mock()
    remote_tuner.oracle.update_trial = mock.Mock()
    remote_tuner.hypermodel = mock.Mock()
    remote_tuner.run_trial(
        self._test_trial, "fit_arg",
        callbacks=["test_call_back"], fit_kwarg=1)
    # One update per metrics fetch: once while running, once after the job.
    self.assertEqual(2, remote_tuner.oracle.update_trial.call_count)
    mock_cloud_fit.assert_called_with(
        "fit_arg",
        fit_kwarg=1,
        model=mock.ANY,
        callbacks=["test_call_back", mock.ANY, mock.ANY],
        remote_dir=remote_dir,
        job_spec=mock.ANY,
        region=self._region,
        project_id=self._project_id,
        image_uri=self._container_uri,
        job_id=self._job_id)
    train_log_path = os.path.join(remote_tuner._get_tensorboard_log_dir(
        self._test_trial.trial_id), "train")
    mock_log_watcher.assert_called_with(train_log_path)
    self.assertEqual(
        2, remote_tuner._get_remote_training_metrics.call_count)
    mock_tf_io.assert_called_with(train_log_path)
# TODO(b/175906531): Set autospec=True once correct args are passed.
@mock.patch.object(cloud_fit_client, "cloud_fit", autospec=False)
@mock.patch.object(google_api_client,
                   "wait_for_aip_training_job_completion", autospec=True)
@mock.patch.object(super_tuner.Tuner, "__init__", autospec=True)
@mock.patch.object(google_api_client, "is_aip_training_job_running",
                   autospec=True)
@mock.patch.object(tf.io.gfile, "makedirs", autospec=True)
def test_remote_run_trial_with_failed_job(
        self, mock_tf_io, mock_is_running, mock_super_tuner, mock_job_status,
        mock_cloud_fit):
    """run_trial raises RuntimeError when the AIP training job fails."""
    remote_tuner = self._remote_tuner(
        None, None, self._study_config, max_trials=10)
    mock_is_running.return_value = False
    remote_tuner.hypermodel = mock.Mock()
    # wait_for_aip_training_job_completion returning False == failed job.
    mock_job_status.return_value = False
    with self.assertRaises(RuntimeError):
        remote_tuner.run_trial(
            self._test_trial, "fit_arg",
            callbacks=["test_call_back"], fit_kwarg=1)
@mock.patch.object(google_api_client, "stop_aip_training_job",
                   autospec=True)
# TODO(b/175906531): Set autospec=True once correct args are passed.
@mock.patch.object(cloud_fit_client, "cloud_fit", autospec=False)
@mock.patch.object(google_api_client,
                   "wait_for_aip_training_job_completion", autospec=True)
@mock.patch.object(super_tuner.Tuner, "__init__", autospec=True)
@mock.patch.object(google_api_client, "is_aip_training_job_running",
                   autospec=True)
@mock.patch.object(tf.io.gfile, "makedirs", autospec=True)
def test_remote_run_trial_with_oracle_canceling_job(
        self, mock_tf_io, mock_is_running, mock_super_tuner,
        mock_job_status, mock_cloud_fit, mock_stop_job):
    """When the oracle reports STOPPED for a trial, run_trial stops the
    corresponding AIP training job."""
    remote_tuner = self._remote_tuner(
        None, None, self._study_config, max_trials=10)
    mock_is_running.side_effect = [True, False]
    mock_job_status.return_value = True
    remote_tuner._get_remote_training_metrics = mock.Mock()
    remote_tuner._get_remote_training_metrics.return_value = (
        tuner._TrainingMetrics([{"loss": 0.001}], {}))
    remote_tuner.oracle = mock.create_autospec(
        oracle_module.Oracle, instance=True, spec_set=True)
    remote_tuner.oracle.update_trial = mock.Mock()
    # Oracle asks for early stopping on every update.
    remote_tuner.oracle.update_trial.return_value = "STOPPED"
    remote_tuner.hypermodel = mock.create_autospec(
        hypermodel_module.HyperModel, instance=True, spec_set=True)
    remote_tuner.run_trial(
        self._test_trial, "fit_arg",
        callbacks=["test_call_back"], fit_kwarg=1)
    self.assertEqual(2, remote_tuner.oracle.update_trial.call_count)
    self.assertEqual(
        2, remote_tuner._get_remote_training_metrics.call_count)
    mock_stop_job.assert_called_once_with(self._job_id, self._project_id)
@mock.patch.object(super_tuner.Tuner, "__init__", autospec=True)
def test_get_remote_training_metrics(self, mock_super_tuner):
    """_get_remote_training_metrics reads epoch metrics from TensorBoard
    event files; only fully written epochs are reported as completed."""
    remote_tuner = self._remote_tuner(
        None, None, self._study_config, max_trials=10)
    remote_tuner.directory = self.get_temp_dir()
    log_dir = os.path.join(
        remote_tuner.directory, str(self._test_trial.trial_id), "logs")
    # Write three epochs' worth of scalar summaries to a real log dir.
    with tf.summary.create_file_writer(log_dir).as_default():
        tf.summary.scalar(name="epoch_loss", data=0.1, step=0)
        tf.summary.scalar(name="epoch_accuracy", data=0.2, step=0)
        tf.summary.scalar(name="epoch_loss", data=0.3, step=1)
        tf.summary.scalar(name="epoch_accuracy", data=0.4, step=1)
        tf.summary.scalar(name="epoch_loss", data=0.5, step=2)
        tf.summary.scalar(name="epoch_accuracy", data=0.6, step=2)
    log_reader = tf_utils.get_tensorboard_log_watcher_from_path(log_dir)
    results = remote_tuner._get_remote_training_metrics(log_reader, {})
    # The last epoch may still be in progress, so only 2 are "completed".
    self.assertLen(results.completed_epoch_metrics, 2)
    self.assertIn("accuracy", results.completed_epoch_metrics[0])
    self.assertIn("loss", results.completed_epoch_metrics[0])
    self.assertEqual(
        results.completed_epoch_metrics[0].get("loss"), tf.constant(0.1))
@mock.patch.object(super_tuner.Tuner, "__init__", autospec=True)
def test_remote_load_model(self, mock_super_tuner):
    """Loading a model from a remote trial is explicitly unsupported."""
    tuner_under_test = self._remote_tuner(
        None, None, self._study_config, max_trials=10)
    self.assertRaises(
        NotImplementedError, tuner_under_test.load_model, self._test_trial)
@mock.patch.object(super_tuner.Tuner, "save_model", autospec=True)
@mock.patch.object(super_tuner.Tuner, "__init__", autospec=True)
def test_remote_save_model(self, mock_super_tuner, mock_super_save_model):
    """save_model delegates to the superclass without raising."""
    remote_tuner = self._remote_tuner(
        None, None, self._study_config, max_trials=10)
    remote_tuner.save_model(self._test_trial.trial_id, mock.Mock(), step=0)
@mock.patch.object(super_tuner.Tuner, "__init__", autospec=True)
def test_init_with_non_gcs_directory_path(self, mock_super_tuner):
    """The tuner rejects a working directory that is not a gs:// path."""
    with self.assertRaisesRegex(
        ValueError, "Directory must be a valid Google Cloud Storage path."):
        self._remote_tuner(
            None, None, self._study_config, max_trials=10,
            directory="local_path")
@mock.patch.object(super_tuner.Tuner, "__init__", autospec=True)
@mock.patch.object(deploy, "_create_request_dict", autospec=True)
@mock.patch.object(validate, "_validate_cluster_config", autospec=True)
def test_get_job_spec_with_default_config(
        self, mock_validate, mock_create_request, mock_super_tuner):
    """With the default replica configuration, the job spec is validated
    and built with no workers (chief only)."""
    remote_tuner = self._remote_tuner(
        None, None, self._study_config)
    # Expected worker configuration based on replica setting
    worker_count = 0
    worker_config = None
    remote_tuner._get_job_spec_from_config(self._job_id)
    mock_validate.assert_called_with(
        chief_config=remote_tuner._replica_config,
        worker_count=worker_count,
        worker_config=worker_config,
        docker_parent_image=remote_tuner._container_uri)
    mock_create_request.assert_called_with(
        job_id=self._job_id,
        region=remote_tuner._region,
        image_uri=remote_tuner._container_uri,
        chief_config=remote_tuner._replica_config,
        worker_count=worker_count,
        worker_config=worker_config,
        entry_point_args=None,
        job_labels=None,
        service_account=None)
@mock.patch.object(super_tuner.Tuner, "__init__", autospec=True)
@mock.patch.object(deploy, "_create_request_dict", autospec=True)
@mock.patch.object(validate, "_validate_cluster_config", autospec=True)
def test_get_job_spec_with_default_with_custom_config(
        self, mock_validate, mock_create_request, mock_super_tuner):
    """With a custom replica config and replica_count N, the job spec uses
    N-1 workers that share the chief's machine configuration."""
    remote_tuner = self._remote_tuner(None, None, self._study_config)
    replica_config = machine_config.COMMON_MACHINE_CONFIGS["K80_1X"]
    replica_count = 2
    remote_tuner._replica_config = replica_config
    remote_tuner._replica_count = replica_count
    # Expected worker configuration based on replica setting
    worker_count = 1
    worker_config = replica_config
    remote_tuner._get_job_spec_from_config(self._job_id)
    mock_validate.assert_called_with(
        chief_config=replica_config,
        worker_count=worker_count,
        worker_config=worker_config,
        docker_parent_image=remote_tuner._container_uri)
    mock_create_request.assert_called_with(
        job_id=self._job_id,
        region=remote_tuner._region,
        image_uri=remote_tuner._container_uri,
        chief_config=replica_config,
        worker_count=worker_count,
        worker_config=replica_config,
        entry_point_args=None,
        job_labels=None,
        service_account=None)
# Run the suite through TensorFlow's test runner when executed directly.
if __name__ == "__main__":
    tf.test.main()
| |
# TODO: is Lock still required?
import select
from threading import Lock
import time
import six
import pycurl
from grab.util.log import PycurlSigintHandler
from grab.error import GrabTooManyRedirectsError
from grab.transport.curl import build_grab_exception
from grab.spider.base_service import BaseService
# Synthetic (non-libcurl) error code used when Grab's own meta-refresh
# redirect limit is exceeded.
ERROR_TOO_MANY_REDIRECTS = -2
# Source: https://curl.haxx.se/libcurl/c/libcurl-errors.html
# Maps libcurl numeric error codes to symbolic names. Gaps in the numbering
# correspond to codes libcurl itself has deprecated or never assigned.
ERRNUM_PYCURL_TAG = {
    0: 'E_OK',
    1: 'E_UNSUPPORTED_PROTOCOL',
    2: 'E_FAILED_INIT',
    3: 'E_URL_MALFORMAT',
    4: 'E_NOT_BUILT_IN',
    5: 'E_COULDNT_RESOLVE_PROXY',
    6: 'E_COULDNT_RESOLVE_HOST',
    7: 'E_COULDNT_CONNECT',
    8: 'E_FTP_WEIRD_SERVER_REPLY',
    9: 'E_REMOTE_ACCESS_DENIED',
    10: 'E_FTP_ACCEPT_FAILED',
    11: 'E_FTP_WEIRD_PASS_REPLY',
    12: 'E_FTP_ACCEPT_TIMEOUT',
    13: 'E_FTP_WEIRD_PASV_REPLY',
    14: 'E_FTP_WEIRD_227_FORMAT',
    15: 'E_FTP_CANT_GET_HOST',
    16: 'E_HTTP2',  # CURLE_HTTP2_STREAM
    17: 'E_FTP_COULDNT_SET_TYPE',
    18: 'E_PARTIAL_FILE',
    19: 'E_FTP_COULDNT_RETR_FILE',
    21: 'E_QUOTE_ERROR',
    22: 'E_HTTP_RETURNED_ERROR',
    23: 'E_WRITE_ERROR',
    25: 'E_UPLOAD_FAILED',
    26: 'E_READ_ERROR',
    27: 'E_OUT_OF_MEMORY',
    28: 'E_OPERATION_TIMEDOUT',
    30: 'E_FTP_PORT_FAILED',
    31: 'E_FTP_COULDNT_USE_REST',
    33: 'E_RANGE_ERROR',
    34: 'E_HTTP_POST_ERROR',
    35: 'E_SSL_CONNECT_ERROR',
    36: 'E_BAD_DOWNLOAD_RESUME',
    37: 'E_FILE_COULDNT_READ_FILE',
    38: 'E_LDAP_CANNOT_BIND',
    39: 'E_LDAP_SEARCH_FAILED',
    41: 'E_FUNCTION_NOT_FOUND',
    42: 'E_ABORTED_BY_CALLBACK',
    43: 'E_BAD_FUNCTION_ARGUMENT',
    45: 'E_INTERFACE_FAILED',
    47: 'E_TOO_MANY_REDIRECTS',
    48: 'E_UNKNOWN_OPTION',
    49: 'E_TELNET_OPTION_SYNTAX',
    51: 'E_PEER_FAILED_VERIFICATION',
    52: 'E_GOT_NOTHING',
    53: 'E_SSL_ENGINE_NOTFOUND',
    54: 'E_SSL_ENGINE_SETFAILED',
    55: 'E_SEND_ERROR',
    56: 'E_RECV_ERROR',
    58: 'E_SSL_CERTPROBLEM',
    59: 'E_SSL_CIPHER',
    60: 'E_SSL_CACERT',
    61: 'E_BAD_CONTENT_ENCODING',
    62: 'E_LDAP_INVALID_URL',
    63: 'E_FILESIZE_EXCEEDED',
    64: 'E_USE_SSL_FAILED',
    65: 'E_SEND_FAIL_REWIND',
    66: 'E_SSL_ENGINE_INITFAILED',
    67: 'E_LOGIN_DENIED',
    68: 'E_TFTP_NOTFOUND',
    69: 'E_TFTP_PERM',
    70: 'E_REMOTE_DISK_FULL',
    71: 'E_TFTP_ILLEGAL',
    72: 'E_TFTP_UNKNOWNID',
    73: 'E_REMOTE_FILE_EXISTS',
    74: 'E_TFTP_NOSUCHUSER',
    75: 'E_CONV_FAILED',
    76: 'E_CONV_REQD',
    77: 'E_SSL_CACERT_BADFILE',
    78: 'E_REMOTE_FILE_NOT_FOUND',
    79: 'E_SSH',
    80: 'E_SSL_SHUTDOWN_FAILED',
    81: 'E_AGAIN',
    82: 'E_SSL_CRL_BADFILE',
    83: 'E_SSL_ISSUER_ERROR',
    84: 'E_FTP_PRET_FAILED',
    85: 'E_RTSP_CSEQ_ERROR',
    86: 'E_RTSP_SESSION_ERROR',
    87: 'E_FTP_BAD_FILE_LIST',
    88: 'E_CHUNK_FAILED',
    89: 'E_NO_CONNECTION_AVAILABLE',
    90: 'E_SSL_PINNEDPUBKEYNOTMATCH',
    91: 'E_SSL_INVALIDCERTSTATUS',
    92: 'E_HTTP2_STREAM',
    93: 'E_RECURSIVE_API_CALL',
}
# Human-readable abbreviations keyed by error code; pre-seeded with Grab's
# own synthetic codes, then extended from the pycurl table below.
ERRNUM_TAG = {
    ERROR_TOO_MANY_REDIRECTS: 'too-many-redirects',
}
# Derive abbreviations like 'ssl-connect-error' from 'E_SSL_CONNECT_ERROR'.
for code, tag in ERRNUM_PYCURL_TAG.items():
    assert tag.startswith('E_')
    ERRNUM_TAG[code] = tag[2:].replace('_', '-').lower()
class NetworkServiceMulticurl(BaseService):
    """Spider network service backed by pycurl's CurlMulti interface.

    Maintains a fixed pool of `socket_number` curl handles. Two worker
    threads run: a spawner that pulls tasks from the spider queue and
    submits them, and an async loop that drives the multi-curl transfers.
    All multi-curl calls are serialized through `network_op_lock` and
    wrapped in a SIGINT handler so Ctrl-C is not swallowed inside pycurl.
    """

    def __init__(self, spider, socket_number):
        """
        Args:
            spider: argument is not used in multicurl transport
            socket_number: size of the curl handle pool, i.e. the maximum
                number of concurrent network transfers
        """
        self.spider = spider
        self.socket_number = socket_number
        self.multi = pycurl.CurlMulti()
        self.multi.handles = []
        # Curl handles not currently attached to a transfer.
        self.freelist = []
        # Maps id(curl) -> {'grab', 'grab_config_backup', 'task'} for
        # transfers currently in flight.
        self.registry = {}
        # Maps id(curl) -> number of requests served by that handle.
        self.connection_count = {}
        self.sigint_handler = PycurlSigintHandler()
        # Serializes every call into the (non-thread-safe) multi handle.
        self.network_op_lock = Lock()
        # Create curl instances
        for _ in six.moves.range(self.socket_number):
            curl = pycurl.Curl()
            self.connection_count[id(curl)] = 0
            self.freelist.append(curl)
            # self.multi.handles.append(curl)
        self.spawner = self.create_worker(self.spawner_callback)
        self.async_loop = self.create_worker(self.async_loop_callback)
        self.register_workers(self.spawner, self.async_loop)

    def async_loop_callback(self, worker):
        """Worker loop: continuously drive pending multi-curl transfers."""
        while not worker.stop_event.is_set():
            worker.process_pause_signal()
            self.process_handlers()
            time.sleep(0.01)

    def spawner_callback(self, worker):
        """Worker loop: pull tasks from the spider queue, submit them to the
        transport, and forward finished results to the task dispatcher."""
        while not worker.stop_event.is_set():
            worker.process_pause_signal()
            if self.get_free_threads_number():
                task = self.spider.get_task_from_queue()
                # True is a sentinel meaning "queue temporarily empty".
                if task is None or task is True:
                    time.sleep(0.1)
                else:
                    worker.is_busy_event.set()
                    try:
                        task.network_try_count += 1  # pylint: disable=no-member
                        is_valid, reason = self.spider.check_task_limits(task)
                        if is_valid:
                            grab = self.spider.setup_grab_for_task(task)
                            self.spider.submit_task_to_transport(task, grab)
                        else:
                            self.spider.log_rejected_task(task, reason)
                            # pylint: disable=no-member
                            handler = task.get_fallback_handler(self.spider)
                            # pylint: enable=no-member
                            if handler:
                                handler(task)
                    finally:
                        worker.is_busy_event.clear()
            # Drain completed transfers even when no handle is free, so
            # busy curl handles get recycled back into the freelist.
            for result, task in self.iterate_results():
                self.spider.task_dispatcher.input_queue.put(
                    (result, task, None),
                )

    def ready_for_task(self):
        """Return a truthy value when at least one curl handle is free."""
        return len(self.freelist)

    def get_free_threads_number(self):
        """Number of curl handles available for new transfers."""
        return len(self.freelist)

    def get_active_threads_number(self):
        """Number of curl handles currently attached to transfers."""
        return self.socket_number - len(self.freelist)

    def process_connection_count(self, curl):
        """Return `curl`, or a fresh handle once it has served >100 requests.

        Recycling long-lived handles avoids accumulating per-handle state
        inside libcurl.
        """
        curl_id = id(curl)
        self.connection_count[curl_id] += 1
        if self.connection_count[curl_id] > 100:
            del self.connection_count[curl_id]
            del curl
            new_curl = pycurl.Curl()
            self.connection_count[id(new_curl)] = 1
            return new_curl
        else:
            return curl

    def start_task_processing(self, task, grab, grab_config_backup):
        """Bind a free curl handle to (task, grab) and register it with the
        multi handle. On setup failure the handle is returned to the pool."""
        curl = self.process_connection_count(self.freelist.pop())
        self.registry[id(curl)] = {
            'grab': grab,
            'grab_config_backup': grab_config_backup,
            'task': task,
        }
        grab.transport.curl = curl
        try:
            grab.prepare_request()
            # Enable pycurl built-in redirect processing
            # In non-spider mode Grab handles redirects itself
            # by parsing headers and following Location URls
            # In spider mode that would require to create
            # new Task objects for each 30* redirect
            # Maybe that would be implemented in future
            # For now multicurl transport just uses builtin pycurl
            # ability to handle 30* redirects
            grab.transport.curl.setopt(
                pycurl.FOLLOWLOCATION,
                1 if grab.config['follow_location'] else 0
            )
            grab.log_request()
        except Exception:
            # If some error occurred while processing the request arguments
            # then we should put curl object back to free list
            del self.registry[id(curl)]
            self.freelist.append(curl)
            raise
        else:
            # Add configured curl instance to multi-curl processor
            try:
                self.network_op_lock.acquire()
                with self.sigint_handler.handle_sigint():
                    self.multi.add_handle(curl)
            finally:
                self.network_op_lock.release()

    def process_handlers(self):
        """Run one step of the multi-curl event loop: wait (briefly) for
        socket readiness, then perform pending transfers."""
        try:
            self.network_op_lock.acquire()
            with self.sigint_handler.handle_sigint():
                rlist, wlist, xlist = self.multi.fdset()
            if rlist or wlist or xlist:
                with self.sigint_handler.handle_sigint():
                    timeout = self.multi.timeout()
                # multi.timeout() is in milliseconds; select wants seconds.
                if timeout and timeout > 0:
                    select.select(rlist, wlist, xlist, timeout / 1000.0)
            else:
                pass
            while True:
                with self.sigint_handler.handle_sigint():
                    status, _ = self.multi.perform()
                if status != pycurl.E_CALL_MULTI_PERFORM:
                    break
        finally:
            self.network_op_lock.release()

    def iterate_results(self):
        """Yield (result_dict, task) pairs for every finished transfer and
        return the involved curl handles to the free pool.

        Loops until info_read() reports no queued messages remain.
        """
        while True:
            try:
                self.network_op_lock.acquire()
                with self.sigint_handler.handle_sigint():
                    queued_messages, ok_list, fail_list = (
                        self.multi.info_read()
                    )
            finally:
                self.network_op_lock.release()
            #except Exception as ex:
            #    # Usually that should not happen
            #    logging.error('', exc_info=ex)
            #    continue
            results = []
            for curl in ok_list:
                results.append((True, curl, None, None, None))
            for curl, ecode, emsg in fail_list:
                curl.grab_callback_interrupted = False
                # Re-raise the pycurl error to convert it into a Grab
                # exception with a proper traceback attached.
                try:
                    raise pycurl.error(ecode, emsg)
                except Exception as exc:  # pylint: disable=broad-except
                    grab_exc = build_grab_exception(exc, curl)
                    # grab_exc could be None if the pycurl error
                    # was expected (could be in case of
                    # body_maxsize and other options)
                    if grab_exc:
                        results.append((False, curl, ecode, emsg, grab_exc))
                    else:
                        results.append((True, curl, None, None, None))
            for is_ok, curl, ecode, emsg, grab_exc in results:
                # FORMAT: {is_ok, grab, grab_config_backup, task,
                # ecode, emsg, error_abbr, exc}
                curl_id = id(curl)
                task = self.registry[curl_id]['task']
                grab = self.registry[curl_id]['grab']
                grab_config_backup =\
                    self.registry[curl_id]['grab_config_backup']
                try:
                    self.network_op_lock.acquire()
                    grab.process_request_result()
                except GrabTooManyRedirectsError:
                    ecode = ERROR_TOO_MANY_REDIRECTS
                    emsg = 'Too many meta refresh redirects'
                    is_ok = False
                finally:
                    self.network_op_lock.release()
                #except Exception as ex:
                #    logging.error('', exc_info=ex)
                #    ecode = ERROR_INTERNAL_GRAB_ERROR
                #    emsg = 'Internal grab error'
                #    is_ok = False
                grab.doc.error_code = ecode
                grab.doc.error_msg = emsg
                grab.exception = grab_exc
                # Free resources
                del self.registry[curl_id]
                grab.transport.curl = None
                if is_ok:
                    error_abbr = None
                else:
                    error_abbr = ERRNUM_TAG.get(ecode, 'unknown-%d' % ecode)
                yield {
                    'ok': is_ok,
                    'ecode': ecode,
                    'emsg': emsg,
                    'error_abbr': error_abbr,
                    'exc': grab_exc,
                    'grab': grab,
                    'grab_config_backup': grab_config_backup,
                }, task
                # Detach and recycle the handle only after the consumer has
                # processed the yielded result.
                try:
                    self.network_op_lock.acquire()
                    with self.sigint_handler.handle_sigint():
                        self.multi.remove_handle(curl)
                finally:
                    self.network_op_lock.release()
                curl.reset()
                self.freelist.append(curl)
            if not queued_messages:
                break
| |
# This file is part of the MapProxy project.
# Copyright (C) 2010-2012 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import hashlib
from io import BytesIO
from mapproxy.srs import bbox_equals
from mapproxy.util.times import format_httpdate
from mapproxy.test.image import is_jpeg, tmp_image
from mapproxy.test.http import mock_httpd
from mapproxy.test.helper import validate_with_xsd
from nose.tools import eq_
# XML namespace map used for KML XPath queries throughout this module.
ns = {'kml': 'http://www.opengis.net/kml/2.2'}
from mapproxy.test.system import module_setup, module_teardown, SystemTest, make_base_config
# Shared mutable config dict; populated by setup_module() below.
test_config = {}
base_config = make_base_config(test_config)
def setup_module():
    # Load the KML layer fixture config and pre-seed the tile cache.
    module_setup(test_config, 'kml_layer.yaml', with_cache_data=True)
def teardown_module():
    # Remove temporary cache/config state created by setup_module().
    module_teardown(test_config)
class TestKML(SystemTest):
    """System tests for MapProxy's KML service: super-overlay documents,
    tile delivery, and HTTP cache-control behavior."""
    config = test_config

    def test_get_out_of_bounds_tile(self):
        # Nose-style generator test: one sub-test per out-of-range coord.
        for coord in [(0, 0, -1), (-1, 0, 0), (0, -1, 0), (4, 2, 1), (1, 3, 0)]:
            yield self.check_out_of_bounds, coord

    def check_out_of_bounds(self, coord):
        """Request a tile outside the grid and expect a 404 with message."""
        x, y, z = coord
        url = '/kml/wms_cache/%d/%d/%d.kml' % (z, x, y)
        resp = self.app.get(url, status=404)
        assert 'outside the bounding box' in resp

    def test_invalid_layer(self):
        """Unknown layer names yield a plain-text 404."""
        resp = self.app.get('/kml/inVAlid/0/0/0.png', status=404)
        eq_(resp.content_type, 'text/plain')
        assert 'unknown layer: inVAlid' in resp

    def test_invalid_format(self):
        """A format the layer does not serve yields a plain-text 404."""
        resp = self.app.get('/kml/wms_cache/0/0/1.png', status=404)
        eq_(resp.content_type, 'text/plain')
        assert 'invalid format' in resp

    def test_get_tile_tile_source_error(self):
        """An unreachable tile source propagates as a 500 error."""
        resp = self.app.get('/kml/wms_cache/0/0/0.jpeg', status=500)
        eq_(resp.content_type, 'text/plain')
        assert 'No response from URL' in resp

    def _check_tile_resp(self, resp):
        # Helper: response must be a well-formed JPEG with correct headers.
        eq_(resp.content_type, 'image/jpeg')
        eq_(resp.content_length, len(resp.body))
        data = BytesIO(resp.body)
        assert is_jpeg(data)

    def _update_timestamp(self):
        """Pin the cached tile's mtime to a known value and return the
        (etag, max_age) the service is expected to derive from it."""
        timestamp = 1234567890.0
        # Size of the pre-seeded fixture tile on disk.
        size = 10214
        base_dir = base_config().cache.base_dir
        os.utime(os.path.join(base_dir,
            'wms_cache_EPSG900913/01/000/000/000/000/000/001.jpeg'),
            (timestamp, timestamp))
        max_age = base_config().tiles.expires_hours * 60 * 60
        # ETag is the MD5 of mtime+size, mirroring the service's scheme.
        etag = hashlib.md5((str(timestamp) + str(size)).encode('ascii')).hexdigest()
        return etag, max_age

    def _check_cache_control_headers(self, resp, etag, max_age, timestamp=1234567890.0):
        # Helper: assert ETag / Last-modified / Cache-control triple.
        # timestamp=None means no Last-modified header is expected at all.
        eq_(resp.headers['ETag'], etag)
        if timestamp is None:
            assert 'Last-modified' not in resp.headers
        else:
            eq_(resp.headers['Last-modified'], format_httpdate(timestamp))
        eq_(resp.headers['Cache-control'], 'public, max-age=%d, s-maxage=%d' % (max_age, max_age))

    def test_get_cached_tile(self):
        """A cached tile is served with full cache-control headers."""
        etag, max_age = self._update_timestamp()
        resp = self.app.get('/kml/wms_cache/1/0/1.jpeg')
        self._check_cache_control_headers(resp, etag, max_age)
        self._check_tile_resp(resp)

    def test_if_none_match(self):
        """If-None-Match with a matching ETag yields 304; a stale ETag
        yields a fresh 200 with the tile body."""
        etag, max_age = self._update_timestamp()
        resp = self.app.get('/kml/wms_cache/1/0/1.jpeg',
                            headers={'If-None-Match': etag})
        eq_(resp.status, '304 Not Modified')
        self._check_cache_control_headers(resp, etag, max_age)
        resp = self.app.get('/kml/wms_cache/1/0/1.jpeg',
                            headers={'If-None-Match': etag + 'foo'})
        self._check_cache_control_headers(resp, etag, max_age)
        eq_(resp.status, '200 OK')
        self._check_tile_resp(resp)

    def test_get_kml(self):
        """The root KML doc validates against the OGC schema, covers the
        whole world, and links the four level-1 sub-tiles/sub-docs."""
        resp = self.app.get('/kml/wms_cache/0/0/0.kml')
        xml = resp.lxml
        assert validate_with_xsd(xml, 'kml/2.2.0/ogckml22.xsd')
        assert bbox_equals(
            self._bbox(xml.xpath('/kml:kml/kml:Document', namespaces=ns)[0]),
            (-180, -90, 180, 90)
        )
        assert bbox_equals(
            self._bbox(xml.xpath('/kml:kml/kml:Document/kml:GroundOverlay', namespaces=ns)[0]),
            (-180, 0, 0, 90)
        )
        eq_(xml.xpath('/kml:kml/kml:Document/kml:GroundOverlay/kml:Icon/kml:href/text()',
                      namespaces=ns),
            ['http://localhost/kml/wms_cache/EPSG900913/1/0/1.jpeg',
             'http://localhost/kml/wms_cache/EPSG900913/1/1/1.jpeg',
             'http://localhost/kml/wms_cache/EPSG900913/1/0/0.jpeg',
             'http://localhost/kml/wms_cache/EPSG900913/1/1/0.jpeg']
        )
        eq_(xml.xpath('/kml:kml/kml:Document/kml:NetworkLink/kml:Link/kml:href/text()',
                      namespaces=ns),
            ['http://localhost/kml/wms_cache/EPSG900913/1/0/1.kml',
             'http://localhost/kml/wms_cache/EPSG900913/1/1/1.kml',
             'http://localhost/kml/wms_cache/EPSG900913/1/0/0.kml',
             'http://localhost/kml/wms_cache/EPSG900913/1/1/0.kml']
        )
        # KML documents use a body-hash ETag and no Last-modified header.
        etag = hashlib.md5(resp.body).hexdigest()
        max_age = base_config().tiles.expires_hours * 60 * 60
        self._check_cache_control_headers(resp, etag, max_age, None)
        resp = self.app.get('/kml/wms_cache/0/0/0.kml',
                            headers={'If-None-Match': etag})
        eq_(resp.status, '304 Not Modified')

    def test_get_kml_init(self):
        """Requesting the bare layer URL returns the same root KML doc."""
        resp = self.app.get('/kml/wms_cache')
        xml = resp.lxml
        assert validate_with_xsd(xml, 'kml/2.2.0/ogckml22.xsd')
        eq_(xml.xpath('/kml:kml/kml:Document/kml:GroundOverlay/kml:Icon/kml:href/text()',
                      namespaces=ns),
            ['http://localhost/kml/wms_cache/EPSG900913/1/0/1.jpeg',
             'http://localhost/kml/wms_cache/EPSG900913/1/1/1.jpeg',
             'http://localhost/kml/wms_cache/EPSG900913/1/0/0.jpeg',
             'http://localhost/kml/wms_cache/EPSG900913/1/1/0.jpeg']
        )
        eq_(xml.xpath('/kml:kml/kml:Document/kml:NetworkLink/kml:Link/kml:href/text()',
                      namespaces=ns),
            ['http://localhost/kml/wms_cache/EPSG900913/1/0/1.kml',
             'http://localhost/kml/wms_cache/EPSG900913/1/1/1.kml',
             'http://localhost/kml/wms_cache/EPSG900913/1/0/0.kml',
             'http://localhost/kml/wms_cache/EPSG900913/1/1/0.kml']
        )

    def test_get_kml_nw(self):
        """KML for a layer with a restricted (north-west) coverage only
        advertises sub-tiles inside that coverage."""
        resp = self.app.get('/kml/wms_cache_nw/1/0/0.kml')
        xml = resp.lxml
        assert validate_with_xsd(xml, 'kml/2.2.0/ogckml22.xsd')
        assert bbox_equals(
            self._bbox(xml.xpath('/kml:kml/kml:Document', namespaces=ns)[0]),
            (-180, -90, 0, 0)
        )
        assert bbox_equals(
            self._bbox(xml.xpath('/kml:kml/kml:Document/kml:GroundOverlay', namespaces=ns)[0]),
            (-180, -66.51326, -90, 0)
        )
        eq_(xml.xpath('/kml:kml/kml:Document/kml:GroundOverlay/kml:Icon/kml:href/text()',
                      namespaces=ns),
            ['http://localhost/kml/wms_cache_nw/EPSG900913/2/0/1.jpeg',
             'http://localhost/kml/wms_cache_nw/EPSG900913/2/1/1.jpeg',
             'http://localhost/kml/wms_cache_nw/EPSG900913/2/0/0.jpeg',
             'http://localhost/kml/wms_cache_nw/EPSG900913/2/1/0.jpeg']
        )
        eq_(xml.xpath('/kml:kml/kml:Document/kml:NetworkLink/kml:Link/kml:href/text()',
                      namespaces=ns),
            ['http://localhost/kml/wms_cache_nw/EPSG900913/2/0/1.kml',
             'http://localhost/kml/wms_cache_nw/EPSG900913/2/1/1.kml',
             'http://localhost/kml/wms_cache_nw/EPSG900913/2/0/0.kml',
             'http://localhost/kml/wms_cache_nw/EPSG900913/2/1/0.kml']
        )

    def test_get_kml2(self):
        """A non-root KML sub-document also validates against the schema."""
        resp = self.app.get('/kml/wms_cache/1/0/1.kml')
        xml = resp.lxml
        assert validate_with_xsd(xml, 'kml/2.2.0/ogckml22.xsd')

    def test_get_kml_multi_layer(self):
        """A multi-layer cache produces KML referencing its EPSG:4326 grid."""
        resp = self.app.get('/kml/wms_cache_multi/1/0/0.kml')
        xml = resp.lxml
        assert validate_with_xsd(xml, 'kml/2.2.0/ogckml22.xsd')
        eq_(xml.xpath('/kml:kml/kml:Document/kml:GroundOverlay/kml:Icon/kml:href/text()',
                      namespaces=ns),
            ['http://localhost/kml/wms_cache_multi/EPSG4326/2/0/1.jpeg',
             'http://localhost/kml/wms_cache_multi/EPSG4326/2/1/1.jpeg',
             'http://localhost/kml/wms_cache_multi/EPSG4326/2/0/0.jpeg',
             'http://localhost/kml/wms_cache_multi/EPSG4326/2/1/0.jpeg']
        )
        eq_(xml.xpath('/kml:kml/kml:Document/kml:NetworkLink/kml:Link/kml:href/text()',
                      namespaces=ns),
            ['http://localhost/kml/wms_cache_multi/EPSG4326/2/0/1.kml',
             'http://localhost/kml/wms_cache_multi/EPSG4326/2/1/1.kml',
             'http://localhost/kml/wms_cache_multi/EPSG4326/2/0/0.kml',
             'http://localhost/kml/wms_cache_multi/EPSG4326/2/1/0.kml']
        )

    def test_get_tile(self):
        """An uncached tile triggers exactly one WMS GetMap request to the
        mocked upstream and is returned as JPEG."""
        with tmp_image((256, 256), format='jpeg') as img:
            expected_req = ({'path': r'/service?LAYERs=foo,bar&SERVICE=WMS&FORMAT=image%2Fjpeg'
                                      '&REQUEST=GetMap&HEIGHT=256&SRS=EPSG%3A900913&styles='
                                      '&VERSION=1.1.1&BBOX=-20037508.3428,-20037508.3428,0.0,0.0'
                                      '&WIDTH=256'},
                            {'body': img.read(), 'headers': {'content-type': 'image/jpeg'}})
            with mock_httpd(('localhost', 42423), [expected_req], bbox_aware_query_comparator=True):
                resp = self.app.get('/kml/wms_cache/1/0/0.jpeg')
                eq_(resp.content_type, 'image/jpeg')
            # Register the new tile so the test harness can clean it up.
            self.created_tiles.append('wms_cache_EPSG900913/01/000/000/000/000/000/000.jpeg')

    def _bbox(self, elem):
        """Extract (w, s, e, n) from a KML Region/LatLonAltBox element."""
        elems = elem.xpath('kml:Region/kml:LatLonAltBox', namespaces=ns)[0]
        n, s, e, w = [float(elem.text) for elem in elems.getchildren()]
        return w, s, e, n
| |
from abc import abstractmethod
from collections import defaultdict
import six
import requests
from google.protobuf.internal.decoder import _DecodeVarint
from ..text import to_unicode
from . import CoreNLP_pb2
from .data import Document, Sentence, Token, Entity
from .protobuf_json import pb2json, json2pb
__author__ = 'kelvinguu, vzhong, wmonroe4, chaganty'
if six.PY2:
from itertools import izip
else:
izip = zip
class AnnotationException(Exception):
    """
    Exception raised when there was an error communicating with the CoreNLP server.

    Base class for all annotation-related errors raised by this module.
    """
    pass
class TimeoutException(AnnotationException):
    """
    Exception raised when the CoreNLP server timed out.

    Subclasses AnnotationException so callers may catch either the specific
    timeout or any annotation failure.
    """
    pass
class CoreNLPClient(object):
    """
    A CoreNLP client to the Stanford CoreNLP server.

    Wraps the server's HTTP interface: raw text is POSTed and annotations
    come back as JSON or as a serialized Document protocol buffer.
    """

    DEFAULT_ANNOTATORS = "tokenize ssplit lemma pos ner depparse".split()

    def __init__(self, server='http://localhost:9000', default_annotators=DEFAULT_ANNOTATORS):
        """
        Constructor.
        :param (str) server: url of the CoreNLP server.
        :param (list[str]) default_annotators: annotators requested when a
            call does not supply its own list.
        :raises AnnotationException: if the server does not answer with a
            successful status.
        """
        self.server = server
        self.default_annotators = default_annotators
        # Fail fast if the server is unreachable.  An explicit raise is used
        # instead of `assert` so the check survives `python -O`.
        if not requests.get(self.server).ok:
            raise AnnotationException(
                'Stanford CoreNLP server was not found at location {}'.format(self.server))

    def _request(self, text, properties):
        """Send a request to the CoreNLP server.
        :param (str | unicode) text: raw text for the CoreNLPServer to parse
        :param (dict) properties: properties that the server expects
        :return: request result
        :raises TimeoutException: when the server reports a timeout
        :raises AnnotationException: for any other HTTP error
        """
        text = to_unicode(text)  # ensures unicode
        # NOTE(review): str(properties) sends Python repr syntax rather than
        # strict JSON; the CoreNLP server's lenient parser accepts it.
        r = requests.post(self.server, params={'properties': str(properties)}, data=text.encode('utf-8'))
        try:
            r.raise_for_status()
            return r
        except requests.HTTPError:
            # The server signals timeouts via this exact message body.
            if r.text == "CoreNLP request timed out. Your document may be too long.":
                raise TimeoutException(r.text)
            else:
                raise AnnotationException(r.text)

    def annotate_json(self, text, annotators=None):
        """Return a JSON dict from the CoreNLP server, containing annotations of the text.
        :param (str) text: Text to annotate.
        :param (list[str]) annotators: a list of annotator names
        :return (dict): a dict of annotations
        """
        # WARN(chaganty): I'd like to deprecate this function -- we
        # should just use annotate().json
        doc = self.annotate(text, annotators)
        return doc.json

    def annotate_proto(self, text, annotators=None):
        """Return a Document protocol buffer from the CoreNLP server, containing annotations of the text.
        :param (str) text: text to be annotated
        :param (list[str]) annotators: a list of annotator names
        :return (CoreNLP_pb2.Document): a Document protocol buffer
        """
        properties = {
            'annotators': ','.join(annotators or self.default_annotators),
            'outputFormat': 'serialized',
            'serializer': 'edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer'
        }
        r = self._request(text, properties)
        buffer = r.content  # bytes
        # The message is framed with a varint length prefix.
        size, pos = _DecodeVarint(buffer, 0)
        buffer = buffer[pos:(pos + size)]
        doc = CoreNLP_pb2.Document()
        doc.ParseFromString(buffer)
        return doc

    def annotate(self, text, annotators=None):
        """Return an AnnotatedDocument from the CoreNLP server.
        :param (str) text: text to be annotated
        :param (list[str]) annotators: a list of annotator names
        See a list of valid annotator names here:
          http://stanfordnlp.github.io/CoreNLP/annotators.html
        :return (AnnotatedDocument): an annotated document
        """
        doc_pb = self.annotate_proto(text, annotators)
        return AnnotatedDocument.from_pb(doc_pb)
class ProtobufBacked(object):
    """An object backed by a Protocol buffer.
    ProtobufBacked objects should keep their constructors private.
    They should be exclusively initialized using `from_pb`.
    """
    # NOTE(review): the two abstract methods below are implemented as
    # classmethods by every subclass in this module and are invoked as such.
    @abstractmethod
    def _get_pb_class(cls):
        """
        Returns associated protobuf class.
        """
        pass
    @abstractmethod
    def _from_pb(cls, pb):
        """Instantiate the object from a protocol buffer.
        Note: this should be a classmethod.
        """
        pass
    @classmethod
    def from_pb(cls, pb):
        """Instantiate the object from a protocol buffer.
        Args:
            pb (protobuf)
        Save a reference to the protocol buffer on the object.
        """
        obj = cls._from_pb(pb)
        obj._pb = pb
        return obj
    @property
    def pb(self):
        """Get the backing protocol buffer."""
        return self._pb
    def __eq__(self, other):
        # Equality delegates to protobuf message equality.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable on Python 3 -- confirm none are used as dict/set keys.
        if not isinstance(other, type(self)):
            return False
        return self.pb == other.pb
    def __ne__(self, other):
        return not self.__eq__(other)
    @property
    def json(self):
        """The object represented as JSON.
        In the future, this should be computed from the protocol buffer. For now, it is manually set.
        """
        try:
            return pb2json(self._pb)
        except AttributeError:
            # _pb missing (object not built via from_pb).
            raise AttributeError('No JSON representation available.')
    def to_json(self):
        """Same as the json property.
        Provided just because people are accustomed to calling `to_json` on objects.
        """
        return self.json
    @classmethod
    def from_json(cls, json_dict):
        # Round-trip: build an empty protobuf of the right class, fill it
        # from the JSON dict, then construct the object normally.
        pb = cls._get_pb_class()()
        json2pb(pb, json_dict)
        return cls.from_pb(pb)
class AnnotatedDocument(Document, ProtobufBacked):
    """
    A shim over the Document protobuffer exposing key methods.
    """
    # ProtobufBacked methods
    @classmethod
    def _get_pb_class(cls):
        return CoreNLP_pb2.Document
    @classmethod
    def _from_pb(cls, pb):
        return cls(pb)
    def __init__(self, pb):
        """Keep this method private; instantiate via from_pb()."""
        self._sentences = [AnnotatedSentence.from_pb(sent_pb) for sent_pb in pb.sentence]
        # Back-pointer so sentences can reach their siblings.
        for sent in self._sentences:
            sent.document = self
        self._mentions = self.__construct_mentions(pb)
    def __construct_mentions(self, pb):
        """Collect entity mentions from NER spans and coref chains.

        NER-derived mentions are gathered first; a coref mention that
        coincides with one of them (same sentence index and head token)
        reuses the existing entity instead of creating a duplicate.
        """
        mentions = []
        # Get from NER sequence because they tend to be nicer for name
        # mentions. And people only care about name mentions.
        for sentence in self:
            for mention in AnnotatedEntity.from_ner(sentence):
                mentions.append(mention)
        # Get from coref chain
        for chain in pb.corefChain:
            chain_mentions = []
            for mention_pb in chain.mention:
                # If this mention refers to a mention that already
                # exists, use the NER mention instead.
                try:
                    entity = next(mention for mention in mentions if mention.sentence.sentenceIndex == mention_pb.sentenceIndex and mention.head_token == mention_pb.headIndex)
                except StopIteration:
                    entity = AnnotatedEntity(
                        self.sentences[mention_pb.sentenceIndex],
                        (mention_pb.beginIndex, mention_pb.endIndex),
                        mention_pb.headIndex
                        )
                    mentions.append(entity)
                chain_mentions.append(entity)
            # Point every non-representative mention at the chain's
            # representative (canonical) mention.
            rep_mention = chain_mentions[chain.representative]
            for mention in chain_mentions:
                if mention != rep_mention:
                    mention._canonical_entity = rep_mention
        return mentions
    def __getitem__(self, i):
        return self._sentences[i]
    def __len__(self):
        return len(self._sentences)
    def __str__(self):
        return self.pb.text
    def __repr__(self):
        PREVIEW_LEN = 50
        return "[Document: {}]".format(self.pb.text[:PREVIEW_LEN] + ("..." if len(self.pb.text) > PREVIEW_LEN else ""))
    @staticmethod
    def _reconstruct_text_from_sentence_pbs(sentence_pbs):
        """Rebuild document text from sentence protobufs, keeping whitespace."""
        before = lambda sentence_pb: sentence_pb.token[0].before
        after = lambda sentence_pb: sentence_pb.token[-1].after
        text = []
        for i, sent in enumerate(sentence_pbs):
            if i == 0:
                text.append(before(sent))
            text.append(sent.text)
            text.append(after(sent))
        return ''.join(text)
    @property
    def doc_id(self):
        return self.pb.docID
    @property
    def text(self):
        # Prefer the text stored on the protobuf; otherwise reconstruct it
        # from the sentences and their surrounding whitespace.
        if len(self.pb.text) != 0:
            return self.pb.text
        before = lambda sent: sent[0].before
        after = lambda sent: sent[len(sent) - 1].after
        text = []
        for i, sent in enumerate(self):
            if i == 0:
                text.append(before(sent))
            text.append(sent.text)
            text.append(after(sent))
        return ''.join(text)
    def __getattr__(self, attr):
        """
        If you are looking for an entry in the protobuf that hasn't been
        defined above, this will access it.
        """
        if attr == "_pb":
            # Fixed: the original `AttributeError("_pb" is not set)` evaluated
            # the identity test `"_pb" is not set` (always True) instead of
            # passing a message string.
            raise AttributeError("_pb is not set")
        return getattr(self.pb, attr)
    @property
    def character_span(self):
        """
        Returns the (begin, end) character span of the document.
        """
        return (self._sentences[0].character_span[0], self._sentences[-1].character_span[1])
    @property
    def sentences(self):
        return self._sentences
    @property
    def mentions(self):
        """
        Returns all coreferent mentions (as lists of entities)
        """
        return self._mentions
# These are features that are not yet supported. In the meantime,
# users can work with the raw protobuf directly.
# TODO(kelvin): finish specifying the Simple interface for AnnotatedSentence
# http://stanfordnlp.github.io/CoreNLP/simple.html
# In particular, all the methods that take arguments.
# TODO(kelvin): protocol buffers insert undesirable default values. Deal with these somehow.
class AnnotatedSentence(Sentence, ProtobufBacked):
    """A sentence backed by a CoreNLP Sentence protocol buffer."""
    # ProtobufBacked methods
    @classmethod
    def _get_pb_class(cls):
        return CoreNLP_pb2.Sentence
    @classmethod
    def _from_pb(cls, pb):
        # Fill in the text attribute if needed.
        return cls(pb)
    def __init__(self, pb):
        """Keep this method private; instantiate via from_pb()."""
        self._tokens = [AnnotatedToken.from_pb(tok_pb) for tok_pb in pb.token]
    @classmethod
    def from_tokens(cls, text, toks, pos_toks = None):
        """
        A helper method that allows you to construct an AnnotatedSentence with just token information:
        :param (str) text -- full text of the sentence.
        :param (list[str]) toks -- tokens
        :param (list[str]) pos_toks -- optional POS tags, parallel to toks
        """
        sentence_pb = CoreNLP_pb2.Sentence()
        sentence_pb.characterOffsetBegin = 0
        sentence_pb.characterOffsetEnd = len(text)
        sentence_pb.sentenceIndex = 0
        sentence_pb.tokenOffsetBegin = 0
        sentence_pb.tokenOffsetEnd = len(toks)
        # Map PTB escape tokens back to their literal characters so they can
        # be located in the raw text.
        TOK_MAP = {
            "``" : '"',
            "''" : '"',
            "-LRB-" : '(',
            "-RRB-" : ')',
            "-LSB-" : '[',
            "-RSB-" : ']',
            "-LCB-" : '{',
            "-RCB-" : '}',
            }
        # TODO: handle ` -> ' only sometimes.
        # Track progress in sentence and tokens.
        char_idx = 0
        tok_idx = 0
        buf = ""
        token_pb = None
        while char_idx < len(text):
            assert tok_idx < len(toks), "text has more tokens than input"
            tok = toks[tok_idx]
            pos_tok = pos_toks and pos_toks[tok_idx]
            tok_text = TOK_MAP.get(tok, tok)
            # Scan to the beginning of the token, accumulating skipped
            # characters (whitespace) into `buf`.
            if text[char_idx] != tok_text[0]:
                buf += text[char_idx]
                char_idx += 1
            # Aha! we have found the token. Assert that they match.
            else:
                assert text[char_idx:char_idx+len(tok_text)] == tok_text, "text did not match a token"
                # Create a new Token from this text; `buf` becomes the
                # whitespace between the previous token and this one.
                if token_pb: token_pb.after = buf
                token_pb = sentence_pb.token.add()
                token_pb.before = buf
                token_pb.beginChar = char_idx
                token_pb.endChar = char_idx + len(tok_text)
                token_pb.value = tok
                token_pb.word = tok
                if pos_tok is not None: token_pb.pos = pos_tok
                token_pb.originalText = text[char_idx:char_idx+len(tok_text)]
                buf = ""
                char_idx += len(tok_text)
                tok_idx += 1
        if token_pb: token_pb.after = buf
        assert tok_idx == len(toks), "text does not match all tokens"
        return AnnotatedSentence.from_pb(sentence_pb)
    @property
    def document(self):
        """The parent AnnotatedDocument (set by the document on construction)."""
        try:
            return self._document
        except AttributeError:
            raise AttributeError("Document has not been set.")
    @document.setter
    def document(self, val):
        self._document = val
    @classmethod
    def _reconstruct_text_from_token_pbs(cls, token_pbs):
        """Rebuild text from token protobufs, keeping inter-token whitespace."""
        text = []
        for i, tok in enumerate(token_pbs):
            if i != 0:
                text.append(tok.before)
            text.append(tok.word)
        return ''.join(text)
    @ProtobufBacked.json.setter
    def json(self, json_dict):
        # NOTE(review): the inherited getter computes from the protobuf and
        # never reads self._json -- confirm the stored value is intentional.
        self._json = json_dict
        # propagate JSON to children
        for tok, tok_json in izip(self._tokens, json_dict['tokens']):
            tok.json = tok_json
    def __getitem__(self, i):
        return self._tokens[i]
    def __len__(self):
        return len(self._tokens)
    def __str__(self):
        if six.PY2:
            return self.text.encode('utf-8')
        else:
            # encode in py3 returns a byte string.
            return self.text
    def __unicode__(self):
        return self.text
    def __repr__(self):
        PREVIEW_LEN = 50
        # Fixed: the ellipsis test previously measured len(self.pb.text),
        # which is 0 whenever the text is reconstructed from tokens, so the
        # "..." marker was never shown for such sentences.
        return "[Sentence: {}]".format(self.text[:PREVIEW_LEN] + ("..." if len(self.text) > PREVIEW_LEN else ""))
    @property
    def paragraph(self):
        """
        Returns the paragraph index.
        """
        return self.pb.paragraph
    @property
    def sentenceIndex(self):
        """
        Returns the sentence index within the document.
        """
        return self.pb.sentenceIndex
    def next_sentence(self):
        """
        Returns the next sentence
        """
        return self.document[self.sentenceIndex + 1]
    def previous_sentence(self):
        """
        Returns the previous sentence.
        NOTE(review): for the first sentence this returns the LAST sentence
        (negative indexing) -- confirm callers guard against index 0.
        """
        return self.document[self.sentenceIndex - 1]
    def word(self, i):
        return self._tokens[i].word
    @property
    def before(self):
        return self._tokens[0].before
    @property
    def after(self):
        return self._tokens[-1].after
    @property
    def words(self):
        return [tok.word for tok in self._tokens]
    @property
    def text(self):
        # Prefer text stored on the protobuf; otherwise reconstruct from
        # tokens and their inter-token whitespace.
        if len(self.pb.text) != 0:
            return self.pb.text
        text = []
        for i, tok in enumerate(self):
            if i != 0:
                text.append(tok.before)
            text.append(tok.word)
        return ''.join(text)
    def text_for_tokens(self, tokens):
        """Reconstruct the text covering the given token indices."""
        text = []
        for i in tokens:
            tok = self.tokens[i]
            if i != 0:
                text.append(tok.before)
            text.append(tok.word)
        return ''.join(text)
    def pos_tag(self, i):
        return self._tokens[i].pos
    @property
    def pos_tags(self):
        return [tok.pos for tok in self._tokens]
    def lemma(self, i):
        return self._tokens[i].lemma
    @property
    def lemmas(self):
        return [tok.lemma for tok in self._tokens]
    def ner_tag(self, i):
        return self._tokens[i].ner
    @property
    def ner_tags(self):
        return [tok.ner for tok in self._tokens]
    @property
    def tokens(self):
        return self._tokens
    def token(self, i):
        return self._tokens[i]
    def depparse(self, mode="enhancedPlusPlus"):
        """
        Retrieves the appropriate dependency parse.
        Must be one of:
          - basic
          - alternative
          - collapsedCCProcessed
          - collapsed
          - enhanced
          - enhancedPlusPlus
        """
        assert mode in [
            "basic",
            "alternative",
            "collapsedCCProcessed",
            "collapsed",
            "enhanced",
            "enhancedPlusPlus", ], "Invalid mode"
        dep_pb = getattr(self.pb, mode + "Dependencies")
        if dep_pb is None:
            raise AttributeError("No dependencies for mode: " + mode)
        else:
            tree = AnnotatedDependencyParseTree(dep_pb)
            tree.sentence = self
            return tree
    @property
    def character_span(self):
        """
        Returns the (begin, end) character span of the sentence.
        """
        return (self._tokens[0].character_span[0], self._tokens[-1].character_span[1])
    def __getattr__(self, attr):
        """Fall through to the protobuf for attributes not defined above."""
        if attr == "_pb":
            # Fixed: the original raised AttributeError("_pb" is not set),
            # which evaluates an identity test instead of passing a message.
            raise AttributeError("_pb is not set")
        return getattr(self.pb, attr)
    # @property
    # def parse(self):
    #     raise NotImplementedError()
    # @property
    # def natlog_polarities(self):
    #     raise NotImplementedError
    # @property
    # def relations(self, mode="kbp"):
    #     """
    #     Returns any relations found by the annotators.
    #     Valid modes are:
    #     - kbp
    #     - openie
    #     - relation (?)
    #     """
    #     raise NotImplementedError()
    # @property
    # def openie(self):
    #     raise NotImplementedError
    # @property
    # def openie_triples(self):
    #     raise NotImplementedError
    # @property
    # def mentions(self):
    #     """
    #     Supposed to return mentions contained in the sentence.
    #     """
    #     raise NotImplementedError
class AnnotatedToken(Token, ProtobufBacked):
    """A token backed by a CoreNLP Token protobuf; all accessors are
    read-only pass-throughs to the underlying message."""
    # ProtobufBacked methods
    @classmethod
    def _get_pb_class(cls):
        return CoreNLP_pb2.Token
    @classmethod
    def _from_pb(cls, pb):
        # The pb itself is attached afterwards by ProtobufBacked.from_pb.
        return cls()
    def __str__(self):
        return self.pb.word
    def __repr__(self):
        return "[Token: {}]".format(self.pb.word)
    @property
    def word(self):
        return self.pb.word
    @property
    def pos(self):
        # Part-of-speech tag.
        return self.pb.pos
    @property
    def ner(self):
        # Named-entity tag.
        return self.pb.ner
    @property
    def lemma(self):
        return self.pb.lemma
    @property
    def originalText(self):
        return self.pb.originalText
    @property
    def before(self):
        # Whitespace/text preceding the token.
        return self.pb.before
    @property
    def after(self):
        # Whitespace/text following the token.
        return self.pb.after
    @property
    def normalized_ner(self):
        return self.pb.normalizedNER
    @property
    def wikipedia_entity(self):
        return self.pb.wikipediaEntity
    @property
    def character_span(self):
        """
        Returns the (begin, end) character span of the token
        """
        return (self.pb.beginChar, self.pb.endChar)
class AnnotatedDependencyParseTree(ProtobufBacked):
    """
    Represents a dependency parse tree
    """
    @classmethod
    def _get_pb_class(cls):
        return CoreNLP_pb2.DependencyGraph
    @classmethod
    def _from_pb(cls, pb):
        return cls(pb)
    def __init__(self, pb):
        self._pb = pb
        self._roots = [r-1 for r in pb.root] # Dependency parses are +1 indexed in the pb.
        # graph maps governor -> [(dependent, label)]; inv_graph is the reverse.
        self.graph, self.inv_graph = AnnotatedDependencyParseTree._parse_graph(pb.edge)
        self._sentence = None
    def __str__(self):
        return "Dep: {}".format(self.text)
    def __repr__(self):
        return "[Dep: {}]".format(self.text)
    @staticmethod
    def _parse_graph(edges):
        # Convert the protobuf's 1-based edge list into 0-based adjacency maps.
        graph = defaultdict(list)
        inv_graph = defaultdict(list)
        for edge in edges:
            graph[edge.source-1].append((edge.target-1, edge.dep))
            inv_graph[edge.target-1].append((edge.source-1, edge.dep))
        return graph, inv_graph
    def to_json(self):
        """
        Represented as a list of edges:
            dependent: index of child
            dep: dependency label
            governer: index of parent
            dependentgloss: gloss of child
            governergloss: gloss of parent
        NOTE(review): the 'governer' misspelling is part of the emitted JSON
        schema; keep it for compatibility with existing consumers.
        """
        edges = []
        # Roots are emitted as edges from a virtual node 0.
        for root in self.roots:
            edges.append({
                'governer': 0,
                'dep': "root",
                'dependent': root+1,
                'governergloss': "root",
                'dependentgloss': self.sentence[root].word,
            })
        for gov, dependents in self.graph.items():
            for dependent, dep in dependents:
                edges.append({
                    'governer': gov+1,
                    'dep': dep,
                    'dependent': dependent+1,
                    'governergloss': self.sentence[gov].word,
                    'dependentgloss': self.sentence[dependent].word,
                })
        return edges
    @classmethod
    def from_graph(cls, graph, roots, sentence=None, language=CoreNLP_pb2.Language.Value('UniversalEnglish')):
        """
        Creates a new dependency parse tree PB from the graph structure.
        """
        sentence_index = (sentence and sentence.sentenceIndex) or 0
        pb = CoreNLP_pb2.DependencyGraph()
        for i in sorted(graph.keys()): # Get all the tokens from the graph.
            n = pb.node.add()
            n.sentenceIndex = sentence_index
            n.index = i+1 # indices are +1 indexed in the PB...
            for j, l in graph[i]:
                e = pb.edge.add()
                e.source = i+1
                e.target = j+1
                e.dep = l
                e.language = language
        for idx, i in enumerate(roots):
            pb.root.insert(idx, i+1)
        dp = AnnotatedDependencyParseTree._from_pb(pb)
        dp.sentence = sentence
        return dp
    @property
    def sentence(self):
        return self._sentence
    @sentence.setter
    def sentence(self, val):
        self._sentence = val
    @property
    def tokens(self):
        # All token indices reachable from any root, in sorted order.
        return sorted(set.union(*[self.descendants(i) for i in self.roots]))
    @property
    def roots(self):
        return self._roots
    def parents(self, i):
        """
        Return the parent of this node; guaranteed to only be one.
        NOTE(review): actually returns the full list of (parent, label)
        pairs from inv_graph -- confirm single-parent assumption.
        """
        return self.inv_graph[i]
    def children(self, i):
        # (child, label) pairs governed by node i.
        return self.graph[i]
    def ancestors(self, i, stop=None):
        """
        @returns the list of indices of recursive parents for i and [] if no parents.
        """
        stop = stop or []
        ret = [i]
        q = [j for j,_ in self.inv_graph[i]]
        while len(q) > 0:
            i = q.pop()
            parents = [j for j,_ in self.inv_graph[i] if j not in stop]
            ret += parents
            q += parents
        return ret
    def descendants(self, i, stop=None):
        """
        @returns the list of recursive children for i; [] if no children. Ignore any children in the set @stop
        """
        stop = stop or []
        # Have to de-duplicate because _sometimes_ the parse "tree" is
        # actually a DAG.
        ret = set([i])
        q = [i]
        while len(q) > 0:
            i = q.pop()
            children = [j for j,_ in self.graph[i] if j not in stop and j not in ret]
            q += children
            ret.update(children)
        return ret
    @property
    def text(self):
        """
        Linearizes the string by printing out tokens in the subtree..
        """
        return self.sentence.text_for_tokens(self.tokens)
class AnnotatedEntity(Entity):
    """
    An entity mention: a token span within a sentence, with a head token
    and an optional canonical (coreferent) entity.
    """
    def __str__(self):
        return self._gloss
    def __repr__(self):
        return "[Entity: {}]".format(self._gloss)
    def __init__(self, sentence, token_span, head_token):
        """
        @arg sentence: the AnnotatedSentence containing this mention
        @arg token_span: (begin, end) token indices, end exclusive
        @arg head_token: index of the head token within the sentence
        """
        self._sentence = sentence
        self._token_span = token_span
        self._head_token = head_token
        token_pbs = sentence.pb.token[token_span[0]:token_span[1]]
        self._gloss = AnnotatedEntity._reconstruct_text_from_token_pbs(token_pbs)
        self._canonical_entity = None
    @classmethod
    def from_ner(cls, sentence):
        """Yield one entity per maximal run of identical non-'O' NER tags."""
        # Every change in token type, could be a new entity.
        start_idx, current_ner = 0, 'O'
        for idx, token in enumerate(sentence):
            if token.ner != current_ner:
                if current_ner != 'O':
                    end_idx = idx
                    head_idx = end_idx-1
                    yield AnnotatedEntity(sentence, (start_idx, end_idx), head_idx)
                current_ner = token.ner
                start_idx = idx
        # Flush a run that extends to the end of the sentence.
        if current_ner != 'O':
            end_idx = len(sentence)
            head_idx = end_idx-1
            yield AnnotatedEntity(sentence, (start_idx, end_idx), head_idx)
    @classmethod
    def _reconstruct_text_from_token_pbs(cls, token_pbs):
        # Rebuild the mention text, keeping inter-token whitespace.
        text = []
        for i, tok in enumerate(token_pbs):
            if i != 0:
                text.append(tok.before)
            text.append(tok.word)
        return ''.join(text)
    @property
    def sentence(self):
        """Returns the referring sentence"""
        return self._sentence
    @property
    def token_span(self):
        """Returns the (begin, end) token index span, end exclusive."""
        return self._token_span
    @property
    def head_token(self):
        """Returns the index of the head token."""
        return self._head_token
    @property
    def character_span(self):
        """
        Returns the character span of the token
        """
        begin, end = self.token_span
        return (self.sentence[begin].character_span[0], self.sentence[end-1].character_span[-1])
    @property
    def type(self):
        """Returns the NER type of the head token."""
        return self.sentence[self.head_token].ner
    @property
    def gloss(self):
        """Returns the exact string of the entity"""
        return self._gloss
    @property
    def canonical_entity(self):
        """Returns the canonical coreferent entity, or self if there is none."""
        if self._canonical_entity:
            return self._canonical_entity
        else:
            return self
# TODO(kelvin): sentence and doc classes that lazily perform annotations
class LazyDocument(Document):
    """Placeholder for a document that performs annotations lazily.

    NOTE(review): previously subclassed Sentence, which contradicts both the
    class name and the adjoining TODO; changed the base to Document.
    """
    pass
class LazySentence(Sentence):
    """Placeholder for a sentence that performs annotations lazily."""
    pass
| |
#! /usr/bin/env python
# coding: utf-8
import bisect
import Queue
try:
    # bad performance on my laptop(windows xp)
    # blist is strictly optional; fall back to plain lists when missing.
    from blist import blist
except ImportError:
    # Only swallow a missing module -- a bare except would also hide
    # KeyboardInterrupt/SystemExit and real errors inside blist.
    pass
################################### B-Tree #####################################
# based on chapter 18 of Introduction to Algorithms (Second Edition)
class BNode(object):
    """A single B-tree node: sorted keys, parallel values, and children."""
    def __init__(self):
        # A deque might help for edge inserts, but lists keep bisect simple.
        self.keys = []
        self.values = []
        self.children = []
    def is_leaf(self):
        """True when this node has no children."""
        return len(self.children) == 0
    def min(self):
        """Return the leftmost descendant leaf (holds the smallest key)."""
        current = self
        while not current.is_leaf():
            current = current.children[0]
        return current
    def max(self):
        """Return the rightmost descendant leaf (holds the largest key)."""
        current = self
        while not current.is_leaf():
            current = current.children[-1]
        return current
    def __str__(self):
        pairs = ['{%s:%s}' % pair for pair in zip(self.keys, self.values)]
        return '|%s|' % ' '.join(pairs)
    __repr__ = __str__
class BTree(object):
    """In-memory B-tree mapping keys to values (CLRS chapter 18).
    Minimum degree t = `degree`: every node except the root holds between
    t-1 and 2t-1 keys.  The disk_read/disk_write calls from the book are
    commented out since everything lives in memory.
    """
    def __init__(self, degree = 3):
        self.degree = degree
        self.root = BNode()
        self._minkeys = self.degree - 1
        self._minchildren = self.degree
        self._maxkeys = 2 * self.degree - 1
        self._maxchildren = 2 * self.degree
        #self.disk_write(self.root)
    def search(self, node, key):
        """Return (node, index) locating `key` below `node`, or (None, None)."""
        i = bisect.bisect_left(node.keys, key)
        if i < len(node.keys) and key == node.keys[i]:
            return (node, i)
        if node.is_leaf():
            return (None, None)
        else:
            # self.disk_read(node.children[i])
            return self.search(node.children[i], key)
    def ceiling(self, node, key):
        """Return the smallest stored key >= `key`; if `key` is larger than
        everything in the leaf reached, the leaf's largest key is returned."""
        i = bisect.bisect_left(node.keys, key)
        if i < len(node.keys) and key == node.keys[i]:
            return key
        if node.is_leaf():
            if i == len(node.keys):
                return node.keys[-1]
            return node.keys[i]
        else:
            return self.ceiling(node.children[i], key)
    def split_child(self, x, i, y):
        """Split the full child `y` (== x.children[i]) around its median key,
        pushing the median up into parent `x`."""
        z = BNode()
        z.keys = y.keys[self.degree:]
        z.values = y.values[self.degree:]
        if not y.is_leaf():
            z.children = y.children[self.degree:]
        x.children.insert(i+1, z)
        x.keys.insert(i, y.keys[self.degree-1])
        x.values.insert(i, y.values[self.degree-1])
        y.keys = y.keys[:self.degree-1]
        y.values = y.values[:self.degree-1]
        y.children = y.children[:self.degree]
        #self.disk_write(y)
        #self.disk_write(z)
        #self.disk_write(x)
    def insert(self, key, value):
        """Insert key/value, splitting the root first when it is full."""
        if len(self.root.keys) == self._maxkeys:
            oldroot = self.root
            self.root = BNode()
            self.root.children.append(oldroot)
            self.split_child(self.root, 0, oldroot)
            self.insert_nonfull(self.root, key, value)
        else:
            self.insert_nonfull(self.root, key, value)
    def insert_nonfull(self, x, key, value):
        """Insert into the subtree rooted at the non-full node `x` in a
        single downward pass, splitting full children along the way."""
        # performance bottleneck fixed by bisect
        #while i > 0 and key < x.keys[i-1]:
        #    i -= 1
        i = bisect.bisect_left(x.keys, key)
        if x.is_leaf():
            x.keys.insert(i, key)
            x.values.insert(i, value)
            #self.disk_write(x)
        else:
            #self.disk_read(x.children[i])
            if len(x.children[i].keys) == self._maxkeys:
                self.split_child(x, i, x.children[i])
                if key > x.keys[i]:
                    i += 1
            self.insert_nonfull(x.children[i], key, value)
    def delete(self, key):
        self._delete(self.root, key)
    def _delete(self, node, key):
        """Recursive CLRS delete: every node descended into is first topped
        up to at least `degree` keys so the final removal cannot underflow.
        NOTE(review): deleting a key that is not in the tree reaches a leaf
        and indexes node.children[ci] (IndexError) -- confirm callers only
        delete existing keys."""
        if key in node.keys:
            if node.is_leaf():
                # Case 1: the key sits in a leaf -- remove it directly.
                index = node.keys.index(key)
                node.keys.pop(index)
                node.values.pop(index)
            else:
                ki = node.keys.index(key)
                if len(node.children[ki].keys) >= self.degree:
                    # Case 2a: replace with the predecessor from the left child.
                    nmax = node.children[ki].max()
                    kp = nmax.keys[-1]
                    vp = nmax.values[-1]
                    self._delete(node.children[ki], kp)
                    node.keys[ki] = kp
                    node.values[ki] = vp
                elif len(node.children[ki+1].keys) >= self.degree:
                    # Case 2b: replace with the successor from the right child.
                    nmin = node.children[ki+1].min()
                    kp = nmin.keys[0]
                    vp = nmin.values[0]
                    self._delete(node.children[ki+1], kp)
                    node.keys[ki] = kp
                    node.values[ki] = vp
                else:
                    # Case 2c: merge key and right child into the left child.
                    node.children[ki].keys.append(node.keys.pop(ki))
                    node.children[ki].values.append(node.values.pop(ki))
                    rnode = node.children.pop(ki+1)
                    node.children[ki].keys.extend(rnode.keys)
                    node.children[ki].values.extend(rnode.values)
                    node.children[ki].children.extend(rnode.children)
                    if node == self.root and not node.keys:
                        # The merge emptied the root: shrink the tree height.
                        self.root = node.children[ki]
                    self._delete(node.children[ki], key)
        else:
            ci = bisect.bisect_left(node.keys, key)
            if len(node.children[ci].keys) == self._minkeys:
                # Case 3: the child on the search path is minimal --
                # borrow from a sibling or merge before descending.
                if ci >= 1 and len(node.children[ci-1].keys) > self._minkeys:
                    # 3a (left): rotate a key from the left sibling through
                    # the parent into the child.
                    node.children[ci].keys.insert(0, node.keys[ci-1])
                    node.children[ci].values.insert(0, node.values[ci-1])
                    node.keys[ci-1] = node.children[ci-1].keys.pop(-1)
                    node.values[ci-1] = node.children[ci-1].values.pop(-1)
                    node.children[ci].children = node.children[ci-1].children[-1:] + node.children[ci].children
                    node.children[ci-1].children = node.children[ci-1].children[:-1]
                    self._delete(node.children[ci], key)
                elif ci < len(node.keys) and len(node.children[ci+1].keys) > self._minkeys:
                    # 3a (right): rotate a key from the right sibling.
                    node.children[ci].keys.append(node.keys[ci])
                    node.children[ci].values.append(node.values[ci])
                    node.keys[ci] = node.children[ci+1].keys.pop(0)
                    node.values[ci] = node.children[ci+1].values.pop(0)
                    node.children[ci].children.extend(node.children[ci+1].children[:1])
                    node.children[ci+1].children = node.children[ci+1].children[1:]
                    self._delete(node.children[ci], key)
                else:
                    # 3b: both siblings minimal -- merge with one of them.
                    if ci >= 1:
                        node.children[ci-1].keys.append(node.keys.pop(ci-1))
                        node.children[ci-1].values.append(node.values.pop(ci-1))
                        rnode = node.children.pop(ci)
                        node.children[ci-1].keys.extend(rnode.keys)
                        node.children[ci-1].values.extend(rnode.values)
                        node.children[ci-1].children.extend(rnode.children)
                        if node == self.root and not node.keys:
                            self.root = node.children[ci-1]
                        self._delete(node.children[ci-1], key)
                    else:
                        node.children[ci].keys.append(node.keys.pop(ci))
                        node.children[ci].values.append(node.values.pop(ci))
                        rnode = node.children.pop(ci+1)
                        node.children[ci].keys.extend(rnode.keys)
                        node.children[ci].values.extend(rnode.values)
                        node.children[ci].children.extend(rnode.children)
                        if node == self.root and not node.keys:
                            self.root = node.children[ci]
                        self._delete(node.children[ci], key)
            else:
                self._delete(node.children[ci], key)
    def keys(self, kmin = None, kmax = None):
        """Return all keys k with kmin <= k <= kmax (defaults: whole tree)."""
        keys = []
        if kmin is None:
            kmin = self.min()
        if kmax is None:
            kmax = self.max()
        return self._keys(self.root, kmin, kmax, keys)
    def _keys(self, node, kmin, kmax, keys):
        """return [k for k in allkeys if kmin <= k <= kmax]
        NOTE(review): children are visited before this node's own keys, so
        the result is NOT in sorted order for multi-level trees -- confirm
        whether sorted output is expected."""
        imin = bisect.bisect_left(node.keys, kmin)
        imax = bisect.bisect(node.keys, kmax)
        if node.children:
            for e in node.children[imin:imax+1]:
                self._keys(e, kmin, kmax, keys)
        keys.extend(node.keys[imin:imax])
        return keys
    def iterkeys(self, kmin = None, kmax = None):
        """Generator version of keys(); same (non-sorted) visit order."""
        if kmin is None:
            kmin = self.min()
        if kmax is None:
            kmax = self.max()
        return self._iterkeys(self.root, kmin, kmax)
    def _iterkeys(self, node, kmin, kmax):
        """return [k for k in allkeys if kmin <= k <= kmax]"""
        imin = bisect.bisect_left(node.keys, kmin)
        imax = bisect.bisect(node.keys, kmax)
        if node.children:
            for e in node.children[imin:imax+1]:
                for k in self._iterkeys(e, kmin, kmax):
                    yield k
        for i in xrange(imin, imax):
            yield node.keys[i]
    def values(self, kmin = None, kmax = None):
        """Return the values whose keys fall in [kmin, kmax]."""
        values = []
        if kmin is None:
            kmin = self.min()
        if kmax is None:
            kmax = self.max()
        return self._values(self.root, kmin, kmax, values)
    def _values(self, node, kmin, kmax, values):
        """return [v for k in allkeys if kmin <= k <= kmax]"""
        imin = bisect.bisect_left(node.keys, kmin)
        imax = bisect.bisect(node.keys, kmax)
        if node.children:
            for e in node.children[imin:imax+1]:
                self._values(e, kmin, kmax, values)
        values.extend(node.values[imin:imax])
        return values
    def itervalues(self, kmin = None, kmax = None):
        """Generator version of values()."""
        if kmin is None:
            kmin = self.min()
        if kmax is None:
            kmax = self.max()
        return self._itervalues(self.root, kmin, kmax)
    def _itervalues(self, node, kmin, kmax):
        """return [k for k in allkeys if kmin <= k <= kmax]"""
        imin = bisect.bisect_left(node.keys, kmin)
        imax = bisect.bisect(node.keys, kmax)
        if node.children:
            for e in node.children[imin:imax+1]:
                for v in self._itervalues(e, kmin, kmax):
                    yield v
        for i in xrange(imin, imax):
            yield node.values[i]
    def items(self, kmin = None, kmax = None):
        """Return (key, value) pairs whose keys fall in [kmin, kmax]."""
        items = []
        if kmin is None:
            kmin = self.min()
        if kmax is None:
            kmax = self.max()
        return self._items(self.root, kmin, kmax, items)
    def _items(self, node, kmin, kmax, items):
        """return [(k,v) for k in allkeys if kmin <= k <= kmax]"""
        imin = bisect.bisect_left(node.keys, kmin)
        imax = bisect.bisect(node.keys, kmax)
        if node.children:
            for e in node.children[imin:imax+1]:
                self._items(e, kmin, kmax, items)
        items.extend(zip(node.keys[imin:imax], node.values[imin:imax]))
        return items
    def iteritems(self, kmin = None, kmax = None):
        """Generator version of items()."""
        if kmin is None:
            kmin = self.min()
        if kmax is None:
            kmax = self.max()
        return self._iteritems(self.root, kmin, kmax)
    def _iteritems(self, node, kmin, kmax):
        """return [k for k in allkeys if kmin <= k <= kmax]"""
        imin = bisect.bisect_left(node.keys, kmin)
        imax = bisect.bisect(node.keys, kmax)
        if node.children:
            for e in node.children[imin:imax+1]:
                for i in self._iteritems(e, kmin, kmax):
                    yield i
        for i in xrange(imin, imax):
            yield (node.keys[i], node.values[i])
    def min(self):
        """Smallest key in the tree (IndexError on an empty tree)."""
        node = self.root
        while node.children:
            node = node.children[0]
        return node.keys[0]
    def max(self):
        """Largest key in the tree (IndexError on an empty tree)."""
        node = self.root
        while node.children:
            node = node.children[-1]
        return node.keys[-1]
    def bft(self, node, level = 1):
        """Breadth first traversal."""
        q = Queue.Queue()
        level = level
        q.put((level, node))
        while not q.empty():
            level, node = q.get()
            yield (level, node)
            for e in node.children:
                q.put((level+1, e))
    def levels(self):
        """Map level number -> list of nodes at that level (BFS order)."""
        leveldict = {}
        for level, node in self.bft(self.root):
            leveldict.setdefault(level, []).append(node)
        return leveldict
    def pprint(self, width = 80):
        """Print one centred line per tree level (debugging aid)."""
        leveldict = self.levels()
        keys = leveldict.keys()
        for k in keys:
            print ' '.join(str(e) for e in leveldict[k]).center(width)
    def __setitem__(self, k, v):
        self.insert(k, v)
    def __getitem__(self, k):
        # Returns None for a missing key (indistinguishable from a stored None).
        node, i = self.search(self.root, k)
        if node:
            return node.values[i]
        else:
            return None
    def __delitem__(self, k):
        self._delete(self.root, k)
def test_BTree():
    """Smoke-test/demo: build a small tree, delete, and print every query API."""
    b = BTree(2)
    kv = [
        (0, 'zero'),
        (8, 'eight'),
        (9, 'nine'),
        (1, 'one'),
        (7, 'seven'),
        (2, 'two'),
        (6, 'six'),
        (3, 'three'),
        (5, 'five'),
        (4, 'four'),
        (10, 'ten'),
        (11, 'eleven'),
        (12, 'twelve'),
        ]
    for k, v in kv:
        b[k] = v
    n,i = b.search(b.root, 3)
    print n.keys, n.values
    b.pprint()
    del b[12]
    b.pprint()
    print 'min key:  ', b.min()
    print 'max key:  ', b.max()
    print 'ceiling:  ', b.ceiling(b.root, 9.4)
    print 'keys :', b.keys()
    print 'iterkeys()          :', list(b.iterkeys())
    print 'keys(min, max)      :', b.keys(3.4, 7.9)
    print 'iterkeys(min, max)  :', list(b.iterkeys(3.4, 7.9))
    print 'values()            :', b.values()
    print 'itervalues()        :', list(b.itervalues())
    print 'values(min, max)    :', b.values(3.4, 7.9)
    print 'itervalues(min, max):', list(b.itervalues(3.4, 7.9))
    print 'items()             :', b.items()
    print 'iteritems()         :', list(b.iteritems())
    print 'items(min, max)     :', b.items(3.4, 7.9)
    print 'iteritems(min, max) :', list(b.iteritems(3.4, 7.9))
################################### B+Tree #####################################
from itertools import izip_longest
class BPNode(object):
    """A B+-tree node; leaves are chained through `next` for range scans."""
    def __init__(self):
        self.keys = []
        self.values = []
        self.children = []
        # Right sibling pointer (used for leaves only).
        self.next = None
    def is_leaf(self):
        """True when this node has no children."""
        return len(self.children) == 0
    def min(self):
        """Return the leftmost descendant leaf."""
        current = self
        while not current.is_leaf():
            current = current.children[0]
        return current
    def max(self):
        """Return the rightmost descendant leaf."""
        current = self
        while not current.is_leaf():
            current = current.children[-1]
        return current
    def __str__(self):
        # izip_longest: internal nodes carry keys without values.
        pairs = ['{%s:%s}' % pair for pair in izip_longest(self.keys, self.values)]
        return '|%s|' % ' '.join(pairs)
    __repr__ = __str__
class BPTree(object):
    """B+ tree of minimum degree `degree`.

    Internal nodes hold separator keys only; every key/value pair lives in
    a leaf, and leaves are chained through `.next` for in-order scans.
    Range queries (keys/values/items and their iter* variants) take
    inclusive [kmin, kmax] bounds.
    """
    def __init__(self, degree = 3):
        self.degree = degree
        self.root = BPNode()
        # Node-size invariants derived from the minimum degree.
        self._minkeys = self.degree - 1
        self._minchildren = self.degree
        self._maxkeys = 2 * self.degree - 1
        self._maxchildren = 2 * self.degree
        #self.disk_write(self.root)
    def search(self, node, key):
        """Return (leaf, index) holding key, or (None, None) if absent."""
        i = bisect.bisect_left(node.keys, key)
        if i < len(node.keys) and key == node.keys[i]:
            if node.is_leaf():
                return (node, i)
            else:
                # Separator keys are duplicated in the leaves: an equal key
                # in an internal node lives in the right subtree.
                return self.search(node.children[i+1], key)
        if node.is_leaf():
            return (None, None)
        else:
            # self.disk_read(node.children[i])
            return self.search(node.children[i], key)
    def ceiling(self, node, key):
        """Return the smallest stored key >= key, following the leaf chain
        when key is larger than everything in the reached leaf."""
        i = bisect.bisect(node.keys, key)
        if i < len(node.keys) and key == node.keys[i]:
            if node.is_leaf():
                return key
            else:
                return self.ceiling(node.children[i+1], key)
        if node.is_leaf():
            if i == len(node.keys):
                kp = node.keys[-1]
                if node.keys[-1] < key:
                    # Key is beyond this leaf; the answer (if any) is the
                    # first key of the next leaf in the chain.
                    if len(node.next.keys) > 0:
                        return node.next.keys[0]
                    else:
                        return kp
            return node.keys[i]
        else:
            return self.ceiling(node.children[i], key)
    def split_child(self, x, i, y):
        """Split full child y (= x.children[i]) around its median key,
        promoting a copy of the median into parent x."""
        z = BPNode()
        z.keys = y.keys[self.degree:]
        z.values = y.values[self.degree:]
        if not y.is_leaf():
            z.children = y.children[self.degree:]
            y.next = None
        else:
            # Leaves keep a copy of the promoted separator key/value and
            # stay linked into the leaf chain.
            z.keys.insert(0, y.keys[self.degree-1])
            z.values.insert(0, y.values[self.degree-1])
            z.next = y.next
            y.next = z
        x.children.insert(i+1, z)
        x.keys.insert(i, y.keys[self.degree-1])
        #x.values.insert(i, y.values[self.degree-1])
        y.keys = y.keys[:self.degree-1]
        y.values = y.values[:self.degree-1]
        y.children = y.children[:self.degree]
        #self.disk_write(y)
        #self.disk_write(z)
        #self.disk_write(x)
    def insert(self, key, value):
        """Insert a key/value pair, growing a new root if the old is full."""
        if len(self.root.keys) == self._maxkeys:
            oldroot = self.root
            self.root = BPNode()
            self.root.children.append(oldroot)
            self.split_child(self.root, 0, oldroot)
            self.insert_nonfull(self.root, key, value)
        else:
            self.insert_nonfull(self.root, key, value)
    def insert_nonfull(self, x, key, value):
        """Insert into subtree x, which is guaranteed not to be full."""
        # performance bottleneck fixed by bisect
        #while i > 0 and key < x.keys[i-1]:
        #    i -= 1
        i = bisect.bisect_left(x.keys, key)
        if x.is_leaf():
            x.keys.insert(i, key)
            x.values.insert(i, value)
            #self.disk_write(x)
        else:
            #self.disk_read(x.children[i])
            # Pre-emptively split a full child so the recursion below
            # always lands on a non-full node.
            if len(x.children[i].keys) == self._maxkeys:
                self.split_child(x, i, x.children[i])
                if key > x.keys[i]:
                    i += 1
            self.insert_nonfull(x.children[i], key, value)
    def delete(self, key):
        """Public delete entry point."""
        self._delete(self.root, key)
    def _delete(self, node, key):
        """Recursively delete `key`, rebalancing by borrowing from a
        sibling or merging so no visited child drops below _minkeys."""
        if key in node.keys:
            if node.is_leaf():
                index = node.keys.index(key)
                node.keys.pop(index)
                node.values.pop(index)
            else:
                ki = node.keys.index(key)
                if len(node.children[ki].keys) >= self.degree:
                    # Replace the separator with the predecessor key and
                    # patch the duplicated copy in the right subtree's
                    # leftmost leaf.
                    nmax = node.children[ki].max()
                    nmin = node.children[ki+1].min()
                    kp = nmax.keys[-1]
                    self._delete(node.children[ki], kp)
                    node.keys[ki] = kp
                    nmin.keys[0] = kp
                    nmin.values[0] = nmax.values[-1]
                elif len(node.children[ki+1].keys) >= self.degree:
                    # Drop the leaf copy of the key and promote the new
                    # smallest key of the right subtree as separator.
                    nmin = node.children[ki+1].min()
                    nmin.keys.pop(0)
                    nmin.values.pop(0)
                    kp = nmin.keys[0]
                    node.keys[ki] = nmin.keys[0]
                else:
                    # Both children minimal: merge right into left, then
                    # recurse into the merged node.
                    rnode = node.children.pop(ki+1)
                    if node.children[ki].is_leaf():
                        node.keys.pop(ki)
                        node.children[ki].keys.extend(rnode.keys)
                        node.children[ki].values.extend(rnode.values)
                        node.children[ki].next = rnode.next
                    else:
                        node.children[ki].keys.append(node.keys.pop(ki))
                        node.children[ki].keys.extend(rnode.keys)
                        node.children[ki].children.extend(rnode.children)
                    if node == self.root and not node.keys:
                        # Root emptied by the merge: shrink tree height.
                        self.root = node.children[ki]
                    self._delete(node.children[ki], key)
        else:
            ci = bisect.bisect_left(node.keys, key)
            if len(node.children[ci].keys) == self._minkeys:
                if ci >= 1 and len(node.children[ci-1].keys) > self._minkeys:
                    # Borrow the largest entry from the left sibling.
                    if node.children[ci].is_leaf():
                        kp = node.children[ci-1].keys.pop(-1)
                        vp = node.children[ci-1].values.pop(-1)
                        node.keys[ci-1] = kp
                        node.children[ci].keys.insert(0, kp)
                        node.children[ci].values.insert(0, vp)
                    else:
                        node.children[ci].keys.insert(0, node.keys[ci-1])
                        node.keys[ci-1] = node.children[ci-1].keys.pop(-1)
                        node.children[ci].children = node.children[ci-1].children[-1:] + node.children[ci].children
                        node.children[ci-1].children = node.children[ci-1].children[:-1]
                    self._delete(node.children[ci], key)
                elif ci < len(node.keys) and len(node.children[ci+1].keys) > self._minkeys:
                    # Borrow the smallest entry from the right sibling.
                    if node.children[ci].is_leaf():
                        kp = node.children[ci+1].keys.pop(0)
                        vp = node.children[ci+1].values.pop(0)
                        node.children[ci].keys.append(kp)
                        node.children[ci].values.append(vp)
                        node.keys[ci] = node.children[ci+1].keys[0]
                    else:
                        node.children[ci].keys.append(node.keys[ci])
                        node.keys[ci] = node.children[ci+1].keys.pop(0)
                        node.children[ci].children.extend(node.children[ci+1].children[:1])
                        node.children[ci+1].children = node.children[ci+1].children[1:]
                    self._delete(node.children[ci], key)
                else:
                    # No sibling can lend: merge with a neighbor.
                    if ci >= 1:
                        rnode = node.children.pop(ci)
                        if node.children[ci-1].is_leaf():
                            node.keys.pop(ci-1)
                            node.children[ci-1].keys.extend(rnode.keys)
                            node.children[ci-1].values.extend(rnode.values)
                            node.children[ci-1].next = rnode.next
                        else:
                            node.children[ci-1].keys.append(node.keys.pop(ci-1))
                            node.children[ci-1].keys.extend(rnode.keys)
                            node.children[ci-1].children.extend(rnode.children)
                        if node == self.root and not node.keys:
                            self.root = node.children[ci-1]
                        self._delete(node.children[ci-1], key)
                    else:
                        rnode = node.children.pop(ci+1)
                        if node.children[ci].is_leaf():
                            node.keys.pop(ci)
                            node.children[ci].keys.extend(rnode.keys)
                            node.children[ci].values.extend(rnode.values)
                            node.children[ci].next = rnode.next
                        else:
                            node.children[ci].keys.append(node.keys.pop(ci))
                            node.children[ci].keys.extend(rnode.keys)
                            node.children[ci].children.extend(rnode.children)
                        if node == self.root and not node.keys:
                            self.root = node.children[ci]
                        self._delete(node.children[ci], key)
            else:
                self._delete(node.children[ci], key)
    def keys(self, kmin = None, kmax = None):
        """Sorted keys with kmin <= k <= kmax (bounds default to extremes)."""
        keys = []
        if kmin is None:
            kmin = self.min()
        if kmax is None:
            kmax = self.max()
        return self._keys(self.root, kmin, kmax, keys)
    def _keys(self, node, kmin, kmax, keys):
        """return [k for k in allkeys if kmin <= k <= kmax]"""
        imin = bisect.bisect_left(node.keys, kmin)
        imax = bisect.bisect(node.keys, kmax)
        if node.children:
            for e in node.children[imin:imax+1]:
                self._keys(e, kmin, kmax, keys)
        if node.is_leaf():
            keys.extend(node.keys[imin:imax])
        return keys
    def iterkeys(self, kmin = None, kmax = None):
        """Lazy variant of keys()."""
        if kmin is None:
            kmin = self.min()
        if kmax is None:
            kmax = self.max()
        return self._iterkeys(self.root, kmin, kmax)
    def _iterkeys(self, node, kmin, kmax):
        """return [k for k in allkeys if kmin <= k <= kmax]"""
        imin = bisect.bisect_left(node.keys, kmin)
        imax = bisect.bisect(node.keys, kmax)
        if node.children:
            for e in node.children[imin:imax+1]:
                for k in self._iterkeys(e, kmin, kmax):
                    yield k
        if node.is_leaf():
            for i in xrange(imin, imax):
                yield node.keys[i]
    def values(self, kmin = None, kmax = None):
        """Values for keys in [kmin, kmax], in key order."""
        values = []
        if kmin is None:
            kmin = self.min()
        if kmax is None:
            kmax = self.max()
        return self._values(self.root, kmin, kmax, values)
    def _values(self, node, kmin, kmax, values):
        """return [v for k in allkeys if kmin <= k <= kmax]"""
        imin = bisect.bisect_left(node.keys, kmin)
        imax = bisect.bisect(node.keys, kmax)
        if node.children:
            for e in node.children[imin:imax+1]:
                self._values(e, kmin, kmax, values)
        if node.is_leaf():
            values.extend(node.values[imin:imax])
        return values
    def itervalues(self, kmin = None, kmax = None):
        """Lazy variant of values()."""
        if kmin is None:
            kmin = self.min()
        if kmax is None:
            kmax = self.max()
        return self._itervalues(self.root, kmin, kmax)
    def _itervalues(self, node, kmin, kmax):
        """return [k for k in allkeys if kmin <= k <= kmax]"""
        imin = bisect.bisect_left(node.keys, kmin)
        imax = bisect.bisect(node.keys, kmax)
        if node.children:
            for e in node.children[imin:imax+1]:
                for v in self._itervalues(e, kmin, kmax):
                    yield v
        if node.is_leaf():
            for i in xrange(imin, imax):
                yield node.values[i]
    def items(self, kmin = None, kmax = None):
        """(key, value) pairs for keys in [kmin, kmax], in key order."""
        items = []
        if kmin is None:
            kmin = self.min()
        if kmax is None:
            kmax = self.max()
        return self._items(self.root, kmin, kmax, items)
    def _items(self, node, kmin, kmax, items):
        """return [(k,v) for k in allkeys if kmin <= k <= kmax]"""
        imin = bisect.bisect_left(node.keys, kmin)
        imax = bisect.bisect(node.keys, kmax)
        if node.children:
            for e in node.children[imin:imax+1]:
                self._items(e, kmin, kmax, items)
        if node.is_leaf():
            items.extend(zip(node.keys[imin:imax], node.values[imin:imax]))
        return items
    def iteritems(self, kmin = None, kmax = None):
        """Lazy variant of items()."""
        if kmin is None:
            kmin = self.min()
        if kmax is None:
            kmax = self.max()
        return self._iteritems(self.root, kmin, kmax)
    def _iteritems(self, node, kmin, kmax):
        """return [k for k in allkeys if kmin <= k <= kmax]"""
        imin = bisect.bisect_left(node.keys, kmin)
        imax = bisect.bisect(node.keys, kmax)
        if node.children:
            for e in node.children[imin:imax+1]:
                for i in self._iteritems(e, kmin, kmax):
                    yield i
        if node.is_leaf():
            for i in xrange(imin, imax):
                yield (node.keys[i], node.values[i])
    def min(self):
        """Smallest key in the tree (leftmost leaf, first key)."""
        node = self.root
        while node.children:
            node = node.children[0]
        return node.keys[0]
    def max(self):
        """Largest key in the tree (rightmost leaf, last key)."""
        node = self.root
        while node.children:
            node = node.children[-1]
        return node.keys[-1]
    def bft(self, node, level = 1):
        """Breadth first traversal."""
        q = Queue.Queue()
        level = level
        q.put((level, node))
        while not q.empty():
            level, node = q.get()
            yield (level, node)
            for e in node.children:
                q.put((level+1, e))
    def levels(self):
        """Map level number -> list of nodes on that level."""
        leveldict = {}
        for level, node in self.bft(self.root):
            leveldict.setdefault(level, []).append(node)
        return leveldict
    def pprint(self, width = 80):
        """Print the tree one level per line, centered to `width` columns."""
        leveldict = self.levels()
        keys = leveldict.keys()
        for k in keys:
            print ' '.join(str(e) for e in leveldict[k]).center(width)
    def __setitem__(self, k, v):
        # Map-style assignment: b[k] = v.
        self.insert(k, v)
    def __getitem__(self, k):
        # Map-style lookup: returns None for missing keys.
        node, i = self.search(self.root, k)
        if node:
            return node.values[i]
        else:
            return None
    def __delitem__(self, k):
        # Map-style deletion.
        self._delete(self.root, k)
def test_BPTree():
    """Smoke test for BPTree: insert out-of-order keys, walk the leaf
    chain, delete several keys, then exercise min/max, ceiling and the
    range-query accessors. Output is printed for manual inspection."""
    b = BPTree(2)
    # Keys deliberately inserted out of order to force node splits.
    kv = [
        (0, 'zero'),
        (8, 'eight'),
        (9, 'nine'),
        (1, 'one'),
        (7, 'seven'),
        (2, 'two'),
        (6, 'six'),
        (3, 'three'),
        (5, 'five'),
        (4, 'four'),
        (10, 'ten'),
        (11, 'eleven'),
        ]
    for k, v in kv:
        b[k] = v
    b.pprint()
    # Walk the linked leaf chain starting from the leaf that holds key 0.
    n,i = b.search(b.root, 0)
    while n.next:
        print n.next
        n = n.next
    del b[11]
    del b[1]
    del b[2]
    del b[3]
    del b[9]
    b.pprint()
    del b[4]
    b.pprint()
    print b[10]
    print 'min key: ', b.min()
    print 'max key: ', b.max()
    print 'ceiling: ', b.ceiling(b.root, 7.4)
    print 'keys :', b.keys()
    print 'iterkeys() :', list(b.iterkeys())
    print 'keys(min, max) :', b.keys(3.4, 7.9)
    print 'iterkeys(min, max) :', list(b.iterkeys(3.4, 7.9))
    print 'values() :', b.values()
    print 'itervalues() :', list(b.itervalues())
    print 'values(min, max) :', b.values(3.4, 7.9)
    print 'itervalues(min, max):', list(b.itervalues(3.4, 7.9))
    print 'items() :', b.items()
    print 'iteritems() :', list(b.iteritems())
    print 'items(min, max) :', b.items(3.4, 7.9)
    print 'iteritems(min, max) :', list(b.iteritems(3.4, 7.9))
#################################### END #######################################
# Run the BTree smoke test when executed as a script; the B+ tree test is
# available but disabled by default.
if __name__ == '__main__':
    test_BTree()
    #test_BPTree()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Windowing concepts.
A WindowInto transform logically divides up or groups the elements of a
PCollection into finite windows according to a windowing function (derived from
WindowFn).
The output of WindowInto contains the same elements as input, but they have been
logically assigned to windows. The next GroupByKey(s) transforms, including one
within a composite transform, will group by the combination of keys and windows.
Windowing a PCollection allows chunks of it to be processed individually, before
the entire PCollection is available. This is especially important for
PCollection(s) with unbounded size, since the full PCollection is never
available at once, since more data is continually arriving. For PCollection(s)
with a bounded size (aka. conventional batch mode), by default, all data is
implicitly in a single window (see GlobalWindows), unless WindowInto is
applied.
For example, a simple form of windowing divides up the data into fixed-width
time intervals, using FixedWindows.
Seconds are used as the time unit for the built-in windowing primitives here.
Integer or floating point seconds can be passed to these primitives.
Internally, seconds, with microsecond granularity, are stored as
timeutil.Timestamp and timeutil.Duration objects. This is done to avoid
precision errors that would occur with floating point representations.
Custom windowing function classes can be created, by subclassing from
WindowFn.
"""
from __future__ import absolute_import
from google.protobuf import struct_pb2
from google.protobuf import wrappers_pb2
from apache_beam import coders
from apache_beam.internal import pickler
from apache_beam.runners.api import beam_runner_api_pb2
from apache_beam.transforms import timeutil
from apache_beam.transforms.timeutil import Duration
from apache_beam.transforms.timeutil import MAX_TIMESTAMP
from apache_beam.transforms.timeutil import MIN_TIMESTAMP
from apache_beam.transforms.timeutil import Timestamp
from apache_beam.utils.windowed_value import WindowedValue
from apache_beam.utils import proto_utils
from apache_beam.utils import urns
# TODO(ccy): revisit naming and semantics once Java Apache Beam finalizes their
# behavior.
class OutputTimeFn(object):
  """Determines how output timestamps of grouping operations are assigned."""

  OUTPUT_AT_EOW = beam_runner_api_pb2.END_OF_WINDOW
  OUTPUT_AT_EARLIEST = beam_runner_api_pb2.EARLIEST_IN_PANE
  OUTPUT_AT_LATEST = beam_runner_api_pb2.LATEST_IN_PANE
  # TODO(robertwb): Add this to the runner API or remove it.
  OUTPUT_AT_EARLIEST_TRANSFORMED = 'OUTPUT_AT_EARLIEST_TRANSFORMED'

  @staticmethod
  def get_impl(output_time_fn, window_fn):
    """Map an OutputTimeFn constant to its timeutil implementation.

    Raises ValueError for unrecognized constants.
    """
    if output_time_fn == OutputTimeFn.OUTPUT_AT_EOW:
      return timeutil.OutputAtEndOfWindowImpl()
    if output_time_fn == OutputTimeFn.OUTPUT_AT_EARLIEST:
      return timeutil.OutputAtEarliestInputTimestampImpl()
    if output_time_fn == OutputTimeFn.OUTPUT_AT_LATEST:
      return timeutil.OutputAtLatestInputTimestampImpl()
    if output_time_fn == OutputTimeFn.OUTPUT_AT_EARLIEST_TRANSFORMED:
      return timeutil.OutputAtEarliestTransformedInputTimestampImpl(window_fn)
    raise ValueError('Invalid OutputTimeFn: %s.' % output_time_fn)
class WindowFn(object):
  """An abstract windowing function defining a basic assign and merge."""

  class AssignContext(object):
    """Context passed to WindowFn.assign()."""

    def __init__(self, timestamp, element=None):
      self.timestamp = Timestamp.of(timestamp)
      self.element = element

  def assign(self, assign_context):
    """Associates a timestamp to an element."""
    raise NotImplementedError

  class MergeContext(object):
    """Context passed to WindowFn.merge() to perform merging, if any."""

    def __init__(self, windows):
      self.windows = list(windows)

    def merge(self, to_be_merged, merge_result):
      # Implemented by the runner; records that the given windows should
      # be replaced by merge_result.
      raise NotImplementedError

  def merge(self, merge_context):
    """Returns a window that is the result of merging a set of windows."""
    raise NotImplementedError

  def is_merging(self):
    """Returns whether this WindowFn merges windows."""
    return True

  def get_window_coder(self):
    # Default coder; subclasses may return a more specific one.
    return coders.WindowCoder()

  def get_transformed_output_time(self, window, input_timestamp):  # pylint: disable=unused-argument
    """Given input time and output window, returns output time for window.
    If OutputTimeFn.OUTPUT_AT_EARLIEST_TRANSFORMED is used in the Windowing,
    the output timestamp for the given window will be the earliest of the
    timestamps returned by get_transformed_output_time() for elements of the
    window.
    Arguments:
      window: Output window of element.
      input_timestamp: Input timestamp of element as a timeutil.Timestamp
        object.
    Returns:
      Transformed timestamp.
    """
    # By default, just return the input timestamp.
    return input_timestamp

  # Class-level registry mapping URN -> (parameter proto type, constructor),
  # populated via register_urn() and consulted by from_runner_api().
  _known_urns = {}

  @classmethod
  def register_urn(cls, urn, parameter_type, constructor):
    cls._known_urns[urn] = parameter_type, constructor

  @classmethod
  def from_runner_api(cls, fn_proto, context):
    # Rebuild a WindowFn from its runner-API proto via the URN registry.
    parameter_type, constructor = cls._known_urns[fn_proto.spec.urn]
    return constructor(
        proto_utils.unpack_Any(fn_proto.spec.parameter, parameter_type),
        context)

  def to_runner_api(self, context):
    # Serialize to the runner API as a (urn, parameter) FunctionSpec.
    urn, typed_param = self.to_runner_api_parameter(context)
    return beam_runner_api_pb2.FunctionSpec(
        spec=beam_runner_api_pb2.UrnWithParameter(
            urn=urn,
            parameter=proto_utils.pack_Any(typed_param)))

  @staticmethod
  def from_runner_api_parameter(fn_parameter, unused_context):
    # Default deserializer: the parameter is a pickled WindowFn.
    return pickler.loads(fn_parameter.value)

  def to_runner_api_parameter(self, context):
    # Default serializer: pickle the whole WindowFn instance.
    return (urns.PICKLED_WINDOW_FN,
            wrappers_pb2.BytesValue(value=pickler.dumps(self)))
# Register the fallback pickled-WindowFn deserializer so that any WindowFn
# without a dedicated URN still round-trips through the runner API.
WindowFn.register_urn(
    urns.PICKLED_WINDOW_FN,
    wrappers_pb2.BytesValue,
    WindowFn.from_runner_api_parameter)
class BoundedWindow(object):
  """A window for timestamps in range (-infinity, end).
  Attributes:
    end: End of window.
  """

  def __init__(self, end):
    self.end = Timestamp.of(end)

  def max_timestamp(self):
    # Largest timestamp inside the half-open window: one tick before end.
    return self.end.predecessor()

  def __cmp__(self, other):
    # Order first by endpoint, then arbitrarily.
    # NOTE: __cmp__ and cmp() are Python 2 only.
    return cmp(self.end, other.end) or cmp(hash(self), hash(other))

  def __eq__(self, other):
    # Deliberately unimplemented: concrete subclasses must define equality.
    raise NotImplementedError

  def __hash__(self):
    return hash(self.end)

  def __repr__(self):
    return '[?, %s)' % float(self.end)
class IntervalWindow(BoundedWindow):
  """A window for timestamps in range [start, end).
  Attributes:
    start: Start of window as seconds since Unix epoch.
    end: End of window as seconds since Unix epoch.
  """

  def __init__(self, start, end):
    super(IntervalWindow, self).__init__(end)
    self.start = Timestamp.of(start)

  def __hash__(self):
    return hash((self.start, self.end))

  def __eq__(self, other):
    return self.start == other.start and self.end == other.end

  def __repr__(self):
    return '[%s, %s)' % (float(self.start), float(self.end))

  def intersects(self, other):
    """Return True iff this window overlaps `other`.

    Fix: the original used `or`, which reported almost every pair of
    windows as intersecting (e.g. [0, 1) and [2, 3)). Two half-open
    intervals overlap iff each one starts before the other ends.
    """
    return other.start < self.end and self.start < other.end

  def union(self, other):
    """Smallest interval covering both windows."""
    return IntervalWindow(
        min(self.start, other.start), max(self.end, other.end))
class TimestampedValue(object):
  """A timestamped value having a value and a timestamp.
  Attributes:
    value: The underlying value.
    timestamp: Timestamp associated with the value as seconds since Unix epoch.
  """

  def __init__(self, value, timestamp):
    self.value = value
    # Normalize whatever the caller passed (int/float/Timestamp).
    self.timestamp = Timestamp.of(timestamp)
class GlobalWindow(BoundedWindow):
  """The default window into which all data is placed (via GlobalWindows)."""
  _instance = None

  def __new__(cls):
    # Singleton: every GlobalWindow() call returns the same instance.
    if cls._instance is None:
      cls._instance = super(GlobalWindow, cls).__new__(cls)
    return cls._instance

  def __init__(self):
    super(GlobalWindow, self).__init__(MAX_TIMESTAMP)
    self.start = MIN_TIMESTAMP

  def __repr__(self):
    return 'GlobalWindow'

  def __hash__(self):
    return hash(type(self))

  def __eq__(self, other):
    # Global windows are always and only equal to each other.
    return self is other or type(self) is type(other)
class NonMergingWindowFn(WindowFn):
  """Base for windowing functions whose windows never merge."""

  def is_merging(self):
    return False

  def merge(self, merge_context):
    pass  # No merging.
class GlobalWindows(NonMergingWindowFn):
  """A windowing function that assigns everything to one global window."""

  @classmethod
  def windowed_value(cls, value, timestamp=MIN_TIMESTAMP):
    # Convenience: wrap a value in the single global window.
    return WindowedValue(value, timestamp, (GlobalWindow(),))

  def assign(self, assign_context):
    return [GlobalWindow()]

  def get_window_coder(self):
    return coders.GlobalWindowCoder()

  def __hash__(self):
    return hash(type(self))

  def __eq__(self, other):
    # Global windowfn is always and only equal to each other.
    return self is other or type(self) is type(other)

  def __ne__(self, other):
    return not self == other

  @staticmethod
  def from_runner_api_parameter(unused_fn_parameter, unused_context):
    return GlobalWindows()

  def to_runner_api_parameter(self, context):
    # No parameter payload is needed for the global-windows URN.
    return urns.GLOBAL_WINDOWS_FN, None
# Register the parameterless global-windows URN.
WindowFn.register_urn(
    urns.GLOBAL_WINDOWS_FN, None, GlobalWindows.from_runner_api_parameter)
class FixedWindows(NonMergingWindowFn):
  """A windowing function that assigns each element to one time interval.
  The attributes size and offset determine in what time interval a timestamp
  will be slotted. The time intervals have the following formula:
  [N * size + offset, (N + 1) * size + offset)
  Attributes:
    size: Size of the window as seconds.
    offset: Offset of this window as seconds since Unix epoch. Windows start at
      t=N * size + offset where t=0 is the epoch. The offset must be a value
      in range [0, size). If it is not it will be normalized to this range.
  """

  def __init__(self, size, offset=0):
    if size <= 0:
      raise ValueError('The size parameter must be strictly positive.')
    self.size = Duration.of(size)
    # Normalize the offset into [0, size).
    self.offset = Timestamp.of(offset) % self.size

  def assign(self, context):
    """Return the single fixed window containing context.timestamp."""
    timestamp = context.timestamp
    # Snap the timestamp down to the start of its window.
    start = timestamp - (timestamp - self.offset) % self.size
    return [IntervalWindow(start, start + self.size)]

  def __eq__(self, other):
    if type(self) == type(other) == FixedWindows:
      return self.size == other.size and self.offset == other.offset
    # Fix: previously fell through and implicitly returned None; signal the
    # unsupported comparison explicitly so Python can apply its fallback.
    return NotImplemented

  def __ne__(self, other):
    return not self == other

  @staticmethod
  def from_runner_api_parameter(fn_parameter, unused_context):
    return FixedWindows(
        size=Duration(micros=fn_parameter['size']),
        offset=Timestamp(micros=fn_parameter['offset']))

  def to_runner_api_parameter(self, context):
    return (urns.FIXED_WINDOWS_FN,
            proto_utils.pack_Struct(size=self.size.micros,
                                    offset=self.offset.micros))
# Register the struct-parameterized fixed-windows URN.
WindowFn.register_urn(
    urns.FIXED_WINDOWS_FN,
    struct_pb2.Struct,
    FixedWindows.from_runner_api_parameter)
class SlidingWindows(NonMergingWindowFn):
  """A windowing function that assigns each element to a set of sliding windows.
  The attributes size and offset determine in what time interval a timestamp
  will be slotted. The time intervals have the following formula:
  [N * period + offset, N * period + offset + size)
  Attributes:
    size: Size of the window as seconds.
    period: Period of the windows as seconds.
    offset: Offset of this window as seconds since Unix epoch. Windows start at
      t=N * period + offset where t=0 is the epoch. The offset must be a value
      in range [0, period). If it is not it will be normalized to this range.
  """

  def __init__(self, size, period, offset=0):
    if size <= 0:
      raise ValueError('The size parameter must be strictly positive.')
    self.size = Duration.of(size)
    self.period = Duration.of(period)
    self.offset = Timestamp.of(offset) % size

  def assign(self, context):
    """Return every sliding window that contains context.timestamp."""
    timestamp = context.timestamp
    start = timestamp - (timestamp - self.offset) % self.period
    # NOTE(review): range() over Timestamp/Duration requires these types to
    # behave like integers; confirm against timeutil before relying on this.
    return [IntervalWindow(Timestamp.of(s), Timestamp.of(s) + self.size)
            for s in range(start, start - self.size, -self.period)]

  def __eq__(self, other):
    if type(self) == type(other) == SlidingWindows:
      return (self.size == other.size
              and self.offset == other.offset
              and self.period == other.period)
    # Fix: previously fell through and implicitly returned None.
    return NotImplemented

  def __ne__(self, other):
    # Fix: added for consistency with FixedWindows; without it, `!=` fell
    # back to identity comparison and two equal instances compared unequal.
    return not self == other

  @staticmethod
  def from_runner_api_parameter(fn_parameter, unused_context):
    return SlidingWindows(
        size=Duration(micros=fn_parameter['size']),
        offset=Timestamp(micros=fn_parameter['offset']),
        period=Duration(micros=fn_parameter['period']))

  def to_runner_api_parameter(self, context):
    return (urns.SLIDING_WINDOWS_FN,
            proto_utils.pack_Struct(
                size=self.size.micros,
                offset=self.offset.micros,
                period=self.period.micros))
# Register the struct-parameterized sliding-windows URN.
WindowFn.register_urn(
    urns.SLIDING_WINDOWS_FN,
    struct_pb2.Struct,
    SlidingWindows.from_runner_api_parameter)
class Sessions(WindowFn):
  """A windowing function that groups elements into sessions.
  A session is defined as a series of consecutive events
  separated by a specified gap size.
  Attributes:
    gap_size: Size of the gap between windows as floating-point seconds.
  """

  def __init__(self, gap_size):
    if gap_size <= 0:
      raise ValueError('The size parameter must be strictly positive.')
    self.gap_size = Duration.of(gap_size)

  def assign(self, context):
    # Each element initially gets its own proto-session window; merge()
    # later coalesces overlapping ones.
    timestamp = context.timestamp
    return [IntervalWindow(timestamp, timestamp + self.gap_size)]

  def merge(self, merge_context):
    # Sweep windows in start order, accumulating a run of transitively
    # overlapping windows in to_merge; `end` tracks the furthest end seen.
    to_merge = []
    end = timeutil.MIN_TIMESTAMP
    for w in sorted(merge_context.windows, key=lambda w: w.start):
      if to_merge:
        if end > w.start:
          # w overlaps the current run; extend the run.
          to_merge.append(w)
          if w.end > end:
            end = w.end
        else:
          # Gap found: flush the finished run (only when it actually
          # combined more than one window), then start a new run at w.
          if len(to_merge) > 1:
            merge_context.merge(to_merge,
                                IntervalWindow(to_merge[0].start, end))
          to_merge = [w]
          end = w.end
      else:
        to_merge = [w]
        end = w.end
    # Flush the final run.
    if len(to_merge) > 1:
      merge_context.merge(to_merge, IntervalWindow(to_merge[0].start, end))

  def __eq__(self, other):
    if type(self) == type(other) == Sessions:
      return self.gap_size == other.gap_size

  @staticmethod
  def from_runner_api_parameter(fn_parameter, unused_context):
    return Sessions(gap_size=Duration(micros=fn_parameter['gap_size']))

  def to_runner_api_parameter(self, context):
    return (urns.SESSION_WINDOWS_FN,
            proto_utils.pack_Struct(gap_size=self.gap_size.micros))
# Register the struct-parameterized session-windows URN.
WindowFn.register_urn(
    urns.SESSION_WINDOWS_FN,
    struct_pb2.Struct,
    Sessions.from_runner_api_parameter)
| |
import glob
import os
import distutils.spawn
def local_path(path):
    """Resolve *path* relative to the directory containing this settings file."""
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, path)
# Core Django test settings.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'TEST_NAME': ':memory:'
    }
}
DEBUG = False
SITE_ID = 1
INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.sites',
    'django.contrib.sessions',
    'django.contrib.staticfiles',
    'django.contrib.auth',
    'django.contrib.admin',
    'pipeline',
    'tests.tests'
]
# Fix: MIDDLEWARE_CLASSES was assigned twice; the second assignment
# silently dropped the session/auth middleware (which django.contrib.admin
# relies on). The two tuples are merged into a single definition.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware'
)
ROOT_URLCONF = 'tests.urls'
MEDIA_URL = '/media/'
MEDIA_ROOT = local_path('media')
# Static-file handling goes through django-pipeline's storage backend.
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
STATIC_ROOT = local_path('static/')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    ('pipeline', local_path('assets/')),
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'pipeline.finders.PipelineFinder',
)
SECRET_KEY = "django-pipeline"
# django-pipeline configuration: one CSS group and four JS groups that
# differ only in their async/defer script-tag attributes.
PIPELINE = {
    'PIPELINE_ENABLED': True,
    'JS_COMPRESSOR': None,
    'CSS_COMPRESSOR': None,
    'STYLESHEETS': {
        'screen': {
            'source_filenames': (
                'pipeline/css/first.css',
                'pipeline/css/second.css',
                'pipeline/css/urls.css'
            ),
            'output_filename': 'screen.css'
        }
    },
    'JAVASCRIPT': {
        'scripts': {
            'source_filenames': (
                'pipeline/js/first.js',
                'pipeline/js/second.js',
                'pipeline/js/application.js',
                'pipeline/templates/**/*.jst'
            ),
            'output_filename': 'scripts.js'
        },
        'scripts_async': {
            'source_filenames': (
                'pipeline/js/first.js',
                'pipeline/js/second.js',
                'pipeline/js/application.js',
                'pipeline/templates/**/*.jst'
            ),
            'output_filename': 'scripts_async.js',
            'extra_context': {
                'async': True,
            }
        },
        'scripts_defer': {
            'source_filenames': (
                'pipeline/js/first.js',
                'pipeline/js/second.js',
                'pipeline/js/application.js',
                'pipeline/templates/**/*.jst'
            ),
            'output_filename': 'scripts_defer.js',
            'extra_context': {
                'defer': True,
            }
        },
        'scripts_async_defer': {
            'source_filenames': (
                'pipeline/js/first.js',
                'pipeline/js/second.js',
                'pipeline/js/application.js',
                'pipeline/templates/**/*.jst'
            ),
            'output_filename': 'scripts_async_defer.js',
            'extra_context': {
                'async': True,
                'defer': True,
            }
        }
    }
}
# Probe the environment for the external tools the compressor tests need.
NODE_MODULES_PATH = local_path('node_modules')
NODE_BIN_PATH = os.path.join(NODE_MODULES_PATH, '.bin')
NODE_EXE_PATH = distutils.spawn.find_executable('node')
JAVA_EXE_PATH = distutils.spawn.find_executable('java')
CSSTIDY_EXE_PATH = distutils.spawn.find_executable('csstidy')
# HAS_NODE requires both a node executable and an installed node_modules/.bin.
HAS_NODE = os.path.exists(NODE_BIN_PATH) and NODE_EXE_PATH
HAS_JAVA = bool(JAVA_EXE_PATH)
HAS_CSSTIDY = bool(CSSTIDY_EXE_PATH)
# Wire optional compiler/compressor binaries into PIPELINE, but only for
# tools that were actually found on this machine.
if HAS_NODE:
    def node_exe_path(command):
        # npm wraps binaries in .cmd shims on Windows.
        exe_ext = '.cmd' if os.name == 'nt' else ''
        return os.path.join(NODE_BIN_PATH, "%s%s" % (command, exe_ext))
    PIPELINE.update({
        'SASS_BINARY': node_exe_path('node-sass'),
        'COFFEE_SCRIPT_BINARY': node_exe_path('coffee'),
        'COFFEE_SCRIPT_ARGUMENTS': ['--no-header'],
        'LESS_BINARY': node_exe_path('lessc'),
        'BABEL_BINARY': node_exe_path('babel'),
        'BABEL_ARGUMENTS': ['--presets', 'es2015'],
        'STYLUS_BINARY': node_exe_path('stylus'),
        'LIVE_SCRIPT_BINARY': node_exe_path('lsc'),
        'LIVE_SCRIPT_ARGUMENTS': ['--no-header'],
        'YUGLIFY_BINARY': node_exe_path('yuglify'),
        'UGLIFYJS_BINARY': node_exe_path('uglifyjs'),
        'CSSMIN_BINARY': node_exe_path('cssmin'),
    })
if HAS_NODE and HAS_JAVA:
    # Java-based compressors ship as jars inside node packages.
    PIPELINE.update({
        'CLOSURE_BINARY': [
            JAVA_EXE_PATH, '-jar',
            os.path.join(NODE_MODULES_PATH, 'google-closure-compiler', 'compiler.jar')],
        'YUI_BINARY': [
            JAVA_EXE_PATH, '-jar',
            glob.glob(os.path.join(NODE_MODULES_PATH, 'yuicompressor', 'build', '*.jar'))[0]]
    })
if HAS_CSSTIDY:
    PIPELINE.update({'CSSTIDY_BINARY': CSSTIDY_EXE_PATH})
# Template configuration: both the Django and Jinja2 backends are exercised,
# the latter with the pipeline Jinja2 extension enabled.
TEMPLATE_DIRS = (
    local_path('templates'),
)
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'DIRS': TEMPLATE_DIRS,
    },
    {
        'BACKEND': 'django.template.backends.jinja2.Jinja2',
        'APP_DIRS': True,
        'DIRS': TEMPLATE_DIRS,
        'OPTIONS': {
            'extensions': ['pipeline.jinja2.PipelineExtension']
        }
    }
]
# Only surface pipeline template-tag errors on the console during tests.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        'pipeline.templatetags.pipeline': {
            'handlers': ['console'],
            'level': 'ERROR',
        },
    },
}
| |
#!/usr/bin/env python3
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import glob
import os
import re
import argparse
from typing import Tuple
# Maps the property name embedded in old-style task file names to a pair of
# (property file to link, optional subproperty within that file).
NAME_TO_PROP_AND_SUBPROP = {
    "unreach-call": ("unreach-call.prp", None),
    "cover-error": ("coverage-error-call.prp", None),
    "unreach-label": ("ALL.prp", None),
    "termination": ("termination.prp", None),
    "no-overflow": ("no-overflow.prp", None),
    "valid-memcleanup": ("valid-memcleanup.prp", None),
    "valid-memsafety": ("valid-memsafety.prp", None),
    "valid-deref": ("valid-memsafety.prp", "valid-deref"),
    "valid-free": ("valid-memsafety.prp", "valid-free"),
    "valid-memtrack": ("valid-memsafety.prp", "valid-memtrack"),
    "def-behavior": ("def-behavior.prp", None),
}
# Matches C sources (.c) and preprocessed sources (.i).
CANDIDATE_REGEX = re.compile(r".*\.(c|i)")
DUMMY_SET = "__NO_SET__"
"""Dummy set name used for C files given on the command-line."""
def _get_prop(property_file, property_dir, task_dir):
return os.path.relpath(os.path.join(property_dir, property_file), task_dir)
def handle_c(task_file, args) -> Tuple[str, dict]:
    """Create yml task definition for the given file.
    Return a tuple of a recommended new task name and the yml info as dictionary.

    NOTE(review): the annotation says Tuple[str, dict] but yml_info is built
    as a (task_file, properties) tuple — confirm the intended return type.

    File names encode verdict and property, e.g. "foo_true-unreach-call.c";
    each '.'- and '_'-separated piece is scanned for such markers, which are
    collected into `properties` and (optionally) stripped from the name.
    """
    properties = []
    name_pcs_dot = task_file.split(".")
    new_name_pcs_dot = []
    for pd in name_pcs_dot:
        name_pcs = pd.split("_")
        new_name_pcs = []
        for p in name_pcs:
            # offset counts how many leading chars of p belong to the
            # verdict+property marker and should be stripped.
            offset = 0
            for name, prop in NAME_TO_PROP_AND_SUBPROP.items():
                if name not in p:
                    continue  # with next name_pc p
                if p.startswith("true"):
                    expected = "true"
                    offset += len("true-")
                elif p.startswith("false"):
                    expected = "false"
                    offset += len("false-")
                elif p.startswith("unknown-"):
                    expected = None
                    offset += len("unknown-")
                else:
                    continue  # with next name_pc p
                properties.append((prop, expected))
                offset += len(name)
                break  # for-loop over properties once one matches, because they are distinct
            # Keep the remainder of the piece; an empty remainder is kept
            # only when nothing was stripped (offset == 0).
            new_p = p[offset:]
            if new_p or offset == 0:
                new_name_pcs.append(new_p)
        new_name_pcs_dot.append("_".join(new_name_pcs))
    yml_info = (task_file, properties)
    if args.change_filename:
        new_task_file = ".".join(new_name_pcs_dot)
        # Collapse ".c.i" endings left over after marker removal.
        if new_task_file[-4:] == ".c.i":
            new_task_file = new_task_file[:-4] + ".i"
    else:
        new_task_file = task_file
    return new_task_file, yml_info
def parse_args():
    """Parse command-line arguments for the task-conversion script.

    Returns an argparse.Namespace with:
      prop_dir (str): directory that contains the property files to link to.
      files (list[str]): .set files and/or C files to process.
      change_filename (bool): strip verdict tokens from file names (default True).
      collisions_across_dirs (bool): check name collisions across different
          directories (default True) instead of only within one directory.
    """
    parser = argparse.ArgumentParser(
        description="Script to transform old-style benchexec benchmark tasks with property and verdict in file name to new yml-based task-info style"
    )
    parser.add_argument(
        "--prop-dir",
        dest="prop_dir",
        type=str,
        default="properties/",
        required=False,
        help="directory that contains program properties to link to",
    )
    parser.add_argument(
        "files",
        metavar="file",
        nargs="+",
        help=".set files that contain task lists or C files to create yml for",
    )
    parser.add_argument(
        "--no-change-filename",
        dest="change_filename",
        action="store_false",
        default=True,
        # typo fixed: "collissions" -> "collisions"
        help="do not shorten file names by removing the verdicts and avoiding collisions, but keep name as-is",
    )
    parser.add_argument(
        "--no-collisions-across-directories",
        dest="collisions_across_dirs",
        # BUG FIX: was action="store_true", which together with default=True
        # made the flag a no-op; store_false lets it actually disable the
        # cross-directory collision check, as the help text promises.
        action="store_false",
        default=True,
        help="do not avoid same file names across different directories, but only within same directories",
    )
    return parser.parse_args()
# Maps each .set file to the (eventually rewritten) list of its entries.
sets_to_tasks = {}
# Union of all task files found in the .set files plus those given directly.
all_tasks = set()

if __name__ == "__main__":
    args = parse_args()
    prop_dir = args.prop_dir
    verification_set_files = [f for f in args.files if f.endswith(".set")]
    other_files = [f for f in args.files if f not in verification_set_files]
    # Phase 1: collect candidate task files from all .set files.
    for verification_set in verification_set_files:
        sets_to_tasks[verification_set] = []
        with open(verification_set, "r") as inp:
            for line in inp.readlines():
                line = line.strip()
                if not line:
                    continue
                # Keep wildcard lines as commented-out reference entries.
                if "*" in line:
                    sets_to_tasks[verification_set].append("## " + line)
                for task in sorted(filter(CANDIDATE_REGEX.match, glob.iglob(line))):
                    all_tasks.add(task)
                    sets_to_tasks[verification_set].append(task)
    # C files given directly on the command line go into a dummy set.
    sets_to_tasks[DUMMY_SET] = other_files
    all_tasks = all_tasks.union(set(other_files))
    # Phase 2: compute new names and yml info; skip .c files that have a
    # preprocessed .i sibling (the .i file is the canonical task).
    tasks_to_new_names_and_yml = {}
    for task_file in all_tasks:
        # check whether preprocessed .i file exists for current .c file
        if task_file[-1] == "c" and (
            glob.glob(task_file[:-1] + "i") or glob.glob(task_file + ".i")
        ):
            print("Redundant file:", task_file)
            continue
        new_task_file, yml_info = handle_c(task_file, args)
        tasks_to_new_names_and_yml[task_file] = [new_task_file, yml_info]
    # sort tasks by their new names to be deterministic
    sorted_tasks_to_new_names = sorted(
        tasks_to_new_names_and_yml.items(), key=lambda e: e[1][0]
    )
    # Phase 3: resolve name collisions, write yml files, rename task files.
    for old_name, new_info in sorted_tasks_to_new_names:
        assert len(new_info) == 2
        curr_task = new_info[0]
        yml_info = new_info[1]

        def _compute_collisions(curr_task, tasks_to_new_names_and_yml):
            # Returns the old names of all other tasks whose *new* name would
            # collide with curr_task (case-insensitive, ignoring the final
            # extension character so x.c and x.i collide).
            task_basename = os.path.basename(curr_task)
            if args.collisions_across_dirs:
                collisions = [
                    k
                    for k, v in tasks_to_new_names_and_yml.items()
                    if os.path.basename(v[0]).lower()[:-1] == task_basename.lower()[:-1]
                    and k != old_name
                ]
            else:
                # NOTE(review): this branch compares the full path v[0] against
                # a basename, so it can never match for tasks in
                # subdirectories — probably both sides should be treated the
                # same way; confirm intended semantics.
                collisions = [
                    k
                    for k, v in tasks_to_new_names_and_yml.items()
                    if v[0].lower()[:-1] == task_basename.lower()[:-1] and k != old_name
                ]
            return collisions

        # store original collisions for rename
        collisions = _compute_collisions(curr_task, tasks_to_new_names_and_yml)
        counter = 1
        # Append "-<counter>" before the extension until the name is unique.
        while _compute_collisions(curr_task, tasks_to_new_names_and_yml):
            curr_task = f"{curr_task[:-2]}-{counter}{curr_task[-2:]}"
            counter += 1
        tasks_to_new_names_and_yml[old_name][0] = curr_task
        # Also disambiguate every task that originally collided with us.
        for other in collisions:
            new_name = tasks_to_new_names_and_yml[other][0]
            while _compute_collisions(new_name, tasks_to_new_names_and_yml):
                new_name = f"{new_name[:-2]}-{counter}{new_name[-2:]}"
                counter += 1
            tasks_to_new_names_and_yml[other][0] = new_name
        task_basename = os.path.basename(curr_task)
        # Build the yml task-definition content.
        yml_content = "format_version: '1.0'\n"
        yml_content += "\n"
        if args.change_filename:
            yml_content += f"# old file name: {os.path.basename(old_name)}\n"
        yml_content += f"input_files: '{task_basename}'\n"
        yml_content += "\n"
        task_dir = os.path.dirname(curr_task)
        if not yml_info[1]:
            yml_content += "properties: []\n"
        else:
            yml_content += "properties:\n"
            for prop, expected in sorted(yml_info[1], key=lambda p: p[0][0]):
                prop_file = _get_prop(prop[0], prop_dir, task_dir)
                yml_content += f"  - property_file: {prop_file}\n"
                if expected:
                    yml_content += f"    expected_verdict: {expected}\n"
                if prop[1]:
                    yml_content += f"    subproperty: {prop[1]}\n"
                # A falsified unreach-call task doubles as a cover-error task.
                if "unreach-call" in prop_file and expected == "false":
                    prop_file = _get_prop(
                        NAME_TO_PROP_AND_SUBPROP["cover-error"][0], prop_dir, task_dir
                    )
                    yml_content += f"  - property_file: {prop_file}\n"
        yml_file = curr_task[:-2] + ".yml"
        with open(yml_file, "w+") as outp:
            outp.write(yml_content)
        if old_name != curr_task:
            os.rename(old_name, curr_task)
            # Rename a matching unpreprocessed .c companion of an .i file too.
            if old_name[-1] == "i":
                # *.i -> *.c
                if os.path.exists(old_name[:-1] + "c"):
                    old_c = old_name[:-1] + "c"
                # *.c.i -> *.c
                elif os.path.exists(old_name[:-2]):
                    old_c = old_name[:-2]
                else:
                    old_c = None
                if old_c:
                    assert old_c not in all_tasks
                    curr_task_name = os.path.basename(curr_task)
                    new_c_name = os.path.join(
                        os.path.dirname(old_c), curr_task_name[:-1] + "c"
                    )
                    os.rename(old_c, new_c_name)
        # Replace the old task entry by its yml file in every set.
        for content in sets_to_tasks.values():
            try:
                idx = content.index(old_name)
                content[idx] = yml_file
            except ValueError:
                pass
    # Phase 4: rewrite each .set file, collapsing per-directory entries into
    # a single "<dir>/*.yml" glob where possible.
    for task_set, content in sets_to_tasks.items():
        new_content = []
        remaining = set(content)
        glob_suffix = "*"
        for task in content:
            if task not in remaining or task.startswith("#"):
                continue
            # get last occurrence of '/'
            try:
                last_pathsep = task.rindex("/")
                prefix_len = last_pathsep + 1
            except ValueError:
                prefix_len = 0
            prefix = task[:prefix_len]
            globbed_tasks = glob.glob(prefix + glob_suffix)
            globbed_tasks = [t for t in globbed_tasks if t.endswith(".yml")]
            assert len(globbed_tasks) > 0
            # NOTE(review): the next two lines exactly repeat the glob above
            # and look redundant — candidates for removal.
            globbed_tasks = glob.glob(prefix + glob_suffix)
            globbed_tasks = [t for t in globbed_tasks if t.endswith(".yml")]
            new_content.append(prefix + "*.yml")
            remaining -= set(globbed_tasks)
        if task_set != DUMMY_SET:
            with open(task_set, "w+") as outp:
                outp.writelines(f"{line}\n" for line in new_content)
| |
"""
This module contains the spatial lookup types, and the `get_geo_where_clause`
routine for Oracle Spatial.
Please note that WKT support is broken on the XE version, and thus
this backend will not work on such platforms. Specifically, XE lacks
support for an internal JVM, and Java libraries are required to use
the WKT constructors.
"""
import re
from decimal import Decimal
from django.db.backends.oracle.base import DatabaseOperations
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.oracle.adapter import OracleSpatialAdapter
from django.contrib.gis.db.backends.utils import SpatialFunction
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.utils import six
class SDOOperation(SpatialFunction):
    """Base class for SDO* Oracle operations; defaults to comparing the
    function result against 'TRUE' with '='."""
    sql_template = "%(function)s(%(geo_col)s, %(geometry)s) %(operator)s '%(result)s'"

    def __init__(self, func, **kwargs):
        # Fill in the default comparison unless the caller overrode it.
        for key, default in (('operator', '='), ('result', 'TRUE')):
            kwargs.setdefault(key, default)
        super(SDOOperation, self).__init__(func, **kwargs)
class SDODistance(SpatialFunction):
    """Distance lookup via SDO_GEOM.SDO_DISTANCE; the comparison value is
    left as a '%s' placeholder to be bound as a query parameter."""
    sql_template = ('%(function)s(%(geo_col)s, %(geometry)s, %(tolerance)s) '
                    '%(operator)s %(result)s')
    dist_func = 'SDO_GEOM.SDO_DISTANCE'

    def __init__(self, op, tolerance=0.05):
        super(SDODistance, self).__init__(
            self.dist_func, operator=op, result='%s', tolerance=tolerance)
class SDODWithin(SpatialFunction):
    """SDO_WITHIN_DISTANCE lookup; the distance string ('distance=…') is
    bound at query time through the '%%s' placeholder."""
    dwithin_func = 'SDO_WITHIN_DISTANCE'
    sql_template = "%(function)s(%(geo_col)s, %(geometry)s, %%s) = 'TRUE'"

    def __init__(self):
        super(SDODWithin, self).__init__(self.dwithin_func)
class SDOGeomRelate(SpatialFunction):
    "Class for using SDO_GEOM.RELATE."
    relate_func = 'SDO_GEOM.RELATE'
    # The mask appears twice: once as the function argument and once as the
    # expected result, because SDO_GEOM.RELATE returns the matched mask.
    sql_template = ("%(function)s(%(geo_col)s, '%(mask)s', %(geometry)s, "
                    "%(tolerance)s) %(operator)s '%(mask)s'")

    def __init__(self, mask, tolerance=0.05):
        # SDO_GEOM.RELATE(...) has a peculiar argument order: column, mask, geom, tolerance.
        # Moreover, the function result is the mask (e.g., 'DISJOINT' instead of 'TRUE').
        super(SDOGeomRelate, self).__init__(self.relate_func, operator='=',
                                            mask=mask, tolerance=tolerance)
class SDORelate(SpatialFunction):
    """SDO_RELATE operation; validates the mask argument before use."""
    masks = 'TOUCH|OVERLAPBDYDISJOINT|OVERLAPBDYINTERSECT|EQUAL|INSIDE|COVEREDBY|CONTAINS|COVERS|ANYINTERACT|ON'
    mask_regex = re.compile(r'^(%s)(\+(%s))*$' % (masks, masks), re.I)
    sql_template = "%(function)s(%(geo_col)s, %(geometry)s, 'mask=%(mask)s') = 'TRUE'"
    relate_func = 'SDO_RELATE'

    def __init__(self, mask):
        # Reject anything that is not a '+'-joined combination of known masks.
        if self.mask_regex.match(mask) is None:
            raise ValueError('Invalid %s mask: "%s"' % (self.relate_func, mask))
        super(SDORelate, self).__init__(self.relate_func, mask=mask)
# Valid distance types and substitutions
# (Decimal, Distance, float, plus every integer type accepted by six).
dtypes = (Decimal, Distance, float) + six.integer_types
class OracleOperations(DatabaseOperations, BaseSpatialOperations):
    """Spatial database operations for the Oracle Spatial (SDO) backend.

    Maps GeoDjango lookup types and GeoQuerySet methods onto Oracle's
    SDO_* functions and builds the corresponding SQL fragments.
    """
    compiler_module = "django.contrib.gis.db.backends.oracle.compiler"
    name = 'oracle'
    oracle = True
    valid_aggregates = {'Union', 'Extent'}

    Adapter = OracleSpatialAdapter
    Adaptor = Adapter  # Backwards-compatibility alias.

    # Oracle SDO function names used by the GeoQuerySet methods.
    area = 'SDO_GEOM.SDO_AREA'
    gml = 'SDO_UTIL.TO_GMLGEOMETRY'
    centroid = 'SDO_GEOM.SDO_CENTROID'
    difference = 'SDO_GEOM.SDO_DIFFERENCE'
    distance = 'SDO_GEOM.SDO_DISTANCE'
    extent = 'SDO_AGGR_MBR'
    intersection = 'SDO_GEOM.SDO_INTERSECTION'
    length = 'SDO_GEOM.SDO_LENGTH'
    num_geom = 'SDO_UTIL.GETNUMELEM'
    num_points = 'SDO_UTIL.GETNUMVERTICES'
    perimeter = length
    point_on_surface = 'SDO_GEOM.SDO_POINTONSURFACE'
    reverse = 'SDO_UTIL.REVERSE_LINESTRING'
    sym_difference = 'SDO_GEOM.SDO_XOR'
    transform = 'SDO_CS.TRANSFORM'
    union = 'SDO_GEOM.SDO_UNION'
    unionagg = 'SDO_AGGR_UNION'

    # We want to get SDO Geometries as WKT because it is much easier to
    # instantiate GEOS proxies from WKT than SDO_GEOMETRY(...) strings.
    # However, this adversely affects performance (i.e., Java is called
    # to convert to WKT on every query).  If someone wishes to write a
    # SDO_GEOMETRY(...) parser in Python, let me know =)
    select = 'SDO_UTIL.TO_WKTGEOMETRY(%s)'

    distance_functions = {
        'distance_gt': (SDODistance('>'), dtypes),
        'distance_gte': (SDODistance('>='), dtypes),
        'distance_lt': (SDODistance('<'), dtypes),
        'distance_lte': (SDODistance('<='), dtypes),
        'dwithin': (SDODWithin(), dtypes),
    }

    geometry_functions = {
        'contains': SDOOperation('SDO_CONTAINS'),
        'coveredby': SDOOperation('SDO_COVEREDBY'),
        'covers': SDOOperation('SDO_COVERS'),
        'disjoint': SDOGeomRelate('DISJOINT'),
        'intersects': SDOOperation('SDO_OVERLAPBDYINTERSECT'),  # TODO: Is this really the same as ST_Intersects()?
        'equals': SDOOperation('SDO_EQUAL'),
        'exact': SDOOperation('SDO_EQUAL'),
        'overlaps': SDOOperation('SDO_OVERLAPS'),
        'same_as': SDOOperation('SDO_EQUAL'),
        'relate': (SDORelate, six.string_types),  # Oracle uses a different syntax, e.g., 'mask=inside+touch'
        'touches': SDOOperation('SDO_TOUCH'),
        'within': SDOOperation('SDO_INSIDE'),
    }
    geometry_functions.update(distance_functions)

    gis_terms = set(['isnull'])
    gis_terms.update(geometry_functions)

    truncate_params = {'relate': None}

    def get_db_converters(self, internal_type):
        """Append the text-field converter for geometry-typed columns, since
        geometries are selected as WKT CLOBs (see ``select`` above)."""
        converters = super(OracleOperations, self).get_db_converters(internal_type)
        geometry_fields = (
            'PointField', 'GeometryField', 'LineStringField',
            'PolygonField', 'MultiPointField', 'MultiLineStringField',
            'MultiPolygonField', 'GeometryCollectionField', 'GeomField',
            'GMLField',
        )
        if internal_type in geometry_fields:
            converters.append(self.convert_textfield_value)
        return converters

    def convert_extent(self, clob):
        """Convert an extent CLOB into an (xmin, ymin, xmax, ymax) 4-tuple,
        or return None for a NULL extent."""
        if clob:
            # Generally, Oracle returns a polygon for the extent -- however,
            # it can return a single point if there's only one Point in the
            # table.
            ext_geom = Geometry(clob.read())
            gtype = str(ext_geom.geom_type)
            if gtype == 'Polygon':
                # Construct the 4-tuple from the coordinates in the polygon.
                shell = ext_geom.shell
                ll, ur = shell[0][:2], shell[2][:2]
            elif gtype == 'Point':
                ll = ext_geom.coords[:2]
                ur = ll
            else:
                raise Exception('Unexpected geometry type returned for extent: %s' % gtype)
            xmin, ymin = ll
            xmax, ymax = ur
            return (xmin, ymin, xmax, ymax)
        else:
            return None

    def convert_geom(self, clob, geo_field):
        """Construct a Geometry (with the field's SRID) from a WKT CLOB, or
        return None for a NULL value."""
        if clob:
            return Geometry(clob.read(), geo_field.srid)
        else:
            return None

    def geo_db_type(self, f):
        """
        Returns the geometry database type for Oracle. Unlike other spatial
        backends, no stored procedure is necessary and it's the same for all
        geometry types.
        """
        return 'MDSYS.SDO_GEOMETRY'

    def get_distance(self, f, value, lookup_type):
        """
        Returns the distance parameters given the value and the lookup type.
        On Oracle, geometry columns with a geodetic coordinate system behave
        implicitly like a geography column, and thus meters will be used as
        the distance parameter on them.
        """
        if not value:
            return []
        value = value[0]
        if isinstance(value, Distance):
            if f.geodetic(self.connection):
                dist_param = value.m
            else:
                dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
        else:
            dist_param = value

        # dwithin lookups on Oracle require a special string parameter
        # that starts with "distance=".
        if lookup_type == 'dwithin':
            dist_param = 'distance=%s' % dist_param
        return [dist_param]

    def get_geom_placeholder(self, f, value):
        """
        Provides a proper substitution value for Geometries that are not in the
        SRID of the field. Specifically, this routine will substitute in the
        SDO_CS.TRANSFORM() function call.
        """
        if value is None:
            return 'NULL'

        def transform_value(val, srid):
            # A transform is only needed when the value's SRID differs.
            return val.srid != srid

        if hasattr(value, 'expression'):
            if transform_value(value, f.srid):
                placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
            else:
                placeholder = '%s'
            # No geometry value used for F expression, substitute in
            # the column name instead.
            return placeholder % self.get_expression_column(value)
        else:
            if transform_value(value, f.srid):
                return '%s(SDO_GEOMETRY(%%s, %s), %s)' % (self.transform, value.srid, f.srid)
            else:
                return 'SDO_GEOMETRY(%%s, %s)' % f.srid

    def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
        "Returns the SQL WHERE clause for use in Oracle spatial SQL construction."
        geo_col, db_type = lvalue

        # See if an Oracle Geometry function matches the lookup type next
        lookup_info = self.geometry_functions.get(lookup_type, False)
        if lookup_info:
            # Lookup types that are tuples take tuple arguments, e.g., 'relate' and
            # 'dwithin' lookup types.
            if isinstance(lookup_info, tuple):
                # First element of tuple is lookup type, second element is the type
                # of the expected argument (e.g., str, float)
                sdo_op, arg_type = lookup_info

                # BUG FIX: validate the user-supplied value *before* indexing
                # into it; previously `value[0]` was read first, so a
                # non-tuple argument raised TypeError/IndexError instead of
                # the intended, descriptive ValueError.
                # Ensuring that a tuple _value_ was passed in from the user
                if not isinstance(value, tuple):
                    raise ValueError('Tuple required for `%s` lookup type.' % lookup_type)
                if len(value) != 2:
                    raise ValueError('2-element tuple required for %s lookup type.' % lookup_type)
                # Ensuring the argument type matches what we expect.
                if not isinstance(value[1], arg_type):
                    raise ValueError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))
                geom = value[0]

                if lookup_type == 'relate':
                    # The SDORelate class handles construction for these queries,
                    # and verifies the mask argument.
                    return sdo_op(value[1]).as_sql(geo_col, self.get_geom_placeholder(field, geom))
                else:
                    # Otherwise, just call the `as_sql` method on the SDOOperation instance.
                    return sdo_op.as_sql(geo_col, self.get_geom_placeholder(field, geom))
            else:
                # Lookup info is a SDOOperation instance, whose `as_sql` method returns
                # the SQL necessary for the geometry function call. For example:
                #  SDO_CONTAINS("geoapp_country"."poly", SDO_GEOMTRY('POINT(5 23)', 4326)) = 'TRUE'
                return lookup_info.as_sql(geo_col, self.get_geom_placeholder(field, value))
        elif lookup_type == 'isnull':
            # Handling 'isnull' lookup type
            # Returns (sql, params) like the as_sql branches above.
            return "%s IS %sNULL" % (geo_col, ('' if value else 'NOT ')), []

        raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))

    def spatial_aggregate_sql(self, agg):
        """
        Returns the spatial aggregate SQL template and function for the
        given Aggregate instance.
        """
        agg_name = agg.__class__.__name__.lower()
        if agg_name == 'union':
            agg_name += 'agg'
        if agg.is_extent:
            sql_template = '%(function)s(%(field)s)'
        else:
            sql_template = '%(function)s(SDOAGGRTYPE(%(field)s,%(tolerance)s))'
        sql_function = getattr(self, agg_name)
        # Wrap the aggregate in TO_WKTGEOMETRY so the result comes back as WKT.
        return self.select % sql_template, sql_function

    # Routines for getting the OGC-compliant models.
    def geometry_columns(self):
        from django.contrib.gis.db.backends.oracle.models import OracleGeometryColumns
        return OracleGeometryColumns

    def spatial_ref_sys(self):
        from django.contrib.gis.db.backends.oracle.models import OracleSpatialRefSys
        return OracleSpatialRefSys

    def modify_insert_params(self, placeholders, params):
        """Drop out insert parameters for NULL placeholder. Needed for Oracle Spatial
        backend due to #10888.
        """
        # This code doesn't work for bulk insert cases.
        assert len(placeholders) == 1
        return [[param for pholder, param
                 in six.moves.zip(placeholders[0], params[0]) if pholder != 'NULL'], ]
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import story
from telemetry import page as page_module
from telemetry import value
class TestBase(unittest.TestCase):
    """Shared fixture: a StorySet with three pages (bar, baz, foo)."""

    def setUp(self):
        story_set = story.StorySet(base_dir=os.path.dirname(__file__))
        for url in ("http://www.bar.com/",
                    "http://www.baz.com/",
                    "http://www.foo.com/"):
            story_set.AddStory(
                page_module.Page(url, story_set, story_set.base_dir))
        self.story_set = story_set

    @property
    def pages(self):
        return self.story_set.stories
class ValueForTest(value.Value):
    """Concrete Value subclass for tests: every abstract hook is a no-op stub."""

    @classmethod
    def MergeLikeValuesFromSamePage(cls, values):
        pass

    @classmethod
    def MergeLikeValuesFromDifferentPages(cls, values):
        pass

    def GetBuildbotDataType(self, output_context):
        pass

    def GetBuildbotValue(self):
        pass

    def GetChartAndTraceNameForComputedSummaryResult(
        self, trace_tag):
        pass

    def GetRepresentativeNumber(self):
        pass

    def GetRepresentativeString(self):
        pass

    @staticmethod
    def GetJSONTypeName():
        pass
class ValueForAsDictTest(ValueForTest):
    """Stub Value with a concrete JSON type name, so AsDict() output can be
    asserted on ('type': 'baz')."""

    @staticmethod
    def GetJSONTypeName():
        return 'baz'
class ValueForFromDictTest(ValueForTest):
    """Stub Value that supports round-tripping through FromDict, registered
    under the JSON type name 'value_for_from_dict_test'."""

    @staticmethod
    def FromDict(value_dict, page_dict):
        kwargs = value.Value.GetConstructorKwArgs(value_dict, page_dict)
        return ValueForFromDictTest(**kwargs)

    @staticmethod
    def GetJSONTypeName():
        return 'value_for_from_dict_test'
class ValueTest(TestBase):
    """Unit tests for value.Value construction, merging and (de)serialization."""

    def testCompat(self):
        page0 = self.pages[0]
        # NOTE(review): page1 is the same page as page0; if the intent was to
        # check cross-page mergability this was probably meant to be
        # self.pages[1] — confirm against upstream before changing.
        page1 = self.pages[0]

        a = value.Value(page0, 'x', 'unit', important=False, description=None,
                        tir_label='foo', grouping_keys=None)
        b = value.Value(page1, 'x', 'unit', important=False, description=None,
                        tir_label='foo', grouping_keys=None)
        self.assertTrue(b.IsMergableWith(a))

        # Differing tir_labels do not prevent merging.
        a = value.Value(page0, 'x', 'unit', important=False, description=None,
                        tir_label='foo', grouping_keys=None)
        b = value.Value(page0, 'x', 'unit', important=False, description=None,
                        tir_label='bar', grouping_keys=None)
        self.assertTrue(b.IsMergableWith(a))

    def testIncompat(self):
        page0 = self.pages[0]

        # Different units are incompatible.
        a = value.Value(page0, 'x', 'unit', important=False, description=None,
                        tir_label=None, grouping_keys=None)
        b = value.Value(page0, 'x', 'incompatUnit', important=False,
                        tir_label=None, description=None, grouping_keys=None)
        self.assertFalse(b.IsMergableWith(a))

        # Different 'important' flags are incompatible.
        a = value.Value(page0, 'x', 'unit', important=False, description=None,
                        tir_label=None, grouping_keys=None)
        b = value.Value(page0, 'x', 'unit', important=True, description=None,
                        tir_label=None, grouping_keys=None)
        self.assertFalse(b.IsMergableWith(a))

        # Different concrete types are incompatible.
        a = value.Value(page0, 'x', 'unit', important=False, description=None,
                        tir_label=None, grouping_keys=None)
        b = ValueForTest(page0, 'x', 'unit', important=True, description=None,
                         tir_label=None, grouping_keys=None)
        self.assertFalse(b.IsMergableWith(a))

    def testNameMustBeString(self):
        with self.assertRaises(ValueError):
            value.Value(None, 42, 'unit', important=False, description=None,
                        tir_label=None, grouping_keys=None)

    def testUnitsMustBeString(self):
        with self.assertRaises(ValueError):
            value.Value(None, 'x', 42, important=False, description=None,
                        tir_label=None, grouping_keys=None)

    def testImportantMustBeBool(self):
        with self.assertRaises(ValueError):
            value.Value(None, 'x', 'unit', important='foo', description=None,
                        tir_label=None, grouping_keys=None)

    def testDescriptionMustBeStringOrNone(self):
        with self.assertRaises(ValueError):
            value.Value(None, 'x', 'unit', important=False, description=42,
                        tir_label=None, grouping_keys=None)

    def testInteractionRecordMustBeStringOrNone(self):
        with self.assertRaises(ValueError):
            value.Value(None, 'x', 'unit', important=False, description=None,
                        tir_label=42, grouping_keys=None)

    def testGroupingKeysMustBeDictOrNone(self):
        with self.assertRaises(ValueError):
            # BUG FIX: previously passed tir_label=42, so the ValueError came
            # from the tir_label check and the grouping_keys validation was
            # never actually exercised.  Use a valid tir_label and an invalid
            # grouping_keys so this test checks what its name claims.
            value.Value(None, 'x', 'unit', important=False, description=None,
                        tir_label=None, grouping_keys='foo')

    def testAsDictBaseKeys(self):
        v = ValueForAsDictTest(None, 'x', 'unit', important=True, description=None,
                               tir_label='bar', grouping_keys={'foo': 'baz'})
        d = v.AsDict()

        self.assertEquals(d, {
            'name': 'x',
            'type': 'baz',
            'units': 'unit',
            'important': True,
            'tir_label': 'bar',
            'grouping_keys': {'foo': 'baz'}
        })

    def testAsDictWithPage(self):
        page0 = self.pages[0]

        v = ValueForAsDictTest(page0, 'x', 'unit', important=False,
                               description=None, tir_label=None, grouping_keys=None)
        d = v.AsDict()

        self.assertIn('page_id', d)

    def testAsDictWithoutPage(self):
        v = ValueForAsDictTest(None, 'x', 'unit', important=False, description=None,
                               tir_label=None, grouping_keys=None)
        d = v.AsDict()

        self.assertNotIn('page_id', d)

    def testAsDictWithDescription(self):
        v = ValueForAsDictTest(None, 'x', 'unit', important=False,
                               description='Some description.',
                               tir_label=None, grouping_keys=None)
        d = v.AsDict()
        self.assertEqual('Some description.', d['description'])

    def testAsDictWithoutDescription(self):
        v = ValueForAsDictTest(None, 'x', 'unit', important=False, description=None,
                               tir_label=None, grouping_keys=None)
        self.assertNotIn('description', v.AsDict())

    def testAsDictWithInteractionRecord(self):
        v = ValueForAsDictTest(None, 'x', 'unit', important=False,
                               description='Some description.',
                               tir_label='foo', grouping_keys=None)
        d = v.AsDict()
        self.assertEqual('foo', d['tir_label'])

    def testAsDictWithoutInteractionRecord(self):
        v = ValueForAsDictTest(None, 'x', 'unit', important=False, description=None,
                               tir_label=None, grouping_keys=None)
        self.assertNotIn('tir_label', v.AsDict())

    def testFromDictBaseKeys(self):
        d = {
            'type': 'value_for_from_dict_test',
            'name': 'x',
            'units': 'unit'
        }

        v = value.Value.FromDict(d, None)
        self.assertEquals(v.name, 'x')
        self.assertTrue(isinstance(v, ValueForFromDictTest))
        self.assertEquals(v.units, 'unit')

    def testFromDictWithPage(self):
        page0 = self.pages[0]
        page_dict = {page0.id: page0}

        d = {
            'type': 'value_for_from_dict_test',
            'name': 'x',
            'units': 'unit',
            'page_id': page0.id
        }

        v = value.Value.FromDict(d, page_dict)

        self.assertEquals(v.page.id, page0.id)

    def testFromDictWithPageId0(self):
        # Page id 0 is falsy but valid; it must not be treated like "no page".
        page_dict = {0: 'foo'}

        d = {
            'type': 'value_for_from_dict_test',
            'name': 'x',
            'units': 'unit',
            'page_id': 0
        }

        v = value.Value.FromDict(d, page_dict)

        self.assertEquals(v.page, 'foo')

    def testFromDictWithoutPage(self):
        d = {
            'type': 'value_for_from_dict_test',
            'name': 'x',
            'units': 'unit'
        }

        v = value.Value.FromDict(d, {})

        self.assertEquals(v.page, None)

    def testFromDictWithDescription(self):
        d = {
            'type': 'value_for_from_dict_test',
            'name': 'x',
            'units': 'unit',
            'description': 'foo'
        }

        v = value.Value.FromDict(d, {})
        self.assertEquals(v.description, 'foo')

    def testFromDictWithoutDescription(self):
        d = {
            'type': 'value_for_from_dict_test',
            'name': 'x',
            'units': 'unit'
        }

        v = value.Value.FromDict(d, {})
        self.assertEquals(v.description, None)

    def testFromDictWithInteractionRecord(self):
        d = {
            'type': 'value_for_from_dict_test',
            'name': 'x',
            'units': 'unit',
            'description': 'foo',
            'tir_label': 'bar'
        }

        v = value.Value.FromDict(d, {})
        self.assertEquals(v.tir_label, 'bar')

    def testFromDictWithoutInteractionRecord(self):
        d = {
            'type': 'value_for_from_dict_test',
            'name': 'x',
            'units': 'unit'
        }

        v = value.Value.FromDict(d, {})
        self.assertEquals(v.tir_label, None)

    def testFromDictWithGroupingKeys(self):
        d = {
            'type': 'value_for_from_dict_test',
            'name': 'x',
            'units': 'unit',
            'description': 'foo',
            'tir_label': 'bar',
            'grouping_keys': {'foo': 'bar'}
        }

        v = value.Value.FromDict(d, {})
        self.assertEquals(v.grouping_keys, {'foo': 'bar'})

    def testFromDictWithoutGroupingKeys(self):
        d = {
            'type': 'value_for_from_dict_test',
            'name': 'x',
            'units': 'unit'
        }

        v = value.Value.FromDict(d, {})
        self.assertEquals(v.grouping_keys, {})

    def testListOfValuesFromListOfDicts(self):
        d0 = {
            'type': 'value_for_from_dict_test',
            'name': 'x',
            'units': 'unit'
        }
        d1 = {
            'type': 'value_for_from_dict_test',
            'name': 'y',
            'units': 'unit'
        }
        vs = value.Value.ListOfValuesFromListOfDicts([d0, d1], {})
        self.assertEquals(vs[0].name, 'x')
        self.assertEquals(vs[1].name, 'y')

    def testMergedTirLabelForSameLabel(self):
        v = ValueForTest(None, 'foo', 'ms', False, 'd', 'bar', {})
        tir_label = value.MergedTirLabel([v, v])
        self.assertEquals(tir_label, 'bar')

    def testMergedTirLabelForDifferentLabels(self):
        v0 = ValueForTest(None, 'foo', 'ms', False, 'd', 'bar', {})
        v1 = ValueForTest(None, 'foo', 'ms', False, 'd', 'baz', {})
        tir_label = value.MergedTirLabel([v0, v1])
        self.assertIsNone(tir_label)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from ..utils.cmake import CMakeDefinition
def truthifier(value):
    """Map a Python truth value onto the CMake strings "ON"/"OFF"."""
    if value:
        return "ON"
    return "OFF"
def or_else(value, default):
    """Return value when it is truthy, otherwise default (falsy values such
    as "", 0 and None all fall back — contrast with coalesce)."""
    if value:
        return value
    return default
def coalesce(value, fallback):
    """Return fallback only when value is None; unlike or_else, falsy
    non-None values (False, 0, "") are kept."""
    if value is None:
        return fallback
    return value
LLVM_VERSION = 7
class CppConfiguration:
    """Collects Arrow C++ build options and turns them into CMake
    definitions (``definitions``) and a process environment
    (``environment``) for a CMake invocation.

    Most flags are tri-state: True/False force the option, None lets a
    dependent feature turn it on via ``coalesce`` or leaves it OFF.
    """

    def __init__(self,
                 # toolchain
                 cc=None, cxx=None, cxx_flags=None,
                 build_type=None, warn_level=None,
                 cpp_package_prefix=None, install_prefix=None, use_conda=None,
                 build_static=True, build_shared=True, build_unity=True,
                 # tests & examples
                 with_tests=None, with_benchmarks=None, with_examples=None,
                 with_integration=None,
                 # static checks
                 use_asan=None, use_tsan=None, use_ubsan=None,
                 with_fuzzing=None,
                 # Components
                 with_compute=None, with_csv=None, with_cuda=None,
                 with_dataset=None, with_filesystem=None, with_flight=None,
                 with_gandiva=None, with_hdfs=None, with_hiveserver2=None,
                 with_ipc=True, with_json=None, with_jni=None,
                 with_mimalloc=None, with_jemalloc=None,
                 with_parquet=None, with_plasma=None, with_python=True,
                 with_r=None, with_s3=None,
                 # Compressions
                 with_brotli=None, with_bz2=None, with_lz4=None,
                 with_snappy=None, with_zlib=None, with_zstd=None,
                 # extras
                 with_lint_only=False,
                 use_gold_linker=True,
                 simd_level="DEFAULT",
                 cmake_extras=None):
        # Leading-underscore attributes back the lazily-resolved properties
        # of the same name (cc, cxx, build_type, install_prefix, use_conda).
        self._cc = cc
        self._cxx = cxx
        self.cxx_flags = cxx_flags
        self._build_type = build_type
        self.warn_level = warn_level
        self._install_prefix = install_prefix
        self._package_prefix = cpp_package_prefix
        self._use_conda = use_conda
        self.build_static = build_static
        self.build_shared = build_shared
        self.build_unity = build_unity

        self.with_tests = with_tests
        self.with_benchmarks = with_benchmarks
        self.with_examples = with_examples
        self.with_integration = with_integration

        self.use_asan = use_asan
        self.use_tsan = use_tsan
        self.use_ubsan = use_ubsan
        self.with_fuzzing = with_fuzzing

        self.with_compute = with_compute
        self.with_csv = with_csv
        self.with_cuda = with_cuda
        self.with_dataset = with_dataset
        self.with_filesystem = with_filesystem
        self.with_flight = with_flight
        self.with_gandiva = with_gandiva
        self.with_hdfs = with_hdfs
        self.with_hiveserver2 = with_hiveserver2
        self.with_ipc = with_ipc
        self.with_json = with_json
        self.with_jni = with_jni
        self.with_mimalloc = with_mimalloc
        self.with_jemalloc = with_jemalloc
        self.with_parquet = with_parquet
        self.with_plasma = with_plasma
        self.with_python = with_python
        self.with_r = with_r
        self.with_s3 = with_s3

        self.with_brotli = with_brotli
        self.with_bz2 = with_bz2
        self.with_lz4 = with_lz4
        self.with_snappy = with_snappy
        self.with_zlib = with_zlib
        self.with_zstd = with_zstd

        self.with_lint_only = with_lint_only
        self.use_gold_linker = use_gold_linker
        self.simd_level = simd_level
        self.cmake_extras = cmake_extras

        # Fixup required dependencies by providing sane defaults if the caller
        # didn't specify the option.  coalesce only overrides values left as
        # None, so an explicit False from the caller is respected.
        if self.with_r:
            self.with_csv = coalesce(with_csv, True)
            self.with_dataset = coalesce(with_dataset, True)
            self.with_filesystem = coalesce(with_filesystem, True)
            self.with_ipc = coalesce(with_ipc, True)
            self.with_json = coalesce(with_json, True)
            self.with_parquet = coalesce(with_parquet, True)

        if self.with_python:
            self.with_zlib = coalesce(with_zlib, True)
            self.with_lz4 = coalesce(with_lz4, True)

        if self.with_dataset:
            self.with_filesystem = coalesce(with_filesystem, True)
            self.with_parquet = coalesce(with_parquet, True)

        if self.with_parquet:
            self.with_snappy = coalesce(with_snappy, True)

    @property
    def build_type(self):
        """CMake build type: explicit value, else relwithdebinfo for fuzzing
        builds, else release."""
        if self._build_type:
            return self._build_type

        if self.with_fuzzing:
            return "relwithdebinfo"

        return "release"

    @property
    def cc(self):
        """C compiler override; fuzzing builds default to clang."""
        if self._cc:
            return self._cc

        if self.with_fuzzing:
            return "clang-{}".format(LLVM_VERSION)

        return None

    @property
    def cxx(self):
        """C++ compiler override; fuzzing builds default to clang++."""
        if self._cxx:
            return self._cxx

        if self.with_fuzzing:
            return "clang++-{}".format(LLVM_VERSION)

        return None

    def _gen_defs(self):
        # Yields (CMake variable, value) pairs for every configured option.
        if self.cxx_flags:
            yield ("ARROW_CXXFLAGS", self.cxx_flags)

        yield ("CMAKE_EXPORT_COMPILE_COMMANDS", truthifier(True))
        yield ("CMAKE_BUILD_TYPE", self.build_type)

        if not self.with_lint_only:
            yield ("BUILD_WARNING_LEVEL",
                   or_else(self.warn_level, "production"))

        # if not ctx.quiet:
        #   yield ("ARROW_VERBOSE_THIRDPARTY_BUILD", "ON")

        maybe_prefix = self.install_prefix
        if maybe_prefix:
            yield ("CMAKE_INSTALL_PREFIX", maybe_prefix)

        if self._package_prefix is not None:
            yield ("ARROW_DEPENDENCY_SOURCE", "SYSTEM")
            yield ("ARROW_PACKAGE_PREFIX", self._package_prefix)

        yield ("ARROW_BUILD_STATIC", truthifier(self.build_static))
        yield ("ARROW_BUILD_SHARED", truthifier(self.build_shared))
        yield ("CMAKE_UNITY_BUILD", truthifier(self.build_unity))

        # Tests and benchmarks
        yield ("ARROW_BUILD_TESTS", truthifier(self.with_tests))
        yield ("ARROW_BUILD_BENCHMARKS", truthifier(self.with_benchmarks))
        yield ("ARROW_BUILD_EXAMPLES", truthifier(self.with_examples))
        yield ("ARROW_BUILD_INTEGRATION", truthifier(self.with_integration))

        # Static checks
        yield ("ARROW_USE_ASAN", truthifier(self.use_asan))
        yield ("ARROW_USE_TSAN", truthifier(self.use_tsan))
        yield ("ARROW_USE_UBSAN", truthifier(self.use_ubsan))
        yield ("ARROW_FUZZING", truthifier(self.with_fuzzing))

        # Components
        yield ("ARROW_COMPUTE", truthifier(self.with_compute))
        yield ("ARROW_CSV", truthifier(self.with_csv))
        yield ("ARROW_CUDA", truthifier(self.with_cuda))
        yield ("ARROW_DATASET", truthifier(self.with_dataset))
        yield ("ARROW_FILESYSTEM", truthifier(self.with_filesystem))
        yield ("ARROW_FLIGHT", truthifier(self.with_flight))
        yield ("ARROW_GANDIVA", truthifier(self.with_gandiva))
        yield ("ARROW_PARQUET", truthifier(self.with_parquet))
        yield ("ARROW_HDFS", truthifier(self.with_hdfs))
        yield ("ARROW_HIVESERVER2", truthifier(self.with_hiveserver2))
        yield ("ARROW_IPC", truthifier(self.with_ipc))
        yield ("ARROW_JSON", truthifier(self.with_json))
        yield ("ARROW_JNI", truthifier(self.with_jni))
        yield ("ARROW_MIMALLOC", truthifier(self.with_mimalloc))
        yield ("ARROW_JEMALLOC", truthifier(self.with_jemalloc))
        yield ("ARROW_PLASMA", truthifier(self.with_plasma))
        yield ("ARROW_PYTHON", truthifier(self.with_python))
        yield ("ARROW_S3", truthifier(self.with_s3))

        # Compressions
        yield ("ARROW_WITH_BROTLI", truthifier(self.with_brotli))
        yield ("ARROW_WITH_BZ2", truthifier(self.with_bz2))
        yield ("ARROW_WITH_LZ4", truthifier(self.with_lz4))
        yield ("ARROW_WITH_SNAPPY", truthifier(self.with_snappy))
        yield ("ARROW_WITH_ZLIB", truthifier(self.with_zlib))
        yield ("ARROW_WITH_ZSTD", truthifier(self.with_zstd))

        yield ("ARROW_LINT_ONLY", truthifier(self.with_lint_only))

        # Some configurations don't like gnu gold linker.
        broken_with_gold_ld = [self.with_fuzzing, self.with_gandiva]
        if self.use_gold_linker and not any(broken_with_gold_ld):
            yield ("ARROW_USE_LD_GOLD", truthifier(self.use_gold_linker))
        yield ("ARROW_SIMD_LEVEL", or_else(self.simd_level, "DEFAULT"))

        # Detect custom conda toolchain
        if self.use_conda:
            for d, v in [('CMAKE_AR', 'AR'), ('CMAKE_RANLIB', 'RANLIB')]:
                v = os.environ.get(v)
                if v:
                    yield (d, v)

    @property
    def install_prefix(self):
        """Install prefix: explicit value, else the active conda prefix."""
        if self._install_prefix:
            return self._install_prefix

        if self.use_conda:
            return os.environ.get("CONDA_PREFIX")

        return None

    @property
    def use_conda(self):
        # If the user didn't specify a preference, guess via environment
        if self._use_conda is None:
            return os.environ.get("CONDA_PREFIX") is not None
        return self._use_conda

    @property
    def definitions(self):
        """All '-DVAR=value' CMake arguments, with cmake_extras appended."""
        extras = list(self.cmake_extras) if self.cmake_extras else []
        definitions = ["-D{}={}".format(d[0], d[1]) for d in self._gen_defs()]
        return definitions + extras

    @property
    def environment(self):
        """Copy of os.environ with CC/CXX overridden when configured."""
        env = os.environ.copy()

        if self.cc:
            env["CC"] = self.cc

        if self.cxx:
            env["CXX"] = self.cxx

        return env
class CppCMakeDefinition(CMakeDefinition):
    """CMake definition for the C++ sources, driven by a configuration
    object that supplies definitions, environment and build type."""

    def __init__(self, source, conf, **kwargs):
        # Keep the configuration around so callers can inspect it later.
        self.configuration = conf
        super().__init__(
            source,
            **kwargs,
            definitions=conf.definitions,
            env=conf.environment,
            build_type=conf.build_type,
        )
| |
# -*- coding: utf-8 -*-
"""
Runs script responsible for communication with db
in isolation from view functions.
"""
from src.modele import User,ReviewRequestModel, Review, connect_and_get
from src.modele import connect_and_get as con_get
from src.hashing_ import hash_password,check_password
from src.config import TestConfig
from utilities import manipulate_db
from sqlalchemy.exc import ProgrammingError
import datetime
import time
import unittest
import logging
# Log at DEBUG so individual DB calls are visible while the suite runs.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class TestConnection(unittest.TestCase):
    """Smoke tests for the raw DB connection helper."""

    def test_connection_ok(self):
        """A valid query yields a truthy result set."""
        result = connect_and_get("SELECT * FROM users")
        self.assertTrue(result)

    @unittest.skip("output clutters stdout")
    def test_connection_not_ok(self):
        """A syntactically broken query yields a falsy result instead of
        crashing the app."""
        result = connect_and_get("SELECT ** FROM users")
        self.assertFalse(result)
        # Alternative: self.assertRaises(ProgrammingError, connect_and_get, query)
class UserTestSelects(unittest.TestCase):
    """Read and update checks for the User model against the seeded test DB."""

    user = User()

    def test_check_pass(self):
        """check_pass accepts the right password and rejects everything else."""
        # Hugo logs in with his valid password.
        self.assertTrue(self.user.check_pass("Hugo", "secret"))
        # Hugo makes a mistake and types an invalid password.
        self.assertFalse(self.user.check_pass("Hugo", "secret1"))
        # Someone who is not registered tries to log in.
        self.assertFalse(self.user.check_pass("Marco", "secret"))
        # A unicode password string works as well.
        self.assertTrue(self.user.check_pass("Hugo", u'secret'))

    def test_get_profile(self):
        """get_profile returns a 6-field dict for known users, falsy otherwise."""
        profile = self.user.get_profile(1)
        self.assertIsInstance(profile, dict)
        self.assertEqual(6, len(profile))
        self.assertIn("Hugo", profile["username"])
        self.assertIn(u'I love cats', profile["about_me"])
        # David does not exist but tries to get his profile.
        self.assertFalse(self.user.get_profile("David"))

    def test_update_profile(self):
        """update_profile changes about_me and the change is readable back."""
        # Hugo changes his about_me...
        self.user.update_profile(1, about_me="I love dogs not cats")
        # ...then changes his mind and restores the original text.
        self.user.update_profile(1, about_me="I love cats")
        stored = self.user.get_profile(1)
        self.assertIn("I love cats", stored["about_me"])

    def test_get_uid(self):
        """get_id maps a username to its numeric id, falsy when unknown."""
        self.assertEqual(self.user.get_id("Hugo"), 1)
        self.assertFalse(self.user.get_id("Debilo"))
class TestReviewRequest(unittest.TestCase):
    """CRUD and rating checks for ReviewRequestModel against seeded data."""

    req = ReviewRequestModel()

    def test_select_user_requests(self):
        """A user's own requests come back as a list of dicts."""
        # Hugo wants to see his requests...
        attempt = self.req.select_user_requests(1)
        # ...he gets a list of dictionaries...
        self.assertIsInstance(attempt, list)
        self.assertIsInstance(attempt[0], dict)
        # ...where the first item carries the seeded title.
        self.assertIn("Faith and free market", attempt[0]["title"])
        # Jurek doesn't exist but wants to see his requests.
        attempt = self.req.select_user_requests(99)
        self.assertFalse(attempt)

    def test_parse_all(self):
        """parse_all(0) returns the first page of all seeded requests."""
        # John wants to see all requests.
        attempt = self.req.parse_all(0)
        self.assertIsInstance(attempt, list)
        self.assertEqual(len(attempt), 5)
        self.assertIsInstance(attempt[0], dict)
        self.assertEqual(len(attempt[0].keys()), 8)
        # BUGFIX: assertIn arguments were reversed (member must come first,
        # as in test_get_request_review below); the old call checked that the
        # username was a substring of 'Hugo' instead of the other way around.
        self.assertIn('Hugo', attempt[0]["username"])
        # Mary clicks on page two, she needs result with offset 5 -- TODO.

    def test_get_request_review(self):
        """A single request can be fetched by id; unknown ids are falsy."""
        attempt = self.req.get_request_review(102)
        self.assertIsInstance(attempt, dict)
        # NOTE(review): "abook on love" looks like a typo but must match the
        # seeded fixture row -- confirm against utilities/manipulate_db.
        self.assertIn("abook on love", attempt["title"])
        attempt = self.req.get_request_review(1)
        self.assertFalse(attempt)

    def test_update_review_request(self):
        """Updating a request persists the new title."""
        timestamp = datetime.datetime.fromtimestamp(time.time())
        attempt = self.req.update_review_request(101, "new title",
                                                 "new content", "erotic", timestamp)
        self.assertTrue(attempt)
        check_attempt = self.req.get_request_review(101)
        self.assertIn("new title", check_attempt["title"])
        # Restore the original row so other tests keep passing.
        self.req.update_review_request(101, "Faith and free market", \
            "need a review of a fragment of my article on faith", "academic", timestamp)

    def test_rate_req_rev(self):
        """Upvoting a request increments its rate_req counter by one."""
        value_before = self.req.get_request_review(101)["rate_req"]
        attempt = self.req.rate_req_rev(101)
        value_after = self.req.get_request_review(101)["rate_req"]
        self.assertEqual(value_before + 1, value_after)
        # Rating a nonexistent request reports failure.
        attempt = self.req.rate_req_rev(190)
        self.assertFalse(attempt)

    def test_rate_req_rev_minus(self):
        """Downvoting a request decrements its rate_req counter by one."""
        value_before = self.req.get_request_review(101)["rate_req"]
        attempt = self.req.rate_req_rev_minus(101)
        value_after = self.req.get_request_review(101)["rate_req"]
        self.assertEqual(value_before - 1, value_after)

    def test_add_anonymous_request(self):
        """The anonymous flag round-trips through insert_ and fetch."""
        data = {'title': "Faith and free market", 'content': 'love, love, content',
                'uid': 1, 'category': 'academic',
                'deadline': datetime.datetime.fromtimestamp(time.time()),
                'anonymous': True}
        addAnonymous = self.req.insert_(data)
        attempt = self.req.get_request_review(106)
        self.assertTrue(attempt.get('anonymous'))
        data = data.copy()
        data['anonymous'] = False
        addNonAnonymous = self.req.insert_(data)
        attempt = self.req.get_request_review(107)
        self.assertFalse(attempt.get('anonymous'))
class TestReviewObject(unittest.TestCase):
    """Checks for the Review model: reads, ratings and anonymity flags."""

    rev = Review()

    def test_get_review(self):
        """A review fetched by id exposes the expected seeded fields."""
        attempt = self.rev.get_review(201)
        self.assertIsInstance(attempt, dict)
        # NOTE(review): `long` makes this Python-2-only; the suite appears to
        # target Python 2 -- switch to int if it is ever ported.
        self.assertIsInstance(attempt.get('revid'), long)
        self.assertEqual(attempt["revid"], 201)
        self.assertIn("Alice", attempt["reviewer"])
        self.assertIn("Hugo", attempt["requesting"])
        self.assertIn("title", attempt.keys())

    def test_get_users_reviews(self):
        """Reviews written by a user are listed with title, target and text."""
        # Alice wants to see her review of Hugo's draft again.
        attempt = self.rev.get_reviews_by_user(2)
        self.assertIsInstance(attempt, list)
        # BUGFIX: assertIn arguments were reversed (member first, then
        # container), matching the convention used in test_get_review; the
        # old calls only passed because the strings were exactly equal.
        self.assertIn('Faith and free market', attempt[0]["title"])
        self.assertIn("Hugo", attempt[0]["reviewed"])
        self.assertEqual(len(attempt[0]), 8)
        # She sees her review text.
        self.assertIn("Well how do I begin", attempt[0]["review_text"])
        # Hugo wants to see his reviews but he doesn't have any.
        attempt = self.rev.get_reviews_by_user(1)
        self.assertFalse(attempt)

    def test_get_reviews_of_user(self):
        """Reviews received by a user are listed; unknown users are falsy."""
        # Hugo wants to see who wrote a review of his draft.
        attempt = self.rev.get_reviews_of_user(1)
        self.assertIsInstance(attempt, list)  # returns a list of responses
        self.assertIn("Alice", attempt[0]["reviewer"])
        self.assertIn("Faith", attempt[0]['title'])
        self.assertIn("Well how do I", attempt[0]['review_text'])
        # Don wants to see who reviewed his draft but no one did.
        attempt = self.rev.get_reviews_of_user(99)
        self.assertFalse(attempt)
        attempt = self.rev.get_reviews_of_user("hugo")
        self.assertEqual(False, attempt)

    def test_get_best_reviews(self):
        """get_best_reviews supports an optional offset/limit pair."""
        attempt = self.rev.get_best_reviews()
        self.assertIsInstance(attempt, list)
        # Reviews with offset and limit.
        attempt = self.rev.get_best_reviews(2, 2)
        self.assertIsInstance(attempt, list)
        self.assertEqual(2, len(attempt))

    def test_rate_review(self):
        """Upvoting a review increments rate_review by one."""
        before = self.rev.get_review(201).get("rate_review")
        do_update = self.rev.rate_review(201)
        after = self.rev.get_review(201)["rate_review"]
        self.assertEqual(before + 1, after)

    def test_rate_review_minus(self):
        """Downvoting a review decrements rate_review by one."""
        before = self.rev.get_review(201)["rate_review"]
        do_update = self.rev.rate_review_minus(201)
        after = self.rev.get_review(201)["rate_review"]
        self.assertEqual(before - 1, after)

    def test_anonymous_review(self):
        """The anonymous flag round-trips through insert_ and both readers."""
        data = {'reqId': 101, 'uid': 2, 'review_text': 'somehwere over the rain',
                'rating': 5,
                'date_written': datetime.datetime.fromtimestamp(time.time()),
                'anonymous': True}
        self.rev.insert_(data)
        attempt = self.rev.get_review(205)
        self.assertTrue(attempt.get('anonymous'))
        attempt = self.rev.get_reviews_by_user(2)
        self.assertTrue(attempt[-1].get('anonymous'))
        # Clean up the anonymous review.
        self.rev.delete_review(205)
        data2 = data.copy()
        data2['anonymous'] = False
        self.rev.insert_(data2)
        attempt = self.rev.get_review(206)
        self.assertFalse(attempt.get('anonymous'))
        self.rev.delete_review(206)
# Seed the test database with fixture rows before running the suite.
if __name__ == "__main__":
    manipulate_db.populateDb(TestConfig.DATABASE)
    unittest.main()
| |
"""Generate mypy config."""
from __future__ import annotations
import configparser
import io
import os
from pathlib import Path
from typing import Final
from homeassistant.const import REQUIRED_PYTHON_VER
from .model import Config, Integration
# Modules which have type hints which known to be broken.
# If you are an author of component listed here, please fix these errors and
# remove your component from this list to enable type checks.
# Do your best to not add anything new here.
IGNORED_MODULES: Final[list[str]] = [
"homeassistant.components.blueprint.importer",
"homeassistant.components.blueprint.models",
"homeassistant.components.blueprint.websocket_api",
"homeassistant.components.cloud.client",
"homeassistant.components.cloud.http_api",
"homeassistant.components.conversation",
"homeassistant.components.conversation.default_agent",
"homeassistant.components.deconz.alarm_control_panel",
"homeassistant.components.deconz.binary_sensor",
"homeassistant.components.deconz.climate",
"homeassistant.components.deconz.cover",
"homeassistant.components.deconz.fan",
"homeassistant.components.deconz.light",
"homeassistant.components.deconz.lock",
"homeassistant.components.deconz.logbook",
"homeassistant.components.deconz.number",
"homeassistant.components.deconz.sensor",
"homeassistant.components.deconz.siren",
"homeassistant.components.deconz.switch",
"homeassistant.components.denonavr.config_flow",
"homeassistant.components.denonavr.media_player",
"homeassistant.components.denonavr.receiver",
"homeassistant.components.evohome",
"homeassistant.components.evohome.climate",
"homeassistant.components.evohome.water_heater",
"homeassistant.components.google_assistant.helpers",
"homeassistant.components.google_assistant.http",
"homeassistant.components.google_assistant.report_state",
"homeassistant.components.google_assistant.trait",
"homeassistant.components.gree.climate",
"homeassistant.components.gree.switch",
"homeassistant.components.harmony",
"homeassistant.components.harmony.config_flow",
"homeassistant.components.harmony.data",
"homeassistant.components.hassio",
"homeassistant.components.hassio.auth",
"homeassistant.components.hassio.binary_sensor",
"homeassistant.components.hassio.ingress",
"homeassistant.components.hassio.sensor",
"homeassistant.components.hassio.system_health",
"homeassistant.components.hassio.websocket_api",
"homeassistant.components.here_travel_time.sensor",
"homeassistant.components.home_plus_control",
"homeassistant.components.home_plus_control.api",
"homeassistant.components.homekit.aidmanager",
"homeassistant.components.homekit.config_flow",
"homeassistant.components.homekit.util",
"homeassistant.components.honeywell.climate",
"homeassistant.components.icloud",
"homeassistant.components.icloud.account",
"homeassistant.components.icloud.device_tracker",
"homeassistant.components.icloud.sensor",
"homeassistant.components.influxdb",
"homeassistant.components.input_datetime",
"homeassistant.components.izone.climate",
"homeassistant.components.konnected",
"homeassistant.components.konnected.config_flow",
"homeassistant.components.kostal_plenticore.helper",
"homeassistant.components.kostal_plenticore.select",
"homeassistant.components.kostal_plenticore.sensor",
"homeassistant.components.kostal_plenticore.switch",
"homeassistant.components.lovelace",
"homeassistant.components.lovelace.dashboard",
"homeassistant.components.lovelace.resources",
"homeassistant.components.lovelace.websocket",
"homeassistant.components.lutron_caseta",
"homeassistant.components.lutron_caseta.device_trigger",
"homeassistant.components.lutron_caseta.switch",
"homeassistant.components.lyric.climate",
"homeassistant.components.lyric.config_flow",
"homeassistant.components.lyric.sensor",
"homeassistant.components.melcloud",
"homeassistant.components.melcloud.climate",
"homeassistant.components.meteo_france.sensor",
"homeassistant.components.meteo_france.weather",
"homeassistant.components.minecraft_server",
"homeassistant.components.minecraft_server.helpers",
"homeassistant.components.minecraft_server.sensor",
"homeassistant.components.nilu.air_quality",
"homeassistant.components.nzbget",
"homeassistant.components.nzbget.config_flow",
"homeassistant.components.nzbget.coordinator",
"homeassistant.components.nzbget.switch",
"homeassistant.components.omnilogic.common",
"homeassistant.components.omnilogic.sensor",
"homeassistant.components.omnilogic.switch",
"homeassistant.components.onvif.base",
"homeassistant.components.onvif.binary_sensor",
"homeassistant.components.onvif.button",
"homeassistant.components.onvif.camera",
"homeassistant.components.onvif.config_flow",
"homeassistant.components.onvif.device",
"homeassistant.components.onvif.event",
"homeassistant.components.onvif.models",
"homeassistant.components.onvif.parsers",
"homeassistant.components.onvif.sensor",
"homeassistant.components.ozw",
"homeassistant.components.ozw.climate",
"homeassistant.components.ozw.entity",
"homeassistant.components.philips_js",
"homeassistant.components.philips_js.config_flow",
"homeassistant.components.philips_js.device_trigger",
"homeassistant.components.philips_js.light",
"homeassistant.components.philips_js.media_player",
"homeassistant.components.plex.media_player",
"homeassistant.components.profiler",
"homeassistant.components.solaredge.config_flow",
"homeassistant.components.solaredge.coordinator",
"homeassistant.components.solaredge.sensor",
"homeassistant.components.sonos",
"homeassistant.components.sonos.alarms",
"homeassistant.components.sonos.binary_sensor",
"homeassistant.components.sonos.diagnostics",
"homeassistant.components.sonos.entity",
"homeassistant.components.sonos.favorites",
"homeassistant.components.sonos.helpers",
"homeassistant.components.sonos.media_browser",
"homeassistant.components.sonos.media_player",
"homeassistant.components.sonos.number",
"homeassistant.components.sonos.sensor",
"homeassistant.components.sonos.speaker",
"homeassistant.components.sonos.statistics",
"homeassistant.components.system_health",
"homeassistant.components.telegram_bot.polling",
"homeassistant.components.template.number",
"homeassistant.components.template.sensor",
"homeassistant.components.toon",
"homeassistant.components.toon.config_flow",
"homeassistant.components.toon.models",
"homeassistant.components.unifi",
"homeassistant.components.unifi.config_flow",
"homeassistant.components.unifi.device_tracker",
"homeassistant.components.unifi.diagnostics",
"homeassistant.components.unifi.unifi_entity_base",
"homeassistant.components.upnp",
"homeassistant.components.upnp.binary_sensor",
"homeassistant.components.upnp.config_flow",
"homeassistant.components.upnp.device",
"homeassistant.components.upnp.sensor",
"homeassistant.components.vizio.config_flow",
"homeassistant.components.vizio.media_player",
"homeassistant.components.withings",
"homeassistant.components.withings.binary_sensor",
"homeassistant.components.withings.common",
"homeassistant.components.withings.config_flow",
"homeassistant.components.xbox",
"homeassistant.components.xbox.base_sensor",
"homeassistant.components.xbox.binary_sensor",
"homeassistant.components.xbox.browse_media",
"homeassistant.components.xbox.media_source",
"homeassistant.components.xbox.sensor",
"homeassistant.components.xiaomi_aqara",
"homeassistant.components.xiaomi_aqara.binary_sensor",
"homeassistant.components.xiaomi_aqara.lock",
"homeassistant.components.xiaomi_aqara.sensor",
"homeassistant.components.xiaomi_miio",
"homeassistant.components.xiaomi_miio.air_quality",
"homeassistant.components.xiaomi_miio.binary_sensor",
"homeassistant.components.xiaomi_miio.device",
"homeassistant.components.xiaomi_miio.device_tracker",
"homeassistant.components.xiaomi_miio.fan",
"homeassistant.components.xiaomi_miio.humidifier",
"homeassistant.components.xiaomi_miio.light",
"homeassistant.components.xiaomi_miio.sensor",
"homeassistant.components.xiaomi_miio.switch",
"homeassistant.components.yeelight",
"homeassistant.components.yeelight.light",
"homeassistant.components.yeelight.scanner",
"homeassistant.components.zha.alarm_control_panel",
"homeassistant.components.zha.api",
"homeassistant.components.zha.binary_sensor",
"homeassistant.components.zha.button",
"homeassistant.components.zha.climate",
"homeassistant.components.zha.config_flow",
"homeassistant.components.zha.core.channels",
"homeassistant.components.zha.core.channels.base",
"homeassistant.components.zha.core.channels.closures",
"homeassistant.components.zha.core.channels.general",
"homeassistant.components.zha.core.channels.homeautomation",
"homeassistant.components.zha.core.channels.hvac",
"homeassistant.components.zha.core.channels.lighting",
"homeassistant.components.zha.core.channels.lightlink",
"homeassistant.components.zha.core.channels.manufacturerspecific",
"homeassistant.components.zha.core.channels.measurement",
"homeassistant.components.zha.core.channels.protocol",
"homeassistant.components.zha.core.channels.security",
"homeassistant.components.zha.core.channels.smartenergy",
"homeassistant.components.zha.core.decorators",
"homeassistant.components.zha.core.device",
"homeassistant.components.zha.core.discovery",
"homeassistant.components.zha.core.gateway",
"homeassistant.components.zha.core.group",
"homeassistant.components.zha.core.helpers",
"homeassistant.components.zha.core.registries",
"homeassistant.components.zha.core.store",
"homeassistant.components.zha.core.typing",
"homeassistant.components.zha.cover",
"homeassistant.components.zha.device_action",
"homeassistant.components.zha.device_tracker",
"homeassistant.components.zha.entity",
"homeassistant.components.zha.fan",
"homeassistant.components.zha.light",
"homeassistant.components.zha.lock",
"homeassistant.components.zha.select",
"homeassistant.components.zha.sensor",
"homeassistant.components.zha.siren",
"homeassistant.components.zha.switch",
"homeassistant.components.zwave",
"homeassistant.components.zwave.migration",
"homeassistant.components.zwave.node_entity",
]
# Component modules which should set no_implicit_reexport = true.
NO_IMPLICIT_REEXPORT_MODULES: set[str] = {
"homeassistant.components",
"homeassistant.components.diagnostics.*",
}
HEADER: Final = """
# Automatically generated by hassfest.
#
# To update, run python3 -m script.hassfest -p mypy_config
""".lstrip()
GENERAL_SETTINGS: Final[dict[str, str]] = {
"python_version": ".".join(str(x) for x in REQUIRED_PYTHON_VER[:2]),
"show_error_codes": "true",
"follow_imports": "silent",
# Enable some checks globally.
"ignore_missing_imports": "true",
"strict_equality": "true",
"warn_incomplete_stub": "true",
"warn_redundant_casts": "true",
"warn_unused_configs": "true",
"warn_unused_ignores": "true",
}
# This is basically the list of checks which is enabled for "strict=true".
# "strict=false" in config files does not turn strict settings off if they've been
# set in a more general section (it instead means as if strict was not specified at
# all), so we need to list all checks manually to be able to flip them wholesale.
STRICT_SETTINGS: Final[list[str]] = [
"check_untyped_defs",
"disallow_incomplete_defs",
"disallow_subclassing_any",
"disallow_untyped_calls",
"disallow_untyped_decorators",
"disallow_untyped_defs",
"no_implicit_optional",
"warn_return_any",
"warn_unreachable",
# TODO: turn these on, address issues
# "disallow_any_generics",
# "no_implicit_reexport",
]
# Strict settings are already applied for core files.
# To enable granular typing, add additional settings if core files are given.
STRICT_SETTINGS_CORE: Final[list[str]] = [
"disallow_any_generics",
]
def _strict_module_in_ignore_list(
module: str, ignored_modules_set: set[str]
) -> str | None:
if module in ignored_modules_set:
return module
if module.endswith("*"):
module = module[:-1]
for ignored_module in ignored_modules_set:
if ignored_module.startswith(module):
return ignored_module
return None
def generate_and_validate(config: Config) -> str:
    """Validate the .strict-typing listing and render the mypy.ini content.

    Returns the generated config text, or "" when validation errors were
    recorded (generating from a bad listing would likely crash).
    """
    config_path = config.root / ".strict-typing"

    with config_path.open() as fp:
        lines = fp.readlines()

    # Filter empty and commented lines.
    # BUGFIX: strip before testing for "#" so an indented comment line is
    # skipped instead of being parsed as a bogus module name.
    parsed_modules: list[str] = []
    for line in lines:
        stripped = line.strip()
        if stripped and not stripped.startswith("#"):
            parsed_modules.append(stripped)

    # Split entries into component modules and core modules.
    strict_modules: list[str] = []
    strict_core_modules: list[str] = []
    for module in parsed_modules:
        if module.startswith("homeassistant.components"):
            strict_modules.append(module)
        else:
            strict_core_modules.append(module)

    # A strict module must not simultaneously sit on the ignore list.
    ignored_modules_set: set[str] = set(IGNORED_MODULES)
    for module in strict_modules:
        if (
            not module.startswith("homeassistant.components.")
            and module != "homeassistant.components"
        ):
            config.add_error(
                "mypy_config", f"Only components should be added: {module}"
            )
        if ignored_module := _strict_module_in_ignore_list(module, ignored_modules_set):
            config.add_error(
                "mypy_config",
                f"Module '{ignored_module}' is in ignored list in mypy_config.py",
            )

    # Validate that all modules exist.
    all_modules = (
        strict_modules
        + strict_core_modules
        + IGNORED_MODULES
        + list(NO_IMPLICIT_REEXPORT_MODULES)
    )
    for module in all_modules:
        if module.endswith(".*"):
            # Wildcard entries must point at a package directory.
            module_path = Path(module[:-2].replace(".", os.path.sep))
            if not module_path.is_dir():
                # BUGFIX: error message was missing the closing quote.
                config.add_error("mypy_config", f"Module '{module}' is not a folder")
        else:
            # Plain entries may be a module file or a package __init__.
            module = module.replace(".", os.path.sep)
            module_path = Path(f"{module}.py")
            if module_path.is_file():
                continue
            module_path = Path(module) / "__init__.py"
            if not module_path.is_file():
                # BUGFIX: error message was missing the closing quote.
                config.add_error("mypy_config", f"Module '{module}' doesn't exist")

    # Don't generate mypy.ini if there're errors found because it will likely crash.
    if any(err.plugin == "mypy_config" for err in config.errors):
        return ""

    mypy_config = configparser.ConfigParser()

    # [mypy]: global settings plus every strict check turned on.
    general_section = "mypy"
    mypy_config.add_section(general_section)
    for key, value in GENERAL_SETTINGS.items():
        mypy_config.set(general_section, key, value)
    for key in STRICT_SETTINGS:
        mypy_config.set(general_section, key, "true")

    # By default enable no_implicit_reexport only for homeassistant.*
    # Disable it afterwards for all components
    components_section = "mypy-homeassistant.*"
    mypy_config.add_section(components_section)
    mypy_config.set(components_section, "no_implicit_reexport", "true")

    # Core modules additionally get the extra-strict core-only settings.
    for core_module in strict_core_modules:
        core_section = f"mypy-{core_module}"
        mypy_config.add_section(core_section)
        for key in STRICT_SETTINGS_CORE:
            mypy_config.set(core_section, key, "true")

    # By default strict checks are disabled for components.
    components_section = "mypy-homeassistant.components.*"
    mypy_config.add_section(components_section)
    for key in STRICT_SETTINGS:
        mypy_config.set(components_section, key, "false")
    mypy_config.set(components_section, "no_implicit_reexport", "false")

    # Re-enable strict checks for components that opted in.
    for strict_module in strict_modules:
        strict_section = f"mypy-{strict_module}"
        mypy_config.add_section(strict_section)
        for key in STRICT_SETTINGS:
            mypy_config.set(strict_section, key, "true")
        if strict_module in NO_IMPLICIT_REEXPORT_MODULES:
            mypy_config.set(strict_section, "no_implicit_reexport", "true")

    # Reexport-only modules that aren't also strict get their own section.
    for reexport_module in NO_IMPLICIT_REEXPORT_MODULES.difference(strict_modules):
        reexport_section = f"mypy-{reexport_module}"
        mypy_config.add_section(reexport_section)
        mypy_config.set(reexport_section, "no_implicit_reexport", "true")

    # Disable strict checks for tests
    tests_section = "mypy-tests.*"
    mypy_config.add_section(tests_section)
    for key in STRICT_SETTINGS:
        mypy_config.set(tests_section, key, "false")

    # Ignored modules are silenced entirely.
    for ignored_module in IGNORED_MODULES:
        ignored_section = f"mypy-{ignored_module}"
        mypy_config.add_section(ignored_section)
        mypy_config.set(ignored_section, "ignore_errors", "true")

    with io.StringIO() as fp:
        mypy_config.write(fp)
        fp.seek(0)
        return HEADER + fp.read().strip()
def validate(integrations: dict[str, Integration], config: Config) -> None:
    """Check that mypy.ini on disk matches the freshly generated content."""
    content = generate_and_validate(config)
    config.cache["mypy_config"] = content

    # Generation already reported problems; no point diffing the file then.
    if any(err.plugin == "mypy_config" for err in config.errors):
        return

    config_path = config.root / "mypy.ini"
    with open(str(config_path)) as fp:
        current = fp.read().strip()
    if current != content:
        config.add_error(
            "mypy_config",
            "File mypy.ini is not up to date. Run python3 -m script.hassfest",
            fixable=True,
        )
def generate(integrations: dict[str, Integration], config: Config) -> None:
    """Write the cached mypy config out to mypy.ini with a trailing newline."""
    output_path = config.root / "mypy.ini"
    with open(str(output_path), "w") as fp:
        fp.write(config.cache['mypy_config'] + "\n")
| |
"""Support to set a numeric value from a slider or text box."""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ENTITY_SERVICE_SCHEMA
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
ATTR_MODE,
CONF_ICON,
CONF_NAME,
CONF_MODE,
)
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
_LOGGER = logging.getLogger(__name__)
DOMAIN = "input_number"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
CONF_INITIAL = "initial"
CONF_MIN = "min"
CONF_MAX = "max"
CONF_STEP = "step"
MODE_SLIDER = "slider"
MODE_BOX = "box"
ATTR_INITIAL = "initial"
ATTR_VALUE = "value"
ATTR_MIN = "min"
ATTR_MAX = "max"
ATTR_STEP = "step"
SERVICE_SET_VALUE = "set_value"
SERVICE_INCREMENT = "increment"
SERVICE_DECREMENT = "decrement"
SERVICE_SET_VALUE_SCHEMA = ENTITY_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_VALUE): vol.Coerce(float)}
)
def _cv_input_number(cfg):
    """Configure validation helper for input number (voluptuous).

    Ensures min < max and that any configured initial value lies inside the
    range. Returns *cfg* unchanged when valid; raises vol.Invalid otherwise.
    """
    minimum = cfg.get(CONF_MIN)
    maximum = cfg.get(CONF_MAX)
    if minimum >= maximum:
        # BUGFIX: the message interpolated minimum/maximum in swapped
        # positions, reporting "Maximum (<min>) ... minimum (<max>)".
        raise vol.Invalid(
            f"Maximum ({maximum}) is not greater than minimum ({minimum})"
        )
    state = cfg.get(CONF_INITIAL)
    if state is not None and (state < minimum or state > maximum):
        raise vol.Invalid(f"Initial value {state} not in range {minimum}-{maximum}")
    return cfg
# Per-slug configuration schema; _cv_input_number enforces the cross-field
# constraints (min < max, initial within range) after value coercion.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: cv.schema_with_slug_keys(
            vol.All(
                {
                    vol.Optional(CONF_NAME): cv.string,
                    vol.Required(CONF_MIN): vol.Coerce(float),
                    vol.Required(CONF_MAX): vol.Coerce(float),
                    vol.Optional(CONF_INITIAL): vol.Coerce(float),
                    vol.Optional(CONF_STEP, default=1): vol.All(
                        vol.Coerce(float), vol.Range(min=1e-3)
                    ),
                    vol.Optional(CONF_ICON): cv.icon,
                    vol.Optional(ATTR_UNIT_OF_MEASUREMENT): cv.string,
                    vol.Optional(CONF_MODE, default=MODE_SLIDER): vol.In(
                        [MODE_BOX, MODE_SLIDER]
                    ),
                },
                _cv_input_number,
            )
        )
    },
    required=True,
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
    """Set up an input slider."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)

    # One entity per configured slug.
    entities = [
        InputNumber(
            object_id,
            cfg.get(CONF_NAME),
            cfg.get(CONF_INITIAL),
            cfg.get(CONF_MIN),
            cfg.get(CONF_MAX),
            cfg.get(CONF_STEP),
            cfg.get(CONF_ICON),
            cfg.get(ATTR_UNIT_OF_MEASUREMENT),
            cfg.get(CONF_MODE),
        )
        for object_id, cfg in config[DOMAIN].items()
    ]

    # Nothing configured: report failure so the component is not loaded.
    if not entities:
        return False

    for service, schema, handler in (
        (SERVICE_SET_VALUE, SERVICE_SET_VALUE_SCHEMA, "async_set_value"),
        (SERVICE_INCREMENT, ENTITY_SERVICE_SCHEMA, "async_increment"),
        (SERVICE_DECREMENT, ENTITY_SERVICE_SCHEMA, "async_decrement"),
    ):
        component.async_register_entity_service(service, schema, handler)

    await component.async_add_entities(entities)
    return True
class InputNumber(RestoreEntity):
    """Representation of a slider."""

    def __init__(
        self, object_id, name, initial, minimum, maximum, step, icon, unit, mode
    ):
        """Initialize an input number."""
        self.entity_id = ENTITY_ID_FORMAT.format(object_id)
        self._name = name
        # None when no explicit initial value was configured; the last known
        # state is then restored in async_added_to_hass.
        self._current_value = initial
        self._initial = initial
        self._minimum = minimum
        self._maximum = maximum
        self._step = step
        self._icon = icon
        self._unit = unit
        self._mode = mode

    @property
    def should_poll(self):
        """If entity should be polled."""
        return False

    @property
    def name(self):
        """Return the name of the input slider."""
        return self._name

    @property
    def icon(self):
        """Return the icon to be used for this entity."""
        return self._icon

    @property
    def state(self):
        """Return the state of the component."""
        return self._current_value

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit

    @property
    def state_attributes(self):
        """Return the state attributes."""
        return {
            ATTR_INITIAL: self._initial,
            ATTR_MIN: self._minimum,
            ATTR_MAX: self._maximum,
            ATTR_STEP: self._step,
            ATTR_MODE: self._mode,
        }

    async def async_added_to_hass(self):
        """Run when entity about to be added to hass.

        Restores the previous value unless an explicit initial value was
        configured.
        """
        await super().async_added_to_hass()
        if self._current_value is not None:
            return

        # No initial value configured; try to restore the last known state.
        state = await self.async_get_last_state()
        value = None
        if state is not None:
            try:
                value = float(state.state)
            except (TypeError, ValueError):
                # BUGFIX: a non-numeric restored state (e.g. "unknown")
                # previously raised ValueError and aborted entity setup;
                # fall back to the configured minimum instead.
                value = None
        # Check against None because value can be 0
        if value is not None and self._minimum <= value <= self._maximum:
            self._current_value = value
        else:
            self._current_value = self._minimum

    async def async_set_value(self, value):
        """Set new value, rejecting anything outside [min, max]."""
        num_value = float(value)
        if num_value < self._minimum or num_value > self._maximum:
            _LOGGER.warning(
                "Invalid value: %s (range %s - %s)",
                num_value,
                self._minimum,
                self._maximum,
            )
            return
        self._current_value = num_value
        await self.async_update_ha_state()

    async def async_increment(self):
        """Increment value by one step, unless it would exceed the maximum."""
        new_value = self._current_value + self._step
        if new_value > self._maximum:
            _LOGGER.warning(
                "Invalid value: %s (range %s - %s)",
                new_value,
                self._minimum,
                self._maximum,
            )
            return
        self._current_value = new_value
        await self.async_update_ha_state()

    async def async_decrement(self):
        """Decrement value by one step, unless it would drop below the minimum."""
        new_value = self._current_value - self._step
        if new_value < self._minimum:
            _LOGGER.warning(
                "Invalid value: %s (range %s - %s)",
                new_value,
                self._minimum,
                self._maximum,
            )
            return
        self._current_value = new_value
        await self.async_update_ha_state()
| |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.instances \
import forms as project_forms
from openstack_dashboard.dashboards.admin.instances \
import tables as project_tables
from openstack_dashboard.dashboards.admin.instances import tabs
from openstack_dashboard.dashboards.project.instances import views
from openstack_dashboard.dashboards.project.instances.workflows \
import update_instance
from openstack_dashboard.utils import futurist_utils
# The functions below re-export the console endpoints from
# project.instances.views under this module so that Django URL reflection
# (reverse lookups against the admin urlconf) can resolve them here.
# Each simply delegates; `args` is presumably the request object passed
# positionally — TODO confirm against the urlconf wiring.
def console(args, **kvargs):
    return views.console(args, **kvargs)
# re-use vnc from project.instances.views to make reflection work
def vnc(args, **kvargs):
    return views.vnc(args, **kvargs)
# re-use spice from project.instances.views to make reflection work
def spice(args, **kvargs):
    return views.spice(args, **kvargs)
# re-use rdp from project.instances.views to make reflection work
def rdp(args, **kvargs):
    return views.rdp(args, **kvargs)
# re-use mks from project.instances.views to make reflection work
def mks(args, **kvargs):
    return views.mks(args, **kvargs)
class AdminUpdateView(views.UpdateView):
    """Admin variant of the instance update view.

    Inherits the project-level UpdateView but runs the admin-specific
    workflow and returns to the admin instances index on success.
    """

    workflow_class = update_instance.AdminUpdateInstance
    success_url = reverse_lazy("horizon:admin:instances:index")
class AdminIndexView(tables.DataTableView):
    """Admin table view listing instances across all tenants.

    Enriches each instance with its project (tenant), image and flavor,
    fetching the three lookup tables in parallel.
    """

    table_class = project_tables.AdminInstancesTable
    page_title = _("Instances")

    def has_more_data(self, table):
        # self._more is set as a side effect of get_data().
        return self._more

    def needs_filter_first(self, table):
        # self._needs_filter_first is set as a side effect of get_data().
        return self._needs_filter_first

    def _get_tenants(self):
        """Return a mapping of tenant ID to tenant, or {} on error."""
        # Gather our tenants to correlate against IDs
        try:
            tenants, __ = api.keystone.tenant_list(self.request)
            return {t.id: t for t in tenants}
        except Exception:
            msg = _('Unable to retrieve instance project information.')
            exceptions.handle(self.request, msg)
            return {}

    def _get_images(self, instances=()):
        """Return a mapping of image ID to image for the given instances."""
        # Gather our images to correlate our instances to them
        try:
            # NOTE(aarefiev): request images, instances was booted from.
            img_ids = (instance.image.get('id') for instance in
                       instances if isinstance(instance.image, dict))
            real_img_ids = list(filter(None, img_ids))
            images = api.glance.image_list_detailed_by_ids(
                self.request, real_img_ids)
            return {image.id: image for image in images}
        except Exception:
            exceptions.handle(self.request, ignore=True)
            return {}

    def _get_flavors(self):
        """Return a mapping of flavor ID (as str) to flavor, {} on error."""
        # Gather our flavors to correlate against IDs
        try:
            flavors = api.nova.flavor_list(self.request)
            return {str(flavor.id): flavor for flavor in flavors}
        except Exception:
            msg = _("Unable to retrieve flavor list.")
            exceptions.handle(self.request, msg)
            return {}

    def _get_instances(self, search_opts):
        """Return one page of instances; sets self._more pagination flag."""
        try:
            instances, self._more = api.nova.server_list(
                self.request,
                search_opts=search_opts)
        except Exception:
            self._more = False
            instances = []
            exceptions.handle(self.request,
                              _('Unable to retrieve instance list.'))
        return instances

    def get_data(self):
        """Assemble table rows: instances plus project/image/flavor info."""
        marker = self.request.GET.get(
            project_tables.AdminInstancesTable._meta.pagination_param, None)
        default_search_opts = {'marker': marker,
                               'paginate': True,
                               'all_tenants': True}

        search_opts = self.get_filters(default_search_opts.copy())

        # If filter_first is set and if there are not other filters
        # selected, then search criteria must be provided and return an empty
        # list
        filter_first = getattr(settings, 'FILTER_DATA_FIRST', {})
        if (filter_first.get('admin.instances', False) and
                len(search_opts) == len(default_search_opts)):
            self._needs_filter_first = True
            self._more = False
            return []
        self._needs_filter_first = False

        instances = self._get_instances(search_opts)

        # Fetch the three lookup tables concurrently.
        results = futurist_utils.call_functions_parallel(
            (self._get_images, [tuple(instances)]),
            self._get_flavors,
            self._get_tenants)
        image_dict, flavor_dict, tenant_dict = results

        non_api_filter_info = (
            ('project', 'tenant_id', tenant_dict.values()),
            ('image_name', 'image', image_dict.values()),
            ('flavor_name', 'flavor', flavor_dict.values()),
        )
        if not views.process_non_api_filters(search_opts, non_api_filter_info):
            self._more = False
            return []

        # Loop through instances to get image, flavor and tenant info.
        for inst in instances:
            if hasattr(inst, 'image') and isinstance(inst.image, dict):
                image_id = inst.image.get('id')
                if image_id in image_dict:
                    inst.image = image_dict[image_id]
                # In case image not found in image_map, set name to empty
                # to avoid fallback API call to Glance in api/nova.py
                # until the call is deprecated in api itself
                else:
                    inst.image['name'] = _("-")

            flavor_id = inst.flavor["id"]
            try:
                if flavor_id in flavor_dict:
                    inst.full_flavor = flavor_dict[flavor_id]
                else:
                    # If the flavor_id is not in flavor_dict list,
                    # gets it via nova api.
                    inst.full_flavor = api.nova.flavor_get(
                        self.request, flavor_id)
            except Exception:
                msg = _('Unable to retrieve instance size information.')
                exceptions.handle(self.request, msg)

            tenant = tenant_dict.get(inst.tenant_id, None)
            inst.tenant_name = getattr(tenant, "name", None)
        return instances
class LiveMigrateView(forms.ModalFormView):
    """Modal form to live-migrate an instance to another compute host."""

    form_class = project_forms.LiveMigrateForm
    template_name = 'admin/instances/live_migrate.html'
    context_object_name = 'instance'
    success_url = reverse_lazy("horizon:admin:instances:index")
    page_title = _("Live Migrate")
    success_label = page_title

    def get_context_data(self, **kwargs):
        # Zero-argument super(): this codebase is Python 3 only
        # (django.urls requires Django >= 2.0).
        context = super().get_context_data(**kwargs)
        context["instance_id"] = self.kwargs['instance_id']
        return context

    @memoized.memoized_method
    def get_hosts(self, *args, **kwargs):
        """Return the names of hosts running a nova-compute service."""
        try:
            services = api.nova.service_list(self.request,
                                             binary='nova-compute')
            return [s.host for s in services]
        except Exception:
            redirect = reverse("horizon:admin:instances:index")
            msg = _('Unable to retrieve host information.')
            exceptions.handle(self.request, msg, redirect=redirect)

    @memoized.memoized_method
    def get_object(self, *args, **kwargs):
        """Return the instance to be migrated (memoized per view)."""
        instance_id = self.kwargs['instance_id']
        try:
            return api.nova.server_get(self.request, instance_id)
        except Exception:
            redirect = reverse("horizon:admin:instances:index")
            msg = _('Unable to retrieve instance details.')
            exceptions.handle(self.request, msg, redirect=redirect)

    def get_initial(self):
        """Seed the form with the instance, its current host and targets."""
        initial = super().get_initial()
        _object = self.get_object()
        if _object:
            current_host = getattr(_object, 'OS-EXT-SRV-ATTR:host', '')
            initial.update({'instance_id': self.kwargs['instance_id'],
                            'current_host': current_host,
                            'hosts': self.get_hosts()})
        return initial
class DetailView(views.DetailView):
    """Admin instance detail page.

    Inherits the project-level detail view but uses the admin tab group,
    admin URL names, and the admin table's row actions.
    """

    tab_group_class = tabs.AdminInstanceDetailTabs
    redirect_url = 'horizon:admin:instances:index'
    image_url = 'horizon:admin:images:detail'
    volume_url = 'horizon:admin:volumes:detail'

    def _get_actions(self, instance):
        # Render the admin table's row actions for this single instance.
        table = project_tables.AdminInstancesTable(self.request)
        return table.render_row_actions(instance)
class RescueView(views.RescueView):
    """Admin variant of the instance rescue dialog."""

    form_class = project_forms.RescueInstanceForm
    submit_url = "horizon:admin:instances:rescue"
    success_url = reverse_lazy('horizon:admin:instances:index')
    template_name = 'admin/instances/rescue.html'

    def get_initial(self):
        # Seed the form with the instance being rescued.
        instance_id = self.kwargs["instance_id"]
        return {'instance_id': instance_id}
| |
"""
byceps.blueprints.admin.orga_team.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from flask import abort, request
from flask_babel import gettext
from ....permissions.orga_team import OrgaTeamPermission
from ....services.orga_team import service as orga_team_service
from ....services.party import service as party_service
from ....services.user import service as user_service
from ....util.framework.blueprint import create_blueprint
from ....util.framework.flash import flash_error, flash_success
from ....util.framework.templating import templated
from ....util.views import permission_required, redirect_to, respond_no_content
from .forms import (
MembershipCreateForm,
MembershipUpdateForm,
OrgaTeamCreateForm,
OrgaTeamsCopyForm,
)
# Admin blueprint for managing organizer teams and their memberships.
blueprint = create_blueprint('orga_team_admin', __name__)
# -------------------------------------------------------------------- #
# teams
@blueprint.get('/teams/<party_id>')
@permission_required(OrgaTeamPermission.view)
@templated
def teams_for_party(party_id):
    """List organizer teams for that party."""
    party = _get_party_or_404(party_id)

    teams_and_members = orga_team_service.get_teams_and_members_for_party(
        party.id
    )

    def _member_sort_key(member):
        return user_service.get_sort_key_for_screen_name(member.user)

    # Order teams by title, members within each team by screen name.
    teams_and_members = [
        (team, sorted(members, key=_member_sort_key))
        for team, members in sorted(
            teams_and_members, key=lambda tm: tm[0].title
        )
    ]

    return {
        'party': party,
        'teams_and_members': teams_and_members,
    }
@blueprint.get('/teams/<party_id>/create')
@permission_required(OrgaTeamPermission.create)
@templated
def team_create_form(party_id, erroneous_form=None):
    """Show form to create an organizer team for a party."""
    party = _get_party_or_404(party_id)

    # Re-display a submitted form with errors, or start with a fresh one.
    form = erroneous_form or OrgaTeamCreateForm()

    return {
        'party': party,
        'form': form,
    }
@blueprint.post('/teams/<party_id>')
@permission_required(OrgaTeamPermission.create)
def team_create(party_id):
    """Create an organizer team for a party."""
    party = _get_party_or_404(party_id)

    form = OrgaTeamCreateForm(request.form)
    if not form.validate():
        return team_create_form(party.id, form)

    team = orga_team_service.create_team(party.id, form.title.data.strip())

    flash_success(
        gettext(
            'Team "%(team_title)s" for party "%(party_title)s" has been created.',
            team_title=team.title,
            party_title=party.title,
        )
    )
    return redirect_to('.teams_for_party', party_id=party.id)
@blueprint.delete('/teams/<uuid:team_id>')
@permission_required(OrgaTeamPermission.delete)
@respond_no_content
def team_delete(team_id):
    """Delete the team."""
    team = _get_team_or_404(team_id)

    # Teams with members must not be deleted.
    if orga_team_service.has_team_memberships(team.id):
        flash_error(
            gettext(
                'Team "%(team_title)s" cannot be deleted because it has members.',
                team_title=team.title,
            )
        )
        return

    # Remember the title before the team record is removed.
    team_title = team.title
    orga_team_service.delete_team(team.id)

    flash_success(
        gettext('Team "%(title)s" has been deleted.', title=team_title)
    )
@blueprint.get('/teams/<target_party_id>/copy')
@permission_required(OrgaTeamPermission.create)
@templated
def teams_copy_form(target_party_id, erroneous_form=None):
    """Show form to copy all organizer teams from another party."""
    target_party = _get_party_or_404(target_party_id)

    if orga_team_service.count_teams_for_party(target_party.id):
        flash_error(
            gettext(
                'This party already has teams. No additional teams can be copied to it.'
            )
        )
        return redirect_to('.teams_for_party', party_id=target_party.id)

    # Candidate source parties: same brand, but never the target itself.
    candidate_parties = [
        p
        for p in party_service.get_parties_for_brand(target_party.brand_id)
        if p.id != target_party.id
    ]

    team_count_per_party = orga_team_service.count_teams_for_parties(
        {p.id for p in candidate_parties}
    )

    # Only parties that actually have orga teams are useful as sources.
    candidate_parties = [
        p for p in candidate_parties if team_count_per_party.get(p.id, 0)
    ]

    if not candidate_parties:
        flash_error(
            gettext('No other parties exist from which teams could be copied.')
        )
        return redirect_to('.teams_for_party', party_id=target_party.id)

    candidate_parties.sort(key=lambda p: p.starts_at, reverse=True)

    form = erroneous_form or OrgaTeamsCopyForm()
    form.set_party_choices(candidate_parties, team_count_per_party)

    return {
        'party': target_party,
        'form': form,
    }
@blueprint.post('/teams/<target_party_id>/copy')
@permission_required(OrgaTeamPermission.create)
def teams_copy(target_party_id):
    """Copy all organizer teams from another party."""
    target_party = _get_party_or_404(target_party_id)

    if orga_team_service.count_teams_for_party(target_party.id):
        flash_error(
            gettext(
                'This party already has teams. No additional teams can be copied to it.'
            )
        )
        return redirect_to('.teams_for_party', party_id=target_party.id)

    form = OrgaTeamsCopyForm(request.form)
    form.set_party_choices(
        party_service.get_parties_for_brand(target_party.brand_id)
    )
    if not form.validate():
        return teams_copy_form(target_party.id, form)

    source_party = party_service.get_party(form.party_id.data)

    copied_teams_count = orga_team_service.copy_teams_and_memberships(
        source_party.id, target_party.id
    )

    flash_success(
        gettext(
            '%(copied_teams_count)s team(s) has/have been copied from party '
            '"%(source_party_title)s" to party "%(target_party_title)s".',
            copied_teams_count=copied_teams_count,
            source_party_title=source_party.title,
            target_party_title=target_party.title,
        )
    )
    return redirect_to('.teams_for_party', party_id=target_party.id)
# -------------------------------------------------------------------- #
# memberships
@blueprint.get('/teams/<uuid:team_id>/memberships/create')
@permission_required(OrgaTeamPermission.administrate_memberships)
@templated
def membership_create_form(team_id, erroneous_form=None):
    """Show form to assign an organizer to that team."""
    team = _get_team_or_404(team_id)
    party = party_service.get_party(team.party_id)

    unassigned_orgas = orga_team_service.get_unassigned_orgas_for_party(
        team.party_id
    )
    if not unassigned_orgas:
        # No candidates left; the template shows a hint instead of a form.
        return {
            'team': team,
            'party': party,
            'unassigned_orgas_available': False,
        }

    form = erroneous_form or MembershipCreateForm()
    form.set_user_choices(
        sorted(unassigned_orgas, key=user_service.get_sort_key_for_screen_name)
    )

    return {
        'form': form,
        'team': team,
        'party': party,
        'unassigned_orgas_available': True,
    }
@blueprint.post('/teams/<uuid:team_id>/memberships')
@permission_required(OrgaTeamPermission.administrate_memberships)
def membership_create(team_id):
    """Assign an organizer to that team."""
    team = _get_team_or_404(team_id)

    form = MembershipCreateForm(request.form)
    form.set_user_choices(
        orga_team_service.get_unassigned_orgas_for_party(team.party_id)
    )
    if not form.validate():
        return membership_create_form(team.id, form)

    user = user_service.get_user(form.user_id.data)
    orga_team_service.create_membership(
        team.id, user.id, form.duties.data.strip()
    )

    flash_success(
        gettext(
            '%(screen_name)s has been added to team "%(team_title)s".',
            screen_name=user.screen_name,
            team_title=team.title,
        )
    )
    return redirect_to('.teams_for_party', party_id=team.party_id)
@blueprint.get('/memberships/<uuid:membership_id>/update')
@permission_required(OrgaTeamPermission.administrate_memberships)
@templated
def membership_update_form(membership_id, erroneous_form=None):
    """Show form to update a membership."""
    membership = _get_membership_or_404(membership_id)

    user = user_service.get_user(membership.user_id)
    team = orga_team_service.find_team(membership.orga_team_id)
    party = party_service.get_party(team.party_id)

    form = erroneous_form or MembershipUpdateForm(obj=membership)
    form.set_orga_team_choices(
        orga_team_service.get_teams_for_party(team.party_id)
    )

    return {
        'form': form,
        'membership': membership,
        'user': user,
        'team': team,
        'party': party,
    }
@blueprint.post('/memberships/<uuid:membership_id>')
@permission_required(OrgaTeamPermission.administrate_memberships)
def membership_update(membership_id):
    """Update a membership."""
    membership = _get_membership_or_404(membership_id)
    user = user_service.get_user(membership.user_id)

    current_team = orga_team_service.find_team(membership.orga_team_id)

    form = MembershipUpdateForm(request.form)
    form.set_orga_team_choices(
        orga_team_service.get_teams_for_party(current_team.party_id)
    )
    if not form.validate():
        return membership_update_form(membership.id, form)

    # The membership may have been moved to a different team.
    new_team = orga_team_service.find_team(form.orga_team_id.data)
    duties = form.duties.data.strip() or None

    orga_team_service.update_membership(membership.id, new_team.id, duties)

    flash_success(
        gettext(
            'Membership of %(screen_name)s has been updated.',
            screen_name=user.screen_name,
        )
    )
    return redirect_to('.teams_for_party', party_id=new_team.party_id)
@blueprint.delete('/memberships/<uuid:membership_id>')
@permission_required(OrgaTeamPermission.administrate_memberships)
@respond_no_content
def membership_remove(membership_id):
    """Remove an organizer from a team."""
    membership = _get_membership_or_404(membership_id)

    # Look up user and team before the membership record is gone.
    user = user_service.get_user(membership.user_id)
    team = orga_team_service.find_team(membership.orga_team_id)

    orga_team_service.delete_membership(membership.id)

    flash_success(
        gettext(
            '%(screen_name)s has been removed from team "%(team_title)s".',
            screen_name=user.screen_name,
            team_title=team.title,
        )
    )
# -------------------------------------------------------------------- #
# helpers
def _get_party_or_404(party_id):
    """Return the party with that ID, or abort with 404 if unknown."""
    party = party_service.find_party(party_id)

    if party is not None:
        return party

    abort(404)
def _get_team_or_404(team_id):
    """Return the team with that ID, or abort with 404 if unknown."""
    team = orga_team_service.find_team(team_id)

    if team is not None:
        return team

    abort(404)
def _get_membership_or_404(membership_id):
    """Return the membership with that ID, or abort with 404 if unknown."""
    membership = orga_team_service.find_membership(membership_id)

    if membership is not None:
        return membership

    abort(404)
| |
import cPickle
import gzip
import os
import sys
import time
import numpy
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer, MLP, rectified_linear, send_email
from utils import tile_raster_images
from generateTrainValTestData import generate_experiment_data_supervised, shared_dataset, normalizeImage, stupid_map_wrapper
import multiprocessing
from classifyImage import generate_patch_data_rows
from vsk_utils import shared_single_dataset
import matplotlib
import matplotlib.pyplot as plt
import getpass
class LeNetConvPoolLayer(object):
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2), activation=rectified_linear):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height, filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows, #cols)
"""
assert image_shape[1] == filter_shape[1]
self.input = input
self.poolsize = poolsize
self.image_shape = image_shape
self.filter_shape = filter_shape
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = numpy.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
numpy.prod(poolsize))
# initialize weights with random weights
W_bound = numpy.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(
numpy.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
# the bias is a 1D tensor -- one bias per output feature map
b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
# convolve input feature maps with filters
conv_out = conv.conv2d(
input=input,
filters=self.W,
filter_shape=filter_shape,
image_shape=image_shape
)
# downsample each feature map individually, using maxpooling
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=poolsize,
ignore_border=True
)
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
self.output = activation(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# store parameters of this layer
self.params = [self.W, self.b]
# keep track of model input
self.input = input
self.conv_out = conv_out
self.pooled_out = pooled_out
def visualize_filters(self):
self.W = self.W * 1.0
W = self.W.eval()
print W.shape
patchSize = self.filter_shape[2]
print patchSize
filterSize = self.filter_shape[2]**2
print filterSize
numFilters = self.filter_shape[0]
print numFilters
W = np.reshape(W, (numFilters, filterSize))
print W.shape
return tile_raster_images(X=W, img_shape=(patchSize, patchSize), tile_shape=(10,10), tile_spacing=(1, 1),
scale_rows_to_unit_interval=True,
output_pixel_vals=True)
class CNN(object):
    """Convolutional network: stacked conv/pool layers feeding an MLP.

    Builds len(nkerns) LeNetConvPoolLayer instances followed by an MLP with
    2 output classes. If ``fileName`` is given, previously pickled layer
    parameters and training history are restored into the new graph.

    NOTE(review): legacy Python 2 / old-Theano code (print statements,
    xrange, cPickle, time.clock, theano.tensor.signal.downsample).
    """

    def __init__(self,input, batch_size, patchSize, rng, nkerns, kernelSizes, hiddenSizes, fileName=None, activation=rectified_linear):
        # Training/validation history accumulated by evaluate_lenet5().
        self.convLayers = []
        self.trainingCost = []
        self.validationError = []
        self.nkerns = nkerns
        self.kernelSizes = kernelSizes
        self.hiddenSizes = hiddenSizes
        self.patchSize = patchSize
        self.batch_size = batch_size
        # Reshape the flat input rows into (batch, 1 channel, H, W) images.
        input = input.reshape((self.batch_size, 1, self.patchSize, self.patchSize))
        self.layer0_input = input
        self.params = []
        input_next = input
        numberOfFeatureMaps = 1
        featureMapSize = patchSize
        for i in range(len(nkerns)):
            layer = LeNetConvPoolLayer(
                rng,
                input=input_next,
                image_shape=(batch_size, numberOfFeatureMaps, featureMapSize, featureMapSize),
                filter_shape=(nkerns[i], numberOfFeatureMaps, kernelSizes[i], kernelSizes[i]),
                poolsize=(2, 2)
            )
            input_next = layer.output
            numberOfFeatureMaps = nkerns[i]
            # Valid convolution shrinks by (kernel - 1), then 2x2 pooling halves.
            featureMapSize = np.int16(np.floor((featureMapSize - kernelSizes[i]+1) / 2))
            self.params += layer.params
            self.convLayers.append(layer)
        # the 2 is there to preserve the batchSize
        mlp_input = self.convLayers[-1].output.flatten(2)
        self.mlp = MLP(rng=rng, input=mlp_input, n_in=nkerns[-1] * (featureMapSize ** 2), n_hidden=hiddenSizes,
                       n_out=2, activation=rectified_linear)
        self.params += self.mlp.params
        self.cost = self.mlp.negative_log_likelihood
        self.errors = self.mlp.errors
        self.p_y_given_x = self.mlp.p_y_given_x
        self.debug_x = self.p_y_given_x
        # Optionally restore pickled parameters into the freshly built graph.
        if not fileName is None:
            with open(fileName, 'r') as file:
                saved_convLayers, saved_hiddenLayers, saved_logRegressionLayer, self.trainingCost, self.validationError, saved_nkerns, saved_kernelSizes, saved_batch_size, saved_patchSize, saved_hiddenSizes = cPickle.load(file)
            for s_cl, cl in zip(saved_convLayers, self.convLayers):
                cl.W.set_value(s_cl.W.get_value())
                cl.b.set_value(s_cl.b.get_value())
            for s_hl, hl in zip(saved_hiddenLayers, self.mlp.hiddenLayers):
                hl.W.set_value(np.float32(s_hl.W.eval()))
                hl.b.set_value(s_hl.b.get_value())
            self.mlp.logRegressionLayer.W.set_value(np.float32(saved_logRegressionLayer.W.eval()))
            self.mlp.logRegressionLayer.b.set_value(saved_logRegressionLayer.b.get_value())

    def save_CNN(self, filename):
        """Pickle layers, training history and hyperparameters to a file."""
        with open(filename, 'wb') as file:
            cPickle.dump((self.convLayers, self.mlp.hiddenLayers, self.mlp.logRegressionLayer, self.trainingCost, self.validationError, self.nkerns, self.kernelSizes, self.batch_size, self.patchSize, self.hiddenSizes), file)

    def classify_image(self, img, normMean=None, norm_std=None):
        """Classify a whole image row by row; returns per-pixel probabilities.

        NOTE(review): the ``givens={x: data_shared}`` below references a
        module-level symbolic variable ``x`` (defined in the __main__ block),
        not an attribute of this class — confirm before reusing this class
        from another module.
        """
        start_time = time.clock()
        row_range = 1
        img = normalizeImage(img)
        imSize = np.shape(img)
        membraneProbabilities = np.zeros(np.shape(img))
        patchSize = self.patchSize
        # One shared buffer reused for every row batch of patches.
        data_shared = shared_single_dataset(np.zeros((imSize[0]*row_range,patchSize**2)), borrow=True)
        classify = theano.function(
            [],
            self.p_y_given_x,
            givens={x: data_shared}
        )
        # NOTE(review): iterates a hard-coded 1024 rows — presumably the
        # expected image height; verify against the input image size.
        for row in xrange(0,1024,row_range):
            if row%100 == 0:
                print row
            data = generate_patch_data_rows(img, rowOffset=row, rowRange=row_range, patchSize=patchSize, imSize=imSize, data_mean=normMean, data_std=norm_std)
            data_shared.set_value(np.float32(data))
            result = classify()
            # Column 1 holds the probability of the "membrane" class.
            membraneProbabilities[row,:] = result[:,1]
        end_time = time.clock()
        total_time = (end_time - start_time)
        print "Image classification took %f seconds" % (total_time)
        return np.array(membraneProbabilities)

    def debug_whole_image_classification(self, img, normMean=None, norm_std=None):
        """Debug variant of classify_image: only 33 rows, raw debug output.

        Same module-level ``x`` caveat as classify_image applies here.
        """
        start_time = time.clock()
        row_range = 1
        img = normalizeImage(img)
        imSize = np.shape(img)
        membraneProbabilities = np.zeros(np.shape(img))
        patchSize = self.patchSize
        data_shared = shared_single_dataset(np.zeros((imSize[0]*row_range,patchSize**2)), borrow=True)
        classify = theano.function(
            [],
            self.debug_x,
            givens={x: data_shared}
        )
        for row in xrange(0,33,row_range):
            if row%100 == 0:
                print row
            data = generate_patch_data_rows(img, rowOffset=row, rowRange=row_range, patchSize=patchSize, imSize=imSize, data_mean=normMean, data_std=norm_std)
            data_shared.set_value(np.float32(data))
            result = classify()
            #membraneProbabilities[row,:] = result[:,1]
            # Keeps only the last row's raw result (debug behavior).
            membraneProbabilities = result
        end_time = time.clock()
        total_time = (end_time - start_time)
        print "Image classification took %f seconds" % (total_time)
        return np.array(membraneProbabilities)
def evaluate_lenet5(learning_rate=0.0001, n_epochs=20000, nkerns=[48,48,48], kernelSizes=[5,5,5], hiddenSizes=[200], doResample=True, batch_size=1, patchSize=65, train_samples=50000, val_samples=10000, test_samples=1000, validation_frequency = 100, doEmailUpdate=False, momentum=0.98, filename='tmp_cnn.pkl'):
    """Train a CNN with momentum SGD on membrane patch data.

    Builds a CNN (optionally warm-started from ``filename``), trains for up
    to ``n_epochs`` epochs, validates every ``validation_frequency``
    iterations, checkpoints to 'current_cnn.pkl'/'best_cnn_so_far.pkl', and
    returns the trained classifier. With ``doResample`` a worker process
    regenerates the training set each epoch.

    NOTE(review): mutable default arguments (nkerns, kernelSizes,
    hiddenSizes) are shared across calls — safe only while they are never
    mutated in here.
    """
    # Momentum update: Theano applies all updates in a dict simultaneously,
    # so the param step uses the velocity from the previous iteration.
    def gradient_updates_momentum(cost, params, learning_rate, momentum):
        updates = []
        for param in params:
            param_update = theano.shared(param.get_value()*0., broadcastable=param.broadcastable)
            updates.append((param, param - learning_rate*param_update))
            updates.append((param_update, momentum*param_update + (1. - momentum)*T.grad(cost, param)))
        return updates
    rng = numpy.random.RandomState(23455)
    # Generate train/validation/test sets; train stats normalize the others.
    data, norm_mean, norm_std, grayImages, labelImages, maskImages = generate_experiment_data_supervised(purpose='train', nsamples=train_samples, patchSize=patchSize, balanceRate=0.5, data_mean=0.5, data_std=1.0)
    train_set_x, train_set_y = shared_dataset(data, doCastLabels=True)
    data = generate_experiment_data_supervised(purpose='validate', nsamples=val_samples, patchSize=patchSize, balanceRate=0.5, data_mean=norm_mean, data_std=norm_std)[0]
    valid_set_x, valid_set_y = shared_dataset(data, doCastLabels=True)
    data = generate_experiment_data_supervised(purpose='test', nsamples=test_samples, patchSize=patchSize, balanceRate=0.5, data_mean=norm_mean, data_std=norm_std)[0]
    test_set_x, test_set_y = shared_dataset(data, doCastLabels=True)
    # compute number of minibatches for training, validation and testing
    n_train_batches = train_samples / batch_size
    n_valid_batches = val_samples / batch_size
    n_test_batches = test_samples / batch_size
    learning_rate_shared = theano.shared(np.float32(learning_rate))
    momentum_shared = theano.shared(np.float32(momentum))
    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch
    # start-snippet-1
    x = T.matrix('x')   # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels
    lr = T.scalar('learning_rate')
    m = T.scalar('momentum')
    if doEmailUpdate:
        gmail_pwd = getpass.getpass()
    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'
    classifier = CNN(input=x, batch_size=batch_size, patchSize=patchSize, rng=rng,
                     nkerns=nkerns, kernelSizes=kernelSizes, hiddenSizes=hiddenSizes,
                     fileName=filename)
    cost = classifier.cost(y)
    # create a function to compute the mistakes that are made by the model
    test_model = theano.function(
        [index],
        classifier.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    validate_model = theano.function(
        [index],
        classifier.errors(y),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    gparams = []
    for param in classifier.params:
        gparam = T.grad(cost, param)
        gparams.append(gparam)
    #SGD
    # updates = []
    # for param, gparam in zip(classifier.params, gparams):
    #     updates.append((param, param - lr * gparam))
    #updates = adadelta_updates(classifier.params, gparams, lr, 0.000001)
    updates = gradient_updates_momentum(cost, classifier.params, lr, m)
    train_model = theano.function(inputs=[index], outputs=cost,
            updates=updates,
            givens={
                x: train_set_x[index * batch_size:(index + 1) * batch_size],
                y: train_set_y[index * batch_size:(index + 1) * batch_size],
                lr: learning_rate_shared,
                m: momentum_shared})
    ###############
    # TRAIN MODEL #
    ###############
    print '... training'
    best_validation_loss = numpy.inf
    best_iter = 0
    decrease_epoch = 1
    decrease_patience = 1
    test_score = 0.
    start_time = time.clock()
    epoch = 0
    done_looping = False
    # start pool for data: one background worker regenerates training data
    # while the current epoch trains on the previous batch.
    print "Starting worker."
    pool = multiprocessing.Pool(processes=1)
    futureData = pool.apply_async(stupid_map_wrapper, [[generate_experiment_data_supervised,True, 'train', train_samples, patchSize, 0.5, 0.5, 1.0]])
    while (epoch < n_epochs) and (not done_looping):
        minibatch_avg_costs = []
        epoch = epoch + 1
        if doResample and epoch>1:
            # Block until the worker's fresh training set is ready.
            print "Waiting for data."
            data = futureData.get()
            print "GOT NEW DATA"
            train_set_x.set_value(np.float32(data[0]))
            train_set_y.set_value(np.int32(data[1]))
            futureData = pool.apply_async(stupid_map_wrapper, [[generate_experiment_data_supervised,True, 'train', train_samples, patchSize, 0.5, 0.5, 1.0]])
        # try:
        #     data = futureData.get(timeout=1)
        #     print "GOT NEW DATA"
        #     train_set_x.set_value(np.float32(data[0]))
        #     train_set_y.set_value(np.int32(data[1]))
        #     futureData = pool.apply_async(stupid_map_wrapper, [[generate_experiment_data_supervised,True, 'train', train_samples, patchSize, 0.5, norm_mean, 1.0]])
        # except multiprocessing.TimeoutError:
        #     print "TIMEOUT, TRAINING ANOTHER ROUND WITH CURRENT DATA"
        #     pass
        #
        for minibatch_index in xrange(n_train_batches):
            minibatch_avg_costs.append(train_model(minibatch_index))
            # iteration number
            iter = (epoch - 1) * n_train_batches + minibatch_index
            if (iter + 1) % validation_frequency == 0:
                # Periodic checkpoint regardless of score.
                classifier.save_CNN('current_cnn.pkl')
                # compute zero-one loss on validation set
                validation_losses = np.array([validate_model(i) for i
                                     in xrange(n_valid_batches)])
                this_validation_loss = numpy.sum(validation_losses) * 100.0 / val_samples
                msg = 'epoch %i, minibatch %i/%i, training error %.3f, validation error %.2f %%' % (epoch, minibatch_index + 1, n_train_batches, minibatch_avg_costs[-1], this_validation_loss)
                print(msg)
                classifier.trainingCost.append(minibatch_avg_costs[-1])
                classifier.validationError.append(this_validation_loss*100)
                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:
                    best_validation_loss = this_validation_loss
                    best_iter = iter
                    classifier.save_CNN('best_cnn_so_far.pkl')
                    print "New best score!"
                    if doEmailUpdate:
                        send_email(gmail_pwd, msg)
                    # test it on the test set
                    #test_losses = [test_model(i) for i
                    #               in xrange(n_test_batches)]
                    #test_score = numpy.mean(test_losses)
                    #
                    #print(('epoch %i, minibatch %i/%i, test error of '
                    #       'best model %f %%') %
                    #      (epoch, minibatch_index + 1, n_train_batches,
                    #       test_score * 100.))
    pool.close()
    pool.join()
    print "Pool closed."
    end_time = time.clock()
    print(('Optimization complete. Best validation score of %f %% '
           'obtained at iteration %i, with test performance %f %%') %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
    return classifier
if __name__ == '__main__':
    # Script mode: load a trained CNN from 'tmp_cnn.pkl' and classify one
    # image, saving probability and filter visualizations.
    rng = numpy.random.RandomState(929292)
    import mahotas
    import matplotlib.pyplot as plt
    image = mahotas.imread('ac3_input_0141.tif')
    # Module-level symbolic input; CNN.classify_image relies on this name.
    x = T.matrix('x')
    doDebug = False
    ###############################################
    ### DEBUGGING START
    ###############################################
    if doDebug:
        # Small crop with batch_size matched to the crop height.
        image = image[0:170,0:170]
        test2 = CNN(input=x, batch_size=170, patchSize=65, rng=rng, nkerns=[48,48,48], kernelSizes=[5,5,5], hiddenSizes=[200], fileName='tmp_cnn.pkl')
        fooBa = test2.debug_whole_image_classification(image, normMean=0.5, norm_std=1.0)
    ###############################################
    ### DEBUGGING END
    ###############################################
    if not doDebug:
        # batch_size=1024 — presumably one full image row per batch; confirm
        # against the 1024-row loop in classify_image.
        test2 = CNN(input=x, batch_size=1024, patchSize=65, rng=rng, nkerns=[48,48,48], kernelSizes=[5,5,5], hiddenSizes=[200], fileName='tmp_cnn.pkl')
        prob = test2.classify_image(img=image, normMean=0.5, norm_std=1.0)
        # Show and save the inverted membrane probability map.
        plt.imshow(1-prob)
        plt.show()
        mahotas.imsave('tmp_output_cnn_09.png', np.uint8((1-prob)*255))
        # Visualize the first conv layer's filters.
        cl = test2.convLayers[0]
        plt.imshow(cl.visualize_filters())
        plt.show()
        mahotas.imsave('filter_output_cnn_09.png', np.uint8(cl.visualize_filters()))
        # Plot the recorded training/validation curves.
        plt.plot(np.array(test2.trainingCost), label='training')
        plt.plot(np.array(test2.validationError), label='validation')
        plt.legend()
        plt.show()
        if len(test2.validationError) > 5000:
            plt.plot(np.array(test2.trainingCost)[-5000:], label='training')
            plt.plot(np.array(test2.validationError)[-5000:], label='validation')
            plt.legend()
            plt.show()
        print "best validation score: ", test2.validationError[-1]
| |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by Exopy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Workspace used for editing and executing measurements.
"""
import logging
import os
import re
import enaml
from atom.api import Typed, Value, Property, set_default
from enaml.application import deferred_call
from enaml.workbench.ui.api import Workspace
from enaml.widgets.api import FileDialogEx
from enaml.layout.api import InsertItem, InsertTab
from ...utils.traceback import format_exc
from ...tasks.api import RootTask
from ..measurement import Measurement
from ..plugin import MeasurementPlugin
from .measurement_tracking import MeasurementTracker
with enaml.imports():
from .checks_display import ChecksDisplay
from ..engines.selection import EngineSelector
from .content import MeasureContent
from .measurement_edition import MeasurementEditorDockItem
from .tools_edition import ToolsEditorDockItem
from .manifest import MeasurementSpaceMenu
# ID used when adding handler to the logger.
LOG_ID = 'exopy.measurement.workspace'
logger = logging.getLogger(__name__)
LAYOUT = None
class MeasurementSpace(Workspace):
    """Workspace dedicated to measurement edition and execution.
"""
#: Reference to the plugin to which the workspace is linked.
plugin = Typed(MeasurementPlugin)
#: Reference to the log panel model received from the log plugin.
log_model = Value()
#: Reference to the last edited measurement used for saving.
last_selected_measurement = Property()
window_title = set_default('Measurement')
    def start(self):
        """Start the workspace, create a blank measurement if necessary and
        get engine contribution.

        """
        # Add a reference to the workspace in the plugin and keep a reference
        # to the plugin.
        plugin = self.workbench.get_plugin('exopy.measurement')
        plugin.workspace = self
        self.plugin = plugin

        # Add handler to the root logger to display messages in panel.
        core = self.workbench.get_plugin('enaml.workbench.core')
        cmd = 'exopy.app.logging.add_handler'
        self.log_model = core.invoke_command(cmd,
                                             {'id': LOG_ID, 'mode': 'ui'},
                                             self)[0]

        # Create content.
        self.content = MeasureContent(workspace=self)

        # Contribute menus.
        self.workbench.register(MeasurementSpaceMenu(workspace=self))

        # Check whether or not a measurement is already being edited: start
        # from a blank one, or restore the panels saved in stop().
        if not plugin.edited_measurements.measurements:
            self.new_measurement()
        else:
            panels = self.plugin._workspace_state['measurement_panels']
            self._insert_new_edition_panels(
                plugin.edited_measurements.measurements,
                False, panels)

        # Check whether or not an engine can contribute. The contribution is
        # deferred so it happens once the UI is fully set up.
        if plugin.selected_engine:
            id_ = plugin.selected_engine
            engine = plugin.get_declarations('engine', [id_])[id_]
            deferred_call(engine.contribute_to_workspace, self)

        # Restore the dock layout saved when the workspace was last stopped.
        if self.plugin._workspace_state:
            self.dock_area.layout = self.plugin._workspace_state['layout']

        plugin.observe('selected_engine', self._update_engine_contribution)

        # Start tracking which measurement was last edited by the user.
        self._selection_tracker.start(
            plugin.edited_measurements.measurements[0])
    def stop(self):
        """Stop the workspace and clean.

        Saves the dock layout and panel names into the plugin workspace
        state so they can be restored by start().

        """
        plugin = self.plugin

        # Hide the monitors window. Not closing allows to preserve the
        # position and layout.
        if plugin.processor.monitors_window:
            plugin.processor.monitors_window.hide()

        plugin.unobserve('selected_engine', self._update_engine_contribution)

        if plugin.selected_engine:
            engine = plugin._engines.contributions[plugin.selected_engine]
            engine.clean_workspace(self)

        # HINT : we save the layout after removing the engine contribution,
        # which means that the layout is not perfectly preserved. To avoid
        # that we would need to insert the engine in a sync way (not using
        # deferred_call) but this can lead to other issues.
        layout = self.dock_area.save_layout()
        m_edit_panels = [di for di in self.dock_area.dock_items() if
                         isinstance(di, MeasurementEditorDockItem)]
        m_tools_panels = {di.measurement: di
                          for di in self.dock_area.dock_items()
                          if isinstance(di, ToolsEditorDockItem)}
        # Map each measurement to its editor panel name and (possibly empty)
        # tools panel name for restoration in start().
        names = {di.measurement: (di.name,
                                  getattr(m_tools_panels.get(di.measurement),
                                          'name', ''))
                 for di in m_edit_panels}
        self.plugin._workspace_state = {'layout': layout,
                                        'measurement_panels': names}

        # Remove handler from the root logger.
        core = self.workbench.get_plugin('enaml.workbench.core')
        cmd = 'exopy.app.logging.remove_handler'
        core.invoke_command(cmd, {'id': LOG_ID}, self)

        self.workbench.unregister('exopy.measurement.workspace.menus')

        self.plugin.workspace = None

        self._selection_tracker.stop()
def new_measurement(self, dock_item=None):
"""Create a new edited measurement using the default tools.
Parameters
----------
dock_item :
Dock item used for editing the measurement, if None a new item will
be created and inserted in the dock area.
"""
# TODO make sure this name is unique.
measurement = Measurement(plugin=self.plugin, name='M', id='001')
measurement.root_task = RootTask()
self._attach_default_tools(measurement)
self.plugin.edited_measurements.add(measurement)
if dock_item is None:
self._insert_new_edition_panels((measurement,))
def save_measurement(self, measurement, auto=True):
""" Save a measurement in a file.
Parameters
----------
measurement : Measurement
Measurement to save.
auto : bool, optional
When true if a path is associated to the measurement save it there,
otherwise ask the user where to save it.
"""
if not auto or not measurement.path:
get_file = FileDialogEx.get_save_file_name
path = os.path.join((measurement.path or self.plugin.path),
measurement.name + '.meas.ini')
full_path = get_file(parent=self.content,
current_path=path,
name_filters=[u'*.meas.ini'])
if not full_path:
return
elif not full_path.endswith('.meas.ini'):
full_path += '.meas.ini'
self.plugin.path = os.path.dirname(full_path)
else:
full_path = measurement.path
try:
measurement.save(full_path)
except Exception:
core = self.plugin.workbench.get_plugin('enaml.workbench.core')
cmd = 'exopy.app.errors.signal'
msg = 'Failed to save measurement :\n' + format_exc()
core.invoke_command(cmd, dict(kind='error', message=msg))
    def load_measurement(self, mode, dock_item=None):
        """ Load a measurement.

        Parameters
        ----------
        mode : {'file', 'template'}
            In file mode, ask the user to specify a file from which to load a
            measurement. In template mode, ask the user to choose a template
            and use the defaults settings of the plugin for the tools.

        dock_item : optional
            Dock item whose measurement should be replaced by the loaded one.
            When None a new edition panel is inserted instead.

        """
        # NOTE(review): if mode is neither 'file' nor 'template',
        # `measurement` is never bound and the tail of this method raises
        # NameError — confirm callers only ever pass those two values.
        if mode == 'file':
            get_file = FileDialogEx.get_open_file_name
            full_path = get_file(name_filters=[u'*.meas.ini'],
                                 current_path=self.plugin.path)
            if not full_path:
                return

            measurement, errors = Measurement.load(self.plugin, full_path)
            if errors:
                core = self.plugin.workbench.get_plugin('enaml.workbench.core')
                cmd = 'exopy.app.errors.signal'
                msg = 'Failed to load measurement.'
                core.invoke_command(cmd, dict(kind='measurement-loading',
                                              message=msg,
                                              details=errors))
                return

            self.plugin.edited_measurements.add(measurement)
            # Remember the directory for the next save/load dialog.
            self.plugin.path = os.path.dirname(full_path)

        elif mode == 'template':
            # TODO create brand new measurement using defaults from plugin and
            # load template
            raise NotImplementedError()

        if dock_item is None:
            self._insert_new_edition_panels((measurement,))
        else:
            # If we were passed a dock item it means we are replacing an
            # existing measurement with a different one, so the previous one is
            # not edited anymore.
            self.plugin.edited_measurements.remove((dock_item.measurement,))
            dock_item.measurement = measurement
            self._selection_tracker.set_selected_measurement(measurement)
# HINT: code used to track ref leak to root task
# requires to activtae root task instance tracking in tasks.base_tasks
# def print_infos(root):
# import gc
# gc.collect()
# import inspect
# from exopy.tasks.tasks.base_tasks import ROOTS
# for r in ROOTS:
# if r is not root:
# refs = [ref for ref in gc.get_referrers(r)
# if not inspect.isframe(ref)]
# print(('Root', refs))
# for ref in refs:
# print((ref,
# [re for re in gc.get_referrers(ref)
# if re is not refs and
# not inspect.isframe(re)]))
#
# deferred_call(print_infos, measurement.root_task)
# TODO : making this asynchronous or notifying the user would be super nice
    def enqueue_measurement(self, measurement):
        """Put a measurement in the queue if it passes the tests.

        The measurement is saved to a temporary config file and rebuilt from
        it, so the enqueued copy is decoupled from the edited one.

        Parameters
        ----------
        measurement : Measurement
            Instance of Measurement representing the measurement.

        Returns
        -------
        bool
            True if the measurement was successfully enqueued, False otherwise.

        """
        # Reset the forced enqueued flag
        measurement.forced_enqueued = False

        # Collect the runtime dependencies
        res, msg, errors = measurement.dependencies.collect_runtimes()

        if not res:
            # Hard failure while collecting: report and abort.
            if 'Failed' in msg:
                dial = ChecksDisplay(errors=errors, title=msg)
                dial.exec_()
                measurement.dependencies.reset()
                return False

            # If some runtime are missing let the user know about it.
            else:
                msg = ('The following runtime dependencies of the measurement '
                       '{}, are not currently available. Some tests may be '
                       'skipped as a result but will be run before executing '
                       'the measurement.\n Missing dependencies from :\n{}')
                # NOTE(review): 'id' shadows the builtin here.
                msg = msg.format(measurement.name,
                                 '\n'.join(('- '+id for id in errors)))
                # TODO : log as debug and display in popup
                logger.info(msg)

        # Run the checks specifying what runtimes are missing.
        missings = errors.get('unavailable', {})
        check, errors = measurement.run_checks(missing=missings)

        # Release the runtimes.
        measurement.dependencies.release_runtimes()

        if check:
            # If check is ok but there are some errors, those are warnings
            # which the user can either ignore and enqueue the measurement, or
            # he can cancel the enqueuing and try again.
            if errors:
                dial = ChecksDisplay(errors=errors, is_warning=True)
                dial.exec_()
                if not dial.result:
                    measurement.dependencies.reset()
                    return False
        else:
            # Checks failed: show the errors and let the user either abort or
            # force the enqueuing anyway.
            measurement.dependencies.reset()
            dial = ChecksDisplay(errors=errors, is_warning=False)
            dial.exec_()
            if not dial.result:
                measurement.dependencies.reset()
                return False
            measurement.forced_enqueued = True

        # Round-trip the measurement through a temporary config file to get an
        # independent copy for the queue.
        default_filename = (measurement.name + '_' + measurement.id +
                            '.meas.ini')
        path = os.path.join(measurement.root_task.default_path,
                            default_filename)

        # save() may update the stored path; restore it afterwards.
        old_path = measurement.path
        measurement.save(path)
        measurement.path = old_path

        b_deps = measurement.dependencies.get_build_dependencies()

        meas, errors = Measurement.load(self.plugin, path, b_deps.dependencies)

        # Clean dependencies cache as at next enqueueing dependencies may have
        # changed
        measurement.dependencies.reset()

        # Provide a nice error message.
        if not meas:
            measurement.forced_enqueued = False
            msg = 'Failed to rebuild measurement from config'
            dial = ChecksDisplay(errors={'Building': errors}, title=msg)
            dial.exec_()
            return False

        meas.forced_enqueued = measurement.forced_enqueued

        # Best effort: the temp file is no longer needed.
        try:
            os.remove(path)
        except OSError:
            logger.debug('Failed to remove temp save file')

        meas.status = 'READY'
        meas.infos = 'The measurement is ready to be performed by an engine.'

        self.plugin.enqueued_measurements.add(meas)

        return True
    def reenqueue_measurement(self, measurement):
        """Mark a measurement already in queue as fitted to be executed.

        This method can be used to re-enqueue a measurement that previously
        failed, for example because a profile was missing; the measurement can
        then be edited again and will be executed in its turn.

        WARNING : the tests are run again !!!

        Parameters
        ----------
        measurement : Measurement
            The measurement to re-enqueue.

        """
        measurement.enter_edition_state()
        measurement.status = 'READY'
        measurement.infos = 'Measurement re-enqueued by the user'
def remove_processed_measurements(self):
""" Remove all the measurements which have been processed from the
queue.
This method rely on the status of the measurement. Only measurements
whose status is 'READY' will be left in the queue.
"""
for measurement in self.plugin.enqueued_measurements.measurements[:]:
if measurement.status in ('SKIPPED', 'FAILED', 'COMPLETED',
'INTERRUPTED'):
self.plugin.enqueued_measurements.remove(measurement)
def start_processing_measurements(self):
""" Starts to perform the measurement in the queue.
Measurement are processed in their order of appearance in the queue.
"""
if not self._ensure_selected_engine():
return
measurement = self.plugin.find_next_measurement()
self.plugin.processor.continuous_processing = True
if measurement is not None:
self.plugin.processor.start_measurement(measurement)
else:
cmd = 'exopy.app.errors.signal'
core = self.workbench.get_plugin('enaml.workbench.core')
msg = 'None of the curently enqueued measurements can be run.'
core.invoke_command(cmd, {'kind': 'error', 'message': msg})
def process_single_measurement(self, measurement):
""" Performs a single measurement and then stops.
Parameters
----------
measurement : Measurement
Measurement to perform.
"""
if not self._ensure_selected_engine():
return
self.plugin.processor.continuous_processing = False
self.plugin.processor.start_measurement(measurement)
    def pause_current_measurement(self):
        """Pause the currently active measurement.

        Delegates to the measurement processor owned by the plugin.

        """
        self.plugin.processor.pause_measurement()
    def resume_current_measurement(self):
        """Resume the currently paused measurement.

        Delegates to the measurement processor owned by the plugin.

        """
        self.plugin.processor.resume_measurement()
    def stop_current_measurement(self, no_post_exec=False, force=False):
        """Stop the execution of the currently executed measurement.

        Both flags are forwarded verbatim to the processor; their exact
        semantics are defined there.

        """
        self.plugin.processor.stop_measurement(no_post_exec, force)
    def stop_processing_measurements(self, no_post_exec=False, force=False):
        """Stop processing the enqueued measurements.

        Both flags are forwarded verbatim to the processor; their exact
        semantics are defined there.

        """
        self.plugin.processor.stop_processing(no_post_exec, force)
    @property
    def dock_area(self):
        """Dock area of the workspace content.

        Implicitly returns None when the content has not been created yet or
        has no children.

        """
        # The dock area is assumed to be the second child of the content
        # widget — confirm against the MeasureContent definition.
        if self.content and self.content.children:
            return self.content.children[1]
# --- Private API ---------------------------------------------------------
#: Background thread determining the last edited measurement by analysing
#: the last selected widget.
_selection_tracker = Typed(MeasurementTracker, ())
def _attach_default_tools(self, measurement):
"""Add the default tools to a measurement.
"""
# TODO : use error plugin to report that kind of issues
for pre_id in self.plugin.default_pre_hooks:
if pre_id in self.plugin.pre_hooks:
measurement.add_tool('pre-hook', pre_id)
else:
msg = "Default pre-execution hook {} not found"
logger.warning(msg.format(pre_id))
for monitor_id in self.plugin.default_monitors:
if monitor_id in self.plugin.monitors:
measurement.add_tool('monitor', monitor_id)
else:
msg = "Default monitor {} not found."
logger.warning(msg.format(monitor_id))
for post_id in self.plugin.default_post_hooks:
if post_id in self.plugin.post_hooks:
measurement.add_tool('post-hook', post_id)
else:
msg = "Default post-execution hook {} not found"
logger.warning(msg.format(post_id))
def _insert_new_edition_panels(self, measurements, update=True,
panels=None):
"""Handle inserting a new MeasurementEditorDockItem in the content.
"""
if panels is None:
template = 'meas_%d'
items = self.dock_area.dock_items()
test = re.compile('meas\_([0-9]+)$')
measurement_items = [i for i in items if test.match(i.name)]
ops = []
for measurement in measurements:
if not measurement_items:
name = template % 0
ops.append(InsertItem(item=name, target='meas_exec'))
else:
indexes = [int(test.match(i.name).group(1))
for i in measurement_items]
indexes.sort()
if len(indexes) <= max(indexes):
ind = [i for i, x in enumerate(indexes) if i != x][0]
else:
ind = len(measurement_items)
name = template % ind
ops.append(InsertTab(item=name,
target=template % indexes[0]))
measurement_items.append(
MeasurementEditorDockItem(self.dock_area,
workspace=self,
measurement=measurement,
name=name)
)
if update:
deferred_call(self.dock_area.update_layout, ops)
else:
for m in measurements:
if m not in panels:
msg = ('Cannot insert edition panels for measurement %s, '
'no infos were provided. Panels exists for:\n%s')
raise RuntimeError(msg % (m.name + ' (id : %s)' % m.id,
', '.join(m.name for m in panels)
)
)
ed_name, t_name = panels[m]
MeasurementEditorDockItem(self.dock_area, workspace=self,
measurement=m, name=ed_name)
if t_name:
ToolsEditorDockItem(self.dock_area, measurement=m,
name=t_name)
def _update_engine_contribution(self, change):
"""Make sure that the engine contribution to the workspace does reflect
the currently selected engine.
"""
if 'oldvalue' in change:
old = change['oldvalue']
if old in self.plugin.engines:
engine = self.plugin.get_declarations('engine', [old])[old]
engine.clean_workspace(self)
new = change['value']
if new and new in self.plugin.engines:
engine = self.plugin.get_declarations('engine', [new])[new]
engine.contribute_to_workspace(self)
    def _get_last_selected_measurement(self):
        """Wait for the background thread to finish processing the selected
        widgets and return the corresponding measurement.

        Getter for the last_selected_measurement property.

        """
        return self._selection_tracker.get_selected_measurement()
def _ensure_selected_engine(self):
"""Make sure an engine is selected and if not prompt the user to choose
one.
"""
if not self.plugin.selected_engine:
dial = EngineSelector(plugin=self.plugin)
if dial.exec_() and dial.selected_decl:
self.plugin.selected_engine = dial.selected_decl.id
return bool(self.plugin.selected_engine)
| |
# encoding: utf-8
"""
Gherkin step implementations for chart features.
"""
from __future__ import absolute_import, print_function
import hashlib
from itertools import islice
from behave import given, then, when
from pptx import Presentation
from pptx.chart.chart import Legend
from pptx.chart.data import BubbleChartData, CategoryChartData, ChartData, XyChartData
from pptx.enum.chart import XL_CHART_TYPE
from pptx.parts.embeddedpackage import EmbeddedXlsxPart
from pptx.util import Inches
from helpers import count, test_pptx
# given ===================================================
@given("a Chart object as chart")
def given_a_Chart_object_as_chart(context):
slide = Presentation(test_pptx("shp-common-props")).slides[0]
context.chart = slide.shapes[6].chart
@given("a chart having {a_or_no} title")
def given_a_chart_having_a_or_no_title(context, a_or_no):
shape_idx = {"no": 0, "a": 1}[a_or_no]
prs = Presentation(test_pptx("cht-chart-props"))
context.chart = prs.slides[0].shapes[shape_idx].chart
@given("a chart {having_or_not} a legend")
def given_a_chart_having_or_not_a_legend(context, having_or_not):
slide_idx = {"having": 0, "not having": 1}[having_or_not]
prs = Presentation(test_pptx("cht-legend"))
context.chart = prs.slides[slide_idx].shapes[0].chart
@given("a chart of size and type {spec}")
def given_a_chart_of_size_and_type_spec(context, spec):
slide_idx = {
"2x2 Clustered Bar": 0,
"2x2 100% Stacked Bar": 1,
"2x2 Clustered Column": 2,
"4x3 Line": 3,
"3x1 Pie": 4,
"3x2 XY": 5,
"3x2 Bubble": 6,
}[spec]
prs = Presentation(test_pptx("cht-replace-data"))
chart = prs.slides[slide_idx].shapes[0].chart
context.chart = chart
context.xlsx_sha1 = hashlib.sha1(chart._workbook.xlsx_part.blob).hexdigest()
@given("a chart of type {chart_type}")
def given_a_chart_of_type_chart_type(context, chart_type):
slide_idx, shape_idx = {
"Area": (0, 0),
"Stacked Area": (0, 1),
"100% Stacked Area": (0, 2),
"3-D Area": (0, 3),
"3-D Stacked Area": (0, 4),
"3-D 100% Stacked Area": (0, 5),
"Clustered Bar": (1, 0),
"Stacked Bar": (1, 1),
"100% Stacked Bar": (1, 2),
"Clustered Column": (1, 3),
"Stacked Column": (1, 4),
"100% Stacked Column": (1, 5),
"Line": (2, 0),
"Stacked Line": (2, 1),
"100% Stacked Line": (2, 2),
"Marked Line": (2, 3),
"Stacked Marked Line": (2, 4),
"100% Stacked Marked Line": (2, 5),
"Pie": (3, 0),
"Exploded Pie": (3, 1),
"XY (Scatter)": (4, 0),
"XY Lines": (4, 1),
"XY Lines No Markers": (4, 2),
"XY Smooth Lines": (4, 3),
"XY Smooth No Markers": (4, 4),
"Bubble": (5, 0),
"3D-Bubble": (5, 1),
"Radar": (6, 0),
"Marked Radar": (6, 1),
"Filled Radar": (6, 2),
"Line (with date categories)": (7, 0),
}[chart_type]
prs = Presentation(test_pptx("cht-chart-type"))
context.chart = prs.slides[slide_idx].shapes[shape_idx].chart
@given("a chart title")
def given_a_chart_title(context):
prs = Presentation(test_pptx("cht-chart-props"))
context.chart_title = prs.slides[0].shapes[1].chart.chart_title
@given("a chart title having {a_or_no} text frame")
def given_a_chart_title_having_a_or_no_text_frame(context, a_or_no):
prs = Presentation(test_pptx("cht-chart-props"))
shape_idx = {"no": 0, "a": 1}[a_or_no]
context.chart_title = prs.slides[1].shapes[shape_idx].chart.chart_title
# when ====================================================
@when("I add a Clustered bar chart with multi-level categories")
def when_I_add_a_clustered_bar_chart_with_multi_level_categories(context):
chart_type = XL_CHART_TYPE.BAR_CLUSTERED
chart_data = CategoryChartData()
WEST = chart_data.add_category("WEST")
WEST.add_sub_category("SF")
WEST.add_sub_category("LA")
EAST = chart_data.add_category("EAST")
EAST.add_sub_category("NY")
EAST.add_sub_category("NJ")
chart_data.add_series("Series 1", (1, 2, None, 4))
chart_data.add_series("Series 2", (5, None, 7, 8))
context.chart = context.slide.shapes.add_chart(
chart_type, Inches(1), Inches(1), Inches(8), Inches(5), chart_data
).chart
@when("I add a {kind} chart with {cats} categories and {sers} series")
def when_I_add_a_chart_with_categories_and_series(context, kind, cats, sers):
chart_type = {
"Area": XL_CHART_TYPE.AREA,
"Stacked Area": XL_CHART_TYPE.AREA_STACKED,
"100% Stacked Area": XL_CHART_TYPE.AREA_STACKED_100,
"Clustered Bar": XL_CHART_TYPE.BAR_CLUSTERED,
"Stacked Bar": XL_CHART_TYPE.BAR_STACKED,
"100% Stacked Bar": XL_CHART_TYPE.BAR_STACKED_100,
"Clustered Column": XL_CHART_TYPE.COLUMN_CLUSTERED,
"Stacked Column": XL_CHART_TYPE.COLUMN_STACKED,
"100% Stacked Column": XL_CHART_TYPE.COLUMN_STACKED_100,
"Doughnut": XL_CHART_TYPE.DOUGHNUT,
"Exploded Doughnut": XL_CHART_TYPE.DOUGHNUT_EXPLODED,
"Line": XL_CHART_TYPE.LINE,
"Line with Markers": XL_CHART_TYPE.LINE_MARKERS,
"Line Markers Stacked": XL_CHART_TYPE.LINE_MARKERS_STACKED,
"100% Line Markers Stacked": XL_CHART_TYPE.LINE_MARKERS_STACKED_100,
"Line Stacked": XL_CHART_TYPE.LINE_STACKED,
"100% Line Stacked": XL_CHART_TYPE.LINE_STACKED_100,
"Pie": XL_CHART_TYPE.PIE,
"Exploded Pie": XL_CHART_TYPE.PIE_EXPLODED,
"Radar": XL_CHART_TYPE.RADAR,
"Filled Radar": XL_CHART_TYPE.RADAR_FILLED,
"Radar with markers": XL_CHART_TYPE.RADAR_MARKERS,
}[kind]
category_count, series_count = int(cats), int(sers)
category_source = ("Foo", "Bar", "Baz", "Boo", "Far", "Faz")
series_value_source = count(1.1, 1.1)
chart_data = CategoryChartData()
chart_data.categories = category_source[:category_count]
for idx in range(series_count):
series_title = "Series %d" % (idx + 1)
series_values = tuple(islice(series_value_source, category_count))
chart_data.add_series(series_title, series_values)
context.chart = context.slide.shapes.add_chart(
chart_type, Inches(1), Inches(1), Inches(8), Inches(5), chart_data
).chart
@when("I add a {bubble_type} chart having 2 series of 3 points each")
def when_I_add_a_bubble_chart_having_2_series_of_3_pts(context, bubble_type):
    # Resolve the enum member named by the scenario, e.g. 'BUBBLE'.
    chart_type = getattr(XL_CHART_TYPE, bubble_type)
    data = (
        ("Series 1", ((-0.1, 0.5, 1.0), (16.2, 0.0, 2.0), (8.0, -0.2, 3.0))),
        ("Series 2", ((12.4, 0.8, 4.0), (-7.5, 0.5, 5.0), (5.1, -0.5, 6.0))),
    )
    chart_data = BubbleChartData()
    # Unpack labels and (x, y, size) triples directly in the loop headers.
    for series_label, points in data:
        series = chart_data.add_series(series_label)
        for x, y, size in points:
            series.add_data_point(x, y, size)
    context.chart = context.slide.shapes.add_chart(
        chart_type, Inches(1), Inches(1), Inches(8), Inches(5), chart_data
    ).chart
@when("I assign {value} to chart.has_legend")
def when_I_assign_value_to_chart_has_legend(context, value):
new_value = {"True": True, "False": False}[value]
context.chart.has_legend = new_value
@when("I assign {value} to chart.has_title")
def when_I_assign_value_to_chart_has_title(context, value):
context.chart.has_title = {"True": True, "False": False}[value]
@when("I assign {value} to chart_title.has_text_frame")
def when_I_assign_value_to_chart_title_has_text_frame(context, value):
context.chart_title.has_text_frame = {"True": True, "False": False}[value]
@when("I replace its data with {cats} categories and {sers} series")
def when_I_replace_its_data_with_categories_and_series(context, cats, sers):
category_count, series_count = int(cats), int(sers)
category_source = ("Foo", "Bar", "Baz", "Boo", "Far", "Faz")
series_value_source = count(1.1, 1.1)
chart_data = ChartData()
chart_data.categories = category_source[:category_count]
for idx in range(series_count):
series_title = "New Series %d" % (idx + 1)
series_values = tuple(islice(series_value_source, category_count))
chart_data.add_series(series_title, series_values)
context.chart.replace_data(chart_data)
@when("I replace its data with 3 series of 3 bubble points each")
def when_I_replace_its_data_with_3_series_of_three_bubble_pts_each(context):
chart_data = BubbleChartData()
for idx in range(3):
series_title = "New Series %d" % (idx + 1)
series = chart_data.add_series(series_title)
for jdx in range(3):
x, y, size = idx * 3 + jdx, idx * 2 + jdx, idx + jdx
series.add_data_point(x, y, size)
context.chart.replace_data(chart_data)
@when("I replace its data with 3 series of 3 points each")
def when_I_replace_its_data_with_3_series_of_three_points_each(context):
    # Replace the chart's data with 3 XY series of 3 deterministic points.
    # (Removed the dead 'x = y = 0' initialization: both names were always
    # reassigned in the inner loop before use.)
    chart_data = XyChartData()
    for idx in range(3):
        series_title = "New Series %d" % (idx + 1)
        series = chart_data.add_series(series_title)
        for jdx in range(3):
            # Coordinates derived from the series/point indices.
            x, y = idx * 3 + jdx, idx * 2 + jdx
            series.add_data_point(x, y)
    context.chart.replace_data(chart_data)
# then ====================================================
@then("chart.category_axis is a {cls_name} object")
def then_chart_category_axis_is_a_cls_name_object(context, cls_name):
category_axis = context.chart.category_axis
type_name = type(category_axis).__name__
assert type_name == cls_name, "got %s" % type_name
@then("chart.chart_title is a ChartTitle object")
def then_chart_chart_title_is_a_ChartTitle_object(context):
class_name = type(context.chart.chart_title).__name__
assert class_name == "ChartTitle", "got %s" % class_name
@then("chart.chart_type is {enum_member}")
def then_chart_chart_type_is_value(context, enum_member):
expected_value = getattr(XL_CHART_TYPE, enum_member)
chart = context.chart
assert chart.chart_type is expected_value, "got %s" % chart.chart_type
@then("chart.font is a Font object")
def then_chart_font_is_a_Font_object(context):
actual = type(context.chart.font).__name__
expected = "Font"
assert actual == expected, "chart.font is a %s object" % actual
@then("chart.has_legend is {value}")
def then_chart_has_legend_is_value(context, value):
expected_value = {"True": True, "False": False}[value]
chart = context.chart
assert chart.has_legend is expected_value
@then("chart.has_title is {value}")
def then_chart_has_title_is_value(context, value):
chart = context.chart
actual_value = chart.has_title
expected_value = {"True": True, "False": False}[value]
assert actual_value is expected_value, "got %s" % actual_value
@then("chart.legend is a legend object")
def then_chart_legend_is_a_legend_object(context):
chart = context.chart
assert isinstance(chart.legend, Legend)
@then("chart.series is a SeriesCollection object")
def then_chart_series_is_a_SeriesCollection_object(context):
type_name = type(context.chart.series).__name__
assert type_name == "SeriesCollection", "got %s" % type_name
@then("chart.value_axis is a ValueAxis object")
def then_chart_value_axis_is_a_ValueAxis_object(context):
value_axis = context.chart.value_axis
assert type(value_axis).__name__ == "ValueAxis"
@then("chart_title.format is a ChartFormat object")
def then_chart_title_format_is_a_ChartFormat_object(context):
class_name = type(context.chart_title.format).__name__
assert class_name == "ChartFormat", "got %s" % class_name
@then("chart_title.format.fill is a FillFormat object")
def then_chart_title_format_fill_is_a_FillFormat_object(context):
class_name = type(context.chart_title.format.fill).__name__
assert class_name == "FillFormat", "got %s" % class_name
@then("chart_title.format.line is a LineFormat object")
def then_chart_title_format_line_is_a_LineFormat_object(context):
class_name = type(context.chart_title.format.line).__name__
assert class_name == "LineFormat", "got %s" % class_name
@then("chart_title.has_text_frame is {value}")
def then_chart_title_has_text_frame_is_value(context, value):
actual_value = context.chart_title.has_text_frame
expected_value = {"True": True, "False": False}[value]
assert actual_value is expected_value, "got %s" % actual_value
@then("chart_title.text_frame is a TextFrame object")
def then_chart_title_text_frame_is_a_TextFrame_object(context):
class_name = type(context.chart_title.text_frame).__name__
assert class_name == "TextFrame", "got %s" % class_name
@then("each series has a new name")
def then_each_series_has_a_new_name(context):
for series in context.chart.plots[0].series:
assert series.name.startswith("New ")
@then("each series has {count} values")
def then_each_series_has_count_values(context, count):
    # The step parameter arrives as text; compare as an int.
    expected_count = int(count)
    assert all(len(series.values) == expected_count
               for series in context.chart.plots[0].series)
@then("len(chart.series) is {count}")
def then_len_chart_series_is_count(context, count):
expected_count = int(count)
assert len(context.chart.series) == expected_count
@then("the chart has an Excel data worksheet")
def then_the_chart_has_an_Excel_data_worksheet(context):
xlsx_part = context.chart._workbook.xlsx_part
assert isinstance(xlsx_part, EmbeddedXlsxPart)
@then("the chart has new chart data")
def then_the_chart_has_new_chart_data(context):
orig_xlsx_sha1 = context.xlsx_sha1
new_xlsx_sha1 = hashlib.sha1(context.chart._workbook.xlsx_part.blob).hexdigest()
assert new_xlsx_sha1 != orig_xlsx_sha1
| |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convert Tensorflow SavedModel to TensorFlow.js web format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import tensorflow as tf
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import device_properties_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import convert_to_constants
from tensorflow.python.grappler import cluster as gcluster
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.keras.saving.saving_utils import trace_model_call
from tensorflow.python.keras.saving.saving_utils import def_function
from tensorflow.python.keras.saving.saving_utils import model_input_signature
from tensorflow.python.saved_model.load import load
from tensorflow.python.saved_model import loader
from tensorflow.python.training.saver import export_meta_graph
from tensorflow.python.tools.saved_model_cli import get_signature_def_map
from google.protobuf.json_format import MessageToDict
import tensorflow_hub as hub
from tensorflowjs import write_weights
from tensorflowjs.converters import common
from tensorflowjs.converters import fold_batch_norms
from tensorflowjs.converters import fuse_prelu
from tensorflowjs.converters import fuse_depthwise_conv2d
from tensorflowjs.converters import graph_rewrite_util
from tensorflowjs import resource_loader
CLEARED_TENSOR_FIELDS = (
'tensor_content', 'half_val', 'float_val', 'double_val', 'int_val',
'string_val', 'scomplex_val', 'int64_val', 'bool_val',
'resource_handle_val', 'variant_val', 'uint32_val', 'uint64_val')
_HUB_V1_MODULE_PB = "tfhub_module.pb"
def load_graph(graph_filename):
  """Loads GraphDef. Returns Python Graph object.

  Args:
    graph_filename: string File name for the frozen graph.
  """
  graph_def = tf.compat.v1.GraphDef()
  with tf.compat.v1.gfile.Open(graph_filename, 'rb') as f:
    graph_def.ParseFromString(f.read())

  graph = tf.Graph()
  with graph.as_default():
    # Import under the empty name to avoid the default 'import' prefix.
    tf.import_graph_def(graph_def, name='')
  return graph
def get_cluster():
  """Grappler optimization configuration for GPU."""
  gpu_device = device_properties_pb2.NamedDevice()
  gpu_device.name = '/GPU:0'
  gpu_device.properties.type = 'GPU'
  gpu_device.properties.environment['architecture'] = '4'
  return gcluster.Cluster(devices=[gpu_device])
def validate(graph_def, skip_op_check, strip_debug_ops):
  """Validate if the node's op is compatible with TensorFlow.js.

  Args:
    graph_def: tf.GraphDef TensorFlow GraphDef proto object, which represents
      the model topology.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to allow unsupported debug ops.

  Returns:
    A set of unsupported op names (empty when all ops are supported or the
    check is skipped).
  """
  # Collect every node: top-level graph nodes plus all function-library nodes.
  all_nodes = list(graph_def.node)
  for func in graph_def.library.function:
    all_nodes.extend(func.node_def)

  if skip_op_check:
    return set()

  # The supported-op lists ship as JSON files in the op_list resource dir.
  supported_ops = []
  for filename in resource_loader.list_dir('op_list'):
    if os.path.splitext(filename)[1] == '.json':
      with resource_loader.open_file(
          os.path.join('op_list', filename)) as json_data:
        supported_ops += json.load(json_data)
  supported_names = {entry['tfOpName'] for entry in supported_ops}
  if strip_debug_ops:
    # Debug ops will be stripped later, so treat them as acceptable here.
    supported_names = supported_names.union({'Assert', 'CheckNumerics',
                                             'Print'})
  return {node.op for node in all_nodes if node.op not in supported_names}
def _run_grappler(config, graph_def, graph, signature_def):
  """Run the Grappler optimizers configured in *config* over *graph_def*."""
  meta_graph = export_meta_graph(graph_def=graph_def, graph=graph)

  # Grappler uses the signature to determine graph outputs; the map key is
  # irrelevant and never read back.
  meta_graph.signature_def["not_used_key"].CopyFrom(signature_def)

  return tf_optimizer.OptimizeGraph(config, meta_graph, cluster=get_cluster())
def optimize_graph(graph, signature_def, output_graph,
                   tf_version, quantization_dtype_map=None,
                   skip_op_check=False, strip_debug_ops=False,
                   weight_shard_size_bytes=1024 * 1024 * 4,
                   experiments=False,
                   initializer_graph=None,
                   metadata=None):
  """Takes a Python Graph object and optimizes the graph.

  Runs two Grappler passes (with batch-norm folding and op fusion in
  between), validates op support before and after, then writes the
  converted artifacts via extract_weights.

  Args:
    graph: The frozen graph to optimize.
    signature_def: the SignatureDef of the inference graph.
    output_graph: The location of the output graph.
    tf_version: Tensorflow version of the input graph.
    quantization_dtype_map: A mapping from dtype
      (`uint8`, `uint16`, `float16`) to weights names. The weight mapping
      supports wildcard substitution.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to strip debug ops.
    weight_shard_size_bytes: Shard size (in bytes) of the weight files.
      The size of each weight file will be <= this value.
    experiments: Bool whether to enable experimental Grappler behavior
      (disables compressed tensor optimization).
    initializer_graph: The frozen graph for initializers.
    metadata: User defined metadata map.

  Raises:
    ValueError: if the graph contains ops unsupported by TensorFlow.js,
      either before or after optimization.
  """
  # Add a collection 'train_op' so that Grappler knows the outputs.
  for _, output in signature_def.outputs.items():
    name = output.name.split(':')[0]
    graph.add_to_collection('train_op', graph.get_operation_by_name(name))

  graph_def = graph.as_graph_def()

  unsupported = validate(graph_def, skip_op_check,
                         strip_debug_ops)
  if unsupported:
    raise ValueError('Unsupported Ops in the model before optimization\n' +
                     ', '.join(unsupported))

  # first pass of grappler optimization, this is needed for batch norm folding.
  config = config_pb2.ConfigProto()
  rewriter_config = config.graph_options.rewrite_options
  # The optimizer list is run twice back-to-back so later passes can act on
  # simplifications produced by earlier ones.
  rewriter_config.optimizers[:] = [
      'pruning', 'constfold', 'arithmetic', 'dependency', 'pruning',
      'constfold', 'arithmetic', 'dependency'
  ]
  if experiments:
    rewriter_config.experimental_disable_compressed_tensor_optimization = True

  if strip_debug_ops:
    # Must run first so debug ops are gone before the other optimizers.
    rewriter_config.optimizers.insert(0, 'debug_stripper')

  optimized_graph = _run_grappler(config, graph_def, graph, signature_def)

  # batch norm folding
  optimized_graph = fold_batch_norms.fold_batch_norms(optimized_graph)

  # set the device to CPU for all Conv2d and MatMul nodes, since grappler
  # remap optimizer only support FusedConv2D and FusedMatMul for CPU.
  for node in optimized_graph.node:
    if node.op == 'Conv2D' or node.op == 'MatMul':
      node.device = '/device:CPU:0'

  # rerun grappler to fuse conv2d/matmul
  config.graph_options.rewrite_options.optimizers[:] = [
      'remap',
      'constfold', 'arithmetic', 'dependency'
  ]
  optimized_graph = _run_grappler(config, optimized_graph, graph, signature_def)

  optimized_graph = _remove_unused_control_flow_inputs(optimized_graph)

  # Because TF break the Prelu op into 6 ops, for performance we are
  # fusing those ops into a single prelu
  optimized_graph = fuse_prelu.fuse_ops_for_prelu(optimized_graph)

  # Because grappler does not support DepthwiseConv2d fusing, we have
  # implemented it here.
  optimized_graph = fuse_depthwise_conv2d.fuse_depthwise_conv2d(optimized_graph)

  # Since the grappler remap optimizer does not support prelu as the activation
  # function for _FusedConv2D op, we are doing it manually here.
  optimized_graph = fuse_prelu.fuse_prelu_with_fused_conv2d_or_matmul(
      optimized_graph)

  unsupported = validate(optimized_graph, skip_op_check,
                         strip_debug_ops)
  if unsupported:
    raise ValueError('Unsupported Ops in the model after optimization\n' +
                     ', '.join(unsupported))

  initializer_graph_def = None
  if initializer_graph:
    initializer_graph_def = initializer_graph.as_graph_def()

  extract_weights(
      optimized_graph, output_graph, tf_version,
      signature_def, quantization_dtype_map, weight_shard_size_bytes,
      initializer_graph_def, metadata=metadata)
def extract_const_nodes(nodes):
  """Takes a list of nodes and extract the weights. Return weight manifest
  object.

  Args:
    nodes: list of tf.NodeDef TensorFlow NodeDef proto object.
  """
  const_nodes = [n for n in nodes if n.op == 'Const']

  # Stash and drop the (conditional) inputs of each constant so the value
  # extraction below sees a plain Const node.
  saved_inputs = {}
  for node in const_nodes:
    saved_inputs[node.name] = node.input[:]
    del node.input[:]

  manifest = []
  for node in const_nodes:
    manifest.append({
        'name': node.name,
        'data': graph_rewrite_util.values_from_const(node)
    })
    # Put the inputs back now that the value has been captured.
    node.input[:] = saved_inputs[node.name]
    # Strip the raw tensor payload from the graph; the data will live in
    # the external weight files instead.
    for field_name in CLEARED_TENSOR_FIELDS:
      node.attr["value"].tensor.ClearField(field_name)
  return manifest
def extract_weights(graph_def,
                    output_graph,
                    tf_version,
                    signature_def,
                    quantization_dtype_map=None,
                    weight_shard_size_bytes=1024 * 1024 * 4,
                    initializer_graph_def=None,
                    metadata=None):
  """Takes a Python GraphDef object and extract the weights.

  Args:
    graph_def: tf.GraphDef TensorFlow GraphDef proto object, which represents
      the model topology.
    output_graph: the path of the output model.json file.
    tf_version: Tensorflow version of the input graph.
    signature_def: the SignatureDef of the inference graph.
    quantization_dtype_map: A mapping from dtype
      (`uint8`, `uint16`, `float16`) to weights names. The weight mapping
      supports wildcard substitution.
    weight_shard_size_bytes: Shard size (in bytes) of the weight files.
      The size of each weight file will be <= this value.
    initializer_graph_def: tf.GraphDef proto object for initializer graph.
    metadata: User defined metadata map.
  """
  # Pull the constants out of the main graph first ...
  manifest = extract_const_nodes(graph_def.node)

  # ... then out of each function in the graph library, after renaming the
  # function-local constants so their names are globally unique.
  for func in graph_def.library.function:
    renamed = graph_rewrite_util.rename_constants(
        func.node_def, func.signature.name)
    del func.node_def[:]
    func.node_def.extend(renamed)
    manifest += extract_const_nodes(func.node_def)

  # ... and finally out of the optional initializer graph.
  if initializer_graph_def:
    manifest += extract_const_nodes(initializer_graph_def.node)

  print('Writing weight file ' + output_graph + '...')

  # All constants go into a single weight group.
  write_artifacts(MessageToDict(graph_def),
                  [manifest],
                  output_graph,
                  tf_version, signature_def,
                  quantization_dtype_map=quantization_dtype_map,
                  weight_shard_size_bytes=weight_shard_size_bytes,
                  initializer_graph_def=initializer_graph_def,
                  metadata=metadata)
def write_artifacts(topology,
                    weights,
                    output_graph,
                    tf_version,
                    signature_def,
                    quantization_dtype_map=None,
                    weight_shard_size_bytes=1024 * 1024 * 4,
                    initializer_graph_def=None,
                    metadata=None):
  """Writes weights and topology to the output_dir.

  If `topology` is Falsy (e.g., `None`), only emit weights to output_dir.

  Args:
    topology: tf.GraphDef TensorFlow GraphDef proto object, which represents
      the model topology.
    weights: an array of weight groups (as defined in tfjs write_weights).
    output_graph: the output file name to hold all the contents.
    tf_version: Tensorflow version of the input graph.
    signature_def: the SignatureDef of the inference graph.
    quantization_dtype_map: A mapping from dtype
      (`uint8`, `uint16`, `float16`) to weights names. The weight mapping
      supports wildcard substitution.
    weight_shard_size_bytes: Shard size (in bytes) of the weight files.
      The size of each weight file will be <= this value.
    initializer_graph_def: tf.GraphDef proto object for initializer graph.
    metadata: User defined metadata map.
  """
  # Shard and write the weight groups next to the model.json file; the
  # returned manifest describes the shard layout.
  weights_manifest = write_weights.write_weights(
      weights, os.path.dirname(output_graph), write_manifest=False,
      quantization_dtype_map=quantization_dtype_map,
      shard_size_bytes=weight_shard_size_bytes)
  assert isinstance(weights_manifest, list)

  model_json = {
      common.FORMAT_KEY: common.TFJS_GRAPH_MODEL_FORMAT,
      # TODO(piyu): Add tensorflow version below by using `meta_info_def`.
      common.GENERATED_BY_KEY: tf_version,
      common.CONVERTED_BY_KEY: common.get_converted_by(),
      common.SIGNATURE_KEY: MessageToDict(signature_def),
      common.ARTIFACT_MODEL_TOPOLOGY_KEY: topology or None,
  }
  if metadata:
    model_json[common.USER_DEFINED_METADATA_KEY] = metadata
  if initializer_graph_def and initializer_graph_def.node:
    model_json[common.ARTIFACT_MODEL_INITIALIZER] = MessageToDict(
        initializer_graph_def)
  model_json[common.ARTIFACT_WEIGHTS_MANIFEST_KEY] = weights_manifest

  with tf.io.gfile.GFile(output_graph, 'w') as f:
    json.dump(model_json, f)
def _remove_unused_control_flow_inputs(input_graph_def):
  """Return a copy of *input_graph_def* with the placeholders named
  'unused_control_flow_input*' dropped."""
  pruned = graph_pb2.GraphDef()
  for node in input_graph_def.node:
    is_unused_placeholder = (
        node.op == 'Placeholder' and
        node.name.startswith('unused_control_flow_input'))
    if is_unused_placeholder:
      continue
    copied = node_def_pb2.NodeDef()
    copied.CopyFrom(node)
    pruned.node.extend([copied])
  # Preserve the function library and version info of the source graph.
  pruned.library.CopyFrom(input_graph_def.library)
  pruned.versions.CopyFrom(input_graph_def.versions)
  return pruned
def _check_signature_in_model(saved_model, signature_name):
if signature_name not in saved_model.signatures:
raise ValueError("Signature '%s' does not exist. The following signatures "
"are available: %s" % (signature_name,
saved_model.signatures.keys()))
def _freeze_saved_model_v1(saved_model_dir, saved_model_tags,
                           output_node_names):
  """Freeze the graph by converting variables to constants for 1.x saved model.

  Args:
    saved_model_dir: dir where saved model files are stored.
    saved_model_tags: inference graph tag.
    output_node_names: List of name strings for the result nodes of the graph.

  Returns:
    A freezed and optimized graph.
    Nullable. A freezed and optimized initializer graph; only produced when
      the model carries a 'table_initializer' collection, otherwise None.
  """
  # v1 loader need empty list if there are no saved_model tags.
  if not saved_model_tags:
    saved_model_tags = []

  g = tf.Graph()
  with g.as_default():
    with tf.compat.v1.Session() as sess:
      meta_graph = loader.load(sess, saved_model_tags, saved_model_dir)

      meta_graph_def = g.as_graph_def()

      frozen_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
          sess, meta_graph_def, output_node_names)

      frozen_graph = tf.Graph()
      with frozen_graph.as_default():
        tf.import_graph_def(frozen_graph_def, name='')

      frozen_initializer_graph = None
      initializer_output_names = None
      # Only support table initializers for now.
      # NOTE(review): indexing a proto map can insert an empty entry for a
      # missing key; the truthiness check still yields False then — confirm.
      if meta_graph.collection_def and meta_graph.collection_def[
          'table_initializer']:
        initializer_output_names = meta_graph.collection_def[
            'table_initializer'].node_list.value
        # This will use grappler to extract a subgraph with the
        # table initializer ops as the outputs.
        frozen_initializer_graph_def = (tf.compat.v1.graph_util
                                        .convert_variables_to_constants(
                                            sess, meta_graph_def,
                                            initializer_output_names))
        frozen_initializer_graph = tf.Graph()
        with frozen_initializer_graph.as_default():
          tf.import_graph_def(frozen_initializer_graph_def, name='')

      return frozen_graph, frozen_initializer_graph
def _freeze_saved_model_v2(concrete_func, control_flow_v2=False):
  """Freeze a 2.x concrete function by converting variables to constants.

  Args:
    concrete_func: The ConcreteFunction to freeze.
    control_flow_v2: Bool whether to keep (not lower) control flow v2 ops.

  Returns:
    The frozen tf.Graph.
  """
  # BUG FIX: the original compared version strings lexicographically
  # (tf.__version__ < '2.2.0'), which wrongly treats e.g. '2.10.0' as older
  # than '2.2.0'. Compare numeric (major, minor) components instead.
  version = tuple(int(part) for part in tf.__version__.split('.')[:2])
  if version < (2, 2):
    # `aggressive_inlining` is only available from TF 2.2 onward.
    return convert_to_constants.convert_variables_to_constants_v2(
        concrete_func, lower_control_flow=not control_flow_v2).graph
  return convert_to_constants.convert_variables_to_constants_v2(
      concrete_func, lower_control_flow=not control_flow_v2,
      aggressive_inlining=True).graph
def _find_signature_def_name(tensor, signature_map):
if not signature_map:
return tensor.name
tensor_shape_str = tensor.shape.as_proto().SerializeToString()
names = []
for key in signature_map:
tensor_info = signature_map[key]
signature_shape_str = tensor_info.tensor_shape.SerializeToString()
if (tensor_info.dtype == tensor.dtype and
tensor_shape_str == signature_shape_str):
names.append(key)
if not names or len(names) > 1:
return tensor.name
else:
return names[0]
def _build_signature_def(frozen_graph, input_nodes, output_nodes,
                         signature_def=None):
  """Build a SignatureDef proto describing the frozen graph's interface.

  Args:
    frozen_graph: tf.Graph the frozen inference graph.
    input_nodes: list of input tensors.
    output_nodes: list of output tensors, or of plain tensor-name strings.
    signature_def: Optional original SignatureDef, used to recover the
      user-facing input/output names.

  Returns:
    A meta_graph_pb2.SignatureDef proto.
  """
  signature = meta_graph_pb2.SignatureDef()
  for input_tensor in input_nodes:
    op_name = input_tensor.name.split(':')[0]
    # The graph freezing may turn the original inputs into constants, or remove
    # them from the graph, so we need to ignore those.
    try:
      op = frozen_graph.get_operation_by_name(op_name)
      if op.type != 'Const':
        name = input_tensor.name
        if hasattr(signature_def, 'inputs'):
          name = _find_signature_def_name(input_tensor, signature_def.inputs)
        signature.inputs[name].name = input_tensor.name
        signature.inputs[name].dtype = input_tensor.dtype.as_datatype_enum
        signature.inputs[name].tensor_shape.CopyFrom(
            input_tensor.shape.as_proto())
    except KeyError:
      # The original input was removed when the graph was frozen.
      continue
  for output_tensor in output_nodes:
    if hasattr(output_tensor, 'name'):
      name = output_tensor.name
      # Consistency fix: this previously tested hasattr(signature_def,
      # 'inputs') while reading signature_def.outputs below.
      if hasattr(signature_def, 'outputs'):
        name = _find_signature_def_name(output_tensor, signature_def.outputs)
      signature.outputs[name].name = output_tensor.name
      signature.outputs[name].dtype = output_tensor.dtype.as_datatype_enum
      signature.outputs[name].tensor_shape.CopyFrom(
          output_tensor.shape.as_proto())
    else: # just the tensor name string array
      signature.outputs[output_tensor].name = output_tensor
  return signature
def convert_tf_frozen_model(frozen_model_path,
                            output_node_names,
                            output_dir,
                            quantization_dtype_map=None,
                            skip_op_check=False,
                            strip_debug_ops=False,
                            weight_shard_size_bytes=1024 * 1024 * 4,
                            experiments=False,
                            metadata=None):
  """Convert frozen model and check the model compatibility with Tensorflow.js.

  Optimize and convert the model to Tensorflow.js format, when the model passes
  the compatibility check.

  Args:
    frozen_model_path: string The path to frozen model.
    output_node_names: string The names of the output nodes, comma separated.
    output_dir: string The name of the output directory. The directory
      will consist of
      - a file named 'model.json'
      - possibly sharded binary weight files.
    quantization_dtype_map: A mapping from dtype
      (`uint8`, `uint16`, `float16`) to weights names. The weight mapping
      supports wildcard substitution.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to strip debug ops.
    weight_shard_size_bytes: Shard size (in bytes) of the weight files.
      The size of each weight file will be <= this value.
    experiments: Bool enable experimental features.
    metadata: User defined metadata map.
  """
  if not os.path.exists(output_dir):
    os.makedirs(output_dir)
  model_json_path = os.path.join(
      output_dir, common.ARTIFACT_MODEL_JSON_FILE_NAME)

  frozen_graph = load_graph(frozen_model_path)
  # A frozen graph carries no SignatureDef, so synthesize one from the
  # requested output node names.
  signature = _build_signature_def(
      frozen_graph, [], output_node_names.split(','))

  optimize_graph(frozen_graph, signature,
                 model_json_path, tf.__version__,
                 quantization_dtype_map=quantization_dtype_map,
                 skip_op_check=skip_op_check,
                 strip_debug_ops=strip_debug_ops,
                 weight_shard_size_bytes=weight_shard_size_bytes,
                 experiments=experiments,
                 metadata=metadata)
def _load_model(saved_model_dir, saved_model_tags):
  """Load a SavedModel, optionally filtered by tags."""
  # Ensure any graphs created in eager mode are able to run.
  with context.eager_mode():
    if saved_model_tags:
      return load(saved_model_dir, saved_model_tags)
    return load(saved_model_dir)
def _find_signature(saved_model_dir, saved_model_tags, signature_def):
  """Return the SignatureDef named *signature_def* from the saved model.

  Raises:
    ValueError: when the saved model has no signature with that name.
  """
  signature_map = get_signature_def_map(saved_model_dir, saved_model_tags)
  if signature_def in signature_map:
    return signature_map[signature_def]
  raise ValueError('Signature "%s" does not exist in the saved model'
                   % (signature_def))
def _convert_tf_saved_model(output_dir,
                            saved_model_dir=None,
                            keras_model=None,
                            signature_def='serving_default',
                            saved_model_tags='serve',
                            quantization_dtype_map=None,
                            skip_op_check=False,
                            strip_debug_ops=False,
                            weight_shard_size_bytes=1024 * 1024 * 4,
                            control_flow_v2=False,
                            experiments=False,
                            metadata=None):
  """Take a SavedModel or KerasModel and convert to Tensorflow.js graph model.

  Exactly one of `saved_model_dir` or `keras_model` should be provided.

  Args:
    output_dir: string The name of the output directory. The directory
      will consist of
      - a file named 'model.json'
      - possibly sharded binary weight files.
    saved_model_dir: string The saved model directory.
    keras_model: An in-memory Keras model object.
    signature_def: string Tagset of the SignatureDef to load. Defaults to
      'serving_default'.
    saved_model_tags: tags of the GraphDef to load. Defaults to 'serve'.
    quantization_dtype_map: A mapping from dtype
      (`uint8`, `uint16`, `float16`) to weights names. The weight mapping
      supports wildcard substitution.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to strip debug ops.
    weight_shard_size_bytes: Shard size (in bytes) of the weight files.
      The size of each weight file will be <= this value.
    control_flow_v2: Bool whether to enable control flow v2 ops.
    experiments: Bool enable experimental features.
    metadata: User defined metadata map.

  Raises:
    Exception: when neither a saved model nor a keras model is provided.
  """
  if signature_def is None:
    signature_def = 'serving_default'

  if not tf.io.gfile.exists(output_dir):
    tf.io.gfile.makedirs(output_dir)
  output_graph = os.path.join(
      output_dir, common.ARTIFACT_MODEL_JSON_FILE_NAME)

  saved_model_tags_list = None
  if saved_model_tags:
    saved_model_tags_list = saved_model_tags.split(',')

  model = None
  concrete_func = None
  # NOTE(review): misspelled local ('sigature'); kept as-is here.
  saved_model_sigature = None
  if saved_model_dir:
    saved_model_sigature = _find_signature(saved_model_dir, saved_model_tags,
                                           signature_def)
    model = _load_model(saved_model_dir, saved_model_tags_list)
    _check_signature_in_model(model, signature_def)
    concrete_func = model.signatures[signature_def]
  elif keras_model:
    model = keras_model
    input_signature = None
    # If the model's call is not a `tf.function`, then we need to first get its
    # input signature from `model_input_signature` method. We can't directly
    # call `trace_model_call` because otherwise the batch dimension is set
    # to None.
    if not isinstance(model.call, def_function.Function):
      # Pass `keep_original_batch_size=True` will ensure that we get an input
      # signature including the batch dimension specified by the user.
      input_signature = model_input_signature(
          model, keep_original_batch_size=True)
    func = trace_model_call(model, input_signature)
    concrete_func = func.get_concrete_function()
  else:
    raise Exception('Provide either a saved model or keras model to convert.')

  output_node_names = []
  for output_tensor in concrete_func.outputs:
    output_node_names.append(output_tensor.name.split(':')[0])

  # TensorFlow doesn't encode the saved model version in the graph in a
  # reliable way. Try to freeze the graph using V2 utils. If that fails, freeze
  # the graph using V1 utils.
  frozen_initializer_graph = None
  try:
    frozen_graph = _freeze_saved_model_v2(concrete_func, control_flow_v2)
  except BaseException:
    if saved_model_dir:
      (frozen_graph,
       frozen_initializer_graph) = _freeze_saved_model_v1(saved_model_dir,
                                                          saved_model_tags_list,
                                                          output_node_names)
    else:
      print('Can not freeze saved model v1.')
      return

  # Resource-dtype inputs are variable handles, not real model inputs.
  inputs = [x for x in concrete_func.inputs if not x.dtype == 'resource']
  signature = _build_signature_def(
      frozen_graph, inputs, concrete_func.outputs, saved_model_sigature)

  # NOTE(review): this call only defines (and discards) a local helper inside
  # define_transform_graph_func; it has no effect here — confirm whether graph
  # stripping was meant to be applied to frozen_graph.
  define_transform_graph_func()

  version = None
  try:
    version = model.tensorflow_version
  except: # pylint: disable=W0702
    # keras model does not have tensorflow_version, hard code to the latest
    # tensorflow version.
    version = tf.__version__

  optimize_graph(frozen_graph, signature,
                 output_graph, version,
                 quantization_dtype_map=quantization_dtype_map,
                 skip_op_check=skip_op_check,
                 strip_debug_ops=strip_debug_ops,
                 weight_shard_size_bytes=weight_shard_size_bytes,
                 experiments=experiments,
                 initializer_graph=frozen_initializer_graph,
                 metadata=metadata)
def define_transform_graph_func():
  """Check if the TransformGraph is available to be imported, this package is
  available in g3 but not in oss version of TensorFlow.

  NOTE(review): the nested helper defined below is neither returned nor bound
  to any outer name, so calling this function currently has no lasting
  effect — confirm whether the helper was meant to be exposed.
  """
  transform_graph_available = True
  try:
    from tensorflow.tools.graph_transforms import TransformGraph # pylint: disable=C0415
  except: # pylint: disable=W0702
    transform_graph_available = False

  # Define the strip graph functions when TransformGraph is available, this will
  # strip the unused nodes from the graph.
  if transform_graph_available:
    def _strip_unused_nodes(frozen_graph, concrete_func, output_node_names):
      """Return a copy of frozen_graph reduced to the minimal inference graph
      between the concrete function's live inputs and output_node_names."""
      # Find the names of the input nodes needed to extract the minimal
      # inference graph. This is particularly useful for cases when the concrete
      # function contains nodes that do not contribute the inference computation
      # defined by the input/output pair. This would also eliminate op
      # unsupported error caused by nodes outside of the minimal inference
      # graph.
      input_node_names = []
      input_tensors = {}
      for input_tensor in concrete_func.inputs:
        if input_tensor.dtype != 'resource':
          op_name = input_tensor.name.split(':')[0]
          # The graph freezing may turn the original inputs into constants, or
          # remove them from the graph, so we need to ignore those.
          try:
            op = frozen_graph.get_operation_by_name(op_name)
            if op.type != 'Const':
              input_node_names.append(op_name)
              input_tensors[op_name] = input_tensor
          except KeyError:
            # The original input was removed when the graph was frozen.
            continue

      graph_transformations = ['strip_unused_nodes']
      stripped_graph_def = TransformGraph(
          frozen_graph.as_graph_def(), input_node_names, output_node_names,
          graph_transformations)

      # The transform graph library cannot support input nodes that has dynamic
      # shape, this code will update the dtype and shape based on the
      # input tensor manually.
      for node in stripped_graph_def.node:
        if node.name in input_tensors:
          if node.attr['shape'] and node.attr['shape'].shape:
            node.attr['shape'].shape.CopyFrom(
                input_tensors[node.name].shape.as_proto())
          if node.attr['dtype'] and node.attr['dtype'].type:
            node.attr['dtype'].type = input_tensors[
                node.name].dtype.as_datatype_enum

      with tf.Graph().as_default() as stripped_graph:
        tf.import_graph_def(stripped_graph_def, name='')
        return stripped_graph
def convert_tf_saved_model(saved_model_dir,
                           output_dir, signature_def='serving_default',
                           saved_model_tags='serve',
                           quantization_dtype_map=None,
                           skip_op_check=False,
                           strip_debug_ops=False,
                           weight_shard_size_bytes=1024 * 1024 * 4,
                           control_flow_v2=False,
                           experiments=False,
                           metadata=None):
  """Freeze the SavedModel and check the model compatibility with Tensorflow.js.

  Optimize and convert the model to Tensorflow.js format, when the model passes
  the compatibility check. Thin wrapper around _convert_tf_saved_model for the
  SavedModel case.

  Args:
    saved_model_dir: string The saved model directory.
    output_dir: string The name of the output directory. The directory
      will consist of
      - a file named 'model.json'
      - possibly sharded binary weight files.
    signature_def: string Tagset of the SignatureDef to load. Defaults to
      'serving_default'.
    saved_model_tags: tags of the GraphDef to load. Defaults to 'serve'.
    quantization_dtype_map: A mapping from dtype
      (`uint8`, `uint16`, `float16`) to weights names. The weight mapping
      supports wildcard substitution.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to strip debug ops.
    weight_shard_size_bytes: Shard size (in bytes) of the weight files.
      The size of each weight file will be <= this value.
    control_flow_v2: Bool whether to enable control flow v2 ops.
    experiments: Bool enable experimental features.
    metadata: User defined metadata map.
  """
  options = dict(
      saved_model_dir=saved_model_dir,
      signature_def=signature_def,
      saved_model_tags=saved_model_tags,
      quantization_dtype_map=quantization_dtype_map,
      skip_op_check=skip_op_check,
      strip_debug_ops=strip_debug_ops,
      weight_shard_size_bytes=weight_shard_size_bytes,
      control_flow_v2=control_flow_v2,
      experiments=experiments,
      metadata=metadata)
  _convert_tf_saved_model(output_dir, **options)
def load_and_initialize_hub_module(module_path, signature='default'):
  """Loads graph of a TF-Hub module and initializes it into a session.

  Args:
    module_path: string Path to TF-Hub module.
    signature: string Signature to use when creating the apply graph.

  Return:
    graph: tf.Graph Graph of the module.
    session: tf.Session Session with initialized variables and tables.
    inputs: dict Dictionary of input tensors.
    outputs: dict Dictionary of output tensors.

  Raises:
    ValueError: If signature contains a SparseTensor on input or output.
  """
  graph = tf.Graph()
  with graph.as_default():
    tf.compat.v1.logging.info('Importing %s', module_path)
    module = hub.Module(module_path)

    signature_inputs = module.get_input_info_dict(signature)
    signature_outputs = module.get_output_info_dict(signature)
    # First check there are no SparseTensors in input or output.
    for key, info in list(signature_inputs.items()) + list(
        signature_outputs.items()):
      if info.is_sparse:
        raise ValueError(
            'Signature "%s" has a SparseTensor on input/output "%s".'
            ' SparseTensors are not supported.' % (signature, key))

    # Create placeholders to represent the input of the provided signature.
    inputs = {}
    for input_key, input_info in signature_inputs.items():
      inputs[input_key] = tf.compat.v1.placeholder(
          shape=input_info.get_shape(), dtype=input_info.dtype, name=input_key)

    outputs = module(inputs=inputs, signature=signature, as_dict=True)

  session = tf.compat.v1.Session(graph=graph)
  # Run the variable and table initializers so the session can later be used
  # to freeze the graph.
  session.run(tf.compat.v1.global_variables_initializer())
  session.run(tf.compat.v1.tables_initializer())
  return graph, session, inputs, outputs
def convert_tf_hub_module_v1(module_path, output_dir,
                             signature='default', quantization_dtype_map=None,
                             skip_op_check=False, strip_debug_ops=False,
                             weight_shard_size_bytes=1024 * 1024 * 4,
                             experiments=False,
                             metadata=None):
  """Freeze the TF-Hub module and check compatibility with Tensorflow.js.

  Optimize and convert the TF-Hub module to Tensorflow.js format, if it passes
  the compatiblity check.

  Args:
    module_path: string Path to the module.
    output_dir: string The name of the output directory. The directory
      will consist of
      - a file named 'model.json'
      - possibly sharded binary weight files.
    signature: string Signature to load.
    quantization_dtype_map: A mapping from dtype
      (`uint8`, `uint16`, `float16`) to weights names. The weight mapping
      supports wildcard substitution.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to strip debug ops.
    weight_shard_size_bytes: Shard size (in bytes) of the weight files.
      The size of each weight file will be <= this value.
    experiments: Bool enable experimental features.
    metadata: User defined metadata map.
  """
  if signature is None:
    signature = 'default'

  if not os.path.exists(output_dir):
    os.makedirs(output_dir)

  graph, sess, inputs, outputs = load_and_initialize_hub_module(
      module_path, signature)

  input_node_names = []
  output_node_names = []

  for input_tensor in inputs.values():
    input_node_names.append(input_tensor.name.split(':')[0])
  for output_tensor in outputs.values():
    output_node_names.append(output_tensor.name.split(':')[0])

  print('Creating a model with inputs %s and outputs %s.' % (input_node_names,
                                                             output_node_names))

  frozen_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
      sess, graph.as_graph_def(), output_node_names)

  output_graph = os.path.join(output_dir, common.ARTIFACT_MODEL_JSON_FILE_NAME)
  # The serialized frozen graph is only needed transiently on disk so that
  # load_graph can re-import it; it is deleted in the finally block.
  frozen_file = output_graph + '.frozen'
  try:
    with tf.compat.v1.gfile.GFile(frozen_file, 'wb') as f:
      f.write(frozen_graph_def.SerializeToString())

    frozen_graph = load_graph(frozen_file)
    # NOTE(review): `signature` (a string up to this point) is rebound to a
    # SignatureDef proto here; a distinct name would be clearer.
    signature = _build_signature_def(frozen_graph,
                                     inputs.values(), outputs.values())
    optimize_graph(frozen_graph, signature,
                   output_graph, tf.__version__,
                   quantization_dtype_map=quantization_dtype_map,
                   skip_op_check=skip_op_check,
                   strip_debug_ops=strip_debug_ops,
                   weight_shard_size_bytes=weight_shard_size_bytes,
                   experiments=experiments,
                   metadata=metadata)
  finally:
    # Clean up the temp files.
    if os.path.exists(frozen_file):
      os.remove(frozen_file)
def convert_tf_hub_module(module_handle, output_dir,
                          signature='default', saved_model_tags='serve',
                          quantization_dtype_map=None,
                          skip_op_check=False, strip_debug_ops=False,
                          weight_shard_size_bytes=1024 * 1024 * 4,
                          control_flow_v2=False,
                          experiments=False,
                          metadata=None):
  """Conversion for TF Hub modules V1 and V2.

  See convert_tf_hub_module_v1 and convert_tf_saved_model.

  Args:
    module_handle: string Handle (URL or path) of the module to convert.
    output_dir: string The name of the output directory. The directory
      will consist of
      - a file named 'model.json'
      - possibly sharded binary weight files.
    signature: string Signature to load.
    saved_model_tags: tags of the GraphDef to load. Defaults to 'serve'.
    quantization_dtype_map: A mapping from dtype
      (`uint8`, `uint16`, `float16`) to weights names. The weight mapping
      supports wildcard substitution.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to strip debug ops.
    weight_shard_size_bytes: Shard size (in bytes) of the weight files.
      The size of each weight file will be <= this value.
    control_flow_v2: Bool whether to enable control flow v2 ops.
    experiments: Bool enable experimental features.
    metadata: User defined metadata map.
  """
  module_path = hub.resolve(module_handle)
  # TODO(vbardiovskyg): We can remove this v1 code path once loading of all v1
  # modules is fixed on the TF side, or once the modules we cannot load become
  # replaced with newer versions.
  if tf.io.gfile.exists(os.path.join(module_path, _HUB_V1_MODULE_PB)):
    print("Loading the module using TF 1.X interface from %s." % module_path)
    convert_tf_hub_module_v1(module_path, output_dir, signature,
                             quantization_dtype_map,
                             skip_op_check, strip_debug_ops,
                             weight_shard_size_bytes,
                             experiments=experiments,
                             metadata=metadata)
  else:
    print("Loading the module using TF 2.X interface from %s." % module_path)
    if signature is None:
      signature = 'default'
    convert_tf_saved_model(saved_model_dir=module_path,
                           output_dir=output_dir,
                           signature_def=signature,
                           saved_model_tags=saved_model_tags,
                           quantization_dtype_map=quantization_dtype_map,
                           skip_op_check=skip_op_check,
                           strip_debug_ops=strip_debug_ops,
                           weight_shard_size_bytes=weight_shard_size_bytes,
                           control_flow_v2=control_flow_v2,
                           experiments=experiments,
                           metadata=metadata)
def convert_keras_model_to_graph_model(keras_model,
                                       output_dir,
                                       saved_model_tags='serve',
                                       quantization_dtype_map=None,
                                       skip_op_check=False,
                                       strip_debug_ops=False,
                                       weight_shard_size_bytes=1024 * 1024 * 4,
                                       control_flow_v2=False,
                                       experiments=False,
                                       metadata=None):
  """Convert an in-memory keras model to Tensorflow.js graph model format.

  This is a thin wrapper that forwards everything to
  `_convert_tf_saved_model`, passing the model object instead of a path.

  Args:
    keras_model: Keras Model object.
    output_dir: string The name of the output directory. The directory
      will consist of
      - a file named 'model.json'
      - possibly sharded binary weight files.
    saved_model_tags: tags of the GraphDef to load. Defaults to 'serve'.
    quantization_dtype_map: A mapping from dtype
      (`uint8`, `uint16`, `float16`) to weights names. The weight mapping
      supports wildcard substitution.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to strip debug ops.
    weight_shard_size_bytes: Shard size (in bytes) of the weight files.
      The size of each weight file will be <= this value.
    control_flow_v2: Bool whether to enable control flow v2 ops.
    experiments: Bool enable experimental features.
    metadata: User defined metadata map.
  """
  forwarded_options = dict(
      keras_model=keras_model,
      saved_model_tags=saved_model_tags,
      quantization_dtype_map=quantization_dtype_map,
      skip_op_check=skip_op_check,
      strip_debug_ops=strip_debug_ops,
      weight_shard_size_bytes=weight_shard_size_bytes,
      control_flow_v2=control_flow_v2,
      experiments=experiments,
      metadata=metadata)
  _convert_tf_saved_model(output_dir, **forwarded_options)
| |
from setuptools import setup
from distutils.extension import Extension
import os.path
import sys
# Build-configuration accumulators. Each platform branch below overrides or
# extends the ones it needs; the setup() call at the bottom consumes them.
# NOTE(review): this script uses Python 2 print statements throughout and
# must be run with a Python 2 interpreter.
extra_compile_args = ["-DDLLX="]
extra_link_args = []
define_macros = []
runtime_library_dirs = []
library_dirs = []
libraries = []
include_dirs = []
# ---- Per-platform build configuration -------------------------------------
if sys.platform == "win32":
    # Windows: expects Boost 1.55 headers/libs under C:/Boost and a prebuilt
    # leveldb-mcpe import library in the current directory.
    if sys.maxsize > 2 ** 32: # 64-bit
        print "Building windows application 'leveldb_mcpe' x64"
        include_dirs = ["C:/Boost/include/boost-1_55", "./leveldb-mcpe/include"]
        library_dirs = ["C:/Boost/lib/x64", "."]
        libraries = ["leveldb-mcpe", "shell32", "zlib"]
        extra_compile_args += ["/EHs", "/MD"]
        extra_link_args += ["/MACHINE:x64", "/NODEFAULTLIB:LIBCMT"]
        define_macros = [("WIN32", None), ("_WINDOWS", None), ("LEVELDB_PLATFORM_WINDOWS", None), ("OS_WIN", None)]
    else: # 32-bit
        print "Building windows application 'leveldb_mcpe' x32"
        include_dirs = ["C:/Boost/include/boost-1_55", "./leveldb-mcpe/include"]
        library_dirs = ["C:/Boost/lib/i386", "."]
        libraries = ["leveldb-mcpe", "shell32", "zlib"]
        extra_compile_args += ["/EHs", "/MD"]
        extra_link_args += ["/MACHINE:x86", "/NODEFAULTLIB:LIBCMT"]
        define_macros = [("WIN32", None), ("_WINDOWS", None), ("LEVELDB_PLATFORM_WINDOWS", None), ("OS_WIN", None)]
elif sys.platform == "darwin":
    # macOS: assumes boost and leveldb were installed via /usr/local.
    include_dirs = ["/usr/local/include/boost", "./leveldb-mcpe/include", "."]
    library_dirs = ["/usr/local/lib", ".", "./leveldb-mcpe"]
    libraries = ["boost_python", "leveldb"]
elif sys.platform == "linux2":
    # Linux: downloads and builds boost + Mojang's leveldb-mcpe from source
    # under ~/.local/lib, then links against them statically/dynamically.
    if sys.argv[-1] == 'setup.py':
        print 'No command specified. Aborting.'
        print 'Please, use `python setup.py build` to build Pocket Edition support for MCEdit.'
        sys.exit(1)
    print "Building Linux application 'leveldb_mcpe'..."
    # TODO: add checks, warnings and recomandations if something fails... (<< On the way)
    # add a cleanup option
    # First, unpack and build dependencies: boost and mojang's leveldb-mcpe
    # Need make, g++, tar, unzip, Python 2.7 header files
    # boost will be intalled there to avoid elevation problems
    # 'build_ext --inplace' is not wanted here. Let it be replaced with build
    if '--inplace' in sys.argv:
        sys.argv.remove('--inplace')
    if 'build_ext' in sys.argv:
        sys.argv.remove('build_ext')
        sys.argv.append('build')
    curdir = os.getcwd()
    destpath = '../pymclevel'
    user_libdir = os.path.expanduser('~/.local/lib')
    if not os.path.exists(user_libdir):
        print 'Creating needed library folder: %s' % user_libdir
        os.makedirs(user_libdir)
        print 'Done'
    mcpeso_dir = os.path.join(user_libdir, 'leveldb-mcpe')
    boostRoot = os.path.expanduser('~/.local/lib/boost_1_55_0_mcpe')
    # Decide whether boost / its python wrapper need to be (re)built.
    install_boost = None
    build_boost_python = None
    if not os.path.exists(boostRoot):
        install_boost = True
    else:
        print 'Boost found in %s. Skipping installation.' % boostRoot
        install_boost = False
    if not os.path.exists(os.path.join(boostRoot, 'stage', 'lib', 'libboost_python.a')):
        build_boost_python = True
    else:
        print 'Boost Python wrapper found in %s. Skipping build.' % os.path.join(boostRoot, 'stage', 'lib')
        build_boost_python = False
    if install_boost is None: # Shall not happen...
        print 'Impossible to determine if Boost 1.55.0 is installed in your personnal library folder.'
        a = raw_input('Do you want to (re)install it [y/N] ?')
        if a and a in 'yY':
            install_boost = True
    if build_boost_python is None: # Shall not happen...
        print 'Impossible to determine if Boost Python wrapper is installed in your personnal library folder.'
        a = raw_input('Do you want to (re)install it [y/N] ?')
        if a and a in 'yY':
            build_boost_python = True
    if install_boost:
        print "Extracting boost..."
        os.system('tar --bzip2 -xf boost_1_55_0.tar.bz2')
        os.system('mv boost_1_55_0 %s' % boostRoot)
        os.chdir(boostRoot)
        print "Installing boost..."
        os.system('sh ./bootstrap.sh --prefix=%s' % boostRoot)
        os.chdir(curdir)
        print 'Done.'
    if build_boost_python:
        print "Building boost_python..."
        os.chdir(boostRoot)
        # Static build with -fPIC so it can be linked into the extension.
        os.system('./b2 --with-python --prefix=%s --build-dir=%s -a link=static cxxflags="-fPIC" linkflags="-fPIC"' % (
            boostRoot, boostRoot))
        os.chdir(curdir)
        print 'Done.'
    # Unpack and build leveldb-mcpe from mojang
    build_leveldb = None
    # if not os.path.exists('leveldb-mcpe/libleveldb.a') and not os.path.exists('leveldb-mcpe/libleveldb.so'):
    if not os.path.exists(os.path.join(mcpeso_dir, 'libleveldb.so')):
        build_leveldb = True
    # elif os.path.exists('leveldb-mcpe/libleveldb.a') and os.path.exists('leveldb-mcpe/libleveldb.so'):
    elif os.path.exists(os.path.join(mcpeso_dir, 'libleveldb.so')):
        a = raw_input("Mojang's leveldb is already built. Rebuild [y/N] ?")
        if a and a in 'yY':
            build_leveldb = True
        else:
            build_leveldb = False
    else:
        # NOTE(review): this branch is unreachable — the if/elif above test
        # the same libleveldb.so path and cover both outcomes. It presumably
        # predates the commented-out two-file (.a/.so) checks.
        not_exists = [os.path.basename(a) for a in (os.path.join(mcpeso_dir, 'libleveldb.a'), os.path.join(mcpeso_dir, 'libleveldb.so')) if not os.path.exists(a)]
        print "The file %s is missing. Building MCEdit one may not work." % not_exists[0]
        a = raw_input("Rebuild Mojang's leveldb-mcpe [y/N] ?")
        if a and a in 'yY':
            build_leveldb = True
    if build_leveldb is None: # Shall not happen...
        print "Impossible to determine if Mojang's leveldb-mcpe is already built or not..."
        a = raw_input('Do you want to (re)build it [y/N] ?')
        if a and a in 'yY':
            build_leveldb = True
    if build_leveldb:
        # Fetch/extract the leveldb-mcpe sources (plus zlib) if needed.
        extract = True
        if os.path.exists('leveldb-mcpe') and os.listdir('leveldb-mcpe') != []:
            a = raw_input("Mojang's leveldb-mcpe source directory already exists. Replace it (reextract) [y/N] ?")
            if not a or a not in 'yY':
                extract = False
            else:
                extract = True
        if extract:
            os.system('rm -R leveldb-mcpe')
            if not os.path.exists('leveldb-mcpe-master.zip'):
                # Retrieve Mojang resource linked in MCEdit sources. I know, freaking command line :p
                os.system("""wget -O leveldb-mcpe-master.zip $(wget -S -O - https://github.com/Mojang/leveldb-mcpe/tree/$(wget -S -O - https://github.com/Khroki/MCEdit-Unified/tree/master/leveldb_mcpe | egrep -o '@ <a href="/Mojang/leveldb-mcpe/tree/([0-9A-Za-z]*)"' | egrep -o '/[0-9A-Za-z]*"' | egrep -o '[0-9A-Za-z]*') | egrep '\.zip' | sed 's/<a href="\/Mojang\/leveldb-mcpe\/archive/https:\/\/codeload.github.com\/Mojang\/leveldb-mcpe\/zip/' | sed 's/.zip"//'| sed 's/ //')""")
            print "Extracting Mojang's leveldb-mcpe..."
            os.system('unzip -q leveldb-mcpe-master.zip')
            os.system("mv $(ls -d1 */ | egrep 'leveldb-mcpe-') leveldb-mcpe")
            os.chdir('leveldb-mcpe')
            if not os.path.exists('../zlib.zip'):
                os.system('wget -O ../zlib.zip http://zlib.net/zlib128.zip')
            os.system('unzip -q ../zlib.zip')
            os.system("mv $(ls -d1 */ | egrep 'zlib-') zlib")
            os.chdir('..')
    if build_leveldb:
        os.chdir('leveldb-mcpe')
        print "Building Mojang's leveldb-mcpe..."
        os.system('make')
        os.chdir(curdir)
        print 'Done.'
    else:
        print "Skipping Mojang's leveldb-mcpe build."
    # Install the built shared library (and version symlinks) into the
    # per-user library directory used at runtime.
    if not os.path.exists(os.path.join(mcpeso_dir, 'libleveldb.so.1.18')):
        print 'Copying library to %s.' % mcpeso_dir
        if not os.path.exists(mcpeso_dir):
            os.makedirs(mcpeso_dir)
        os.system('cp ./leveldb-mcpe/libleveldb.so.1.18 %s' % mcpeso_dir)
        os.system('ln -s %s/libleveldb.so.1.18 %s/libleveldb.so.1' % (mcpeso_dir, mcpeso_dir))
        os.system('ln -s %s/libleveldb.so.1.18 %s/libleveldb.so' % (mcpeso_dir, mcpeso_dir))
        print 'Done.'
    # #1# This form compiles dynamic shared library
    # include_dirs = [boosRoot, './leveldb-mcpe/include', '.']
    # library_dirs = [boostRoot, boostRoot + '/stage/lib', '/usr/local/lib', '.', './leveldb-mcpe']
    # libraries = ['boost_python', 'leveldb']
    # define_macros = [("LINUX", None),("_DEBUG", None),("_LINUX", None),("LEVELDB_PLATFORM_POSIX", None)]
    # extra_compile_args = ['-std=c++11'] + extra_compile_args
    # runtime_library_dirs = ['.', './leveldb-mcpe']
    # Need to copy libboost_python.so.1.55.0 in the current directory and link it
    # os.system('cp %s/stage/lib/libboost_python.so.1.55.0 .|ln -s '
    #           'libboost_python.so.1.55.0 libboost_python.so' % boostRoot)
    # 2# Static library build: need a boost python libs built with cxxflags"-fPIC" and linkflags="-fPIC"
    include_dirs = [boostRoot, './leveldb-mcpe/include', '.']
    library_dirs = [boostRoot, boostRoot + '/stage/lib', '/usr/local/lib', '.', mcpeso_dir]
    libraries = ['boost_python', 'leveldb']
    define_macros = [("LINUX", None), ("_DEBUG", None), ("_LINUX", None), ("LEVELDB_PLATFORM_POSIX", None),
                     ('OS_LINUX', None)]
    extra_compile_args = ['-std=c++11'] + extra_compile_args
    runtime_library_dirs = [mcpeso_dir]
files = ["leveldb_mcpe.cpp"]
# Build the single C++ extension using whatever configuration the platform
# branch above selected.
setup(name="leveldb_python_wrapper",
      ext_modules=[
          Extension(
              "leveldb_mcpe",
              files,
              library_dirs=library_dirs,
              libraries=libraries,
              include_dirs=include_dirs,
              depends=[],
              define_macros=define_macros,
              extra_compile_args=extra_compile_args,
              extra_link_args=extra_link_args,
              runtime_library_dirs=runtime_library_dirs)
      ]
      )
# Need to copy leveldb_mcpe.so in the current directory
# (destpath is only defined in the linux2 branch, hence the guard).
if sys.platform == 'linux2':
    os.system('cp $(ls -R build/*/leveldb_mcpe.so) %s' % destpath)
| |
import json
import pytest
from rancher import ApiError
from .common import random_str
from .conftest import wait_for, wait_for_condition
@pytest.mark.skip(reason="cluster-defaults disabled")
def test_generic_initial_defaults(admin_mc):
    """Schema defaults for a couple of cluster fields must match the
    values published in the cluster-defaults setting."""
    cclient = admin_mc.client
    schema_defaults = {}
    setting_defaults = {}

    cluster_fields = cclient.schema.types['cluster'].resourceFields
    policy_default = cluster_fields["enableNetworkPolicy"]["default"]
    if "enableNetworkPolicy" in cluster_fields:
        schema_defaults["enableNetworkPolicy"] = policy_default

    rke_fields = cclient.schema.types['rancherKubernetesEngineConfig'] \
        .resourceFields
    if "ignoreDockerVersion" in rke_fields:
        schema_defaults["ignoreDockerVersion"] = \
            rke_fields["ignoreDockerVersion"].data_dict()["default"]

    setting = cclient.list_setting(name="cluster-defaults")
    data = json.loads(setting['data'][0]['default'])
    setting_defaults["enableNetworkPolicy"] = data["enableNetworkPolicy"]
    setting_defaults["ignoreDockerVersion"] = \
        data["rancherKubernetesEngineConfig"]["ignoreDockerVersion"]
    assert schema_defaults == setting_defaults
def test_generic_initial_conditions(admin_mc, remove_resource):
    """A freshly created EKS cluster reports exactly the three bootstrap
    conditions and does not expose the exportYaml action."""
    cluster = admin_mc.client.create_cluster(
        name=random_str(), amazonElasticContainerServiceConfig={
            "accessKey": "asdfsd"})
    remove_resource(cluster)

    expected = (('Pending', 'True'),
                ('Provisioned', 'Unknown'),
                ('Waiting', 'Unknown'))
    assert len(cluster.conditions) == len(expected)
    for condition, (ctype, cstatus) in zip(cluster.conditions, expected):
        assert condition.type == ctype
        assert condition.status == cstatus
    assert 'exportYaml' not in cluster.actions
def test_eks_cluster_immutable_subnets(admin_mc, remove_resource):
    """EKS subnets may not change after creation; other updates still work."""
    original_subnets = [
        "subnet-045bfaeca7d3f1cb3",
        "subnet-02388a166136f98c4",
    ]
    cluster = admin_mc.client.create_cluster(
        name=random_str(), amazonElasticContainerServiceConfig={
            "accessKey": "asdfsd",
            "secretKey": "verySecretKey",
            "subnets": original_subnets})
    remove_resource(cluster)

    wait_for_condition(
        'DefaultProjectCreated', 'True', admin_mc.client, cluster)

    # Dropping a subnet must be rejected by the API.
    with pytest.raises(ApiError) as e:
        admin_mc.client.update_by_id_cluster(
            id=cluster.id,
            amazonElasticContainerServiceConfig={
                "accessKey": "asdfsd",
                "secretKey": "verySecretKey",
                "subnets": original_subnets[:1]})
    assert e.value.error.status == 422
    assert e.value.error.message == 'cannot modify EKS subnets after creation'

    # An update that keeps the subnets intact still goes through.
    new = admin_mc.client.update_by_id_cluster(
        id=cluster.id,
        name=cluster.name,
        description="update",
        amazonElasticContainerServiceConfig={
            # required field when updating KE clusters
            "driverName": "amazonelasticcontainerservice",
            "accessKey": "asdfsd",
            "secretKey": "verySecretKey",
            "subnets": original_subnets})
    assert new.id == cluster.id
    assert not hasattr(cluster, "description")
    assert hasattr(new, "description")
def test_rke_initial_conditions(admin_mc, remove_resource):
    """A new RKE cluster starts with the three bootstrap conditions and,
    unlike generic/EKS clusters, exposes the exportYaml action."""
    cluster = admin_mc.client.create_cluster(
        name=random_str(), rancherKubernetesEngineConfig={
            "accessKey": "asdfsd"})
    remove_resource(cluster)

    expected = (('Pending', 'True'),
                ('Provisioned', 'Unknown'),
                ('Waiting', 'Unknown'))
    assert len(cluster.conditions) == len(expected)
    for condition, (ctype, cstatus) in zip(cluster.conditions, expected):
        assert condition.type == ctype
        assert condition.status == cstatus
    assert 'exportYaml' in cluster.actions
def test_psp_enabled_set(admin_mc, remove_resource):
    """Asserts podSecurityPolicy field is used to populate pspEnabled in
    cluster capabilities"""
    admin_client = admin_mc.client
    rke_config = {
        "accessKey": "asdfsd",
        "services": {
            "kubeApi": {
                "podSecurityPolicy": True,
            }
        }
    }
    cluster = admin_client.create_cluster(
        name=random_str(), rancherKubernetesEngineConfig=rke_config)
    remove_resource(cluster)

    def psp_set_to_true():
        # Re-fetch the cluster; capabilities are populated asynchronously.
        refreshed = admin_client.by_id_cluster(id=cluster.id)
        caps = refreshed.get("capabilities")
        if caps is None:
            return None
        return caps.get("pspEnabled") is True

    wait_for(psp_set_to_true,
             fail_handler=lambda: "failed waiting for pspEnabled to be set")
def test_import_initial_conditions(admin_mc, remove_resource):
    """An imported (config-less) cluster starts with no conditions at all."""
    imported = admin_mc.client.create_cluster(name=random_str())
    remove_resource(imported)
    assert imported.conditions is None
def test_rke_k8s_deprecated_versions(admin_mc, remove_resource):
    """Creating a cluster on a deprecated Kubernetes version is rejected."""
    client = admin_mc.client
    setting = client.by_id_setting("k8s-versions-deprecated")
    # Mark one version deprecated for the duration of this test.
    client.update_by_id_setting(id=setting.id,
                                value="{\"v1.8.10-rancher1-1\":true}")
    with pytest.raises(ApiError) as e:
        cluster = client.create_cluster(
            name=random_str(), rancherKubernetesEngineConfig={
                "kubernetesVersion": "v1.8.10-rancher1-1"})
        remove_resource(cluster)
    assert e.value.error.status == 500
    assert e.value.error.message == 'Requested kubernetesVersion ' \
                                    'v1.8.10-rancher1-1 is deprecated'
    # Restore the setting so later tests are unaffected.
    client.update_by_id_setting(id=setting.id, value="")
def test_save_as_template_action_rbac(admin_mc, remove_resource, user_factory):
    # Admins may attempt saveAsTemplate (it can still fail with 503);
    # regular users must not even have the action on their clusters.
    cluster = admin_mc.client.create_cluster(name=random_str(),
                                             rancherKubernetesEngineConfig={
                                                 "services": {
                                                     "type":
                                                     "rkeConfigServices",
                                                     "kubeApi": {
                                                         "alwaysPullImages":
                                                         "false",
                                                         "podSecurityPolicy":
                                                         "false",
                                                         # NOTE(review): the backslash continuation below splices the
                                                         # next line's text (and any leading whitespace) into the key,
                                                         # so the key is not the plain "serviceNodePortRange" —
                                                         # presumably unintended; confirm against the RKE field name.
                                                         "serviceNodePort\
Range":
                                                         "30000-32767",
                                                         "type":
                                                         "kubeAPIService"
                                                     }
                                                 }
                                                 })
    remove_resource(cluster)
    assert cluster.conditions[0].type == 'Pending'
    assert cluster.conditions[0].status == 'True'
    try:
        admin_mc.client.action(obj=cluster, action_name="saveAsTemplate",
                               clusterTemplateName="template1",
                               clusterTemplateRevisionName="v1")
    except ApiError as e:
        # Action exists for admins but the backend may refuse it.
        assert e.error.status == 503
    user = user_factory()
    user_cluster = user.client.create_cluster(name=random_str())
    remove_resource(user_cluster)
    assert cluster.conditions[0].type == 'Pending'
    assert cluster.conditions[0].status == 'True'
    try:
        user.client.action(obj=user_cluster, action_name="saveAsTemplate")
    except AttributeError as e:
        # The action is absent from the user's schema entirely.
        assert e is not None
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pyFPlug - F-Plug library for Python
Copyright (C) 2014 SUNAGA Takahiro
This software is released under the MIT License.
http://opensource.org/licenses/mit-license.php
"""
import serial
import time
import datetime
import sys
from struct import *
def hexdump(s):
    """Print *s* as space-separated uppercase hex pairs."""
    print(hexdump_str(s))
def hexdump_str(s):
    """Return the bytes of *s* rendered as space-separated hex pairs."""
    return ' '.join('{0:0>2X}'.format(unpack('B', ch)[0]) for ch in s)
class UnknownState(Exception):
    """Raised when the device replies with an unexpected ESV/status code."""
    pass
def struct_num_values(fmt):
    """Return how many values ``unpack(fmt, ...)`` yields for *fmt*.

    Determined by unpacking a zero-filled buffer of the right size and
    counting the resulting tuple.
    """
    sz = calcsize(fmt)
    # b"\0" instead of "\0": identical under Python 2 (str == bytes) but
    # also correct under Python 3, where unpack() requires a bytes buffer.
    return len(unpack(fmt, b"\0" * sz))
# Default pause (seconds) inserted before every serial read/write.
COMM_WAIT_DEFAULT = 0.1
class FPlugDevice:
def __init__(self, port, timeout = 10, debug = False, ntry = 4, retry_wait = 2, comm_wait = COMM_WAIT_DEFAULT):
assert 0 < ntry < 10
self.port = port
last_error = None
for i in range(ntry):
try:
self.sfile = serial.Serial(self.port, 9600, timeout = timeout)
break
except serial.serialutil.SerialException, e:
last_error = e
time.sleep(retry_wait)
if last_error:
raise last_error
self.tid = 100
self.debug = debug
self.comm_wait_dur = comm_wait
def set_comm_wait(self, comm_wait):
self.comm_wait_dur = comm_wait
def close(self):
self.sfile.close()
self.sfile = None
self.port = None
def _sfile_read(self, *params):
time.sleep(self.comm_wait_dur)
return self.sfile.read(*params)
def _sfile_write(self, *params):
time.sleep(self.comm_wait_dur)
return self.sfile.write(*params)
def _sfile_set_timeout(self, timeout):
self.sfile.timeout = timeout
def clear_recv(self, timeout):
self.ensure_done(timeout)
def ensure_done(self, timeout = None):
if not timeout and not self.debug:
return
current_timeout = self.sfile.timeout
try:
self.sfile.timeout = 1
remain = self._sfile_read(1024)
if remain:
print "!! BUFFER REMAIN !!:", hexdump_str(remain)
finally:
self._sfile_set_timeout(current_timeout)
def read(self, nmax, nthru = 0):
if nthru:
thrustr = self._sfile_read(nthru)
if self.debug:
print "READ thru:", hexdump_str(thrustr)
if len(thrustr) < nthru:
if self.debug:
print "Cannot read thru data"
return None
rstr = self._sfile_read(nmax)
if self.debug:
print "READ:", hexdump_str(rstr)
return rstr
def read_byte(self, nthru = 0):
ch = self.read(1, nthru)
if ch:
return unpack('B', ch)[0]
else:
return None
def read_format(self, fmt, nthru = 0):
fmt = '<' + fmt
sz = calcsize(fmt)
read_data = self.read(sz, nthru = nthru)
if len(read_data) < sz:
return (None,) * struct_num_values(fmt)
return unpack(fmt, read_data)
def send_command(self, fmt, **params):
self.tid += 1
byte_template = fmt.split(' ')
fmt = "<"
data = []
for elem in byte_template:
if ':' in elem:
varname, fmtchar = elem.split(':')
fmt += fmtchar
data.append(params[varname])
elif len(elem) == 2:
fmt += 'B'
data.append(int(elem, 16))
elif len(elem) == 4:
fmt += 'H'
data.append(int(elem, 16))
else:
raise Exception('Unknown format')
sending_data = pack(fmt, *data)
if self.debug:
print "packing:", (fmt, data)
print "sending:", hexdump_str(sending_data)
ntry = 20
while True:
try:
# self.sfile.sendBreak(1.0)
self.sfile.write(sending_data)
break
except serial.serialutil.SerialException, e:
if ntry <= 0:
raise e
ntry -= 1
if self.debug:
print "Retry send:", ntry
time.sleep(0.5)
def plug_init(self):
""" (1.1 Plug Initialize Request) """
now = datetime.datetime.now()
self.send_command(
"10 81 tid:H 0E F0 00 00 22 00 61 02 97 02 hour:B minute:B 98 04 year:H month:B day:B",
tid = self.tid,
hour = now.hour,
minute = now.minute,
year = now.year,
month = now.month,
day = now.day
)
esv = self.read_byte(nthru = 10)
if esv == 0x71:
self.read(5)
# BUG of F-Plug?: too long message of 0x00 * 6
self.read(6)
self.ensure_done()
return True
elif esv == 0x51:
self.read(11)
self.ensure_done()
return False
else:
raise UnknownState("ESV={0}".format(esv))
def get_prop_value(self, prop_class_code, epc1, pdc1, value_format = 'h', remain_size = None):
self.send_command(
"10 81 tid:H 0E F0 00 00 prop_class_code:B 00 62 01 epc1:B pdc1:B",
tid = self.tid,
prop_class_code = prop_class_code,
epc1 = epc1,
pdc1 = pdc1
)
esv = self.read_byte(10)
if esv == 0x72:
_opc, _epc1, _pdc1, value = self.read_format('BBB' + value_format)
if remain_size:
self._sfile_read(remain_size)
self.ensure_done()
return value
elif esv == 0x52:
self.read(3)
if remain_size:
self._sfile_read(remain_size)
self.ensure_done()
return None
else:
raise UnknownState("ESV = {0}".format(esv))
def get_temperature(self):
"""
(2.3 Get temperature)
Returns: temp in degree(float) or None (failure)
"""
pval = self.get_prop_value(0x11, epc1 = 0xE0, pdc1 = 0x00)
return float(pval) / 10.0 if pval else None
def get_humidity(self):
"""
(2.6 Get humidity)
Returns: humidity % or None (failure)
"""
return self.get_prop_value(0x12, epc1 = 0xE0, pdc1 = 0x00)
def get_illuminance(self):
"""
(2.9 Get illuminance)
Returns: illuminance or None (failure)
"""
return self.get_prop_value(0x0D, epc1 = 0xE0, pdc1 = 0x00)
def get_power_realtime(self):
"""
(2.12 Get power real-time)
Returns: power or None (failure)
"""
pval = self.get_prop_value(0x22, epc1 = 0xE2, pdc1 = 0x02)
return float(pval) / 10.0 if pval else None
def get_data_dict(self):
return {
'temperature': self.get_temperature(),
'humidity': self.get_humidity(),
'illuminance': self.get_illuminance(),
'power': self.get_power_realtime(),
}
def get_prop_histry24(self, req_kind, dt, struct, vfunc):
self.send_command(
"10 82 tid:H req_kind:B hour:B minute:B year:H month:B day:B",
tid = self.tid,
req_kind = req_kind,
hour = dt.hour,
minute = dt.minute,
year = dt.year,
month = dt.month,
day = dt.day
)
is_fail = self.read_byte(nthru = 5)
result = []
for i in range(24):
v_tuple = self.read_format(struct)
if not is_fail:
result.append(vfunc(*v_tuple))
self.ensure_done()
if is_fail:
return None
else:
return result
def get_acc_power(self):
""" (2.1 get accumulated power value ) """
return self.get_prop_histry24(
0x11,
datetime.datetime.now(),
'HB',
lambda val, err: None if err else val
)
def get_power_data_history(self, time = None):
""" (2.16 get accumulated power value in past ) """
return self.get_prop_histry24(
0x16,
time,
'HB',
lambda val, err: None if err else val
)
def get_misc_data_history(self, time = None):
""" (2.16 get data history ) """
return self.get_prop_histry24(
0x17,
time,
'HBH',
lambda vt, vh, vi: (
None if vt == 0xEEEE else float(vt) / 10.0,
None if vh == 0xEE else vh,
None if vi == 0xEEEE else vi
)
)
def dump_all(self):
while True:
wlen = self.sfile.inWaiting()
if wlen == 0:
time.sleep(1)
continue
hexdump(self.sfile.read(wlen))
def set_led(self, state = 0):
self.send_command('05 state:B', state = state)
_rk, result = self.read_format('BB')
self.ensure_done()
return result
def led_on(self):
return self.set_led(1)
def led_off(self):
return self.set_led(0)
def set_datetime(self, dt = None):
if not dt:
dt = datetime.datetime.now()
self.send_command(
"07 hour:B minute:B year:H month:B day:B",
hour = dt.hour,
minute = dt.minute,
year = dt.year,
month = dt.month,
day = dt.day - 5
)
is_fail = self.read_byte(nthru = 1)
self.ensure_done()
if is_fail == 0:
return True
elif is_fail == 1:
return False
else:
raise UnknownState()
def clear(self):
self.comm_wait()
self.sfile.flush()
def test_fplug_dev():
    # Manual smoke test: exercises LED, clock and every sensor query
    # against a real plug paired on /dev/rfcomm0. Requires hardware and a
    # Python 2 interpreter; not an automated unit test.
    dev = FPlugDevice('/dev/rfcomm0', debug = True)
    # print "init:", dev.plug_init()
    print "on:", dev.led_on()
    time.sleep(0.5)
    print "off:", dev.led_off()
    print "set_datetime:", dev.set_datetime()
    print "TMP:", dev.get_temperature(), "degree C"
    print "HUM:", dev.get_humidity(), "%"
    print "ILL:", dev.get_illuminance(), ""
    print "PWR:", dev.get_power_realtime(), "W"
    print "ACC:", dev.get_acc_power()
    print "HIST PWR:", dev.get_power_data_history(datetime.datetime.now())
    print "HIST MISC:", dev.get_misc_data_history(datetime.datetime.now())
if __name__ == '__main__':
    test_fplug_dev()
| |
# -*- coding: utf-8 -*-
# File: monitor.py
import json
import numpy as np
import operator
import os
import re
import shutil
import time
from collections import defaultdict
from datetime import datetime
import six
import threading
from ..compat import tfv1 as tf
from ..libinfo import __git_version__
from ..tfutils.summary import create_image_summary, create_scalar_summary
from ..utils import fs, logger
from ..utils.develop import HIDE_DOC
from .base import Callback
__all__ = ['MonitorBase', 'Monitors',
'TFEventWriter', 'JSONWriter',
'ScalarPrinter', 'SendMonitorData',
'CometMLMonitor']
def image_to_nhwc(arr):
    """Normalize an image array to 4D NHWC.

    2D arrays become (1, H, W, 1); 3D arrays are treated as HWC when the
    last axis looks like a channel count (1/3/4), otherwise as NHW.

    Args:
        arr (np.ndarray): 2D, 3D or 4D image array.
    Returns:
        np.ndarray: the same data viewed as (N, H, W, C).
    Raises:
        ValueError: if arr has any other rank.
    """
    rank = arr.ndim
    if rank == 4:
        return arr
    if rank == 3:
        if arr.shape[-1] in [1, 3, 4]:
            # channel-last single image -> add batch axis
            return arr[np.newaxis, :]
        # batch of 2D images -> add channel axis
        return arr[:, :, :, np.newaxis]
    if rank == 2:
        return arr[np.newaxis, :, :, np.newaxis]
    raise ValueError("Array of shape {} is not an image!".format(arr.shape))
class MonitorBase(Callback):
    """
    Base class for monitors which monitor a training progress, by processing different types of
    summary/statistics from trainer.
    .. document private functions
    .. automethod:: _setup_graph
    """
    # Monitors run on every worker, not only the chief.
    _chief_only = False
    def setup_graph(self, trainer):
        # Set attributes following Callback.setup_graph
        self.trainer = trainer
        self.graph = tf.get_default_graph()
        self._setup_graph()
    def _setup_graph(self):
        """ Override this method to setup the monitor."""
        pass
    def process_summary(self, summary):
        """
        Process a tf.Summary.
        """
        pass
    def process(self, name, val):
        """
        Process a key-value pair.
        """
        pass
    def process_scalar(self, name, val):
        """
        Args:
            val: a scalar
        """
        pass
    def process_image(self, name, val):
        """
        Args:
            val (np.ndarray): 4D (NHWC) numpy array of images in range [0,255].
                If channel is 3, assumed to be RGB.
        """
        pass
    def process_event(self, evt):
        """
        Args:
            evt (tf.Event): the most basic format acceptable by tensorboard.
                It could include Summary, RunMetadata, LogMessage, and more.
        """
        pass
    # TODO process other types
class NoOpMonitor(MonitorBase):
    """A monitor that silently ignores everything it is given.

    Used as a stand-in when a real writer cannot be constructed (e.g. no
    logger directory configured).
    """
    def __init__(self, name=None):
        self._name = name

    def __str__(self):
        return "NoOpMonitor" if self._name is None else "NoOpMonitor({})".format(self._name)
class Monitors(Callback):
    """
    Merge monitors together for trainer to use.
    In training, each trainer will create a :class:`Monitors` instance,
    and you can access it through ``trainer.monitors``.
    You should use ``trainer.monitors`` for logging and it will dispatch your
    logs to each sub-monitor.
    """
    _chief_only = False
    def __init__(self, monitors):
        # A ScalarHistory is always appended so that get_latest()/
        # get_history() below have a backing store.
        self._scalar_history = ScalarHistory()
        self._monitors = monitors + [self._scalar_history]
        for m in self._monitors:
            assert isinstance(m, MonitorBase), m
    def _setup_graph(self):
        # scalar_history's other methods were not called.
        # but they are not useful for now
        self._scalar_history.setup_graph(self.trainer)
    def _dispatch(self, func):
        # Apply func to every registered sub-monitor.
        for m in self._monitors:
            func(m)
    def put_summary(self, summary):
        """
        Put a `tf.Summary`.

        Args:
            summary (tf.Summary or bytes): a serialized summary is parsed
                first. Simple-value entries additionally have their
                "towerN/" prefix and "-summary" suffix stripped before
                being forwarded as scalars.
        """
        if isinstance(summary, six.binary_type):
            summary = tf.Summary.FromString(summary)
        assert isinstance(summary, tf.Summary), type(summary)
        # TODO other types
        for val in summary.value:
            if val.WhichOneof('value') == 'simple_value':
                val.tag = re.sub('tower[0-9]+/', '', val.tag)   # TODO move to subclasses
                # TODO This hack is still needed, seem to disappear only when
                # compiled from source.
                suffix = '-summary'  # tensorflow#6150, tensorboard#59
                if val.tag.endswith(suffix):
                    val.tag = val.tag[:-len(suffix)]
                self._dispatch(lambda m: m.process_scalar(val.tag, val.simple_value))
        self._dispatch(lambda m: m.process_summary(summary))
    def put_scalar(self, name, val):
        """
        Put a scalar. It is forwarded both as a scalar and, converted to a
        summary, to the summary processors.
        """
        # numpy scalars are converted so downstream writers see plain
        # Python numbers.
        if isinstance(val, np.floating):
            val = float(val)
        if isinstance(val, np.integer):
            val = int(val)
        self._dispatch(lambda m: m.process_scalar(name, val))
        s = create_scalar_summary(name, val)
        self._dispatch(lambda m: m.process_summary(s))
    def put_image(self, name, val):
        """
        Put an image.
        Args:
            name (str):
            val (np.ndarray): 2D, 3D (HWC) or 4D (NHWC) numpy array of images
                in range [0,255]. If channel is 3, assumed to be RGB.
        """
        assert isinstance(val, np.ndarray)
        arr = image_to_nhwc(val)
        self._dispatch(lambda m: m.process_image(name, arr))
        s = create_image_summary(name, arr)
        self._dispatch(lambda m: m.process_summary(s))
    def put_event(self, evt):
        """
        Put an :class:`tf.Event`.
        `step` and `wall_time` fields of :class:`tf.Event` will be filled automatically.
        Args:
            evt (tf.Event):
        """
        evt.step = self.global_step
        evt.wall_time = time.time()
        self._dispatch(lambda m: m.process_event(evt))
    def get_latest(self, name):
        """
        Get latest scalar value of some data.
        If you run multiprocess training, keep in mind that
        the data is perhaps only available on chief process.
        Returns:
            scalar
        """
        return self._scalar_history.get_latest(name)[1]
    def get_history(self, name):
        """
        Get a history of the scalar value of some data.
        If you run multiprocess training, keep in mind that
        the data is perhaps only available on chief process.
        Returns:
            a list of (global_step, value) pairs: history data for this scalar
        """
        return self._scalar_history.get_history(name)
class TFEventWriter(MonitorBase):
    """
    Write summaries to TensorFlow event file.
    """
    def __init__(self, logdir=None, max_queue=10, flush_secs=120, split_files=False):
        """
        Args:
            logdir: ``logger.get_logger_dir()`` by default.
            max_queue, flush_secs: Same as in :class:`tf.summary.FileWriter`.
            split_files: if True, split events to multiple files rather than
                append to a single file. Useful on certain filesystems where append is expensive.
        """
        if logdir is None:
            logdir = logger.get_logger_dir()
        assert tf.gfile.IsDirectory(logdir), logdir
        self._logdir = fs.normpath(logdir)
        self._max_queue = max_queue
        self._flush_secs = flush_secs
        self._split_files = split_files
    def __new__(cls, logdir=None, max_queue=10, flush_secs=120, **kwargs):
        # When no log directory can be determined, construction degrades to
        # a NoOpMonitor (and __init__ above is never run on that object).
        if logdir is None:
            logdir = logger.get_logger_dir()
        if logdir is not None:
            return super(TFEventWriter, cls).__new__(cls)
        else:
            logger.warn("logger directory was not set. Ignore TFEventWriter.")
            return NoOpMonitor("TFEventWriter")
    def _setup_graph(self):
        self._writer = tf.summary.FileWriter(
            self._logdir, max_queue=self._max_queue, flush_secs=self._flush_secs)
    def _write_graph(self):
        self._writer.add_graph(self.graph)
    def _before_train(self):
        # Writing the graph is expensive (takes ~2min) when the graph is large.
        # Therefore use a separate thread. It will then run in the
        # background while TF is warming up in the first several iterations.
        self._write_graph_thread = threading.Thread(target=self._write_graph)
        self._write_graph_thread.daemon = True
        self._write_graph_thread.start()
    @HIDE_DOC
    def process_summary(self, summary):
        self._writer.add_summary(summary, self.global_step)
    @HIDE_DOC
    def process_event(self, evt):
        self._writer.add_event(evt)
    def _trigger(self):  # flush every epoch
        self._writer.flush()
        if self._split_files:
            self._writer.close()
            self._writer.reopen()  # open new file
    def _after_train(self):
        self._writer.close()
class JSONWriter(MonitorBase):
    """
    Write all scalar data to a json file under ``logger.get_logger_dir()``, grouped by their global step.
    If found an earlier json history file, will append to it.
    """

    FILENAME = 'stats.json'
    """
    The name of the json file. Do not change it.
    """

    def __new__(cls):
        # Degrade to a no-op monitor when there is no logger directory to write into.
        if logger.get_logger_dir():
            return super(JSONWriter, cls).__new__(cls)
        else:
            logger.warn("logger directory was not set. Ignore JSONWriter.")
            return NoOpMonitor("JSONWriter")

    @staticmethod
    def load_existing_json():
        """
        Look for an existing json under :meth:`logger.get_logger_dir()` named "stats.json",
        and return the loaded list of statistics if found. Returns None otherwise.
        """
        # renamed local from `dir` to avoid shadowing the builtin
        log_dir = logger.get_logger_dir()
        fname = os.path.join(log_dir, JSONWriter.FILENAME)
        if tf.gfile.Exists(fname):
            with open(fname) as f:
                stats = json.load(f)
                assert isinstance(stats, list), type(stats)
                return stats
        return None

    @staticmethod
    def load_existing_epoch_number():
        """
        Try to load the latest epoch number from an existing json stats file (if any).
        Returns None if not found.
        """
        stats = JSONWriter.load_existing_json()
        try:
            return int(stats[-1]['epoch_num'])
        except Exception:
            # covers stats being None, empty, or missing the 'epoch_num' key
            return None

    # initialize the stats here, because before_train from other callbacks may use it
    def _setup_graph(self):
        self._stats = []      # list of per-epoch stat dicts (the file content)
        self._stat_now = {}   # stats accumulated since the last dump
        self._last_gs = -1

    def _before_train(self):
        """Decide whether to append to an existing stats file or back it up and start fresh."""
        stats = JSONWriter.load_existing_json()
        self._fname = os.path.join(logger.get_logger_dir(), JSONWriter.FILENAME)
        if stats is not None:
            try:
                epoch = stats[-1]['epoch_num'] + 1
            except Exception:
                epoch = None

            # check against the current training settings
            # therefore this logic needs to be in before_train stage
            starting_epoch = self.trainer.loop.starting_epoch
            if epoch is None or epoch == starting_epoch:
                logger.info("Found existing JSON inside {}, will append to it.".format(logger.get_logger_dir()))
                self._stats = stats
            else:
                logger.warn(
                    "History epoch={} from JSON is not the predecessor of the current starting_epoch={}".format(
                        epoch - 1, starting_epoch))
                logger.warn("If you want to resume old training, either use `AutoResumeTrainConfig` "
                            "or correctly set the new starting_epoch yourself to avoid inconsistency. ")

                backup_fname = JSONWriter.FILENAME + '.' + datetime.now().strftime('%m%d-%H%M%S')
                backup_fname = os.path.join(logger.get_logger_dir(), backup_fname)

                logger.warn("Now, we will train with starting_epoch={} and backup old json to {}".format(
                    self.trainer.loop.starting_epoch, backup_fname))
                shutil.move(self._fname, backup_fname)

        # in case we have something to log here.
        self._trigger()

    def _trigger_step(self):
        # will do this in trigger_epoch
        if self.local_step != self.trainer.steps_per_epoch - 1:
            self._trigger()

    def _trigger_epoch(self):
        self._trigger()

    @HIDE_DOC
    def process_scalar(self, name, val):
        self._stat_now[name] = val

    def _trigger(self):
        """
        Add stats to json and dump to disk.
        Note that this method is idempotent.
        """
        if len(self._stat_now):
            self._stat_now['epoch_num'] = self.epoch_num
            self._stat_now['global_step'] = self.global_step

            self._stats.append(self._stat_now)
            self._stat_now = {}
            self._write_stat()

    def _write_stat(self):
        # Write to a temp file then move it into place, so a crash mid-write
        # cannot leave a truncated stats file behind.
        tmp_filename = self._fname + '.tmp'
        try:
            with open(tmp_filename, 'w') as f:
                json.dump(self._stats, f)
            shutil.move(tmp_filename, self._fname)
        except IOError:  # disk error sometimes..
            logger.exception("Exception in JSONWriter._write_stat()!")
class ScalarPrinter(MonitorBase):
    """
    Print scalar data into terminal.
    """

    def __init__(self, enable_step=False, enable_epoch=True,
                 whitelist=None, blacklist=None):
        """
        Args:
            enable_step, enable_epoch (bool): whether to print the
                monitor data (if any) between steps or between epochs.
            whitelist (list[str] or None): A list of regex. Only names
                matching some regex will be allowed for printing.
                Defaults to match all names.
            blacklist (list[str] or None): A list of regex. Names matching
                any regex will not be printed. Defaults to match no names.
        """
        def _compile(patterns):
            # None means "no filter"; otherwise pre-compile every regex once.
            if patterns is None:
                return None
            return {re.compile(p) for p in patterns}

        self._whitelist = _compile(whitelist)
        self._blacklist = _compile(blacklist if blacklist is not None else [])
        self._enable_step = enable_step
        self._enable_epoch = enable_epoch
        self._dic = {}

    def _before_train(self):
        # in case we have something to log here.
        self._trigger()

    def _trigger_step(self):
        if not self._enable_step:
            return
        last_step = self.trainer.steps_per_epoch - 1
        # Print now unless this is the last step of an epoch AND epoch
        # printing is enabled -- in that case defer to _trigger_epoch
        # so both print together.
        if self.local_step != last_step or not self._enable_epoch:
            self._trigger()

    def _trigger_epoch(self):
        if self._enable_epoch:
            self._trigger()

    @HIDE_DOC
    def process_scalar(self, name, val):
        self._dic[name] = float(val)

    def _trigger(self):
        # Print accumulated stats, then reset.
        def _matches_any(patterns, name):
            return any(p.search(name) is not None for p in patterns)

        for key in sorted(self._dic):
            allowed = self._whitelist is None or _matches_any(self._whitelist, key)
            if allowed and not _matches_any(self._blacklist, key):
                logger.info('{}: {:.5g}'.format(key, self._dic[key]))
        self._dic = {}
class ScalarHistory(MonitorBase):
    """
    Only internally used by monitors.
    """

    def __init__(self):
        # name -> list of (global_step, value) pairs, in arrival order
        self._dic = defaultdict(list)

    @HIDE_DOC
    def process_scalar(self, name, val):
        entry = (self.global_step, float(val))
        self._dic[name].append(entry)

    def get_latest(self, name):
        # NB: indexing the defaultdict inserts an empty list for unseen
        # names, matching the original behavior.
        hist = self._dic[name]
        if not hist:
            raise KeyError("No available data for the key: {}".format(name))
        return hist[-1]

    def get_history(self, name):
        return self._dic[name]
class SendMonitorData(MonitorBase):
    """
    Execute a command with some specific scalar monitor data.
    This is useful for, e.g. building a custom statistics monitor.

    It will try to send once receiving all the stats
    """
    def __init__(self, command, names):
        """
        Args:
            command(str): a command to execute. Use format string with stat
                names as keys.
            names(list or str): data name(s) to use.

        Example:
            Send the stats to your phone through pushbullet:

            .. code-block:: python

                SendMonitorData('curl -u your_id: https://api.pushbullet.com/v2/pushes \\
                         -d type=note -d title="validation error" \\
                         -d body={validation_error} > /dev/null 2>&1',
                         'validation_error')
        """
        self.command = command
        self.names = names if isinstance(names, list) else [names]
        self.dic = {}

    @HIDE_DOC
    def process_scalar(self, name, val):
        if name in self.names:
            self.dic[name] = val

    def _trigger_step(self):
        self._trigger()

    def _trigger(self):
        # Only send once every requested stat has arrived.
        if not all(k in self.dic for k in self.names):
            return
        values = {k: self.dic[k] for k in self.names}
        cmd = self.command.format(**values)
        ret = os.system(cmd)
        if ret != 0:
            logger.error("Command '{}' failed with ret={}!".format(cmd, ret))
        self.dic = {}
class CometMLMonitor(MonitorBase):
    """
    Send scalar data and the graph to https://www.comet.ml.

    Note:
        1. comet_ml requires you to `import comet_ml` before importing tensorflow or tensorpack.
        2. The "automatic output logging" feature of comet_ml will make the training progress bar appear to freeze.
           Therefore the feature is disabled by default.
    """
    def __init__(self, experiment=None, tags=None, **kwargs):
        """
        Args:
            experiment (comet_ml.Experiment): if provided, invalidate all other arguments
            tags (list[str]): experiment tags
            kwargs: arguments used to initialize :class:`comet_ml.Experiment`,
                such as project name, API key, etc.
                Refer to its documentation for details.
        """
        if experiment is not None:
            # Caller supplied a ready experiment: tags/kwargs must not also be given.
            self._exp = experiment
            assert tags is None and len(kwargs) == 0
        else:
            from comet_ml import Experiment
            kwargs.setdefault('log_code', True)  # though it's not functioning, git patch logging requires it
            kwargs.setdefault('auto_output_logging', None)  # see class Note 2 above
            self._exp = Experiment(**kwargs)
            if tags is not None:
                self._exp.add_tags(tags)
        self._exp.set_code("Code logging is impossible ...")
        self._exp.log_dependency('tensorpack', __git_version__)
    @property
    def experiment(self):
        """
        The :class:`comet_ml.Experiment` instance.
        """
        return self._exp
    def _before_train(self):
        # Upload the TF graph once, at the start of training.
        self._exp.set_model_graph(tf.get_default_graph())
    @HIDE_DOC
    def process_scalar(self, name, val):
        self._exp.log_metric(name, val, step=self.global_step)
    @HIDE_DOC
    def process_image(self, name, val):
        self._exp.set_step(self.global_step)
        for idx, v in enumerate(val):
            # Disambiguate multiple images logged under the same name at one step.
            log_name = "{}_step{}{}".format(
                name,
                self.global_step,
                "_" + str(idx) if len(val) > 1 else "")
            self._exp.log_image(v, image_format="jpeg", name=log_name, image_minmax=(0, 255))
    def _after_train(self):
        self._exp.end()
    def _after_epoch(self):
        self._exp.log_epoch_end(self.epoch_num)
| |
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package layer_model_helper
# Module caffe2.python.layer_model_helper
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, model_helper, schema, scope
from caffe2.python.modeling.parameter_sharing import (
parameter_sharing_context,
)
from caffe2.python.optimizer import get_param_device
from caffe2.python.layers import layers
from caffe2.proto import caffe2_pb2
from future.utils import viewitems
import logging
import numpy as np
import six
import copy
logger = logging.getLogger(__name__)
class LayerModelHelper(model_helper.ModelHelper):
    """
    Model helper for building models on top of layers abstractions.

    Each layer is the abstraction that is higher level than Operator. Layer
    is responsible for ownership of it's own parameters and can easily be
    instantiated in multiple nets possible with different sets of ops.
    As an example: one can easily instantiate predict and train nets from
    the same set of layers, where predict net will have subset of the
    operators from train net.
    """

    def __init__(self, name, input_feature_schema, trainer_extra_schema,
                 keep_blobs=False):
        ''' TODO(amalevich): more documentation on input args
        '''
        super(LayerModelHelper, self).__init__(name=name)
        self._layer_names = set()
        self._layers = []
        self._param_to_shape = {}

        # optimizer bookkeeping
        self.param_to_optim = {}

        self._default_optimizer = None
        self._loss = None
        self._output_schema = None

        # Connect Schema to self.net. That particular instance of schema will
        # be used for generation of the Layers across the network and would be
        # used for connection with Readers.
        self._input_feature_schema = schema.NewRecord(
            self.net,
            input_feature_schema
        ) if not keep_blobs else input_feature_schema.clone()
        self._trainer_extra_schema = schema.NewRecord(
            self.net,
            trainer_extra_schema
        ) if not keep_blobs else trainer_extra_schema.clone()
        self._metrics_schema = schema.Struct()

        self._init_global_constants()
        self.param_init_net = self.create_init_net('param_init_net')
        self._initialize_params = True

    def clear_output_schema(self):
        self._output_schema = None

    def set_initialize_params(self, initialize_params):
        self._initialize_params = initialize_params

    def add_metric_field(self, name, value):
        """Append a named field to the metrics schema; name must be unique."""
        assert name not in self._metrics_schema.fields, (
            "Try to add metric field twice: {}".format(name))
        self._metrics_schema = self._metrics_schema + schema.Struct(
            (name, value)
        )

    def add_global_constant(self, name, array=None, dtype=None,
                            initializer=None):
        """Register a constant blob shared by all init nets.

        Exactly one of ``array`` (concrete values) or ``initializer``
        (a callable producing the fill operator) must be given.
        """
        # This is global namescope for constants. They will be created in all
        # init_nets and there should be very few of them.
        assert name not in self.global_constants
        self.global_constants[name] = self.net.NextBlob(name)

        if array is not None:
            assert initializer is None,\
                "Only one from array and initializer should be specified"
            if dtype is None:
                array = np.array(array)
            else:
                array = np.array(array, dtype=dtype)

            # TODO: make GivenTensor generic
            op_name = None
            if array.dtype == np.int32:
                op_name = 'GivenTensorIntFill'
            elif array.dtype == np.int64:
                op_name = 'GivenTensorInt64Fill'
            elif array.dtype == str:
                # `np.str` was merely an alias of the builtin `str` (alias
                # removed in numpy>=1.24), so this comparison is unchanged.
                op_name = 'GivenTensorStringFill'
            elif array.dtype == bool:
                # likewise `np.bool` was an alias of the builtin `bool`
                op_name = 'GivenTensorBoolFill'
            else:
                op_name = 'GivenTensorFill'

            def initializer(blob_name):
                return core.CreateOperator(op_name,
                                           [],
                                           blob_name,
                                           shape=array.shape,
                                           values=array.flatten().tolist()
                                           )
        else:
            assert initializer is not None

        self.global_constant_initializers.append(
            initializer(self.global_constants[name]))
        return self.global_constants[name]

    def _init_global_constants(self):
        self.global_constants = {}
        self.global_constant_initializers = []
        self.add_global_constant('ONE', 1.0)
        self.add_global_constant('ZERO', 0.0)
        self.add_global_constant('ZERO_RANGE', [0, 0], dtype='int32')

    def _add_global_constants(self, init_net):
        for initializer_op in self.global_constant_initializers:
            init_net._net.op.extend([initializer_op])

    def create_init_net(self, name):
        """Create a new init net that already contains all global constants."""
        init_net = core.Net(name)
        self._add_global_constants(init_net)
        return init_net

    def _validate_param_shape(self, param_name, shape):
        # A shared parameter must be requested with the same shape everywhere.
        if param_name not in self._param_to_shape:
            return

        ref_shape = self._param_to_shape[param_name]

        if shape != ref_shape:
            raise ValueError(
                "Got inconsistent shapes between shared parameters "
                "when trying to map a blob in scope {0} to {1}.".format(
                    scope.CurrentNameScope(), param_name)
            )

    def create_param(self, param_name, shape, initializer, optimizer=None,
                     ps_param=None):
        """Create a LayerParameter, resolving the name through parameter sharing.

        ``initializer`` is a 1- or 2-tuple: (op_type,) or (op_type, op_kwargs).
        """
        if isinstance(param_name, core.BlobReference):
            param_name = str(param_name)
        elif isinstance(param_name, six.string_types):
            # Parameter name will be equal to current Namescope that got
            # resolved with the respect of parameter sharing of the scopes.
            param_name = parameter_sharing_context.get_parameter_name(
                param_name)
        else:
            # BUGFIX: the original raised a bare string, which is itself a
            # TypeError in Python 3; raise a proper exception instead.
            raise TypeError("Unsupported type for param_name")

        param_blob = core.BlobReference(param_name)

        if len(initializer) == 1:
            init_op_args = {}
        else:
            assert len(initializer) == 2
            init_op_args = copy.deepcopy(initializer[1])
        if shape is not None:
            assert 'shape' not in init_op_args
            init_op_args.update({'shape': shape})

        initializer_op = None
        if self._initialize_params:
            initializer_op = core.CreateOperator(
                initializer[0],
                [],
                param_blob,
                **init_op_args
            )

        param = layers.LayerParameter(
            parameter=param_blob,
            initializer=initializer_op,
            optimizer=optimizer,
            ps_param=ps_param,
        )

        self._validate_param_shape(param_name, shape)
        self._param_to_shape[param_name] = shape
        return param

    def next_layer_name(self, prefix):
        """Return a scoped layer name unique within this model."""
        base_name = core.ScopedName(prefix)
        name = base_name
        index = 0
        while name in self._layer_names:
            name = base_name + '_auto_' + str(index)
            index += 1

        self._layer_names.add(name)
        return name

    def add_layer(self, layer):
        self._layers.append(layer)
        for param in layer.get_parameters():
            assert isinstance(param.parameter, core.BlobReference)
            self.param_to_optim[str(param.parameter)] = \
                param.optimizer or self.default_optimizer
            self.params.append(param.parameter)

        # The primary value of adding everything to self.net - generation of
        # the operators right away, i.e. if error happens it'll be detected
        # immediately. Other than this - create_x_net should be called.
        layer.add_operators(self.net, self.param_init_net)
        return layer.output_schema

    def get_parameter_blobs(self):
        param_blobs = []
        for layer in self._layers:
            for param in layer.get_parameters():
                param_blobs.append(param.parameter)
        return param_blobs

    @property
    def default_optimizer(self):
        return self._default_optimizer

    @default_optimizer.setter
    def default_optimizer(self, optimizer):
        self._default_optimizer = optimizer

    @property
    def input_feature_schema(self):
        return self._input_feature_schema

    @property
    def trainer_extra_schema(self):
        return self._trainer_extra_schema

    @property
    def metrics_schema(self):
        """
        Returns the schema that represents model output that should be used for
        metric reporting.

        During the training/evaluation this schema will be appended to the
        schema that represents model output.
        """
        return self._metrics_schema

    @property
    def output_schema(self):
        assert self._output_schema is not None
        return self._output_schema

    @output_schema.setter
    def output_schema(self, schema):
        assert self._output_schema is None
        self._output_schema = schema

    @property
    def loss(self):
        assert self._loss is not None
        return self._loss

    @loss.setter
    def loss(self, loss):
        assert self._loss is None
        self._loss = loss

    def add_loss(self, loss, name='unnamed'):
        """Accumulate a loss into the model's loss struct, auto-suffixing duplicate names."""
        assert loss is not None, "Added loss should not be None"
        assert isinstance(loss, schema.Scalar) or isinstance(
            loss, schema.Struct
        ), "Added loss should be a scalar or a struct"
        if self._loss is None:
            self._loss = schema.Struct((name, loss))
        else:
            prefix_base = name + '_auto_'
            index = 0
            prefix = name
            while prefix in self._loss:
                prefix = prefix_base + str(index)
                index += 1
            loss_struct = schema.Struct((prefix, loss))
            self._loss = self._loss + loss_struct

    def __getattr__(self, layer):
        # Turn unknown attributes into layer/operator factories.
        if layer.startswith('__'):
            raise AttributeError(layer)

        # TODO(amalevich): Add add support for ifbpy inline documentation
        if layers.layer_exists(layer):
            def wrapper(*args, **kwargs):
                return self.add_layer(
                    layers.create_layer(layer, self, *args, **kwargs))
            return wrapper
        elif core.IsOperator(layer):
            def wrapper(*args, **kwargs):
                def apply_operator(net, in_record, out_record, **kwargs):
                    # TODO(amalevich): Switch to net.operator as soon as it gets
                    # landed
                    net.__getattr__(layer)(in_record.field_blobs(),
                                           out_record.field_blobs(),
                                           **kwargs)
                if 'name' not in kwargs:
                    kwargs['name'] = layer
                return self.add_layer(
                    layers.create_layer('Functional',
                                        self, *args, function=apply_operator,
                                        **kwargs))
            return wrapper
        else:
            raise ValueError(
                "Trying to create non-registered layer: {}".format(layer))

    @property
    def layers(self):
        return self._layers

    def apply_optimizers(
        self,
        train_net,
        train_init_net,
        grad_map,
        blob_to_device=None,
    ):
        """Run each parameter's optimizer on the appropriate device."""
        CPU = core.DeviceOption(caffe2_pb2.CPU)
        # if given, blob_to_device is a map from blob to device_option
        blob_to_device = blob_to_device or {}
        for param, optimizer in viewitems(self.param_to_optim):
            assert optimizer is not None, \
                "default optimizer must have been set in add_layer"
            # note that not all params has gradient and thus we sent None if
            # gradient does not exists
            device = get_param_device(
                param,
                grad_map.get(str(param)),
                param_to_device=blob_to_device,
                default_device=CPU,
            )
            with core.DeviceScope(device):
                optimizer(
                    train_net, train_init_net, param, grad_map.get(str(param)))

    def _GetOne(self):
        return self.global_constants['ONE']

    # An optimizer which allows us to do NO optimization
    def NoOptim(self, *args, **kwargs):
        pass
| |
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import tempfile
from telemetry.core import exceptions
from telemetry.core.platform import tracing_category_filter
from telemetry.core.platform import tracing_options
from telemetry.core import util
from telemetry.core import video
from telemetry import decorators
from telemetry.image_processing import image_util
from telemetry.image_processing import rgba_color
from telemetry.timeline import model
from telemetry.unittest_util import tab_test_case
def _IsDocumentVisible(tab):
return not tab.EvaluateJavaScript('document.hidden || document.webkitHidden')
class FakePlatformBackend(object):
  # Minimal stand-in for a platform backend: exposes a FakePlatform and
  # no-op browser lifecycle hooks, so tests can swap it in for the real one
  # (see testIsVideoCaptureRunning below).
  def __init__(self):
    self.platform = FakePlatform()
  def DidStartBrowser(self, _, _2):
    pass
  def WillCloseBrowser(self, _, _2):
    pass
class FakePlatform(object):
  """Fake platform that only tracks whether video capture is 'running'."""

  def __init__(self):
    self._capturing = False

  #pylint: disable=W0613
  def StartVideoCapture(self, min_bitrate_mbps):
    # The bitrate is irrelevant for the fake; just flip the flag.
    self._capturing = True

  def StopVideoCapture(self):
    self._capturing = False
    # Hand back a real Video object wrapping an empty temporary file.
    return video.Video(tempfile.NamedTemporaryFile())

  @property
  def is_video_capture_running(self):
    return self._capturing
class TabTest(tab_test_case.TabTestCase):
  # Integration tests exercising basic Tab operations against a live browser.
  def testNavigateAndWaitForCompleteState(self):
    self._tab.Navigate(self.UrlOfUnittestFile('blank.html'))
    self._tab.WaitForDocumentReadyStateToBeComplete()
  def testNavigateAndWaitForInteractiveState(self):
    self._tab.Navigate(self.UrlOfUnittestFile('blank.html'))
    self._tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
  def testTabBrowserIsRightBrowser(self):
    self.assertEquals(self._tab.browser, self._browser)
  def testRendererCrash(self):
    # chrome://crash deliberately kills the renderer process; the navigation
    # must surface it as a DevtoolsTargetCrashException.
    self.assertRaises(exceptions.DevtoolsTargetCrashException,
                      lambda: self._tab.Navigate('chrome://crash',
                                                 timeout=5))
  @decorators.Enabled('has tabs')
  def testActivateTab(self):
    util.WaitFor(lambda: _IsDocumentVisible(self._tab), timeout=5)
    new_tab = self._browser.tabs.New()
    new_tab.Navigate('about:blank')
    util.WaitFor(lambda: _IsDocumentVisible(new_tab), timeout=5)
    self.assertFalse(_IsDocumentVisible(self._tab))
    self._tab.Activate()
    util.WaitFor(lambda: _IsDocumentVisible(self._tab), timeout=5)
    self.assertFalse(_IsDocumentVisible(new_tab))
  def testTabUrl(self):
    self.assertEquals(self._tab.url, 'about:blank')
    url = self.UrlOfUnittestFile('blank.html')
    self._tab.Navigate(url)
    self.assertEquals(self._tab.url, url)
  #pylint: disable=W0212
  def testIsVideoCaptureRunning(self):
    # Swap in the fake backend so no real capture is started; restore it in
    # the finally block so later tests see the real one.
    original_platform_backend = self._tab.browser._platform_backend
    try:
      self._tab.browser._platform_backend = FakePlatformBackend()
      self.assertFalse(self._tab.is_video_capture_running)
      self._tab.StartVideoCapture(min_bitrate_mbps=2)
      self.assertTrue(self._tab.is_video_capture_running)
      self.assertIsNotNone(self._tab.StopVideoCapture())
      self.assertFalse(self._tab.is_video_capture_running)
    finally:
      self._tab.browser._platform_backend = original_platform_backend
  # Test failing on android: http://crbug.com/437057
  # and mac: http://crbug.com/468675
  @decorators.Disabled('android', 'chromeos', 'mac')
  def testHighlight(self):
    self.assertEquals(self._tab.url, 'about:blank')
    options = tracing_options.TracingOptions()
    options.enable_chrome_trace = True
    self._browser.platform.tracing_controller.Start(
        options, tracing_category_filter.CreateNoOverheadFilter())
    self._tab.Highlight(rgba_color.WEB_PAGE_TEST_ORANGE)
    self._tab.ClearHighlight(rgba_color.WEB_PAGE_TEST_ORANGE)
    trace_data = self._browser.platform.tracing_controller.Stop()
    timeline_model = model.TimelineModel(trace_data)
    renderer_thread = timeline_model.GetRendererThreadFromTabId(
        self._tab.id)
    # ClearHighlight should have emitted this async trace event.
    found_video_start_event = False
    for event in renderer_thread.async_slices:
      if event.name == '__ClearHighlight.video_capture_start':
        found_video_start_event = True
        break
    self.assertTrue(found_video_start_event)
  @decorators.Enabled('has tabs')
  def testGetRendererThreadFromTabId(self):
    self.assertEquals(self._tab.url, 'about:blank')
    # Create 3 tabs. The third tab is closed before we call
    # tracing_controller.Start.
    first_tab = self._tab
    second_tab = self._browser.tabs.New()
    second_tab.Navigate('about:blank')
    second_tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
    third_tab = self._browser.tabs.New()
    third_tab.Navigate('about:blank')
    third_tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
    third_tab.Close()
    options = tracing_options.TracingOptions()
    options.enable_chrome_trace = True
    self._browser.platform.tracing_controller.Start(
        options, tracing_category_filter.CreateNoOverheadFilter())
    first_tab.ExecuteJavaScript('console.time("first-tab-marker");')
    first_tab.ExecuteJavaScript('console.timeEnd("first-tab-marker");')
    second_tab.ExecuteJavaScript('console.time("second-tab-marker");')
    second_tab.ExecuteJavaScript('console.timeEnd("second-tab-marker");')
    trace_data = self._browser.platform.tracing_controller.Stop()
    timeline_model = model.TimelineModel(trace_data)
    # Assert that the renderer_thread of the first tab contains
    # 'first-tab-marker'.
    renderer_thread = timeline_model.GetRendererThreadFromTabId(
        first_tab.id)
    first_tab_markers = [
        renderer_thread.IterAllSlicesOfName('first-tab-marker')]
    self.assertEquals(1, len(first_tab_markers))
    # Close second tab and assert that the renderer_thread of the second tab
    # contains 'second-tab-marker'.
    second_tab.Close()
    renderer_thread = timeline_model.GetRendererThreadFromTabId(
        second_tab.id)
    second_tab_markers = [
        renderer_thread.IterAllSlicesOfName('second-tab-marker')]
    self.assertEquals(1, len(second_tab_markers))
    # Third tab wasn't available when we started tracing, so there is no
    # renderer_thread corresponding to it in the trace.
    self.assertIs(None, timeline_model.GetRendererThreadFromTabId(third_tab.id))
  @decorators.Disabled('android')  # https://crbug.com/463933
  def testTabIsAlive(self):
    self.assertEquals(self._tab.url, 'about:blank')
    self.assertTrue(self._tab.IsAlive())
    self._tab.Navigate(self.UrlOfUnittestFile('blank.html'))
    self.assertTrue(self._tab.IsAlive())
    self.assertRaises(exceptions.DevtoolsTargetCrashException,
        lambda: self._tab.Navigate(self.UrlOfUnittestFile('chrome://crash')))
    self.assertFalse(self._tab.IsAlive())
class GpuTabTest(tab_test_case.TabTestCase):
  @classmethod
  def CustomizeBrowserOptions(cls, options):
    # Screenshot support below relies on the GPU benchmarking extension.
    options.AppendExtraBrowserArgs('--enable-gpu-benchmarking')
  # Test flaky on mac: http://crbug.com/358664
  @decorators.Disabled('android', 'mac')
  def testScreenshot(self):
    if not self._tab.screenshot_supported:
      logging.warning('Browser does not support screenshots, skipping test.')
      return
    self.Navigate('green_rect.html')
    pixel_ratio = self._tab.EvaluateJavaScript('window.devicePixelRatio || 1')
    screenshot = self._tab.Screenshot(5)
    assert screenshot is not None
    # Pixels (0,0) and (31,31) lie inside the page's green rectangle;
    # (32,32) is just outside it and should be white. CSS coordinates are
    # scaled by the device pixel ratio to index into the raw screenshot.
    image_util.GetPixelColor(
        screenshot, 0 * pixel_ratio, 0 * pixel_ratio).AssertIsRGB(
            0, 255, 0, tolerance=2)
    image_util.GetPixelColor(
        screenshot, 31 * pixel_ratio, 31 * pixel_ratio).AssertIsRGB(
            0, 255, 0, tolerance=2)
    image_util.GetPixelColor(
        screenshot, 32 * pixel_ratio, 32 * pixel_ratio).AssertIsRGB(
            255, 255, 255, tolerance=2)
| |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2014 Intel Corporation
#
import sys
import os
import subprocess
import argparse
import platform
from glob import glob
from os.path import exists, basename
from os.path import join as path_join
# The PCI base class for all devices
# Matcher dictionaries below pair a PCI class code with vendor/device (and
# subsystem) IDs; a None field presumably acts as a wildcard — confirm
# against device_type_match, which is defined elsewhere in this script.
network_class = {'Class': '02', 'Vendor': None, 'Device': None,
                 'SVendor': None, 'SDevice': None}
acceleration_class = {'Class': '12', 'Vendor': None, 'Device': None,
                      'SVendor': None, 'SDevice': None}
ifpga_class = {'Class': '12', 'Vendor': '8086', 'Device': '0b30',
               'SVendor': None, 'SDevice': None}
encryption_class = {'Class': '10', 'Vendor': None, 'Device': None,
                    'SVendor': None, 'SDevice': None}
intel_processor_class = {'Class': '0b', 'Vendor': '8086', 'Device': None,
                         'SVendor': None, 'SDevice': None}
cavium_sso = {'Class': '08', 'Vendor': '177d', 'Device': 'a04b,a04d',
              'SVendor': None, 'SDevice': None}
cavium_fpa = {'Class': '08', 'Vendor': '177d', 'Device': 'a053',
              'SVendor': None, 'SDevice': None}
cavium_pkx = {'Class': '08', 'Vendor': '177d', 'Device': 'a0dd,a049',
              'SVendor': None, 'SDevice': None}
cavium_tim = {'Class': '08', 'Vendor': '177d', 'Device': 'a051',
              'SVendor': None, 'SDevice': None}
cavium_zip = {'Class': '12', 'Vendor': '177d', 'Device': 'a037',
              'SVendor': None, 'SDevice': None}
avp_vnic = {'Class': '05', 'Vendor': '1af4', 'Device': '1110',
            'SVendor': None, 'SDevice': None}
cnxk_bphy = {'Class': '08', 'Vendor': '177d', 'Device': 'a089',
             'SVendor': None, 'SDevice': None}
cnxk_bphy_cgx = {'Class': '08', 'Vendor': '177d', 'Device': 'a059,a060',
                 'SVendor': None, 'SDevice': None}
cnxk_dma = {'Class': '08', 'Vendor': '177d', 'Device': 'a081',
            'SVendor': None, 'SDevice': None}
cnxk_inl_dev = {'Class': '08', 'Vendor': '177d', 'Device': 'a0f0,a0f1',
                'SVendor': None, 'SDevice': None}
hisilicon_dma = {'Class': '08', 'Vendor': '19e5', 'Device': 'a122',
                 'SVendor': None, 'SDevice': None}
intel_dlb = {'Class': '0b', 'Vendor': '8086', 'Device': '270b,2710,2714',
             'SVendor': None, 'SDevice': None}
intel_ioat_bdw = {'Class': '08', 'Vendor': '8086',
                  'Device': '6f20,6f21,6f22,6f23,6f24,6f25,6f26,6f27,6f2e,6f2f',
                  'SVendor': None, 'SDevice': None}
intel_ioat_skx = {'Class': '08', 'Vendor': '8086', 'Device': '2021',
                  'SVendor': None, 'SDevice': None}
intel_ioat_icx = {'Class': '08', 'Vendor': '8086', 'Device': '0b00',
                  'SVendor': None, 'SDevice': None}
intel_idxd_spr = {'Class': '08', 'Vendor': '8086', 'Device': '0b25',
                  'SVendor': None, 'SDevice': None}
intel_ntb_skx = {'Class': '06', 'Vendor': '8086', 'Device': '201c',
                 'SVendor': None, 'SDevice': None}
intel_ntb_icx = {'Class': '06', 'Vendor': '8086', 'Device': '347e',
                 'SVendor': None, 'SDevice': None}
cnxk_sso = {'Class': '08', 'Vendor': '177d', 'Device': 'a0f9,a0fa',
            'SVendor': None, 'SDevice': None}
cnxk_npa = {'Class': '08', 'Vendor': '177d', 'Device': 'a0fb,a0fc',
            'SVendor': None, 'SDevice': None}
cn9k_ree = {'Class': '08', 'Vendor': '177d', 'Device': 'a0f4',
            'SVendor': None, 'SDevice': None}

# Device groups, one list per usage category handled by this script.
network_devices = [network_class, cavium_pkx, avp_vnic, ifpga_class]
baseband_devices = [acceleration_class]
crypto_devices = [encryption_class, intel_processor_class]
dma_devices = [cnxk_dma, hisilicon_dma,
               intel_idxd_spr, intel_ioat_bdw, intel_ioat_icx, intel_ioat_skx]
eventdev_devices = [cavium_sso, cavium_tim, intel_dlb, cnxk_sso]
mempool_devices = [cavium_fpa, cnxk_npa]
compress_devices = [cavium_zip]
regex_devices = [cn9k_ree]
misc_devices = [cnxk_bphy, cnxk_bphy_cgx, cnxk_inl_dev,
                intel_ntb_skx, intel_ntb_icx]

# global dict ethernet devices present. Dictionary indexed by PCI address.
# Each device within this is itself a dictionary of device properties
devices = {}
# list of supported DPDK drivers
dpdk_drivers = ["igb_uio", "vfio-pci", "uio_pci_generic"]
# list of currently loaded kernel modules (filled lazily by module_is_loaded)
loaded_modules = None

# command-line arg flags
b_flag = None
status_flag = False
force_flag = False
args = []
def module_is_loaded(module):
    '''Return True if the given kernel module is loaded (or built in).

    The result of the sysfs/builtin scan is cached in the module-level
    `loaded_modules` list on first use.
    '''
    global loaded_modules

    # the vfio-pci module's directory/.ko is named vfio_pci
    if module == 'vfio_pci':
        module = 'vfio-pci'

    if loaded_modules:
        return module in loaded_modules

    # Scan sysfs for every module the kernel knows about (both built-in
    # and dynamically loaded), normalizing vfio_pci -> vfio-pci.
    sysfs_path = '/sys/module/'
    mods = []
    for entry in os.listdir(sysfs_path):
        if not os.path.isdir(os.path.join(sysfs_path, entry)):
            continue
        mods.append('vfio-pci' if entry == 'vfio_pci' else entry)
    loaded_modules = mods

    # built-in modules also count as loaded
    release = platform.uname().release
    filename = os.path.join("/lib/modules/", release, "modules.builtin")
    if os.path.exists(filename):
        try:
            with open(filename) as f:
                for mod_path in f:
                    mods.append(os.path.splitext(os.path.basename(mod_path))[0])
        except IOError:
            print("Warning: cannot read list of built-in kernel modules")

    return module in loaded_modules
def check_modules():
    '''Probe the supported DPDK drivers and prune `dpdk_drivers` to the
    ones actually loaded; warn if binding was requested with none loaded.'''
    global dpdk_drivers

    # probe each supported driver once (dict preserves insertion order)
    found = {driver: module_is_loaded(driver) for driver in dpdk_drivers}

    # warn only when a bind operation was requested (-b)
    if b_flag is not None and not any(found.values()):
        print("Warning: no supported DPDK kernel modules are loaded", file=sys.stderr)

    # keep only the drivers that are actually available
    dpdk_drivers = [drv for drv, ok in found.items() if ok]
def has_driver(dev_id):
    '''Return True if the device at `dev_id` is bound to a driver.'''
    dev = devices[dev_id]
    return "Driver_str" in dev
def get_pci_device_details(dev_id, probe_lspci):
    '''Gather extra details for one PCI device.

    Optionally parses verbose lspci output, then looks up the network
    interface name (if any) via sysfs. Ssh_if/Active are initialized here
    and filled in by the caller.
    '''
    device = {}

    if probe_lspci:
        extra_info = subprocess.check_output(["lspci", "-vmmks", dev_id]).splitlines()
        # each lspci line is "Name:\tvalue"; store values under "<Name>_str"
        for line in extra_info:
            if not line:
                continue
            name, value = line.decode("utf8").split("\t", 1)
            device[name.strip(":") + "_str"] = value

    # check for a unix interface name
    device["Interface"] = ""
    for base, dirs, _ in os.walk("/sys/bus/pci/devices/%s/" % dev_id):
        if "net" in dirs:
            net_dir = os.path.join(base, "net")
            device["Interface"] = ",".join(os.listdir(net_dir))
            break

    # check if a port is used for ssh connection
    device["Ssh_if"] = False
    device["Active"] = ""
    return device
def clear_data():
    '''This function clears any old data'''
    # Reset the module-level device dictionary before a fresh scan.
    global devices
    devices = {}
def get_device_details(devices_type):
    '''This function populates the "devices" dictionary. The keys used are
    the pci addresses (domain:bus:slot.func). The values are themselves
    dictionaries - one for each NIC.

    "devices_type" is a list of class/vendor/device filter dictionaries;
    only devices matching one of the filters (see device_type_match) are
    recorded.'''
    global devices
    global dpdk_drivers

    # first loop through and read details for all devices
    # request machine readable format, with numeric IDs and String
    dev = {}
    dev_lines = subprocess.check_output(["lspci", "-Dvmmnnk"]).splitlines()
    for dev_line in dev_lines:
        if not dev_line:
            # a blank line terminates one lspci record
            if device_type_match(dev, devices_type):
                # Replace "Driver" with "Driver_str" to have consistency of
                # of dictionary key names
                if "Driver" in dev.keys():
                    dev["Driver_str"] = dev.pop("Driver")
                if "Module" in dev.keys():
                    dev["Module_str"] = dev.pop("Module")
                # use dict to make copy of dev
                devices[dev["Slot"]] = dict(dev)
            # Clear previous device's data
            dev = {}
        else:
            # record lines look like "Name:\t<text> [<numeric id>]"
            name, value = dev_line.decode("utf8").split("\t", 1)
            value_list = value.rsplit(' ', 1)
            if value_list:
                # String stored in <name>_str
                dev[name.rstrip(":") + '_str'] = value_list[0]
            # Numeric IDs
            dev[name.rstrip(":")] = value_list[len(value_list) - 1] \
                .rstrip("]").lstrip("[")

    if devices_type == network_devices:
        # check what is the interface if any for an ssh connection if
        # any to this host, so we can mark it later.
        ssh_if = []
        route = subprocess.check_output(["ip", "-o", "route"])
        # filter out all lines for 169.254 routes (link-local, not ssh)
        route = "\n".join(filter(lambda ln: not ln.startswith("169.254"),
                                 route.decode().splitlines()))
        rt_info = route.split()
        # every "dev <ifname>" token pair names a routed interface
        for i in range(len(rt_info) - 1):
            if rt_info[i] == "dev":
                ssh_if.append(rt_info[i + 1])

    # based on the basic info, get extended text details
    for d in devices.keys():
        if not device_type_match(devices[d], devices_type):
            continue

        # get additional info and add it to existing data
        devices[d] = devices[d].copy()
        # No need to probe lspci
        devices[d].update(get_pci_device_details(d, False).items())

        if devices_type == network_devices:
            # mark devices whose interface carries a route (likely ssh)
            for _if in ssh_if:
                if _if in devices[d]["Interface"].split(","):
                    devices[d]["Ssh_if"] = True
                    devices[d]["Active"] = "*Active*"
                    break

        # add igb_uio to list of supporting modules if needed
        if "Module_str" in devices[d]:
            for driver in dpdk_drivers:
                if driver not in devices[d]["Module_str"]:
                    devices[d]["Module_str"] = \
                        devices[d]["Module_str"] + ",%s" % driver
        else:
            devices[d]["Module_str"] = ",".join(dpdk_drivers)

        # make sure the driver and module strings do not have any duplicates
        if has_driver(d):
            modules = devices[d]["Module_str"].split(",")
            if devices[d]["Driver_str"] in modules:
                modules.remove(devices[d]["Driver_str"])
            devices[d]["Module_str"] = ",".join(modules)
def device_type_match(dev, devices_type):
    '''Return True if the device "dev" matches at least one of the filters
    in "devices_type".

    Each filter is a dictionary whose "Class" entry is compared against
    the first two characters of the device class; every other non-None
    entry may hold a comma-separated list of acceptable values for the
    same-named field of "dev". A filter matches when all of its non-None
    entries match.'''
    for dev_type in devices_type:
        # a filter matches when every non-None parameter in it is matched
        param_count = len(
            [x for x in dev_type.values() if x is not None])
        match_count = 0
        if dev["Class"][0:2] == dev_type["Class"]:
            match_count += 1
        for key, accepted in dev_type.items():
            if key != 'Class' and accepted:
                # an entry may list several acceptable values, comma-separated
                for value in accepted.split(','):
                    if value.strip(' ') == dev[key]:
                        match_count += 1
        # count must be the number of non None parameters to match
        if match_count == param_count:
            return True
    return False
def dev_id_from_dev_name(dev_name):
    '''Take a device "name" - a string passed in by user to identify a NIC
    device, and determine the device id - i.e. the domain:bus:slot.func - for
    it, which can then be used to index into the devices array'''
    # the name may already be a full PCI address
    if dev_name in devices:
        return dev_name
    # or a PCI address missing the leading domain part
    candidate = "0000:" + dev_name
    if candidate in devices:
        return candidate
    # or a kernel interface name, e.g. eth1
    for dev in devices.values():
        if dev_name in dev["Interface"].split(","):
            return dev["Slot"]
    # nothing matched - report the problem
    raise ValueError("Unknown device: %s. "
                     "Please specify device in \"bus:slot.func\" format" % dev_name)
def unbind_one(dev_id, force):
    '''Unbind the device identified by "dev_id" from its current driver.

    Refuses to unbind an interface the routing table marks as active
    unless "force" is set. Exits the program if the sysfs write fails.'''
    dev = devices[dev_id]
    if not has_driver(dev_id):
        print("Notice: %s %s %s is not currently managed by any driver" %
              (dev["Slot"], dev["Device_str"], dev["Interface"]), file=sys.stderr)
        return

    # prevent us disconnecting ourselves
    if dev["Ssh_if"] and not force:
        print("Warning: routing table indicates that interface %s is active. "
              "Skipping unbind" % dev_id, file=sys.stderr)
        return

    # write to /sys to unbind; the context manager guarantees the handle
    # is closed even when the write itself fails (the original leaked the
    # handle and let a write error escape as an unhandled exception)
    filename = "/sys/bus/pci/drivers/%s/unbind" % dev["Driver_str"]
    try:
        with open(filename, "a") as f:
            f.write(dev_id)
    except OSError as err:
        sys.exit("Error: unbind failed for %s - Cannot open %s: %s" %
                 (dev_id, filename, err))
def bind_one(dev_id, driver, force):
    '''Bind the device given by "dev_id" to the driver "driver". If the device
    is already bound to a different driver, it will be unbound first.

    Errors are reported on stderr; if the final bind fails after an
    unbind, the previously-bound driver is restored.'''
    dev = devices[dev_id]
    saved_driver = None  # used to rollback any unbind in case of failure

    # prevent disconnection of our ssh session
    if dev["Ssh_if"] and not force:
        print("Warning: routing table indicates that interface %s is active. "
              "Not modifying" % dev_id, file=sys.stderr)
        return

    # unbind any existing drivers we don't want
    if has_driver(dev_id):
        if dev["Driver_str"] == driver:
            print("Notice: %s already bound to driver %s, skipping" %
                  (dev_id, driver), file=sys.stderr)
            return
        saved_driver = dev["Driver_str"]
        unbind_one(dev_id, force)
        dev["Driver_str"] = ""  # clear driver string

    # For kernels >= 3.15 driver_override can be used to specify the driver
    # for a device rather than relying on the driver to provide a positive
    # match of the device.  The existing process of looking up
    # the vendor and device ID, adding them to the driver new_id,
    # will erroneously bind other devices too which has the additional burden
    # of unbinding those devices
    if driver in dpdk_drivers:
        filename = "/sys/bus/pci/devices/%s/driver_override" % dev_id
        if exists(filename):
            try:
                f = open(filename, "w")
            except OSError as err:
                print("Error: bind failed for %s - Cannot open %s: %s"
                      % (dev_id, filename, err), file=sys.stderr)
                return
            try:
                f.write("%s" % driver)
                f.close()
            except OSError as err:
                print("Error: bind failed for %s - Cannot write driver %s to "
                      "PCI ID: %s" % (dev_id, driver, err), file=sys.stderr)
                return
    # For kernels < 3.15 use new_id to add PCI id's to the driver
    else:
        filename = "/sys/bus/pci/drivers/%s/new_id" % driver
        try:
            f = open(filename, "w")
        except OSError as err:
            print("Error: bind failed for %s - Cannot open %s: %s"
                  % (dev_id, filename, err), file=sys.stderr)
            return
        try:
            # Convert Device and Vendor Id to int to write to new_id
            f.write("%04x %04x" % (int(dev["Vendor"], 16),
                                   int(dev["Device"], 16)))
            f.close()
        except OSError as err:
            print("Error: bind failed for %s - Cannot write new PCI ID to "
                  "driver %s: %s" % (dev_id, driver, err), file=sys.stderr)
            return

    # do the bind by writing to /sys
    filename = "/sys/bus/pci/drivers/%s/bind" % driver
    try:
        f = open(filename, "a")
    except OSError as err:
        print("Error: bind failed for %s - Cannot open %s: %s"
              % (dev_id, filename, err), file=sys.stderr)
        if saved_driver is not None:  # restore any previous driver
            bind_one(dev_id, saved_driver, force)
        return
    try:
        f.write(dev_id)
        f.close()
    except OSError as err:
        # for some reason, closing dev_id after adding a new PCI ID to new_id
        # results in IOError. however, if the device was successfully bound,
        # we don't care for any errors and can safely ignore IOError
        tmp = get_pci_device_details(dev_id, True)
        if "Driver_str" in tmp and tmp["Driver_str"] == driver:
            return
        print("Error: bind failed for %s - Cannot bind to driver %s: %s"
              % (dev_id, driver, err), file=sys.stderr)
        if saved_driver is not None:  # restore any previous driver
            bind_one(dev_id, saved_driver, force)
        return

    # For kernels > 3.15 driver_override is used to bind a device to a driver.
    # Before unbinding it, overwrite driver_override with empty string so that
    # the device can be bound to any other driver
    filename = "/sys/bus/pci/devices/%s/driver_override" % dev_id
    if exists(filename):
        try:
            f = open(filename, "w")
        except OSError as err:
            sys.exit("Error: unbind failed for %s - Cannot open %s: %s"
                     % (dev_id, filename, err))
        try:
            # a single NUL byte clears the override in the kernel
            f.write("\00")
            f.close()
        except OSError as err:
            sys.exit("Error: unbind failed for %s - Cannot write %s: %s"
                     % (dev_id, filename, err))
def unbind_all(dev_list, force=False):
    """Unbind method, takes a list of device locations.

    The magic value "dpdk" as first element unbinds every device that is
    currently bound to a DPDK driver."""
    if dev_list[0] == "dpdk":
        for d in devices.keys():
            if "Driver_str" in devices[d]:
                if devices[d]["Driver_str"] in dpdk_drivers:
                    unbind_one(devices[d]["Slot"], force)
        return

    try:
        # force evaluation here: map() is lazy in Python 3, so without
        # list() a ValueError raised by dev_id_from_dev_name would only
        # surface inside the for loop below, escaping this try block
        dev_list = list(map(dev_id_from_dev_name, dev_list))
    except ValueError as ex:
        print(ex)
        sys.exit(1)

    for d in dev_list:
        unbind_one(d, force)
def bind_all(dev_list, driver, force=False):
    """Bind method, takes a list of device locations and a target driver."""
    global devices

    # a common user error is to forget to specify the driver the devices need to
    # be bound to. check if the driver is a valid device, and if it is, show
    # a meaningful error.
    try:
        dev_id_from_dev_name(driver)
        # if we've made it this far, this means that the "driver" was a valid
        # device string, so it's probably not a valid driver name.
        sys.exit("Error: Driver '%s' does not look like a valid driver. "
                 "Did you forget to specify the driver to bind devices to?" % driver)
    except ValueError:
        # driver generated error - it's not a valid device ID, so all is well
        pass

    # check if we're attempting to bind to a driver that isn't loaded
    if not module_is_loaded(driver.replace('-', '_')):
        sys.exit("Error: Driver '%s' is not loaded." % driver)

    try:
        # force evaluation here: map() is lazy in Python 3, so without
        # list() a ValueError from dev_id_from_dev_name would escape the
        # except clause and surface later as an unhandled exception
        dev_list = list(map(dev_id_from_dev_name, dev_list))
    except ValueError as ex:
        sys.exit(ex)

    for d in dev_list:
        bind_one(d, driver, force)

    # For kernels < 3.15 when binding devices to a generic driver
    # (i.e. one that doesn't have a PCI ID table) using new_id, some devices
    # that are not bound to any other driver could be bound even if no one has
    # asked them to. hence, we check the list of drivers again, and see if
    # some of the previously-unbound devices were erroneously bound.
    if not exists("/sys/bus/pci/devices/%s/driver_override" % d):
        for d in devices.keys():
            # skip devices that were already bound or that we know should be bound
            if "Driver_str" in devices[d] or d in dev_list:
                continue

            # update information about this device: in Python 3 dict views
            # cannot be concatenated with "+", so merge with update() instead
            devices[d] = dict(devices[d])
            devices[d].update(get_pci_device_details(d, True).items())

            # check if updated information indicates that the device was bound
            if "Driver_str" in devices[d]:
                unbind_one(d, force)
def display_devices(title, dev_list, extra_params=None):
    '''Displays to the user the details of a list of devices given in
    "dev_list". The "extra_params" parameter, if given, should contain a string
    with %()s fields in it for replacement by the named fields in each
    device's dictionary.'''
    print("\n%s" % title)
    print("=" * len(title))

    if not dev_list:
        entries = ["<none>"]
    else:
        entries = []
        for dev in dev_list:
            if extra_params is None:
                entries.append("%s '%s'" % (dev["Slot"], dev["Device_str"]))
            else:
                entries.append("%s '%s %s' %s" % (dev["Slot"],
                                                  dev["Device_str"],
                                                  dev["Device"],
                                                  extra_params % dev))
    # sort so that the entries appear in PCI order, then print one per line
    print("\n".join(sorted(entries)))
def show_device_status(devices_type, device_name, if_field=False):
    '''Print the status of all devices matching "devices_type", grouped by
    whether they use a DPDK driver, a kernel driver, or no driver at all.'''
    global dpdk_drivers
    kernel_drv = []
    dpdk_drv = []
    no_drv = []

    # split the matching devices into the three categories above
    for d in devices.keys():
        dev = devices[d]
        if not device_type_match(dev, devices_type):
            continue
        if not has_driver(d):
            no_drv.append(dev)
        elif dev["Driver_str"] in dpdk_drivers:
            dpdk_drv.append(dev)
        else:
            kernel_drv.append(dev)

    # don't bother displaying anything if there are no devices
    if not (dpdk_drv or kernel_drv or no_drv):
        msg = "No '%s' devices detected" % device_name
        print("")
        print(msg)
        print("".join('=' * len(msg)))
        return

    # print each category separately, so we can clearly see what's used by DPDK
    if dpdk_drv:
        display_devices("%s devices using DPDK-compatible driver" % device_name,
                        dpdk_drv, "drv=%(Driver_str)s unused=%(Module_str)s")
    if kernel_drv:
        if_text = "if=%(Interface)s " if if_field else ""
        display_devices("%s devices using kernel driver" % device_name,
                        kernel_drv,
                        if_text + "drv=%(Driver_str)s "
                        "unused=%(Module_str)s %(Active)s")
    if no_drv:
        display_devices("Other %s devices" % device_name, no_drv,
                        "unused=%(Module_str)s")
def show_status():
    '''Function called when the script is passed the "--status" option.
    Displays to the user what devices are bound to the igb_uio driver, the
    kernel driver or to no driver'''
    # (selector tag, device filter list, display label, show interface column)
    groups = [
        ("net", network_devices, "Network", True),
        ("baseband", baseband_devices, "Baseband", False),
        ("crypto", crypto_devices, "Crypto", False),
        ("dma", dma_devices, "DMA", False),
        ("event", eventdev_devices, "Eventdev", False),
        ("mempool", mempool_devices, "Mempool", False),
        ("compress", compress_devices, "Compress", False),
        ("misc", misc_devices, "Misc (rawdev)", False),
        ("regex", regex_devices, "Regex", False),
    ]
    for tag, dev_group, label, if_field in groups:
        if status_dev in [tag, "all"]:
            show_device_status(dev_group, label, if_field=if_field)
def pci_glob(arg):
    '''Returns a list containing either:
    * List of PCI B:D:F matching arg, using shell wildcards e.g. 80:04.*
    * Only the passed arg if matching list is empty'''
    sysfs_path = "/sys/bus/pci/devices"
    # try the glob as given, then again with the default PCI domain prefixed
    for pattern in (arg, '0000:' + arg):
        matches = glob(path_join(sysfs_path, pattern))
        if matches:
            return [basename(m) for m in matches]
    # no sysfs entry matched - hand back the argument untouched
    return [arg]
def parse_args():
    '''Parses the command-line arguments given by the user and takes the
    appropriate action for each.

    Sets the module-level b_flag / status_flag / status_dev / force_flag /
    args globals; exits with an error when no action or no devices are
    given.'''
    global b_flag
    global status_flag
    global status_dev
    global force_flag
    global args

    parser = argparse.ArgumentParser(
        description='Utility to bind and unbind devices from Linux kernel',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
---------

To display current device status:
        %(prog)s --status

To display current network device status:
        %(prog)s --status-dev net

To bind eth1 from the current driver and move to use vfio-pci
        %(prog)s --bind=vfio-pci eth1

To unbind 0000:01:00.0 from using any driver
        %(prog)s -u 0000:01:00.0

To bind 0000:02:00.0 and 0000:02:00.1 to the ixgbe kernel driver
        %(prog)s -b ixgbe 02:00.0 02:00.1
""")
    parser.add_argument(
        '-s',
        '--status',
        action='store_true',
        help="Print the current status of all known devices.")
    parser.add_argument(
        '--status-dev',
        help="Print the status of given device group.",
        choices=['baseband', 'compress', 'crypto', 'dma', 'event',
                 'mempool', 'misc', 'net', 'regex'])
    bind_group = parser.add_mutually_exclusive_group()
    bind_group.add_argument(
        '-b',
        '--bind',
        metavar='DRIVER',
        help="Select the driver to use or \"none\" to unbind the device")
    bind_group.add_argument(
        '-u',
        '--unbind',
        action='store_true',
        help="Unbind a device (equivalent to \"-b none\")")
    parser.add_argument(
        '--force',
        action='store_true',
        help="""
Override restriction on binding devices in use by Linux"
WARNING: This can lead to loss of network connection and should be used with caution.
""")
    parser.add_argument(
        'devices',
        metavar='DEVICE',
        nargs='*',
        help="""
Device specified as PCI "domain:bus:slot.func" syntax or "bus:slot.func" syntax.
For devices bound to Linux kernel drivers, they may be referred to by interface name.
""")
    opt = parser.parse_args()

    if opt.status_dev:
        status_flag = True
        status_dev = opt.status_dev
    if opt.status:
        status_flag = True
        status_dev = "all"
    if opt.force:
        force_flag = True
    if opt.bind:
        b_flag = opt.bind
    elif opt.unbind:
        b_flag = "none"
    args = opt.devices

    if not b_flag and not status_flag:
        # fixed typo in the user-facing message: "--ubind" -> "--unbind"
        print("Error: No action specified for devices. "
              "Please give a --bind, --unbind or --status option",
              file=sys.stderr)
        parser.print_usage()
        sys.exit(1)

    if b_flag and not args:
        print("Error: No devices specified.", file=sys.stderr)
        parser.print_usage()
        sys.exit(1)

    # resolve any PCI globs in the args
    new_args = []
    for arg in args:
        new_args.extend(pci_glob(arg))
    args = new_args
def do_arg_actions():
    '''do the actual action requested by the user'''
    global b_flag
    global status_flag
    global force_flag
    global args

    # bind / unbind first, if requested
    if b_flag in ["none", "None"]:
        unbind_all(args, force_flag)
    elif b_flag is not None:
        bind_all(args, b_flag, force_flag)

    if status_flag:
        if b_flag is not None:
            # a bind/unbind was performed above - refresh the device data
            clear_data()
            for dev_group in (network_devices, baseband_devices,
                              crypto_devices, dma_devices, eventdev_devices,
                              mempool_devices, compress_devices,
                              regex_devices, misc_devices):
                get_device_details(dev_group)
        show_status()
def main():
    '''program main function'''
    # check if lspci is installed, suppress any output
    with open(os.devnull, 'w') as devnull:
        ret = subprocess.call(['which', 'lspci'],
                              stdout=devnull, stderr=devnull)
    if ret != 0:
        sys.exit("'lspci' not found - please install 'pciutils'")

    parse_args()
    check_modules()
    clear_data()
    # populate the device dictionary for every supported device group
    for dev_group in (network_devices, baseband_devices, crypto_devices,
                      dma_devices, eventdev_devices, mempool_devices,
                      compress_devices, regex_devices, misc_devices):
        get_device_details(dev_group)
    do_arg_actions()
# script entry point: only run when executed directly, not when imported
if __name__ == "__main__":
    main()
| |
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import mmap
import os
import re
import sys
import argparse
# command-line interface: an optional list of files to check, plus a switch
# that disables the exceptions list so every hit is reported
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
parser.add_argument("-e", "--skip-exceptions", help="ignore hack/verify-flags/exceptions.txt and print all output", action="store_true")
args = parser.parse_args()
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
def is_binary(pathname):
    """Return True if the given file looks binary (or cannot be read).

    A file counts as binary when a null byte occurs in its content. The
    file is read in binary mode so undecodable (non-UTF-8) text is not
    misclassified, and any I/O error (missing file, permission denied)
    reports the file as binary so the caller simply skips it.

    Fixes in this version: the original opened in text mode, used a bare
    "except:", and its "finally: f.close()" raised NameError whenever
    open() itself failed (f was never bound).

    @attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010
    @author: Trent Mick <TrentM@ActiveState.com>
    @author: Jorge Orpinel <jorge@orpinel.com>"""
    CHUNKSIZE = 1024
    try:
        with open(pathname, 'rb') as f:
            while True:
                chunk = f.read(CHUNKSIZE)
                if b'\0' in chunk:  # found null byte
                    return True
                if len(chunk) < CHUNKSIZE:
                    break  # done
    except EnvironmentError:
        # unreadable files are treated as binary and skipped by callers
        return True
    return False
def get_all_files(rootdir):
    """Walk "rootdir" and return every checkable file path, skipping
    vendored directories, the flag bookkeeping files, image formats and
    binary content."""
    collected = []
    skip_dirs = ('Godeps', 'third_party', '.git')
    skip_files = ('exceptions.txt', 'known-flags.txt')
    for root, dirs, files in os.walk(rootdir):
        # don't visit certain dirs (pruned in place so walk skips them)
        for dirname in skip_dirs:
            if dirname in dirs:
                dirs.remove(dirname)
        for fname in skip_files:
            if fname in files:
                files.remove(fname)
        for name in files:
            if name.endswith(".svg") or name.endswith(".gliffy"):
                continue
            pathname = os.path.join(root, name)
            if not is_binary(pathname):
                collected.append(pathname)
    return collected
def normalize_files(rootdir, files):
    """Drop vendored/skipped paths from "files" and absolutize the
    remaining relative paths against "rootdir"."""
    skip_markers = ['Godeps', 'third_party', 'exceptions.txt', 'known-flags.txt']
    kept = [
        f for f in files
        if not any(marker in f for marker in skip_markers)
        and not f.endswith(".svg")
        and not f.endswith(".gliffy")
    ]
    # anchor relative paths at the repository root
    return [f if os.path.isabs(f) else os.path.join(rootdir, f) for f in kept]
def line_has_bad_flag(line, flagre):
    """Return True when "line" contains a flag spelled with an underscore
    that is not one of the recognized template/variable usages."""
    results = flagre.findall(line)
    if not results:
        return False
    # every path in the original decided on the first match, so only the
    # first one needs inspecting
    match = results[0]
    if "_" not in match:
        return False
    # known idioms that legitimately use an underscore form: jinja2 "set"
    # statements, salt pillar/grains lookups, and juju template_data refs
    benign_contexts = [
        "{% set" + match + "= \"",
        "pillar[" + match + "]",
        "grains" + match,
        "template_data[" + match + "]",
    ]
    if any(ctx in line for ctx in benign_contexts):
        return False
    # These are usually yaml definitions
    if match.endswith(":"):
        return False
    return True
# The list of files might not be the whole repo. If someone only changed a
# couple of files we don't want to run all of the golang files looking for
# flags. Instead load the list of flags from hack/verify-flags/known-flags.txt
# If running the golang files finds a new flag not in that file, return an
# error and tell the user to add the flag to the flag list.
def get_flags(rootdir, files):
    """Scan the golang files in "files" for flag declarations and return
    the list of known flags.

    Flags newly declared in code must already appear in
    hack/verify-flags/known-flags.txt, and flags declared with an
    underscore must be listed in hack/verify-flags/excluded-flags.txt;
    otherwise the offenders are printed and the program exits."""
    # preload the 'known' flags
    pathname = os.path.join(rootdir, "hack/verify-flags/known-flags.txt")
    with open(pathname, 'r') as f:
        flags = set(f.read().splitlines())

    # preload the 'known' flags which don't follow the - standard
    pathname = os.path.join(rootdir, "hack/verify-flags/excluded-flags.txt")
    with open(pathname, 'r') as f:
        excluded_flags = set(f.read().splitlines())

    # patterns matching the pflag-style declaration helpers
    regexs = [re.compile(r'Var[P]?\([^,]*, "([^"]*)"'),
              re.compile(r'.String[P]?\("([^"]*)",[^,]+,[^)]+\)'),
              re.compile(r'.Int[P]?\("([^"]*)",[^,]+,[^)]+\)'),
              re.compile(r'.Bool[P]?\("([^"]*)",[^,]+,[^)]+\)'),
              re.compile(r'.Duration[P]?\("([^"]*)",[^,]+,[^)]+\)'),
              re.compile(r'.StringSlice[P]?\("([^"]*)",[^,]+,[^)]+\)')]

    new_flags = set()
    new_excluded_flags = set()
    # walk all the files looking for any flags being declared
    for pathname in files:
        if not pathname.endswith(".go"):
            continue
        with open(pathname, 'r') as f:
            data = f.read()
        matches = []
        for regex in regexs:
            matches = matches + regex.findall(data)
        for flag in matches:
            if any(x in flag for x in excluded_flags):
                continue
            if "_" in flag:
                new_excluded_flags.add(flag)
            if "-" not in flag:
                continue
            if flag not in flags:
                new_flags.add(flag)

    if len(new_excluded_flags) != 0:
        print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt")
        print("Are you certain this flag should not have been declared with an - instead?")
        # bug fix: sets have no .sort() method - use sorted() instead
        print("%s" % "\n".join(sorted(new_excluded_flags)))
        sys.exit(1)
    if len(new_flags) != 0:
        print("Found flags in golang files not in the list of known flags. Please add these to hack/verify-flags/known-flags.txt")
        print("%s" % "\n".join(sorted(new_flags)))
        sys.exit(1)
    return list(flags)
def flags_to_re(flags):
    """turn the list of all flags we found into a regex find both - and _ versions"""
    dash_or_underscore = re.compile('[-_]')
    # each flag becomes a pattern accepting either separator, guarded so
    # that no alphanumeric (or "$"/"{" prefix) character touches the name
    parts = ["[^\\w${]" + dash_or_underscore.sub('[-_]', flag) + "[^\\w]"
             for flag in flags]
    # one large alternation compiled once
    return re.compile("|".join(parts))
def load_exceptions(rootdir):
    """Read hack/verify-flags/exceptions.txt and return the set of
    (filename, line) pairs that must not be reported. Returns an empty
    set when the user asked to skip the exception list."""
    exceptions = set()
    if args.skip_exceptions:
        return exceptions

    exception_filename = os.path.join(rootdir, "hack/verify-flags/exceptions.txt")
    with open(exception_filename, 'r') as exception_file:
        for exception in exception_file.read().splitlines():
            out = exception.split(":", 1)
            if len(out) != 2:
                # bug fix: this used the non-existent "printf" and would
                # have raised NameError on any malformed line
                print("Invalid line in exceptions file: %s" % exception)
                continue
            filename = out[0]
            line = out[1]
            exceptions.add((filename, line))
    return exceptions
def main():
    """Scan the repository for usages that spell a known flag with an
    underscore instead of a dash and print any that are not listed in the
    exceptions file.

    NOTE(review): main() always returns None, so the sys.exit(main()) at
    module bottom exits 0 even when bad lines are found - confirm that is
    the intended exit-status behavior."""
    # repository root is one directory above this script
    rootdir = os.path.dirname(__file__) + "/../"
    rootdir = os.path.abspath(rootdir)

    exceptions = load_exceptions(rootdir)

    # check only the files given on the command line, or the whole tree
    if len(args.filenames) > 0:
        files = args.filenames
    else:
        files = get_all_files(rootdir)
    files = normalize_files(rootdir, files)

    flags = get_flags(rootdir, files)
    flagRE = flags_to_re(flags)

    bad_lines = []
    # walk all the file looking for any flag that was declared and now has an _
    for pathname in files:
        relname = os.path.relpath(pathname, rootdir)
        f = open(pathname, 'r')
        for line in f.read().splitlines():
            if line_has_bad_flag(line, flagRE):
                if (relname, line) not in exceptions:
                    bad_lines.append((relname, line))
        f.close()

    if len(bad_lines) != 0:
        if not args.skip_exceptions:
            print("Found illegal 'flag' usage. If these are false positives you should running `hack/verify-flags-underscore.py -e > hack/verify-flags/exceptions.txt` to update the list.")
        # sorted output keeps the report stable across runs
        bad_lines.sort()
        for (relname, line) in bad_lines:
            print("%s:%s" % (relname, line))
# entry point; the process exit status is whatever main() returns
if __name__ == "__main__":
    sys.exit(main())
| |
###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
# public API of this module: Twisted-based WebSocket/WAMP client and server
# protocol and factory classes, plus the connection helper functions
__all__ = ['WebSocketServerProtocol',
           'WebSocketServerFactory',
           'WebSocketClientProtocol',
           'WebSocketClientFactory',
           'WrappingWebSocketServerFactory',
           'WrappingWebSocketClientFactory',
           'listenWS',
           'connectWS',
           'WampWebSocketServerProtocol',
           'WampWebSocketServerFactory',
           'WampWebSocketClientProtocol',
           'WampWebSocketClientFactory']
from base64 import b64encode, b64decode
from zope.interface import implementer
import twisted.internet.protocol
from twisted.internet.defer import maybeDeferred
from twisted.python import log
from twisted.internet.interfaces import ITransport
from autobahn.wamp import websocket
from autobahn.websocket import protocol
from autobahn.websocket import http
from autobahn.websocket.compress import PerMessageDeflateOffer, \
PerMessageDeflateOfferAccept, \
PerMessageDeflateResponse, \
PerMessageDeflateResponseAccept
class WebSocketAdapterProtocol(twisted.internet.protocol.Protocol):
    """
    Adapter class for Twisted WebSocket client and server protocols.

    Bridges Twisted's Protocol events (connectionMade / dataReceived /
    connectionLost) to the framework-agnostic WebSocket implementation,
    and forwards the internal ``_on*`` callbacks to the public ``on*``
    handlers that subclasses override.
    """

    def connectionMade(self):
        ## the peer we are connected to
        try:
            peer = self.transport.getPeer()
        except:
            ## ProcessProtocols lack getPeer()
            self.peer = "?"
        else:
            try:
                self.peer = "%s:%d" % (peer.host, peer.port)
            except:
                ## eg Unix Domain sockets don't have host/port
                self.peer = str(peer)

        self._connectionMade()

        ## Set "Nagle" (TCP_NODELAY) per the configured tcpNoDelay flag
        try:
            self.transport.setTcpNoDelay(self.tcpNoDelay)
        except:
            ## eg Unix Domain sockets throw Errno 22 on this
            pass

    def connectionLost(self, reason):
        self._connectionLost(reason)

    def dataReceived(self, data):
        self._dataReceived(data)

    def _closeConnection(self, abort = False):
        ## abortConnection() drops the link immediately; fall back to a
        ## clean loseConnection() on transports without it
        if abort and hasattr(self.transport, 'abortConnection'):
            ## ProcessProtocol lacks abortConnection()
            self.transport.abortConnection()
        else:
            self.transport.loseConnection()

    ## The following thin wrappers forward the internal callbacks invoked
    ## by the WebSocket implementation to the public per-event handlers.

    def _onOpen(self):
        self.onOpen()

    def _onMessageBegin(self, isBinary):
        self.onMessageBegin(isBinary)

    def _onMessageFrameBegin(self, length):
        self.onMessageFrameBegin(length)

    def _onMessageFrameData(self, payload):
        self.onMessageFrameData(payload)

    def _onMessageFrameEnd(self):
        self.onMessageFrameEnd()

    def _onMessageFrame(self, payload):
        self.onMessageFrame(payload)

    def _onMessageEnd(self):
        self.onMessageEnd()

    def _onMessage(self, payload, isBinary):
        self.onMessage(payload, isBinary)

    def _onPing(self, payload):
        self.onPing(payload)

    def _onPong(self, payload):
        self.onPong(payload)

    def _onClose(self, wasClean, code, reason):
        self.onClose(wasClean, code, reason)

    def registerProducer(self, producer, streaming):
        """
        Register a Twisted producer with this protocol.

        Modes: Hybi, Hixie

        :param producer: A Twisted push or pull producer.
        :type producer: object
        :param streaming: Producer type.
        :type streaming: bool
        """
        self.transport.registerProducer(producer, streaming)
class WebSocketServerProtocol(WebSocketAdapterProtocol, protocol.WebSocketServerProtocol):
    """
    Base class for Twisted WebSocket server protocols.
    """

    def _onConnect(self, request):
        ## onConnect() will return the selected subprotocol or None
        ## or a pair (protocol, headers) or raise an HttpException
        ##
        ## maybeDeferred allows onConnect() to answer synchronously or
        ## with a Deferred
        res = maybeDeferred(self.onConnect, request)

        res.addCallback(self.succeedHandshake)

        def forwardError(failure):
            if failure.check(http.HttpException):
                ## deliberate handshake rejection: relay the requested
                ## HTTP status code and reason
                return self.failHandshake(failure.value.reason, failure.value.code)
            else:
                if self.debug:
                    self.factory._log("Unexpected exception in onConnect ['%s']" % failure.value)
                ## anything else becomes a generic 500 response
                return self.failHandshake(http.INTERNAL_SERVER_ERROR[1], http.INTERNAL_SERVER_ERROR[0])

        res.addErrback(forwardError)
class WebSocketClientProtocol(WebSocketAdapterProtocol, protocol.WebSocketClientProtocol):
    """
    Base class for Twisted WebSocket client protocols.
    """

    def _onConnect(self, response):
        ## forward the server's handshake response to the user handler
        self.onConnect(response)
class WebSocketAdapterFactory:
    """
    Adapter class for Twisted WebSocket client and server factories.

    Supplies the logging and delayed-call hooks the framework-agnostic
    factory implementation expects, mapped onto Twisted facilities.
    """

    def _log(self, msg):
        ## route framework log messages through twisted.python.log
        log.msg(msg)

    def _callLater(self, delay, fun):
        ## schedule via the reactor attached to this factory
        return self.reactor.callLater(delay, fun)
class WebSocketServerFactory(WebSocketAdapterFactory, protocol.WebSocketServerFactory, twisted.internet.protocol.ServerFactory):
    """
    Base class for Twisted WebSocket server factories.

    .. seealso:: `twisted.internet.protocol.ServerFactory <http://twistedmatrix.com/documents/current/api/twisted.internet.protocol.ServerFactory.html>`_
    """

    def __init__(self, *args, **kwargs):
        """
        In addition to all arguments to the constructor of
        :class:`autobahn.websocket.protocol.WebSocketServerFactory`,
        you can supply a `reactor` keyword argument to specify the
        Twisted reactor to be used.
        """
        ## pop the (possibly absent or None) reactor argument; fall back
        ## to the default reactor, imported lazily to avoid installing a
        ## reactor merely by importing this module
        explicit_reactor = kwargs.pop('reactor', None)
        if explicit_reactor:
            self.reactor = explicit_reactor
        else:
            from twisted.internet import reactor
            self.reactor = reactor

        protocol.WebSocketServerFactory.__init__(self, *args, **kwargs)
class WebSocketClientFactory(WebSocketAdapterFactory, protocol.WebSocketClientFactory, twisted.internet.protocol.ClientFactory):
    """
    Base class for Twisted WebSocket client factories.

    .. seealso:: `twisted.internet.protocol.ClientFactory <http://twistedmatrix.com/documents/current/api/twisted.internet.protocol.ClientFactory.html>`_
    """

    def __init__(self, *args, **kwargs):
        """
        In addition to all arguments to the constructor of
        :class:`autobahn.websocket.protocol.WebSocketClientFactory`,
        you can supply a `reactor` keyword argument to specify the
        Twisted reactor to be used.
        """
        ## pop the (possibly absent or None) reactor argument; fall back
        ## to the default reactor, imported lazily to avoid installing a
        ## reactor merely by importing this module
        explicit_reactor = kwargs.pop('reactor', None)
        if explicit_reactor:
            self.reactor = explicit_reactor
        else:
            from twisted.internet import reactor
            self.reactor = reactor

        protocol.WebSocketClientFactory.__init__(self, *args, **kwargs)
@implementer(ITransport)
class WrappingWebSocketAdapter:
    """
    An adapter for stream-based transport over WebSocket.

    This follows "websockify" (https://github.com/kanaka/websockify)
    and should be compatible with that.

    It uses WebSocket subprotocol negotiation and 2+ subprotocols:

      - binary (or a compatible subprotocol)
      - base64

    Octets are either transmitted as the payload of WebSocket binary
    messages when using the 'binary' subprotocol (or an alternative
    binary compatible subprotocol), or encoded with Base64
    and then transmitted as the payload of WebSocket text messages when
    using the 'base64' subprotocol.
    """

    def onConnect(self, requestOrResponse):
        ## Negotiate either the 'binary' or the 'base64' WebSocket subprotocol
        ##
        if isinstance(requestOrResponse, protocol.ConnectionRequest):
            request = requestOrResponse
            for p in request.protocols:
                if p in self.factory._subprotocols:
                    self._binaryMode = (p != 'base64')
                    return p
            raise http.HttpException(http.NOT_ACCEPTABLE[0], "this server only speaks %s WebSocket subprotocols" % self.factory._subprotocols)
        elif isinstance(requestOrResponse, protocol.ConnectionResponse):
            response = requestOrResponse
            if response.protocol not in self.factory._subprotocols:
                self.failConnection(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_PROTOCOL_ERROR, "this client only speaks %s WebSocket subprotocols" % self.factory._subprotocols)
            self._binaryMode = (response.protocol != 'base64')
        else:
            ## should not arrive here
            raise Exception("logic error")

    def onOpen(self):
        self._proto.connectionMade()

    def onMessage(self, payload, isBinary):
        ## a message's frame type must agree with the negotiated subprotocol
        if isBinary != self._binaryMode:
            self.failConnection(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_UNSUPPORTED_DATA, "message payload type does not match the negotiated subprotocol")
        else:
            if not isBinary:
                try:
                    payload = b64decode(payload)
                except Exception as e:
                    self.failConnection(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_INVALID_PAYLOAD, "message payload base64 decoding error: {}".format(e))
                    ## bug fix: do not forward the (still encoded) payload
                    ## after a decode failure
                    return
            self._proto.dataReceived(payload)

    def onClose(self, wasClean, code, reason):
        self._proto.connectionLost(None)

    def write(self, data):
        ## part of ITransport
        assert(type(data) == bytes)
        if self._binaryMode:
            self.sendMessage(data, isBinary = True)
        else:
            data = b64encode(data)
            self.sendMessage(data, isBinary = False)

    def writeSequence(self, data):
        ## part of ITransport
        for d in data:
            ## bug fix: write each element - the original passed the whole
            ## sequence object to write() once per element
            self.write(d)

    def loseConnection(self):
        ## part of ITransport
        self.sendClose()
class WrappingWebSocketServerProtocol(WrappingWebSocketAdapter, WebSocketServerProtocol):
   """
   Server protocol for stream-based transport over WebSocket.
   Combines the stream adapter with the Twisted WebSocket server
   protocol; all behavior is inherited from the two bases.
   """
class WrappingWebSocketClientProtocol(WrappingWebSocketAdapter, WebSocketClientProtocol):
   """
   Client protocol for stream-based transport over WebSocket.
   Combines the stream adapter with the Twisted WebSocket client
   protocol; all behavior is inherited from the two bases.
   """
class WrappingWebSocketServerFactory(WebSocketServerFactory):
   """
   Wrapping server factory for stream-based transport over WebSocket.
   """

   def __init__(self,
                factory,
                url,
                reactor = None,
                enableCompression = True,
                autoFragmentSize = 0,
                subprotocol = None,
                debug = False):
      """
      Constructor.

      :param factory: Stream-based factory to be wrapped.
      :type factory: A subclass of `twisted.internet.protocol.Factory`
      :param url: WebSocket URL of the server this server factory will work for.
      :type url: str
      """
      self._factory = factory

      ## subprotocols we are willing to speak: always 'binary' and 'base64',
      ## plus an optional caller-supplied alternative
      self._subprotocols = ['binary', 'base64']
      if subprotocol:
         self._subprotocols.append(subprotocol)

      WebSocketServerFactory.__init__(self,
                                      url = url,
                                      reactor = reactor,
                                      protocols = self._subprotocols,
                                      debug = debug)

      ## outgoing traffic is automatically fragmented into WebSocket
      ## frames of this size
      self.setProtocolOptions(autoFragmentSize = autoFragmentSize)

      ## play nice: always perform the WebSocket closing handshake
      self.setProtocolOptions(failByDrop = False)

      if enableCompression:
         ## Enable the "permessage-deflate" WebSocket extension by accepting
         ## the first deflate offer a client presents (others are declined
         ## implicitly by returning None).
         def acceptCompression(offers):
            for candidate in offers:
               if isinstance(candidate, PerMessageDeflateOffer):
                  return PerMessageDeflateOfferAccept(candidate)

         self.setProtocolOptions(perMessageCompressionAccept = acceptCompression)

   def buildProtocol(self, addr):
      """
      Create a wrapping protocol and attach it as the transport of the
      protocol produced by the wrapped stream-based factory.
      """
      wrapper = WrappingWebSocketServerProtocol()
      wrapper.factory = self
      wrapper._proto = self._factory.buildProtocol(addr)
      wrapper._proto.transport = wrapper
      return wrapper

   def startFactory(self):
      self._factory.startFactory()
      WebSocketServerFactory.startFactory(self)

   def stopFactory(self):
      self._factory.stopFactory()
      WebSocketServerFactory.stopFactory(self)
class WrappingWebSocketClientFactory(WebSocketClientFactory):
   """
   Wrapping client factory for stream-based transport over WebSocket.
   """

   def __init__(self,
                factory,
                url,
                reactor = None,
                enableCompression = True,
                autoFragmentSize = 0,
                subprotocol = None,
                debug = False):
      """
      Constructor.

      :param factory: Stream-based factory to be wrapped.
      :type factory: A subclass of `twisted.internet.protocol.Factory`
      :param url: WebSocket URL of the server this client factory will connect to.
      :type url: str
      """
      self._factory = factory

      ## subprotocols we will offer: always 'binary' and 'base64', plus an
      ## optional caller-supplied alternative
      self._subprotocols = ['binary', 'base64']
      if subprotocol:
         self._subprotocols.append(subprotocol)

      WebSocketClientFactory.__init__(self,
                                      url = url,
                                      reactor = reactor,
                                      protocols = self._subprotocols,
                                      debug = debug)

      ## outgoing traffic is automatically fragmented into WebSocket
      ## frames of this size
      self.setProtocolOptions(autoFragmentSize = autoFragmentSize)

      ## play nice: always perform the WebSocket closing handshake
      self.setProtocolOptions(failByDrop = False)

      if enableCompression:
         ## Offer the "permessage-deflate" WebSocket extension to the server
         ## and accept a deflate response if the server agrees.
         self.setProtocolOptions(perMessageCompressionOffers = [PerMessageDeflateOffer()])

         def acceptCompression(response):
            if isinstance(response, PerMessageDeflateResponse):
               return PerMessageDeflateResponseAccept(response)

         self.setProtocolOptions(perMessageCompressionAccept = acceptCompression)

   def buildProtocol(self, addr):
      """
      Create a wrapping protocol and attach it as the transport of the
      protocol produced by the wrapped stream-based factory.
      """
      wrapper = WrappingWebSocketClientProtocol()
      wrapper.factory = self
      wrapper._proto = self._factory.buildProtocol(addr)
      wrapper._proto.transport = wrapper
      return wrapper
def connectWS(factory, contextFactory = None, timeout = 30, bindAddress = None):
   """
   Establish WebSocket connection to a server. The connection parameters like target
   host, port, resource and others are provided via the factory.

   :param factory: The WebSocket protocol factory to be used for creating client protocol instances.
   :type factory: An :class:`autobahn.websocket.WebSocketClientFactory` instance.
   :param contextFactory: SSL context factory, required for secure WebSocket connections ("wss").
   :type contextFactory: A `twisted.internet.ssl.ClientContextFactory <http://twistedmatrix.com/documents/current/api/twisted.internet.ssl.ClientContextFactory.html>`_ instance.
   :param timeout: Number of seconds to wait before assuming the connection has failed.
   :type timeout: int
   :param bindAddress: A (host, port) tuple of local address to bind to, or None.
   :type bindAddress: tuple

   :returns: obj -- An object which implements `twisted.interface.IConnector <http://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IConnector.html>`_.
   """
   ## prefer a reactor explicitly attached to the factory; otherwise import
   ## the global one lazily to avoid reactor install upon module import
   if hasattr(factory, 'reactor'):
      reactor = factory.reactor
   else:
      from twisted.internet import reactor

   ## explicit proxy: only plain TCP is supported
   if factory.proxy is not None:
      if factory.isSecure:
         raise Exception("WSS over explicit proxies not implemented")
      return reactor.connectTCP(factory.proxy['host'], factory.proxy['port'], factory, timeout, bindAddress)

   if not factory.isSecure:
      return reactor.connectTCP(factory.host, factory.port, factory, timeout, bindAddress)

   ## secure connection: create default client SSL context factory when none given
   if contextFactory is None:
      from twisted.internet import ssl
      contextFactory = ssl.ClientContextFactory()
   return reactor.connectSSL(factory.host, factory.port, factory, contextFactory, timeout, bindAddress)
def listenWS(factory, contextFactory = None, backlog = 50, interface = ''):
   """
   Listen for incoming WebSocket connections from clients. The connection parameters like
   listening port and others are provided via the factory.

   :param factory: The WebSocket protocol factory to be used for creating server protocol instances.
   :type factory: An :class:`autobahn.websocket.WebSocketServerFactory` instance.
   :param contextFactory: SSL context factory, required for secure WebSocket connections ("wss").
   :type contextFactory: A twisted.internet.ssl.ContextFactory.
   :param backlog: Size of the listen queue.
   :type backlog: int
   :param interface: The interface (derived from hostname given) to bind to, defaults to '' (all).
   :type interface: str

   :returns: obj -- An object that implements `twisted.interface.IListeningPort <http://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IListeningPort.html>`_.
   """
   ## prefer a reactor explicitly attached to the factory; otherwise import
   ## the global one lazily to avoid reactor install upon module import
   if hasattr(factory, 'reactor'):
      reactor = factory.reactor
   else:
      from twisted.internet import reactor

   if not factory.isSecure:
      return reactor.listenTCP(factory.port, factory, backlog, interface)

   ## secure listen requires an explicit SSL context factory
   if contextFactory is None:
      raise Exception("Secure WebSocket listen requested, but no SSL context factory given")
   return reactor.listenSSL(factory.port, factory, contextFactory, backlog, interface)
class WampWebSocketServerProtocol(websocket.WampWebSocketServerProtocol, WebSocketServerProtocol):
   """
   Twisted-based WAMP-over-WebSocket server protocol (all behavior inherited).
   """
   pass
class WampWebSocketServerFactory(websocket.WampWebSocketServerFactory, WebSocketServerFactory):
   """
   Twisted-based WAMP-over-WebSocket server factory.
   """

   protocol = WampWebSocketServerProtocol

   def __init__(self, factory, *args, **kwargs):
      """
      Forwards the (optional) `serializers` keyword argument to the WAMP
      base factory and everything else to the WebSocket base factory.
      """
      serializers = kwargs.pop('serializers', None)

      websocket.WampWebSocketServerFactory.__init__(self, factory, serializers)

      kwargs['protocols'] = self._protocols
      WebSocketServerFactory.__init__(self, *args, **kwargs)
class WampWebSocketClientProtocol(websocket.WampWebSocketClientProtocol, WebSocketClientProtocol):
   """
   Twisted-based WAMP-over-WebSocket client protocol (all behavior inherited).
   """
   pass
class WampWebSocketClientFactory(websocket.WampWebSocketClientFactory, WebSocketClientFactory):
   """
   Twisted-based WAMP-over-WebSocket client factory.
   """

   protocol = WampWebSocketClientProtocol

   def __init__(self, factory, *args, **kwargs):
      """
      Forwards the (optional) `serializers` keyword argument to the WAMP
      base factory and everything else to the WebSocket base factory.
      """
      serializers = kwargs.pop('serializers', None)

      websocket.WampWebSocketClientFactory.__init__(self, factory, serializers)

      kwargs['protocols'] = self._protocols
      WebSocketClientFactory.__init__(self, *args, **kwargs)
| |
#!/usr/bin/env python
"""Implementations of various collections."""
import cStringIO
import struct
import logging
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.aff4_objects import aff4_grr
class RDFValueCollection(aff4.AFF4Object):
  """This is a collection of RDFValues.
  Values are stored as length-prefixed (little-endian int32) serialized
  EmbeddedRDFValue records appended to an underlying AFF4Image stream
  kept at <urn>/Stream.
  """
  # If this is set to an RDFValue class implementation, all the contained
  # objects must be instances of this class.
  _rdf_type = None
  _behaviours = set()
  # Number of values in the collection; mirrored into Schema.SIZE on Flush.
  size = 0
  # The file object for the underlying AFF4Image stream.
  fd = None
  class SchemaCls(aff4.AFF4Object.SchemaCls):
    SIZE = aff4.AFF4Stream.SchemaCls.SIZE
    DESCRIPTION = aff4.Attribute("aff4:description", rdfvalue.RDFString,
                                 "This collection's description", "description")
    VIEW = aff4.Attribute("aff4:rdfview", aff4_grr.RDFValueCollectionView,
                          "The list of attributes which will show up in "
                          "the table.", default="")
  def Initialize(self):
    """Initialize the internal storage stream."""
    self.stream_dirty = False
    try:
      # Reuse the existing stream if there already is one.
      self.fd = aff4.FACTORY.Open(self.urn.Add("Stream"),
                                  aff4_type="AFF4Image", mode=self.mode,
                                  token=self.token)
      self.size = int(self.Get(self.Schema.SIZE))
      return
    except IOError:
      pass
    # If we get here, the stream does not already exist - we create a new
    # stream.
    self.fd = aff4.FACTORY.Create(self.urn.Add("Stream"), "AFF4Image",
                                  mode=self.mode, token=self.token)
    # Position at the end of the (empty) stream, ready for appending.
    self.fd.seek(0, 2)
    self.size = 0
  def SetChunksize(self, chunk_size):
    """Set the chunk size of the backing stream (new collections only)."""
    if self.fd.size != 0:
      raise ValueError("Cannot set chunk size on an existing collection.")
    self.fd.SetChunksize(chunk_size)
  def Flush(self, sync=False):
    """Persist the size attribute and the stream if anything was written."""
    if self.stream_dirty:
      self.Set(self.Schema.SIZE(self.size))
      self.fd.Flush(sync=sync)
    super(RDFValueCollection, self).Flush(sync=sync)
  def Close(self, sync=False):
    self.Flush(sync=sync)
  def Add(self, rdf_value=None, **kwargs):
    """Add the rdf value to the collection."""
    if rdf_value is None:
      if self._rdf_type:
        rdf_value = self._rdf_type(**kwargs)  # pylint: disable=not-callable
      else:
        raise ValueError("RDFValueCollection doesn't accept None values.")
    if self._rdf_type and not isinstance(rdf_value, self._rdf_type):
      raise ValueError("This collection only accepts values of type %s" %
                       self._rdf_type.__name__)
    if not rdf_value.age:
      rdf_value.age.Now()
    # Append one length-prefixed serialized record to the stream.
    data = rdfvalue.EmbeddedRDFValue(payload=rdf_value).SerializeToString()
    self.fd.Seek(0, 2)
    self.fd.Write(struct.pack("<i", len(data)))
    self.fd.Write(data)
    self.stream_dirty = True
    self.size += 1
  def AddAll(self, rdf_values, callback=None):
    """Adds a list of rdfvalues to the collection."""
    # Validate everything first so a type error can not leave a partially
    # written batch in the stream.
    for rdf_value in rdf_values:
      if rdf_value is None:
        raise ValueError("Can't add None to the collection via AddAll.")
      if self._rdf_type and not isinstance(rdf_value, self._rdf_type):
        raise ValueError("This collection only accepts values of type %s" %
                         self._rdf_type.__name__)
      if not rdf_value.age:
        rdf_value.age.Now()
    # Serialize the whole batch into one buffer and write it in a single
    # stream operation.
    buf = cStringIO.StringIO()
    for index, rdf_value in enumerate(rdf_values):
      data = rdfvalue.EmbeddedRDFValue(payload=rdf_value).SerializeToString()
      buf.write(struct.pack("<i", len(data)))
      buf.write(data)
      self.size += 1
      if callback:
        callback(index, rdf_value)
    self.fd.Seek(0, 2)
    self.fd.Write(buf.getvalue())
    self.stream_dirty = True
  def __len__(self):
    return self.size
  def __nonzero__(self):
    return self.size != 0
  def __iter__(self):
    """Iterate over all contained RDFValues.
    Returns:
      Generator of RDFValues stored in the collection.
    Raises:
      RuntimeError: if we are in write mode.
    """
    return self.GenerateItems()
  @property
  def current_offset(self):
    # Current byte position in the backing stream.
    return self.fd.Tell()
  def GenerateItems(self, offset=0):
    """Iterate over all contained RDFValues.
    Args:
      offset: The offset in the stream to start reading from.
    Yields:
      RDFValues stored in the collection.
    Raises:
      RuntimeError: if we are in write mode.
    """
    if not self.fd:
      return
    if self.mode == "w":
      raise RuntimeError("Can not read when in write mode.")
    self.fd.seek(offset)
    count = 0
    while True:
      offset = self.fd.Tell()
      try:
        # Each record is a 4-byte little-endian length followed by the
        # serialized EmbeddedRDFValue; a short read raises struct.error
        # which signals the end of the stream.
        length = struct.unpack("<i", self.fd.Read(4))[0]
        serialized_event = self.fd.Read(length)
      except struct.error:
        break
      result = rdfvalue.EmbeddedRDFValue(serialized_event)
      payload = result.payload
      if payload is not None:
        # Mark the RDFValue with important information relating to the
        # collection it is from.
        payload.id = count
        payload.collection_offset = offset
        yield payload
      else:
        logging.warning("payload=None was encountered in a collection %s "
                        "(index %d), this may mean a logical bug or corrupt "
                        "data. Ignoring...", self.urn, count)
      count += 1
  def GetItem(self, offset=0):
    # Return the first value found at/after the given stream offset, or
    # None implicitly when the stream is exhausted.
    for item in self.GenerateItems(offset=offset):
      return item
  def __getitem__(self, index):
    # Linear scan; note this returns None implicitly when the (non-negative)
    # index is out of range rather than raising IndexError.
    if index >= 0:
      for i, item in enumerate(self):
        if i == index:
          return item
    else:
      raise RuntimeError("Index must be >= 0")
class AFF4Collection(aff4.AFF4Volume, RDFValueCollection):
  """A collection of AFF4 objects.
  The AFF4 objects themselves are opened on demand from the data store. The
  collection simply stores the RDFURNs of all aff4 objects in the collection.
  """
  _rdf_type = rdfvalue.AFF4ObjectSummary
  _behaviours = frozenset(["Collection"])
  class SchemaCls(aff4.AFF4Volume.SchemaCls, RDFValueCollection.SchemaCls):
    VIEW = aff4.Attribute("aff4:view", rdfvalue.AFF4CollectionView,
                          "The list of attributes which will show up in "
                          "the table.", default="")
  def CreateView(self, attributes):
    """Given a list of attributes, update our view.
    Args:
      attributes: is a list of attribute names.
    """
    self.Set(self.Schema.VIEW(attributes))
  def Query(self, filter_string="", subjects=None, limit=100):
    """Filter the objects contained within this collection.
    Args:
      filter_string: Optional AFF4 query string applied to each object.
      subjects: Optional explicit list of subject URNs; when None, up to
        `limit` URNs are taken from the collection itself.
      limit: Maximum number of subjects considered.
    Yields:
      The opened AFF4 objects passing the filter.
    """
    if subjects is None:
      # Collect at most `limit` URNs from our own contents.
      subjects = set()
      for obj in self:
        if len(subjects) < limit:
          subjects.add(obj.urn)
        else:
          break
    else:
      subjects = set(subjects[:limit])
    if filter_string:
      # Parse the query string
      ast = aff4.AFF4QueryParser(filter_string).Parse()
      # Query our own data store
      filter_obj = ast.Compile(aff4.AFF4Filter)
    # We expect RDFURN objects to be stored in this collection.
    for subject in aff4.FACTORY.MultiOpen(subjects, token=self.token):
      if filter_string and not filter_obj.FilterOne(subject):
        continue
      yield subject
  def ListChildren(self, **_):
    # Children are the URNs recorded in the stored summaries.
    for aff4object_summary in self:
      yield aff4object_summary.urn
class GRRSignedBlobCollection(RDFValueCollection):
  """A collection restricted to SignedBlob values (backs GRRSignedBlob)."""
  _rdf_type = rdfvalue.SignedBlob
class GRRSignedBlob(aff4.AFF4MemoryStream):
  """A container for storing a signed binary blob such as a driver."""
  def Initialize(self):
    """Open the backing blob collection and buffer its chunks in memory."""
    self.collection = aff4.FACTORY.Create(
        self.urn.Add("collection"), "GRRSignedBlobCollection", mode=self.mode,
        token=self.token)
    self.fd = cStringIO.StringIO()
    if "r" in self.mode:
      # Concatenate all stored chunk payloads into the in-memory stream.
      for x in self.collection:
        self.fd.write(x.data)
      self.size = self.fd.tell()
      self.fd.seek(0)
    # How many chunks we have?
    self.chunks = len(self.collection)
  def Add(self, item):
    # Append another signed chunk to the backing collection.
    self.collection.Add(item)
  def __iter__(self):
    return iter(self.collection)
  def Close(self):
    super(GRRSignedBlob, self).Close()
    self.collection.Close()
class GRRMemoryDriver(GRRSignedBlob):
  """A driver for acquiring memory."""
  class SchemaCls(GRRSignedBlob.SchemaCls):
    # Default targets the "pmem" driver at device path \\.\pmem.
    INSTALLATION = aff4.Attribute(
        "aff4:driver/installation", rdfvalue.DriverInstallTemplate,
        "The driver installation control protobuf.", "installation",
        default=rdfvalue.DriverInstallTemplate(
            driver_name="pmem", device_path=r"\\.\pmem"))
class GrepResultsCollection(RDFValueCollection):
  """A collection of grep results."""
  # Each entry is a BufferReference describing a matching region.
  _rdf_type = rdfvalue.BufferReference
class ClientAnomalyCollection(RDFValueCollection):
  """A collection of anomalies related to a client.
  This class is a normal collection, but with additional methods for making
  viewing and working with anomalies easier.
  """
  _rdf_type = rdfvalue.Anomaly
# DEPRECATED: this class is deprecated and is left here only temporarily for
# compatibility reasons. The Add method raises a RuntimeError to discourage
# further use of this class. Please use PackedVersionedCollection instead:
# it has the same functionality and better performance characteristics.
class VersionedCollection(RDFValueCollection):
  """DEPRECATED: A collection which uses the data store's version properties.
  This collection is very efficient for writing to - we can insert new values by
  blind writing them into the data store without needing to take a lock - using
  the timestamping features of the data store.
  """
  class SchemaCls(RDFValueCollection.SchemaCls):
    DATA = aff4.Attribute("aff4:data", rdfvalue.EmbeddedRDFValue,
                          "The embedded semantic value.", versioned=True)
  def Add(self, rdf_value=None, **kwargs):
    """Add the rdf value to the collection."""
    raise RuntimeError("VersionedCollection is deprecated, can't add new "
                       "elements.")
  def AddAll(self, rdf_values, callback=None):
    """Add multiple rdf values to the collection."""
    raise RuntimeError("VersionedCollection is deprecated, can't add new "
                       "elements.")
  def GenerateItems(self, offset=None, timestamp=None):
    """Yield stored payloads, skipping the first `offset` results.
    Args:
      offset: Number of initial results to skip; mutually exclusive with
        timestamp. Treated as 0 when not given.
      timestamp: Timestamp (range) passed through to the data store;
        mutually exclusive with offset.
    Yields:
      The stored RDFValue payloads in data store order.
    Raises:
      ValueError: if both offset and timestamp are given.
    """
    if offset is not None and timestamp is not None:
      raise ValueError("Either offset or timestamp can be specified.")
    # BUG FIX: offset defaults to None, and "index >= None" only behaved
    # like "skip nothing" because of Python 2's arbitrary None ordering
    # (it is a TypeError on Python 3). Normalize explicitly instead.
    if offset is None:
      offset = 0
    if timestamp is None:
      timestamp = data_store.DB.ALL_TIMESTAMPS
    index = 0
    for _, value, ts in data_store.DB.ResolveMulti(
        self.urn, [self.Schema.DATA.predicate], token=self.token,
        timestamp=timestamp):
      if index >= offset:
        yield self.Schema.DATA(value, age=ts).payload
      index += 1
class PackedVersionedCollection(RDFValueCollection):
  """A collection which uses the data store's version properties.
  This collection is very efficient for writing to - we can insert new values by
  blind writing them into the data store - using the timestamping features of
  the data store.
  Unfortunately reading from versioned data store attributes is slow. Therefore
  this object implements a compaction strategy, where writes are versioned,
  until they can be compacted into a regular RDFValueCollection by the
  VersionedCollectionCompactor cron job.
  """
  class SchemaCls(RDFValueCollection.SchemaCls):
    DATA = aff4.Attribute("aff4:data", rdfvalue.EmbeddedRDFValue,
                          "The embedded semantic value.", versioned=True)
  # Maximum number of versioned values handled per temporary batch during
  # compaction.
  COMPACTION_BATCH_SIZE = 10000
  # Upper bound on how many uncompacted results GenerateItems will buffer in
  # memory in order to reverse them into chronological order.
  MAX_REVERSED_RESULTS = 10000
  def Add(self, rdf_value=None, **kwargs):
    """Add the rdf value to the collection."""
    if rdf_value is None and self._rdf_type:
      rdf_value = self._rdf_type(**kwargs)  # pylint: disable=not-callable
    if not rdf_value.age:
      rdf_value.age.Now()
    # Blind-write the value as a new version of the DATA attribute.
    self.Set(self.Schema.DATA(payload=rdf_value))
    # Let the compactor know we need compacting.
    data_store.DB.Set("aff4:/cron/versioned_collection_compactor",
                      "index:changed/%s" % self.urn, self.urn,
                      replace=True, token=self.token, sync=False)
  def AddAll(self, rdf_values, callback=None):
    """Adds a list of rdfvalues to the collection."""
    # Validate the whole batch before writing anything.
    for rdf_value in rdf_values:
      if rdf_value is None:
        raise ValueError("Can't add None to the collection via AddAll.")
      if self._rdf_type and not isinstance(rdf_value, self._rdf_type):
        raise ValueError("This collection only accepts values of type %s" %
                         self._rdf_type.__name__)
      if not rdf_value.age:
        rdf_value.age.Now()
    for index, rdf_value in enumerate(rdf_values):
      self.Set(self.Schema.DATA(payload=rdf_value))
      if callback:
        callback(index, rdf_value)
    # Let the compactor know we need compacting.
    data_store.DB.Set("aff4:/cron/versioned_collection_compactor",
                      "index:changed/%s" % self.urn, self.urn,
                      replace=True, token=self.token, sync=False)
  def GenerateItems(self, offset=0):
    """First iterate over the versions, and then iterate over the stream."""
    index = 0
    # Compacted values live in the underlying stream and come first.
    for x in super(PackedVersionedCollection, self).GenerateItems():
      if index >= offset:
        yield x
      index += 1
    if self.IsAttributeSet(self.Schema.DATA):
      # Uncompacted values come back newest-first from the data store, so
      # buffer them for reversal - but only up to MAX_REVERSED_RESULTS.
      results = []
      for _, value, _ in data_store.DB.ResolveRegex(
          self.urn, self.Schema.DATA.predicate, token=self.token,
          timestamp=data_store.DB.ALL_TIMESTAMPS):
        if index >= offset:
          if results is not None:
            results.append(self.Schema.DATA(value).payload)
            # Too many to reverse in memory: flush the buffer as-is and
            # fall back to yielding in data store order from here on.
            if len(results) > self.MAX_REVERSED_RESULTS:
              for result in results:
                yield result
              results = None
          else:
            yield self.Schema.DATA(value).payload
        index += 1
      if results is not None:
        for result in reversed(results):
          yield result
  @utils.Synchronized
  def Compact(self, callback=None):
    """Compacts versioned attributes into the collection stream.
    Versioned attributes come from the datastore sorted by the timestamp
    in the decreasing order. This is the opposite of what we want in
    the collection (as items in the collection should be in chronological
    order).
    Compact's implementation can handle very large collections that can't
    be reversed in memory. It reads them in batches, reverses every batch
    individually, and then reads batches back in the reversed order and
    write their contents to the collection stream.
    Args:
      callback: An optional function without arguments that gets called
                periodically while processing is done. Useful in flows
                that have to heartbeat.
    Raises:
      RuntimeError: if problems are encountered when reading back temporary
                    saved data.
    Returns:
      Number of compacted results.
    """
    compacted_count = 0
    batches_urns = []
    current_batch = []
    # This timestamp will be used to delete attributes. We don't want
    # to delete anything that was added after we started the compaction.
    freeze_timestamp = rdfvalue.RDFDatetime().Now()
    def DeleteVersionedDataAndFlush():
      """Removes versioned attributes and flushes the stream."""
      data_store.DB.DeleteAttributes(self.urn, [self.Schema.DATA.predicate],
                                     end=freeze_timestamp,
                                     token=self.token, sync=True)
      # Drop the locally cached copy of the attribute as well.
      if self.Schema.DATA in self.synced_attributes:
        del self.synced_attributes[self.Schema.DATA]
      self.size += compacted_count
      self.Flush(sync=True)
    # We iterate over all versioned attributes. If we get more than
    # self.COMPACTION_BATCH_SIZE, we write the data to a temporary
    # stream in the reversed order.
    for _, value, _ in data_store.DB.ResolveRegex(
        self.urn, self.Schema.DATA.predicate, token=self.token,
        timestamp=(0, freeze_timestamp)):
      if callback:
        callback()
      current_batch.append(value)
      compacted_count += 1
      if len(current_batch) >= self.COMPACTION_BATCH_SIZE:
        batch_urn = rdfvalue.RDFURN("aff4:/tmp").Add(
            "%X" % utils.PRNG.GetULong())
        batches_urns.append(batch_urn)
        buf = cStringIO.StringIO()
        for data in reversed(current_batch):
          buf.write(struct.pack("<i", len(data)))
          buf.write(data)
        # We use AFF4Image to avoid serializing/deserializing data stored
        # in versioned attributes.
        with aff4.FACTORY.Create(batch_urn, "AFF4Image", mode="w",
                                 token=self.token) as batch_stream:
          batch_stream.Write(buf.getvalue())
        current_batch = []
    # If there are no versioned attributes, we have nothing to do.
    if not current_batch and not batches_urns:
      return 0
    # The last batch of results can be written to our collection's stream
    # immediately, because we have to reverse the order of all the data
    # stored in versioned attributes.
    if current_batch:
      buf = cStringIO.StringIO()
      for data in reversed(current_batch):
        buf.write(struct.pack("<i", len(data)))
        buf.write(data)
      self.fd.Seek(0, 2)
      self.fd.Write(buf.getvalue())
      self.stream_dirty = True
      # If current_batch was the only available batch, just write everything
      # and return.
      if not batches_urns:
        DeleteVersionedDataAndFlush()
        return compacted_count
    batches = {}
    for batch in aff4.FACTORY.MultiOpen(batches_urns, aff4_type="AFF4Image",
                                        token=self.token):
      batches[batch.urn] = batch
    if len(batches_urns) != len(batches):
      raise RuntimeError("Internal inconsistency can't read back all the "
                         "temporary batches.")
    # We read all the temporary batches in reverse order (batches itself
    # were reversed when they were written).
    self.fd.Seek(0, 2)
    for batch_urn in reversed(batches_urns):
      batch = batches[batch_urn]
      if callback:
        callback()
      data = batch.Read(len(batch))
      self.fd.Write(data)
      self.stream_dirty = True
      # Temporary batch streams are deleted as soon as they are consumed.
      aff4.FACTORY.Delete(batch_urn, token=self.token)
    DeleteVersionedDataAndFlush()
    return compacted_count
  def CalculateLength(self):
    """Return compacted stream length plus uncompacted version count."""
    length = super(PackedVersionedCollection, self).__len__()
    if self.IsAttributeSet(self.Schema.DATA):
      if self.age_policy == aff4.ALL_TIMES:
        length += len(list(self.GetValuesForAttribute(self.Schema.DATA)))
      else:
        length += len(list(data_store.DB.ResolveMulti(
            self.urn, [self.Schema.DATA.predicate], token=self.token,
            timestamp=data_store.DB.ALL_TIMESTAMPS)))
    return length
  @property
  def current_offset(self):
    # Deliberately unsupported: offsets are meaningless while some values
    # still live in uncompacted versioned attributes.
    raise AttributeError("current_offset is called on a "
                         "PackedVersionedCollection, this will not work.")
  def __len__(self):
    # Deliberately unsupported: use CalculateLength(), which is expensive.
    raise AttributeError(
        "Len called on a PackedVersionedCollection, this will not work.")
  def __nonzero__(self):
    if "r" not in self.mode:
      raise AttributeError(
          "Cannot determine collection length in write only mode.")
    # This checks if there is data in the stream.
    if super(PackedVersionedCollection, self).__nonzero__():
      return True
    # if there is not, we might have some uncompacted data.
    return self.IsAttributeSet(self.Schema.DATA)
| |
# Copyright (c) 2018, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "Peter Ogden"
__copyright__ = "Copyright 2018, Xilinx"
__email__ = "pynq_support@xilinx.com"
import asyncio
import cffi
import functools
import os
import numpy as np
from pynq.buffer import PynqBuffer
from pynq.ps import CPU_ARCH, ZU_ARCH
from .constants import LIB_SEARCH_PATH
from .common import VideoMode
def _fourcc_int(fourcc):
    """Pack a four-character FourCC code into its little-endian integer form.

    Parameters
    ----------
    fourcc : str
        Exactly four characters identifying the pixel format.

    Raises
    ------
    ValueError
        If `fourcc` is not exactly four characters long.

    """
    if len(fourcc) != 4:
        raise ValueError("FourCC code must be four characters")
    value = 0
    # First character occupies the lowest byte, last the highest.
    for shift, char in zip((0, 8, 16, 24), fourcc):
        value |= ord(char) << shift
    return value
class DrmDriver:
    """Driver for DRM-based output through the Linux kernel

    This driver provides a zero-copy interface to the DRM subsystem
    exposing a similar API to the HDMI interface.
    The API should be configured with a PixelFormat containing a FourCC
    which will be passed to the Linux video subsystem.
    Once configured frames can be retrieved using `newframe` which returns
    a numpy array mapped to a frame buffer. The frame can be switched using
    `writeframe`. Once a frame has been written it should not be modified as
    ownership has been transferred to the kernel.
    """
    # Class-level cffi handles, populated lazily by _openlib() when the
    # first instance is constructed.
    _videolib = None
    _ffi = None
def __init__(self, device_path, event_loop=None):
"""Create a new driver instance
Parameters
----------
device_path : str
The device to open
event_loop : asyncio.AbstractEventLoop
The event loop to use if the default is not wanted
"""
if self._videolib is None:
self._openlib()
self._video_fd = os.open(device_path, os.O_RDWR)
self._video_file = os.fdopen(self._video_fd, "r+b", buffering=0)
self._device = self._videolib.pynqvideo_device_init(self._video_fd)
if event_loop:
self._loop = event_loop
else:
self._loop = asyncio.get_event_loop()
self._loop.add_reader(self._video_file,
functools.partial(DisplayPort._callback, self))
self._pageflip_event = asyncio.Event()
self._mode = None
mode_count = self._videolib.pynqvideo_num_modes(self._device)
raw_modes = self._ffi.new('struct video_mode[{}]'.format(mode_count))
self._videolib.pynqvideo_get_modes(self._device, raw_modes, mode_count)
self.modes = [VideoMode(m.width, m.height, 24, m.refresh)
for m in raw_modes]
    def _openlib(self):
        """Load libdisplayport.so via cffi and declare the C API it exposes.

        Sets ``_ffi`` and ``_videolib`` (shadowing the class-level None
        defaults) for use by all other methods.
        """
        self._ffi = cffi.FFI()
        # The cdef below mirrors the library's exported interface; keep it
        # in sync with libdisplayport.
        self._ffi.cdef("""
        struct video_mode {
            int width;
            int height;
            int refresh;
        };
        void* pynqvideo_device_init(int fd);
        int pynqvideo_device_set_mode(void* device, int width, int height,
                int refreh, int colorspace);
        void pynqvideo_device_close(void* device);
        void pynqvideo_device_handle_events(void* device);
        void* pynqvideo_frame_new(void* device);
        int pynqvideo_frame_write(void* device, void* frame);
        uint64_t pynqvideo_frame_physaddr(void* frame);
        void* pynqvideo_frame_data(void* frame);
        uint64_t pynqvideo_frame_size(void* frame);
        uint32_t pynqvideo_frame_stride(void* frame);
        void pynqvideo_frame_free(void* device, void* frame);
        int pynqvideo_num_modes(void* device);
        int pynqvideo_get_modes(void* device, struct video_mode* modes, int length);
        """
                       )
        self._videolib = self._ffi.dlopen(os.path.join(LIB_SEARCH_PATH,
                                                       "libdisplayport.so"))
    def _callback(self):
        # Invoked by the event loop when the video fd becomes readable: let
        # the library process its pending events, then signal any waiter.
        self._videolib.pynqvideo_device_handle_events(self._device)
        self._pageflip_event.set()
def __del__(self):
self.close()
def configure(self, mode, pixelformat):
"""Configure the display output
Raises an exception if the initialisation fails.
Parameters
----------
mode : VideoMode
The resolution to set the output display to
pixelformat : PixelFormat
The pixel format to use - must contain a fourcc
"""
if not pixelformat.fourcc:
raise ValueError("pixelformat does not define a FourCC")
ret = self._videolib.pynqvideo_device_set_mode(
self._device, mode.width, mode.height, mode.fps,
_fourcc_int(pixelformat.fourcc))
if ret:
raise OSError(ret)
self._mode = mode
    def start(self):
        """Dummy function to match the HDMI interface

        No action is needed for DRM output; this exists only for API
        compatibility.
        """
        pass
    def stop(self):
        """Dummy function to match the HDMI interface

        No action is needed for DRM output; this exists only for API
        compatibility.
        """
        pass
    def close(self):
        """Close the display device

        Unregisters the fd from the event loop, shuts down the device via
        the library, then closes the underlying file object.
        """
        self._loop.remove_reader(self._video_file)
        self._videolib.pynqvideo_device_close(self._device)
        self._video_file.close()
    def newframe(self):
        """Return a new frame which can later be written
        Frames are not zeroed before being returned so the calling
        application should make sure the frame is fully written.
        Returns
        -------
        pynq.PynqBuffer : numpy.ndarray mapped to a hardware frame
        """
        # Allocate a hardware frame and query its memory layout.
        frame_pointer = self._videolib.pynqvideo_frame_new(self._device)
        data_pointer = self._videolib.pynqvideo_frame_data(frame_pointer)
        data_size = self._videolib.pynqvideo_frame_size(frame_pointer)
        data_physaddr = self._videolib.pynqvideo_frame_physaddr(frame_pointer)
        data_stride = self._videolib.pynqvideo_frame_stride(frame_pointer)
        # Expected bytes per row from the configured mode: grayscale
        # (2-D shape) is just the width, otherwise width * channels.
        if len(self._mode.shape) == 2:
            expected_stride = self._mode.shape[1]
        else:
            expected_stride = self._mode.shape[1] * self._mode.shape[2]
        # Zero-copy view over the frame's memory.
        buffer = self._ffi.buffer(data_pointer, data_size)
        if expected_stride == data_stride:
            array = np.frombuffer(buffer, dtype='u1').reshape(self._mode.shape)
        else:
            # Hardware rows are padded: expose only the visible columns.
            raw_array = np.frombuffer(buffer, dtype='u1').reshape(
                [self._mode.shape[0], data_stride])
            array = raw_array[:,0:expected_stride].reshape(self._mode.shape)
        # Attach ownership metadata so writeframe()/return_pointer() can
        # hand the frame back to the C library.
        view = array.view(PynqBuffer)
        view.pointer = frame_pointer
        view.device_address = data_physaddr
        view.return_to = self
        return view
    def return_pointer(self, pointer):
        # Release a frame back to the C library; a None/null pointer means
        # ownership was already transferred (see writeframe) and is a no-op.
        if pointer:
            self._videolib.pynqvideo_frame_free(self._device, pointer)
    def writeframe(self, frame):
        """Write a frame to the display.
        Raises an exception if the operation fails and blocks until a
        page-flip if there is already a frame scheduled to be displayed.
        Parameters
        ----------
        frame : pynq.ContiguousArray
            Frame to write - must have been created by `newframe`
        """
        ret = self._videolib.pynqvideo_frame_write(
            self._device, frame.pointer)
        if ret == -1:
            # NOTE(review): -1 appears to mean "a flip is already pending";
            # fall back to the async path and block until it completes.
            self._loop.run_until_complete(
                asyncio.ensure_future(self.writeframe_async(frame)))
        elif ret > 0:
            # Positive return codes are treated as errno values.
            raise OSError(ret)
        else:
            # Success: drain any pending events immediately.
            self._videolib.pynqvideo_device_handle_events(self._device)
        # Frame should no longer be disposed
        frame.pointer = None
    async def writeframe_async(self, frame):
        """Write a frame to the display.
        Raises an exception if the operation fails and yields until a
        page-flip if there is already a frame scheduled to be displayed.
        Parameters
        ----------
        frame : pynq.ContiguousArray
            Frame to write - must have been created by `newframe`
        """
        ret = -1
        # Retry until the C library accepts the frame (returns 0).
        while ret != 0:
            ret = self._videolib.pynqvideo_frame_write(
                self._device, frame.pointer)
            if ret == 0:
                # Accepted: yield once so the loop can run, then mark the
                # frame as handed over to the hardware.
                await asyncio.sleep(0)
                frame.disposed = True
            elif ret > 0:
                # Positive return codes are treated as errno values.
                raise OSError(ret)
            else:
                # A flip is still pending: wait for _callback() to signal
                # the page-flip event before retrying.
                self._pageflip_event.clear()
                await self._pageflip_event.wait()
# Only Zynq Ultrascale+ parts have the hardened DisplayPort controller.
if CPU_ARCH == ZU_ARCH:
    class DisplayPort(DrmDriver):
        """Subclass of DrmDriver which interacts with the
        hardened DisplayPort port on Zynq Ultrascale+ devices
        """
        def __init__(self, event_loop=None):
            """Create a new driver instance bound to card0 which
            should always be the hardened DisplayPort
            Parameters
            ----------
            event_loop : asyncio.AbstractEventLoop
                The event loop to use if the default is not wanted
            """
            # card0 is assumed to be the hardened DisplayPort device node.
            super().__init__('/dev/dri/card0', event_loop)
| |
import re, socket
from ems.validation.abstract import Validator
class RequiredValidator(Validator):
    """Checks that a value is present and non-empty.

    Strings count only when they contain non-whitespace characters;
    any other value is judged by its truthiness.
    """
    def validate(self, value):
        if value is None:
            return False
        if isinstance(value, basestring):
            return len(value.strip()) > 0
        return True if value else False
class FilledValidator(Validator):
    """Checks that *key* was submitted at all (regardless of its value)."""
    def validate(self, value, key, data):
        present = key in data
        return present
class SameValidator(Validator):
    """Checks that the value equals the value stored under *otherKey*."""
    def validate(self, value, otherKey, data):
        try:
            other = data[otherKey]
        except KeyError:
            return False
        return value == other
class ConfirmedValidator(Validator):
    """Checks that ``<key>_confirmation`` carries the same value as *key*."""
    def __init__(self, sameValidator=None):
        if sameValidator is None:
            sameValidator = SameValidator()
        self.same = sameValidator
    def validate(self, value, key, data):
        confirmationKey = '{0}_confirmation'.format(key)
        return self.same.validate(value, confirmationKey, data)
class DifferentValidator(Validator):
    """Checks that the value differs from the value stored under *otherKey*.

    A missing *otherKey* counts as different.
    """
    def validate(self, value, otherKey, data):
        try:
            other = data[otherKey]
        except KeyError:
            return True
        return value != other
class AcceptedValidator(Validator):
    """Checks that a value represents an affirmative choice
    (e.g. a ticked checkbox)."""
    #: values treated as "accepted"
    TRUE_VALUES = ('yes', 'on', '1', 1, True, 'true', 1.0)
    def __init__(self):
        self.required = RequiredValidator()
    def validate(self, value):
        # must be present first, then match one of the accepted spellings
        if not self.required.validate(value):
            return False
        return value in self.TRUE_VALUES
class DictValidator(Validator):
    """Checks that a value supports item access (``value[key]``)."""
    def validate(self, value):
        supports_item_access = hasattr(value, '__getitem__')
        return supports_item_access
class BoolValidator(Validator):
    """Checks that a value is a recognised boolean representation."""
    def validate(self, value):
        return value in (True, False, 0, 1, '0', '1')
class IntegerValidator(Validator):
    """Checks that a value can be converted to an ``int``."""
    def validate(self, value):
        try:
            int(value)
        except (ValueError, TypeError):
            # int(None) or int([]) raise TypeError rather than ValueError;
            # treat both as "not an integer" instead of letting the
            # exception escape (matches NumericValidator's behaviour).
            return False
        return True
class NumericValidator(Validator):
    """Checks that a value can be converted to a ``float``."""
    def validate(self, value):
        try:
            float(value)
        except (ValueError, TypeError):
            return False
        return True
class StringValidator(Validator):
    """Checks that a value is a (byte or unicode) string."""
    def validate(self, value):
        is_string = isinstance(value, basestring)
        return is_string
class DigitsValidator(Validator):
    """Checks that a numeric value prints as exactly *digits* characters.

    The value must first pass NumericValidator; the digit count is taken
    from ``str(value)``, so signs and decimal points count as characters.
    """
    def __init__(self):
        self.numeric = NumericValidator()
    def validate(self, value, digits):
        if not self.numeric.validate(value):
            return False
        return len(str(value)) == int(digits)
        # (An unreachable duplicated float() check that followed this
        # return has been removed.)
class SizeValidator(Validator):
    """Checks that ``len(value)`` equals *size*."""
    def validate(self, value, size):
        expected = int(size)
        return len(value) == expected
class BetweenValidator(Validator):
    """Checks that a value lies inside the inclusive range [min_, max_].

    Numeric values are compared directly; for non-numeric sized values
    (strings, lists, dicts, ...) their length is compared instead, which
    makes this validator consistent with MinValidator/MaxValidator.
    """
    def validate(self, value, min_, max_):
        if value is None:
            return False
        min_ = float(min_)
        max_ = float(max_)
        try:
            value = float(value)
        except (ValueError, TypeError):
            # Fall back to the length for sized values; previously a
            # TypeError (e.g. for lists) escaped uncaught and strings
            # were always rejected, unlike Min/MaxValidator.
            try:
                value = len(value)
            except TypeError:
                return False
        return min_ <= value <= max_
class MinValidator(Validator):
    """Checks that a value is at least *min_*.

    Numeric values are compared directly; otherwise the value's length
    is used (strings, collections).
    """
    def validate(self, value, min_):
        if value is None:
            return False
        threshold = float(min_)
        try:
            magnitude = float(value)
        except (ValueError, TypeError):
            try:
                magnitude = len(value)
            except (ValueError, TypeError):
                return False
        return magnitude >= threshold
class MaxValidator(Validator):
    """Checks that a value is at most *max_*.

    Numeric values are compared directly; otherwise the value's length
    is used (strings, collections).
    """
    def validate(self, value, max_):
        if value is None:
            return False
        threshold = float(max_)
        try:
            magnitude = float(value)
        except (ValueError, TypeError):
            try:
                magnitude = len(value)
            except (ValueError, TypeError):
                return False
        return magnitude <= threshold
class InValidator(Validator):
    """Checks that the value is one of the allowed choices."""
    def validate(self, value, *args):
        for choice in args:
            if value == choice:
                return True
        return False
class NotInValidator(Validator):
    """Checks that the value is not one of the forbidden choices."""
    def __init__(self):
        self.inValidator = InValidator()
    def validate(self, value, *args):
        forbidden = self.inValidator.validate(value, *args)
        return not forbidden
class IpValidator(Validator):
    """Checks that a value parses as an IPv4 address (``inet_aton``)."""
    def validate(self, value):
        try:
            socket.inet_aton(value)
        except socket.error:
            return False
        return True
class EmailValidator(Validator):
    """Checks that a value roughly looks like an e-mail address.

    This is a lightweight sanity check, not full RFC 5322 validation.
    Returns the match object (truthy) or None.
    """
    # Raw string with the dot before the TLD escaped: the original
    # pattern used a bare '.', which matches ANY character, so inputs
    # such as 'user@hostXcom' were incorrectly accepted.
    _PATTERN = re.compile(r"^[a-zA-Z0-9._%-]+@[a-zA-Z0-9._%-]+\.[a-zA-Z]{2,6}$")
    def validate(self, value):
        return self._PATTERN.match(value)
class UrlValidator(Validator):
    """Checks that a value looks like an http(s)/ftp(s) URL."""
    def __init__(self):
        # Compiled once here so repeated validate() calls reuse it.
        self.regex = re.compile(
            r'^(?:http|ftp)s?://' # http:// or https://
            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
            r'localhost|' #localhost...
            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
            r'(?::\d+)?' # optional port
            r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    def validate(self, value):
        """Return the match object (truthy) if *value* is a URL, else None."""
        return self.regex.match(value)
| |
# -*- coding: utf-8 -*-
"""
flask_oauth
~~~~~~~~~~~
Implements basic OAuth support for Flask.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import httplib2
from functools import wraps
# urlparse module has been renamed in Python 3.x
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from flask import request, session, json, redirect, Response
from werkzeug import url_decode, url_encode, url_quote, \
parse_options_header, Headers
import oauth2
# Cached ElementTree implementation; populated lazily by get_etree().
_etree = None
def get_etree():
    """Return an elementtree implementation. Prefers lxml.

    The chosen module is cached in the module-level ``_etree`` so the
    import dance only happens once.

    :raises TypeError: if no ElementTree implementation is available.
    """
    global _etree
    if _etree is None:
        import importlib
        # Try the candidates in order of preference instead of the
        # original triple-nested try/except.
        for module_name in ('lxml.etree',
                            'xml.etree.cElementTree',
                            'xml.etree.ElementTree'):
            try:
                _etree = importlib.import_module(module_name)
                break
            except ImportError:
                continue
        else:
            raise TypeError('lxml or etree not found')
    return _etree
def parse_response(resp, content, strict=False):
    """Parse an HTTP response body based on its Content-Type header.

    JSON and XML payloads are decoded into Python objects; any other
    non-form-encoded payload is returned raw when *strict* is set.
    Everything else is URL-decoded into a plain dict.
    """
    ct, options = parse_options_header(resp['content-type'])
    if ct in ('application/json', 'text/javascript'):
        return json.loads(content)
    elif ct in ('application/xml', 'text/xml'):
        # technically, text/xml is ascii based but because many
        # implementations get that wrong and utf-8 is a superset
        # of ascii anyway, there is not much harm in assuming
        # utf-8 here
        charset = options.get('charset', 'utf-8')
        return get_etree().fromstring(content.decode(charset))
    elif ct != 'application/x-www-form-urlencoded':
        if strict:
            # Unknown content type: hand the raw body back unparsed.
            return content
    charset = options.get('charset', 'utf-8')
    return url_decode(content, charset=charset).to_dict()
def add_query(url, args):
    """Append *args* to *url* as a query string, honouring any existing
    query component.  Falsy *args* leave the URL untouched."""
    if not args:
        return url
    separator = '&' if '?' in url else '?'
    return url + separator + url_encode(args)
def encode_request_data(data, format):
    """Encode *data* for the given *format* ('json' or 'urlencoded').

    Returns a ``(body, content_type)`` pair; a ``None`` format passes
    the data through untouched with no content type.
    """
    if format is None:
        return data, None
    if format == 'json':
        return json.dumps(data or {}), 'application/json'
    if format == 'urlencoded':
        return url_encode(data or {}), 'application/x-www-form-urlencoded'
    raise TypeError('Unknown format %r' % format)
class OAuthResponse(object):
    """Contains the response sent back from an OAuth protected remote
    application.
    """
    def __init__(self, resp, content):
        #: a :class:`~werkzeug.Headers` object with the response headers
        #: the application sent.
        self.headers = Headers(resp)
        #: the raw, unencoded content from the server
        self.raw_data = content
        #: the parsed content from the server
        self.data = parse_response(resp, content, strict=True)
    @property
    def status(self):
        """The status code of the response."""
        # NOTE(review): relies on the transport exposing the HTTP status
        # as a 'status' header entry (httplib2 behaviour) -- confirm.
        return self.headers.get('status', type=int)
class OAuthClient(oauth2.Client):
    """oauth2 client extended with an explicit "request a new token" call."""
    def request_new_token(self, uri, callback=None, params=None):
        """POST a signed token request to *uri*.

        :param uri: the token endpoint to hit.
        :param callback: optional ``oauth_callback`` value to include.
        :param params: optional extra request parameters.
        :return: the ``(response, content)`` pair from httplib2.
        """
        # Copy into a fresh dict: the original used a mutable default
        # argument ({}) and wrote the callback into it, leaking the
        # callback into every later call that omitted `params`.
        params = dict(params or {})
        if callback is not None:
            params['oauth_callback'] = callback
        req = oauth2.Request.from_consumer_and_token(
            self.consumer, token=self.token,
            http_method='POST', http_url=uri, parameters=params,
            is_form_encoded=True)
        req.sign_request(self.method, self.consumer, self.token)
        body = req.to_postdata()
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded',
            'Content-Length': str(len(body))
        }
        # Call httplib2.Http.request directly to bypass oauth2.Client's
        # own request() signing, which we already did above.
        return httplib2.Http.request(self, uri, method='POST',
                                     body=body, headers=headers)
class OAuthException(RuntimeError):
    """Raised if authorization fails for some reason."""
    message = None
    type = None
    def __init__(self, message, type=None, data=None):
        # Initialise RuntimeError so `args`, repr() and pickling behave
        # like a normal exception (the original never called it).
        RuntimeError.__init__(self, message)
        #: A helpful error message for debugging
        self.message = message
        #: A unique type for this exception if available.
        self.type = type
        #: If available, the parsed data from the remote API that can be
        #: used to pinpoint the error.
        self.data = data
    def __str__(self):
        # Return the text itself: the original encoded to UTF-8 and
        # returned *bytes*, which makes str(exc) raise TypeError on
        # Python 3.
        return self.message
    def __unicode__(self):
        return self.message
class OAuth(object):
    """Registry for remote applications. In the future this will also
    be the central class for OAuth provider functionality.
    """
    def __init__(self):
        #: mapping of application name -> :class:`OAuthRemoteApp`
        self.remote_apps = {}
    def remote_app(self, name, register=True, **kwargs):
        """Registers a new remote application.  If `register` is set to
        `False` the application is not stored in the :attr:`remote_apps`
        dictionary.  All keyword arguments are forwarded to the
        :class:`OAuthRemoteApp` constructor.
        """
        remote = OAuthRemoteApp(self, name, **kwargs)
        if not register:
            return remote
        assert name not in self.remote_apps, \
            'application already registered'
        self.remote_apps[name] = remote
        return remote
class OAuthRemoteApp(object):
    """Represents a remote application.
    :param oauth: the associated :class:`OAuth` object.
    :param name: the name of the remote application
    :param request_token_url: the URL for requesting new tokens
    :param access_token_url: the URL for token exchange
    :param authorize_url: the URL for authorization
    :param consumer_key: the application specific consumer key
    :param consumer_secret: the application specific consumer secret
    :param request_token_params: an optional dictionary of parameters
                                 to forward to the request token URL
                                 or authorize URL depending on oauth
                                 version.
    :param access_token_params: an optional dictionary of parameters to
                                forward to the access token URL
    :param access_token_method: the HTTP method that should be used
                                for the access_token_url. Defaults
                                to ``'GET'``.
    """
    def __init__(self, oauth, name, base_url,
                 request_token_url,
                 access_token_url, authorize_url,
                 consumer_key, consumer_secret,
                 request_token_params=None,
                 access_token_params=None,
                 access_token_method='GET'):
        self.oauth = oauth
        #: the `base_url` all URLs are joined with.
        self.base_url = base_url
        self.name = name
        self.request_token_url = request_token_url
        self.access_token_url = access_token_url
        self.authorize_url = authorize_url
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        # set later via the tokengetter() decorator
        self.tokengetter_func = None
        self.request_token_params = request_token_params or {}
        self.access_token_params = access_token_params or {}
        self.access_token_method = access_token_method
        self._consumer = oauth2.Consumer(self.consumer_key,
                                         self.consumer_secret)
        self._client = OAuthClient(self._consumer)
    def status_okay(self, resp):
        """Given request data, checks if the status is okay."""
        try:
            return int(resp['status']) in (200, 201)
        except ValueError:
            return False
    def get(self, *args, **kwargs):
        """Sends a ``GET`` request. Accepts the same parameters as
        :meth:`request`.
        """
        kwargs['method'] = 'GET'
        return self.request(*args, **kwargs)
    def post(self, *args, **kwargs):
        """Sends a ``POST`` request. Accepts the same parameters as
        :meth:`request`.
        """
        kwargs['method'] = 'POST'
        return self.request(*args, **kwargs)
    def put(self, *args, **kwargs):
        """Sends a ``PUT`` request. Accepts the same parameters as
        :meth:`request`.
        """
        kwargs['method'] = 'PUT'
        return self.request(*args, **kwargs)
    def delete(self, *args, **kwargs):
        """Sends a ``DELETE`` request. Accepts the same parameters as
        :meth:`request`.
        """
        kwargs['method'] = 'DELETE'
        return self.request(*args, **kwargs)
    def make_client(self, token=None):
        """Creates a new `oauth2` Client object with the token attached.
        Usually you don't have to do that but use the :meth:`request`
        method instead.
        """
        return oauth2.Client(self._consumer, self.get_request_token(token))
    def request(self, url, data="", headers=None, format='urlencoded',
                method='GET', content_type=None, token=None):
        """Sends a request to the remote server with OAuth tokens attached.
        The `url` is joined with :attr:`base_url` if the URL is relative.
        .. versionadded:: 0.12
           added the `token` parameter.
        :param url: where to send the request to
        :param data: the data to be sent to the server. If the request method
                     is ``GET`` the data is appended to the URL as query
                     parameters, otherwise encoded to `format` if the format
                     is given. If a `content_type` is provided instead, the
                     data must be a string encoded for the given content
                     type and used as request body.
        :param headers: an optional dictionary of headers.
        :param format: the format for the `data`. Can be `urlencoded` for
                       URL encoded data or `json` for JSON.
        :param method: the HTTP request method to use.
        :param content_type: an optional content type. If a content type is
                             provided, the data is passed as it and the
                             `format` parameter is ignored.
        :param token: an optional token to pass to tokengetter. Use this if you
                      want to support sending requests using multiple tokens.
                      If you set this to anything not None, `tokengetter_func`
                      will receive the given token as an argument, in which case
                      the tokengetter should return the `(token, secret)` tuple
                      for the given token.
        :return: an :class:`OAuthResponse` object.
        """
        headers = dict(headers or {})
        client = self.make_client(token)
        url = self.expand_url(url)
        if method == 'GET':
            # GET: data travels in the query string, never in the body.
            assert format == 'urlencoded'
            if data:
                url = add_query(url, data)
            data = ""
        else:
            # Other methods: encode the body unless the caller supplied
            # pre-encoded data with an explicit content type.
            if content_type is None:
                data, content_type = encode_request_data(data, format)
            if content_type is not None:
                headers['Content-Type'] = content_type
        return OAuthResponse(*client.request(url, method=method,
                                             body=data or '',
                                             headers=headers))
    def expand_url(self, url):
        """Join *url* with :attr:`base_url` (absolute URLs pass through)."""
        return urljoin(self.base_url, url)
    def generate_request_token(self, callback=None):
        """Fetch a new OAuth1 request token and stash it in the session."""
        if callback is not None:
            callback = urljoin(request.url, callback)
        resp, content = self._client.request_new_token(
            self.expand_url(self.request_token_url), callback,
            self.request_token_params)
        if not self.status_okay(resp):
            raise OAuthException('Failed to generate request token',
                                 type='token_generation_failed')
        data = parse_response(resp, content)
        if data is None:
            raise OAuthException('Invalid token response from ' + self.name,
                                 type='token_generation_failed')
        tup = (data['oauth_token'], data['oauth_token_secret'])
        session[self.name + '_oauthtok'] = tup
        return tup
    def get_request_token(self, token=None):
        """Return the current request token as an :class:`oauth2.Token`."""
        assert self.tokengetter_func is not None, 'missing tokengetter function'
        # Don't pass the token if the token is None to support old
        # tokengetter functions.
        rv = self.tokengetter_func(*(token and (token,) or ()))
        if rv is None:
            # fall back to the token stored by generate_request_token()
            rv = session.get(self.name + '_oauthtok')
            if rv is None:
                raise OAuthException('No token available', type='token_missing')
        return oauth2.Token(*rv)
    def free_request_token(self):
        """Drop the temporary token/redirect data from the session."""
        session.pop(self.name + '_oauthtok', None)
        session.pop(self.name + '_oauthredir', None)
    def authorize(self, callback=None):
        """Returns a redirect response to the remote authorization URL with
        the signed callback given. The callback must be `None` in which
        case the application will most likely switch to PIN based authentication
        or use a remotely stored callback URL. Alternatively it's an URL
        on the system that has to be decorated as :meth:`authorized_handler`.
        """
        if self.request_token_url:
            # OAuth1: obtain a request token first, then redirect with it.
            token = self.generate_request_token(callback)[0]
            url = '%s?oauth_token=%s' % (self.expand_url(self.authorize_url),
                                         url_quote(token))
        else:
            # OAuth2 (no request-token step): redirect with client id and
            # callback directly.
            assert callback is not None, 'Callback is required OAuth2'
            # This is for things like facebook's oauth. Since we need the
            # callback for the access_token_url we need to keep it in the
            # session.
            params = dict(self.request_token_params)
            params['redirect_uri'] = callback
            params['client_id'] = self.consumer_key
            params['response_type'] = 'code'
            session[self.name + '_oauthredir'] = callback
            url = add_query(self.expand_url(self.authorize_url), params)
        return redirect(url)
    def tokengetter(self, f):
        """Registers a function as tokengetter. The tokengetter has to return
        a tuple of ``(token, secret)`` with the user's token and token secret.
        If the data is unavailable, the function must return `None`.
        If the `token` parameter is passed to the request function it's
        forwarded to the tokengetter function::
            @oauth.tokengetter
            def get_token(token='user'):
                if token == 'user':
                    return find_the_user_token()
                elif token == 'app':
                    return find_the_app_token()
                raise RuntimeError('invalid token')
        """
        self.tokengetter_func = f
        return f
    def handle_oauth1_response(self):
        """Handles an oauth1 authorization response. The return value of
        this method is forwarded as first argument to the handling view
        function.
        """
        client = self.make_client()
        resp, content = client.request('%s?oauth_verifier=%s' % (
            self.expand_url(self.access_token_url),
            request.args['oauth_verifier']
        ), self.access_token_method)
        data = parse_response(resp, content)
        if not self.status_okay(resp):
            raise OAuthException('Invalid response from ' + self.name,
                                 type='invalid_response', data=data)
        return data
    def handle_oauth2_response(self):
        """Handles an oauth2 authorization response. The return value of
        this method is forwarded as first argument to the handling view
        function.
        """
        remote_args = {
            'code': request.args.get('code'),
            'client_id': self.consumer_key,
            'client_secret': self.consumer_secret,
            'redirect_uri': session.get(self.name + '_oauthredir')
        }
        remote_args.update(self.access_token_params)
        # The token exchange can be POST (form body) or GET (query args)
        # depending on the provider.
        if self.access_token_method == 'POST':
            resp, content = self._client.request(self.expand_url(self.access_token_url),
                                                 self.access_token_method,
                                                 url_encode(remote_args))
        elif self.access_token_method == 'GET':
            url = add_query(self.expand_url(self.access_token_url), remote_args)
            resp, content = self._client.request(url, self.access_token_method)
        else:
            raise OAuthException('Unsupported access_token_method: ' +
                                 self.access_token_method)
        data = parse_response(resp, content)
        if not self.status_okay(resp):
            raise OAuthException('Invalid response from ' + self.name,
                                 type='invalid_response', data=data)
        return data
    def handle_unknown_response(self):
        """Called if an unknown response came back from the server. This
        usually indicates a denied response. The default implementation
        just returns `None`.
        """
        return None
    def authorized_handler(self, f):
        """Injects additional authorization functionality into the function.
        The function will be passed the response object as first argument
        if the request was allowed, or `None` if access was denied. When the
        authorized handler is called, the temporary issued tokens are already
        destroyed.
        """
        @wraps(f)
        def decorated(*args, **kwargs):
            # Dispatch on what the provider sent back: oauth1 verifier,
            # oauth2 code, or something unknown (usually a denial).
            if 'oauth_verifier' in request.args:
                data = self.handle_oauth1_response()
            elif 'code' in request.args:
                data = self.handle_oauth2_response()
            else:
                data = self.handle_unknown_response()
            self.free_request_token()
            return f(*((data,) + args), **kwargs)
        return decorated
| |
from __future__ import division
from itertools import product
import numpy as np
import scipy.sparse as sp
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import issparse
from scipy.sparse import lil_matrix
from sklearn.externals.six import iteritems
from sklearn.externals.six.moves import xrange
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.multiclass import class_distribution
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
class NotAnArray(object):
    """An object that is convertable to an array. This is useful to
    simulate a Pandas timeseries."""
    def __init__(self, data):
        self.data = data
    def __array__(self, dtype=None):
        # NumPy may pass the requested dtype to __array__; accept it for
        # protocol compatibility and return the wrapped data unchanged
        # (backward compatible: the argument defaults to None).
        return self.data
# Curated example targets keyed by the label that type_of_target() should
# return for them; consumed by the test functions below.
EXAMPLES = {
    'multilabel-indicator': [
        # valid when the data is formatted as sparse or dense, identified
        # by CSR format when the testing takes place
        csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
        csr_matrix(np.array([[0, 1], [1, 0]])),
        csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
        csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
        csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
        csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
        csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
        csr_matrix(np.array([[0, 0], [0, 0]])),
        csr_matrix(np.array([[0, 1]])),
        # Only valid when data is dense
        np.array([[-1, 1], [1, -1]]),
        np.array([[-3, 3], [3, -3]]),
        NotAnArray(np.array([[-3, 3], [3, -3]])),
    ],
    'multiclass': [
        [1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
        np.array([1, 0, 2]),
        np.array([1, 0, 2], dtype=np.int8),
        np.array([1, 0, 2], dtype=np.uint8),
        np.array([1, 0, 2], dtype=np.float),
        np.array([1, 0, 2], dtype=np.float32),
        np.array([[1], [0], [2]]),
        NotAnArray(np.array([1, 0, 2])),
        [0, 1, 2],
        ['a', 'b', 'c'],
        np.array([u'a', u'b', u'c']),
        np.array([u'a', u'b', u'c'], dtype=object),
        np.array(['a', 'b', 'c'], dtype=object),
    ],
    'multiclass-multioutput': [
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
        np.array([['a', 'b'], ['c', 'd']]),
        np.array([[u'a', u'b'], [u'c', u'd']]),
        np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
        np.array([[1, 0, 2]]),
        NotAnArray(np.array([[1, 0, 2]])),
    ],
    'binary': [
        [0, 1],
        [1, 1],
        [],
        [0],
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
        np.array([[0], [1]]),
        NotAnArray(np.array([[0], [1]])),
        [1, -1],
        [3, 5],
        ['a'],
        ['a', 'b'],
        ['abc', 'def'],
        np.array(['abc', 'def']),
        [u'a', u'b'],
        np.array(['abc', 'def'], dtype=object),
    ],
    'continuous': [
        [1e-5],
        [0, .5],
        np.array([[0], [.5]]),
        np.array([[0], [.5]], dtype=np.float32),
    ],
    'continuous-multioutput': [
        np.array([[0, .5], [.5, 0]]),
        np.array([[0, .5], [.5, 0]], dtype=np.float32),
        np.array([[0, .5]]),
    ],
    'unknown': [
        [[]],
        [()],
        # sequence of sequences that weren't supported even before deprecation
        np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
        [np.array([]), np.array([1, 2, 3])],
        [set([1, 2, 3]), set([1, 2])],
        [frozenset([1, 2, 3]), frozenset([1, 2])],
        # and also confusable as sequences of sequences
        [{0: 'a', 1: 'b'}, {0: 'a'}],
        # empty second dimension
        np.array([[], []]),
        # 3d
        np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
    ]
}
# Inputs that are not array-like at all; every target-inspection helper
# must reject these with ValueError.
NON_ARRAY_LIKE_EXAMPLES = [
    set([1, 2, 3]),
    {0: 'a', 1: 'b'},
    {0: [5], 1: [5]},
    'abc',
    frozenset([1, 2, 3]),
    None,
]
# Legacy "sequence of sequences" multilabel representations, which must be
# rejected with the dedicated deprecation error message.
MULTILABEL_SEQUENCES = [
    [[1], [2], [0, 1]],
    # NOTE(review): `(2)` is a plain int, not a tuple -- presumably meant to
    # mix a scalar into the legacy sequence; confirm it is not a typo for
    # `(2,)`.
    [(), (2), (0, 1)],
    np.array([[], [1, 2]], dtype='object'),
    NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
    """unique_labels: empty input errors, multiclass and indicator inputs
    yield sorted label arrays, and mixed representations are rejected."""
    # Empty iterable
    assert_raises(ValueError, unique_labels)
    # Multiclass problem
    assert_array_equal(unique_labels(xrange(10)), np.arange(10))
    assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
    assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
    # Multilabel indicator
    assert_array_equal(unique_labels(np.array([[0, 0, 1],
                                               [1, 0, 1],
                                               [0, 0, 0]])),
                       np.arange(3))
    assert_array_equal(unique_labels(np.array([[0, 0, 1],
                                               [0, 0, 0]])),
                       np.arange(3))
    # Several arrays passed
    assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
                       np.arange(5))
    assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
                       np.arange(3))
    # Border line case with binary indicator matrix
    assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
    assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
    assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
                       np.arange(5))
def test_unique_labels_non_specific():
    """Smoke-test unique_labels over the curated examples: supported
    formats succeed, unsupported formats raise ValueError."""
    # Smoke test for all supported format
    for format in ["binary", "multiclass", "multilabel-indicator"]:
        for y in EXAMPLES[format]:
            unique_labels(y)
    # We don't support those format at the moment
    for example in NON_ARRAY_LIKE_EXAMPLES:
        assert_raises(ValueError, unique_labels, example)
    for y_type in ["unknown", "continuous", 'continuous-multioutput',
                   'multiclass-multioutput']:
        for example in EXAMPLES[y_type]:
            assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
    """unique_labels must refuse mixtures of label representations
    (indicator vs. class labels, string vs. numeric labels)."""
    # Mix with binary or multiclass and multilabel
    mix_clf_format = product(EXAMPLES["multilabel-indicator"],
                             EXAMPLES["multiclass"] +
                             EXAMPLES["binary"])
    for y_multilabel, y_multiclass in mix_clf_format:
        assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
        assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
    assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
    assert_raises(ValueError, unique_labels, ["1", 2])
    assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
    assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
    """is_multilabel must accept exactly the multilabel-indicator examples,
    in both dense and (where convertible) sparse form."""
    for group, group_examples in iteritems(EXAMPLES):
        # Dense expectation: only the multilabel-indicator group is True.
        if group in ['multilabel-indicator']:
            dense_assert_, dense_exp = assert_true, 'True'
        else:
            dense_assert_, dense_exp = assert_false, 'False'
        for example in group_examples:
            # Only mark explicitly defined sparse examples as valid sparse
            # multilabel-indicators
            if group == 'multilabel-indicator' and issparse(example):
                sparse_assert_, sparse_exp = assert_true, 'True'
            else:
                sparse_assert_, sparse_exp = assert_false, 'False'
            # Convert dense 2-d numeric examples into every sparse format
            # and check the sparse expectation on each.
            if (issparse(example) or
                (hasattr(example, '__array__') and
                 np.asarray(example).ndim == 2 and
                 np.asarray(example).dtype.kind in 'biuf' and
                 np.asarray(example).shape[1] > 0)):
                examples_sparse = [sparse_matrix(example)
                                   for sparse_matrix in [coo_matrix,
                                                         csc_matrix,
                                                         csr_matrix,
                                                         dok_matrix,
                                                         lil_matrix]]
                for exmpl_sparse in examples_sparse:
                    sparse_assert_(is_multilabel(exmpl_sparse),
                                   msg=('is_multilabel(%r)'
                                        ' should be %s')
                                   % (exmpl_sparse, sparse_exp))
            # Densify sparse examples before testing
            if issparse(example):
                example = example.toarray()
            dense_assert_(is_multilabel(example),
                          msg='is_multilabel(%r) should be %s'
                              % (example, dense_exp))
def test_check_classification_targets():
    """check_classification_targets must reject continuous/unknown targets
    with 'Unknown label type' and accept every classification format."""
    for y_type in EXAMPLES.keys():
        if y_type in ["unknown", "continuous", 'continuous-multioutput']:
            for example in EXAMPLES[y_type]:
                msg = 'Unknown label type: '
                assert_raises_regex(ValueError, msg,
                                    check_classification_targets, example)
        else:
            for example in EXAMPLES[y_type]:
                check_classification_targets(example)
# @ignore_warnings
def test_type_of_target():
    """type_of_target must classify every curated example correctly and
    reject non-array-likes and legacy sequence-of-sequences input."""
    for group, group_examples in iteritems(EXAMPLES):
        for example in group_examples:
            assert_equal(type_of_target(example), group,
                         msg=('type_of_target(%r) should be %r, got %r'
                              % (example, group, type_of_target(example))))
    for example in NON_ARRAY_LIKE_EXAMPLES:
        # Raw string: the original non-raw literal relied on the invalid
        # escape sequence '\(' which emits a SyntaxWarning (and will be an
        # error) on modern Python.
        msg_regex = r'Expected array-like \(array or non-string sequence\).*'
        assert_raises_regex(ValueError, msg_regex, type_of_target, example)
    for example in MULTILABEL_SEQUENCES:
        msg = ('You appear to be using a legacy multi-label data '
               'representation. Sequence of sequences are no longer supported;'
               ' use a binary array or sparse matrix instead.')
        assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
    """class_distribution must agree between dense and sparse targets,
    both unweighted and with explicit sample weights."""
    y = np.array([[1, 0, 0, 1],
                  [2, 2, 0, 1],
                  [1, 3, 0, 1],
                  [4, 2, 0, 1],
                  [2, 0, 0, 1],
                  [1, 3, 0, 1]])
    # Define the sparse matrix with a mix of implicit and explicit zeros
    data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
    indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
    indptr = np.array([0, 6, 11, 11, 17])
    y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
    classes, n_classes, class_prior = class_distribution(y)
    classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
    classes_expected = [[1, 2, 4],
                        [0, 2, 3],
                        [0],
                        [1]]
    n_classes_expected = [3, 3, 1, 1]
    class_prior_expected = [[3 / 6, 2 / 6, 1 / 6],
                            [1 / 3, 1 / 3, 1 / 3],
                            [1.0],
                            [1.0]]
    for k in range(y.shape[1]):
        assert_array_almost_equal(classes[k], classes_expected[k])
        assert_array_almost_equal(n_classes[k], n_classes_expected[k])
        assert_array_almost_equal(class_prior[k], class_prior_expected[k])
        assert_array_almost_equal(classes_sp[k], classes_expected[k])
        assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
        assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
    # Test again with explicit sample weights
    (classes,
     n_classes,
     class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
    # BUG FIX: the sparse run below previously re-used the dense `y`, so
    # the sparse code path was never exercised with sample weights.
    (classes_sp,
     n_classes_sp,
     class_prior_sp) = class_distribution(y_sp, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
    class_prior_expected = [[4 / 9, 3 / 9, 2 / 9],
                            [2 / 9, 4 / 9, 3 / 9],
                            [1.0],
                            [1.0]]
    for k in range(y.shape[1]):
        assert_array_almost_equal(classes[k], classes_expected[k])
        assert_array_almost_equal(n_classes[k], n_classes_expected[k])
        assert_array_almost_equal(class_prior[k], class_prior_expected[k])
        assert_array_almost_equal(classes_sp[k], classes_expected[k])
        assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
        assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| |
#!/usr/bin/env python
from __future__ import print_function
import math, sys
import numpy as np
"""
This is a version of BFGS specialized for the case where the function
is constrained to a particular convex region via a barrier function,
and where we can efficiently evaluate (via calling f_finite(x), which
returns bool) whether the function is finite at the given point.
x0 The value to start the optimization at.
f The function being minimized. f(x) returns a pair (value, gradient).
f_finite f_finite(x) returns true if f(x) would be finite, and false otherwise.
init_hessian This gives you a way to specify a "better guess" at the initial
Hessian.
return Returns a 4-tuple (x, f(x), f'(x), inverse-hessian-approximation).
"""
def Bfgs(x0, f, f_finite, init_inv_hessian=None,
         gradient_tolerance=0.0005, progress_tolerance=1.0e-06):
    """Run barrier-aware BFGS minimization starting from x0.

    Thin convenience wrapper: builds the internal __bfgs state object and
    drives it to convergence.

    Returns a 4-tuple (x, f(x), f'(x), inverse-Hessian approximation).
    """
    optimizer = __bfgs(x0, f, f_finite,
                       init_inv_hessian=init_inv_hessian,
                       gradient_tolerance=gradient_tolerance,
                       progress_tolerance=progress_tolerance)
    return optimizer.Minimize()
class __bfgs:
    """Internal implementation for Bfgs(); callers should use Bfgs().

    BFGS with a strong-Wolfe line search (Nocedal & Wright, algorithms 3.5
    and 3.6, inverse-Hessian update eq. 6.17), specialized for objectives
    that are only finite inside a convex region (barrier functions): each
    trial point is first screened with the cheap f_finite() callback.
    """

    def __init__(self, x0, f, f_finite, init_inv_hessian = None,
                 gradient_tolerance = 0.0005, progress_tolerance = 1.0e-06,
                 progress_tolerance_num_iters = 3):
        """Set up optimizer state and evaluate f at the starting point x0.

        x0 must be a 1-d numpy array; f(x) returns (value, gradient);
        f_finite(x) returns a bool. Exits the process if f is not finite
        at x0.
        """
        self.c1 = 1.0e-04 # constant used in line search (sufficient decrease)
        self.c2 = 0.9 # constant used in line search (curvature condition)
        assert len(x0.shape) == 1
        self.dim = x0.shape[0]
        self.f = f
        self.f_finite = f_finite
        self.gradient_tolerance = gradient_tolerance
        self.num_restarts = 0
        self.progress_tolerance = progress_tolerance
        assert progress_tolerance_num_iters >= 1
        self.progress_tolerance_num_iters = progress_tolerance_num_iters
        if not self.f_finite(x0):
            self.LogMessage("Function is not finite at initial point {0}".format(x0))
            sys.exit(1)
        # evaluations will be a list of 3-tuples (x, function-value f(x),
        # function-derivative f'(x)). it's written to and read from by the
        # function self.FunctionValueAndDerivative().
        self.cached_evaluations = [ ]
        self.x = [ x0 ]
        (value0, deriv0) = self.FunctionValueAndDerivative(x0)
        self.value = [ value0 ]
        self.deriv = [ deriv0 ]
        deriv_magnitude = math.sqrt(np.dot(deriv0, deriv0))
        self.LogMessage("On iteration 0, value is %.6f, deriv-magnitude %.6f" %
                        (value0, deriv_magnitude))
        # note: self.inv_hessian is referred to as H_k in the Nocedal
        # and Wright textbook.
        if init_inv_hessian is None:
            self.inv_hessian = np.identity(self.dim)
        else:
            self.inv_hessian = init_inv_hessian

    def Minimize(self):
        """Iterate until Converged(); return (x, f(x), f'(x), inv-Hessian)."""
        while not self.Converged():
            self.Iterate()
        self.FinalDebugOutput()
        return (self.x[-1], self.value[-1], self.deriv[-1], self.inv_hessian)

    def FinalDebugOutput(self):
        # currently this does nothing.
        pass

    # This does one iteration of update.
    def Iterate(self):
        """One BFGS step: line search along p = -H g, then update H via eq. 6.17.

        On line-search failure the inverse Hessian is reset to identity and
        the step is skipped (a restart).
        """
        self.p = - np.dot(self.inv_hessian, self.deriv[-1])
        alpha = self.LineSearch()
        if alpha is None:
            self.LogMessage("Restarting BFGS with unit Hessian since line search failed")
            self.inv_hessian = np.identity(self.dim)
            self.num_restarts += 1
            return
        cur_x = self.x[-1]
        next_x = cur_x + alpha * self.p
        (next_value, next_deriv) = self.FunctionValueAndDerivative(next_x)
        next_deriv_magnitude = math.sqrt(np.dot(next_deriv, next_deriv))
        self.LogMessage("On iteration %d, value is %.6f, deriv-magnitude %.6f" %
                        (len(self.x), next_value, next_deriv_magnitude))
        # obtain s_k = x_{k+1} - x_k, y_k = gradient_{k+1} - gradient_{k}
        # see eq. 6.5 in Nocedal and Wright.
        self.x.append(next_x)
        self.value.append(next_value)
        self.deriv.append(next_deriv)
        s_k = alpha * self.p
        y_k = self.deriv[-1] - self.deriv[-2]
        ysdot = np.dot(s_k, y_k)
        # a Wolfe-satisfying step guarantees ysdot > 0; a failure here means
        # something inconsistent happened upstream.
        if not ysdot > 0:
            self.LogMessage("Restarting BFGS with unit Hessian since curvature "
                            "condition failed [likely a bug in the optimization code]")
            self.inv_hessian = np.identity(self.dim)
            return
        rho_k = 1.0 / ysdot # eq. 6.14 in Nocedal and Wright.
        # the next equation is eq. 6.17 in Nocedal and Wright.
        # the following comment is the simple but inefficient version
        # I = np.identity(self.dim)
        # self.inv_hessian = ((I - np.outer(s_k, y_k) * rho_k) * self.inv_hessian *
        #                     (I - np.outer(y_k, s_k) * rho_k)) + np.outer(s_k, s_k) * rho_k
        z_k = np.dot(self.inv_hessian, y_k)
        self.inv_hessian += np.outer(s_k, s_k) * (ysdot + np.dot(y_k,z_k)) * rho_k**2 - \
            (np.outer(z_k, s_k) + np.outer(s_k, z_k)) * rho_k

    # the function LineSearch is to be called after you have set self.x and
    # self.p. It returns an alpha value satisfying the strong Wolfe conditions,
    # or None if the line search failed. It is Algorithm 3.5 of Nocedal and
    # Wright.
    def LineSearch(self):
        alpha_max = 1.0e+10
        alpha1 = self.GetDefaultAlpha()
        increase_factor = 2.0 # amount by which we increase alpha if
                              # needed... after the 1st time we make it 4.
        if alpha1 is None:
            self.LogMessage("Line search failed unexpectedly in making sure "
                            "f(x) is finite.")
            return None
        alpha = [ 0.0, alpha1 ]
        (phi_0, phi_dash_0) = self.FunctionValueAndDerivativeForAlpha(0.0)
        phi = [phi_0]
        phi_dash = [phi_dash_0]
        if phi_dash_0 >= 0.0:
            # NOTE(review): the "{0}" placeholder below is never filled in
            # (no .format() call), so it is printed literally.
            self.LogMessage("{0}: line search failed unexpectedly: not a descent "
                            "direction")
            return None
        while True:
            i = len(phi)
            alpha_i = alpha[-1]
            (phi_i, phi_dash_i) = self.FunctionValueAndDerivativeForAlpha(alpha_i)
            phi.append(phi_i)
            phi_dash.append(phi_dash_i)
            # sufficient-decrease violated (or value not improving): the
            # acceptable interval lies between the previous and current alpha.
            if (phi_i > phi_0 + self.c1 * alpha_i * phi_dash_0 or
                (i > 1 and phi_i >= phi[-2])):
                return self.Zoom(alpha[-2], alpha_i)
            if abs(phi_dash_i) <= -self.c2 * phi_dash_0:
                self.LogMessage("Line search: accepting default alpha = {0}".format(alpha_i))
                return alpha_i
            if phi_dash_i >= 0:
                return self.Zoom(alpha_i, alpha[-2])
            # the algorithm says "choose alpha_{i+1} \in (alpha_i, alpha_max).
            # the rest of this block is implementing that.
            next_alpha = alpha_i * increase_factor
            increase_factor = 4.0 # after we double once, we get more aggressive.
            if next_alpha > alpha_max:
                # something went wrong if alpha needed to get this large.
                # most likely we'll restart BFGS.
                self.LogMessage("Line search failed unexpectedly, went "
                                "past the max.");
                return None
            # make sure the function is finite at the next alpha, if possible.
            # we don't need to worry about efficiency too much, as this check
            # for finiteness is very fast.
            while next_alpha > alpha_i * 1.2 and not self.IsFiniteForAlpha(next_alpha):
                next_alpha *= 0.9
            while next_alpha > alpha_i * 1.02 and not self.IsFiniteForAlpha(next_alpha):
                next_alpha *= 0.99
            self.LogMessage("Increasing alpha from {0} to {1} in line search".format(alpha_i,
                                                                                     next_alpha))
            alpha.append(next_alpha)

    # This function, from Nocedal and Wright (alg. 3.6) is called from from
    # LineSearch. It returns the alpha value satisfying the strong Wolfe
    # conditions, or None if there was an error.
    def Zoom(self, alpha_lo, alpha_hi):
        # these function evaluations don't really happen, we use caching.
        (phi_0, phi_dash_0) = self.FunctionValueAndDerivativeForAlpha(0.0)
        (phi_lo, phi_dash_lo) = self.FunctionValueAndDerivativeForAlpha(alpha_lo)
        (phi_hi, phi_dash_hi) = self.FunctionValueAndDerivativeForAlpha(alpha_hi)
        min_diff = 1.0e-10
        while True:
            if abs(alpha_lo - alpha_hi) < min_diff:
                self.LogMessage("Line search failed, interval is too small: [{0},{1}]".format(
                        alpha_lo, alpha_hi))
                return None
            # the algorithm says "Interpolate (using quadratic, cubic or
            # bisection) to find a trial step length between alpha_lo and
            # alpha_hi. We basically choose bisection, but because alpha_lo is
            # guaranteed to always have a "better" (lower) function value than
            # alpha_hi, we actually want to be a little bit closer to alpha_lo,
            # so we go one third of the distance between alpha_lo and alpha_hi.
            alpha_j = alpha_lo + 0.3333 * (alpha_hi - alpha_lo)
            (phi_j, phi_dash_j) = self.FunctionValueAndDerivativeForAlpha(alpha_j)
            if phi_j > phi_0 + self.c1 * alpha_j * phi_dash_0 or phi_j >= phi_lo:
                (alpha_hi, phi_hi, phi_dash_hi) = (alpha_j, phi_j, phi_dash_j)
            else:
                if abs(phi_dash_j) <= - self.c2 * phi_dash_0:
                    self.LogMessage("Acceptable alpha is {0}".format(alpha_j))
                    return alpha_j
                if phi_dash_j * (alpha_hi - alpha_lo) >= 0.0:
                    (alpha_hi, phi_hi, phi_dash_hi) = (alpha_lo, phi_lo, phi_dash_lo)
                (alpha_lo, phi_lo, phi_dash_lo) = (alpha_j, phi_j, phi_dash_j)

    # The function GetDefaultAlpha(), called from LineSearch(), is to be called
    # after you have set self.x and self.p. It normally returns 1.0, but it
    # will reduce it by factors of 0.9 until the function evaluated at 1.5 * alpha
    # is finite. This is because generally speaking, approaching the edge of
    # the barrier function too rapidly will lead to poor function values. Note:
    # evaluating whether the function is finite is very efficient.
    # If the function was not finite even at very tiny alpha, then something
    # probably went wrong; we'll restart BFGS in this case.
    def GetDefaultAlpha(self):
        alpha_factor = 1.5 # this should be strictly > 1.
        min_alpha = 1.0e-10
        alpha = 1.0
        while alpha > min_alpha and not self.IsFiniteForAlpha(alpha * alpha_factor):
            alpha *= 0.9
        return alpha if alpha > min_alpha else None

    # this function, called from LineSearch(), returns true if the function is finite
    # at the given alpha value.
    def IsFiniteForAlpha(self, alpha):
        x = self.x[-1] + self.p * alpha
        return self.f_finite(x)

    def FunctionValueAndDerivativeForAlpha(self, alpha):
        """Evaluate phi(alpha) = f(x + alpha*p); return (value, dphi/dalpha).

        The directional derivative is the gradient dotted with self.p.
        """
        x = self.x[-1] + self.p * alpha
        (value, deriv) = self.FunctionValueAndDerivative(x)
        return (value, np.dot(self.p, deriv))

    def Converged(self):
        """Return True when optimization should stop.

        Stops when the gradient magnitude is below gradient_tolerance, when
        BFGS has been restarted more than once, or when the amortized
        objective change over the last few iterations falls below
        progress_tolerance.
        """
        current_gradient = self.deriv[-1]
        gradient_magnitude = math.sqrt(np.dot(current_gradient, current_gradient))
        if gradient_magnitude < self.gradient_tolerance:
            self.LogMessage("BFGS converged on iteration {0} due to gradient magnitude {1} "
                            "less than gradient tolerance {2}".format(
                    len(self.x), "%.6f" % gradient_magnitude, self.gradient_tolerance))
            return True
        if self.num_restarts > 1:
            self.LogMessage("Restarted BFGS computation twice: declaring convergence to avoid a loop")
            return True
        n = self.progress_tolerance_num_iters
        if len(self.x) > n:
            cur_value = self.value[-1]
            prev_value = self.value[-1 - n]
            # the following will be nonnegative.
            change_per_iter_amortized = (prev_value - cur_value) / n
            if change_per_iter_amortized < self.progress_tolerance:
                self.LogMessage("BFGS converged on iteration {0} due to objf-change per "
                                "iteration amortized over {1} iterations = {2} < "
                                "threshold = {3}.".format(
                        len(self.x), n, change_per_iter_amortized, self.progress_tolerance))
                return True
        return False

    # this returns the function value and derivative for x, as a tuple; it
    # does caching.
    def FunctionValueAndDerivative(self, x):
        # linear scan over previously evaluated points; cheap compared with
        # an actual evaluation of f.
        for i in range(len(self.cached_evaluations)):
            if np.array_equal(x, self.cached_evaluations[i][0]):
                return (self.cached_evaluations[i][1],
                        self.cached_evaluations[i][2])
        # we didn't find it cached, so we need to actually evaluate the
        # function. this is where it gets slow.
        (value, deriv) = self.f(x)
        self.cached_evaluations.append((x, value, deriv))
        return (value, deriv)

    def LogMessage(self, message):
        """Write a progress message, prefixed by the program name, to stderr."""
        print(sys.argv[0] + ": " + message, file=sys.stderr)
def __TestFunction(x):
    """Quadratic test objective f(x) = a.x + x^T B x and its gradient.

    Here a = (1, ..., 15) and B = diag(5, ..., 19).  Returns the pair
    (value, gradient) where gradient = a + 2 B x.
    """
    dim = 15
    lin_coeffs = np.array(range(1, dim + 1))
    quad_matrix = np.diag(range(5, dim + 5))
    # Share the matrix-vector product between the value and the gradient.
    Bx = np.dot(quad_matrix, x)
    value = np.dot(x, lin_coeffs) + np.dot(x, Bx)
    gradient = lin_coeffs + 2.0 * Bx
    return (value, gradient)
def __TestBfgs():
    """Smoke test: minimize the quadratic __TestFunction from a fixed start.

    The objective is finite everywhere, so the finiteness callback always
    returns True.  The result is unpacked but not checked automatically.
    """
    dim = 15
    # start well away from the optimum so the optimizer does real work.
    x0 = np.array(range(10, dim + 10))
    (a,b,c,d) = Bfgs(x0, __TestFunction, lambda x : True, )
#__TestBfgs()
| |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Integration tests for ssh module.
"""
from __future__ import print_function
import os
import random
import socket
import subprocess
from helpers import unittest
import target_test
from luigi.contrib.ssh import RemoteContext, RemoteFileSystem, RemoteTarget, RemoteCalledProcessError
from luigi.target import MissingParentDirectory, FileAlreadyExists
working_ssh_host = os.environ.get('SSH_TEST_HOST', 'localhost')
# set this to a working ssh host string (e.g. "localhost") to activate integration tests
# The following tests require a working ssh server at `working_ssh_host`
# the test runner can ssh into using password-less authentication
# since `nc` has different syntax on different platforms
# we use a short python command to start
# a 'hello'-server on the remote machine
# Python snippet executed on the remote host: a one-shot TCP server on
# localhost:2134 that prints 'ready' once it is listening, then sends
# b'hello' to the first client that connects.
HELLO_SERVER_CMD = """
import socket, sys
listener = socket.socket()
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(('localhost', 2134))
listener.listen(1)
sys.stdout.write('ready')
sys.stdout.flush()
conn = listener.accept()[0]
conn.sendall(b'hello')
"""

# Probe the ssh server once at import time: run a trivial remote command
# and skip this whole test module if it does not succeed.
try:
    # BatchMode=yes forces non-interactive auth so a missing key fails fast
    # instead of prompting; -S none disables connection multiplexing.
    x = subprocess.check_output(
        "ssh %s -S none -o BatchMode=yes 'echo 1'" % working_ssh_host,
        shell=True
    )
    if x != b'1\n':
        raise unittest.SkipTest('Not able to connect to ssh server')
except Exception:
    raise unittest.SkipTest('Not able to connect to ssh server')
class TestRemoteContext(unittest.TestCase):
    """Exercises RemoteContext command execution and tunneling over real ssh."""

    def setUp(self):
        self.context = RemoteContext(working_ssh_host)

    def tearDown(self):
        # kill the remote helper process if a test started one; harmless
        # (best-effort) when no test set remote_server_handle.
        try:
            self.remote_server_handle.terminate()
        except Exception:
            pass

    def test_check_output(self):
        """ Test check_output ssh
        Assumes the running user can ssh to working_ssh_host
        """
        output = self.context.check_output(["echo", "-n", "luigi"])
        self.assertEqual(output, b"luigi")

    def test_tunnel(self):
        """Start a remote hello-server and talk to it through an ssh tunnel."""
        print("Setting up remote listener...")
        self.remote_server_handle = self.context.Popen([
            "python", "-c", '"{0}"'.format(HELLO_SERVER_CMD)
        ], stdout=subprocess.PIPE)
        print("Setting up tunnel")
        # forward local port 2135 to remote port 2134 (where the server listens)
        with self.context.tunnel(2135, 2134):
            print("Tunnel up!")
            # hack to make sure the listener process is up
            # and running before we write to it
            server_output = self.remote_server_handle.stdout.read(5)
            self.assertEqual(server_output, b"ready")
            print("Connecting to server via tunnel")
            s = socket.socket()
            s.connect(("localhost", 2135))
            print("Receiving...",)
            response = s.recv(5)
            self.assertEqual(response, b"hello")
            print("Closing connection")
            s.close()
            print("Waiting for listener...")
            output, _ = self.remote_server_handle.communicate()
            self.assertEqual(self.remote_server_handle.returncode, 0)
            print("Closing tunnel")
class TestRemoteTarget(unittest.TestCase):
    """ These tests assume RemoteContext working
    in order for setUp and tearDown to work
    """

    def setUp(self):
        self.ctx = RemoteContext(working_ssh_host)
        self.filepath = "/tmp/luigi_remote_test.dat"
        self.target = RemoteTarget(
            self.filepath,
            working_ssh_host,
        )
        self.ctx.check_output(["rm", "-rf", self.filepath])
        # the '>' redirection is interpreted by the remote shell, writing
        # 'hello' (no newline) into the test file.
        self.ctx.check_output(["echo -n 'hello' >", self.filepath])

    def tearDown(self):
        self.ctx.check_output(["rm", "-rf", self.filepath])

    def test_exists(self):
        self.assertTrue(self.target.exists())
        no_file = RemoteTarget(
            "/tmp/_file_that_doesnt_exist_",
            working_ssh_host,
        )
        self.assertFalse(no_file.exists())

    def test_remove(self):
        self.target.remove()
        # reading the removed file over ssh must now fail
        self.assertRaises(
            subprocess.CalledProcessError,
            self.ctx.check_output,
            ["cat", self.filepath]
        )

    def test_open(self):
        f = self.target.open('r')
        file_content = f.read()
        f.close()
        self.assertEqual(file_content, "hello")
        self.assertTrue(self.target.fs.exists(self.filepath))
        self.assertFalse(self.target.fs.isdir(self.filepath))

    def test_context_manager(self):
        with self.target.open('r') as f:
            file_content = f.read()
        self.assertEqual(file_content, "hello")
class TestRemoteFilesystem(unittest.TestCase):
    """Exercises RemoteFileSystem directory operations (mkdir/isdir/listdir)."""

    def setUp(self):
        self.fs = RemoteFileSystem(working_ssh_host)
        self.root = '/tmp/luigi-remote-test'
        self.directory = self.root + '/dir'
        self.filepath = self.directory + '/file'
        self.target = RemoteTarget(
            self.filepath,
            working_ssh_host,
        )
        # start from a clean slate and guarantee cleanup even on failure
        self.fs.remote_context.check_output(['rm', '-rf', self.root])
        self.addCleanup(self.fs.remote_context.check_output, ['rm', '-rf', self.root])

    def test_mkdir(self):
        self.assertFalse(self.fs.isdir(self.directory))
        # parent is missing, so non-recursive mkdir must fail
        self.assertRaises(MissingParentDirectory, self.fs.mkdir, self.directory, parents=False)
        self.fs.mkdir(self.directory)
        self.assertTrue(self.fs.isdir(self.directory))
        # Shouldn't throw
        self.fs.mkdir(self.directory)
        self.assertRaises(FileAlreadyExists, self.fs.mkdir, self.directory, raise_if_exists=True)

    def test_list(self):
        # create the file (and its directory) by writing an empty target
        with self.target.open('w'):
            pass
        self.assertEqual([self.target.path], list(self.fs.listdir(self.directory)))
class TestGetAttrRecursion(unittest.TestCase):
    """Writing to an uncreatable path must raise RemoteCalledProcessError.

    NOTE(review): the class name suggests this is a regression test against
    infinite __getattr__ recursion during the failure path — confirm against
    luigi's history.
    """

    def test_recursion_on_delete(self):
        target = RemoteTarget("/etc/this/does/not/exist", working_ssh_host)
        with self.assertRaises(RemoteCalledProcessError):
            with target.open('w') as fh:
                fh.write("test")
class TestRemoteTargetAtomicity(unittest.TestCase, target_test.FileSystemTargetTestMixin):
    """Runs the generic FileSystemTarget atomicity mixin against RemoteTarget.

    Assumes password-less ssh to `working_ssh_host` (verified at module
    import time).
    """

    path = '/tmp/luigi_remote_atomic_test.txt'
    ctx = RemoteContext(working_ssh_host)

    def create_target(self, format=None):
        # hook required by FileSystemTargetTestMixin
        return RemoteTarget(self.path, working_ssh_host, format=format)

    def _exists(self, path):
        """Return True iff `path` exists on the remote host (via `test -e`)."""
        try:
            self.ctx.check_output(["test", "-e", path])
        except subprocess.CalledProcessError as e:
            if e.returncode == 1:
                # `test -e` exits 1 when the file is missing; any other code
                # (e.g. ssh failure) is a real error and is re-raised.
                return False
            else:
                raise
        return True

    def assertCleanUp(self, tp):
        self.assertFalse(self._exists(tp))

    def setUp(self):
        self.ctx.check_output(["rm", "-rf", self.path])
        self.local_file = '/tmp/local_luigi_remote_atomic_test.txt'
        if os.path.exists(self.local_file):
            os.remove(self.local_file)

    def tearDown(self):
        self.ctx.check_output(["rm", "-rf", self.path])
        if os.path.exists(self.local_file):
            os.remove(self.local_file)

    def test_put(self):
        # use a context manager so the handle is closed even if a later
        # assertion fails
        with open(self.local_file, 'w') as f:
            f.write('hello')
        t = RemoteTarget(self.path, working_ssh_host)
        t.put(self.local_file)
        self.assertTrue(self._exists(self.path))

    def test_get(self):
        self.ctx.check_output(["echo -n 'hello' >", self.path])
        t = RemoteTarget(self.path, working_ssh_host)
        t.get(self.local_file)
        # fix: the original leaked this file handle (open without close)
        with open(self.local_file, 'r') as f:
            file_content = f.read()
        self.assertEqual(file_content, 'hello')

    test_move_on_fs = None  # ssh don't have move (yet?)
    test_rename_dont_move_on_fs = None  # ssh don't have move (yet?)
class TestRemoteTargetCreateDirectories(TestRemoteTargetAtomicity):
    # Same suite, but with missing parent directories the target must create;
    # the random component keeps concurrent runs from colliding.
    path = '/tmp/%s/xyz/luigi_remote_atomic_test.txt' % random.randint(0, 999999999)
class TestRemoteTargetRelative(TestRemoteTargetAtomicity):
    # Same suite with a relative path (presumably resolved against the ssh
    # user's home directory — TODO confirm).
    path = 'luigi_remote_atomic_test.txt'
| |
"""Pgbouncer check
Collects metrics from the pgbouncer database.
"""
# stdlib
import urlparse
# 3p
import psycopg2 as pg
# project
from checks import AgentCheck, CheckException
class ShouldRestartException(Exception):
    """Signals that the cached pgbouncer connection is broken and the check
    should reconnect and retry the collection once."""
    pass
class PgBouncer(AgentCheck):
    """Collects metrics from pgbouncer

    Connects to pgbouncer's admin console (the special 'pgbouncer' database)
    and turns the rows of SHOW STATS / SHOW POOLS into datadog metrics.
    """
    RATE = AgentCheck.rate
    GAUGE = AgentCheck.gauge
    DB_NAME = 'pgbouncer'
    SERVICE_CHECK_NAME = 'pgbouncer.can_connect'

    # Each scope maps the columns of one admin-console query to metrics:
    # 'descriptors' columns become tags, 'metrics' columns become values.
    STATS_METRICS = {
        'descriptors': [
            ('database', 'db'),
        ],
        'metrics': [
            ('total_requests', ('pgbouncer.stats.requests_per_second', RATE)),
            ('total_received', ('pgbouncer.stats.bytes_received_per_second', RATE)),
            ('total_sent', ('pgbouncer.stats.bytes_sent_per_second', RATE)),
            ('total_query_time', ('pgbouncer.stats.total_query_time', GAUGE)),
            ('avg_req', ('pgbouncer.stats.avg_req', GAUGE)),
            ('avg_recv', ('pgbouncer.stats.avg_recv', GAUGE)),
            ('avg_sent', ('pgbouncer.stats.avg_sent', GAUGE)),
            ('avg_query', ('pgbouncer.stats.avg_query', GAUGE)),
        ],
        'query': """SHOW STATS""",
    }

    POOLS_METRICS = {
        'descriptors': [
            ('database', 'db'),
            ('user', 'user'),
        ],
        'metrics': [
            ('cl_active', ('pgbouncer.pools.cl_active', GAUGE)),
            ('cl_waiting', ('pgbouncer.pools.cl_waiting', GAUGE)),
            ('sv_active', ('pgbouncer.pools.sv_active', GAUGE)),
            ('sv_idle', ('pgbouncer.pools.sv_idle', GAUGE)),
            ('sv_used', ('pgbouncer.pools.sv_used', GAUGE)),
            ('sv_tested', ('pgbouncer.pools.sv_tested', GAUGE)),
            ('sv_login', ('pgbouncer.pools.sv_login', GAUGE)),
            ('maxwait', ('pgbouncer.pools.maxwait', GAUGE)),
        ],
        'query': """SHOW POOLS""",
    }

    def __init__(self, name, init_config, agentConfig, instances=None):
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        # cache of live connections, keyed per instance (host:port or URL)
        self.dbs = {}

    def _get_service_checks_tags(self, host, port, database_url):
        """Build the tags attached to the can_connect service check.

        When a database_url is configured, host/port are taken from it.
        """
        if database_url:
            parsed_url = urlparse.urlparse(database_url)
            host = parsed_url.hostname
            port = parsed_url.port
        service_checks_tags = [
            "host:%s" % host,
            "port:%s" % port,
            "db:%s" % self.DB_NAME
        ]
        return service_checks_tags

    def _collect_stats(self, db, instance_tags):
        """Query pgbouncer for various metrics

        Raises ShouldRestartException on connection-level errors so the
        caller can reconnect and retry once.
        """
        metric_scope = [self.STATS_METRICS, self.POOLS_METRICS]
        try:
            cursor = db.cursor()
            results = None
            for scope in metric_scope:
                metrics = scope['metrics']
                cols = [m[0] for m in metrics]
                try:
                    query = scope['query']
                    self.log.debug("Running query: %s" % query)
                    cursor.execute(query)
                    results = cursor.fetchall()
                except pg.Error as e:
                    # a single failed query should not abort the other scopes
                    self.log.warning("Not all metrics may be available: %s" % str(e))
                    continue
                for row in results:
                    # skip pgbouncer's own internal admin database
                    if row[0] == self.DB_NAME:
                        continue
                    desc = scope['descriptors']
                    if len(row) == len(cols) + len(desc) + 1:
                        # Some versions of pgbouncer have an extra field at the end of show pools
                        row = row[:-1]
                    assert len(row) == len(cols) + len(desc)
                    tags = list(instance_tags)
                    # leading columns are descriptors; emit them as tags
                    tags += ["%s:%s" % (d[0][1], d[1]) for d in zip(desc, row[:len(desc)])]
                    for i, (key_name, (mname, mtype)) in enumerate(metrics):
                        value = row[i + len(desc)]
                        mtype(self, mname, value, tags)
                if not results:
                    self.warning('No results were found for query: "%s"' % query)
            cursor.close()
        except pg.Error as e:
            self.log.error("Connection error: %s" % str(e))
            raise ShouldRestartException

    def _get_connect_kwargs(self, host, port, user, password, database_url):
        """
        Get the params to pass to psycopg2.connect() based on passed-in vals
        from yaml settings file
        """
        if database_url:
            return {'dsn': database_url}
        if not host:
            raise CheckException(
                "Please specify a PgBouncer host to connect to.")
        if not user:
            raise CheckException(
                "Please specify a user to connect to PgBouncer as.")
        if host in ('localhost', '127.0.0.1') and password == '':
            return {  # Use ident method
                'dsn': "user={} dbname={}".format(user, self.DB_NAME)
            }
        if port:
            return {'host': host, 'user': user, 'password': password,
                    'database': self.DB_NAME, 'port': port}
        return {'host': host, 'user': user, 'password': password,
                'database': self.DB_NAME}

    def _get_connection(self, key, host='', port='', user='',
                        password='', database_url='', use_cached=True):
        "Get and memoize connections to instances"
        if key in self.dbs and use_cached:
            return self.dbs[key]
        try:
            connect_kwargs = self._get_connect_kwargs(
                host=host, port=port, user=user,
                password=password, database_url=database_url
            )
            connection = pg.connect(**connect_kwargs)
            # the admin console rejects transactions; autocommit is required
            connection.set_isolation_level(
                pg.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        # re-raise the CheckExceptions raised by _get_connect_kwargs()
        except CheckException:
            raise
        except Exception:
            redacted_url = self._get_redacted_dsn(host, port, user, database_url)
            message = u'Cannot establish connection to {}'.format(redacted_url)
            self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
                               tags=self._get_service_checks_tags(host, port, database_url),
                               message=message)
            raise
        self.dbs[key] = connection
        return connection

    def _get_redacted_dsn(self, host, port, user, database_url):
        """Return a loggable connection string with the password masked."""
        if not database_url:
            return u'pgbouncer://%s:******@%s:%s/%s' % (user, host, port, self.DB_NAME)
        parsed_url = urlparse.urlparse(database_url)
        if parsed_url.password:
            return database_url.replace(parsed_url.password, '******')
        return database_url

    def check(self, instance):
        """Entry point: connect (or reuse a cached connection), collect the
        metrics, and emit the can_connect service check."""
        host = instance.get('host', '')
        port = instance.get('port', '')
        user = instance.get('username', '')
        password = instance.get('password', '')
        tags = instance.get('tags', [])
        database_url = instance.get('database_url')

        if database_url:
            key = database_url
        else:
            key = '%s:%s' % (host, port)

        if tags is None:
            tags = []
        else:
            # de-duplicate user-supplied tags
            tags = list(set(tags))

        try:
            db = self._get_connection(key, host, port, user, password,
                                      database_url=database_url)
            self._collect_stats(db, tags)
        except ShouldRestartException:
            self.log.info("Resetting the connection")
            # Bug fix: the retry previously dropped `database_url`, so
            # URL-configured instances reconnected with empty host/port.
            db = self._get_connection(key, host, port, user, password,
                                      database_url=database_url,
                                      use_cached=False)
            self._collect_stats(db, tags)

        redacted_dsn = self._get_redacted_dsn(host, port, user, database_url)
        message = u'Established connection to {}'.format(redacted_dsn)
        self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
                           tags=self._get_service_checks_tags(host, port, database_url),
                           message=message)
| |
import collections
import six
from wtforms import Form
try:
from wtforms_sqlalchemy.fields import (
QuerySelectField,
QuerySelectMultipleField
)
HAS_SQLALCHEMY_SUPPORT = True
except ImportError:
try:
from wtforms.ext.sqlalchemy.fields import (
QuerySelectField,
QuerySelectMultipleField
)
HAS_SQLALCHEMY_SUPPORT = True
except ImportError:
HAS_SQLALCHEMY_SUPPORT = False
from wtforms.fields import (
_unset_value,
BooleanField,
Field,
FieldList,
FileField,
FormField,
StringField
)
from wtforms.validators import DataRequired, Optional
__version__ = '0.3.3'
class InvalidData(Exception):
    """Raised when the supplied JSON is not dict-like, or (with
    skip_unknown_keys=False) contains keys that do not match form fields."""
    pass
def flatten_json(
    form,
    json,
    parent_key='',
    separator='-',
    skip_unknown_keys=True
):
    """Flattens given JSON dict to cope with WTForms dict structure.

    :param form: WTForms Form class whose fields drive the flattening
    :param json: json to be converted into flat WTForms style dict
    :param parent_key: this argument is used internally by recursive calls
    :param separator: separator used when joining nested keys
    :param skip_unknown_keys:
        if True unknown keys will be skipped, if False throws InvalidData
        exception whenever unknown key is encountered
    :raises InvalidData: if `json` is not dict-like, or (when
        `skip_unknown_keys` is False) an unknown key / non-field attribute
        is encountered

    Examples::

        >>> flatten_json(MyForm, {'a': {'b': 'c'}})
        {'a-b': 'c'}
    """
    # Bug fix: `collections.Mapping` / `MutableMapping` were removed in
    # Python 3.10; import from collections.abc with a Python 2 fallback.
    try:
        from collections.abc import Mapping, MutableMapping
    except ImportError:  # pragma: no cover (Python 2)
        from collections import Mapping, MutableMapping

    if not isinstance(json, Mapping):
        raise InvalidData(
            u'This function only accepts dict-like data structures.'
        )
    items = []
    for key, value in json.items():
        try:
            unbound_field = getattr(form, key)
        except AttributeError:
            if skip_unknown_keys:
                continue
            else:
                raise InvalidData(u"Unknown field name '%s'." % key)
        try:
            field_class = unbound_field.field_class
        except AttributeError:
            if skip_unknown_keys:
                continue
            else:
                raise InvalidData(u"Key '%s' is not valid field class." % key)
        new_key = parent_key + separator + key if parent_key else key
        if isinstance(value, MutableMapping):
            if issubclass(field_class, FormField):
                nested_form_class = unbound_field.bind(Form(), '').form_class
                items.extend(
                    # Bug fix: propagate `separator` and the unknown-key
                    # policy into the recursion; previously nested dicts
                    # always used the defaults ('-', True).
                    flatten_json(nested_form_class, value, new_key,
                                 separator, skip_unknown_keys)
                    .items()
                )
            else:
                items.append((new_key, value))
        elif isinstance(value, list):
            if issubclass(field_class, FieldList):
                nested_unbound_field = unbound_field.bind(
                    Form(),
                    ''
                ).unbound_field
                items.extend(
                    flatten_json_list(
                        nested_unbound_field,
                        value,
                        new_key,
                        separator
                    )
                )
            else:
                items.append((new_key, value))
        else:
            items.append((new_key, value))
    return dict(items)
def flatten_json_list(field, json, parent_key='', separator='-'):
    """Flatten a JSON list into WTForms-style indexed keys.

    :param field: unbound WTForms field describing the list entries
    :param json: list to flatten
    :param parent_key: prefix for the generated keys
    :param separator: separator between prefix and index
    :returns: list of (key, value) pairs, e.g. ('items-0', value)
    """
    items = []
    for i, item in enumerate(json):
        new_key = parent_key + separator + str(i)
        if (
            isinstance(item, dict) and
            issubclass(getattr(field, 'field_class'), FormField)
        ):
            nested_class = field.field_class(
                *field.args,
                **field.kwargs
            ).bind(Form(), '').form_class
            items.extend(
                # Bug fix: pass `separator` through to the nested call;
                # previously nested dicts always used the default '-'.
                flatten_json(nested_class, item, new_key, separator)
                .items()
            )
        else:
            items.append((new_key, item))
    return items
@property
def patch_data(self):
    """Return the submitted data as a dict with HTTP PATCH semantics.

    Monkey-patched onto Form and FieldList by init().  Fields absent from
    the submitted formdata are omitted unless they are required or carry a
    default; nested FormFields / FieldLists are handled recursively.
    """
    if hasattr(self, '_patch_data'):
        return self._patch_data
    data = {}

    def is_optional(field):
        # field carries an Optional validator
        return Optional in [v.__class__ for v in field.validators]

    def is_required(field):
        # field carries a DataRequired validator
        return DataRequired in [v.__class__ for v in field.validators]

    for name, f in six.iteritems(self._fields):
        if f.is_missing:
            # decide whether a missing field should still appear in the patch
            if is_optional(f):
                continue
            elif not is_required(f) and f.default is None:
                continue
            elif isinstance(f, FieldList) and f.min_entries == 0:
                continue
        if isinstance(f, FormField):
            data[name] = f.patch_data
        elif isinstance(f, FieldList):
            if issubclass(f.unbound_field.field_class, FormField):
                data[name] = [i.patch_data for i in f.entries]
            else:
                data[name] = [i.data for i in f.entries]
        else:
            data[name] = f.data
    return data
def monkey_patch_field_process(func):
    """
    Monkey patches Field.process method to better understand missing values.
    """
    def process(self, formdata, data=_unset_value):
        """Replacement for Field.process that tracks an `is_missing` flag.

        A key present with value [None] is treated as an explicit JSON null
        (data is set to None without running the original coercion); an
        absent key marks the field as missing.
        """
        call_original_func = True
        if not isinstance(self, FormField):
            if formdata and self.name in formdata:
                if (
                    len(formdata.getlist(self.name)) == 1 and
                    formdata.getlist(self.name) == [None]
                ):
                    # explicit null: don't let WTForms coerce it
                    call_original_func = False
                    self.data = None
                self.is_missing = not bool(formdata.getlist(self.name))
            else:
                self.is_missing = True
        if call_original_func:
            func(self, formdata, data=data)
        if (
            formdata and self.name in formdata and
            formdata.getlist(self.name) == [None] and
            isinstance(self, FormField)
        ):
            # a FormField explicitly set to null: mark the nested form as
            # present, with a null patch value
            self.form._is_missing = False
            self.form._patch_data = None
        if isinstance(self, StringField) and not isinstance(self, FileField):
            if not self.data:
                # fall back to the field default, which may be a callable
                try:
                    self.data = self.default()
                except TypeError:
                    self.data = self.default
            else:
                self.data = six.text_type(self.data)
    return process
class MultiDict(dict):
    """Minimal multi-dict so a flat dict can serve as WTForms formdata.

    WTForms expects formdata objects to expose getlist(); values that are
    not already lists are wrapped in a single-element list.
    """

    def getlist(self, key):
        value = self[key]
        return value if isinstance(value, list) else [value]

    def getall(self, key):
        return [self[key]]
@classmethod
def from_json(
    cls,
    formdata=None,
    obj=None,
    prefix='',
    data=None,
    meta=None,
    skip_unknown_keys=True,
    **kwargs
):
    """Create a form instance from a (possibly nested) JSON dict.

    Monkey-patched onto Form by init().  The JSON is flattened into
    WTForms' flat key style (see flatten_json) and wrapped in MultiDict so
    the Form constructor can consume it as formdata.

    :param formdata: dict-like JSON payload; falsy means no submitted data
    :param skip_unknown_keys: passed through to flatten_json
    The remaining parameters are forwarded to the Form constructor.
    """
    form = cls(
        formdata=MultiDict(
            flatten_json(cls, formdata, skip_unknown_keys=skip_unknown_keys)
        ) if formdata else None,
        obj=obj,
        prefix=prefix,
        data=data,
        meta=meta,
        **kwargs
    )
    return form
@property
def is_missing(self):
    """True when no data at all was submitted for this form.

    Monkey-patched onto Form by init().  An explicit `_is_missing` flag
    (set by the patched Field.process) takes precedence; otherwise the
    form is missing iff every one of its fields is missing.
    """
    if hasattr(self, '_is_missing'):
        return self._is_missing
    return all(field.is_missing for field in self._fields.values())
@property
def field_list_is_missing(self):
    """True when every entry of this FieldList is missing.

    Monkey-patched onto FieldList by init(); an explicit `_is_missing`
    flag takes precedence over inspecting the entries.
    """
    if hasattr(self, '_is_missing'):
        return self._is_missing
    for entry in self.entries:
        if not entry.is_missing:
            return False
    return True
def monkey_patch_process_formdata(func):
    """Wrap process_formdata so every submitted value is coerced to text.

    Applied to QuerySelect(Multiple)Field, whose process_formdata expects
    string ids but may receive e.g. ints from decoded JSON.
    """
    def process_formdata(self, valuelist):
        valuelist = list(map(six.text_type, valuelist))
        return func(self, valuelist)
    return process_formdata
def init():
    """Install the JSON extensions onto WTForms classes (monkey patching).

    Bug fix: the original was not idempotent — calling init() twice wrapped
    Field.process / FormField.process a second time and kept appending
    another `False` to BooleanField.false_values.  A module-level guard now
    makes repeated calls no-ops.
    """
    if getattr(Form, '_wtforms_json_patched', False):
        return
    Form._wtforms_json_patched = True
    Form.is_missing = is_missing
    FieldList.is_missing = field_list_is_missing
    Form.from_json = from_json
    Form.patch_data = patch_data
    FieldList.patch_data = patch_data
    if HAS_SQLALCHEMY_SUPPORT:
        QuerySelectField.process_formdata = monkey_patch_process_formdata(
            QuerySelectField.process_formdata
        )
        QuerySelectMultipleField.process_formdata = \
            monkey_patch_process_formdata(
                QuerySelectMultipleField.process_formdata
            )
    Field.process = monkey_patch_field_process(Field.process)
    FormField.process = monkey_patch_field_process(FormField.process)
    BooleanField.false_values = BooleanField.false_values + (False,)
| |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Implements Cloud ML Model Operations"""
from googleapiclient import discovery
import os
import yaml
import google.datalab as datalab
from . import _util
class Models(object):
"""Represents a list of Cloud ML models for a project."""
  def __init__(self, project_id=None):
    """
    Args:
      project_id: project_id of the models. If not provided, default project_id will be used.
    """
    if project_id is None:
      project_id = datalab.Context.default().project_id
    self._project_id = project_id
    self._credentials = datalab.Context.default().credentials
    # v1 Cloud ML Engine API client, authorized with the Datalab credentials
    self._api = discovery.build('ml', 'v1', credentials=self._credentials)
  def _retrieve_models(self, page_token, page_size):
    """Fetch one page of models; pager callback for datalab.utils.Iterator.

    Returns:
      A (models, next_page_token) tuple; next_page_token is None on the
      last page.
    """
    list_info = self._api.projects().models().list(
        parent='projects/' + self._project_id, pageToken=page_token, pageSize=page_size).execute()
    models = list_info.get('models', [])
    page_token = list_info.get('nextPageToken', None)
    return models, page_token
  def get_iterator(self):
    """Get iterator of models so it can be used as "for model in Models().get_iterator()".
    """
    # datalab.utils.Iterator drives _retrieve_models() page by page
    return iter(datalab.utils.Iterator(self._retrieve_models))
  def get_model_details(self, model_name):
    """Get details of the specified model from CloudML Service.

    Args:
      model_name: the name of the model. It can be a model full name
          ("projects/[project_id]/models/[model_name]") or just [model_name].
    Returns: a dictionary of the model details.
    """
    full_name = model_name
    # accept a bare model name by qualifying it with this project
    if not model_name.startswith('projects/'):
      full_name = ('projects/%s/models/%s' % (self._project_id, model_name))
    return self._api.projects().models().get(name=full_name).execute()
def create(self, model_name):
"""Create a model.
Args:
model_name: the short name of the model, such as "iris".
Returns:
If successful, returns informaiton of the model, such as
{u'regions': [u'us-central1'], u'name': u'projects/myproject/models/mymodel'}
Raises:
If the model creation failed.
"""
body = {'name': model_name}
parent = 'projects/' + self._project_id
# Model creation is instant. If anything goes wrong, Exception will be thrown.
return self._api.projects().models().create(body=body, parent=parent).execute()
def delete(self, model_name):
"""Delete a model.
Args:
model_name: the name of the model. It can be a model full name
("projects/[project_id]/models/[model_name]") or just [model_name].
"""
full_name = model_name
if not model_name.startswith('projects/'):
full_name = ('projects/%s/models/%s' % (self._project_id, model_name))
response = self._api.projects().models().delete(name=full_name).execute()
if 'name' not in response:
raise Exception('Invalid response from service. "name" is not found.')
_util.wait_for_long_running_operation(response['name'])
def list(self, count=10):
"""List models under the current project in a table view.
Args:
count: upper limit of the number of models to list.
Raises:
Exception if it is called in a non-IPython environment.
"""
import IPython
data = []
# Add range(count) to loop so it will stop either it reaches count, or iteration
# on self is exhausted. "self" is iterable (see __iter__() method).
for _, model in zip(range(count), self.get_iterator()):
element = {'name': model['name']}
if 'defaultVersion' in model:
version_short_name = model['defaultVersion']['name'].split('/')[-1]
element['defaultVersion'] = version_short_name
data.append(element)
IPython.display.display(
datalab.utils.commands.render_dictionary(data, ['name', 'defaultVersion']))
def describe(self, model_name):
"""Print information of a specified model.
Args:
model_name: the name of the model to print details on.
"""
model_yaml = yaml.safe_dump(self.get_model_details(model_name), default_flow_style=False)
print(model_yaml)
class ModelVersions(object):
  """Represents a list of versions for a Cloud ML model."""

  def __init__(self, model_name, project_id=None):
    """
    Args:
      model_name: the name of the model. It can be a model full name
          ("projects/[project_id]/models/[model_name]") or just [model_name].
      project_id: project_id of the models. If not provided and model_name is not a full name
          (not including project_id), default project_id will be used.
    """
    # BUG FIX: previously _project_id, _credentials and _api were assigned
    # only when project_id was None, so passing an explicit project_id left
    # the object half-initialized (AttributeError on any later API call).
    if project_id is None:
      project_id = datalab.Context.default().project_id
    self._project_id = project_id
    self._credentials = datalab.Context.default().credentials
    self._api = discovery.build('ml', 'v1', credentials=self._credentials)
    if not model_name.startswith('projects/'):
      model_name = ('projects/%s/models/%s' % (self._project_id, model_name))
    self._full_model_name = model_name
    self._model_name = self._full_model_name.split('/')[-1]

  def _retrieve_versions(self, page_token, page_size):
    """Fetch one page of versions; returns (versions, next_page_token)."""
    parent = self._full_model_name
    list_info = self._api.projects().models().versions().list(parent=parent,
                                                              pageToken=page_token,
                                                              pageSize=page_size).execute()
    return list_info.get('versions', []), list_info.get('nextPageToken')

  def get_iterator(self):
    """Get iterator of versions so it can be used as
       "for v in ModelVersions(model_name).get_iterator()".
    """
    return iter(datalab.utils.Iterator(self._retrieve_versions))

  def get_version_details(self, version_name):
    """Get details of a version.

    Args:
      version_name: the name of the version in short form, such as "v1".
    Returns: a dictionary containing the version details.
    """
    name = ('%s/versions/%s' % (self._full_model_name, version_name))
    return self._api.projects().models().versions().get(name=name).execute()

  def deploy(self, version_name, path, runtime_version=None):
    """Deploy a model version to the cloud.

    Args:
      version_name: the name of the version in short form, such as "v1".
      path: the Google Cloud Storage path (gs://...) which contains the model files.
      runtime_version: the ML Engine runtime version as a string, example '1.2'.
          See https://cloud.google.com/ml-engine/docs/concepts/runtime-version-list
          for a list of runtimes. If None, the ML Engine service will pick one.
    Raises: Exception if the path is invalid or does not contain expected files.
            Exception if the service returns invalid response.
    """
    if not path.startswith('gs://'):
      raise Exception('Invalid path. Only Google Cloud Storage path (gs://...) is accepted.')
    # If there is no "export.meta" or "saved_model.pb" under path but there is
    # path/model/export.meta or path/model/saved_model.pb, then append /model to the path.
    if not datalab.storage.Object.from_url(os.path.join(path, 'export.meta')).exists() and not \
        datalab.storage.Object.from_url(os.path.join(path, 'saved_model.pb')).exists():
      if datalab.storage.Object.from_url(os.path.join(path, 'model', 'export.meta')).exists() or \
         datalab.storage.Object.from_url(os.path.join(path, 'model',
                                                      'saved_model.pb')).exists():
        path = os.path.join(path, 'model')
      else:
        print('Cannot find export.meta or saved_model.pb, but continue with deployment anyway.')
    body = {'name': self._model_name}
    parent = 'projects/' + self._project_id
    try:
      self._api.projects().models().create(body=body, parent=parent).execute()
    except Exception:
      # Trying to create an already existing model gets an error. Ignore it.
      pass
    # NOTE(review): the v1 REST API documents these fields as camelCase
    # ('deploymentUri', 'runtimeVersion') -- verify the snake_case keys are
    # accepted before changing them.
    body = {
        'name': version_name,
        'deployment_uri': path,
    }
    if runtime_version:
      body['runtime_version'] = runtime_version
    response = self._api.projects().models().versions().create(
        body=body, parent=self._full_model_name).execute()
    if 'name' not in response:
      raise Exception('Invalid response from service. "name" is not found.')
    # Version creation is a long-running operation; block until it finishes.
    _util.wait_for_long_running_operation(response['name'])

  def delete(self, version_name):
    """Delete a version of model.

    Args:
      version_name: the name of the version in short form, such as "v1".
    """
    name = ('%s/versions/%s' % (self._full_model_name, version_name))
    response = self._api.projects().models().versions().delete(name=name).execute()
    if 'name' not in response:
      raise Exception('Invalid response from service. "name" is not found.')
    _util.wait_for_long_running_operation(response['name'])

  def predict(self, version_name, data):
    """Get prediction results from features instances.

    Args:
      version_name: the name of the version used for prediction.
      data: typically a list of instance to be submitted for prediction. The format of the
          instance depends on the model. For example, structured data model may require
          a csv line for each instance.
          Note that online prediction only works on models that take one placeholder value,
          such as a string encoding a csv line.
    Returns:
      A list of prediction results for given instances. Each element is a dictionary representing
          output mapping from the graph.
      An example:
        [{"predictions": 1, "score": [0.00078, 0.71406, 0.28515]},
         {"predictions": 1, "score": [0.00244, 0.99634, 0.00121]}]
    """
    full_version_name = ('%s/versions/%s' % (self._full_model_name, version_name))
    request = self._api.projects().predict(body={'instances': data},
                                           name=full_version_name)
    request.headers['user-agent'] = 'GoogleCloudDataLab/1.0'
    result = request.execute()
    if 'predictions' not in result:
      raise Exception('Invalid response from service. Cannot find "predictions" in response.')
    return result['predictions']

  def describe(self, version_name):
    """Print information of a specified model version.

    Args:
      version_name: the name of the version in short form, such as "v1".
    """
    version_yaml = yaml.safe_dump(self.get_version_details(version_name),
                                  default_flow_style=False)
    print(version_yaml)

  def list(self):
    """List versions under the current model in a table view.

    Raises:
      Exception if it is called in a non-IPython environment.
    """
    import IPython
    # BUG FIX: the short version name must be the last '/'-separated path
    # component; split() (whitespace) returned the whole resource name.
    data = [{'name': version['name'].split('/')[-1],
             'deploymentUri': version['deploymentUri'], 'createTime': version['createTime']}
            for version in self.get_iterator()]
    IPython.display.display(
        datalab.utils.commands.render_dictionary(data, ['name', 'deploymentUri', 'createTime']))
| |
# Copyright (C) 2013-2014 Red Hat, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the FreeBSD Project.
#
# Author: Peter Schiffer <pschiffe@redhat.com>
#
"""
LMI system client library.
"""
from sys import stdout
from lmi.scripts.service import get_service
from lmi.scripts.common import get_computer_system
from lmi.shell.LMIExceptions import LMIClassNotFound
from lmi.scripts.common.formatter import TableFormatter
# Symbolic color ids consumed by get_colored_string() below.
GREEN_COLOR = 1
YELLOW_COLOR = 2
RED_COLOR = 3
# Minimum width of the first (label) column in the tabular output.
FIRST_COLUMN_MIN_SIZE = 17
def _cache_replies(ns, class_name, method):
"""
Get the reply from cimom and cache it. Cache is cleared
once the namespace object changes.
:param str class_name: Name of class to operate on.
:param str method_name: Name of method to invoke on lmi class object.
:returns: Whatever the requested method returns.
"""
if not hasattr(_cache_replies, 'cache'):
_cache_replies.cache = (ns, {})
old_ns, cache = _cache_replies.cache
if old_ns is not ns:
# keep the cache until namespace object changes
cache.clear()
_cache_replies.cache = (ns, cache)
if not (class_name, method) in cache:
i = getattr(ns, class_name)
cache[(class_name, method)] = getattr(i, method)()
return cache[(class_name, method)]
def get_single_instance(ns, class_name):
    """
    Return the first instance of the given CIM class, served from the
    namespace-wide reply cache.

    :param class_name: Name of the class to query.
    :type class_name: String
    :returns: First instance of the class.
    :rtype: :py:class:`lmi.shell.LMIInstance`
    """
    return _cache_replies(ns, class_name, 'first_instance')
def get_all_instances(ns, class_name):
    """
    Return every instance of the given CIM class, served from the
    namespace-wide reply cache.

    :param class_name: Name of the class to query.
    :type class_name: String
    :returns: List of instances of the class.
    :rtype: List of :py:class:`lmi.shell.LMIInstance`
    """
    return _cache_replies(ns, class_name, 'instances')
def get_hostname(ns):
    """
    :returns: System hostname.
    :rtype: String
    """
    return get_computer_system(ns).Name
def format_memory_size(size):
    """
    Returns formatted memory size.

    :param size: Size in bytes
    :type size: Number
    :returns: Formatted size string.
    :rtype: String
    """
    # Falsy size (0, None, '') means the value was not reported.
    if not size:
        return 'N/A GB'
    if size >= 1 << 40:
        return '%.1f TB' % (float(size) / (1 << 40))
    if size >= 1 << 30:
        return '%.1f GB' % (float(size) / (1 << 30))
    if size >= 1 << 20:
        return '%d MB' % (int(size) / (1 << 20))
    if size >= 1 << 10:
        return '%d KB' % (int(size) / (1 << 10))
    return '%d B' % int(size)
def get_colored_string(msg, color):
    """
    Returns colored message with ANSI escape sequences for terminal.

    :param msg: Message to be colored.
    :type msg: String
    :param color: Color of the message [GREEN_COLOR, YELLOW_COLOR, RED_COLOR].
    :type color: Integer
    :returns: Colored message (returned unchanged when stdout is not a tty).
    :rtype: String
    """
    # Only decorate real terminals; piped/redirected output stays plain.
    if not stdout.isatty():
        return msg
    prefixes = {
        GREEN_COLOR: '\033[92m',
        YELLOW_COLOR: '\033[93m',
        RED_COLOR: '\033[91m',
    }
    reset = '\033[0m'
    return '%s%s%s' % (prefixes[color], msg, reset)
def get_system_info(ns):
    """
    Prints tabular data of all general system information.

    Each section is printed independently: a failure in one section is
    reported as an error row and does not prevent the remaining sections
    from being printed.
    """
    tf = TableFormatter(stdout, 0, True)
    tf.print_host(get_hostname(ns))
    # One loop instead of seven copy-pasted try/except blocks.
    for section in (get_hwinfo, get_storageinfo, get_osinfo, get_langinfo,
                    get_selinuxinfo, get_servicesinfo, get_networkinfo):
        try:
            section(ns)
        except Exception as e:
            tf.produce_output([(get_colored_string('error:', RED_COLOR), str(e))])
    return []
def get_hwinfo(ns):
    """
    Prints tabular data of system hw info.

    Reports chassis vendor/model, serial number and asset tag, CPU model
    and topology, and total memory; prints placeholders when the relevant
    CIM classes are unavailable.
    """
    tf = TableFormatter(stdout, 0, True, {0: FIRST_COLUMN_MIN_SIZE})
    # Chassis
    try:
        chassis = get_single_instance(ns, 'LMI_Chassis')
    except Exception:
        result = [(get_colored_string('error:', RED_COLOR),
            'Missing class LMI_Chassis. Is openlmi-hardware package installed on the server?')]
        tf.produce_output(result)
        return []
    hwinfo = chassis.Manufacturer
    # Prefer Model over ProductName; skip values that are the 'Not Specified'
    # placeholder or merely repeat the manufacturer string.
    if chassis.Model and chassis.Model != 'Not Specified' \
            and chassis.Model != chassis.Manufacturer:
        hwinfo += ' ' + chassis.Model
    elif chassis.ProductName and chassis.ProductName != 'Not Specified' \
            and chassis.ProductName != chassis.Manufacturer:
        hwinfo += ' ' + chassis.ProductName
    # VirtualMachine attribute may be missing on older providers, hence getattr().
    virt = getattr(chassis, 'VirtualMachine', None)
    if virt and virt != 'No':
        hwinfo += ' (%s virtual machine)' % virt
    chassis_res = [
        ('Hardware:', hwinfo),
        ('Serial Number:', chassis.SerialNumber),
        ('Asset Tag:', chassis.Tag)]
    tf.produce_output(chassis_res)
    # CPUs
    try:
        cpus = get_all_instances(ns, 'LMI_Processor')
        cpu_caps = get_all_instances(ns, 'LMI_ProcessorCapabilities')
    except Exception:
        cpus = None
        cpu_caps = None
    if cpus and cpu_caps:
        # Sum cores/threads across all processor capability instances.
        cores = 0
        threads = 0
        for i in cpu_caps:
            cores += i.NumberOfProcessorCores
            threads += i.NumberOfHardwareThreads
        cpus_res = [
            ('CPU:', '%s, %s arch' % (cpus[0].Name, cpus[0].Architecture)),
            ('CPU Topology:', '%d cpu(s), %d core(s), %d thread(s)' % \
                (len(cpus), cores, threads))]
    else:
        cpus_res = [('CPU:', 'N/A')]
    tf.produce_output(cpus_res)
    # Memory
    try:
        memory = get_single_instance(ns, 'LMI_Memory')
    except Exception:
        memory = None
    if memory:
        # NOTE(review): assumes NumberOfBlocks is a byte count -- verify the
        # provider's block size before relying on this figure.
        memory_size = format_memory_size(memory.NumberOfBlocks)
    else:
        memory_size = 'N/A GB'
    tf.produce_output([('Memory:', memory_size)])
    return []
def get_storageinfo(ns):
    """
    Prints tabular data of storage info.

    Sums size and free space over every local filesystem instance.
    """
    tf = TableFormatter(stdout, 0, True, {0: FIRST_COLUMN_MIN_SIZE})
    try:
        localfss = get_all_instances(ns, 'LMI_LocalFileSystem')
    except Exception:
        tf.produce_output([(get_colored_string('error:', RED_COLOR),
            'Missing class LMI_LocalFileSystem. Is openlmi-storage package installed on the server?')])
        return []
    # None/0 values are skipped, matching the falsy checks of the totals.
    total = sum(fs.FileSystemSize for fs in localfss if fs.FileSystemSize)
    free = sum(fs.AvailableSpace for fs in localfss if fs.AvailableSpace)
    tf.produce_output([('Disk Space:', '%s total, %s free' % (format_memory_size(total),
                                                              format_memory_size(free)))])
    return []
def get_osinfo(ns):
    """
    Prints tabular data of system OS info.
    """
    tf = TableFormatter(stdout, 0, True, {0: FIRST_COLUMN_MIN_SIZE})
    try:
        os_inst = get_single_instance(ns, 'PG_OperatingSystem')
    except Exception:
        tf.produce_output([(get_colored_string('error:', RED_COLOR),
            'Missing class PG_OperatingSystem on the server.')])
        return []
    # Fall back to 'N/A' when the instance or its attributes are missing/empty.
    os_str = os_inst.Caption if os_inst else ''
    kernel_str = os_inst.Version if os_inst else ''
    tf.produce_output([
        ('OS:', os_str or 'N/A'),
        ('Kernel:', kernel_str or 'N/A')])
    return []
def get_langinfo(ns):
    """
    Prints tabular data of language and time zone info.
    """
    tf = TableFormatter(stdout, 0, True, {0: FIRST_COLUMN_MIN_SIZE})
    try:
        locale = get_single_instance(ns, 'LMI_Locale')
    except Exception:
        tf.produce_output([(get_colored_string('error:', RED_COLOR),
            'Missing class LMI_Locale. Is openlmi-locale package installed on the server?')])
        return []
    tf.produce_output([('Language:', locale.Lang)])
    try:
        ntp_state = 'on' if locale.NTP else 'off'
        tf.produce_output([('Time Zone:',
            "%s (NTP is %s)" % (locale.Timezone, ntp_state))])
    except Exception:
        # Time Zone info was added later to the LMI_Locale class
        pass
    return []
def get_selinuxinfo(ns):
    """
    Prints tabular data of SELinux info.
    """
    tf = TableFormatter(stdout, 0, True, {0: FIRST_COLUMN_MIN_SIZE})
    try:
        selinux = get_single_instance(ns, 'LMI_SELinuxService')
    except Exception:
        tf.produce_output([(get_colored_string('error:', RED_COLOR),
            'Missing class LMI_SELinuxService. Is openlmi-selinux package installed on the server?')])
        return []
    # State 0 means disabled; otherwise translate the numeric state to its
    # symbolic name (e.g. enforcing/permissive).
    if selinux.SELinuxState == 0:
        state_str = 'disabled'
    else:
        value_name = ns.LMI_SELinuxService.SELinuxStateValues.value_name
        state_str = 'enabled (%s)' % value_name(selinux.SELinuxState).lower()
    tf.produce_output([('SELinux:', state_str)])
    return []
def _probe_services(ns, candidates):
    """
    Return 'on (<label>)' for the first running service among candidates.

    :param candidates: sequence of (service_name, label) pairs, checked in order.
    :returns: 'on (<label>)' of the first service whose Status is 'OK',
        or 'off' when none of them is running.
    """
    for name, label in candidates:
        service = get_service(ns, name)
        if service and service.Status == 'OK':
            return 'on (%s)' % label
    return 'off'
def get_servicesinfo(ns):
    """
    Prints tabular data of some system services.

    Reports firewall (firewalld, then iptables) and logging
    (systemd-journald, then rsyslog) status; 'N/A' on lookup failure.
    """
    tf = TableFormatter(stdout, 0, True, {0: FIRST_COLUMN_MIN_SIZE})
    # Firewall
    try:
        fw = _probe_services(ns, (('firewalld', 'firewalld'),
                                  ('iptables', 'iptables')))
    except Exception:
        fw = 'N/A'
    tf.produce_output([('Firewall:', fw)])
    # Logging
    try:
        logging = _probe_services(ns, (('systemd-journald', 'journald'),
                                       ('rsyslog', 'rsyslog')))
    except Exception:
        logging = 'N/A'
    tf.produce_output([('Logging:', logging)])
    return []
def get_networkinfo(ns):
    """
    Prints tabular data of networking status.

    Lists every non-loopback LAN endpoint with its operating status,
    IPv4/IPv6 addresses and MAC address.
    """
    tf = TableFormatter(stdout, 0, True, {0: FIRST_COLUMN_MIN_SIZE})
    result = [('', ''), ('Networking:', '')]
    try:
        lan_endpoints = get_all_instances(ns, 'LMI_LANEndpoint')
    except Exception:
        result += [(get_colored_string('error:', RED_COLOR),
            'Missing class LMI_LANEndpoint. Is openlmi-networking package installed on the server?')]
        tf.produce_output(result)
        return []
    nic = 1
    for lan_endpoint in lan_endpoints:
        # skip the loopback interface
        if lan_endpoint.Name == 'lo':
            continue
        result += [
            (' NIC %d' % nic, ''),
            ('  Name:', lan_endpoint.Name)]
        # Status and addresses are optional; each lookup failure is ignored
        # so the remaining rows for this NIC are still printed.
        try:
            ip_net_con = lan_endpoint.associators(
                AssocClass='LMI_EndpointForIPNetworkConnection',
                ResultClass='LMI_IPNetworkConnection')[0]
            result += [('  Status:',
                ns.LMI_IPNetworkConnection.OperatingStatusValues.value_name(
                    ip_net_con.OperatingStatus))]
        except Exception:
            pass
        try:
            # One endpoint per IP protocol; pick the address field matching
            # the protocol type.
            for ip_protocol_endpoint in lan_endpoint.associators(
                    AssocClass='LMI_BindsToLANEndpoint',
                    ResultClass='LMI_IPProtocolEndpoint'):
                if ip_protocol_endpoint.ProtocolIFType == \
                        ns.LMI_IPProtocolEndpoint.ProtocolIFTypeValues.IPv4:
                    result += [('  IPv4 Address:',
                        ip_protocol_endpoint.IPv4Address)]
                elif ip_protocol_endpoint.ProtocolIFType == \
                        ns.LMI_IPProtocolEndpoint.ProtocolIFTypeValues.IPv6:
                    result += [('  IPv6 Address:',
                        ip_protocol_endpoint.IPv6Address)]
        except Exception:
            pass
        result += [
            ('  MAC Address:', lan_endpoint.MACAddress)]
        # flush rows per NIC so partial data is shown even on later failure
        tf.produce_output(result)
        result = []
        nic += 1
    return []
| |
# Master switch: most per-class flags below are OR-ed with this one, so True
# here silences those classes regardless of their own value.
# NOTE(review): HAZARD, V and GRPH are hard-coded below and bypass the switch.
_DISABLE_ALL_LOGS = True
##----------------------------------------------------------------------------------------------------------------------
##----------------------------------------------------------------------------------------------------------------------
##----------------------------------------------------------------------------------------------------------------------
##----------------------------------------------------------------------------------------------------------------------
#_DISABLE_LOG_CLASS_HAZARD = _DISABLE_ALL_LOGS or False
# Hazardous logs are force-disabled (must never appear in a production build).
_DISABLE_LOG_CLASS_HAZARD = True
#_DISABLE_LOG_CLASS_V = _DISABLE_ALL_LOGS or False
# Single-verbose logs are force-enabled, bypassing the master switch.
_DISABLE_LOG_CLASS_V = False
_DISABLE_LOG_CLASS_VV = _DISABLE_ALL_LOGS or False
_DISABLE_LOG_CLASS_VVV = _DISABLE_ALL_LOGS or False
_DISABLE_LOG_CLASS_VVVV = _DISABLE_ALL_LOGS or True
# FEFR/FEFRV: "function entry, function return" tracing.
_DISABLE_LOG_CLASS_FEFR = _DISABLE_ALL_LOGS or False
_DISABLE_LOG_CLASS_FEFRV = _DISABLE_ALL_LOGS or True
# FP: object fingerprint logs.
_DISABLE_LOG_CLASS_FP = _DISABLE_ALL_LOGS or True
# DAO/DAOV: data access object logs.
_DISABLE_LOG_CLASS_DAO = _DISABLE_ALL_LOGS or True
_DISABLE_LOG_CLASS_DAOV = _DISABLE_ALL_LOGS or True
# CM and CMV are logs for crypt manager module.
_DISABLE_LOG_CLASS_CM = _DISABLE_ALL_LOGS or True
_DISABLE_LOG_CLASS_CMV = _DISABLE_ALL_LOGS or True
# CLI and CLIV are logs for command line UI
_DISABLE_LOG_CLASS_CLI = _DISABLE_ALL_LOGS or False
_DISABLE_LOG_CLASS_CLIV = _DISABLE_ALL_LOGS or True
# UVSMGR and UVSMGRV are logs for command line UI
_DISABLE_LOG_CLASS_UVSMGR = _DISABLE_ALL_LOGS or False
_DISABLE_LOG_CLASS_UVSMGRV = _DISABLE_ALL_LOGS or True
# history DAG related msgs
_DISABLE_LOG_CLASS_DAG = _DISABLE_ALL_LOGS or False
_DISABLE_LOG_CLASS_DAGV = _DISABLE_ALL_LOGS or True
# history DAG related msgs
# NOTE(review): the comment above looks copy-pasted; ams()/amsv() below say
# AMS is the Auto Merge Service -- confirm and fix upstream.
_DISABLE_LOG_CLASS_AMS = _DISABLE_ALL_LOGS or False
#_DISABLE_LOG_CLASS_AMS = False
_DISABLE_LOG_CLASS_AMSV = _DISABLE_ALL_LOGS or True
# GRPH (graph util) logs are force-enabled, bypassing the master switch.
_DISABLE_LOG_CLASS_GRPH = False
_DISABLE_LOG_CLASS_GRPHV = _DISABLE_ALL_LOGS or True
# ANSI SGR escape sequences: "\033[<attrs>m" where attrs are
# semicolon-separated codes (0 reset, 1 bold, 4 underline, 5 blink,
# 7 reverse; 30-37 standard and 90-97 bright foreground colors).
# the 1 appears to make it bold, color is after that
term_red = "\033[1;31m"
term_light_red = "\033[0;91m"
term_green = "\033[0;32m"
term_light_green = "\033[5;49;92m"
term_yellow = "\033[1;33m"
term_light_yellow = "\033[0;93m"
term_light_blue = "\033[0;94m"
term_light_cyan = "\033[0;96m"
term_blue = "\033[1;34m"
term_purple = "\033[0;35m"
term_cyan = "\033[1;36m"
term_white_bold = "\033[1;38m"
# attribute resets / text effects
term_reset = "\033[0;0m"
term_bold = "\033[;1m"
term_reverse = "\033[;7m"
term_warning = '\033[93m'
term_fail = '\033[91m'
term_endc = '\033[0m'
term_underline = '\033[4m'
#-----------------------------------------------------------------------------------------------------------------------
def fefrv(msg, label=True):
    """ Print log msgs for "function entry, function return verified" category. """
    if _DISABLE_LOG_CLASS_FEFRV:
        return
    # coerce msg once; prefix the log-class tag unless raw output requested
    final_msg = ('fefrv: ' + str(msg)) if label else str(msg)
    # parenthesized single-arg form: valid print statement on Python 2 and
    # the print() function on Python 3 (also drops the dead None init)
    print(final_msg)
#-----------------------------------------------------------------------------------------------------------------------
def fefr(msg, label=True):
    """ Print log msgs for "function entry, function return" category. """
    if _DISABLE_LOG_CLASS_FEFR:
        return
    # prefix the log-class tag unless the caller asked for raw output
    final_msg = ('fefr: ' + str(msg)) if label else str(msg)
    # parenthesized form works on both Python 2 and 3
    print(final_msg)
#-----------------------------------------------------------------------------------------------------------------------
def vvvv(msg, label=True):
    """ print log msgs that are in "triple verbose and verified" category. """
    if _DISABLE_LOG_CLASS_VVVV:
        return
    final_msg = ('vvvv: ' + str(msg)) if label else str(msg)
    # parenthesized form works on both Python 2 and 3
    print(final_msg)
#-----------------------------------------------------------------------------------------------------------------------
def vvv(msg, label=True):
    """ print log msgs that are in "triple verbose" category. """
    if _DISABLE_LOG_CLASS_VVV:
        return
    final_msg = ('vvv: ' + str(msg)) if label else str(msg)
    # parenthesized form works on both Python 2 and 3
    print(final_msg)
#-----------------------------------------------------------------------------------------------------------------------
def vv(msg, label=True):
    """ print log msgs that are in "double verbose" category. """
    if _DISABLE_LOG_CLASS_VV:
        return
    final_msg = ('vv: ' + str(msg)) if label else str(msg)
    # parenthesized form works on both Python 2 and 3
    print(final_msg)
#-----------------------------------------------------------------------------------------------------------------------
def v(msg, label=True):
    """ print log msgs that are in "single verbose" category. """
    if _DISABLE_LOG_CLASS_V:
        return
    final_msg = ('v: ' + str(msg)) if label else str(msg)
    # cyan output; parenthesized form works on both Python 2 and 3
    print(term_cyan + final_msg + term_reset)
#-----------------------------------------------------------------------------------------------------------------------
def fp(msg, label=True):
    """ print log msgs related to objects and their fingerprints . """
    if _DISABLE_LOG_CLASS_FP:
        return
    final_msg = ('fp: ' + str(msg)) if label else str(msg)
    # light yellow output; parenthesized form works on both Python 2 and 3
    print(term_light_yellow + final_msg + term_reset)
#-----------------------------------------------------------------------------------------------------------------------
def dao(msg, label=True):
    """ print log msgs belonging to the data access objects. """
    if _DISABLE_LOG_CLASS_DAO:
        return
    final_msg = ('dao: ' + str(msg)) if label else str(msg)
    # green output; parenthesized form works on both Python 2 and 3
    print(term_green + final_msg + term_reset)
#-----------------------------------------------------------------------------------------------------------------------
def daov(msg, label=True):
    """ print the verbose log msgs belonging to the data access objects. """
    if _DISABLE_LOG_CLASS_DAOV:
        return
    final_msg = ('dao verbose: ' + str(msg)) if label else str(msg)
    # green output; parenthesized form works on both Python 2 and 3
    print(term_green + final_msg + term_reset)
#-----------------------------------------------------------------------------------------------------------------------
def cmv(msg, label=True):
    """ print the verbose log msgs belonging to the crypt manager. """
    if _DISABLE_LOG_CLASS_CMV:
        return
    final_msg = ('cm verbose: ' + str(msg)) if label else str(msg)
    # light red output; parenthesized form works on both Python 2 and 3
    print(term_light_red + final_msg + term_reset)
#-----------------------------------------------------------------------------------------------------------------------
def cm(msg, label=True):
    """ print the log msgs belonging to the crypt manager. """
    if _DISABLE_LOG_CLASS_CM:
        return
    final_msg = ('cm: ' + str(msg)) if label else str(msg)
    # light red output; parenthesized form works on both Python 2 and 3
    print(term_light_red + final_msg + term_reset)
#-----------------------------------------------------------------------------------------------------------------------
def hazard(msg, label=True):
    """ print log msgs that are in "hazardous" category. These msgs should not be printed in a production build. """
    if _DISABLE_LOG_CLASS_HAZARD:
        return
    final_msg = ('***** hazardous log: ' + str(msg)) if label else str(msg)
    # red output; parenthesized form works on both Python 2 and 3
    print(term_red + final_msg + term_reset)
#-----------------------------------------------------------------------------------------------------------------------
def cli(msg, label=True):
    """ print the log msgs belonging to the cmd line interface . """
    if _DISABLE_LOG_CLASS_CLI:
        return
    final_msg = ('cli: ' + str(msg)) if label else str(msg)
    # parenthesized form works on both Python 2 and 3
    print(final_msg)
#-----------------------------------------------------------------------------------------------------------------------
def cliv(msg, label=True):
    """ print the log msgs belonging to the cmd line interface verbose category . """
    if _DISABLE_LOG_CLASS_CLIV:
        return
    final_msg = ('cli verbose: ' + str(msg)) if label else str(msg)
    # parenthesized form works on both Python 2 and 3
    print(final_msg)
#-----------------------------------------------------------------------------------------------------------------------
def uvsmgr(msg, label=True):
    """ print the log msgs belonging to the uvs manager. """
    if _DISABLE_LOG_CLASS_UVSMGR:
        return
    final_msg = ('uvs manager: ' + str(msg)) if label else str(msg)
    # light blue output; parenthesized form works on both Python 2 and 3
    print(term_light_blue + final_msg + term_reset)
#-----------------------------------------------------------------------------------------------------------------------
def uvsmgrv(msg, label=True):
    """ print the log msgs belonging to the uvs manager verbose category . """
    if _DISABLE_LOG_CLASS_UVSMGRV:
        return
    final_msg = ('uvs manager verbose: ' + str(msg)) if label else str(msg)
    # light blue output; parenthesized form works on both Python 2 and 3
    print(term_light_blue + final_msg + term_reset)
#-----------------------------------------------------------------------------------------------------------------------
def dag(msg, label=True):
    """ print DAG related log msgs. """
    if _DISABLE_LOG_CLASS_DAG:
        return
    final_msg = ('DAG: ' + str(msg)) if label else str(msg)
    # light green output; parenthesized form works on both Python 2 and 3
    print(term_light_green + final_msg + term_reset)
#-----------------------------------------------------------------------------------------------------------------------
def dagv(msg, label=True):
    """ print the verbose DAG related log msgs. """
    if _DISABLE_LOG_CLASS_DAGV:
        return
    final_msg = ('DAG verbose: ' + str(msg)) if label else str(msg)
    # light green output; parenthesized form works on both Python 2 and 3
    print(term_light_green + final_msg + term_reset)
#-----------------------------------------------------------------------------------------------------------------------
def ams(msg, label=True):
    """ print Auto Merge Service related log msgs. """
    if _DISABLE_LOG_CLASS_AMS:
        return
    final_msg = ('AMS: ' + str(msg)) if label else str(msg)
    # light green output; parenthesized form works on both Python 2 and 3
    print(term_light_green + final_msg + term_reset)
#-----------------------------------------------------------------------------------------------------------------------
def amsv(msg, label=True):
    """ print the verbose Auto Merge Service related log msgs. """
    if _DISABLE_LOG_CLASS_AMSV:
        return
    # NOTE(review): prefix 'AMSV verbose: ' is inconsistent with the other
    # *v helpers ('AMS verbose: ' would match) -- kept as-is for behavior.
    final_msg = ('AMSV verbose: ' + str(msg)) if label else str(msg)
    # light green output; parenthesized form works on both Python 2 and 3
    print(term_light_green + final_msg + term_reset)
#-----------------------------------------------------------------------------------------------------------------------
def grph(msg, label=True):
    """ print graph util related log msgs. """
    if _DISABLE_LOG_CLASS_GRPH:
        return
    final_msg = ('GRPH: ' + str(msg)) if label else str(msg)
    # light yellow output; parenthesized form works on both Python 2 and 3
    print(term_light_yellow + final_msg + term_reset)
#-----------------------------------------------------------------------------------------------------------------------
def grphv(msg, label=True):
    """ print the verbose graph util related log msgs. """
    if _DISABLE_LOG_CLASS_GRPHV:
        return
    final_msg = ('GRPH verbose: ' + str(msg)) if label else str(msg)
    # light yellow output; parenthesized form works on both Python 2 and 3
    print(term_light_yellow + final_msg + term_reset)
| |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from keystoneclient.auth import token_endpoint
from keystoneclient import exceptions as keystone_exceptions
import mock
from oslo_config import cfg
from testtools import matchers
from rally.common import objects
from rally import consts
from rally import exceptions
from rally import osclients
from tests.unit import fakes
from tests.unit import test
@osclients.configure("dummy")
class DummyClient(osclients.OSClient):
    # Minimal OSClient registered under service name "dummy"; gives the
    # tests a concrete configured client class without real client creation.
    def create_client(self, *args, **kwargs):
        """No-op implementation of the abstract client factory hook."""
        pass
class OSClientTestCase(test.TestCase):
    def test_choose_service_type(self):
        """choose_service_type() returns the configured default service type
        unless an explicit one is passed as an argument."""
        default_service_type = "default_service_type"
        @osclients.configure("test_choose_service_type",
                             default_service_type=default_service_type)
        class FakeClient(osclients.OSClient):
            create_client = mock.MagicMock()
        fake_client = FakeClient(mock.MagicMock(), {}, {})
        self.assertEqual(default_service_type,
                         fake_client.choose_service_type())
        self.assertEqual("foo",
                         fake_client.choose_service_type("foo"))
class CachedTestCase(test.TestCase):
    """Tests for the per-Clients cache of created OS clients."""
    def test_cached(self):
        """Clients caches clients keyed by plugin name (plus call args)."""
        clients = osclients.Clients(mock.MagicMock())
        client_name = "CachedTestCase.test_cached"
        fake_client = osclients.configure(client_name)(
            osclients.OSClient(clients.credential, clients.api_info,
                               clients.cache))
        fake_client.create_client = mock.MagicMock()
        self.assertEqual({}, clients.cache)
        # First call populates the cache under the bare client name.
        fake_client()
        self.assertEqual(
            {client_name: fake_client.create_client.return_value},
            clients.cache)
        fake_client.create_client.assert_called_once_with()
        # An identical second call is served from the cache (no new create).
        fake_client()
        fake_client.create_client.assert_called_once_with()
        # Different call args produce a distinct cache key embedding the args.
        fake_client("2")
        self.assertEqual(
            {client_name: fake_client.create_client.return_value,
             "%s('2',)" % client_name: fake_client.create_client.return_value},
            clients.cache)
        # clear() empties the whole cache.
        clients.clear()
        self.assertEqual({}, clients.cache)
class TestCreateKeystoneClient(test.TestCase):
    """Tests for Keystone._create_keystone_client / Keystone.create_client."""
    def make_auth_args(self):
        """Return (auth-only kwargs, full kwargs incl. TLS options)."""
        auth_kwargs = {
            "auth_url": "http://auth_url", "username": "user",
            "password": "password", "tenant_name": "tenant",
            "domain_name": "domain", "project_name": "project_name",
            "project_domain_name": "project_domain_name",
            "user_domain_name": "user_domain_name",
        }
        kwargs = {"https_insecure": False, "https_cacert": None}
        kwargs.update(auth_kwargs)
        return auth_kwargs, kwargs
    def set_up_keystone_mocks(self):
        """Install MagicMock stand-ins for the keystoneclient modules.

        Patches sys.modules so the code under test imports these mocks
        instead of the real keystoneclient package; the patch is removed
        automatically via addCleanup.
        """
        self.ksc_module = mock.MagicMock()
        self.ksc_client = mock.MagicMock()
        self.ksc_identity = mock.MagicMock()
        self.ksc_password = mock.MagicMock()
        self.ksc_session = mock.MagicMock()
        self.ksc_auth = mock.MagicMock()
        self.patcher = mock.patch.dict("sys.modules",
                                       {"keystoneclient": self.ksc_module,
                                        "keystoneclient.auth": self.ksc_auth})
        self.patcher.start()
        self.addCleanup(self.patcher.stop)
        self.ksc_module.client = self.ksc_client
        self.ksc_auth.identity = self.ksc_identity
        self.ksc_auth.identity.Password = self.ksc_password
        self.ksc_module.session = self.ksc_session
    def test_create_keystone_client(self):
        """Client is built from a password auth plugin wrapped in a session."""
        # NOTE(bigjools): This is a very poor testing strategy as it
        # tightly couples the test implementation to the tested
        # function's implementation. Ideally, we'd use a fake keystone
        # but all that's happening here is that it's checking the right
        # parameters were passed to the various parts that create a
        # client. Hopefully one day we'll get a real fake from the
        # keystone guys.
        self.set_up_keystone_mocks()
        auth_kwargs, all_kwargs = self.make_auth_args()
        keystone = osclients.Keystone(
            mock.MagicMock(), mock.sentinel, mock.sentinel)
        client = keystone._create_keystone_client(all_kwargs)
        self.ksc_password.assert_called_once_with(**auth_kwargs)
        self.ksc_session.Session.assert_called_once_with(
            auth=self.ksc_identity.Password(), timeout=mock.ANY,
            verify=mock.ANY)
        self.ksc_client.Client.assert_called_once_with(
            version=None, **all_kwargs)
        self.assertIs(client, self.ksc_client.Client())
    def test_client_is_pre_authed(self):
        """The returned client already carries an auth_ref."""
        # The client needs to be pre-authed so that service_catalog
        # works. This is because when using sessions, lazy auth is done
        # in keystoneclient.
        self.set_up_keystone_mocks()
        _, all_kwargs = self.make_auth_args()
        keystone = osclients.Keystone(
            mock.MagicMock(), mock.sentinel, mock.sentinel)
        client = keystone._create_keystone_client(all_kwargs)
        auth_ref = getattr(client, "auth_ref", None)
        self.assertIsNot(auth_ref, None)
        self.ksc_client.Client.assert_called_once_with(
            version=None, **all_kwargs)
        self.assertIs(client, self.ksc_client.Client())
    def test_create_client_removes_url_path_if_version_specified(self):
        """A versioned auth_url is stripped when a version is requested."""
        # If specifying a version on the client creation call, ensure
        # the auth_url is versionless and the version required is passed
        # into the Client() call.
        self.set_up_keystone_mocks()
        auth_kwargs, all_kwargs = self.make_auth_args()
        credential = objects.Credential(
            "http://auth_url/v2.0", "user", "pass", "tenant")
        keystone = osclients.Keystone(
            credential, {}, mock.MagicMock())
        client = keystone.create_client(version="3")
        self.assertIs(client, self.ksc_client.Client())
        called_with = self.ksc_client.Client.call_args_list[0][1]
        self.expectThat(
            called_with["auth_url"], matchers.Equals("http://auth_url/"))
        self.expectThat(called_with["version"], matchers.Equals("3"))
    def test_create_keystone_client_with_v2_url_omits_domain(self):
        """Domain kwargs are dropped when the auth_url pins v2."""
        # NOTE(bigjools): Test that domain-related info is not present
        # when forcing a v2 URL, because it breaks keystoneclient's
        # service discovery.
        self.set_up_keystone_mocks()
        auth_kwargs, all_kwargs = self.make_auth_args()
        all_kwargs["auth_url"] = "http://auth_url/v2.0"
        auth_kwargs["auth_url"] = all_kwargs["auth_url"]
        keystone = osclients.Keystone(
            mock.MagicMock(), mock.sentinel, mock.sentinel)
        client = keystone._create_keystone_client(all_kwargs)
        # v2 does not understand domains, so they must not be forwarded.
        auth_kwargs.pop("user_domain_name")
        auth_kwargs.pop("project_domain_name")
        auth_kwargs.pop("domain_name")
        self.ksc_password.assert_called_once_with(**auth_kwargs)
        self.ksc_session.Session.assert_called_once_with(
            auth=self.ksc_identity.Password(), timeout=mock.ANY,
            verify=mock.ANY)
        self.ksc_client.Client.assert_called_once_with(
            version=None, **all_kwargs)
        self.assertIs(client, self.ksc_client.Client())
    def test_create_keystone_client_with_v2_version_omits_domain(self):
        """Domain kwargs are dropped when version='2' is requested."""
        self.set_up_keystone_mocks()
        auth_kwargs, all_kwargs = self.make_auth_args()
        all_kwargs["auth_url"] = "http://auth_url/"
        auth_kwargs["auth_url"] = all_kwargs["auth_url"]
        keystone = osclients.Keystone(
            mock.MagicMock(), mock.sentinel, mock.sentinel)
        client = keystone._create_keystone_client(all_kwargs, version="2")
        auth_kwargs.pop("user_domain_name")
        auth_kwargs.pop("project_domain_name")
        auth_kwargs.pop("domain_name")
        self.ksc_password.assert_called_once_with(**auth_kwargs)
        self.ksc_session.Session.assert_called_once_with(
            auth=self.ksc_identity.Password(), timeout=mock.ANY,
            verify=mock.ANY)
        self.ksc_client.Client.assert_called_once_with(
            version="2", **all_kwargs)
        self.assertIs(client, self.ksc_client.Client())
@ddt.ddt
class OSClientsTestCase(test.TestCase):
    """Per-service tests for the osclients.Clients factory methods.

    Each test mocks the service's python client package via
    mock.patch.dict("sys.modules", ...) and then verifies that Clients
    resolves the endpoint from the (fake) keystone service catalog and
    constructs the client with the expected arguments, caching the result.
    """
    def setUp(self):
        """Patch Keystone client creation so no real authentication happens."""
        super(OSClientsTestCase, self).setUp()
        self.credential = objects.Credential("http://auth_url/v2.0", "use",
                                             "pass", "tenant")
        self.clients = osclients.Clients(self.credential, {})
        self.fake_keystone = fakes.FakeKeystoneClient()
        self.fake_keystone.auth_token = mock.MagicMock()
        self.service_catalog = self.fake_keystone.service_catalog
        self.service_catalog.url_for = mock.MagicMock()
        keystone_patcher = mock.patch(
            "rally.osclients.Keystone._create_keystone_client")
        self.mock_create_keystone_client = keystone_patcher.start()
        self.addCleanup(keystone_patcher.stop)
        self.mock_create_keystone_client.return_value = self.fake_keystone
    def tearDown(self):
        # No extra cleanup; patchers are removed via addCleanup in setUp.
        super(OSClientsTestCase, self).tearDown()
    def test_create_from_env(self):
        """create_from_env reads the standard OS_* environment variables."""
        with mock.patch.dict("os.environ",
                             {"OS_AUTH_URL": "foo_auth_url",
                              "OS_USERNAME": "foo_username",
                              "OS_PASSWORD": "foo_password",
                              "OS_TENANT_NAME": "foo_tenant_name",
                              "OS_REGION_NAME": "foo_region_name"}):
            clients = osclients.Clients.create_from_env()
        self.assertEqual("foo_auth_url", clients.credential.auth_url)
        self.assertEqual("foo_username", clients.credential.username)
        self.assertEqual("foo_password", clients.credential.password)
        self.assertEqual("foo_tenant_name", clients.credential.tenant_name)
        self.assertEqual("foo_region_name", clients.credential.region_name)
    @mock.patch.object(DummyClient, "_get_endpoint")
    @mock.patch("keystoneclient.session.Session")
    def test_get_session(self, mock_session, mock_dummy_client__get_endpoint):
        """_get_session builds a token-endpoint auth for the default endpoint."""
        # Use DummyClient since if not the abc meta kicks in
        osc = DummyClient(self.credential, {}, {})
        with mock.patch.object(token_endpoint, "Token") as token:
            osc._get_session()
            token.assert_called_once_with(
                mock_dummy_client__get_endpoint.return_value,
                self.fake_keystone.auth_token
            )
            mock_session.assert_called_once_with(
                auth=token.return_value, verify=not self.credential.insecure,
                timeout=cfg.CONF.openstack_client_http_timeout)
    @mock.patch.object(DummyClient, "_get_endpoint")
    @mock.patch("keystoneclient.session.Session")
    def test_get_session_with_endpoint(
            self, mock_session, mock_dummy_client__get_endpoint):
        """An explicit endpoint bypasses the _get_endpoint lookup."""
        # Use DummyClient since if not the abc meta kicks in
        osc = DummyClient(self.credential, {}, {})
        fake_endpoint = mock.Mock()
        with mock.patch.object(token_endpoint, "Token") as token:
            osc._get_session(endpoint=fake_endpoint)
            self.assertFalse(mock_dummy_client__get_endpoint.called)
            token.assert_called_once_with(
                fake_endpoint,
                self.fake_keystone.auth_token
            )
            mock_session.assert_called_once_with(
                auth=token.return_value, verify=not self.credential.insecure,
                timeout=cfg.CONF.openstack_client_http_timeout)
    @mock.patch("keystoneclient.session.Session")
    def test_get_session_with_auth(self, mock_session):
        """An explicit auth plugin is passed straight to the Session."""
        # Use DummyClient since if not the abc meta kicks in
        osc = DummyClient(self.credential, {}, {})
        fake_auth = mock.Mock()
        osc._get_session(auth=fake_auth)
        mock_session.assert_called_once_with(
            auth=fake_auth, verify=not self.credential.insecure,
            timeout=cfg.CONF.openstack_client_http_timeout)
    @mock.patch("keystoneclient.session.Session")
    def test_get_session_with_ca(self, mock_session):
        """A configured CA bundle is used for TLS verification."""
        # Use DummyClient since if not the abc meta kicks in
        osc = DummyClient(self.credential, {}, {})
        self.credential.cacert = "/fake/ca"
        fake_auth = mock.Mock()
        osc._get_session(auth=fake_auth)
        mock_session.assert_called_once_with(
            auth=fake_auth, verify="/fake/ca",
            timeout=cfg.CONF.openstack_client_http_timeout)
    def test_keystone(self):
        """keystone() creates, returns and caches the keystone client."""
        self.assertNotIn("keystone", self.clients.cache)
        client = self.clients.keystone()
        self.assertEqual(client, self.fake_keystone)
        credential = {"timeout": cfg.CONF.openstack_client_http_timeout,
                      "insecure": False, "cacert": None}
        kwargs = self.credential.to_dict()
        kwargs.update(credential.items())
        self.mock_create_keystone_client.assert_called_once_with(
            kwargs, version=None)
        self.assertEqual(self.fake_keystone, self.clients.cache["keystone"])
    @mock.patch("rally.osclients.Keystone.create_client")
    def test_verified_keystone_user_not_admin(self,
                                              mock_keystone_create_client):
        """Non-admin credentials raise InvalidAdminException."""
        # naming rule for mocks sucks
        mock_keystone = mock_keystone_create_client
        mock_keystone.return_value = fakes.FakeKeystoneClient()
        mock_keystone.return_value.auth_ref.role_names = ["notadmin"]
        self.assertRaises(exceptions.InvalidAdminException,
                          self.clients.verified_keystone)
    @mock.patch("rally.osclients.Keystone.create_client")
    def test_verified_keystone_unauthorized(self, mock_keystone_create_client):
        """Keystone Unauthorized maps to InvalidEndpointsException."""
        mock_keystone_create_client.return_value = fakes.FakeKeystoneClient()
        mock_keystone_create_client.side_effect = (
            keystone_exceptions.Unauthorized)
        self.assertRaises(exceptions.InvalidEndpointsException,
                          self.clients.verified_keystone)
    @mock.patch("rally.osclients.Keystone.create_client")
    def test_verified_keystone_unreachable(self, mock_keystone_create_client):
        """AuthorizationFailure maps to HostUnreachableException."""
        mock_keystone_create_client.return_value = fakes.FakeKeystoneClient()
        mock_keystone_create_client.side_effect = (
            keystone_exceptions.AuthorizationFailure
        )
        self.assertRaises(exceptions.HostUnreachableException,
                          self.clients.verified_keystone)
    def test_nova(self):
        """nova() builds a v2 novaclient against the compute endpoint."""
        fake_nova = fakes.FakeNovaClient()
        mock_nova = mock.MagicMock()
        mock_nova.client.Client.return_value = fake_nova
        self.assertNotIn("nova", self.clients.cache)
        with mock.patch.dict("sys.modules", {"novaclient": mock_nova}):
            client = self.clients.nova()
            self.assertEqual(fake_nova, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="compute",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.credential.region_name)
            mock_nova.client.Client.assert_called_once_with(
                "2",
                auth_token=self.fake_keystone.auth_token,
                http_log_debug=False,
                timeout=cfg.CONF.openstack_client_http_timeout,
                insecure=False, cacert=None,
                username=self.credential.username,
                api_key=self.credential.password,
                project_id=self.credential.tenant_name,
                auth_url=self.credential.auth_url)
            client.set_management_url.assert_called_once_with(
                self.service_catalog.url_for.return_value)
            self.assertEqual(fake_nova, self.clients.cache["nova"])
    def test_neutron(self):
        """neutron() builds a 2.0 neutronclient against the network endpoint."""
        fake_neutron = fakes.FakeNeutronClient()
        mock_neutron = mock.MagicMock()
        mock_neutron.client.Client.return_value = fake_neutron
        self.assertNotIn("neutron", self.clients.cache)
        with mock.patch.dict("sys.modules", {"neutronclient.neutron":
                                             mock_neutron}):
            client = self.clients.neutron()
            self.assertEqual(fake_neutron, client)
            kw = {
                "token": self.fake_keystone.auth_token,
                "endpoint_url": self.service_catalog.url_for.return_value,
                "timeout": cfg.CONF.openstack_client_http_timeout,
                "insecure": self.credential.insecure,
                "ca_cert": self.credential.cacert,
                "username": self.credential.username,
                "password": self.credential.password,
                "tenant_name": self.credential.tenant_name,
                "auth_url": self.credential.auth_url
            }
            self.service_catalog.url_for.assert_called_once_with(
                service_type="network",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.credential.region_name)
            mock_neutron.client.Client.assert_called_once_with("2.0", **kw)
            self.assertEqual(fake_neutron, self.clients.cache["neutron"])
    def test_glance(self):
        """glance() builds a v1 glanceclient against the image endpoint."""
        fake_glance = fakes.FakeGlanceClient()
        mock_glance = mock.MagicMock()
        mock_glance.Client = mock.MagicMock(return_value=fake_glance)
        with mock.patch.dict("sys.modules", {"glanceclient": mock_glance}):
            self.assertNotIn("glance", self.clients.cache)
            client = self.clients.glance()
            self.assertEqual(fake_glance, client)
            kw = {"endpoint": self.service_catalog.url_for.return_value,
                  "token": self.fake_keystone.auth_token,
                  "timeout": cfg.CONF.openstack_client_http_timeout,
                  "insecure": False, "cacert": None}
            self.service_catalog.url_for.assert_called_once_with(
                service_type="image",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.credential.region_name)
            mock_glance.Client.assert_called_once_with("1", **kw)
            self.assertEqual(fake_glance, self.clients.cache["glance"])
    def test_cinder(self):
        """cinder() builds a v2 cinderclient and patches its inner client."""
        fake_cinder = mock.MagicMock(client=fakes.FakeCinderClient())
        mock_cinder = mock.MagicMock()
        mock_cinder.client.Client.return_value = fake_cinder
        self.assertNotIn("cinder", self.clients.cache)
        with mock.patch.dict("sys.modules", {"cinderclient": mock_cinder}):
            client = self.clients.cinder()
            self.assertEqual(fake_cinder, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="volumev2",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.credential.region_name)
            mock_cinder.client.Client.assert_called_once_with(
                "2",
                http_log_debug=False,
                timeout=cfg.CONF.openstack_client_http_timeout,
                insecure=False, cacert=None,
                username=self.credential.username,
                api_key=self.credential.password,
                project_id=self.credential.tenant_name,
                auth_url=self.credential.auth_url)
            self.assertEqual(fake_cinder.client.management_url,
                             self.service_catalog.url_for.return_value)
            self.assertEqual(fake_cinder.client.auth_token,
                             self.fake_keystone.auth_token)
            self.assertEqual(fake_cinder, self.clients.cache["cinder"])
    def test_manila(self):
        """manila() builds a v1 manilaclient against the share endpoint."""
        mock_manila = mock.MagicMock()
        self.assertNotIn("manila", self.clients.cache)
        with mock.patch.dict("sys.modules", {"manilaclient": mock_manila}):
            client = self.clients.manila()
            self.assertEqual(mock_manila.client.Client.return_value, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="share",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.credential.region_name)
            mock_manila.client.Client.assert_called_once_with(
                "1",
                http_log_debug=False,
                timeout=cfg.CONF.openstack_client_http_timeout,
                insecure=False, cacert=None,
                username=self.credential.username,
                api_key=self.credential.password,
                region_name=self.credential.region_name,
                project_name=self.credential.tenant_name,
                auth_url=self.credential.auth_url)
            self.assertEqual(
                mock_manila.client.Client.return_value.client.management_url,
                self.service_catalog.url_for.return_value)
            self.assertEqual(
                mock_manila.client.Client.return_value.client.auth_token,
                self.fake_keystone.auth_token)
            self.assertEqual(
                mock_manila.client.Client.return_value,
                self.clients.cache["manila"])
    def test_ceilometer(self):
        """ceilometer() builds a v2 client against the metering endpoint."""
        fake_ceilometer = fakes.FakeCeilometerClient()
        mock_ceilometer = mock.MagicMock()
        mock_ceilometer.client.get_client = mock.MagicMock(
            return_value=fake_ceilometer)
        self.assertNotIn("ceilometer", self.clients.cache)
        with mock.patch.dict("sys.modules",
                             {"ceilometerclient": mock_ceilometer}):
            client = self.clients.ceilometer()
            self.assertEqual(fake_ceilometer, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="metering",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.credential.region_name)
            kw = {"os_endpoint": self.service_catalog.url_for.return_value,
                  "token": self.fake_keystone.auth_token,
                  "timeout": cfg.CONF.openstack_client_http_timeout,
                  "insecure": False, "cacert": None,
                  "username": self.credential.username,
                  "password": self.credential.password,
                  "tenant_name": self.credential.tenant_name,
                  "auth_url": self.credential.auth_url
                  }
            mock_ceilometer.client.get_client.assert_called_once_with("2",
                                                                      **kw)
            self.assertEqual(fake_ceilometer,
                             self.clients.cache["ceilometer"])
    def test_gnocchi(self):
        """gnocchi() builds a v1 client over a keystoneauth1 session."""
        fake_gnocchi = fakes.FakeGnocchiClient()
        mock_gnocchi = mock.MagicMock()
        mock_gnocchi.client.Client.return_value = fake_gnocchi
        mock_keystoneauth1 = mock.MagicMock()
        self.assertNotIn("gnocchi", self.clients.cache)
        with mock.patch.dict("sys.modules",
                             {"gnocchiclient": mock_gnocchi,
                              "keystoneauth1": mock_keystoneauth1}):
            client = self.clients.gnocchi()
            self.assertEqual(fake_gnocchi, client)
            kw = {"version": "1",
                  "session": mock_keystoneauth1.session.Session(),
                  "service_type": "metric"}
            mock_gnocchi.client.Client.assert_called_once_with(**kw)
            self.assertEqual(fake_gnocchi, self.clients.cache["gnocchi"])
    def test_monasca(self):
        """monasca() builds a 2_0 client against the monitoring endpoint."""
        fake_monasca = fakes.FakeMonascaClient()
        mock_monasca = mock.MagicMock()
        mock_monasca.client.Client.return_value = fake_monasca
        self.assertNotIn("monasca", self.clients.cache)
        with mock.patch.dict("sys.modules",
                             {"monascaclient": mock_monasca}):
            client = self.clients.monasca()
            self.assertEqual(fake_monasca, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="monitoring",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.credential.region_name)
            os_endpoint = self.service_catalog.url_for.return_value
            kw = {"token": self.fake_keystone.auth_token,
                  "timeout": cfg.CONF.openstack_client_http_timeout,
                  "insecure": False, "cacert": None,
                  "username": self.credential.username,
                  "password": self.credential.password,
                  "tenant_name": self.credential.tenant_name,
                  "auth_url": self.credential.auth_url
                  }
            mock_monasca.client.Client.assert_called_once_with("2_0",
                                                               os_endpoint,
                                                               **kw)
            self.assertEqual(mock_monasca.client.Client.return_value,
                             self.clients.cache["monasca"])
    def test_ironic(self):
        """ironic() builds a v1 client against the baremetal endpoint."""
        fake_ironic = fakes.FakeIronicClient()
        mock_ironic = mock.MagicMock()
        mock_ironic.client.get_client = mock.MagicMock(
            return_value=fake_ironic)
        self.assertNotIn("ironic", self.clients.cache)
        with mock.patch.dict("sys.modules", {"ironicclient": mock_ironic}):
            client = self.clients.ironic()
            self.assertEqual(fake_ironic, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="baremetal",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.credential.region_name)
            kw = {
                "os_auth_token": self.fake_keystone.auth_token,
                "ironic_url": self.service_catalog.url_for.return_value,
                "timeout": cfg.CONF.openstack_client_http_timeout,
                "insecure": self.credential.insecure,
                "cacert": self.credential.cacert
            }
            mock_ironic.client.get_client.assert_called_once_with("1", **kw)
            self.assertEqual(fake_ironic, self.clients.cache["ironic"])
    def test_sahara(self):
        """sahara() builds a 1.1 client with password-based kwargs."""
        fake_sahara = fakes.FakeSaharaClient()
        mock_sahara = mock.MagicMock()
        mock_sahara.client.Client = mock.MagicMock(return_value=fake_sahara)
        self.assertNotIn("sahara", self.clients.cache)
        with mock.patch.dict("sys.modules", {"saharaclient": mock_sahara}):
            client = self.clients.sahara()
            self.assertEqual(fake_sahara, client)
            kw = {
                "service_type": "data-processing",
                "endpoint_type": self.credential.endpoint_type,
                "insecure": False,
                "username": self.credential.username,
                "api_key": self.credential.password,
                "project_name": self.credential.tenant_name,
                "cacert": self.credential.cacert,
                "auth_url": self.credential.auth_url
            }
            mock_sahara.client.Client.assert_called_once_with(1.1, **kw)
            self.assertEqual(fake_sahara, self.clients.cache["sahara"])
    def test_zaqar(self):
        """zaqar() builds a 1.1 client with keystone-backed auth options."""
        fake_zaqar = fakes.FakeZaqarClient()
        mock_zaqar = mock.MagicMock()
        mock_zaqar.client.Client = mock.MagicMock(return_value=fake_zaqar)
        self.assertNotIn("zaqar", self.clients.cache)
        p_id = self.fake_keystone.auth_ref.get("token").get("tenant").get("id")
        with mock.patch.dict("sys.modules", {"zaqarclient.queues":
                                             mock_zaqar}):
            client = self.clients.zaqar()
            self.assertEqual(fake_zaqar, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="messaging",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.credential.region_name)
            fake_zaqar_url = self.service_catalog.url_for.return_value
            conf = {"auth_opts": {"backend": "keystone", "options": {
                "os_username": self.credential.username,
                "os_password": self.credential.password,
                "os_project_name": self.credential.tenant_name,
                "os_project_id": p_id,
                "os_auth_url": self.credential.auth_url,
                "insecure": self.credential.insecure,
            }}}
            mock_zaqar.client.Client.assert_called_once_with(
                url=fake_zaqar_url, version=1.1, conf=conf)
            self.assertEqual(fake_zaqar, self.clients.cache["zaqar"])
    def test_trove(self):
        """trove() builds a 1.0 client with password-based kwargs."""
        fake_trove = fakes.FakeTroveClient()
        mock_trove = mock.MagicMock()
        mock_trove.client.Client = mock.MagicMock(return_value=fake_trove)
        self.assertNotIn("trove", self.clients.cache)
        with mock.patch.dict("sys.modules", {"troveclient": mock_trove}):
            client = self.clients.trove()
            self.assertEqual(fake_trove, client)
            kw = {
                "username": self.credential.username,
                "api_key": self.credential.password,
                "project_id": self.credential.tenant_name,
                "auth_url": self.credential.auth_url,
                "region_name": self.credential.region_name,
                "timeout": cfg.CONF.openstack_client_http_timeout,
                "insecure": self.credential.insecure,
                "cacert": self.credential.cacert
            }
            mock_trove.client.Client.assert_called_once_with("1.0", **kw)
            self.assertEqual(fake_trove, self.clients.cache["trove"])
    def test_mistral(self):
        """mistral() builds a client against the workflowv2 endpoint."""
        fake_mistral = fakes.FakeMistralClient()
        mock_mistral = mock.Mock()
        mock_mistral.client.client.return_value = fake_mistral
        self.assertNotIn("mistral", self.clients.cache)
        with mock.patch.dict(
                "sys.modules", {"mistralclient": mock_mistral,
                                "mistralclient.api": mock_mistral}):
            client = self.clients.mistral()
            self.assertEqual(fake_mistral, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="workflowv2",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.credential.region_name
            )
            fake_mistral_url = self.service_catalog.url_for.return_value
            mock_mistral.client.client.assert_called_once_with(
                mistral_url=fake_mistral_url,
                service_type="workflowv2",
                auth_token=self.fake_keystone.auth_token
            )
            self.assertEqual(fake_mistral, self.clients.cache["mistral"])
    def test_swift(self):
        """swift() builds a pre-authenticated Connection to object-store."""
        fake_swift = fakes.FakeSwiftClient()
        mock_swift = mock.MagicMock()
        mock_swift.client.Connection = mock.MagicMock(return_value=fake_swift)
        self.assertNotIn("swift", self.clients.cache)
        with mock.patch.dict("sys.modules", {"swiftclient": mock_swift}):
            client = self.clients.swift()
            self.assertEqual(client, fake_swift)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="object-store",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.credential.region_name)
            kw = {"retries": 1,
                  "preauthurl": self.service_catalog.url_for.return_value,
                  "preauthtoken": self.fake_keystone.auth_token,
                  "insecure": False,
                  "cacert": None,
                  "user": self.credential.username,
                  "tenant_name": self.credential.tenant_name,
                  }
            mock_swift.client.Connection.assert_called_once_with(**kw)
            self.assertEqual(self.clients.cache["swift"], fake_swift)
    def test_ec2(self):
        """ec2() creates EC2 credentials via keystone and connects boto."""
        mock_boto = mock.Mock()
        self.service_catalog.url_for.return_value = "http://fake.to:1/fake"
        self.fake_keystone.ec2 = mock.Mock()
        self.fake_keystone.ec2.create.return_value = mock.Mock(
            access="fake_access", secret="fake_secret")
        fake_ec2 = fakes.FakeEC2Client()
        mock_boto.connect_ec2_endpoint.return_value = fake_ec2
        self.assertNotIn("ec2", self.clients.cache)
        with mock.patch.dict("sys.modules", {"boto": mock_boto}):
            client = self.clients.ec2()
            self.assertEqual(fake_ec2, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="ec2",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.credential.region_name)
            kw = {
                "url": "http://fake.to:1/fake",
                "aws_access_key_id": "fake_access",
                "aws_secret_access_key": "fake_secret",
                "is_secure": self.credential.insecure,
            }
            mock_boto.connect_ec2_endpoint.assert_called_once_with(**kw)
            self.assertEqual(fake_ec2, self.clients.cache["ec2"])
    @mock.patch("rally.osclients.Keystone.create_client")
    def test_services(self, mock_keystone_create_client):
        """services() maps catalog service types to known client names."""
        available_services = {consts.ServiceType.IDENTITY: {},
                              consts.ServiceType.COMPUTE: {},
                              "some_service": {}}
        mock_keystone_create_client.return_value = mock.Mock(
            service_catalog=mock.Mock(
                get_endpoints=lambda: available_services))
        clients = osclients.Clients(self.credential)
        self.assertEqual(
            {consts.ServiceType.IDENTITY: consts.Service.KEYSTONE,
             consts.ServiceType.COMPUTE: consts.Service.NOVA,
             "some_service": "__unknown__"},
            clients.services())
    def test_murano(self):
        """murano() builds a v1 client against the application-catalog."""
        fake_murano = fakes.FakeMuranoClient()
        mock_murano = mock.Mock()
        mock_murano.client.Client.return_value = fake_murano
        self.assertNotIn("murano", self.clients.cache)
        with mock.patch.dict("sys.modules", {"muranoclient": mock_murano}):
            client = self.clients.murano()
            self.assertEqual(fake_murano, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="application-catalog",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.credential.region_name
            )
            kw = {"endpoint": self.service_catalog.url_for.return_value,
                  "token": self.fake_keystone.auth_token}
            mock_murano.client.Client.assert_called_once_with("1", **kw)
            self.assertEqual(fake_murano, self.clients.cache["murano"])
    @mock.patch("rally.osclients.Designate._get_session")
    @ddt.data(
        {},
        {"version": "2"},
        {"version": "1"},
        {"version": None}
    )
    @ddt.unpack
    def test_designate(self, mock_designate__get_session, version=None):
        """designate() appends /v<version> to the dns endpoint and caches
        per-version."""
        fake_designate = fakes.FakeDesignateClient()
        mock_designate = mock.Mock()
        mock_designate.client.Client.return_value = fake_designate
        mock_designate__get_session.return_value = self.fake_keystone.session
        self.assertNotIn("designate", self.clients.cache)
        with mock.patch.dict("sys.modules",
                             {"designateclient": mock_designate}):
            if version is not None:
                client = self.clients.designate(version=version)
            else:
                client = self.clients.designate()
            self.assertEqual(fake_designate, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="dns",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.credential.region_name
            )
            default = version or "1"
            # Check that we append /v<version>
            url = self.service_catalog.url_for.return_value
            url.__iadd__.assert_called_once_with("/v%s" % default)
            mock_designate__get_session.assert_called_once_with(
                endpoint=url.__iadd__.return_value)
            mock_designate.client.Client.assert_called_once_with(
                default, session=self.fake_keystone.session)
            key = "designate"
            if version is not None:
                key += "%s" % {"version": version}
            self.assertEqual(fake_designate, self.clients.cache[key])
    @mock.patch("rally.osclients.Cue._get_session")
    def test_cue(self, mock_cue__get_session):
        """cue() builds a session-based client on the public interface."""
        fake_cue = fakes.FakeCueClient()
        mock_cue = mock.MagicMock()
        mock_cue.client.Client = mock.MagicMock(return_value=fake_cue)
        mock_cue__get_session.return_value = self.fake_keystone.session
        self.assertNotIn("cue", self.clients.cache)
        with mock.patch.dict("sys.modules", {"cueclient": mock_cue,
                                             "cueclient.v1": mock_cue}):
            client = self.clients.cue()
            self.assertEqual(fake_cue, client)
            mock_cue.client.Client.assert_called_once_with(
                interface=consts.EndpointType.PUBLIC,
                session=self.fake_keystone.session)
            self.assertEqual(fake_cue, self.clients.cache["cue"])
    def test_senlin(self):
        """senlin() builds a v1 client with password-based kwargs."""
        mock_senlin = mock.MagicMock()
        self.assertNotIn("senlin", self.clients.cache)
        with mock.patch.dict("sys.modules", {"senlinclient": mock_senlin}):
            client = self.clients.senlin()
            self.assertEqual(mock_senlin.client.Client.return_value, client)
            mock_senlin.client.Client.assert_called_once_with(
                "1",
                username=self.credential.username,
                password=self.credential.password,
                project_name=self.credential.tenant_name,
                cert=self.credential.cacert,
                auth_url=self.credential.auth_url)
            self.assertEqual(
                mock_senlin.client.Client.return_value,
                self.clients.cache["senlin"])
    @mock.patch("rally.osclients.Magnum._get_session")
    def test_magnum(self, mock_magnum__get_session):
        """magnum() builds a session-based client for container-infra."""
        fake_magnum = fakes.FakeMagnumClient()
        mock_magnum = mock.MagicMock()
        mock_magnum.client.Client.return_value = fake_magnum
        mock_magnum__get_session.return_value = self.fake_keystone.session
        self.assertNotIn("magnum", self.clients.cache)
        with mock.patch.dict("sys.modules", {"magnumclient": mock_magnum}):
            client = self.clients.magnum()
            self.assertEqual(fake_magnum, client)
            self.service_catalog.url_for.assert_called_once_with(
                service_type="container-infra",
                endpoint_type=consts.EndpointType.PUBLIC,
                region_name=self.credential.region_name)
            mock_magnum.client.Client.assert_called_once_with(
                interface=consts.EndpointType.PUBLIC,
                session=self.fake_keystone.session)
            self.assertEqual(fake_magnum, self.clients.cache["magnum"])
| |
#!/usr/bin/env python
"""Slack bot class to control a webcam and post snapshots to Slack ."""
import re
import time
import json
import random
import datetime as dt
import subprocess
import websocket
import sys
import logging
import socket
import signal
from slackclient import SlackClient
import psutil
import utils
import lexicon as lex
sys.path.append('./plugins/')
# logging module
logger = utils.loggerMaster('slack.bot')
class SlackBot():
"""master slack client that remains alive for the duration of the script. subsidiary connections to SlackClient are made on each connection drop or error"""
    def __init__(self, config):
        """Initialize bot state from *config* (api key, bot name, master, plugins)."""
        self.config=config
        self.token = self.config['api_key']
        self.slack_client = None  # SlackClient instance, created in generate_client()
        self.name = self.config['bot_name']
        self.slack_user_id = None  # bot's own user ID, resolved in generate_client()
        self.direct_message_channels=None  # result of im.list, set in generate_client()
        self.channel_id = None  # ID of a channel the bot is a member of
        self.channel_name = None
        self.master=self.config['master']
        # NOTE(review): config.get('plugins', 'plugins') looks like a
        # ConfigParser-style (section, option) lookup — confirm config type.
        self.plugins=self.config.get('plugins', 'plugins').split('\n')
        logger.info("Plugins installed: "+str(self.plugins))
        self.last_ping = 0  # unix time of the last server ping (see autoping)
        self.reconnects = 0  # number of SlackClient instances generated
        self.error_count = 0
        self.run_time = 0  # iterations since the last (re)connect
        self.run_time_total = 0  # iterations over the whole bot lifetime
        self.first_time = True  # triggers the "is awake" message in process()
        self.auth_check = True  # validate the API token only on first connect
        self.errors = []  # connection-error records accumulated while offline
        self.ping_frequency=15  # seconds between keepalive pings
def test_connection(self, verbose=True):
"""tests whether the device is connected to the internet"""
connected = False
retries = 0
while connected == False:
if verbose:
logger.info("Testing internet connection...")
try:
socket.create_connection(("www.google.com", 80))
if verbose:
logger.info("internet working")
connected = True
return True
except (socket.gaierror, socket.error):
logger.error(
"Internet connection down - retrying " +
str(retries))
error = utils.ConnectionDrop(self, "internet down")
retries += 1
time.sleep((1 + retries))
    def generate_client(self):
        """creates an instance of SlackClient for each connection

        Waits for internet connectivity, builds a fresh SlackClient,
        validates the token once, opens the RTM connection, and resolves
        the bot's user ID plus the channel it is a member of.  Exits the
        process on an invalid token or an RTM connection failure.
        """
        if self.test_connection():
            self.reconnects += 1
            logger.info("Generating slack_client")
            # check token is valid
            self.slack_client = SlackClient(self.token)
            if self.auth_check:
                # only validate the token on the very first connection
                self.auth_check = False
                if self.slack_client.api_call(
                        "auth.test", token=self.token).get('ok') == False:
                    logger.error("key not recognised")
                    sys.exit("Invalid key.. exiting")
                logger.info("Token valid - SlackClient generated " +
                            str(self.slack_client))
            logger.info("Connecting to RTM...")
            #test RTM connection
            try:
                self.slack_client.rtm_connect()
                logger.info("Connected to RTM")
                self.run_time = 0
            except Exception as e:
                logger.error("Error in RTM connection: " + str(e))
                logger.warning("Exiting script...")
                sys.exit(1)
            logger.info("Getting user & channel IDs")
            #get list of users, channels and direct message channels
            channel_list = self.slack_client.api_call("channels.list")
            self.direct_message_channels=self.slack_client.api_call("im.list")
            user_list = self.slack_client.api_call("users.list")
            # remember the (last) channel the bot is a member of
            for channel in channel_list.get('channels'):
                if channel.get('is_member'):
                    self.channel_id = str(channel.get('id'))
                    self.channel_name = str(channel.get('name'))
            # resolve the bot's own user ID from its configured name
            for user in user_list.get('members'):
                if user.get('name') == self.name:
                    self.slack_user_id = user.get('id')
            logger.info("Bot ID: " +
                        str(self.slack_user_id) +
                        " Channel ID: " +
                        str(self.channel_id) +
                        "/ " +
                        str(self.channel_name))
def say(self, text_message):
"""simple function to post a message to the bot's channel"""
try:
self.slack_client.api_call(
"chat.postMessage",
channel=self.channel_id,
text=str(text_message),
as_user=True)
except (websocket.WebSocketConnectionClosedException, socket.error) as e:
error = utils.ConnectionDrop(self, "chat connection error")
def autoping(self):
"""pings the slack server as set by the Bot"""
now = int(time.time())
if now > self.last_ping + self.ping_frequency:
self.slack_client.server.ping()
self.last_ping = now
def load_plugin(self, name):
"""loads the plugin for the process method"""
plugin=__import__("plugin_%s" % name)
return plugin
def call_plugin(self, name, message):
plugin= self.load_plugin(name)
plugin.plugin_main(message, self)
def process(self):
"""checks for connection errors, reads the RTM firehose and parses messages"""
self.run_time += 1
self.run_time_total += 1
try:
messages = self.slack_client.rtm_read()
self.error_count = 0
if self.first_time:
self.say("Bot ID:"+str(self.slack_user_id)+" is awake")
self.first_time=False
if self.errors:
drop_period = int(time.time()) - self.errors[0].timestamp
self.say(
"I was offline for " +
str(drop_period) +
" secs. " + str(len(self.errors)) + "errors.")
logger.debug("Offline for " + str(drop_period) + " secs")
self.errors = []
except websocket.WebSocketConnectionClosedException:
error = utils.ConnectionDrop(self, "websocket drop")
self.generate_client()
return
except socket.error:
error = utils.ConnectionDrop(self, "Socket error")
time.sleep(5)
self.error_count += 1
if self.error_count > 5:
self.generate_client()
return
#checks the message stream
for message in messages:
#print (message)
if message['type'] == 'presence_change':
if message['presence'] == 'active':
time.sleep(.5)
self.say(lex.response('greetings')+" "+str(self.master))
if 'text' in message:
if message['text'].startswith(
"<@%s>" %
self.slack_user_id) or 'text' in message and message['text'].startswith(
"<!%s>" %
'everyone'):
#if user issues a command, run through through all plugins
message_text = message['text']
for plugin in self.plugins:
self.call_plugin(plugin, message_text)
self.autoping()
if __name__ == '__main__':
    # This file is a library; there is nothing to run directly.
    print("This is a module")
| |
import os
import json
class CopyError(Exception):
    """Raised when spiders/items cannot be copied between projects."""
    pass
class SpiderCopier(object):
    """
    Utility for copying spiders and items from one project to another.
    :source: read data from source project in read_file
    :destination: read and write to destination project in read_file and
    save_files

    NOTE: written for Python 2 (uses iteritems/itervalues/unicode).
    Subclasses provide the storage backend via read_file/list_files/
    save_files.
    """
    def __init__(self, source, destination):
        # Cache both projects' file listings up front; the destination
        # listing is refreshed after renames (_refresh_destination_files).
        self.source = source
        self.source_files = set(self.list_files(source))
        self.destination = destination
        self.destination_files = set(self.list_files(destination))
    def _spider_path(self, spider):
        """Map a spider name to its JSON path within a project."""
        return 'spiders/%s.json' % spider
    def copy(self, spiders, items=None):
        """
        Copies the provided spiders and items from the source project to the
        destination project. If spiders have name collisions the copied spider
        will be renamed. In the event of item name collisions a merge will be
        attempted.
        :list spiders: List of spiders to copy from the source to the
        destination
        :list items: optional: List of items to copy that may not be scraped
        by the provided spiders
        raises CopyError
        """
        if items is None:
            items = []
        spider_paths = set(self._spider_path(s) for s in spiders)
        self._check_missing(spider_paths)
        # gather templates first, then resolve item/spider name collisions
        templates = self._load_templates(spiders)
        combined_items, renamed_items = \
            self._build_combined_items(templates, items)
        spider_data, renamed_spiders = self._load_spiders(spider_paths)
        templates = self._update_templates(templates, renamed_items,
                                           renamed_spiders)
        extractors = self._build_combined_extractors(templates)
        self._save_data({
            'items.json': combined_items,
            'extractors.json': extractors,
            'spiders': spider_data,
            'templates': templates,
        })
        return self._build_summary(spider_paths, items,
                                   renamed_spiders, renamed_items)
    def _refresh_destination_files(self):
        """Re-list the destination after files were added or renamed."""
        self.destination_files = set(self.list_files(self.destination))
    def _check_missing(self, spider_paths):
        """
        Check if any of the provided spiders don't exist.
        """
        missing = spider_paths - self.source_files
        if missing:
            raise CopyError('Unable to copy spiders as the following spiders '
                            'do not exist in the source project: "%s"' %
                            '", "'.join(missing))
    def _load_templates(self, spiders):
        """Load every template file stored under spiders/<name>/."""
        templates = {}
        template_startswith = ['spiders/%s/' % spider for spider in spiders]
        for file_path in self.source_files:
            if any(file_path.startswith(ts) for ts in template_startswith):
                templates[file_path] = self.read_file(self.source, file_path)
        return templates
    def _update_templates(self, templates, renamed_items, renamed_spiders):
        """
        Handle renamed items during copy.
        """
        updated_templates = {}
        for file_path, template in templates.iteritems():
            # repoint the template at the renamed item, if any
            scrapes = template['scrapes']
            if scrapes in renamed_items:
                template['scrapes'] = renamed_items[scrapes]
            # move the template under the renamed spider directory, if any
            spider = file_path.split('/')[1]
            if spider in renamed_spiders:
                template_name = file_path.split('/')[-1]
                spider = renamed_spiders[spider]
                file_path = os.path.join('spiders', spider, template_name)
            updated_templates[file_path] = template
        return updated_templates
    def _load_spiders(self, spider_paths):
        """Read the spider files, renaming any that collide at the destination.

        Returns (spiders, renamed_spiders): spiders maps file path -> data,
        renamed_spiders maps old spider name -> new spider name.
        """
        spiders = {p: self.read_file(self.source, p) for p in spider_paths}
        renamed_spiders = {}
        for spider_path in spiders.keys():
            if spider_path in self.destination_files:
                # [8:-5] strips the 'spiders/' prefix and '.json' suffix
                spider_name = spider_path[8:-5]
                moved_spider = self._rename(spider_name,
                                            self.destination_files)
                self._refresh_destination_files()
                spiders[moved_spider] = spiders.pop(spider_path)
                if spider_name != moved_spider:
                    renamed_spiders[spider_name] = moved_spider[8:-5]
        return spiders, renamed_spiders
    def _rename(self, name, dest_values, base='spiders/%s_%s%s.json'):
        """Return a '<name>_copy[N]' variant (formatted with *base*) that
        does not collide with anything in *dest_values*."""
        new_name = base % (name, 'copy', '')
        start = 1
        while new_name in dest_values:
            new_name = base % (name, 'copy', start)
            start += 1
        return new_name
    def _build_combined_items(self, templates, items):
        """
        Compare items from both source and destination. Merge compatible files,
        copy files that exist in the source and not the destination,
        rename incompatible files.
        """
        source_items = self.read_file(self.source, 'items.json')
        dest_items = self.read_file(self.destination, 'items.json')
        renamed_items = {}
        # items referenced by the templates, plus any explicitly requested
        copy_items = set(t['scrapes'] for t in templates.values()
                         if 'scrapes' in t)
        for item in items:
            copy_items.add(item)
        for name, item in source_items.iteritems():
            if name not in copy_items:
                continue
            if name in dest_items:
                new_name, item = self._merge_items(name, item,
                                                   dest_items[name],
                                                   dest_items.keys())
                if new_name != name:
                    renamed_items[name] = new_name
                    name = new_name
            dest_items[name] = item
        return dest_items, renamed_items
    def _merge_items(self, name, source, dest, existing):
        """Merge the source item into the same-named destination item.

        Returns (name, merged_item).  If any shared field conflicts on
        'required'/'type', the source item is kept as-is under a renamed
        key instead of being merged.
        """
        source_fields = set(source['fields'])
        dest_fields = set(dest['fields'])
        intersection = source_fields & dest_fields
        if intersection:
            for field in intersection:
                s_field = source['fields'].get(field)
                d_field = dest['fields'].get(field)
                if s_field is None:
                    continue
                elif d_field is None and s_field['required']:
                    return self._rename(name, existing, '%s_%s%s'), source
                if any(s_field[p] != d_field[p] for p in ('required', 'type')):
                    return self._rename(name, existing, '%s_%s%s'), source
        # no conflicts: copy over the fields unique to the source
        for field in source_fields - dest_fields:
            dest['fields'][field] = source['fields'][field]
        return name, dest
    def _build_combined_extractors(self, templates):
        """
        Take all extractors needed by the spiders that are being copied and
        add them to the extractors at the destination
        """
        source_extractors = self.read_file(self.source, 'extractors.json')
        dest_extractors = self.read_file(self.destination, 'extractors.json')
        for spider in templates.itervalues():
            for extractor in spider.get('extractors', []):
                if extractor not in dest_extractors:
                    dest_extractors[extractor] = source_extractors[extractor]
        return dest_extractors
    def _build_summary(self, spider_paths, items, renamed_spiders, renamed_items):
        """
        Build a summary of copied spiders and items
        """
        # [8:-5] strips 'spiders/' and '.json'
        spiders = [sp[8:-5] for sp in spider_paths]
        items = list(set(items) | set(renamed_items.keys()))
        return {
            'copied_spiders': spiders,
            'renamed_spiders': renamed_spiders,
            'copied_items': items,
            'renamed_items': renamed_items,
        }
    def _save_data(self, data):
        """Serialise *data* to JSON strings and hand them to save_files().

        Top-level keys ending in '.json' are single files; other keys
        ('spiders', 'templates') hold nested path -> data mappings.
        Relies on Python 2 dict.keys() returning a list, since entries are
        popped while iterating.
        """
        files_data = {}
        for path in data.keys():
            if isinstance(path, unicode):
                path = path.encode('utf-8')
            if path.endswith('.json'):
                files_data[path] = json.dumps(data.pop(path),
                                              sort_keys=True, indent=4)
            else:
                sub_directories = data.pop(path)
                for path in sub_directories.keys():
                    if isinstance(path, unicode):
                        path = path.encode('utf-8')
                    files_data[path] = json.dumps(sub_directories.pop(path),
                                                  sort_keys=True, indent=4)
        self.save_files(self.destination, files_data)
    def read_file(self, location, filename):
        """Return the parsed JSON content of *filename*; backend hook."""
        raise NotImplementedError
    def list_files(self, location):
        """Return an iterable of project-relative file paths; backend hook."""
        raise NotImplementedError
    def save_files(self, location, files):
        """Persist a mapping of path -> serialised data; backend hook."""
        raise NotImplementedError
class FileSystemSpiderCopier(SpiderCopier):
    """SpiderCopier backed by the local filesystem.

    Projects are directories under *base_dir*; every file is JSON.
    """
    def __init__(self, source, destination, base_dir='.'):
        # join with '' guarantees base_dir ends with a path separator
        self.base_dir = os.path.join(base_dir, '')
        super(FileSystemSpiderCopier, self).__init__(source, destination)
    def read_file(self, location, filename):
        with open(os.path.join(self.base_dir, location, filename), 'r') as f:
            return json.loads(f.read())
    def list_files(self, location):
        """Walk the project directory and list *.json paths relative to it."""
        file_paths = []
        project_dir = os.path.join(self.base_dir, location)
        for dir, _, files in os.walk(project_dir):
            # make the walked directory relative to the project root
            dir = dir.split(project_dir)[1]
            dir = dir[1:] if dir.startswith(os.path.sep) else dir
            for filename in files:
                if filename.endswith('.json'):
                    file_paths.append(os.path.join(dir, filename))
        return file_paths
    def save_files(self, location, files):
        """Write each serialised file under the destination project."""
        for filename, data in files.iteritems():
            file_path = os.path.join(self.base_dir, location, filename)
            with open(file_path, 'w') as f:
                f.write(data)
class GitSpiderCopier(SpiderCopier):
    """SpiderCopier backed by git-hosted projects, pinned to one branch."""
    def __init__(self, source, destination, branch):
        self.branch = branch
        super(GitSpiderCopier, self).__init__(source, destination)
    def read_file(self, location, filename):
        """Parse *filename* from our branch; empty/missing files give {}."""
        contents = location.file_contents_for_branch(filename, self.branch)
        return json.loads(contents) if contents else {}
    def list_files(self, location):
        """List files on our branch, falling back to 'master'."""
        try:
            return location.list_files_for_branch(self.branch)
        except KeyError:
            return location.list_files_for_branch('master')
    def save_files(self, location, files):
        """Persist *files* to *location* on our branch."""
        return location.save_files(files, self.branch)
| |
#!/usr/bin/python
import sqlite3
import sys
import traceback
from StatsClass import NcaaGrabber
# Database schema constants: table names plus logical-name -> column-name
# maps used to build every SQL statement in this script.
TEAM_TABLE_NAME = "teams"
TEAM_TABLE = { "year": "year", "id": "org", "name": "name", "winLoss": "winLoss" }
SCHED_TABLE_NAME = "schedule"
SCHED_TABLE = { "year": "year", "week": "week", "team": "team", "opp": "opponent",
                "stats": "stats", "teamScore": "teamScore", "oppScore": "oppScore",
                "home": "home", "line": "line" }
STATS_TABLE_NAME = "stats"
# One value column plus one "<col>_rank" column is created per entry
# (see buildTables and insertStats).
STATS_TABLE = { "rush_off": "rush_off",
                "pass_off": "pass_off",
                "total_off": "total_off",
                "score_off": "score_off",
                "rush_def": "rush_def",
                "pass_edef": "pass_edef",
                "total_def": "total_def",
                "score_def": "score_def",
                "net_punt": "net_punt",
                "punt_ret": "punt_ret",
                "kickoff_ret": "kickoff_ret",
                "turnover": "turnover",
                "pass_def": "pass_def",
                "pass_eff": "pass_eff",
                "sacks": "sacks",
                "tackles": "tackles",
                "sacks_allowed": "sacks_allowed" }
def getTeamName(cursor, year, id):
    """Return a team's display name for (season, NCAA org id), or None."""
    query = "SELECT %s FROM %s WHERE %s=? AND %s=?" % (TEAM_TABLE["name"],
                                                       TEAM_TABLE_NAME,
                                                       TEAM_TABLE["year"],
                                                       TEAM_TABLE["id"])
    cursor.execute(query, (year, id))
    row = cursor.fetchone()
    return row[0] if row is not None else None
def insertTeam(cursor, year, id, name):
    """Insert one (season, NCAA org id, name) row into the teams table."""
    columns = (TEAM_TABLE_NAME, TEAM_TABLE["year"], TEAM_TABLE["id"],
               TEAM_TABLE["name"])
    sql = "INSERT INTO %s (%s, %s, %s) VALUES (?, ?, ?)" % columns
    cursor.execute(sql, (year, id, name))
def insertStats(cursor, statsDict):
    """Insert one row of team stats and return its rowid.

    :param cursor: sqlite3 cursor.
    :param statsDict: maps NCAA category display names (e.g.
        "Rushing Offense") to (value_string, rank) pairs; statsMap below
        translates them to STATS_TABLE column names.
    :returns: cursor.lastrowid, used as the schedule table's stats key.
    """
    # display-name -> STATS_TABLE key translation
    statsMap = { "Rushing Offense": "rush_off",
                 "Passing Offense": "pass_off",
                 "Total Offense": "total_off",
                 "Scoring Offense": "score_off",
                 "Rushing Defense": "rush_def",
                 "Pass Efficiency Defense": "pass_edef",
                 "Total Defense": "total_def",
                 "Scoring Defense": "score_def",
                 "Net Punting": "net_punt",
                 "Punt Returns": "punt_ret",
                 "Kickoff Returns": "kickoff_ret",
                 "Turnover Margin": "turnover",
                 "Pass Defense": "pass_def",
                 "Passing Efficiency": "pass_eff",
                 "Sacks": "sacks",
                 "Tackles For Loss": "tackles",
                 "Sacks Allowed": "sacks_allowed" }
    cols = []
    namedParams = []
    params = {}
    # one "<col>, <col>_rank" pair per stats column
    for key in STATS_TABLE:
        cols.append("%s, %s_rank, " % (STATS_TABLE[key], STATS_TABLE[key]))
        namedParams.append(":%s, :%s_rank, " % (key, key))
    for key in statsDict:
        thisStat = statsDict[key]
        mapped = statsMap[key]
        params[mapped] = thisStat[0].strip()
        params["%s_rank" % mapped] = thisStat[1]
    # [:-2] trims the trailing ", " from each joined column list
    sqlInsertStr = "INSERT INTO %s (%s) VALUES (%s)" % (STATS_TABLE_NAME,
                                                        ''.join(cols)[:-2],
                                                        ''.join(namedParams)[:-2])
    try:
        cursor.execute(sqlInsertStr, params)
    # BUG FIX: was a bare "except:", which also swallowed KeyboardInterrupt
    # and SystemExit; only database errors are expected here.
    except sqlite3.Error:
        print("Error on insert: ", sys.exc_info()[0])
        traceback.print_exc(file=sys.stdout)
        print(sqlInsertStr)
        print(params)
    return cursor.lastrowid
def insertGame(cursor, year, week, team1, team2, statKey, teamScore, oppScore, home, line):
    """Insert one schedule row linking a team, its opponent and a stats rowid."""
    print("Year: %d, Week: %d, Team: %s, Opp: %s, Stat: %d, TScore: %s, OScore: %s, Home: %d, Line: %s" %(year, week, team1, team2, statKey, teamScore, oppScore, home, line))
    columns = (SCHED_TABLE_NAME,
               SCHED_TABLE["year"],
               SCHED_TABLE["week"],
               SCHED_TABLE["team"],
               SCHED_TABLE["opp"],
               SCHED_TABLE["stats"],
               SCHED_TABLE["teamScore"],
               SCHED_TABLE["oppScore"],
               SCHED_TABLE["home"],
               SCHED_TABLE["line"])
    sql = "INSERT INTO %s (%s, %s, %s, %s, %s, %s, %s, %s, %s) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)" % columns
    cursor.execute(sql, (year, week, team1, team2, statKey, teamScore, oppScore, home, line))
def checkTables(cursor):
    """Return True only when the teams, schedule and stats tables all exist."""
    found = []
    for table_name in (TEAM_TABLE_NAME, SCHED_TABLE_NAME, STATS_TABLE_NAME):
        cursor.execute(
            "SELECT name FROM sqlite_master WHERE type='table' AND name=?",
            (table_name,))
        found.append(cursor.fetchone() is not None)
    return all(found)
def buildTables(cursor):
    """Drop and recreate the teams, schedule and stats tables."""
    for table in (TEAM_TABLE_NAME, SCHED_TABLE_NAME, STATS_TABLE_NAME):
        cursor.execute("DROP TABLE IF EXISTS %s" % table)
    # teams: season, NCAA org id, display name, win/loss ratio
    cursor.execute("CREATE TABLE %s (%s INTEGER, %s INTEGER, %s TEXT, %s REAL)"
                   % (TEAM_TABLE_NAME, TEAM_TABLE["year"], TEAM_TABLE["id"],
                      TEAM_TABLE["name"], TEAM_TABLE["winLoss"]))
    # schedule: one row per team per game; 'stats' keys into the stats table
    sched_cols = (SCHED_TABLE_NAME, SCHED_TABLE["year"], SCHED_TABLE["week"],
                  SCHED_TABLE["team"], SCHED_TABLE["opp"], SCHED_TABLE["stats"],
                  SCHED_TABLE["teamScore"], SCHED_TABLE["oppScore"],
                  SCHED_TABLE["home"], SCHED_TABLE["line"])
    cursor.execute("CREATE TABLE %s (%s INTEGER, %s INTEGER, %s INTEGER, %s INTEGER, %s INTEGER, %s INTEGER, %s INTEGER, %s INTEGER, %s INTEGER)" % sched_cols)
    # stats: one value column and one rank column per category;
    # [:-2] trims the trailing ", "
    col_defs = "".join("%s REAL, %s_rank INTEGER, " % (col, col)
                       for col in STATS_TABLE.values())
    cursor.execute("CREATE TABLE %s (%s)" % (STATS_TABLE_NAME, col_defs[:-2]))
# --- command line: currentSeason.py <db> <year> <week> ---
if ( len(sys.argv) < 4 ):
    print("Usage: currentSeason.py <db> <year> <week>")
    exit(-1)
dbPath = sys.argv[1]
targetYear = int(sys.argv[2])
targetWeek = int(sys.argv[3])
conn = sqlite3.connect(dbPath)
cursor = conn.cursor()
# create the schema only on first run
if ( not checkTables(cursor) ):
    buildTables(cursor)
    conn.commit()
statsGrabber = NcaaGrabber()
years = [targetYear]
for year in years:
    # team-name -> org-id lookup across both divisions
    teams = statsGrabber.getTeams('fcs', year)
    teams.update(statsGrabber.getTeams('fbs', year))
    # Insert the teams
    if ( targetWeek == 1 ):
        for team in teams:
            insertTeam(cursor, year, teams[team], team)
        conn.commit()
    divisions = ['fcs', 'fbs']
    for div in divisions:
        # Build the week's schedule
        for week in [targetWeek]:
            print("In week %d..." % week)
            schedule = statsGrabber.processWeekly(div, year, week, teams)
            for game in schedule:
                team1 = game[0]
                team2 = game[1]
                score = game[2]
                # scores arrive as one "teamScore-oppScore" string
                scoreArr = score.split("-")
                teamScore = scoreArr[0].strip()
                oppScore = scoreArr[1].strip()
                line = ""
                team1Name = getTeamName(cursor, year, team1)
                team2Name = "Unknown"
                # NOTE(review): 'team2 > -1' runs before the 'team2 == None'
                # check below; on Python 3 a None opponent would raise
                # TypeError here -- confirm intended interpreter/inputs
                if ( team2 > -1 ):
                    team2Name = getTeamName(cursor, year, team2)
                try:
                    print("Inserting year %d week %d game between %s and %s..." %
                          (year, week, team1Name, team2Name))
                    isHome = statsGrabber.isHomeGame(team1, year, week)
                    if ( team2 == None ):
                        team2 = -1
                    team1Stats = statsGrabber.getStats(team1, year, week)
                    if ( len(team1Stats) == 0 ):
                        # -1 stats key marks "no stats for this week"
                        print("%s has no stats for this week..." % team1Name)
                        insertGame(cursor, year, week, team1, team2, -1, teamScore, oppScore, isHome, line)
                    else:
                        statKey = insertStats(cursor, team1Stats)
                        insertGame(cursor, year, week, team1, team2, statKey, teamScore, oppScore, isHome, line)
                    conn.commit()
                except:
                    # best-effort: log and continue with the next game
                    print("Error on insert: ", sys.exc_info()[0])
                    traceback.print_exc(file=sys.stdout)
conn.close()
| |
from PyQt5 import QtWidgets
from chainer_wing import inspector
from chainer_wing import util
import os
class TrainParamServer(object):
    """Singleton parameter server.

    Stores training configuration as attributes on the single shared
    instance and exposes dict-style access with built-in defaults for a
    few well-known keys.  NOTE: methods name their receiver ``cls`` but it
    is really the instance (``self``).
    """
    __instance = None

    def __new__(cls, *args, **keys):
        # classic singleton: every call returns the same instance
        if cls.__instance is None:
            cls.__instance = object.__new__(cls)
        return cls.__instance

    def __getitem__(cls, key):
        if key in cls.__dict__:
            return cls.__dict__[key]
        else:
            if key == 'IncludingLabel':
                return False
            elif key == 'PredClass':
                return False
            elif key == 'WorkDir':
                # NOTE(review): no separator between dirname(__file__) and
                # '../../examples/' -- looks like a latent path bug; confirm
                return os.path.dirname(__file__) + '../../examples/'
            elif key == 'PreProcessor':
                return 'Do Nothing'
            else:
                raise KeyError(key)

    def __setitem__(cls, key, value):
        cls.__dict__[key] = value

    def __iter__(cls):
        # BUG FIX: __iter__ must return an iterator; returning the
        # dict.keys() view made iter(TrainParamServer()) raise TypeError.
        return iter(cls.__dict__.keys())

    def iter_for_opt_params(cls):
        """Yield the names of all stored optimizer hyper-params (opt_*)."""
        for param in cls.__dict__:
            if param.startswith('opt_'):
                yield param

    def clear_opt_params(cls):
        """Remove every stored optimizer hyper-parameter."""
        opt_keys = [key for key in cls.iter_for_opt_params()]
        for key in opt_keys:
            del cls.__dict__[key]

    def to_dict(cls):
        """Expose the raw parameter mapping (the instance __dict__)."""
        return cls.__dict__

    def load_from_dict(cls, dict):
        """Replace the whole parameter mapping (e.g. from a saved project)."""
        cls.__dict__ = dict

    def get_work_dir(cls):
        """Return the working directory, resetting it if it no longer exists."""
        if not os.path.isdir(cls['WorkDir']):
            cls['WorkDir'] = os.path.dirname(__file__) + '../../examples/'
        return cls['WorkDir']

    def get_net_name(cls):
        return cls.get_work_dir() + '/' + cls['NetName'] + '.py'

    def get_result_dir(cls):
        return cls.get_work_dir() + '/result'

    def get_model_name(cls):
        return cls.get_result_dir() + '/' + cls['ModelName']

    def get_train_data_name(cls):
        return cls['TrainData'].split('/')[-1]

    def use_minmax(cls):
        """Return True when the MinMax Scale pre-processor is selected."""
        # BUG FIX: the comparison result was computed and discarded, so
        # this method always returned None (falsy) regardless of setting.
        return cls['PreProcessor'] == 'MinMax Scale'
class TrainDialog(QtWidgets.QDialog):
    """Modal dialog editing all training settings.

    Each editor widget mirrors its value into both QSettings and the
    TrainParamServer singleton; pressing 'Apply' commits every editor.
    """
    def __init__(self, *args, settings=None):
        self.settings = settings
        work_edit = WorkDirEdit(settings, self)
        opt_edit = OptimizerEdit(settings, self)
        opt_edit.currentTextChanged.connect(self.update_optimizer)
        # entries of the form (title, None) start a new group box in draw()
        self.dialogs = [('File Settings', None),
                        ('Working Directory', work_edit),
                        ('', work_edit.label),
                        ('Train Settings', None),
                        ('Task', TaskEdit(settings, self)),
                        ('Net Name', NetNameEdit(settings, self)),
                        ('Model Name', ModelNameEdit(settings, self)),
                        ('Batch Size', BatchSizeEdit(settings, self)),
                        ('Epoch', EpochEdit(settings, self)),
                        ('GPU', GPUEdit(settings, self)),
                        ('Optimizer Settings', None),
                        ('Optimizer', opt_edit),
                        ]
        # seed the server with the selected optimizer's default hyper-params
        optimizer_name = TrainParamServer()['Optimizer']
        oi = inspector.OptimizerInspector()
        for name, default in oi.get_signature(optimizer_name).items():
            if name not in TrainParamServer().__dict__:
                TrainParamServer()[name] = default
        # one line edit per opt_* parameter
        for param in TrainParamServer().iter_for_opt_params():
            dialog = (param, OptimizeParamEdit(settings, self, param,
                                               TrainParamServer()[param]))
            self.dialogs.append(dialog)
        super(TrainDialog, self).__init__(*args)
        self.draw(*args, settings=settings)
        self.setStyleSheet('''TrainDialog {
                                background: rgb(75,75,75);
                            }
                            QLineEdit {
                                background-color: rgb(95,95,95);
                                border: 1px solid gray;
                                color: white;
                            }
                            QSpinBox {
                                background-color: rgb(95,95,95);
                                color: white;
                                border: 1px solid gray;
                            }
                            QPushButton {
                                background-color: rgb(155,95,95);
                                color: white;
                            }
                            QLabel {
                                color: white;
                            }
                            ''')

    def draw(self, *args, settings=None):
        """Lay out section group boxes and rows from self.dialogs,
        plus the Apply button."""
        main_layout = QtWidgets.QVBoxLayout()
        for name, widget in self.dialogs:
            if not widget:
                # a (title, None) entry opens a new section
                l_widget = QtWidgets.QGroupBox(name)
                l_widget.setStyleSheet('''
                QGroupBox {
                    color: white;
                    border: 1px solid gray;
                    border-radius: 9px;
                    margin-top: 0.5em;
                }
                QGroupBox::title {
                    color: white;
                    subcontrol-origin: margin;
                    left: 10px;
                    padding: 0 3px 0 3px;
                }
                ''')
                l_widget.setFlat(False)
                section_layout = QtWidgets.QFormLayout()
                l_widget.setLayout(section_layout)
                main_layout.addWidget(l_widget)
            else:
                # NOTE(review): assumes the first entry is a section header;
                # otherwise section_layout would be unbound here
                section_layout.addRow(name, widget)
        close_button = QtWidgets.QPushButton('Apply')
        close_button.clicked.connect(self.close)
        main_layout.addWidget(close_button)
        self.setLayout(main_layout)

    def close(self):
        """Commit every editor, sync QSettings and close the dialog."""
        for name, widget in self.dialogs:
            try:
                widget.commit()
            except AttributeError:
                # section headers and plain labels have no commit()
                pass
        self.update_opt_params(TrainParamServer()['Optimizer'])
        self.settings.sync()
        super(TrainDialog, self).close()

    def redraw(self):
        self.parent().drawer.repaint()

    def update_optimizer(self, optimizer_name):
        """React to the optimizer combo box changing: rebuild the dialog."""
        # debug print left by the author
        print(optimizer_name)
        self.update_opt_params(optimizer_name)
        self.parent().open_train_config()
        self.close()

    def update_opt_params(self, optimizer_name):
        """Sync stored opt_* params with the chosen optimizer's signature."""
        TrainParamServer()['Optimizer'] = optimizer_name
        oi = inspector.OptimizerInspector()
        not_exist_names = []
        exist_names = []
        for name in TrainParamServer().iter_for_opt_params():
            if name in oi.get_signature(optimizer_name):
                exist_names.append(name)
            else:
                not_exist_names.append(name)
        # drop params the new optimizer does not accept...
        for name in not_exist_names:
            del TrainParamServer().__dict__[name]
        # ...and add defaults for the ones it newly introduces
        for name, default in oi.get_signature(optimizer_name).items():
            if name not in exist_names:
                TrainParamServer()[name] = default
class AbstractTrainEdit(QtWidgets.QSpinBox):
    """Spin-box base class whose value is mirrored into TrainParamServer
    and QSettings under a key derived from the subclass name."""
    def __init__(self, settings, parent, default, val_type=int):
        self.parent = parent
        self.settings = settings
        super(AbstractTrainEdit, self).__init__()
        # e.g. BatchSizeEdit -> key "BatchSize"
        self.globals_key = self.__class__.__name__[:-4]
        stored = settings.value(self.globals_key, type=val_type)
        if not stored:
            stored = default
        server = TrainParamServer()
        if self.globals_key in server.__dict__:
            stored = server[self.globals_key]
        else:
            server[self.globals_key] = stored
        self.setValue(stored)
        self.valueChanged.connect(self.redraw)
    def commit(self):
        """Persist the current value to QSettings and the server."""
        current = self.value()
        self.settings.setValue(self.globals_key, current)
        TrainParamServer()[self.globals_key] = current
    def redraw(self):
        """Push the new value to the server and repaint the parent view."""
        TrainParamServer()[self.globals_key] = self.value()
        self.parent.redraw()
class BatchSizeEdit(AbstractTrainEdit):
    """Spin box for the minibatch size (default 20, capped at 1000)."""
    def __init__(self, settings, parent):
        super(BatchSizeEdit, self).__init__(settings, parent, 20)
        self.setMaximum(1000)
class EpochEdit(AbstractTrainEdit):
    """Spin box for the epoch count (default 20, capped at 100000)."""
    def __init__(self, settings, parent):
        super(EpochEdit, self).__init__(settings, parent, 20)
        self.setMaximum(100000)
class GPUEdit(AbstractTrainEdit):
    """Spin box for the GPU device id (default 0)."""
    def __init__(self, settings, parent):
        super(GPUEdit, self).__init__(settings, parent, 0)
class OptimizerEdit(QtWidgets.QComboBox):
    """Combo box selecting the optimizer, mirrored into TrainParamServer."""
    def __init__(self, settings, parent):
        choices = inspector.OptimizerInspector().get_members()
        self.parent = parent
        self.settings = settings
        super(OptimizerEdit, self).__init__()
        self.addItems(choices)
        self.length = len(choices)
        server = TrainParamServer()
        if 'Optimizer' in server.__dict__:
            selected_optimizer = server['Optimizer']
        else:
            selected_optimizer = settings.value('Optimizer', type=str)
        self.setCurrentText(selected_optimizer)
        server['Optimizer'] = self.currentText()
    def commit(self):
        """Persist the selection, then re-sync the widget with the server."""
        self.settings.setValue('Optimizer', self.currentText())
        self.setCurrentText(TrainParamServer()['Optimizer'])
class NetNameEdit(QtWidgets.QLineEdit):
    """Line edit for the generated network's module name."""
    def __init__(self, settings, parent):
        self.parent = parent
        self.settings = settings
        super(NetNameEdit, self).__init__()
        name = settings.value('NetName', type=str) or 'MyNet'
        server = TrainParamServer()
        if 'NetName' in server.__dict__:
            name = server['NetName']
        else:
            server['NetName'] = name
        self.setText(name)
    def commit(self):
        """Persist the edited name to QSettings and the server."""
        self.settings.setValue('NetName', self.text())
        TrainParamServer()['NetName'] = self.text()
class ModelNameEdit(QtWidgets.QLineEdit):
    """Line edit for the saved model's file name."""
    def __init__(self, settings, parent):
        self.parent = parent
        self.settings = settings
        super(ModelNameEdit, self).__init__()
        name = settings.value('ModelName', type=str) or 'MyModel'
        server = TrainParamServer()
        if 'ModelName' in server.__dict__:
            name = server['ModelName']
        else:
            server['ModelName'] = name
        self.setText(name)
    def commit(self):
        """Persist the edited name to QSettings and the server."""
        self.settings.setValue('ModelName', self.text())
        TrainParamServer()['ModelName'] = self.text()
class OptimizeParamEdit(QtWidgets.QLineEdit):
    """Line edit for one optimizer hyper-parameter (a float)."""
    def __init__(self, settings, parent, key, value):
        self.parent = parent
        self.settings = settings
        self.key = key
        super(OptimizeParamEdit, self).__init__()
        server = TrainParamServer()
        if key in server.__dict__:
            value = server[key]
        else:
            server[key] = value
        self.setText(str(value))
    def commit(self):
        """Parse and store the value; warn the user on non-float input."""
        try:
            TrainParamServer()[self.key] = float(self.text())
        except ValueError:
            util.disp_error('Optimizer parameter should be float.')
class WorkDirEdit(QtWidgets.QPushButton):
    """'Browse' button that selects and commits the working directory."""
    def __init__(self, settings, parent):
        self.parent = parent
        self.settings = settings
        super(WorkDirEdit, self).__init__('Browse')
        stored = settings.value('WorkDir', type=str)
        if not stored:
            stored = './'
        server = TrainParamServer()
        if 'WorkDir' in server.__dict__:
            self.value = server.get_work_dir()
        else:
            self.value = stored
            server['WorkDir'] = self.value
        self.label = WorkDirLabel(settings, parent)
        self.clicked.connect(self.open_dialog)
    def commit(self):
        """Persist the chosen directory to QSettings and the server."""
        self.settings.setValue('WorkDir', self.value)
        TrainParamServer()['WorkDir'] = self.value
    def open_dialog(self):
        """Show a directory picker, update the label and commit."""
        pick_directory = QtWidgets.QFileDialog.getExistingDirectory
        self.value = pick_directory(self,
                                    'Result file storage',
                                    self.value)
        self.label.setText(self.value)
        self.commit()
class WorkDirLabel(QtWidgets.QLabel):
    """Read-only label showing the currently selected working directory."""
    def __init__(self, settings, parent):
        self.parent = parent
        self.settings = settings
        super(WorkDirLabel, self).__init__(TrainParamServer().get_work_dir())
class TaskEdit(QtWidgets.QComboBox):
    """Combo box choosing the training task type."""
    def __init__(self, settings, parent):
        choices = ('Simple Classification', 'Simple Regression',
                   'Image Classification')
        self.parent = parent
        self.settings = settings
        super(TaskEdit, self).__init__()
        self.addItems(choices)
        server = TrainParamServer()
        if 'Task_idx' in server.__dict__:
            self.setCurrentIndex(server['Task_idx'])
        else:
            self.setCurrentIndex(settings.value('Task', type=int))
        server['Task'] = self.currentText()
    def commit(self):
        """Persist both the task name and its combo index."""
        self.settings.setValue('Task', self.currentIndex())
        TrainParamServer()['Task'] = self.currentText()
        TrainParamServer()['Task_idx'] = self.currentIndex()
| |
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Small functions to help with plots"""
# pylint disable=star-args
from matplotlib import pyplot as plt
import os
import re
from trappy.wa import SysfsExtractor
# Aspect ratio used to derive plot height from width (and vice versa)
GOLDEN_RATIO = 1.618034
def normalize_title(title, opt_title):
    """Return a string with that contains the title and opt_title if it's
    not the empty string

    See test_normalize_title() for usage
    """
    # BUG FIX: the original tested ``opt_title is not ""`` -- an identity
    # comparison against a literal, which is implementation-defined for
    # strings (and a SyntaxWarning on modern CPython).  Test emptiness.
    if opt_title:
        title = opt_title + " - " + title
    return title
def set_lim(lim, get_lim_f, set_lim_f):
    """Apply *lim* via set_lim_f.

    *lim* may be a (low, high) tuple, "default" (do nothing, keep the
    matplotlib default) or "range" (widen the current limits by 10% on
    each side, which helps when data sits right on the plot margin).
    """
    if lim == "default":
        return
    if lim == "range":
        low, high = get_lim_f()
        margin = 0.1 * (high - low)
        lim = (low - margin, high + margin)
    set_lim_f(lim[0], lim[1])
def set_xlim(ax, xlim):
    """Apply *xlim* ("default", "range" or a tuple) to *ax* via set_lim()."""
    set_lim(xlim, ax.get_xlim, ax.set_xlim)
def set_ylim(ax, ylim):
    """Apply *ylim* ("default", "range" or a tuple) to *ax* via set_lim()."""
    set_lim(ylim, ax.get_ylim, ax.set_ylim)
def pre_plot_setup(width=None, height=None, ncols=1, nrows=1):
    """Create a figure and return its axes.

    *width* and *height* describe one row of plots; when only one of them
    is given the other is derived via the golden ratio, and when neither
    is given a 10x6 figure is used.  Call this before any plot() calls.
    """
    if height is None and width is None:
        width, height = 10, 6
    elif height is None:
        height = width / GOLDEN_RATIO
    elif width is None:
        width = height * GOLDEN_RATIO
    height *= nrows
    _, axis = plt.subplots(ncols=ncols, nrows=nrows, figsize=(width, height))
    # keep multi-row plots from overlapping each other
    plt.tight_layout(h_pad=3.5)
    return axis
def post_plot_setup(ax, title="", xlabel=None, ylabel=None, xlim="default",
                    ylim="range"):
    """Apply labels, title, xlim and ylim to *ax* after plotting.

    Must run after the .plot() calls.  ylim defaults to "range" (10%
    padding) because matplotlib's default crowds values near the margins.
    """
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    if title:
        ax.set_title(title)
    set_ylim(ax, ylim)
    set_xlim(ax, xlim)
def number_freq_plots(runs, map_label):
    """Return how many rows the allfreq/frequency-histogram plots need:
    one per CPU in *map_label*, plus one if any run carries devfreq data."""
    num_plots = len(map_label)
    if any(len(run.devfreq_in_power.data_frame) > 0 for run in runs):
        num_plots += 1
    return num_plots
def plot_temperature(runs, width=None, height=None, ylim="range", tz_id=None):
    """Plot temperatures
    runs is an array of FTrace() instances. Extract the control_temp
    from the governor data and plot the temperatures reported by the
    thermal framework. The governor doesn't track temperature when
    it's off, so the thermal framework trace is more reliable.
    """
    ax = pre_plot_setup(width, height)
    for run in runs:
        gov_dfr = run.thermal_governor.data_frame
        if tz_id:
            gov_dfr = gov_dfr[gov_dfr["thermal_zone_id"] == tz_id]
        try:
            # control temperature = current + delta, reported in millicelsius
            current_temp = gov_dfr["current_temperature"]
            delta_temp = gov_dfr["delta_temperature"]
            control_series = (current_temp + delta_temp) / 1000
        except KeyError:
            # governor trace missing: plot without a control temperature
            control_series = None
        try:
            run.thermal.plot_temperature(control_temperature=control_series,
                                         ax=ax, legend_label=run.name,
                                         tz_id=tz_id)
        except ValueError:
            # no thermal-framework data; fall back to the governor's own plot
            run.thermal_governor.plot_temperature(ax=ax, legend_label=run.name)
    post_plot_setup(ax, title="Temperature", ylim=ylim)
    plt.legend(loc="best")
def plot_hist(data, ax, title, unit, bins, xlabel, xlim, ylim):
    """Plot a histogram of *data*, annotating mean and std in the title."""
    stats_suffix = " (mean = {:.2f}{}, std = {:.2f})".format(data.mean(),
                                                             unit,
                                                             data.std())
    data.hist(ax=ax, bins=bins)
    post_plot_setup(ax, title=title + stats_suffix,
                    xlabel="{} ({})".format(xlabel, unit),
                    ylabel="count", xlim=xlim, ylim=ylim)
def plot_load(runs, map_label, width=None, height=None):
    """Plot raw and normalized load for every run, two rows per column."""
    axis = pre_plot_setup(width=width, height=height, ncols=len(runs),
                          nrows=2)
    if len(runs) == 1:
        axis = [axis]
    else:
        # transpose so each entry holds one run's (load, normalized) pair
        axis = zip(*axis)
    for ax, run in zip(axis, runs):
        run.plot_load(map_label, title=run.name, ax=ax[0])
        run.plot_normalized_load(map_label, title=run.name, ax=ax[1])
def plot_allfreqs(runs, map_label, width=None, height=None):
    """Make a multicolumn plots of the allfreqs plots of each run"""
    n_runs = len(runs)
    n_rows = number_freq_plots(runs, map_label)

    axis = pre_plot_setup(width=width, height=height, nrows=n_rows,
                          ncols=n_runs)

    # pre_plot_setup() collapses singleton dimensions, so rebuild a
    # uniform "one column of axes per run" structure before iterating.
    if n_runs == 1:
        columns = [[axis]] if n_rows == 1 else [axis]
    elif n_rows == 1:
        columns = [[ax] for ax in axis]
    else:
        columns = zip(*axis)

    for column, run in zip(columns, runs):
        run.plot_allfreqs(map_label, ax=column)
def plot_controller(runs, width=None, height=None):
    """Make a multicolumn plot of the pid controller of each run"""
    n_runs = len(runs)
    axes = pre_plot_setup(width=width, height=height, ncols=n_runs)

    # A single run yields a bare axis; wrap it so the loop is uniform.
    if n_runs == 1:
        axes = [axes]

    for ax, run in zip(axes, runs):
        run.pid_controller.plot_controller(title=run.name, ax=ax)
def plot_weighted_input_power(runs, actor_order, width=None, height=None):
    """Make a multicolumn plot of the weighted input power of each run

    The cdev weights are read from the sysfs parameters captured next to
    each run's trace; runs without captured weights are skipped entirely.
    """
    actor_weights = []
    for run in runs:
        run_path = os.path.dirname(run.trace_path)
        sysfs = SysfsExtractor(run_path)

        thermal_params = sysfs.get_parameters()

        sorted_weights = []
        for param in sorted(thermal_params):
            if re.match(r"cdev\d+_weight", param):
                sorted_weights.append(thermal_params[param])

        # Materialize the pairs: on Python 3 zip() returns a lazy,
        # always-truthy iterator, which would defeat the any() check
        # below and could only be consumed once.
        actor_weights.append(list(zip(actor_order, sorted_weights)))

    # Do nothing if we don't have actor weights for any run
    if not any(actor_weights):
        return

    num_runs = len(runs)
    axis = pre_plot_setup(width=width, height=height, ncols=num_runs)
    if num_runs == 1:
        axis = [axis]

    for ax, run, weights in zip(axis, runs, actor_weights):
        run.thermal_governor.plot_weighted_input_power(weights, title=run.name,
                                                       ax=ax)
def plot_input_power(runs, actor_order, width=None, height=None):
    """Make a multicolumn plot of the input power of each run"""
    n_runs = len(runs)
    axes = pre_plot_setup(width=width, height=height, ncols=n_runs)
    if n_runs == 1:
        axes = [axes]

    for ax, run in zip(axes, runs):
        run.thermal_governor.plot_input_power(actor_order, title=run.name,
                                              ax=ax)

    # Also emit the companion weighted-input-power plot when available.
    plot_weighted_input_power(runs, actor_order, width, height)
def plot_output_power(runs, actor_order, width=None, height=None):
    """Make a multicolumn plot of the output power of each run"""
    n_runs = len(runs)
    axes = pre_plot_setup(width=width, height=height, ncols=n_runs)
    if n_runs == 1:
        axes = [axes]

    for ax, run in zip(axes, runs):
        run.thermal_governor.plot_output_power(actor_order, title=run.name,
                                               ax=ax)
def plot_freq_hists(runs, map_label):
    """Plot frequency histograms of multiple runs"""
    n_runs = len(runs)
    # Two rows (in/out frequency) per frequency plot.
    n_rows = 2 * number_freq_plots(runs, map_label)
    axis = pre_plot_setup(ncols=n_runs, nrows=n_rows)

    # One column of axes per run.
    columns = [axis] if n_runs == 1 else zip(*axis)

    for column, run in zip(columns, runs):
        run.plot_freq_hists(map_label, ax=column)
def plot_temperature_hist(runs):
    """Plot temperature histograms for all the runs

    Runs without thermal data are skipped.
    """
    # Axes are only allocated for runs that have thermal data, so pair
    # the axes with those same runs.  (The original zipped the axes
    # against *all* runs, which mispaired axes - or tried to plot empty
    # data - whenever an early run had no thermal trace.)
    plot_runs = [run for run in runs if len(run.thermal.data_frame)]
    num_runs = len(plot_runs)
    if num_runs == 0:
        return

    axis = pre_plot_setup(ncols=num_runs)
    if num_runs == 1:
        axis = [axis]

    for ax, run in zip(axis, plot_runs):
        run.thermal.plot_temperature_hist(ax, run.name)
| |
"""
An ingest module
"""
from lxml import html
import requests
import json
from bs4 import BeautifulSoup
import re
#Python 3
import urllib.request
# #Python 2
# import urllib
import pymongo
import os
import pickle
class IngestSystem(object):
    """Scrape allmenus.com and assemble restaurant records.

    ``cl`` is a list of ``{'city': ..., 'state': ...}`` dicts naming the
    cities to ingest.  The network-scraping stages of ``pull_and_load``
    are currently disabled (left in the triple-quoted block below); only
    ``build_database`` runs, using the cached pages under ``raw_data/``.
    """

    def __init__(self, cl):
        # Cities to scrape, each a dict with 'city' and 'state' keys.
        self.cities = cl

    def pull_and_load(self):
        # NOTE(review): the download pipeline below was apparently run
        # once and then commented out; build_database() now works purely
        # from the cached raw_data/ directory.
        '''
        l = self.get_city_urls()
        r = []
        for city in l:
            print(city)
            one_city = self.get_restaurant_urls(city)
            print(one_city)
            # Get the 100 most popular restaurants for each city
            #for w in one_city[:100]:
            for w in one_city: ## Additional DC restaurants ONLY to pull all restaurants
                if ('menu' in w[0]) and ('kids' not in w[0]):
                    r.append(w)
        pickle.dump(r,open('restaurant_url_list.txt','wb'))
        r=pickle.load(open('restaurant_url_list.txt', 'rb'))
        print(len(r))
        self.store_raw(r[200:300])
        '''
        self.build_database()

    def get_city_urls(self):
        """Return (listing_url, city, state) tuples for every city."""
        url_list = []
        for i in self.cities:
            # Listing sorted by popularity for this city/state pair.
            url_list.append(('http://www.allmenus.com/'+i['state']+'/'+i['city']+'/-/?sort=popular', i['city'], i['state']))
        return url_list

    def get_restaurant_urls(self, url_citystate_tuple):
        """Fetch one city listing page and extract its restaurant links."""
        uct = url_citystate_tuple
        a = HTMLReader(uct[0])
        citysoup = a.html_to_soup()
        urllist = a.soup_to_urllist(citysoup, uct[1], uct[2])
        return urllist

    def store_raw(self, rest_list):
        """Download each restaurant page and cache it under raw_data/."""
        for r in rest_list:
            # r[0] is a path like /state/city/id-name/menu; its segments
            # become the cache file name.
            splt = r[0].split('/')
            a = HTMLReader('http://www.allmenus.com'+r[0])
            restsoup = a.html_to_soup()
            with open("raw_data/"+splt[1]+"_"+splt[2]+"_"+splt[3]+".html", "w") as f:
                print("Writing "+splt[1]+"_"+splt[2]+"_"+splt[3]+".html")
                f.write(restsoup.prettify())

    def build_database(self):
        """Parse every cached page into a record; keep usable ones.

        A record is kept only when it has at least one menu item, parsed
        coordinates (latitude != 9999 sentinel) and a known cuisine type.
        """
        l = []
        for filenm in os.listdir('raw_data/'):
            if filenm != '.DS_Store':
                tmp = Restaurant(filenm).db_obj()
                if (len(tmp['menu']) >= 1) and (tmp['latitude'] != 9999) and (tmp['type'] != ""):
                    l.append(tmp)
        print(len(l))
        # NOTE(review): MongoDB insertion is disabled; results are kept
        # on self.final_rlist instead.
        '''
        conn = pymongo.MongoClient()
        db = conn.rdata
        for i in l:
            print("Insert "+i['name'])
            db.restaurants.insert_one(i)
        '''
        self.final_rlist = l
class HTMLReader(object):
    """Fetch a web page and extract allmenus restaurant links from it."""

    def __init__(self, uct):
        # URL to fetch.
        self.url = uct

    def html_to_soup(self):
        """Download self.url and return it parsed as a BeautifulSoup tree."""
        html = urllib.request.urlopen(self.url).read()
        soup = BeautifulSoup(html, "lxml")
        return soup

    def soup_to_urllist(self, soup, cityname, statename):
        """Return (href, city, state) tuples for links under /<statename>.

        Uses startswith() so state codes of any length work; the original
        compared a fixed three-character prefix, which silently assumed
        two-letter state abbreviations (identical behavior for those).
        """
        tmp = []
        match = '/' + statename
        for u in soup.findAll("a", href=True):
            if u['href'].startswith(match):
                tmp.append((u['href'], cityname, statename))
        return tmp

    def build_info(self):
        # Placeholder for future per-restaurant info extraction.
        pass

    def build_menu(self):
        # Placeholder for future menu extraction.
        pass
class Restaurant(object):
    """One restaurant parsed from a cached allmenus.com HTML page.

    ``db_obj`` flattens the scraped fields into a plain dict suitable
    for insertion into MongoDB.
    """

    # Suburb -> metro area consolidation used for 'city_group'.
    _CITY_GROUPS = {
        'Dunwoody': 'Atlanta', 'East Point': 'Atlanta',
        'Sandy Springs': 'Atlanta',
        'Alsip': 'Chicago', 'Cicero': 'Chicago', 'Evergreen Park': 'Chicago',
        'Harwood Heights': 'Chicago', 'Elmwood Park': 'Chicago',
        'Hollywood': 'Los Angeles', 'West Hollywood': 'Los Angeles',
        'Greenfield': 'Milwaukee', 'Wauwatosa': 'Milwaukee',
        'West Allis': 'Milwaukee',
        'South Austin': 'Austin',
    }

    # Niche cuisine -> consolidated cuisine used for 'type_2'.
    _TYPE_GROUPS = {
        'Ethiopian': 'African',
        'Hawaiian': 'American', 'Local/Organic': 'American',
        'American (New)': 'American',
        'Breakfast': 'Bakery, Breakfast & Coffee',
        'Bakery & Pastries': 'Bakery, Breakfast & Coffee',
        'Coffee & Tea': 'Bakery, Breakfast & Coffee',
        'Gastropub': 'Bar Food', 'Pub Food': 'Bar Food',
        'Hot Dogs': 'Burgers & Hot Dogs', 'Burgers': 'Burgers & Hot Dogs',
        'Dominican': 'Caribbean', 'Jamaican': 'Caribbean',
        'Asian Fusion': 'Chinese', 'Taiwanese': 'Chinese',
        'Sandwiches': 'Deli & Sandwiches', 'Deli Food': 'Deli & Sandwiches',
        'Ice Cream': 'Desserts', 'Crepes': 'Desserts',
        'Austrian': 'European', 'British': 'European',
        'Eastern European': 'European', 'Eclectic & International': 'European',
        'Spanish': 'European', 'French': 'European', 'Belgian': 'European',
        'Irish': 'European', 'German': 'European', 'Polish': 'European',
        'Puerto Rican': 'Latin American', 'Brazilian': 'Latin American',
        'Central American': 'Latin American',
        'Greek': 'Mediterranean',
        'Sushi': 'Seafood & Sushi', 'Seafood': 'Seafood & Sushi',
        'Soul Food': 'Southern', 'Cajun & Creole': 'Southern',
        'Tex-Mex': 'Southwestern',
        'Chicago Grill': 'Steak',
        'Burmese': 'Thai', 'Malaysian': 'Thai',
        'Noodles': 'Vietnamese',
        'Pakistani': 'Middle Eastern',
        'Salads': 'Vegetarian',
    }

    def __init__(self, filenm):
        # Parse the cached page and pull out the schema.org microdata.
        soup = BeautifulSoup(open('raw_data/'+filenm, 'r'), "lxml")
        self.name = soup.find("h1", {"itemprop": "name"}).string.strip()
        self.street = soup.find("span", {"itemprop": "streetAddress"}).string.strip()
        self.city = soup.find("span", {"itemprop": "addressLocality"}).string.strip()
        self.state = soup.find("span", {"itemprop": "addressRegion"}).string.strip()
        self.zip = soup.find("span", {"itemprop": "postalCode"}).string.strip()
        # Kept as the raw '<meta .../>' markup; db_obj extracts the value.
        self.lat = str(soup.find("meta", {"itemprop": "latitude"}))
        self.lng = str(soup.find("meta", {"itemprop": "longitude"}))
        self.ratings = soup.findAll(attrs = {"itemprop": "ratingValue"})
        # <li> tags hold both the cuisine type (first) and the menu items.
        self.msoup = soup.findAll("li")

    @staticmethod
    def _parse_price(raw):
        """Convert a scraped price string to float, or "" when unusable.

        Strips '$', treats blank and $0.00 prices as missing, and keeps
        only the lower bound of a "low-high" price range.
        """
        text = re.sub('[$]', '', raw)
        if '-' in text:
            # Range of prices: take the first one; "" when it is blank.
            low = text[:text.find('-')]
            if low in ("", " "):
                return ""
            return float(low)
        if text in ("", " "):
            return ""
        value = float(text)
        # A $0.00 price is treated as missing data.
        return value if value != 0 else ""

    def _avg_rating(self):
        """Average the scraped ratings; 0.0 when there are none."""
        if not self.ratings:
            return 0.0
        values = [float(rating['content']) for rating in self.ratings]
        return sum(values) / float(len(values))

    def _menu(self):
        """Build the menu item list, blanking missing fields.

        Only <li> tags that carry at least one of the name/price/
        description spans contribute an entry.
        """
        items = []
        for li in self.msoup:
            if not (li.find("span", "name") or li.find("span", "price")
                    or li.find("p", "description")):
                continue
            entry = {}
            name_tag = li.find("span", "name")
            entry["item"] = name_tag.string.strip() if name_tag else ""
            price_tag = li.find("span", "price")
            entry["price"] = self._parse_price(price_tag.string.strip()) if price_tag else ""
            desc_tag = li.find("p", "description")
            entry["description"] = desc_tag.string.strip() if desc_tag else ""
            items.append(entry)
        return items

    def db_obj(self):
        """Flatten this restaurant into a plain dict for the database."""
        r = {}
        r['name'] = self.name
        r['street'] = self.street
        r['city'] = self.city
        r['state'] = self.state
        r['zip'] = self.zip

        # Geolocation: self.lat/lng look like
        # '<meta content="33.79" itemprop="latitude"/>'; the first quoted
        # value is the coordinate.  9999 flags unparseable coordinates
        # (filtered out later by IngestSystem.build_database).
        try:
            r['latitude'] = float(re.findall(r'"(.*?)"', self.lat)[0])
            r['longitude'] = float(re.findall(r'"(.*?)"', self.lng)[0])
        except ValueError:
            r['latitude'] = float(9999.000)
            r['longitude'] = float(9999.000)

        # Consolidate suburb names into their metro area.
        r['city_group'] = self._CITY_GROUPS.get(self.city, self.city)

        # Average of the scraped ratings, or 0.0 when there are none.
        r['avg_rating'] = self._avg_rating()

        # Cuisine type (first <li>); blank when the data is missing.
        # type_2 is the consolidated cuisine group.
        if self.msoup and self.msoup[0].string:
            cuisine = self.msoup[0].string.strip()
            r['type'] = cuisine
            r['type_2'] = self._TYPE_GROUPS.get(cuisine, cuisine)
        else:
            r['type'] = ""
            r['type_2'] = ""

        r['menu'] = self._menu()
        return r
| |
"""The tests for the automation component."""
import unittest
from homeassistant.bootstrap import _setup_component
import homeassistant.components.automation as automation
from homeassistant.const import ATTR_ENTITY_ID
from tests.common import get_test_home_assistant
class TestAutomation(unittest.TestCase):
    """Test the event automation.

    Each test registers a recording 'test.automation' service and asserts
    how many times (and with what data) the configured automations call it.
    """

    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        self.hass.config.components.append('group')
        self.calls = []

        def record_call(service):
            # Capture every service call so the tests can inspect them.
            self.calls.append(service)

        self.hass.services.register('test', 'automation', record_call)

    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()

    def test_service_data_not_a_dict(self):
        """Test that setup fails when the action's service data is not a dict."""
        assert not _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'event',
                    'event_type': 'test_event',
                },
                'action': {
                    'service': 'test.automation',
                    # Invalid: service data must be a mapping.
                    'data': 100,
                }
            }
        })

    def test_service_specify_data(self):
        """Test that templated service data is rendered from the trigger."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'event',
                    'event_type': 'test_event',
                },
                'action': {
                    'service': 'test.automation',
                    'data_template': {
                        'some': '{{ trigger.platform }} - '
                                '{{ trigger.event.event_type }}'
                    },
                }
            }
        })

        self.hass.bus.fire('test_event')
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))
        # Template should have been rendered with the trigger variables.
        self.assertEqual('event - test_event', self.calls[0].data['some'])

    def test_service_specify_entity_id(self):
        """Test that a single entity_id is passed to the service as a list."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'event',
                    'event_type': 'test_event',
                },
                'action': {
                    'service': 'test.automation',
                    'entity_id': 'hello.world'
                }
            }
        })

        self.hass.bus.fire('test_event')
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))
        self.assertEqual(['hello.world'],
                         self.calls[0].data.get(ATTR_ENTITY_ID))

    def test_service_specify_entity_id_list(self):
        """Test that a list of entity_ids is passed through unchanged."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'event',
                    'event_type': 'test_event',
                },
                'action': {
                    'service': 'test.automation',
                    'entity_id': ['hello.world', 'hello.world2']
                }
            }
        })

        self.hass.bus.fire('test_event')
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))
        self.assertEqual(['hello.world', 'hello.world2'],
                         self.calls[0].data.get(ATTR_ENTITY_ID))

    def test_two_triggers(self):
        """Test that either of two triggers fires the action."""
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': [
                    {
                        'platform': 'event',
                        'event_type': 'test_event',
                    },
                    {
                        'platform': 'state',
                        'entity_id': 'test.entity',
                    }
                ],
                'action': {
                    'service': 'test.automation',
                }
            }
        })

        self.hass.bus.fire('test_event')
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))
        self.hass.states.set('test.entity', 'hello')
        self.hass.pool.block_till_done()
        self.assertEqual(2, len(self.calls))

    def test_two_conditions_with_and(self):
        """Test two and conditions."""
        entity_id = 'test.entity'
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': [
                    {
                        'platform': 'event',
                        'event_type': 'test_event',
                    },
                ],
                'condition': [
                    {
                        'condition': 'state',
                        'entity_id': entity_id,
                        'state': '100'
                    },
                    {
                        'condition': 'numeric_state',
                        'entity_id': entity_id,
                        'below': 150
                    }
                ],
                'action': {
                    'service': 'test.automation',
                }
            }
        })

        # Both conditions hold (state == 100, 100 < 150): action fires.
        self.hass.states.set(entity_id, 100)
        self.hass.bus.fire('test_event')
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))

        # State condition fails (101 != '100'): no new call.
        self.hass.states.set(entity_id, 101)
        self.hass.bus.fire('test_event')
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))

        # Numeric condition fails (151 >= 150): no new call.
        self.hass.states.set(entity_id, 151)
        self.hass.bus.fire('test_event')
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))

    def test_two_conditions_with_or(self):
        """Test two or conditions."""
        entity_id = 'test.entity'
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': [
                    {
                        'platform': 'event',
                        'event_type': 'test_event',
                    },
                ],
                # Legacy OR syntax: conditions are specified via 'platform'.
                'condition_type': 'OR',
                'condition': [
                    {
                        'platform': 'state',
                        'entity_id': entity_id,
                        'state': '200'
                    },
                    {
                        'platform': 'numeric_state',
                        'entity_id': entity_id,
                        'below': 150
                    }
                ],
                'action': {
                    'service': 'test.automation',
                }
            }
        })

        # First condition holds (state == 200): action fires.
        self.hass.states.set(entity_id, 200)
        self.hass.bus.fire('test_event')
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))

        # Second condition holds (100 < 150): action fires.
        self.hass.states.set(entity_id, 100)
        self.hass.bus.fire('test_event')
        self.hass.pool.block_till_done()
        self.assertEqual(2, len(self.calls))

        # Neither condition holds (250): no new call.
        self.hass.states.set(entity_id, 250)
        self.hass.bus.fire('test_event')
        self.hass.pool.block_till_done()
        self.assertEqual(2, len(self.calls))

    def test_using_trigger_as_condition(self):
        """Test triggers as condition."""
        entity_id = 'test.entity'
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': [
                    {
                        'platform': 'state',
                        'entity_id': entity_id,
                        'from': '120',
                        'state': '100'
                    },
                    {
                        'platform': 'numeric_state',
                        'entity_id': entity_id,
                        'below': 150
                    }
                ],
                # Reuse the trigger definitions as AND-ed conditions.
                'condition': 'use_trigger_values',
                'action': {
                    'service': 'test.automation',
                }
            }
        })

        self.hass.states.set(entity_id, 100)
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))

        # 120 fails the state condition (state must be '100'): no call.
        self.hass.states.set(entity_id, 120)
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))

        # Transition 120 -> 100 satisfies both conditions again.
        self.hass.states.set(entity_id, 100)
        self.hass.pool.block_till_done()
        self.assertEqual(2, len(self.calls))

        # 151 fails the numeric condition: no call.
        self.hass.states.set(entity_id, 151)
        self.hass.pool.block_till_done()
        self.assertEqual(2, len(self.calls))

    def test_using_trigger_as_condition_with_invalid_condition(self):
        """Event is not a valid condition."""
        entity_id = 'test.entity'
        self.hass.states.set(entity_id, 100)
        assert _setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': [
                    {
                        # Event triggers cannot be turned into conditions;
                        # they should be skipped, not break the automation.
                        'platform': 'event',
                        'event_type': 'test_event',
                    },
                    {
                        'platform': 'numeric_state',
                        'entity_id': entity_id,
                        'below': 150
                    }
                ],
                'condition': 'use_trigger_values',
                'action': {
                    'service': 'test.automation',
                }
            }
        })

        self.hass.bus.fire('test_event')
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))

    def test_automation_list_setting(self):
        """Test setting up the automation component with a list of configs."""
        self.assertTrue(_setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: [{
                'trigger': {
                    'platform': 'event',
                    'event_type': 'test_event',
                },
                'action': {
                    'service': 'test.automation',
                }
            }, {
                'trigger': {
                    'platform': 'event',
                    'event_type': 'test_event_2',
                },
                'action': {
                    'service': 'test.automation',
                }
            }]
        }))

        self.hass.bus.fire('test_event')
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(self.calls))

        self.hass.bus.fire('test_event_2')
        self.hass.pool.block_till_done()
        self.assertEqual(2, len(self.calls))

    def test_automation_calling_two_actions(self):
        """Test if we can call two actions from automation definition."""
        self.assertTrue(_setup_component(self.hass, automation.DOMAIN, {
            automation.DOMAIN: {
                'trigger': {
                    'platform': 'event',
                    'event_type': 'test_event',
                },
                'action': [{
                    'service': 'test.automation',
                    'data': {'position': 0},
                }, {
                    'service': 'test.automation',
                    'data': {'position': 1},
                }],
            }
        }))

        self.hass.bus.fire('test_event')
        self.hass.pool.block_till_done()

        # Both actions ran, in declaration order.
        assert len(self.calls) == 2
        assert self.calls[0].data['position'] == 0
        assert self.calls[1].data['position'] == 1
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class ExpressRouteCircuitsOperations(object):
"""ExpressRouteCircuitsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2016-09-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
    # Wiring supplied by the generated NetworkManagementClient:
    # request pipeline client, msrest (de)serializers and shared config.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    # API version is fixed for this generated operations class.
    self.api_version = "2016-09-01"

    self.config = config
def _delete_initial(
        self, resource_group_name, circuit_name, custom_headers=None, raw=False, **operation_config):
    """Issue the initial DELETE request of the long-running operation.

    Returns a ClientRawResponse when ``raw`` is True, otherwise None;
    polling to completion is handled by the public ``delete`` method.
    """
    # Construct URL
    url = self.delete.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.delete(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    # 200/202 = delete accepted (async), 204 = resource already absent.
    if response.status_code not in [200, 202, 204]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
def delete(
        self, resource_group_name, circuit_name, custom_headers=None, raw=False, polling=True, **operation_config):
    """Deletes the specified express route circuit.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns None or
     ClientRawResponse<None> if raw==True
    :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Fire the initial request; the raw response seeds the poller below.
    raw_result = self._delete_initial(
        resource_group_name=resource_group_name,
        circuit_name=circuit_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # DELETE has no body to deserialize; only wrap when raw requested.
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Select the polling strategy: default ARM polling, none, or custom.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}
def get(
        self, resource_group_name, circuit_name, custom_headers=None, raw=False, **operation_config):
    """Gets information about the specified express route circuit.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of express route circuit.
    :type circuit_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: ExpressRouteCircuit or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuit or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = self.get.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('ExpressRouteCircuit', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}
def _create_or_update_initial(
        self, resource_group_name, circuit_name, parameters, custom_headers=None, raw=False, **operation_config):
    """Issue the initial PUT request of the create-or-update operation.

    Returns the deserialized ExpressRouteCircuit (or ClientRawResponse
    when ``raw`` is True); polling is handled by ``create_or_update``.
    """
    # Construct URL
    url = self.create_or_update.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(parameters, 'ExpressRouteCircuit')

    # Construct and send request; the body is passed to send(), not put().
    request = self._client.put(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)

    # 200 = updated existing circuit, 201 = created new circuit.
    if response.status_code not in [200, 201]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('ExpressRouteCircuit', response)
    if response.status_code == 201:
        deserialized = self._deserialize('ExpressRouteCircuit', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def create_or_update(
        self, resource_group_name, circuit_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
    """Creates or updates an express route circuit.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the circuit.
    :type circuit_name: str
    :param parameters: Parameters supplied to the create or update express
     route circuit operation.
    :type parameters:
     ~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuit
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns ExpressRouteCircuit or
     ClientRawResponse<ExpressRouteCircuit> if raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuit]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuit]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Fire the initial PUT; the raw response seeds the poller below.
    raw_result = self._create_or_update_initial(
        resource_group_name=resource_group_name,
        circuit_name=circuit_name,
        parameters=parameters,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Deserialize the final polled response into the model type.
        deserialized = self._deserialize('ExpressRouteCircuit', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Select the polling strategy: default ARM polling, none, or custom.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}
def _list_arp_table_initial(
        self, resource_group_name, circuit_name, peering_name, device_path, custom_headers=None, raw=False, **operation_config):
    """Issue the initial POST request of the list-ARP-table operation.

    Returns the deserialized result when the service answers 200, or
    None on 202 (operation still running); ``list_arp_table`` polls.
    """
    # Construct URL
    url = self.list_arp_table.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
        'devicePath': self._serialize.url("device_path", device_path, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def list_arp_table(
        self, resource_group_name, circuit_name, peering_name, device_path, custom_headers=None, raw=False, polling=True, **operation_config):
    """Gets the currently advertised ARP table associated with the express
    route circuit in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :param peering_name: The name of the peering.
    :type peering_name: str
    :param device_path: The path of the device.
    :type device_path: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns
     ExpressRouteCircuitsArpTableListResult or
     ClientRawResponse<ExpressRouteCircuitsArpTableListResult> if raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitsArpTableListResult]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitsArpTableListResult]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Fire the initial POST; the raw response seeds the poller below.
    raw_result = self._list_arp_table_initial(
        resource_group_name=resource_group_name,
        circuit_name=circuit_name,
        peering_name=peering_name,
        device_path=device_path,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Deserialize the final polled response into the model type.
        deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Select the polling strategy: default ARM polling, none, or custom.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
list_arp_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}'}
def _list_routes_table_initial(
        self, resource_group_name, circuit_name, peering_name, device_path, custom_headers=None, raw=False, **operation_config):
    """Issue the initial POST that starts the list-routes-table LRO."""
    # Expand the operation's URL template with the path parameters.
    url = self.list_routes_table.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
        'devicePath': self._serialize.url("device_path", device_path, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Query string: only the api-version is required.
    params = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Request headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send the request that kicks off the long-running operation.
    request = self._client.post(url, params)
    response = self._client.send(request, headers, stream=False, **operation_config)

    if response.status_code not in (200, 202):
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # 202 means "accepted, poll for the result" and carries no body.
    result = None
    if response.status_code == 200:
        result = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', response)

    if raw:
        return ClientRawResponse(result, response)
    return result
def list_routes_table(
        self, resource_group_name, circuit_name, peering_name, device_path, custom_headers=None, raw=False, polling=True, **operation_config):
    """Gets the currently advertised routes table associated with the express
    route circuit in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :param peering_name: The name of the peering.
    :type peering_name: str
    :param device_path: The path of the device.
    :type device_path: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns
     ExpressRouteCircuitsRoutesTableListResult or
     ClientRawResponse<ExpressRouteCircuitsRoutesTableListResult> if
     raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitsRoutesTableListResult]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitsRoutesTableListResult]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Start the long-running operation; always take the raw response so
    # the poller has access to status codes and headers.
    initial_response = self._list_routes_table_initial(
        resource_group_name=resource_group_name,
        circuit_name=circuit_name,
        peering_name=peering_name,
        device_path=device_path,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def extract_result(final_response):
        # Deserialize the terminal response of the LRO.
        result = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', final_response)
        if raw:
            return ClientRawResponse(result, final_response)
        return result

    lro_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    if polling is True:
        polling_method = ARMPolling(lro_timeout, **operation_config)
    elif polling is False:
        polling_method = NoPolling()
    else:
        # Caller supplied its own polling strategy object.
        polling_method = polling
    return LROPoller(self._client, initial_response, extract_result, polling_method)
list_routes_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}'}
def _list_routes_table_summary_initial(
        self, resource_group_name, circuit_name, peering_name, device_path, custom_headers=None, raw=False, **operation_config):
    """Issue the initial POST that starts the routes-table-summary LRO."""
    # Expand the operation's URL template with the path parameters.
    url = self.list_routes_table_summary.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
        'devicePath': self._serialize.url("device_path", device_path, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Query string: only the api-version is required.
    params = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Request headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send the request that kicks off the long-running operation.
    request = self._client.post(url, params)
    response = self._client.send(request, headers, stream=False, **operation_config)

    if response.status_code not in (200, 202):
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # 202 means "accepted, poll for the result" and carries no body.
    result = None
    if response.status_code == 200:
        result = self._deserialize('ExpressRouteCircuitsRoutesTableSummaryListResult', response)

    if raw:
        return ClientRawResponse(result, response)
    return result
def list_routes_table_summary(
        self, resource_group_name, circuit_name, peering_name, device_path, custom_headers=None, raw=False, polling=True, **operation_config):
    """Gets the currently advertised routes table summary associated with the
    express route circuit in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :param peering_name: The name of the peering.
    :type peering_name: str
    :param device_path: The path of the device.
    :type device_path: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns
     ExpressRouteCircuitsRoutesTableSummaryListResult or
     ClientRawResponse<ExpressRouteCircuitsRoutesTableSummaryListResult> if
     raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitsRoutesTableSummaryListResult]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitsRoutesTableSummaryListResult]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Start the long-running operation; always take the raw response so
    # the poller has access to status codes and headers.
    initial_response = self._list_routes_table_summary_initial(
        resource_group_name=resource_group_name,
        circuit_name=circuit_name,
        peering_name=peering_name,
        device_path=device_path,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def extract_result(final_response):
        # Deserialize the terminal response of the LRO.
        result = self._deserialize('ExpressRouteCircuitsRoutesTableSummaryListResult', final_response)
        if raw:
            return ClientRawResponse(result, final_response)
        return result

    lro_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    if polling is True:
        polling_method = ARMPolling(lro_timeout, **operation_config)
    elif polling is False:
        polling_method = NoPolling()
    else:
        # Caller supplied its own polling strategy object.
        polling_method = polling
    return LROPoller(self._client, initial_response, extract_result, polling_method)
list_routes_table_summary.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'}
def get_stats(
        self, resource_group_name, circuit_name, custom_headers=None, raw=False, **operation_config):
    """Gets all the stats from an express route circuit in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: ExpressRouteCircuitStats or ClientRawResponse if raw=true
    :rtype:
     ~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitStats or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Expand the operation's URL template with the path parameters.
    url = self.get_stats.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Query string: only the api-version is required.
    params = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Request headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send the request synchronously.
    request = self._client.get(url, params)
    response = self._client.send(request, headers, stream=False, **operation_config)

    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # Only a 200 reaches this point, so the body is always present.
    result = self._deserialize('ExpressRouteCircuitStats', response)

    if raw:
        return ClientRawResponse(result, response)
    return result
get_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/stats'}
def get_peering_stats(
        self, resource_group_name, circuit_name, peering_name, custom_headers=None, raw=False, **operation_config):
    """Gets all stats from an express route circuit in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :param peering_name: The name of the peering.
    :type peering_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: ExpressRouteCircuitStats or ClientRawResponse if raw=true
    :rtype:
     ~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitStats or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Expand the operation's URL template with the path parameters.
    url = self.get_peering_stats.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Query string: only the api-version is required.
    params = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Request headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send the request synchronously.
    request = self._client.get(url, params)
    response = self._client.send(request, headers, stream=False, **operation_config)

    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # Only a 200 reaches this point, so the body is always present.
    result = self._deserialize('ExpressRouteCircuitStats', response)

    if raw:
        return ClientRawResponse(result, response)
    return result
get_peering_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/stats'}
def list(
        self, resource_group_name, custom_headers=None, raw=False, **operation_config):
    """Gets all the express route circuits in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of ExpressRouteCircuit
    :rtype:
     ~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitPaged[~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuit]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    def fetch_page(next_link=None, raw=False):
        # Follow the continuation link verbatim when one is given;
        # otherwise build the first-page request from the URL template.
        if next_link:
            url = next_link
            query_parameters = {}
        else:
            url = self.list.metadata['url']
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)
            query_parameters = {
                'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
            }

        # Request headers.
        headers = {'Content-Type': 'application/json; charset=utf-8'}
        if self.config.generate_client_request_id:
            headers['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            headers.update(custom_headers)
        if self.config.accept_language is not None:
            headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Fetch one page.
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, headers, stream=False, **operation_config)

        if response.status_code != 200:
            error = CloudError(response)
            error.request_id = response.headers.get('x-ms-request-id')
            raise error

        return response

    # The Paged iterator pulls pages lazily via fetch_page.
    if raw:
        header_dict = {}
        return models.ExpressRouteCircuitPaged(fetch_page, self._deserialize.dependencies, header_dict)
    return models.ExpressRouteCircuitPaged(fetch_page, self._deserialize.dependencies)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits'}
def list_all(
        self, custom_headers=None, raw=False, **operation_config):
    """Gets all the express route circuits in a subscription.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of ExpressRouteCircuit
    :rtype:
     ~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitPaged[~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuit]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    def fetch_page(next_link=None, raw=False):
        # Follow the continuation link verbatim when one is given;
        # otherwise build the first-page request from the URL template.
        if next_link:
            url = next_link
            query_parameters = {}
        else:
            url = self.list_all.metadata['url']
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)
            query_parameters = {
                'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
            }

        # Request headers.
        headers = {'Content-Type': 'application/json; charset=utf-8'}
        if self.config.generate_client_request_id:
            headers['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            headers.update(custom_headers)
        if self.config.accept_language is not None:
            headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Fetch one page.
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, headers, stream=False, **operation_config)

        if response.status_code != 200:
            error = CloudError(response)
            error.request_id = response.headers.get('x-ms-request-id')
            raise error

        return response

    # The Paged iterator pulls pages lazily via fetch_page.
    if raw:
        header_dict = {}
        return models.ExpressRouteCircuitPaged(fetch_page, self._deserialize.dependencies, header_dict)
    return models.ExpressRouteCircuitPaged(fetch_page, self._deserialize.dependencies)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCircuits'}
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
from oslo.config import cfg
from neutron.agent import rpc as agent_rpc
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import topics
from neutron import context
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import periodic_task
from neutron.plugins.common import constants
from neutron.services.loadbalancer.agent import agent_api
# Module-level logger for the LBaaS agent manager.
LOG = logging.getLogger(__name__)

# Configuration options registered by the LBaaS agent service.
# 'device_driver' may be given multiple times, one per driver to load.
OPTS = [
    cfg.MultiStrOpt(
        'device_driver',
        default=['neutron.services.loadbalancer.drivers'
                 '.haproxy.namespace_driver.HaproxyNSDriver'],
        help=_('Drivers used to manage loadbalancing devices'),
    ),
]
class DeviceNotFoundOnAgent(n_exc.NotFound):
    """Raised when a pool_id is not known to this agent's instance mapping."""
    msg = _('Unknown device with pool_id %(pool_id)s')
class LbaasAgentManager(periodic_task.PeriodicTasks):
    """RPC endpoint of the LBaaS agent.

    Dispatches loadbalancer CRUD notifications from the plugin to the
    configured device drivers, keeps a pool_id -> driver-name mapping of the
    instances deployed on this host, and periodically reports agent state
    and pool statistics back to the plugin.
    """

    RPC_API_VERSION = '2.0'
    # history
    #   1.0 Initial version
    #   1.1 Support agent_updated call
    #   2.0 Generic API for agent based drivers
    #       - modify/reload/destroy_pool methods were removed;
    #       - added methods to handle create/update/delete for every lbaas
    #         object individually;

    def __init__(self, conf):
        self.conf = conf
        self.context = context.get_admin_context_without_session()
        self.plugin_rpc = agent_api.LbaasAgentApi(
            topics.LOADBALANCER_PLUGIN,
            self.context,
            self.conf.host
        )
        self._load_drivers()

        self.agent_state = {
            'binary': 'neutron-lbaas-agent',
            'host': conf.host,
            'topic': topics.LOADBALANCER_AGENT,
            'configurations': {'device_drivers': self.device_drivers.keys()},
            'agent_type': n_const.AGENT_TYPE_LOADBALANCER,
            'start_flag': True}
        self.admin_state_up = True

        self._setup_state_rpc()
        self.needs_resync = False
        # pool_id->device_driver_name mapping used to store known instances
        self.instance_mapping = {}

    def _load_drivers(self):
        """Import the configured device drivers, keyed by driver name.

        Exits the process on an unimportable driver or a duplicate name,
        since the agent cannot operate with a broken driver set.
        """
        self.device_drivers = {}
        for driver in self.conf.device_driver:
            try:
                driver_inst = importutils.import_object(
                    driver,
                    self.conf,
                    self.plugin_rpc
                )
            except ImportError:
                msg = _('Error importing loadbalancer device driver: %s')
                raise SystemExit(msg % driver)

            driver_name = driver_inst.get_name()
            if driver_name not in self.device_drivers:
                self.device_drivers[driver_name] = driver_inst
            else:
                msg = _('Multiple device drivers with the same name found: %s')
                raise SystemExit(msg % driver_name)

    def _setup_state_rpc(self):
        """Start the periodic agent state report loop, if configured."""
        self.state_rpc = agent_rpc.PluginReportStateAPI(
            topics.LOADBALANCER_PLUGIN)
        report_interval = self.conf.AGENT.report_interval
        if report_interval:
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)

    def _report_state(self):
        """Send the current agent state (and instance count) to the plugin."""
        try:
            instance_count = len(self.instance_mapping)
            self.agent_state['configurations']['instances'] = instance_count
            self.state_rpc.report_state(self.context,
                                        self.agent_state)
            # 'start_flag' must only accompany the very first report.
            self.agent_state.pop('start_flag', None)
        except Exception:
            LOG.exception(_("Failed reporting state!"))

    def initialize_service_hook(self, started_by):
        """Synchronize deployed instances once the RPC service is up."""
        self.sync_state()

    @periodic_task.periodic_task
    def periodic_resync(self, context):
        """Re-run sync_state if a previous operation flagged a resync."""
        if self.needs_resync:
            self.needs_resync = False
            self.sync_state()

    @periodic_task.periodic_task(spacing=6)
    def collect_stats(self, context):
        """Gather stats from each deployed instance and push to the plugin."""
        for pool_id, driver_name in self.instance_mapping.items():
            driver = self.device_drivers[driver_name]
            try:
                stats = driver.get_stats(pool_id)
                if stats:
                    self.plugin_rpc.update_pool_stats(pool_id, stats)
            except Exception:
                LOG.exception(_('Error updating stats'))
                self.needs_resync = True

    def sync_state(self):
        """Reconcile locally deployed instances with the plugin's view."""
        known_instances = set(self.instance_mapping.keys())
        try:
            ready_instances = set(self.plugin_rpc.get_ready_devices())

            # Tear down instances the plugin no longer knows about and
            # (re)deploy everything the plugin reports as ready.
            for deleted_id in known_instances - ready_instances:
                self._destroy_pool(deleted_id)

            for pool_id in ready_instances:
                self._reload_pool(pool_id)

        except Exception:
            LOG.exception(_('Unable to retrieve ready devices'))
            self.needs_resync = True

        self.remove_orphans()

    def _get_driver(self, pool_id):
        """Return the device driver managing pool_id.

        :raises DeviceNotFoundOnAgent: if the pool is unknown to this agent.
        """
        if pool_id not in self.instance_mapping:
            raise DeviceNotFoundOnAgent(pool_id=pool_id)

        driver_name = self.instance_mapping[pool_id]
        return self.device_drivers[driver_name]

    def _reload_pool(self, pool_id):
        """Deploy (or redeploy) the logical configuration of a pool."""
        try:
            logical_config = self.plugin_rpc.get_logical_device(pool_id)
            driver_name = logical_config['driver']
            if driver_name not in self.device_drivers:
                LOG.error(_('No device driver '
                            'on agent: %s.'), driver_name)
                self.plugin_rpc.update_status(
                    'pool', pool_id, constants.ERROR)
                return

            self.device_drivers[driver_name].deploy_instance(logical_config)
            self.instance_mapping[pool_id] = driver_name
            self.plugin_rpc.pool_deployed(pool_id)
        except Exception:
            LOG.exception(_('Unable to deploy instance for pool: %s'), pool_id)
            self.needs_resync = True

    def _destroy_pool(self, pool_id):
        """Undeploy a pool's instance and forget it locally."""
        driver = self._get_driver(pool_id)
        try:
            driver.undeploy_instance(pool_id)
            del self.instance_mapping[pool_id]
            self.plugin_rpc.pool_destroyed(pool_id)
        except Exception:
            LOG.exception(_('Unable to destroy device for pool: %s'), pool_id)
            self.needs_resync = True

    def remove_orphans(self):
        """Ask each driver to clean up instances the agent does not track."""
        for driver_name in self.device_drivers:
            pool_ids = [pool_id for pool_id in self.instance_mapping
                        if self.instance_mapping[pool_id] == driver_name]
            try:
                self.device_drivers[driver_name].remove_orphans(pool_ids)
            except NotImplementedError:
                pass  # Not all drivers will support this

    def _handle_failed_driver_call(self, operation, obj_type, obj_id, driver):
        """Log a failed driver call and mark the object as ERROR."""
        LOG.exception(_('%(operation)s %(obj)s %(id)s failed on device driver '
                        '%(driver)s'),
                      {'operation': operation.capitalize(), 'obj': obj_type,
                       'id': obj_id, 'driver': driver})
        self.plugin_rpc.update_status(obj_type, obj_id, constants.ERROR)

    def create_vip(self, context, vip):
        driver = self._get_driver(vip['pool_id'])
        try:
            driver.create_vip(vip)
        except Exception:
            self._handle_failed_driver_call('create', 'vip', vip['id'],
                                            driver.get_name())
        else:
            self.plugin_rpc.update_status('vip', vip['id'], constants.ACTIVE)

    def update_vip(self, context, old_vip, vip):
        driver = self._get_driver(vip['pool_id'])
        try:
            driver.update_vip(old_vip, vip)
        except Exception:
            self._handle_failed_driver_call('update', 'vip', vip['id'],
                                            driver.get_name())
        else:
            self.plugin_rpc.update_status('vip', vip['id'], constants.ACTIVE)

    def delete_vip(self, context, vip):
        driver = self._get_driver(vip['pool_id'])
        driver.delete_vip(vip)

    def create_pool(self, context, pool, driver_name):
        if driver_name not in self.device_drivers:
            LOG.error(_('No device driver on agent: %s.'), driver_name)
            self.plugin_rpc.update_status('pool', pool['id'], constants.ERROR)
            return

        driver = self.device_drivers[driver_name]
        try:
            driver.create_pool(pool)
        except Exception:
            self._handle_failed_driver_call('create', 'pool', pool['id'],
                                            driver.get_name())
        else:
            # Only start tracking the pool after a successful create.
            self.instance_mapping[pool['id']] = driver_name
            self.plugin_rpc.update_status('pool', pool['id'], constants.ACTIVE)

    def update_pool(self, context, old_pool, pool):
        driver = self._get_driver(pool['id'])
        try:
            driver.update_pool(old_pool, pool)
        except Exception:
            self._handle_failed_driver_call('update', 'pool', pool['id'],
                                            driver.get_name())
        else:
            self.plugin_rpc.update_status('pool', pool['id'], constants.ACTIVE)

    def delete_pool(self, context, pool):
        driver = self._get_driver(pool['id'])
        driver.delete_pool(pool)
        del self.instance_mapping[pool['id']]

    def create_member(self, context, member):
        driver = self._get_driver(member['pool_id'])
        try:
            driver.create_member(member)
        except Exception:
            self._handle_failed_driver_call('create', 'member', member['id'],
                                            driver.get_name())
        else:
            self.plugin_rpc.update_status('member', member['id'],
                                          constants.ACTIVE)

    def update_member(self, context, old_member, member):
        driver = self._get_driver(member['pool_id'])
        try:
            driver.update_member(old_member, member)
        except Exception:
            self._handle_failed_driver_call('update', 'member', member['id'],
                                            driver.get_name())
        else:
            self.plugin_rpc.update_status('member', member['id'],
                                          constants.ACTIVE)

    def delete_member(self, context, member):
        driver = self._get_driver(member['pool_id'])
        driver.delete_member(member)

    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        driver = self._get_driver(pool_id)
        assoc_id = {'pool_id': pool_id, 'monitor_id': health_monitor['id']}
        try:
            driver.create_pool_health_monitor(health_monitor, pool_id)
        except Exception:
            self._handle_failed_driver_call(
                'create', 'health_monitor', assoc_id, driver.get_name())
        else:
            self.plugin_rpc.update_status(
                'health_monitor', assoc_id, constants.ACTIVE)

    def update_pool_health_monitor(self, context, old_health_monitor,
                                   health_monitor, pool_id):
        driver = self._get_driver(pool_id)
        assoc_id = {'pool_id': pool_id, 'monitor_id': health_monitor['id']}
        try:
            driver.update_pool_health_monitor(old_health_monitor,
                                              health_monitor,
                                              pool_id)
        except Exception:
            self._handle_failed_driver_call(
                'update', 'health_monitor', assoc_id, driver.get_name())
        else:
            self.plugin_rpc.update_status(
                'health_monitor', assoc_id, constants.ACTIVE)

    def delete_pool_health_monitor(self, context, health_monitor, pool_id):
        driver = self._get_driver(pool_id)
        driver.delete_pool_health_monitor(health_monitor, pool_id)

    def agent_updated(self, context, payload):
        """Handle the agent_updated notification event."""
        if payload['admin_state_up'] != self.admin_state_up:
            self.admin_state_up = payload['admin_state_up']
            if self.admin_state_up:
                self.needs_resync = True
            else:
                # Snapshot the keys: _destroy_pool mutates instance_mapping
                # while we iterate (RuntimeError on Python 3 dict views).
                for pool_id in list(self.instance_mapping):
                    LOG.info(_("Destroying pool %s due to agent disabling"),
                             pool_id)
                    self._destroy_pool(pool_id)
            LOG.info(_("Agent_updated by server side %s!"), payload)
| |
import copy
import random
from moneyed import Money
from django.test import RequestFactory
from rest_framework import status
from rest_framework.reverse import reverse
from minicash.core.models import Asset, Record, Tag
from minicash.core.serializers import (
AssetSerializer,
CreateRecordSerializer,
ReadRecordSerializer,
TagSerializer,
)
from minicash.utils.testing import RESTTestCase
from .factories import AssetFactory, RecordFactory, TagFactory
class RecordsDataValidatorMixin:
    """Shared assertions comparing posted record payloads with API output."""

    def _compare_records_data(self, data_in, data_out):
        """Assert that data_out matches data_in and the stored ORM record."""
        self.assertIsNotNone(data_in)
        self.assertIsNotNone(data_out)

        in_copy = dict(data_in)
        out_copy = dict(data_out)

        # pk's are not equal (None vs. PK from database)
        pk_in = in_copy.pop('pk')
        pk_out = out_copy.pop('pk')
        self.assertNotEqual(pk_in, pk_out)

        # Tags posted by name get resolved server-side, so the raw 'tags'
        # fields cannot be compared directly in that case.
        if in_copy.get('tags_names', []) != []:
            in_copy.pop('tags')
            out_copy.pop('tags')
        self.assertEqual(in_copy, out_copy)

        # ensure internal structure via ORM: the read serializer's view of
        # the stored record must match the API response (minus tags_names)
        stored = Record.objects.get(pk=pk_out)
        internal_data = ReadRecordSerializer(stored).data
        response_view = data_out.copy()
        response_view.pop('tags_names')
        self.assertEqual(response_view, internal_data)
class RecordsAPICRUDTest(RecordsDataValidatorMixin, RESTTestCase):
    """CRUD tests for the single-record endpoints ('records-list' / 'records-detail')."""
    def test_smoke(self):
        # Fixture/setup sanity check only.
        pass
    def test_list_smoke(self):
        self.assert_success(self.jget(reverse('records-list')))
    def test_list_data(self):
        """Verify the amount of fetched records"""
        RecordFactory.create_batch(10, owner=self.owner)
        res = self.jget(reverse('records-list'))
        self.assert_success(res)
        # The list response is a pair: pagination details, then the records.
        pagination_details, records_data = res.data
        self.assertEqual(10, len(records_data))
        self.assertEqual(10, pagination_details['count'])
    def test_single_details(self):
        """Verify JSON representation of a single record"""
        record = RecordFactory.create(owner=self.owner)
        res = self.jget(reverse('records-detail', args=[record.pk]))
        currency = record.delta.currency
        self.assert_success(res)
        data = res.data
        self.assertEqual(record.pk, data['pk'])
        # 'delta' is serialized as a plain amount; re-wrap it as Money to compare.
        self.assertAlmostEqual(record.delta, Money(data['delta'], currency), places=2)
        self.assertEqual(record.mode, data['mode'])
        self.assertEqual(record.description, data['description'])
        self.assertEqual(list(record.tags.all().values_list('pk', flat=True)),
                         data['tags'])
        self.assertEqual(record.asset_to.pk, data['asset_to'])
        self.assertEqual(record.asset_from.pk, data['asset_from'])
        self.assertEqual(record.extra, data['extra'])
    def test_single_attributes(self):
        """Test which attributes of a Record instance are serialized"""
        record = RecordFactory.create(owner=self.owner)
        res = self.jget(reverse('records-detail', args=[record.pk]))
        self.assert_success(res)
        # Exact serialized field set — no more, no less.
        attributes = frozenset([
            'pk',
            'asset_from', 'asset_to', 'created_dt', 'delta',
            'description', 'extra', 'mode', 'tags',
        ])
        res_attributes = frozenset(res.data.keys())
        self.assertEqual(attributes, res_attributes)
    def test_create_full(self):
        """Create a record with both assets and a set of tag names."""
        asset_to = AssetFactory.create(owner=self.owner)
        asset_from = AssetFactory.create(owner=self.owner)
        record = RecordFactory.build(asset_to=asset_to, asset_from=asset_from, owner=self.owner)
        serializer = CreateRecordSerializer(record)
        # add a list of tags (deduplicated names, tags themselves not saved)
        tags = TagFactory.build_batch(3)
        data_in = serializer.data
        data_in['tags_names'] = list(set(tag.name for tag in tags))
        res = self.jpost(reverse('records-list'), data_in)
        self.assert_created(res)
        self._compare_records_data(data_in, res.data)
    def test_create_asset_partial(self):
        """Create a record with only the destination asset set."""
        asset_to = AssetFactory.create(owner=self.owner)
        record = RecordFactory.build(asset_to=asset_to, asset_from=None, owner=self.owner)
        serializer = CreateRecordSerializer(record)
        data_in = serializer.data
        res = self.jpost(reverse('records-list'), data_in)
        self.assert_created(res)
        self._compare_records_data(data_in, res.data)
    def test_update(self):
        """PATCH with a modified delta is accepted."""
        record = RecordFactory.create(owner=self.owner)
        record.delta = random.randint(0, 1000)
        serializer = CreateRecordSerializer(record)
        res = self.jpatch(reverse('records-detail', args=[record.pk]), serializer.data)
        self.assert_updated(res)
    def test_create_empty_data_with_context(self):
        # Serializer-level validation: an empty payload must not validate.
        req = RequestFactory()
        req.user = self.owner
        record_data = {}
        serializer = CreateRecordSerializer(data=record_data, context={'request': req})
        self.assertFalse(serializer.is_valid())
    def test_create_invalid_mode(self):
        """Records pairing a mode with only one asset are rejected.

        Each combination below supplies a single asset; the serializer is
        expected to reject all of them (presumably each mode requires the
        missing asset — verify against CreateRecordSerializer validation).
        """
        asset_to = AssetFactory.create(owner=self.owner)
        asset_from = AssetFactory.create(owner=self.owner)
        record = RecordFactory.build(asset_to=asset_to, mode=Record.TRANSFER, owner=self.owner)
        res = self.jpost(reverse('records-list'), CreateRecordSerializer(record).data)
        self.assert_bad(res)
        record = RecordFactory.build(asset_to=asset_to, mode=Record.EXPENSE, owner=self.owner)
        res = self.jpost(reverse('records-list'), CreateRecordSerializer(record).data)
        self.assert_bad(res)
        record = RecordFactory.build(asset_from=asset_from, mode=Record.TRANSFER, owner=self.owner)
        res = self.jpost(reverse('records-list'), CreateRecordSerializer(record).data)
        self.assert_bad(res)
        record = RecordFactory.build(asset_from=asset_from, mode=Record.INCOME, owner=self.owner)
        res = self.jpost(reverse('records-list'), CreateRecordSerializer(record).data)
        self.assert_bad(res)
    def test_delete(self):
        """Deleting one of five records leaves four."""
        records = RecordFactory.create_batch(5, owner=self.owner)
        res = self.delete(reverse('records-detail', args=[records[0].pk]))
        self.assert_deleted(res)
        self.assertEqual(4, Record.objects.all().count())
class RecordAPIBulkCRUDTest(RecordsDataValidatorMixin, RESTTestCase):
    """Bulk (list payload) create and mass-delete tests for records."""
    def test_mass_create(self):
        """POSTing a list of two payloads creates both records."""
        asset_to = AssetFactory.create(owner=self.owner)
        asset_from = AssetFactory.create(owner=self.owner)
        record = RecordFactory.build(asset_to=asset_to, asset_from=asset_from, owner=self.owner)
        serializer = CreateRecordSerializer(record)
        # add a list of tags
        tags = TagFactory.build_batch(3)
        data_in_1 = serializer.data
        data_in_1['tags_names'] = [tag.name for tag in tags]
        # Second payload differs only in its amount.
        data_in_2 = copy.deepcopy(data_in_1)
        data_in_2['delta'] = '200.000'
        res = self.jpost(reverse('records-list'), [data_in_1, data_in_2])
        self.assert_created(res)
        self._compare_records_data(data_in_1, res.data[0])
        self._compare_records_data(data_in_2, res.data[1])
    def test_mass_create_one_invalid(self):
        """One invalid element must fail the whole bulk request."""
        asset_to = AssetFactory.create(owner=self.owner)
        asset_from = AssetFactory.create(owner=self.owner)
        record = RecordFactory.build(asset_to=asset_to, asset_from=asset_from, owner=self.owner)
        serializer = CreateRecordSerializer(record)
        # add a list of tags
        tags = TagFactory.build_batch(3)
        data_in_1 = serializer.data
        data_in_1['tags_names'] = [tag.name for tag in tags]
        data_in_2 = copy.deepcopy(data_in_1)
        del data_in_2['delta']
        res = self.jpost(reverse('records-list'), [data_in_1, data_in_2])
        self.assert_bad(res)
        # Errors are reported per element: empty dict for the valid payload,
        # a 'delta' entry for the broken one.
        self.assertEqual({}, res.data[0])
        self.assertEqual(['delta'], list(res.data[1].keys()))
    def test_mass_delete(self):
        """Mass-delete endpoint removes exactly the requested pks."""
        records = RecordFactory.create_batch(9, owner=self.owner)
        records_pks = [r.pk for r in records[:5]]
        res = self.jpost(reverse('records-mass-delete'), {'pks': records_pks})
        self.assertEqual(records_pks[:5], res.data['pks'])
        # 9 created - 5 deleted = 4 remaining for this owner.
        self.assertEqual(4, Record.objects.for_owner(self.owner).count())
class AssetAPITest(RESTTestCase):
    """CRUD tests for the asset endpoints ('assets-list' / 'assets-detail')."""
    def test_smoke(self):
        pass
    def test_list_smoke(self):
        self.assert_success(self.jget(reverse('assets-list')))
    def test_list_data(self):
        """Verify the amount of fetched records"""
        AssetFactory.create_batch(5, owner=self.owner)
        res = self.jget(reverse('assets-list'))
        self.assert_success(res)
        # At least one, at most the five created (list may not be paginated).
        self.assertNotEqual(0, len(res.data))
        self.assertGreaterEqual(5, len(res.data))
    def test_create(self):
        """Round-trip a newly created asset through the API and the ORM."""
        asset = AssetFactory.build(owner=self.owner)
        serializer = AssetSerializer(asset)
        data_in = serializer.data
        res = self.jpost(reverse('assets-list'), data_in)
        self.assert_created(res)
        data_out = res.data
        # pk's are not equal (None vs. PK from database)
        data_in_pk, data_out_pk = data_in.pop('pk'), data_out.pop('pk')
        self.assertNotEqual(data_in_pk, data_out_pk)
        # the rest data is equal
        self.assertEqual(data_in, data_out)
        # ensure internal structure via ORM
        asset_internal = Asset.objects.get(pk=data_out_pk)
        ser_internal = AssetSerializer(asset_internal)
        data_internal = ser_internal.data
        data_internal.pop('pk')
        self.assertEqual(data_out, data_internal)
        # ensure initial balance value
        self.assertEqual(asset_internal.balance, asset_internal.initial_balance)
    def test_update(self):
        """'balance' is accepted in input but never updated via PATCH."""
        asset = AssetFactory.create(owner=self.owner)
        serializer = AssetSerializer(asset)
        data_in = serializer.data
        self.assertIn('balance', data_in)
        res = self.jpatch(reverse('assets-detail', args=[asset.pk]), data_in)
        data_out = res.data
        self.assertNotIn('balance', data_out)
        # The stored balance still matches what was serialized before PATCH.
        self.assertEqual(data_in['balance'], str(Asset.objects.get(pk=data_in['pk']).balance.amount))
    def test_delete_empty(self):
        """An asset with no records can be deleted."""
        asset = AssetFactory.create(owner=self.owner)
        self.delete(reverse('assets-detail', args=[asset.pk]))
        self.assertFalse(Asset.objects.filter(pk=asset.pk).exists())
    def test_delete_with_records(self):
        """An asset referenced by a record must not be deletable (403)."""
        asset = AssetFactory.create(owner=self.owner)
        RecordFactory.create(asset_from=asset, mode=Record.INCOME, owner=self.owner)
        res = self.delete(reverse('assets-detail', args=[asset.pk]))
        self.assertEqual(status.HTTP_403_FORBIDDEN, res.status_code)
        self.assertTrue(Asset.objects.filter(pk=asset.pk).exists())
class TagsAPITest(RESTTestCase):
    """CRUD tests for the tag endpoints ('tags-list' / 'tags-detail')."""
    def test_smoke(self):
        pass
    def test_list_smoke(self):
        self.assert_success(self.jget(reverse('tags-list')))
    def test_create(self):
        """Round-trip a newly created tag through the API and the ORM."""
        tag = TagFactory.build(owner=self.owner)
        serializer = TagSerializer(tag)
        data_in = serializer.data
        res = self.jpost(reverse('tags-list'), data_in)
        self.assert_created(res)
        data_out = res.data
        # pk's are not equal (None vs. PK from database)
        data_in_pk, data_out_pk = data_in.pop('pk'), data_out.pop('pk')
        self.assertNotEqual(data_in_pk, data_out_pk)
        # the rest data is equal
        self.assertEqual(data_in, data_out)
        # ensure internal structure via ORM
        tag_internal = Tag.objects.get(pk=data_out_pk)
        ser_internal = TagSerializer(tag_internal)
        data_internal = ser_internal.data
        data_internal.pop('pk')
        self.assertEqual(data_out, data_internal)
    def test_create_invalid_name(self):
        # A name containing a comma is expected to fail validation.
        tag = TagFactory.build(name='abc,cde', owner=self.owner)
        res = self.jpost(reverse('tags-list'), TagSerializer(tag).data)
        self.assert_bad(res)
    def test_delete(self):
        """Deleting one tag decreases the total count by one."""
        tags = TagFactory.create_batch(5, owner=self.owner)
        tags_count = Tag.objects.for_owner(self.owner).count()
        res = self.delete(reverse('tags-detail', args=[tags[0].pk]))
        self.assert_deleted(res)
        self.assertEqual(tags_count - 1, Tag.objects.all().count())
    def test_mass_delete(self):
        """Mass-delete endpoint removes exactly the requested tag pks."""
        TAGS_TO_DELETE = 3
        TagFactory.create_batch(10, owner=self.owner)
        tags_count = Tag.objects.for_owner(self.owner).count()
        tags_pks = Tag.objects.for_owner(self.owner).values_list('pk', flat=True)[:TAGS_TO_DELETE]
        res = self.jpost(reverse('tags-mass-delete'), {'pks': tags_pks})
        self.assertEqual(tags_pks[:TAGS_TO_DELETE], res.data['pks'])
        self.assertEqual(tags_count - TAGS_TO_DELETE,
                         Tag.objects.for_owner(self.owner).count())
| |
# CREATED:2015-02-17 14:41:28 by Brian McFee <brian.mcfee@nyu.edu>
# this function is lifted wholesale from matploblib v1.4.2,
# and modified so that images are stored explicitly under the tests path
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import functools
import gc
import os
import sys
import shutil
import warnings
import unittest
import nose
import numpy as np
import matplotlib.tests
import matplotlib.units
from matplotlib import cbook
from matplotlib import ticker
from matplotlib import pyplot as plt
from matplotlib import ft2font
from matplotlib.testing.noseclasses import KnownFailureTest, \
KnownFailureDidNotFailTest, ImageComparisonFailure
from matplotlib.testing.compare import comparable_formats, compare_images, \
make_test_filename
def knownfailureif(fail_condition, msg=None, known_exception_class=None ):
    """
    Assume a test will fail if *fail_condition* is True. *fail_condition*
    may also be False or the string 'indeterminate'.
    *msg* is the error message displayed for the test.
    If *known_exception_class* is not None, the failure is only known
    if the exception is an instance of this class. (Default = None)
    """
    # based on numpy.testing.dec.knownfailureif
    if msg is None:
        msg = 'Test known to fail'
    def known_fail_decorator(f):
        # Local import to avoid a hard nose dependency and only incur the
        # import time overhead at actual test-time.
        import nose
        def failer(*args, **kwargs):
            try:
                # Always run the test (to generate images).
                result = f(*args, **kwargs)
            except Exception as err:
                if fail_condition:
                    if known_exception_class is not None:
                        if not isinstance(err,known_exception_class):
                            # This is not the expected exception
                            raise
                    # (Keep the next ultra-long comment so it shows in console.)
                    raise KnownFailureTest(msg) # An error here when running nose means that you don't have the matplotlib.testing.noseclasses:KnownFailure plugin in use.
                else:
                    raise
            # If the test was expected to fail but ran cleanly, flag that too
            # ('indeterminate' means either outcome is acceptable).
            if fail_condition and fail_condition != 'indeterminate':
                raise KnownFailureDidNotFailTest(msg)
            return result
        return nose.tools.make_decorator(f)(failer)
    return known_fail_decorator
def _do_cleanup(original_units_registry):
    # Close every open figure and restore global matplotlib test state so one
    # test cannot leak figures, units, or warning filters into the next.
    plt.close('all')
    gc.collect()
    matplotlib.tests.setup()
    matplotlib.units.registry.clear()
    matplotlib.units.registry.update(original_units_registry)
    warnings.resetwarnings() # reset any warning filters set in tests
class CleanupTest(object):
    """Nose test-class base: snapshots the matplotlib units registry before
    the class runs and restores full global state afterwards."""
    @classmethod
    def setup_class(cls):
        cls.original_units_registry = matplotlib.units.registry.copy()
    @classmethod
    def teardown_class(cls):
        _do_cleanup(cls.original_units_registry)
    def test(self):
        # _func is attached by subclasses (see the type() call in
        # image_comparison below).
        self._func()
class CleanupTestCase(unittest.TestCase):
    '''A wrapper for unittest.TestCase that includes cleanup operations'''
    @classmethod
    def setUpClass(cls):
        # Local import mirrors the module-level one; kept for self-containment.
        import matplotlib.units
        cls.original_units_registry = matplotlib.units.registry.copy()
    @classmethod
    def tearDownClass(cls):
        _do_cleanup(cls.original_units_registry)
def cleanup(func):
    """Decorator restoring matplotlib's global state after *func* runs.

    Snapshots the units registry up front and, even if the wrapped function
    raises, closes all figures and resets the registry and warning filters
    via ``_do_cleanup``.
    """
    @functools.wraps(func)
    def wrapped_function(*args, **kwargs):
        original_units_registry = matplotlib.units.registry.copy()
        try:
            # Propagate the wrapped function's return value; the original
            # wrapper silently discarded it.
            return func(*args, **kwargs)
        finally:
            _do_cleanup(original_units_registry)
    return wrapped_function
def check_freetype_version(ver):
    """Return True when the loaded freetype version satisfies *ver*.

    *ver* may be None (always acceptable), a single version string (exact
    match required), or a (min, max) pair; the range check is inclusive.
    """
    if ver is None:
        return True
    from distutils import version
    if isinstance(ver, six.string_types):
        ver = (ver, ver)
    bounds = [version.StrictVersion(v) for v in ver]
    found = version.StrictVersion(ft2font.__freetype_version__)
    return bounds[0] <= found <= bounds[1]
class ImageComparisonTest(CleanupTest):
    """Generated nose test class that compares figures produced by ``_func``
    against baseline images.  The ``_func``/``_baseline_images``/... class
    attributes are injected by the ``image_comparison`` decorator below."""
    @classmethod
    def setup_class(cls):
        # NOTE(review): called unbound, so the units-registry snapshot lands
        # on CleanupTest itself rather than this subclass — confirm intended.
        CleanupTest.setup_class()
        # Run the figure-generating function once per class, not per test.
        cls._func()
    @staticmethod
    def remove_text(figure):
        # Blank out titles and tick labels so comparisons are robust against
        # font-rendering differences between systems.
        figure.suptitle("")
        for ax in figure.get_axes():
            ax.set_title("")
            ax.xaxis.set_major_formatter(ticker.NullFormatter())
            ax.xaxis.set_minor_formatter(ticker.NullFormatter())
            ax.yaxis.set_major_formatter(ticker.NullFormatter())
            ax.yaxis.set_minor_formatter(ticker.NullFormatter())
            try:
                # Only 3D axes have a zaxis; 2D axes raise AttributeError.
                ax.zaxis.set_major_formatter(ticker.NullFormatter())
                ax.zaxis.set_minor_formatter(ticker.NullFormatter())
            except AttributeError:
                pass
    def test(self):
        # Nose test generator: yields one comparison per (figure, extension).
        baseline_dir, result_dir = _image_directories(self._func)
        for fignum, baseline in zip(plt.get_fignums(), self._baseline_images):
            for extension in self._extensions:
                will_fail = not extension in comparable_formats()
                if will_fail:
                    fail_msg = 'Cannot compare %s files on this system' % extension
                else:
                    fail_msg = 'No failure expected'
                orig_expected_fname = os.path.join(baseline_dir, baseline) + '.' + extension
                # A pdf baseline may stand in for a missing eps baseline.
                if extension == 'eps' and not os.path.exists(orig_expected_fname):
                    orig_expected_fname = os.path.join(baseline_dir, baseline) + '.pdf'
                expected_fname = make_test_filename(os.path.join(
                    result_dir, os.path.basename(orig_expected_fname)), 'expected')
                actual_fname = os.path.join(result_dir, baseline) + '.' + extension
                # Copy the baseline next to the result so failures are easy
                # to inspect side by side.
                if os.path.exists(orig_expected_fname):
                    shutil.copyfile(orig_expected_fname, expected_fname)
                else:
                    will_fail = True
                    fail_msg = 'Do not have baseline image %s' % expected_fname
                @knownfailureif(
                    will_fail, fail_msg,
                    known_exception_class=ImageComparisonFailure)
                def do_test():
                    figure = plt.figure(fignum)
                    if self._remove_text:
                        self.remove_text(figure)
                    figure.savefig(actual_fname, **self._savefig_kwarg)
                    err = compare_images(expected_fname, actual_fname,
                                         self._tol, in_decorator=True)
                    try:
                        if not os.path.exists(expected_fname):
                            raise ImageComparisonFailure(
                                'image does not exist: %s' % expected_fname)
                        if err:
                            raise ImageComparisonFailure(
                                'images not close: %(actual)s vs. %(expected)s '
                                '(RMS %(rms).3f)'%err)
                    except ImageComparisonFailure:
                        # A freetype version mismatch downgrades the failure
                        # to a "known" one instead of a hard error.
                        if not check_freetype_version(self._freetype_version):
                            raise KnownFailureTest(
                                "Mismatched version of freetype. Test requires '%s', you have '%s'" %
                                (self._freetype_version, ft2font.__freetype_version__))
                        raise
                yield (do_test,)
def image_comparison(baseline_images=None, extensions=None, tol=13,
                     freetype_version=None, remove_text=False,
                     savefig_kwarg=None):
    """
    call signature::
      image_comparison(baseline_images=['my_figure'], extensions=None)
    Compare images generated by the test with those specified in
    *baseline_images*, which must correspond else an
    ImageComparisonFailure exception will be raised.
    Keyword arguments:
      *baseline_images*: list
        A list of strings specifying the names of the images generated
        by calls to :meth:`matplotlib.figure.savefig`.
      *extensions*: [ None | list ]
        If *None*, default to all supported extensions.
        Otherwise, a list of extensions to test. For example ['png','pdf'].
      *tol*: (default 13)
        The RMS threshold above which the test is considered failed.
      *freetype_version*: str or tuple
        The expected freetype version or range of versions for this
        test to pass.
      *remove_text*: bool
        Remove the title and tick text from the figure before
        comparison.  This does not remove other, more deliberate,
        text, such as legends and annotations.
      *savefig_kwarg*: dict
        Optional arguments that are passed to the savefig method.
    """
    if baseline_images is None:
        raise ValueError('baseline_images must be specified')
    if extensions is None:
        # default extensions to test
        extensions = ['png', 'pdf', 'svg']
    if savefig_kwarg is None:
        #default no kwargs to savefig
        savefig_kwarg = dict()
    def compare_images_decorator(func):
        # We want to run the setup function (the actual test function
        # that generates the figure objects) only once for each type
        # of output file. The only way to achieve this with nose
        # appears to be to create a test class with "setup_class" and
        # "teardown_class" methods. Creating a class instance doesn't
        # work, so we use type() to actually create a class and fill
        # it with the appropriate methods.
        name = func.__name__
        # For nose 1.0, we need to rename the test function to
        # something without the word "test", or it will be run as
        # well, outside of the context of our image comparison test
        # generator.
        func = staticmethod(func)
        # __get__ on the staticmethod yields the underlying function object,
        # whose __name__ is rewritten in place.
        func.__get__(1).__name__ = str('_private')
        # Build the ImageComparisonTest subclass dynamically; its class
        # attributes carry the decorator's configuration.
        new_class = type(
            name,
            (ImageComparisonTest,),
            {'_func': func,
             '_baseline_images': baseline_images,
             '_extensions': extensions,
             '_tol': tol,
             '_freetype_version': freetype_version,
             '_remove_text': remove_text,
             '_savefig_kwarg': savefig_kwarg})
        return new_class
    return compare_images_decorator
def _image_directories(func):
    """
    Compute the baseline and result image directories for testing *func*.
    Create the result directory if it doesn't exist.
    """
    module_name = func.__module__
    # mods = module_name.split('.')
    # mods.pop(0) # <- will be the name of the package being tested (in
    # most cases "matplotlib")
    # assert mods.pop(0) == 'tests'
    # subdir = os.path.join(*mods)
    # Per the file header: images are stored under the full dotted module
    # name inside the tests path, not under the matplotlib package layout.
    subdir = module_name
    import imp
    def find_dotted_module(module_name, path=None):
        """A version of imp which can handle dots in the module name"""
        res = None
        for sub_mod in module_name.split('.'):
            try:
                # Chained assignment: 'res' keeps the whole (file, path, desc)
                # triple while 'file'/'path' are unpacked for the next hop.
                res = file, path, _ = imp.find_module(sub_mod, path)
                path = [path]
                if file is not None:
                    file.close()
            except ImportError:
                # assume namespace package
                path = sys.modules[sub_mod].__path__
                res = None, path, None
        return res
    # Index [1] is the module's filesystem path from the triple above.
    mod_file = find_dotted_module(func.__module__)[1]
    basedir = os.path.dirname(mod_file)
    baseline_dir = os.path.join(basedir, 'baseline_images', subdir)
    result_dir = os.path.abspath(os.path.join('result_images', subdir))
    if not os.path.exists(result_dir):
        cbook.mkdirs(result_dir)
    return baseline_dir, result_dir
| |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_endpoint_control_client
short_description: Configure endpoint control client lists in Fortinet's FortiOS and FortiGate.
description:
    - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
      user to set and modify the endpoint_control feature and client category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
endpoint_control_client:
description:
- Configure endpoint control client lists.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
ad_groups:
description:
- Endpoint client AD logon groups.
type: str
ftcl_uid:
description:
- Endpoint FortiClient UID.
type: str
id:
description:
- Endpoint client ID.
required: true
type: int
info:
description:
- Endpoint client information.
type: str
src_ip:
description:
- Endpoint client IP address.
type: str
src_mac:
description:
- Endpoint client MAC address.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure endpoint control client lists.
fortios_endpoint_control_client:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
endpoint_control_client:
ad_groups: "<your_own_value>"
ftcl_uid: "<your_own_value>"
id: "5"
info: "<your_own_value>"
src_ip: "<your_own_value>"
src_mac: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open a fortiosapi session from the module parameters.

    Enables API debugging, selects HTTP or HTTPS according to
    data['https'] (defaulting to HTTPS when absent), then logs in
    with the supplied credentials.
    """
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']
    fos.debug('on')
    # Absent key counts as "use HTTPS"; any falsy value disables it.
    use_https = data.get('https', True)
    fos.https('on' if use_https else 'off')
    fos.login(host, username, password, verify=ssl_verify)
def filter_endpoint_control_client_data(json):
    """Return a copy of *json* restricted to this module's known option
    keys, dropping keys that are absent or whose value is None."""
    option_list = ['ad_groups', 'ftcl_uid', 'id',
                   'info', 'src_ip', 'src_mac']
    return {key: json[key]
            for key in option_list
            if key in json and json[key] is not None}
def underscore_to_hyphen(data):
    """Recursively replace '_' with '-' in every dict key within *data*.

    FortiOS expects hyphenated attribute names while Ansible option names
    use underscores.  Lists and dicts are converted recursively; any other
    value is returned unchanged.

    Fixes the original implementation, which rebound the loop variable when
    recursing into a list and therefore never converted dicts nested inside
    lists.
    """
    if isinstance(data, list):
        # Build a new list so converted elements are actually kept.
        return [underscore_to_hyphen(elem) for elem in data]
    if isinstance(data, dict):
        return {key.replace('_', '-'): underscore_to_hyphen(value)
                for key, value in data.items()}
    return data
def endpoint_control_client(data, fos):
    """Apply the requested endpoint-control client change on the device.

    :param data: validated module parameters ('vdom', optional 'state' and
        the 'endpoint_control_client' sub-dictionary).
    :param fos: connected FortiOS handler exposing set()/delete().
    :returns: the raw FortiOS response of the set or delete call.
    """
    vdom = data['vdom']
    client_options = data['endpoint_control_client']
    # The top-level 'state' wins; otherwise fall back to the deprecated
    # nested 'state'.  The original elif tested key membership on the
    # sub-dict before checking it was non-None (TypeError if None) and
    # tested the truthiness of the whole dict instead of the state value.
    if data.get('state'):
        state = data['state']
    elif client_options and client_options.get('state'):
        state = client_options['state']
    else:
        state = True  # legacy fallback; matches neither branch below — preserved
    filtered_data = underscore_to_hyphen(filter_endpoint_control_client_data(client_options))
    if state == "present":
        return fos.set('endpoint-control',
                       'client',
                       data=filtered_data,
                       vdom=vdom)
    elif state == "absent":
        return fos.delete('endpoint-control',
                          'client',
                          mkey=filtered_data['id'],
                          vdom=vdom)
def is_successful_status(status):
    """Decide whether a FortiOS response dict represents success.

    Success is either an explicit 'success' status, or a DELETE that came
    back 404 (the object was already gone, i.e. the desired end state).
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_endpoint_control(data, fos):
    """Dispatch to the endpoint-control configuration handler.

    :returns: (is_error, has_changed, result) tuple for the Ansible layer.
    :raises ValueError: when no endpoint_control_client payload was supplied
        (the original code hit a NameError on the undefined 'resp' instead).
    """
    if not data['endpoint_control_client']:
        raise ValueError('endpoint_control_client option is required')
    resp = endpoint_control_client(data, fos)
    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Ansible entry point: build the argument spec, connect to FortiOS
    (httpapi connection or legacy fortiosapi login) and apply the
    endpoint-control client change."""
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": False, "type": "str",
                  "choices": ["present", "absent"]},
        "endpoint_control_client": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "state": {"required": False, "type": "str",
                          "choices": ["present", "absent"]},
                "ad_groups": {"required": False, "type": "str"},
                "ftcl_uid": {"required": False, "type": "str"},
                "id": {"required": True, "type": "int"},
                "info": {"required": False, "type": "str"},
                "src_ip": {"required": False, "type": "str"},
                "src_mac": {"required": False, "type": "str"}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # httpapi transport: reuse Ansible's persistent connection socket.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_endpoint_control(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy transport: direct login through the fortiosapi package.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_endpoint_control(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
import datetime
import io
import json
import os
import six
from six.moves import range, urllib
import zipfile
from .. import base
from girder.models.notification import Notification, ProgressState
from girder.models.collection import Collection
from girder.models.item import Item
from girder.models.folder import Folder
from girder.models.user import User
import girder.utility.ziputil
def setUpModule():
    # Start the embedded Girder server once for all tests in this module.
    base.startServer()
def tearDownModule():
    # Shut the embedded Girder server down after the whole module ran.
    base.stopServer()
class ResourceTestCase(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
admin = {
'email': 'good@email.com',
'login': 'goodlogin',
'firstName': 'First',
'lastName': 'Last',
'password': 'goodpassword'
}
self.admin = User().createUser(**admin)
user = {
'email': 'user@email.com',
'login': 'userlogin',
'firstName': 'Normal',
'lastName': 'User',
'password': 'goodpassword'
}
self.user = User().createUser(**user)
    def _createFiles(self, user=None):
        """
        Create a set of items, folders, files, metadata, and collections for
        testing.
        :param user: the user who should own these items.
        """
        if user is None:
            user = self.admin
        # Maps expected zip paths -> file contents or metadata dicts; filled
        # in as fixtures are created and checked later by download tests.
        self.expectedZip = {}
        # Create a collection
        coll = {
            'name': 'Test Collection',
            'description': 'The description',
            'public': True,
            'creator': user
        }
        self.collection = Collection().createCollection(**coll)
        self.collectionPrivateFolder = Folder().createFolder(
            parent=self.collection, parentType='collection', name='Private',
            creator=user, public=False)
        # Get the admin user's folders
        resp = self.request(
            path='/folder', method='GET', user=user, params={
                'parentType': 'user',
                'parentId': user['_id'],
                'sort': 'name',
                'sortdir': 1
            })
        # Indexing relies on the name sort: Private first, then Public
        # (per the variable names) — confirm against default user folders.
        self.adminPrivateFolder = Folder().load(resp.json[0]['_id'], user=user)
        self.adminPublicFolder = Folder().load(resp.json[1]['_id'], user=user)
        # Create a folder within the admin public folder
        resp = self.request(
            path='/folder', method='POST', user=user, params={
                'name': 'Folder 1', 'parentId': self.adminPublicFolder['_id']
            })
        self.adminSubFolder = resp.json
        # Create a series of items
        self.items = []
        self.items.append(Item().createItem('Item 1', self.admin, self.adminPublicFolder))
        self.items.append(Item().createItem('Item 2', self.admin, self.adminPublicFolder))
        # Name with characters that need special handling in zip paths.
        self.items.append(Item().createItem('It\\em/3', self.admin, self.adminSubFolder))
        self.items.append(Item().createItem('Item 4', self.admin, self.collectionPrivateFolder))
        self.items.append(Item().createItem('Item 5', self.admin, self.collectionPrivateFolder))
        # Upload a series of files
        file, path, contents = self._uploadFile('File 1', self.items[0])
        self.file1 = file
        self.expectedZip[path] = contents
        file, path, contents = self._uploadFile('File 2', self.items[0])
        self.expectedZip[path] = contents
        file, path, contents = self._uploadFile('File 3', self.items[1])
        self.expectedZip[path] = contents
        file, path, contents = self._uploadFile('File 4', self.items[2])
        self.expectedZip[path] = contents
        file, path, contents = self._uploadFile('File 5', self.items[3])
        self.expectedZip[path] = contents
        # place some metadata on two of the items and one of the folders
        meta = {'key': 'value'}
        Item().setMetadata(self.items[2], meta)
        parents = Item().parentsToRoot(self.items[2], self.admin)
        path = os.path.join(*([part['object'].get(
            'name', part['object'].get('login', '')) for part in parents]
            + [self.items[2]['name'], 'girder-item-metadata.json']))
        self.expectedZip[path] = meta
        meta = {'x': 'y'}
        Item().setMetadata(self.items[4], meta)
        parents = Item().parentsToRoot(self.items[4], self.admin)
        path = os.path.join(*([part['object'].get(
            'name', part['object'].get('login', '')) for part in parents]
            + [self.items[4]['name'], 'girder-item-metadata.json']))
        self.expectedZip[path] = meta
        meta = {'key2': 'value2', 'date': datetime.datetime.utcnow()}
        # mongo rounds to millisecond, so adjust our expectations
        meta['date'] -= datetime.timedelta(
            microseconds=meta['date'].microsecond % 1000)
        Folder().setMetadata(self.adminPublicFolder, meta)
        parents = Folder().parentsToRoot(self.adminPublicFolder, user=user)
        path = os.path.join(*([part['object'].get(
            'name', part['object'].get('login', '')) for part in parents]
            + [self.adminPublicFolder['name'], 'girder-folder-metadata.json']))
        self.expectedZip[path] = meta
def _uploadFile(self, name, item):
"""
Upload a random file to an item.
:param name: name of the file.
:param item: item to upload the file to.
:returns: file: the created file object
path: the path to the file within the parent hierarchy.
contents: the contents that were generated for the file.
"""
contents = os.urandom(1024)
file = self.uploadFile(name, contents, user=self.admin, parent=item, parentType='item')
parents = Item().parentsToRoot(item, user=self.admin)
path = os.path.join(*([part['object'].get(
'name', part['object'].get('login', '')) for part in parents]
+ [item['name'], name]))
return file, path, contents
def testDownloadResources(self):
    """
    Exercise GET /resource/download: parameter validation, a normal zip
    download with metadata, a forced large-zip (Zip64) download, bulk
    deletes via DELETE /resource (with progress notification checks), and
    access control on private files.
    """
    self._createFiles()
    resourceList = {
        'collection': [str(self.collection['_id'])],
        'user': [str(self.admin['_id'])]
    }
    # We should fail with bad json, an empty list, an invalid item in the
    # list, or a list that is an odd format.
    resp = self.request(
        path='/resource/download', method='GET', user=self.admin, params={
            'resources': 'this_is_not_json',
        }, isJson=False)
    self.assertStatus(resp, 400)
    resp = self.request(
        path='/resource/download', method='GET', user=self.admin, params={
            'resources': json.dumps('this_is_not_a_dict_of_resources')
        }, isJson=False)
    self.assertStatus(resp, 400)
    resp = self.request(
        path='/resource/download', method='GET', user=self.admin, params={
            'resources': json.dumps({'not_a_resource': ['not_an_id']})
        }, isJson=False)
    self.assertStatus(resp, 400)
    resp = self.request(
        path='/resource/download', method='GET', user=self.admin, params={
            'resources': json.dumps({'item': []})
        }, isJson=False)
    self.assertStatus(resp, 400)
    resp = self.request(
        path='/resource/download', method='GET', user=self.admin, params={
            'resources': json.dumps({'item': [str(self.admin['_id'])]})
        }, isJson=False)
    self.assertStatus(resp, 400)
    # Download the resources and verify the archive against the expected
    # contents recorded by _createFiles/_uploadFile.
    resp = self.request(
        path='/resource/download', method='GET', user=self.admin, params={
            'resources': json.dumps(resourceList),
            'includeMetadata': True
        }, isJson=False)
    self.assertStatusOk(resp)
    self.assertEqual(resp.headers['Content-Type'], 'application/zip')
    # Named 'zipData' rather than 'zip' so we don't shadow the builtin.
    zipData = zipfile.ZipFile(io.BytesIO(self.getBody(resp, text=False)), 'r')
    self.assertTrue(zipData.testzip() is None)
    self.assertHasKeys(self.expectedZip, zipData.namelist())
    self.assertHasKeys(zipData.namelist(), self.expectedZip)
    for name in zipData.namelist():
        expected = self.expectedZip[name]
        if isinstance(expected, dict):
            # Metadata entries are JSON files; round-trip the expected dict
            # through json so non-JSON types (dates) compare as strings.
            self.assertEqual(json.loads(zipData.read(name).decode('utf8')),
                             json.loads(json.dumps(expected, default=str)))
        else:
            if not isinstance(expected, six.binary_type):
                expected = expected.encode('utf8')
            self.assertEqual(expected, zipData.read(name))
    # Download the same resources again, this time triggering the large zip
    # file creation (artificially forced). We could do this naturally by
    # downloading >65536 files, but that would make the test take several
    # minutes.  Save and restore the limit so other tests in this process
    # are not affected by the artificially low value.
    originalLimit = girder.utility.ziputil.Z_FILECOUNT_LIMIT
    girder.utility.ziputil.Z_FILECOUNT_LIMIT = 5
    try:
        resourceList = {
            'item': [str(item['_id']) for item in self.items]
        }
        resp = self.request(
            path='/resource/download', method='POST', user=self.admin,
            params={
                'resources': json.dumps(resourceList),
                'includeMetadata': True
            }, isJson=False,
            additionalHeaders=[('X-HTTP-Method-Override', 'GET')])
        self.assertStatusOk(resp)
        self.assertEqual(resp.headers['Content-Type'], 'application/zip')
        zipData = zipfile.ZipFile(
            io.BytesIO(self.getBody(resp, text=False)), 'r')
        self.assertTrue(zipData.testzip() is None)
    finally:
        girder.utility.ziputil.Z_FILECOUNT_LIMIT = originalLimit
    # Test deleting resources
    resourceList = {
        'collection': [str(self.collection['_id'])],
        'folder': [str(self.adminSubFolder['_id'])],
    }
    resp = self.request(
        path='/resource', method='DELETE', user=self.admin, params={
            'resources': json.dumps(resourceList),
            'progress': True
        }, isJson=False)
    self.assertStatusOk(resp)
    # Make sure progress record exists and that it is set to expire soon
    notifs = list(Notification().get(self.admin))
    self.assertEqual(len(notifs), 1)
    self.assertEqual(notifs[0]['type'], 'progress')
    self.assertEqual(notifs[0]['data']['state'], ProgressState.SUCCESS)
    self.assertEqual(notifs[0]['data']['title'], 'Deleting resources')
    self.assertEqual(notifs[0]['data']['message'], 'Done')
    self.assertEqual(notifs[0]['data']['total'], 6)
    self.assertEqual(notifs[0]['data']['current'], 6)
    self.assertTrue(notifs[0]['expires'] < datetime.datetime.utcnow()
                    + datetime.timedelta(minutes=1))
    # Test deletes using a body on the request
    resourceList = {
        'item': [str(self.items[1]['_id'])]
    }
    resp = self.request(
        path='/resource', method='DELETE', user=self.admin,
        body=urllib.parse.urlencode({
            'resources': json.dumps(resourceList)
        }),
        type='application/x-www-form-urlencoded', isJson=False)
    self.assertStatusOk(resp)
    # Test deletes using POST and override method
    resourceList = {
        'item': [str(self.items[0]['_id'])]
    }
    resp = self.request(
        path='/resource', method='POST', user=self.admin, params={
            'resources': json.dumps(resourceList)
        }, isJson=False,
        additionalHeaders=[('X-HTTP-Method-Override', 'DELETE')])
    self.assertStatusOk(resp)
    # All of the items should be gone now
    resp = self.request(path='/item', method='GET', user=self.admin,
                        params={'text': 'Item'})
    self.assertStatusOk(resp)
    self.assertEqual(len(resp.json), 0)
    # Add a file under the admin private folder
    item = Item().createItem('Private Item', self.admin, self.adminPrivateFolder)
    _, path, contents = self._uploadFile('private_file', item)
    self.assertEqual(path, 'goodlogin/Private/Private Item/private_file')
    # Download as admin, should get private file
    resp = self.request(
        path='/resource/download', method='GET', user=self.admin, params={
            'resources': json.dumps({'user': [str(self.admin['_id'])]})
        }, isJson=False)
    self.assertStatusOk(resp)
    self.assertEqual(resp.headers['Content-Type'], 'application/zip')
    zipData = zipfile.ZipFile(io.BytesIO(self.getBody(resp, text=False)), 'r')
    self.assertTrue(zipData.testzip() is None)
    self.assertEqual(zipData.namelist(), [path])
    self.assertEqual(zipData.read(path), contents)
    # Download as normal user, should get empty zip
    resp = self.request(
        path='/resource/download', method='GET', user=self.user, params={
            'resources': json.dumps({'user': [str(self.admin['_id'])]})
        }, isJson=False)
    self.assertStatusOk(resp)
    self.assertEqual(resp.headers['Content-Type'], 'application/zip')
    zipData = zipfile.ZipFile(io.BytesIO(self.getBody(resp, text=False)), 'r')
    self.assertTrue(zipData.testzip() is None)
    self.assertEqual(zipData.namelist(), [])
def testDeleteResources(self):
    """Exercise DELETE /resource: invalid types, files, users, and bad ids."""
    self._createFiles(user=self.user)
    # A non-access-controlled resource type cannot be deleted through
    # this endpoint.
    response = self.request('/resource', method='DELETE', user=self.admin, params={
        'resources': json.dumps({'assetstore': [str(self.assetstore['_id'])]})
    })
    self.assertStatus(response, 400)
    self.assertEqual(
        response.json['message'],
        'Invalid resource types requested: assetstore')
    # A single file can be deleted, with progress reporting enabled.
    payload = {
        'resources': json.dumps({'file': [str(self.file1['_id'])]}),
        'progress': True,
    }
    response = self.request(
        path='/resource', method='DELETE', user=self.admin,
        params=payload, isJson=False)
    self.assertStatusOk(response)
    # Deleting a user who owns a folder removes the user as well.
    payload = {
        'resources': json.dumps({'user': [str(self.user['_id'])]}),
        'progress': True,
    }
    response = self.request(
        path='/resource', method='DELETE', user=self.admin,
        params=payload, isJson=False)
    self.assertStatusOk(response)
    response = self.request(path='/user', method='GET', user=self.admin)
    self.assertStatusOk(response)
    self.assertEqual(len(response.json), 1)
    # Referencing an id that is not an existing item produces an error.
    response = self.request(
        path='/resource', method='DELETE', user=self.admin, params={
            'resources': json.dumps({'item': [str(self.admin['_id'])]})
        }, isJson=False)
    self.assertStatus(response, 400)
def testGetResourceById(self):
    """Fetch resources by id via GET /resource/{id}?type=..."""
    self._createFiles()
    # A user document can be retrieved by id.
    response = self.request(
        path='/resource/%s' % self.admin['_id'], method='GET',
        user=self.admin, params={'type': 'user'})
    self.assertStatusOk(response)
    self.assertEqual(str(response.json['_id']), str(self.admin['_id']))
    self.assertEqual(response.json['email'], 'good@email.com')
    # Files are also reachable through this endpoint.
    response = self.request(
        path='/resource/%s' % self.file1['_id'], method='GET',
        user=self.admin, params={'type': 'file'})
    self.assertStatusOk(response)
    self.assertEqual(str(response.json['_id']), str(self.file1['_id']))
def testGetResourceByPath(self):
    """
    Test GET /resource/lookup: resolving users, collections, folders,
    subfolders, and items by hierarchy path (including names containing
    escaped '/' and '\\' characters), plus permission and bogus-path
    error cases.
    """
    self._createFiles()
    # test users
    resp = self.request(path='/resource/lookup',
                        method='GET', user=self.admin,
                        params={'path': '/user/goodlogin'})
    self.assertStatusOk(resp)
    self.assertEqual(str(resp.json['_id']), str(self.admin['_id']))
    resp = self.request(path='/resource/lookup',
                        method='GET', user=self.user,
                        params={'path': '/user/userlogin'})
    self.assertStatusOk(resp)
    self.assertEqual(str(resp.json['_id']), str(self.user['_id']))
    # test collections
    resp = self.request(path='/resource/lookup',
                        method='GET', user=self.user,
                        params={'path': '/collection/Test Collection'})
    self.assertStatusOk(resp)
    self.assertEqual(str(resp.json['_id']), str(self.collection['_id']))
    resp = self.request(path='/resource/lookup',
                        method='GET', user=self.admin,
                        params={'path':
                                '/collection/Test Collection/'
                                + self.collectionPrivateFolder['name']})
    self.assertStatusOk(resp)
    self.assertEqual(str(resp.json['_id']),
                     str(self.collectionPrivateFolder['_id']))
    # test folders
    resp = self.request(path='/resource/lookup',
                        method='GET', user=self.user,
                        params={'path': '/user/goodlogin/Public'})
    self.assertStatusOk(resp)
    self.assertEqual(
        str(resp.json['_id']), str(self.adminPublicFolder['_id']))
    # The normal user cannot look up the admin's private folder.
    resp = self.request(path='/resource/lookup',
                        method='GET', user=self.user,
                        params={'path': '/user/goodlogin/Private'})
    self.assertStatus(resp, 400)
    # test subfolders
    resp = self.request(path='/resource/lookup',
                        method='GET', user=self.admin,
                        params={'path': '/user/goodlogin/Public/Folder 1'})
    self.assertStatusOk(resp)
    self.assertEqual(
        str(resp.json['_id']), str(self.adminSubFolder['_id']))
    # test items
    privateFolder = self.collectionPrivateFolder['name']
    # The third path exercises escaped characters in an item name.
    paths = ('/user/goodlogin/Public/Item 1',
             '/user/goodlogin/Public/Item 2',
             '/user/goodlogin/Public/Folder 1/It\\\\em\\/3',
             '/collection/Test Collection/%s/Item 4' % privateFolder,
             '/collection/Test Collection/%s/Item 5' % privateFolder)
    # Items in the private collection folder need admin to resolve.
    users = (self.user,
             self.user,
             self.user,
             self.admin,
             self.admin)
    for path, item, user in zip(paths, self.items, users):
        resp = self.request(path='/resource/lookup',
                            method='GET', user=user,
                            params={'path': path})
        self.assertStatusOk(resp)
        self.assertEqual(
            str(resp.json['_id']), str(item['_id']))
    # test bogus path
    resp = self.request(path='/resource/lookup',
                        method='GET', user=self.user,
                        params={'path': '/bogus/path'})
    self.assertStatus(resp, 400)
    resp = self.request(path='/resource/lookup',
                        method='GET', user=self.user,
                        params={'path': '/collection/bogus/path'})
    self.assertStatus(resp, 400)
def testGetResourcePath(self):
    """Check GET /resource/{id}/path for each model type and error case."""
    self._createFiles()

    def requestPath(doc, modelType):
        # Helper: ask the endpoint for the hierarchy path of one document,
        # always as the non-admin user.
        return self.request(
            path='/resource/' + str(doc['_id']) + '/path',
            method='GET', user=self.user, params={'type': modelType})

    # Each model type reports its full hierarchy path.
    cases = [
        (self.user, 'user', '/user/userlogin'),
        (self.collection, 'collection', '/collection/Test Collection'),
        (self.adminSubFolder, 'folder', '/user/goodlogin/Public/Folder 1'),
        (self.items[2], 'item',
         '/user/goodlogin/Public/Folder 1/It\\\\em\\/3'),
        (self.file1, 'file', '/user/goodlogin/Public/Item 1/File 1'),
    ]
    for doc, modelType, expectedPath in cases:
        response = requestPath(doc, modelType)
        self.assertStatusOk(response)
        self.assertEqual(response.json, expectedPath)
    # A folder the user cannot read is forbidden.
    response = requestPath(self.adminPrivateFolder, 'folder')
    self.assertStatus(response, 403)
    # A mismatched id/type pair is a bad request.
    response = requestPath(self.user, 'folder')
    self.assertStatus(response, 400)
    # So is an unknown type string.
    response = requestPath(self.user, 'invalid type')
    self.assertStatus(response, 400)
def testMove(self):
    """
    Test PUT /resource/move: invalid resource types, moving items and
    folders between folders and users, illegal destinations (user for an
    item, file, a folder into itself), permission checks for the non-admin
    user, and nonexistent ids.
    """
    self._createFiles()
    # Make sure passing invalid resource type is caught gracefully
    resp = self.request(
        path='/resource/move', method='PUT', user=self.admin, params={
            'resources': json.dumps({'invalid_type': [str(self.items[0]['_id'])]}),
            'parentType': 'folder',
            'parentId': str(self.adminPrivateFolder['_id'])
        })
    self.assertStatus(resp, 400)
    self.assertEqual(resp.json['message'], 'Invalid resource types requested: invalid_type')
    # Move item1 from the public to the private folder
    resp = self.request(
        path='/resource/move', method='PUT', user=self.admin, params={
            'resources': json.dumps({'item': [str(self.items[0]['_id'])]}),
            'parentType': 'folder',
            'parentId': str(self.adminPrivateFolder['_id']),
            'progress': True
        })
    self.assertStatusOk(resp)
    # Verify the item's parent folder actually changed.
    resp = self.request(path='/item/%s' % self.items[0]['_id'],
                        method='GET', user=self.admin)
    self.assertStatusOk(resp)
    self.assertEqual(resp.json['folderId'],
                     str(self.adminPrivateFolder['_id']))
    # We shouldn't be able to move the item into the user
    resp = self.request(
        path='/resource/move', method='PUT', user=self.admin,
        params={
            'resources': json.dumps({'item': [str(self.items[0]['_id'])]}),
            'parentType': 'user',
            'parentId': str(self.admin['_id'])
        })
    self.assertStatus(resp, 400)
    # Asking to move into a file is also an error
    resp = self.request(
        path='/resource/move', method='PUT', user=self.admin,
        params={
            'resources': json.dumps({'item': [str(self.items[0]['_id'])]}),
            'parentType': 'file',
            'parentId': str(self.file1['_id'])
        })
    self.assertStatus(resp, 400)
    # Move item1 and subFolder from the public to the private folder (item1
    # is already there).
    resp = self.request(
        path='/resource/move', method='PUT', user=self.admin,
        params={
            'resources': json.dumps({
                'folder': [str(self.adminSubFolder['_id'])],
                'item': [str(self.items[0]['_id'])]}),
            'parentType': 'folder',
            'parentId': str(self.adminPrivateFolder['_id']),
            'progress': True
        })
    self.assertStatusOk(resp)
    resp = self.request(path='/item/%s' % self.items[0]['_id'],
                        method='GET', user=self.admin)
    self.assertStatusOk(resp)
    self.assertEqual(resp.json['folderId'],
                     str(self.adminPrivateFolder['_id']))
    resp = self.request(
        path='/folder/%s' % self.adminSubFolder['_id'], method='GET',
        user=self.admin)
    self.assertStatusOk(resp)
    self.assertEqual(resp.json['parentId'],
                     str(self.adminPrivateFolder['_id']))
    # You can't move a folder into itself
    resp = self.request(
        path='/resource/move', method='PUT', user=self.admin,
        params={
            'resources': json.dumps({
                'folder': [str(self.adminSubFolder['_id'])]}),
            'parentType': 'folder',
            'parentId': str(self.adminSubFolder['_id']),
            'progress': True
        })
    self.assertStatus(resp, 400)
    # You can move a folder into a user
    resp = self.request(
        path='/resource/move', method='PUT', user=self.admin,
        params={
            'resources': json.dumps({
                'folder': [str(self.adminSubFolder['_id'])]}),
            'parentType': 'user',
            'parentId': str(self.admin['_id'])
        })
    self.assertStatusOk(resp)
    # The folder should now hang directly off the user.
    resp = self.request(
        path='/folder/%s' % self.adminSubFolder['_id'], method='GET',
        user=self.admin)
    self.assertStatusOk(resp)
    self.assertEqual(resp.json['parentCollection'], 'user')
    self.assertEqual(resp.json['parentId'], str(self.admin['_id']))
    # The non-admin user can't move other people's stuff
    resp = self.request(
        path='/resource/move', method='PUT', user=self.user,
        params={
            'resources': json.dumps({'item': [str(self.items[2]['_id'])]}),
            'parentType': 'folder',
            'parentId': str(self.adminPublicFolder['_id'])
        })
    self.assertStatus(resp, 403)
    # You can't move files
    resp = self.request(
        path='/resource/move', method='PUT', user=self.admin,
        params={
            'resources': json.dumps({
                'file': [str(self.file1['_id'])]}),
            'parentType': 'item',
            'parentId': str(self.items[1]['_id'])
        })
    self.assertStatus(resp, 400)
    # Moving a non-existant object should give an error
    resp = self.request(
        path='/resource/move', method='PUT', user=self.admin, params={
            'resources': json.dumps({'item': [str(self.admin['_id'])]}),
            'parentType': 'folder',
            'parentId': str(self.adminPublicFolder['_id'])
        }, isJson=False)
    self.assertStatus(resp, 400)
def testCopy(self):
    """
    Test POST /resource/copy: copying a public folder as the non-admin
    user (with progress verified via the notification stream), permission
    denial on private folders, a bulk copy of items from multiple
    locations as admin, and nonexistent-id errors.
    """
    self._createFiles()
    # The non-admin user should be able to copy public documents
    resp = self.request(
        path='/resource/copy', method='POST', user=self.user,
        params={
            'resources': json.dumps({
                'folder': [str(self.adminSubFolder['_id'])]}),
            'parentType': 'user',
            'parentId': str(self.user['_id']),
            'progress': True
        })
    self.assertStatusOk(resp)
    # Locate the copy under the user's own hierarchy.
    resp = self.request(
        path='/folder', method='GET', user=self.user,
        params={
            'parentType': 'user',
            'parentId': str(self.user['_id']),
            'text': 'Folder 1'})
    self.assertStatusOk(resp)
    self.assertEqual(len(resp.json), 1)
    copiedFolder = resp.json[0]
    # The copy must be a distinct document, not the original.
    self.assertNotEqual(str(copiedFolder['_id']), str(self.adminSubFolder['_id']))
    # We should have reported 2 things copied in the progress (1 folder and 1 item)
    resp = self.request(
        path='/notification/stream', method='GET', user=self.user,
        isJson=False, params={'timeout': 1})
    messages = self.getSseMessages(resp)
    self.assertTrue(len(messages) >= 1)
    self.assertEqual(messages[-1]['data']['current'], 2)
    # The non-admin user should not be able to copy private documents
    resp = self.request(
        path='/resource/copy', method='POST', user=self.user,
        params={
            'resources': json.dumps({
                'folder': [str(self.adminPrivateFolder['_id'])]}),
            'parentType': 'user',
            'parentId': str(self.user['_id'])
        })
    self.assertStatus(resp, 403)
    # Copy a group of items from different spots. Do this as admin
    resp = self.request(
        path='/resource/copy', method='POST', user=self.admin,
        params={
            'resources': json.dumps({
                'item': [str(item['_id']) for item in self.items]}),
            'parentType': 'folder',
            'parentId': str(copiedFolder['_id']),
            'progress': True
        })
    self.assertStatusOk(resp)
    # We already had one item in that folder, so now we should have one
    # more than in the self.items list. The user should be able to see
    # these items.
    resp = self.request(path='/item', method='GET', user=self.user,
                        params={'folderId': str(copiedFolder['_id'])})
    self.assertStatusOk(resp)
    self.assertEqual(len(resp.json), len(self.items) + 1)
    # Copying a non-existant object should give an error
    resp = self.request(
        path='/resource/copy', method='POST', user=self.admin, params={
            'resources': json.dumps({'item': [str(self.admin['_id'])]}),
            'parentType': 'folder',
            'parentId': str(self.adminPublicFolder['_id'])
        }, isJson=False)
    self.assertStatus(resp, 400)
def testZipUtil(self):
    """
    Drive girder.utility.ziputil.ZipGenerator directly to exercise the
    Zip64 (large archive) code paths: a >4GB file, a trailing small file,
    Unicode file names, and a name containing a null byte.
    """
    # Exercise the large zip file code
    def genEmptyFile(fileLength, chunkSize=65536):
        # Factory returning a generator function that yields fileLength
        # null characters in chunkSize pieces, so huge "files" can be
        # streamed without allocating their full contents.
        chunk = '\0' * chunkSize
        def genEmptyData():
            for val in range(0, fileLength, chunkSize):
                if fileLength - val < chunkSize:
                    yield chunk[:fileLength - val]
                else:
                    yield chunk
        return genEmptyData
    zip = girder.utility.ziputil.ZipGenerator()
    # Most of the time in generating a zip file is spent in CRC
    # calculation. We turn it off so that we can perform tests in a timely
    # fashion.
    zip.useCRC = False
    # 6 GiB exceeds the 4 GiB Zip64 threshold.
    for _ in zip.addFile(
            genEmptyFile(6 * 1024 * 1024 * 1024), 'bigfile'):
        pass
    # Add a second small file at the end to test some of the other Zip64
    # code
    for _ in zip.addFile(genEmptyFile(100), 'smallfile'):
        pass
    # Test that we don't crash on Unicode file names
    for _ in zip.addFile(
            genEmptyFile(100), u'\u0421\u0443\u043f\u0435\u0440-\u0440'
            '\u0443\u0441\u0441\u043a\u0438, \u0627\u0633\u0645 \u0627'
            '\u0644\u0645\u0644\u0641 \u0628\u0627\u0644\u0644\u063a'
            '\u0629 \u0627\u0644\u0639\u0631\u0628\u064a\u0629'):
        pass
    # Test filename with a null
    for _ in zip.addFile(genEmptyFile(100), 'with\x00null'):
        pass
    footer = zip.footer()
    # NOTE(review): presumably the trailing bytes are the Zip64 offset
    # placeholder (0xFFFFFFFF) followed by a zero-length comment field --
    # confirm against the ziputil implementation.
    self.assertEqual(footer[-6:], b'\xFF\xFF\xFF\xFF\x00\x00')
def testResourceTimestamps(self):
    """Only admins may rewrite created/updated timestamps via the endpoint."""
    self._createFiles()
    created = datetime.datetime(2000, 1, 1)
    updated = datetime.datetime(2001, 1, 1)
    endpoint = '/resource/%s/timestamp' % self.collection['_id']
    timestampParams = {
        'type': 'collection',
        'created': str(created),
        'updated': str(updated),
    }
    # non-admin cannot use this endpoint; the timestamps stay untouched.
    response = self.request(
        path=endpoint,
        method='PUT',
        user=self.user,
        params=dict(timestampParams))
    self.assertStatus(response, 403)
    doc = Collection().load(self.collection['_id'], force=True)
    self.assertNotEqual(doc['created'], created)
    self.assertNotEqual(doc['updated'], updated)
    # admin can change both timestamps.
    response = self.request(
        path=endpoint,
        method='PUT',
        user=self.admin,
        params=dict(timestampParams))
    self.assertStatusOk(response)
    doc = Collection().load(self.collection['_id'], force=True)
    self.assertEqual(doc['created'], created)
    self.assertEqual(doc['updated'], updated)
| |
# Autoreloading launcher.
# Borrowed from Peter Hunt and the CherryPy project (http://www.cherrypy.org).
# Some taken from Ian Bicking's Paste (http://pythonpaste.org/).
#
# Portions copyright (c) 2004, CherryPy Team (team@cherrypy.org)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the CherryPy Team nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import # Avoid importing `importlib` from this package.
import os
import signal
import sys
import tempfile
import time
import traceback
from django.conf import settings
from django.core.signals import request_finished
from django.utils._os import upath
from importlib import import_module
from django.utils import six
# Prefer the real thread module; fall back to the dummy single-threaded
# implementation where threads are unavailable.
try:
    from django.utils.six.moves import _thread as thread
except ImportError:
    from django.utils.six.moves import _dummy_thread as thread
# This import does nothing, but it's necessary to avoid some race conditions
# in the threading module. See http://code.djangoproject.com/ticket/2330 .
try:
    import threading  # NOQA
except ImportError:
    pass
# termios is POSIX-only; when missing, ensure_echo_on() becomes a no-op.
try:
    import termios
except ImportError:
    termios = None
# Feature detection: use inotify (Linux) when pyinotify is installed and a
# probe init succeeds.
USE_INOTIFY = False
try:
    # Test whether inotify is enabled and likely to work
    import pyinotify
    fd = pyinotify.INotifyWrapper.create().inotify_init()
    if fd >= 0:
        USE_INOTIFY = True
        # The descriptor was only needed for the probe; release it.
        os.close(fd)
except ImportError:
    pass
# Feature detection: kqueue (BSD/macOS).  Any failure here -- missing API,
# rlimit lookup, or the sysctl call -- disables the kqueue reloader.
try:
    import select
    select.kevent, select.kqueue
    USE_KQUEUE = True
    import resource
    NOFILES_SOFT, NOFILES_HARD = resource.getrlimit(resource.RLIMIT_NOFILE)
    import subprocess
    command = ["sysctl", "-n", "kern.maxfilesperproc"]
    NOFILES_KERN = int(subprocess.check_output(command).strip())
except Exception:
    USE_KQUEUE = False
RUN_RELOADER = True  # Embedders may clear this to stop reloader_thread().
# Modification times recorded per file by code_changed().
_mtimes = {}
_win = (sys.platform == "win32")
# Files that raised errors on import; kept watched so a fix triggers reload.
_error_files = []
def gen_filenames():
    """
    Yield the path of every file worth watching for changes: every module
    currently in sys.modules, compiled translation (.mo) files when i18n
    is enabled, and files recorded in _error_files.
    """
    filenames = [
        module.__file__ for module in sys.modules.values()
        if hasattr(module, '__file__')
    ]
    if settings.USE_I18N:
        # Also watch the .mo files produced by the compilemessages
        # management command, so translation updates trigger a reload.
        basedirs = [
            os.path.join(os.path.dirname(os.path.dirname(__file__)),
                         'conf', 'locale'),
            'locale',
        ]
        for appname in reversed(settings.INSTALLED_APPS):
            app = import_module(appname)
            basedirs.append(
                os.path.join(os.path.dirname(upath(app.__file__)), 'locale'))
        basedirs.extend(settings.LOCALE_PATHS)
        basedirs = [os.path.abspath(basedir) for basedir in basedirs
                    if os.path.isdir(basedir)]
        for basedir in basedirs:
            for dirpath, _dirnames, locale_filenames in os.walk(basedir):
                filenames.extend(
                    os.path.join(dirpath, locale_filename)
                    for locale_filename in locale_filenames
                    if locale_filename.endswith('.mo'))
    for filename in filenames + _error_files:
        if not filename:
            continue
        if filename.endswith(".pyc") or filename.endswith(".pyo"):
            # Watch the source file, not the compiled artifact.
            filename = filename[:-1]
        if filename.endswith("$py.class"):
            # Jython compiled classes map back to their .py source.
            filename = filename[:-9] + ".py"
        if os.path.exists(filename):
            yield filename
def inotify_code_changed():
    """
    Block until inotify reports a filesystem event on any watched source
    file, then return True.
    """
    watch_manager = pyinotify.WatchManager()
    notifier = pyinotify.Notifier(watch_manager)
    event_mask = (
        pyinotify.IN_MODIFY |
        pyinotify.IN_DELETE |
        pyinotify.IN_ATTRIB |
        pyinotify.IN_MOVED_FROM |
        pyinotify.IN_MOVED_TO |
        pyinotify.IN_CREATE
    )

    def update_watch(sender=None, **kwargs):
        # (Re)register every known source file with the watch manager.
        for path in gen_filenames():
            watch_manager.add_watch(path, event_mask)

    # New modules may get imported while a request is processed, so refresh
    # the watch list after every request.
    request_finished.connect(update_watch)
    update_watch()
    # Block until an event happens.
    notifier.check_events(timeout=None)
    notifier.stop()
    # If we are here the code must have changed.
    return True
def kqueue_code_changed():
    """
    Checks for changed code using kqueue. After being called
    it blocks until a change event has been fired.

    Returns True when a watched source file changes.  An event on the
    internal "watcher" temp file (written after each request) only causes
    the watch list to be refreshed, without returning.
    """
    kqueue = select.kqueue()
    # Utility function to create kevents.
    _filter = select.KQ_FILTER_VNODE
    flags = select.KQ_EV_ADD | select.KQ_EV_CLEAR
    fflags = select.KQ_NOTE_DELETE | select.KQ_NOTE_WRITE | select.KQ_NOTE_RENAME
    def make_kevent(descriptor):
        return select.kevent(descriptor, _filter, flags, fflags)
    # New modules may get imported when a request is processed. We add a file
    # descriptor to the kqueue to exit the kqueue.control after each request.
    buf_kwargs = {'buffering' if six.PY3 else 'bufsize': 0}
    watcher = tempfile.TemporaryFile(**buf_kwargs)
    kqueue.control([make_kevent(watcher)], 0)
    def update_watch(sender=None, **kwargs):
        # Writing to the temp file fires its kevent, waking the loop below.
        watcher.write(b'.')
    request_finished.connect(update_watch)
    # We have to manage a set of descriptors to avoid the overhead of opening
    # and closing every files whenever we reload the set of files to watch.
    filenames = set()
    descriptors = set()
    while True:
        old_filenames = filenames
        filenames = set(gen_filenames())
        new_filenames = filenames - old_filenames
        # If new files were added since the last time we went through the loop,
        # add them to the kqueue.
        if new_filenames:
            # We must increase the maximum number of open file descriptors
            # because each kevent uses one file descriptor and resource limits
            # are too low by default.
            #
            # In fact there are two limits:
            # - kernel limit: `sysctl kern.maxfilesperproc` -> 10240 on OS X.9
            # - resource limit: `launchctl limit maxfiles` -> 256 on OS X.9
            #
            # The latter can be changed with Python's resource module, but it
            # can never exceed the former. Unfortunately, getrlimit(3) -- used
            # by both launchctl and the resource module -- reports no "hard
            # limit", even though the kernel sets one.
            # If project is too large or kernel limits are too tight, use polling.
            if len(filenames) >= NOFILES_KERN:
                return code_changed()
            # Add the number of file descriptors we're going to use to the current
            # resource limit, while staying within the kernel limit.
            nofiles_target = min(len(filenames) + NOFILES_SOFT, NOFILES_KERN)
            resource.setrlimit(resource.RLIMIT_NOFILE, (nofiles_target, NOFILES_HARD))
            new_descriptors = set(open(filename) for filename in new_filenames)
            descriptors |= new_descriptors
            kqueue.control([make_kevent(descriptor) for descriptor in new_descriptors], 0)
        events = kqueue.control([], 1)
        # After a request, reload the set of watched files.
        if len(events) == 1 and events[0].ident == watcher.fileno():
            continue
        # If the change affected another file, clean up and exit.
        for descriptor in descriptors:
            descriptor.close()
        watcher.close()
        kqueue.close()
        return True
def code_changed():
    """
    Polling change detector: return True as soon as any watched file's
    recorded modification time differs from its current one.  Files seen
    for the first time are recorded without triggering a reload.
    """
    global _mtimes, _win
    for filename in gen_filenames():
        file_stat = os.stat(filename)
        mtime = file_stat.st_mtime
        if _win:
            # NOTE(review): subtracting st_ctime on Windows mirrors the
            # original heuristic for timestamp quirks -- confirm intent.
            mtime -= file_stat.st_ctime
        previous = _mtimes.get(filename)
        if previous is None:
            _mtimes[filename] = mtime
            continue
        if mtime != previous:
            # A change was seen: reset the cache so everything is
            # re-recorded after the reload, and drop this file from the
            # error list if it was there.
            _mtimes = {}
            try:
                _error_files.remove(filename)
            except ValueError:
                pass
            return True
    return False
def check_errors(fn):
    """
    Wrap ``fn`` so that import-time style errors (ImportError, SyntaxError,
    etc.) raised while running it record the offending filename in
    ``_error_files`` -- keeping the broken file watched so that fixing it
    triggers a reload -- before re-raising the exception.

    The wrapped function's return value is intentionally discarded: the
    reloader only cares whether the call completed or where it failed.
    """
    from functools import wraps

    @wraps(fn)  # preserve fn's name/docstring on the wrapper
    def wrapper(*args, **kwargs):
        try:
            fn(*args, **kwargs)
        except (ImportError, IndentationError, NameError, SyntaxError,
                TypeError, AttributeError):
            et, ev, tb = sys.exc_info()
            if getattr(ev, 'filename', None) is None:
                # The exception carries no filename attribute; fall back to
                # the file of the last frame in the traceback.
                filename = traceback.extract_tb(tb)[-1][0]
            else:
                filename = ev.filename
            if filename not in _error_files:
                _error_files.append(filename)
            raise
    return wrapper
def ensure_echo_on():
    """
    Re-enable terminal echo on stdin if it was left disabled, temporarily
    ignoring SIGTTOU (where available) while changing the tty attributes.
    No-op when termios is unavailable or stdin is not a tty.
    """
    if not termios:
        return
    fd = sys.stdin
    if not fd.isatty():
        return
    attr_list = termios.tcgetattr(fd)
    if attr_list[3] & termios.ECHO:
        # Echo is already on; nothing to do.
        return
    attr_list[3] |= termios.ECHO
    if hasattr(signal, 'SIGTTOU'):
        # Ignore SIGTTOU while writing tty settings, restoring the previous
        # handler afterwards.
        old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
    else:
        old_handler = None
    termios.tcsetattr(fd, termios.TCSANOW, attr_list)
    if old_handler is not None:
        signal.signal(signal.SIGTTOU, old_handler)
def reloader_thread():
    """
    Watch for source changes using the best available mechanism and exit
    the process with status 3 (the reload signal) when one is seen.
    """
    ensure_echo_on()
    if USE_INOTIFY:
        change_detector = inotify_code_changed
    elif USE_KQUEUE:
        change_detector = kqueue_code_changed
    else:
        change_detector = code_changed
    while RUN_RELOADER:
        if change_detector():
            sys.exit(3)  # force reload
        time.sleep(1)
def restart_with_reloader():
    """
    Repeatedly re-run this script in a child process (with RUN_MAIN set so
    the child starts the reloader thread) until the child exits with a code
    other than 3; return that exit code.
    """
    while True:
        args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] + sys.argv
        if sys.platform == "win32":
            # Windows requires the arguments to be quoted for spawnve.
            args = ['"%s"' % arg for arg in args]
        new_environ = dict(os.environ, RUN_MAIN='true')
        exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)
        if exit_code != 3:
            return exit_code
def python_reloader(main_func, args, kwargs):
    """
    CPython reloader entry point.  The child process (RUN_MAIN set) runs
    main_func in a background thread while watching for changes; the
    parent just respawns the child until it exits without requesting a
    reload.
    """
    if os.environ.get("RUN_MAIN") == "true":
        thread.start_new_thread(main_func, args, kwargs)
        try:
            reloader_thread()
        except KeyboardInterrupt:
            pass
        return
    try:
        exit_code = restart_with_reloader()
        if exit_code < 0:
            # The child was killed by a signal; die the same way.
            os.kill(os.getpid(), -exit_code)
        else:
            sys.exit(exit_code)
    except KeyboardInterrupt:
        pass
def jython_reloader(main_func, args, kwargs):
    """
    Jython reloader entry point: run ``main_func`` in a new thread and poll
    for file changes, raising SystemRestart (handled by Jython itself) when
    one is detected.

    Note: ``kwargs`` is accepted for interface parity with python_reloader
    but is not forwarded to ``main_func``.
    """
    from _systemrestart import SystemRestart
    thread.start_new_thread(main_func, args)
    while True:
        if code_changed():
            raise SystemRestart
        time.sleep(1)
def main(main_func, args=None, kwargs=None):
    """
    Run ``main_func(*args, **kwargs)`` under the autoreloader appropriate
    for the current interpreter (Jython or CPython), wrapping it so that
    files failing to import keep being watched.
    """
    args = () if args is None else args
    kwargs = {} if kwargs is None else kwargs
    is_jython = sys.platform.startswith('java')
    reloader = jython_reloader if is_jython else python_reloader
    reloader(check_errors(main_func), args, kwargs)
| |
from datetime import datetime
from django.contrib import messages
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.urls import reverse
from django.utils.translation import ugettext as _
from django.utils.html import format_html
from django.utils.safestring import mark_safe
import boto3
import json
from cbhooks.models import CloudBoltHook
from extensions.views import admin_extension, tab_extension, TabExtensionDelegate
from infrastructure.models import CustomField, Server, Environment
from tabs.views import TabGroup
from utilities.decorators import dialog_view
from utilities.logger import ThreadLogger
from utilities.permissions import cbadmin_required
from utilities.templatetags import helper_tags
from xui.veeam.forms import AzureRestoreForm, VeeamEndpointForm, EC2RestoreForm
from xui.veeam.veeam_admin import VeeamManager
logger = ThreadLogger(__name__)
class VeeamDelegate(TabExtensionDelegate):
    """Tab delegate: show the Veeam tab only when a connection is configured."""

    def should_display(self):
        # The tab is only useful once Veeam connection info has been saved.
        return bool(VeeamManager().get_connection_info())
@dialog_view
def take_backup(request, server_id):
    """
    Dialog view for taking a Veeam backup of a server: GET renders the
    confirmation dialog, POST launches the "Take Veeam Backup" action as a
    job and redirects back to the server detail page.
    """
    server = Server.objects.get(id=server_id)
    if request.method == 'GET':
        # First render: confirmation dialog contents.
        return {
            'title': _("Take Backup"),
            'content': _("Are you sure you want to take a backup for this server?"),
            'use_ajax': True,
            'action_url': '/veeam/take_backup/{server_id}/'.format(server_id=server_id),
            'submit': _("Take"),
        }
    if request.method == 'POST':
        manager = VeeamManager()
        try:
            action = CloudBoltHook.objects.get(name="Take Veeam Backup")
        except Exception:
            # The hook is created on demand the first time it is needed.
            manager.setup_take_backup_action()
            action = CloudBoltHook.objects.get(name="Take Veeam Backup")
        job = action.run_as_job(server=server)[0]
        messages.success(request, mark_safe(
            f"<a href='{job.get_absolute_url()}'>Job</a> to take backup started"))
        return HttpResponseRedirect(reverse('server_detail', args=[server_id]))
@dialog_view
def restore_backup(request, server_id, restore_point_href):
    """Confirmation dialog (GET) and job launcher (POST) for restoring a
    server from the given Veeam restore point."""
    server = Server.objects.get(id=server_id)
    if request.method == 'GET':
        # Render the confirmation dialog.
        return {
            'title': _("Restore Backup"),
            'content': _("Are you sure you want to restore this backup?"),
            'use_ajax': True,
            'action_url': '/veeam/restore_backup/{server_id}/{restore_point}/'.format(restore_point=restore_point_href,
                                                                                      server_id=server_id),
            'submit': _("Restore"),
        }
    if request.method == 'POST':
        manager = VeeamManager()
        try:
            action = CloudBoltHook.objects.get(name="Restore Veeam Backup")
        except Exception:
            # Register the server action on first use.
            manager.setup_restore_backup_action()
            action = CloudBoltHook.objects.get(name="Restore Veeam Backup")
        job = action.run_as_job(server=server, restore_point_href=restore_point_href)[0]
        messages.success(request,
                         mark_safe(f"<a href='{job.get_absolute_url()}'>Job</a> to restore backup started"))
        return HttpResponseRedirect(reverse('server_detail', args=[server_id]))
@dialog_view
@cbadmin_required
def edit_veeam_endpoint(request, endpoint_id=None):
    """
    Create/edit dialog for the Veeam Server management endpoint.

    If no endpoint exists yet (and no ``endpoint_id`` is supplied) the form
    posts to the create URL; otherwise it posts to the edit URL for the
    existing endpoint.
    """
    endpoint = VeeamManager().get_connection_info()
    action_url = reverse('create_veeam_endpoint')
    if endpoint:
        action_url = reverse('edit_veeam_endpoint', args=[endpoint.id])
    elif endpoint_id:
        # Bug fix: the original tested ``endpoint or endpoint_id`` and then
        # dereferenced ``endpoint.id`` unconditionally, raising
        # AttributeError when only ``endpoint_id`` was provided.
        action_url = reverse('edit_veeam_endpoint', args=[endpoint_id])
    if request.method == 'POST':
        form = VeeamEndpointForm(request.POST, instance=endpoint)
        if form.is_valid():
            form.save()
            msg = "The Veeam Server Management Endpoint settings have been saved."
            messages.success(request, msg)
            profile = request.get_user_profile()
            logger.info("Endpoint set to {} by {}.".format(endpoint, profile.user.username))
            return HttpResponseRedirect(request.META['HTTP_REFERER'])
        # An invalid POST falls through and re-renders the bound form.
    else:
        form = VeeamEndpointForm(instance=endpoint)
    return {
        'title': 'Modify Veeam Server Management Endpoint Settings',
        'form': form,
        'use_ajax': True,
        'action_url': action_url,
        'top_content': "Veeam Server Management Endpoint, Used to support advanced backup and restoration actions",
        'submit': 'Save',
    }
@dialog_view
@cbadmin_required
def delete_veeam_endpoint(request):
    """Confirmation dialog (GET) and deletion handler (POST) for the
    stored Veeam Server endpoint."""
    endpoint = VeeamManager().get_connection_info()
    if request.method == 'POST':
        endpoint.delete()
        messages.success(request, "The Veeam Server Endpoint has been deleted.")
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
    prompt = 'Are you sure you want to delete Veeam Server endpoint \'{}\'?'.format(endpoint)
    return {
        'title': 'Remove Veeam Server Manager Endpoint?',
        'content': prompt,
        'use_ajax': True,
        'action_url': reverse('delete_veeam_endpoint'),
        'submit': 'Remove'
    }
@dialog_view
@cbadmin_required
def verify_veeam_endpoint(request):
    """
    Attempt a connection to the configured Veeam endpoint and report the
    outcome in a dialog.
    """
    veeam = VeeamManager()
    endpoint = veeam.get_connection_info()
    if not endpoint:
        # Bug fix: django.contrib.messages has no ``warn`` function; the
        # original ``messages.warn(...)`` raised AttributeError.  The
        # correct call is ``messages.warning``.
        messages.warning(
            request, "No Veeam Endpoint found! Nothing to verify")
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
    try:
        veeam.verify_connection()
    except Exception as err:
        msg = format_html('Could not make a connection to the Veeam Server Manager at'
                          '<b>"{}"</b>:<br>{}', endpoint, str(err))
    else:
        msg = format_html('Successfully connected to the Veeam Server Manager at '
                          '<b>"{}"</b>.', endpoint)
    # Informational dialog only: no submit button, just an OK/cancel.
    return {
        'title': 'Verify connection to Veeam Server Manager Endpoint',
        'content': msg,
        'submit': None,
        'cancel': "OK",
    }
@dialog_view
def restore_backup_to_cloud(request, backup_name):
    """Dialog (GET) and job launcher (POST) restoring the named Veeam
    backup to Microsoft Azure via the "Restore Veeam Backup To Azure"
    CloudBolt action, which runs on the Veeam server itself."""
    if request.method == 'GET':
        content = _("Provide the information below to restore to Microsoft Azure.")
        form = AzureRestoreForm()
        return {
            'title': _("Restore Backup"),
            'content': content,
            'form': form,
            'use_ajax': True,
            'action_url': '/veeam/restore_backup_to_cloud/{backup_name}/'.format(backup_name=backup_name),
            'submit': _("Restore"),
        }
    if request.method == 'POST':
        veeam_manager = VeeamManager()
        form = AzureRestoreForm(request.POST)
        if form.is_valid():
            # Script context handed to the restore action's job.
            context = {
                'vmname': form.cleaned_data['vm_name'],
                'backup_name': backup_name,
                'network_name': form.cleaned_data['network_name'],
                'vm_size': form.cleaned_data['vm_size'],
                'location': form.cleaned_data['location'],
                'storage_account': form.cleaned_data['storage_account'],
                'resource_group': form.cleaned_data['resource_group'],
            }
            try:
                restore_backup_action = CloudBoltHook.objects.get(name="Restore Veeam Backup To Azure")
            except Exception:
                # Register the action on first use.
                veeam_manager.setup_restore_backup_to_azure_action()
                restore_backup_action = CloudBoltHook.objects.get(name="Restore Veeam Backup To Azure")
            # Server running this job will be the veeam server. We can find it using the IP address in the
            # connection Info
            ip = veeam_manager.get_connection_info().ip
            try:
                server = Server.objects.get(ip=ip)
            except Exception:
                # No server associated with the connection info IP address exists.
                messages.error(request, "The Veeam server could not be found.")
                return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
            restore_backup_to_azure_job = restore_backup_action.run_as_job(server=server, script_context=context)[0]
            messages.success(request,
                             mark_safe(
                                 f"<a href='{restore_backup_to_azure_job.get_absolute_url()}'>Job {restore_backup_to_azure_job.id}</a> to restore backup started"))
        else:
            # NOTE(review): invalid form data raises instead of re-rendering
            # the form — confirm this is the intended UX.
            raise Exception(form.errors)
        return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
def get_ec2_client(env):
    """Build a boto3 EC2 client for the environment's region, using the
    credentials stored on its resource handler."""
    handler = env.resource_handler
    return boto3.client(
        'ec2',
        region_name=env.aws_region,
        aws_access_key_id=handler.serviceaccount,
        aws_secret_access_key=handler.servicepasswd,
    )
def get_aws_vpc(request):
    """AJAX endpoint returning a JSON list of the VPC id of every EC2
    instance in the environment given by ?env_id= (empty list if absent)."""
    env_id = request.GET.get('env_id')
    if not env_id:
        return HttpResponse(json.dumps([]))
    env = Environment.objects.get(id=env_id)
    ec2 = get_ec2_client(env)
    # One VpcId per instance, across all reservations.
    vpc_ids = [
        instance.get('VpcId')
        for reservation in ec2.describe_instances().get('Reservations')
        for instance in reservation.get('Instances')
    ]
    return HttpResponse(json.dumps(vpc_ids))
def get_aws_security_groups(request):
    """AJAX endpoint returning a JSON list of security-group ids for the
    environment given by ?env_id= (empty list if absent)."""
    env_id = request.GET.get('env_id')
    if not env_id:
        return HttpResponse(json.dumps([]))
    env = Environment.objects.get(id=env_id)
    ec2 = get_ec2_client(env)
    group_ids = [group.get('GroupId')
                 for group in ec2.describe_security_groups().get('SecurityGroups')]
    return HttpResponse(json.dumps(group_ids))
def get_aws_availability_zones(request):
    """AJAX endpoint returning a JSON list of availability-zone names for
    the environment given by ?env_id= (empty list if absent)."""
    env_id = request.GET.get('env_id')
    if not env_id:
        return HttpResponse(json.dumps([]))
    env = Environment.objects.get(id=env_id)
    ec2 = get_ec2_client(env)
    zone_names = [zone.get('ZoneName')
                  for zone in ec2.describe_availability_zones().get('AvailabilityZones')]
    return HttpResponse(json.dumps(zone_names))
@dialog_view
def restore_backup_to_ec2_cloud(request, backup_name):
    """Dialog (GET) and job launcher (POST) restoring the named Veeam
    backup to AWS EC2.

    GET renders the EC2RestoreForm along with inline jQuery that refills
    the VPC / security-group / availability-zone selects from the AJAX
    endpoints whenever the environment selection changes.  POST launches
    the "Restore Veeam Backup To EC2" action as a job on the Veeam server.
    """
    if request.method == 'GET':
        content = _("Provide the information below to restore to EC2")
        form = EC2RestoreForm()
        return {
            'title': _("Restore Backup"),
            'content': content,
            'form': form,
            'use_ajax': True,
            'action_url': reverse('restore_to_ec2_cloud', kwargs={"backup_name": backup_name}),
            'submit': _("Restore"),
            'extra_onready_js': mark_safe("""
            $('#div_id_environment').on('change',(function () {
                var env_id = $('#div_id_environment option:selected').val();
                $.getJSON('%s?env_id='+env_id, function (vpc_ids) {
                    var $selectElement = $("#id_vpc_id");
                    $selectElement.empty();
                    $.each(vpc_ids, function (key, value) {
                        $selectElement.append($("<option></option>").attr("value", value).text(value));
                    });
                });
                $.getJSON('%s?env_id='+env_id, function (sg_ids) {
                    var $securityGroupElement = $("#id_sgroup_name");
                    $securityGroupElement.empty();
                    $.each(sg_ids, function (key, value) {
                        $securityGroupElement.append($("<option></option>").attr("value", value).text(value));
                    });
                });
                $.getJSON('%s?env_id='+env_id, function (availability_zones) {
                    var $availabilityZoneElement = $("#id_availability_zone");
                    $availabilityZoneElement.empty();
                    $.each(availability_zones, function (key, value) {
                        $availabilityZoneElement.append($("<option></option>").attr("value", value).text(value));
                    });
                });
            })).change();
        """ % (reverse('get_aws_vpc'), reverse('get_aws_security_groups'),
               reverse('get_aws_availability_zones')))
        }
    if request.method == 'POST':
        veeam_manager = VeeamManager()
        form = EC2RestoreForm(request.POST)
        # Since the choices fields have not been declared during form creation, we need to dynamically declare them.
        vpc_id = request.POST.get('vpc_id')
        sgroup_name = request.POST.get('sgroup_name')
        availability_zone = request.POST.get('availability_zone')
        form.fields['vpc_id'].choices = [(vpc_id, vpc_id)]
        form.fields['sgroup_name'].choices = [(sgroup_name, sgroup_name)]
        form.fields['availability_zone'].choices = [(availability_zone, availability_zone)]
        if form.is_valid():
            environment = form.cleaned_data['environment']
            env = Environment.objects.get(id=environment)
            resource_handler = env.resource_handler
            # Script context handed to the restore action's job.
            context = {
                'vm_name': form.cleaned_data['vm_name'],
                'environment': environment,
                'backup_name': backup_name,
                'amazon_access_key': resource_handler.serviceaccount,
                'region_name': env.aws_region,
                'region_type': form.cleaned_data['region_type'],
                'disk_type': form.cleaned_data['disk_type'],
                'instance_type': form.cleaned_data['instance_type'],
                'license_type': form.cleaned_data['license_type'],
                'vpc_id': form.cleaned_data['vpc_id'],
                'sgroup_name': form.cleaned_data['sgroup_name'],
                'reason': form.cleaned_data['reason'],
                'availability_zone': form.cleaned_data['availability_zone'],
            }
            try:
                restore_backup_action = CloudBoltHook.objects.get(name="Restore Veeam Backup To EC2")
            except Exception:
                # Register the action on first use.
                veeam_manager.setup_restore_backup_to_ec2__action()
                restore_backup_action = CloudBoltHook.objects.get(name="Restore Veeam Backup To EC2")
            # Server running this job will be the veeam server. We can find it using the IP address in the connection Info
            ip = veeam_manager.get_connection_info().ip
            try:
                server = Server.objects.get(ip=ip)
            except Exception as error:
                # No server associated with the connection info IP address exists.
                messages.error(request, "The Veeam server could not be found.")
                return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
            restore_backup_job = restore_backup_action.run_as_job(server=server, script_context=context)[0]
            messages.success(request,
                             mark_safe(
                                 f"<a href='{restore_backup_job.get_absolute_url()}'>Job {restore_backup_job.id}</a> to restore backup started"))
        else:
            # NOTE(review): invalid form data raises instead of re-rendering
            # the form — confirm this is the intended UX.
            raise Exception(form.errors)
        return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
@dialog_view
def install_agent(request, server_id):
    """Confirmation dialog (GET) and job launcher (POST) installing the
    Veeam agent on the given server."""
    server = Server.objects.get(id=server_id)
    veeam = VeeamManager()
    if request.method == 'GET':
        # Render the confirmation dialog.
        return {
            'title': _("Install Agent?"),
            'content': _("Are you sure you want to install Veeam Agent on this server?"),
            'use_ajax': True,
            'action_url': '/veeam/install_agent/{server_id}/'.format(server_id=server_id),
            'submit': _("Install"),
        }
    if request.method == 'POST':
        try:
            action = CloudBoltHook.objects.get(name="Install Veeam Agent")
        except Exception:
            # Register the server action on first use.
            veeam.setup_veeam_install_agent_action()
            action = CloudBoltHook.objects.get(name="Install Veeam Agent")
        job = action.run_as_job(server=server)[0]
        messages.success(request, mark_safe(
            f"<a href='{job.get_absolute_url()}'>Job {job.id}</a> to install agent started"))
        return HttpResponseRedirect(reverse('server_detail', args=[server_id]))
@dialog_view
def refresh_agent(request, server_id):
    """
    Re-check whether the given server has a Veeam agent and report the
    result as a flash message before returning to the server detail page.
    """
    veeam = VeeamManager()
    server = Server.objects.get(id=server_id)
    # refresh_server re-queries Veeam and returns the updated server record.
    server = veeam.refresh_server(server, {'server': server, 'veeam_server': veeam.get_connection_info()})
    agent_found = bool(server.veeam_agent_id)
    notify = messages.success if agent_found else messages.warning
    notify(request, "Veeam Agent Found" if agent_found else "No Veeam Agent Found")
    return HttpResponseRedirect(reverse('server_detail', args=[server_id]))
@tab_extension(model=Server, title='Veeam', delegate=VeeamDelegate)
def veeam_tab(request, obj_id):
    """Render the "Veeam" tab on the server details page.

    Gathers restore points, agent-install status, power status and the
    latest backup/install/restore jobs into the context for
    veeam/templates/server_tab.html.
    """
    server = Server.objects.get(id=obj_id)
    veeam = VeeamManager()
    restore_points = veeam.get_restore_points(server.hostname)
    if restore_points:
        # Newest restore point first.
        restore_points.sort(key=lambda r: datetime.strptime(
            r.get('time'), '%Y-%m-%d %H:%M:%S'), reverse=True)
    is_agent_installed = veeam.should_install_agent(server)
    take_backup_job = server.jobs.filter(job_parameters__hookparameters__hook__name='Take Veeam Backup').last()
    context = {}
    backup_job_running = False
    if take_backup_job:
        if take_backup_job.is_active():
            backup_job_running = True
            context.update({'take_backup_job_url': take_backup_job.get_absolute_url()})
        context.update({'backup_job_running': backup_job_running})
    if not context:
        # Check if server is powered on.
        if server.power_status == 'POWERON':
            # This server has no agent installed
            context.update({'install_agent': False})
            context.update({'power_status': True})
        else:
            context.update({'power_status': False})
            status = 'warning'
            msg = "Veeam might be installed in this server but the server is powered off"
            server_not_powered_on = helper_tags.alert(status, msg)
            context.update(
                {'server_not_powered_on': server_not_powered_on})
    else:
        context.update({'install_agent': True})
    context.update({'server_id': obj_id})
    server_settings_ok = check_server_settings_status(server)
    if not server_settings_ok:
        # Warn that remote script execution (agent install) may be impossible.
        status = 'warning'
        msg = "Veeam agent is not installed and the server username and password are not correctly setup. This might make it imposible to install the agent on this server from cloudbolt. You can configure them on the Configuration page on the server details tab. "
        server_credentials_not_set = helper_tags.alert(status, msg)
        context.update(
            {'server_credentials_not_set': server_credentials_not_set})
    install_job = server.jobs.filter(job_parameters__hookparameters__hook__name='Install Veeam Agent').last()
    install_job_running = False
    if install_job:
        if install_job.is_active():
            install_job_running = True
            context.update({'job_url': install_job.get_absolute_url()})
        context.update({'job_running': install_job_running})
    restore_job = server.jobs.filter(job_parameters__hookparameters__hook__name='Restore Veeam Backup').last()
    restore_job_running = False
    if restore_job:
        if restore_job.is_active():
            restore_job_running = True
            context.update({'restore_job_url': restore_job.get_absolute_url()})
        context.update({'restore_job_running': restore_job_running})
    # NOTE(review): the 'install_agent' flag set earlier is overwritten here
    # with should_install_agent()'s value — confirm which one the template expects.
    context.update({'server': server, 'restore_points': restore_points, 'install_agent': is_agent_installed})
    return render(request, 'veeam/templates/server_tab.html', context)
@admin_extension(title='Veeam Management Integration', description='Admin tab to show available backups and jobs')
def admin_page(request):
    """Admin page with Dashboard / Jobs / Backups tabs for the Veeam
    integration, plus links to any running restore-to-cloud jobs.  Shows a
    bare dashboard when no connection endpoint is configured yet."""
    veeam = VeeamManager()
    endpoint = veeam.get_connection_info()
    # If no Connection info, show a dialog for adding a connection info.
    if not endpoint:
        veeam_context = {
            'tabs': TabGroup(
                template_dir='veeam/templates',
                context={},
                request=request,
                tabs=[
                    (_(""), 'dashboard', dict(context={}))
                ],
            )
        }
        return render(request, 'veeam/templates/admin_page.html', veeam_context)
    jobs = veeam.get_jobs()
    backups = veeam.get_backups()
    summary = veeam.get_summary()
    context = {}
    # The Veeam server is identified by the IP stored in the connection info.
    ip = veeam.get_connection_info().ip
    try:
        server = Server.objects.get(ip=ip)
    except Exception:
        messages.error(request, message="The server running Veeam could not be found.")
        return render(request, 'veeam/templates/admin_page.html', {})
    # Surface a link to any in-flight restore-to-EC2 job.
    restore_to_ec2_job = server.jobs.filter(
        job_parameters__hookparameters__hook__name='Restore Veeam Backup To EC2').last()
    restore_to_ec2_job_running = False
    if restore_to_ec2_job and restore_to_ec2_job.is_active():
        restore_to_ec2_job_running = True
        context.update({'restore_to_ec2_job_url': restore_to_ec2_job.get_absolute_url()})
    context.update({'restore_to_ec2_job_running': restore_to_ec2_job_running})
    # Likewise for any in-flight restore-to-Azure job.
    restore_to_azure_job = server.jobs.filter(
        job_parameters__hookparameters__hook__name='Restore Veeam Backup To Azure').last()
    restore_to_azure_job_running = False
    if restore_to_azure_job and restore_to_azure_job.is_active():
        restore_to_azure_job_running = True
        context.update({'restore_to_azure_job_url': restore_to_azure_job.get_absolute_url()})
    context.update({'restore_to_azure_job_running': restore_to_azure_job_running})
    context.update({'jobs': jobs, 'backups': backups, 'endpoint': endpoint})
    veeam_context = {
        'tabs': TabGroup(
            template_dir='veeam/templates',
            context=context,
            request=request,
            tabs=[
                # First tab uses template 'groups/tabs/tab-main.html'
                # (_("Configuration"), 'configuration', {}),
                # Tab 2 is conditionally-shown in this slot and
                # uses template 'groups/tabs/tab-related-items.html'
                (_("Dashboard"), 'dashboard', dict(context=summary)),
                (_("Jobs"), 'jobs', {}),
                (_("Backups"), 'backups', {})
            ],
        )
    }
    return render(request, 'veeam/templates/admin_page.html', veeam_context)
def check_server_settings_status(server):
    """Return True when the server record has a password set.

    Without stored credentials, a connection info cannot be used to execute
    scripts on the server (e.g. to install the Veeam agent).
    NOTE(review): only the password is checked here although the UI warning
    also mentions the username — confirm whether username should be
    validated too.
    """
    return server.password is not None
| |
##########################################################################
#
# Copyright 2012, Electric Theatre Collective Limited. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import subprocess
import unittest
import IECore
import IECoreMantra
_dir = os.path.dirname( __file__ )
class RendererTest( unittest.TestCase ):
	"""Exercises IECoreMantra.Renderer: direct rendering, IFD generation,
	procedural geometry, option declaration and shader parameter output.
	Requires a working mantra installation and reference images in data/."""
	def __greenSquare( self, r ):
		# Declares a constant green surface shader and a simple
		# two-face linear mesh on the supplied renderer.
		r.shader( "surface", "constant", { "Cd": IECore.V3fData( IECore.V3f( 0, 1, 0 ) ) } )
		r.mesh(
			IECore.IntVectorData( [ 4, 4 ] ),
			IECore.IntVectorData( [ 0, 1, 2, 3, 3, 2, 4, 5 ] ),
			"linear",
			{
				"P" :IECore.PrimitiveVariable(
					IECore.PrimitiveVariable.Interpolation.Vertex,
					IECore.V3fVectorData( [ IECore.V3f( 0, 0, 0 ), IECore.V3f( 0, 1, 0 ),
						IECore.V3f( 1, 1, 0 ), IECore.V3f( 1, 0, 0 ),
						IECore.V3f( 2, 1, 0 ), IECore.V3f( 2, 0, 0 ) ] )
				)
			}
		)
	def testTypeId( self ):
		# Instances report the class's static type id, which must differ
		# from the base IECore.Renderer id.
		self.assertEqual( IECoreMantra.Renderer().typeId(), IECoreMantra.Renderer.staticTypeId() )
		self.assertNotEqual( IECoreMantra.Renderer.staticTypeId(), IECore.Renderer.staticTypeId() )
	def testTypeName( self ):
		r = IECoreMantra.Renderer()
		self.assertEqual( r.typeName(), "IECoreMantra::Renderer" )
	def testWorldMesh( self ):
		# test that the ieworld procedural picks up the cache file written by worldEnd() and renders correctly
		r = IECoreMantra.Renderer()
		r.display(
			_dir + "/output/testWorldMesh.tif" ,
			"tiff",
			"rgba",
			{ "variable": "Cf+Af", "vextype": "vector4", "channel": "C" }
		)
		m = IECore.M44f().translate( IECore.V3f(0,0,6) )
		r.camera( "main", { "projection": "perspective", "transform": m } )
		r.worldBegin()
		self.__greenSquare( r )
		r.worldEnd()
		del r
		# compare against the checked-in reference image
		imageCreated = IECore.Reader.create( _dir + "/output/testWorldMesh.tif" ).read()
		expectedImage = IECore.Reader.create( _dir + "/data/testWorldMesh.tif" ).read()
		self.assertEqual(
			IECore.ImageDiffOp()( imageA=imageCreated, imageB=expectedImage, maxError=0.01 ),
			IECore.BoolData( False )
		)
	def testIfdGen( self ):
		# the image generated by this scene should be identical to the output of testWorldMesh()
		ifd = _dir + "/output/testIfdGen.ifd"
		r = IECoreMantra.Renderer( ifd )
		r. display(
			_dir + "/output/testIfdGen.tif",
			"tiff",
			"rgba",
			{ "variable": "Cf+Af", "vextype": "vector4", "channel": "C" }
		)
		m = IECore.M44f().translate( IECore.V3f(0,0,6) )
		r.camera( "main", { "projection": "perspective", "transform": m } )
		r.worldBegin()
		self.__greenSquare( r )
		r.worldEnd()
		del r
		self.assertTrue( os.path.isfile( ifd ) )
		# render the generated ifd with a real mantra process
		p = subprocess.Popen( ['mantra'], stdin=open( ifd ), stdout=subprocess.PIPE)
		p.communicate()
		imageCreated = IECore.Reader.create( _dir + "/output/testIfdGen.tif" ).read()
		expectedImage = IECore.Reader.create( _dir + "/data/testWorldMesh.tif" ).read()
		self.assertEqual(
			IECore.ImageDiffOp()( imageA=imageCreated, imageB=expectedImage, maxError=0.01 ),
			IECore.BoolData( False )
		)
	def __renderGeometry( self ):
		# Renders the sphereProcedural via the generic ieprocedural geometry hook.
		r = IECoreMantra.Renderer()
		r.display(
			_dir + "/output/testGeometry.tif",
			"tiff",
			"rgba",
			{ "variable": "Cf+Af", "vextype": "vector4", "channel": "C" }
		)
		m = IECore.M44f().translate( IECore.V3f(0,0,6) )
		r.camera( "main", { "projection": "perspective", "transform": m } )
		r.worldBegin()
		r.geometry(
			"ieprocedural",
			{"className": "sphereProcedural", "classVersion": 1, "parameterString": ""},
			{}
		)
		r.worldEnd()
		del r
	def testGeometry( self ):
		self.__renderGeometry()
		imageCreated = IECore.Reader.create( _dir + "/output/testGeometry.tif" ).read()
		expectedImage = IECore.Reader.create( _dir + "/data/testGeometry.tif" ).read()
		self.assertEqual(
			IECore.ImageDiffOp()( imageA=imageCreated, imageB=expectedImage, maxError=0.01 ),
			IECore.BoolData( False )
		)
	def testVrayIncludes( self ):
		# test that mantra can find VRAY_ieProcedural.so and VRAY_ieWorld.so
		p = subprocess.Popen( ['mantra', '-V8'], stdin=open('/dev/null'), stdout=subprocess.PIPE )
		out = p.communicate()[0]
		self.assertTrue( out )
		self.failUnless( "Registering procedural 'ieprocedural'" in out )
		self.failUnless( "Registering procedural 'ieworld'" in out )
	def testOptions( self ):
		# Options set before worldBegin should appear as ray_declare
		# statements in the generated ifd.
		ifd = _dir + "/output/testOptions.ifd"
		r = IECoreMantra.Renderer( ifd )
		r.setOption( "itest", IECore.IntData(42) );
		r.setOption( "ftest", IECore.FloatData(1.23) );
		r.setOption( "v3ftest", IECore.V3f(1,0,0) );
		r.setOption( "stringtest", IECore.StringData("hello") );
		r.worldBegin()
		r.worldEnd()
		del r
		l = "".join( file( ifd ).readlines() ).replace( "\n", "" )
		self.failUnless( 'ray_declare global int itest 42' in l )
		self.failUnless( 'ray_declare global float ftest 1.23' in l )
		self.failUnless( 'ray_declare global vector3 v3ftest 1 0 0' in l )
		self.failUnless( 'ray_declare global string stringtest "hello"' in l )
	def testShaderParameters( self ):
		# Test the shader parameters end up in the scene.. you would expect them in
		# ifd but because everything post-world is stored in a side-car .cob file
		# we look for that instead and check the shader invocation string is on the
		# top of the render state.
		ifd = _dir + "/output/testShaderParameters.ifd"
		r = IECoreMantra.Renderer( ifd )
		r.worldBegin()
		r.shader("surface", "testshader",
			{
				"p1": IECore.IntData(11),
				"p2": IECore.FloatData(1.234),
				"p3": IECore.StringData("hello"),
				"p4": IECore.V3fData( IECore.V3f(1,2,3) ),
				"p5": IECore.Color3fData( IECore.Color3f(1,0,0) ),
			}
		)
		r.worldEnd()
		del r
		self.failUnless( os.path.exists( ifd ) )
		self.failUnless( os.path.exists( ifd + ".ieworld.cob" ) )
		world = IECore.Reader.create( ifd + ".ieworld.cob" ).read()
		self.assertTrue( world )
		self.assertEquals( world.typeId(), IECore.Group.staticTypeId() )
		self.assertTrue( world.state() )
		self.assertEquals(
			world.state()[0].attributes[':surface'],
			IECore.StringData( 'testshader p2 1.234 p3 "hello" p1 11 p4 1 2 3 p5 1 0 0 ')
		)
	def tearDown( self ):
		# Remove any files produced by the tests above.
		files = [
			_dir + "/output/testGeometry.tif",
			_dir + "/output/testWorldMesh.tif",
			_dir + "/output/testIfdGen.tif",
			_dir + "/output/testIfdGen.ifd",
			_dir + "/output/testIfdGen.ifd.ieworld.cob",
			_dir + "/output/testOptions.ifd",
			_dir + "/output/testOptions.ifd.ieworld.cob",
			_dir + "/output/testShaderParameters.ifd",
			_dir + "/output/testShaderParameters.ifd.ieworld.cob",
		]
		for f in files:
			if os.path.exists( f ):
				os.remove( f )
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| |
# Feature extraction code
# Header pending
import os
import time
import subprocess
import csv
import shelve
import re
import sys
import pickle
import geneontology
#import pdb
import time
import ents
import ppipred
import irefindex
def verbosecheck(verbose):
    '''Return a printing function selected by the verbose flag.

    When verbose is true the returned v_print prints its arguments,
    space-separated, to stdout; otherwise it is a no-op.
    '''
    if verbose:
        def v_print(*args):
            '''Print all arguments to stdout, space-separated,
            followed by a newline (Python 2 print statement).'''
            for argument in args:
                print argument,
            print
    else:
        def v_print(*args):
            # intentional no-op: the bare expression does nothing
            None
    return v_print
class FeatureVectorAssembler():
'''Assembles feature vectors from protein pair files, data source lists
and gold standard protein pair lists.'''
def __init__(self, sourcetab, verbose=False):
# Instantiate protein pair parsers
# first parse the data source table
# store the directory of the table and it's name
self.sourcetabdir, self.tabfile = os.path.split(sourcetab)
v_print = verbosecheck(verbose)
v_print("Using {0} from top data directory {1}.".format(self.sourcetabdir,
self.tabfile))
# open the table and parse for initialisation options
c = csv.reader(open(sourcetab), delimiter="\t")
# iterate over lines adding to list of protein pair parsers
v_print("Reading data source table:")
self.parserinitlist = []
for line in c:
# store the information in a dictionary
d = {}
d["data path"] = os.path.join(self.sourcetabdir, line[0])
d["output path"] = os.path.join(self.sourcetabdir, line[1])
# store options in a dictionary in the dictionary
# if there are options
d["options"] = {}
if line[2] != "":
options = line[2].split(";")
for x in options:
# split each option to find out which option it is:
x = x.split("=")
# store it in the dictionary
# if there are invalid options this code
# WILL NOT DETECT THEM
d["options"][x[0]] = x[1]
# update the script directory
if "script" in d["options"].keys():
d["options"]["script"] = os.path.join(self.sourcetabdir,
d["options"]["script"])
# parse protindexes and valindexes:
if "protindexes" in d["options"].keys():
d["options"]["protindexes"] = tuple(int(v) for v in re.findall("[0-9]+", d["options"]["protindexes"]))
if "valindexes" in d["options"].keys():
d["options"]["valindexes"] = tuple(int(v) for v in re.findall("[0-9]+", d["options"]["valindexes"]))
# copy the dictionary into the list
v_print("\t"+"Data source: {0} to be processed to {1}".format(d["data path"],
d["output path"]))
self.parserinitlist.append(d.copy())
# then initialise each of these parsers and keep them in a list
self.parserlist = []
v_print("Initialising parsers.")
for parser in self.parserinitlist:
self.parserlist.append(ProteinPairParser(parser["data path"],
parser["output path"],
verbose=verbose,
**parser["options"]))
# have to initialise mcount here
self.mcount = None
v_print("Finished Initialisation.")
return None
def regenerate(self, force=False, verbose=False):
'''Calls all known protein parsers and gets them to regenerate their
output, if they have to.'''
v_print = verbosecheck(verbose)
v_print("Regenerating parsers:")
for parser in self.parserlist:
v_print("\t parser {0}".format(self.parserlist.index(parser)))
parser.regenerate(force, verbose)
return None
def checkfeaturesizes(self,pairs,verbose=False):
"""Check the length of each feature so that missing values can be
padded to the correct length."""
v_print = verbosecheck(verbose)
v_print("Checking feature sizes:")
# check size of feature in each file
# will be important later
featuresizes = {}
for parser, i in zip(self.parserlist, range(len(self.parserlist))):
#try to get an example feature
examplefeature = None
for pair in pairs:
try:
examplefeature = parser[pair]
#if we got a feature then break
break
except KeyError:
#keep trying
pass
#check we actually got an example feature
if examplefeature == None:
# should probably not include a feature that's going to be all missing values
del self.parserlist[i]
v_print("\t Feature from {0} does not map to these protein pairs.".format(parser.datadir))
else:
#then we've got a feature so we should see what size it is
featuresizes[parser.datadir] = len(examplefeature)
v_print("\t Data source {0} produces features of size {1}.".format(parser.datadir,
featuresizes[parser.datadir]))
return featuresizes
def assemble(self, pairfile, outputfile, pairlabels=False, delim="\t",
missinglabel="missing", verbose=False):
'''Assembles a file of feature vectors for each protein pair in a
protein pair file supplied.
Assumes the pairfile is tab delimited.'''
v_print = verbosecheck(verbose)
v_print("Reading pairfile: {0}".format(pairfile))
# first parse the pairfile into a list of frozensets
pairs = map(lambda l: frozenset(l), csv.reader(open(pairfile), delimiter="\t"))
# open the file to put the feature vector in
c = csv.writer(open(outputfile, "w"), delimiter=delim)
# checking feature sizes
featuresizes = self.checkfeaturesizes(pairs,verbose=verbose)
if verbose:
sys.stdout.write("Writing feature vectors")
lcount = 0
# counters for each database reporting numbers of missing values
self.mcount = {}
for parser in self.parserlist:
self.mcount[parser.datadir] = 0
# then iterate through the pairs, querying all parser databases and building a list of rows
rows = []
for pair in pairs:
row = self.getfeaturevector(pair,featuresizes,pairlabels=pairlabels,missinglabel=missinglabel)
c.writerow(row)
if verbose:
lcount = lcount+1
if lcount % 10000 == 0:
sys.stdout.write(".")
if verbose:
sys.stdout.write("\n")
print "Wrote {0} vectors.".format(lcount)
for parser in self.parserlist:
percentage_match = 100.0 - 100.0 * self.mcount[parser.datadir] / lcount
print "Matched {0:.2f} % of protein pairs in {1} to features from {2}".format(percentage_match,
pairfile,
parser.datadir)
return None
def getfeaturevector(self,pair,featuresizes,pairlabels=False,missinglabel="missing",mcount=None):
"""Produces a single feature vector to be written to a file in the assemble method."""
row = []
if pairlabels is True:
lpair = list(pair)
if len(lpair) == 1:
lpair = lpair * 2
row = row + lpair
for parser in self.parserlist:
# if there are features there then append them to the row
try:
row = row + parser[pair]
except KeyError:
row = row + [missinglabel] * featuresizes[parser.datadir]
if self.mcount:
self.mcount[parser.datadir] += 1
return row
def close(self, verbose=False):
v_print = verbosecheck(verbose)
for parser in self.parserlist:
if parser.db != None:
parser.close()
v_print("{0} closed".format(parser.outdir))
return None
class ProteinPairDB(shelve.DbfilenameShelf):
    '''A simple database for protein pairs using shelve.

    Keys are frozensets of one or two protein identifiers.  They are
    flattened to tab-joined strings before being handed to shelve, and
    lookups try both orderings so the pair behaves as unordered.
    '''
    def __setitem__(self, key, value):
        # flatten the frozenset key into shelve's string key format
        members = list(key)
        if len(members) == 1:
            # single-member set: the identifier string is doubled
            flat = members[0] * 2
        else:
            flat = members[0] + "\t" + members[1]
        shelve.DbfilenameShelf.__setitem__(self, flat, value)
        return None
    def __getitem__(self, key):
        # build both candidate orderings of the pair
        members = list(key)
        if len(members) == 1:
            forward = members[0] * 2
            backward = members[0] * 2
        else:
            forward = members[0] + "\t" + members[1]
            backward = members[1] + "\t" + members[0]
        try:
            return shelve.DbfilenameShelf.__getitem__(self, forward)
        except KeyError:
            # the pair may have been stored in the reversed order;
            # a miss here raises KeyError as usual
            return shelve.DbfilenameShelf.__getitem__(self, backward)
    def keys(self):
        # convert shelve's string keys back into frozensets
        ks = shelve.DbfilenameShelf.keys(self)
        return map(lambda x: frozenset(x.split("\t")), ks)
class ProteinPairParser():
'''Does simple parsing on data files to produce protein pair files with feature values'''
def __init__(self,
datadir,
outdir,
protindexes=(0, 1),
valindexes=[2],
script=None,
csvdelim="\t",
ignoreheader=0,
generator=False,
verbose=False,
zeromissing=0,
zeromissinginternal=0,
interpolator=False,
interpolatordata=False,
fillmissing=False):
v_print = verbosecheck(verbose)
# first, store the initialisation
self.datadir = datadir
self.outdir = outdir
self.protindexes = protindexes
# had to hack this together from the list above
# passing tuple in as default did not work
self.valindexes = tuple(valindexes)
self.script = script
self.csvdelim = csvdelim
self.ignoreheader = ignoreheader
self.zeromissing = zeromissing
self.zeromissinginternal = zeromissinginternal
if generator:
#then open up this pickle file
f = open(generator)
self.generator = pickle.load(f)
f.close()
self.db = None
else:
#otherwise open the database that is assumed to exist
self.generator = None
self.db = openpairshelf(self.outdir)
#check if this is a new database
try:
v_print("Database {0} last updated {1}".format(self.outdir,time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(self.db["last written"]))))
except KeyError:
v_print("Database {0} may be empty, must be updated, please run regenerate.".format(self.outdir))
#get a list of the proteins in the keys of the database
self.proteins = set([x for y in self.db.keys() for x in y])
if interpolator:
#load it
f = open(interpolator)
self.interpolator = pickle.load(f)
f.close()
#and then try to load its data
self.interpolatordata = FeatureVectorAssembler(interpolatordata)
if fillmissing:
#load it
f = open(fillmissing)
self.fillmissing = pickle.load(f)
f.close()
else:
self.fillmissing = False
return None
def regenerate(self, force=False, verbose=False):
'''Regenerate the pair file from the data source
if the data source is newer than the pair file'''
v_print = verbosecheck(verbose)
if self.generator == None:
# so first check the age of the data file
datamtime = os.stat(self.datadir)[-2]
# check if the database file has ever been written to before
try:
v_print("Database {0} last updated {1}".format(self.outdir,time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(self.db["last written"]))))
except KeyError:
#if not make sure it goes through the next if statement
self.db["last written"] = 0
# if the data modification time is greater than last time we wrote to the database
if datamtime > self.db["last written"] or force is True:
# now regenerate the data file according to the options defined above:
if verbose and datamtime > self.db["last written"]:
if self.db["last written"] == 0:
print "Database file may be empty, regenerating at {0} from {1}.".format(self.outdir, self.datadir)
else:
print "Data file {0} is newer than processed database {1}, regenerating.".format(self.datadir, self.outdir)
if verbose and force:
print "Forcing regeneration of database {0} from data file {1}.".format(self.outdir, self.datadir)
# if there's a script to run
if self.script is not None:
v_print("Executing script: {0}.".format(self.script))
# then execute the script
retcode = subprocess.call("python2 {0}".format(self.script), shell=True)
v_print("Script returned: {0}".format(retcode))
# open the data file
c = csv.reader(open(self.datadir), delimiter=self.csvdelim)
# if the header should be ignored then ignore it
if self.ignoreheader == "1":
v_print("Ignoring header.")
c.next()
if verbose:
sys.stdout.write("Filling database")
lcount = 0
for line in c:
# each line use the protein pair as a key
# by formatting it as a frozenset
pair = frozenset([line[self.protindexes[0]], line[self.protindexes[1]]])
# and the value is indexed by valindexes
values = []
for i in self.valindexes:
values.append(line[i])
self.db[pair] = values[:]
if verbose:
lcount = lcount + 1
if lcount % 1000 == 0:
sys.stdout.write(".")
if verbose:
sys.stdout.write("\n")
print "Parsed {0} lines.".format(lcount)
# add the current time to the database "last written" entry:
self.db["last written"] = time.time()
else:
v_print("Custom generator function, no database to regenerate.")
return None
def __getitem__(self,key):
if self.generator != None:
#try and read a key from the custom generator
return self.generator[key]
else:
#read key from database
try:
return self.db[key]
except KeyError:
#catch the key error and check if this should be zeroed.
if self.zeromissing == '1':
featurelen = len(self.valindexes)
return [0]*featurelen
elif self.zeromissinginternal == '1':
pair = list(key)
if len(pair) == 1:
pair = pair*2
if pair[0] in self.proteins and pair[1] in self.proteins:
featurelen = len(self.valindexes)
return [0]*featurelen
elif self.interpolator != False:
#try to interpolate the point
#using a feature vector generated for this point
#using this interpolator's own assembler
fvector = self.interpolatordata.getfeaturevector(key)
fvector = map(float,fvector)
#use the regressor on this new vector
self.interpolator.predict(fvector)
if self.fillmissing != False:
#a pickled list to fill any missing values with
return self.fillmissing
else:
raise KeyError
return self.db[key]
def close(self):
self.db.close()
return None
def openpairshelf(filename, flag='c', protocol=None, writeback=False):
    """Returns a ProteinPairDB object, with similar functionality to shelve.open()"""
    db = ProteinPairDB(filename, flag, protocol, writeback)
    return db
| |
#!/usr/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
"""Generate documentation for reportlab.graphics classes.
Type the following for usage info:
python graphdocpy.py -h
"""
__version__ = '0.8'
import sys
sys.path.insert(0, '.')
import os, re, types, getopt, pickle, copy, time, pprint, traceback
from reportlab import isPy3
from reportlab import rl_config
from docpy import PackageSkeleton0, ModuleSkeleton0
from docpy import DocBuilder0, PdfDocBuilder0, HtmlDocBuilder0
from docpy import htmlescape, htmlrepr, defaultformat, \
getdoc, reduceDocStringLength
from docpy import makeHtmlSection, makeHtmlSubSection, \
makeHtmlInlineImage
from reportlab.lib.units import inch, cm
from reportlab.lib.pagesizes import A4
from reportlab.lib import colors
from reportlab.lib.enums import TA_CENTER, TA_LEFT
from reportlab.lib.utils import getStringIO
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.pdfgen import canvas
from reportlab.platypus.flowables import Flowable, Spacer
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.tableofcontents import TableOfContents
from reportlab.platypus.flowables \
import Flowable, Preformatted,Spacer, Image, KeepTogether, PageBreak
from reportlab.platypus.xpreformatted import XPreformatted
from reportlab.platypus.frames import Frame
from reportlab.platypus.doctemplate \
import PageTemplate, BaseDocTemplate
from reportlab.platypus.tables import TableStyle, Table
from reportlab.graphics.shapes import NotImplementedError
import inspect
# Needed to draw Widget/Drawing demos.
from reportlab.graphics.widgetbase import Widget
from reportlab.graphics.shapes import Drawing
from reportlab.graphics import shapes
from reportlab.graphics import renderPDF
VERBOSE = rl_config.verbose
VERIFY = 1
_abstractclasserr_re = re.compile(r'^\s*abstract\s*class\s*(\w+)\s*instantiated',re.I)
####################################################################
#
# Stuff needed for building PDF docs.
#
####################################################################
def mainPageFrame(canvas, doc):
    "The page frame used for all PDF documents."
    canvas.saveState()
    pageNum = canvas.getPageNumber()
    # horizontal rules at the top and bottom of the page body
    canvas.line(2*cm, A4[1]-2*cm, A4[0]-2*cm, A4[1]-2*cm)
    canvas.line(2*cm, 2*cm, A4[0]-2*cm, 2*cm)
    if pageNum > 1:
        # page number, centred at the bottom (not on the cover page)
        canvas.setFont('Times-Roman', 12)
        canvas.drawString(4 * inch, cm, "%d" % pageNum)
        if hasattr(canvas, 'headerLine'): # hackish
            header = ' \xc2\x8d '.join(canvas.headerLine)
            canvas.drawString(2*cm, A4[1]-1.75*cm, header)
    canvas.setFont('Times-Roman', 8)
    canvas.drawString(2*cm, 1.65*cm, "Generated with docpy. See http://www.reportlab.com!")
    canvas.restoreState()
class MyTemplate(BaseDocTemplate):
    "The document template used for all PDF documents."
    # BaseDocTemplate must not be handed pageTemplates at init time.
    _invalidInitArgs = ('pageTemplates',)
    def __init__(self, filename, **kw):
        # One full-page frame; splitting disabled so demo flowables stay whole.
        frame1 = Frame(2.5*cm, 2.5*cm, 15*cm, 25*cm, id='F1')
        self.allowSplitting = 0
        BaseDocTemplate.__init__(self, filename, **kw)
        self.addPageTemplates(PageTemplate('normal', [frame1], mainPageFrame))
    def afterFlowable(self, flowable):
        "Takes care of header line, TOC and outline entries."
        if flowable.__class__.__name__ == 'Paragraph':
            f = flowable
            # Build a list of heading parts.
            # So far, this is the *last* item on the *previous* page...
            if f.style.name[:8] == 'Heading0':
                # a top-level heading starts a fresh header line
                self.canv.headerLine = [f.text] # hackish
            elif f.style.name[:8] == 'Heading1':
                # drop any deeper heading parts before appending this one
                if len(self.canv.headerLine) == 2:
                    del self.canv.headerLine[-1]
                elif len(self.canv.headerLine) == 3:
                    del self.canv.headerLine[-1]
                    del self.canv.headerLine[-1]
                self.canv.headerLine.append(f.text)
            elif f.style.name[:8] == 'Heading2':
                # replace a previous Heading2 part, if present
                if len(self.canv.headerLine) == 3:
                    del self.canv.headerLine[-1]
                self.canv.headerLine.append(f.text)
            if f.style.name[:7] == 'Heading':
                # Register TOC entries.
                headLevel = int(f.style.name[7:])
                self.notify('TOCEntry', (headLevel, flowable.getPlainText(), self.page))
                # Add PDF outline entries.
                c = self.canv
                title = f.text
                key = str(hash(f))
                lev = int(f.style.name[7:])
                try:
                    # only top-level outline entries start expanded
                    if lev == 0:
                        isClosed = 0
                    else:
                        isClosed = 1
                    c.bookmarkPage(key)
                    c.addOutlineEntry(title, key, level=lev, closed=isClosed)
                    c.showOutline()
                except:
                    if VERBOSE:
                        # AR hacking in exception handlers
                        print('caught exception in MyTemplate.afterFlowable with heading text %s' % f.text)
                        traceback.print_exc()
                    else:
                        pass
####################################################################
#
# Utility functions
#
####################################################################
def indentLevel(line, spacesPerTab=4):
    """Return the indentation width (in columns) at the front of *line*.

    A space counts as one column; a tab advances to the next multiple of
    *spacesPerTab* columns.  Previously the first tab stop was hard-coded
    to 4 regardless of *spacesPerTab*, and a tab following more than
    *spacesPerTab* leading spaces moved the column count backwards; both
    now honour *spacesPerTab*.  A line that is entirely whitespace (or
    empty) returns its full indent width instead of None.
    """
    x = 0
    for ch in line:
        if ch == ' ':
            x = x + 1
        elif ch == '\t':
            # advance to the next tab stop
            x = (x // spacesPerTab + 1) * spacesPerTab
        else:
            return x
    return x
assert indentLevel('hello') == 0, 'error in indentLevel'
assert indentLevel(' hello') == 1, 'error in indentLevel'
assert indentLevel(' hello') == 2, 'error in indentLevel'
assert indentLevel(' hello') == 3, 'error in indentLevel'
assert indentLevel('\thello') == 4, 'error in indentLevel'
assert indentLevel(' \thello') == 4, 'error in indentLevel'
assert indentLevel('\t hello') == 5, 'error in indentLevel'
####################################################################
#
# Special-purpose document builders
#
####################################################################
class GraphPdfDocBuilder0(PdfDocBuilder0):
    """A PDF document builder displaying widgets and drawings.

    This generates a PDF file where only methods named 'demo' are
    listed for any class C. If C happens to be a subclass of Widget
    and has a 'demo' method, this method is assumed to generate and
    return a sample widget instance, that is then appended graphi-
    cally to the Platypus story.

    Something similar happens for functions. If their names start
    with 'sample' they are supposed to generate and return a sample
    drawing. This is then taken and appended graphically to the
    Platypus story, as well.
    """

    fileSuffix = '.pdf'

    def begin(self, name='', typ=''):
        """Start a fresh story with a cover page and a table of contents."""
        styleSheet = getSampleStyleSheet()
        self.code = styleSheet['Code']
        self.bt = styleSheet['BodyText']
        self.story = []

        # Cover page
        t = time.gmtime(time.time())
        timeString = time.strftime("%Y-%m-%d %H:%M", t)
        self.story.append(Paragraph('<font size=18>Documentation for %s "%s"</font>' % (typ, name), self.bt))
        self.story.append(Paragraph('<font size=18>Generated by: graphdocpy.py version %s</font>' % __version__, self.bt))
        self.story.append(Paragraph('<font size=18>Date generated: %s</font>' % timeString, self.bt))
        self.story.append(Paragraph('<font size=18>Format: PDF</font>', self.bt))
        self.story.append(PageBreak())

        # Table of contents
        toc = TableOfContents()
        self.story.append(toc)
        self.story.append(PageBreak())

    def end(self, fileName=None):
        """Build the PDF document; fileName overrides the computed path."""
        if fileName: # overrides output path
            self.outPath = fileName
        elif self.packageName:
            self.outPath = self.packageName + self.fileSuffix
        elif self.skeleton:
            self.outPath = self.skeleton.getModuleName() + self.fileSuffix
        else:
            self.outPath = ''
        if self.outPath:
            doc = MyTemplate(self.outPath)
            doc.multiBuild(self.story)

    def beginModule(self, name, doc, imported):
        # Defer displaying the module header info until we know the module
        # actually contains a Widget or Drawing (see beginClass).
        self.shouldDisplayModule = (name, doc, imported)
        self.hasDisplayedModule = 0

    def endModule(self, name, doc, imported):
        # Only close the module section if its header was ever displayed.
        if self.hasDisplayedModule:
            DocBuilder0.endModule(self, name, doc, imported)

    def beginClasses(self, names):
        # Defer displaying the 'Classes' header info to later, too.
        if self.shouldDisplayModule:
            self.shouldDisplayClasses = names

    # Skip all methods.
    def beginMethod(self, name, doc, sig):
        pass

    def endMethod(self, name, doc, sig):
        pass

    def _displayDeferredModule(self):
        "Emit the deferred module header (and 'Classes' header) into the story."
        modName, modDoc, imported = self.shouldDisplayModule
        self.story.append(Paragraph(modName, self.makeHeadingStyle(self.indentLevel-2, 'module')))
        self.story.append(XPreformatted(modDoc, self.bt))
        self.shouldDisplayModule = 0
        self.hasDisplayedModule = 1
        if self.shouldDisplayClasses:
            self.story.append(Paragraph('Classes', self.makeHeadingStyle(self.indentLevel-1)))
            self.shouldDisplayClasses = 0

    def beginClass(self, name, doc, bases):
        "Append a graphic demo of a Widget or Drawing at the end of a class."
        if VERBOSE:
            print('GraphPdfDocBuilder.beginClass(%s...)' % name)
        aClass = eval('self.skeleton.moduleSpace.' + name)
        if issubclass(aClass, Widget):
            # flush the deferred module header now that we know the module
            # contains something worth documenting
            if self.shouldDisplayModule:
                self._displayDeferredModule()
            PdfDocBuilder0.beginClass(self, name, doc, bases)
            self.beginAttributes(aClass)
        elif issubclass(aClass, Drawing):
            if self.shouldDisplayModule:
                self._displayDeferredModule()
            PdfDocBuilder0.beginClass(self, name, doc, bases)

    def beginAttributes(self, aClass):
        "Append a list of annotated attributes of a class."
        self.story.append(Paragraph(
            'Public Attributes',
            self.makeHeadingStyle(self.indentLevel+1)))
        # renamed from 'map' to avoid shadowing the builtin
        attrMap = aClass._attrMap
        if attrMap:
            attrMap = list(attrMap.items())
            attrMap.sort()
        else:
            attrMap = []
        for name, typ in attrMap:
            if typ is not None:
                if hasattr(typ, 'desc'):
                    desc = typ.desc
                else:
                    desc = '<i>%s</i>' % typ.__class__.__name__
            else:
                desc = '<i>None</i>'
            self.story.append(Paragraph(
                "<b>%s</b> %s" % (name, desc), self.bt))
        self.story.append(Paragraph("", self.bt))

    def endClass(self, name, doc, bases):
        "Append a graphic demo of a Widget or Drawing at the end of a class."
        PdfDocBuilder0.endClass(self, name, doc, bases)
        aClass = eval('self.skeleton.moduleSpace.' + name)
        if hasattr(aClass, '_nodoc'):
            pass
        elif issubclass(aClass, Widget):
            try:
                widget = aClass()
            except AssertionError as err:
                # abstract Widget subclasses refuse instantiation; skip them
                if _abstractclasserr_re.match(str(err)): return
                raise
            self.story.append(Spacer(0*cm, 0.5*cm))
            self._showWidgetDemoCode(widget)
            self.story.append(Spacer(0*cm, 0.5*cm))
            self._showWidgetDemo(widget)
            self.story.append(Spacer(0*cm, 0.5*cm))
            self._showWidgetProperties(widget)
            self.story.append(PageBreak())
        elif issubclass(aClass, Drawing):
            drawing = aClass()
            self.story.append(Spacer(0*cm, 0.5*cm))
            self._showDrawingCode(drawing)
            self.story.append(Spacer(0*cm, 0.5*cm))
            self._showDrawingDemo(drawing)
            self.story.append(Spacer(0*cm, 0.5*cm))

    def beginFunctions(self, names):
        # Only emit a 'Functions' section if at least one sample function exists.
        if ' '.join(names).find(' sample') > -1:
            PdfDocBuilder0.beginFunctions(self, names)

    # Skip non-sample functions.
    def beginFunction(self, name, doc, sig):
        "Skip function for 'uninteresting' names."
        if name[:6] == 'sample':
            PdfDocBuilder0.beginFunction(self, name, doc, sig)

    def endFunction(self, name, doc, sig):
        "Append a drawing to the story for special function names."
        if name[:6] != 'sample':
            return
        if VERBOSE:
            print('GraphPdfDocBuilder.endFunction(%s...)' % name)
        PdfDocBuilder0.endFunction(self, name, doc, sig)
        aFunc = eval('self.skeleton.moduleSpace.' + name)
        drawing = aFunc()
        self.story.append(Spacer(0*cm, 0.5*cm))
        self._showFunctionDemoCode(aFunc)
        self.story.append(Spacer(0*cm, 0.5*cm))
        self._showDrawingDemo(drawing)
        self.story.append(PageBreak())

    def _showFunctionDemoCode(self, function):
        """Show a demo code of the function generating the drawing."""
        # Heading
        self.story.append(Paragraph("<i>Example</i>", self.bt))
        self.story.append(Paragraph("", self.bt))
        # Sample code
        codeSample = inspect.getsource(function)
        self.story.append(Preformatted(codeSample, self.code))

    def _showDrawingCode(self, drawing):
        """Show code of the drawing class."""
        # Heading
        self.story.append(Paragraph("<i>Example</i>", self.bt))
        # Sample code
        codeSample = inspect.getsource(drawing.__class__.__init__)
        self.story.append(Preformatted(codeSample, self.code))

    def _showDrawingDemo(self, drawing):
        """Show a graphical demo of the drawing.

        Best-effort: ignored if no rendering is available or fails.
        """
        try:
            flo = renderPDF.GraphicsFlowable(drawing)
            self.story.append(Spacer(6,6))
            self.story.append(flo)
            self.story.append(Spacer(6,6))
        except Exception:
            # narrowed from a bare except so Ctrl-C still propagates
            if VERBOSE:
                print('caught exception in _showDrawingDemo')
                traceback.print_exc()

    def _showWidgetDemo(self, widget):
        """Show a graphical demo of the widget.

        Best-effort: ignored if verification/rendering is unavailable or
        the demo method does not return a drawing.
        """
        try:
            if VERIFY:
                widget.verify()
            drawing = widget.demo()
            flo = renderPDF.GraphicsFlowable(drawing)
            self.story.append(Spacer(6,6))
            self.story.append(flo)
            self.story.append(Spacer(6,6))
        except Exception:
            # narrowed from a bare except so Ctrl-C still propagates
            if VERBOSE:
                print('caught exception in _showWidgetDemo')
                traceback.print_exc()

    def _showWidgetDemoCode(self, widget):
        """Show a demo code of the widget."""
        # Heading
        self.story.append(Paragraph("<i>Example</i>", self.bt))
        # Sample code
        codeSample = inspect.getsource(widget.__class__.demo)
        self.story.append(Preformatted(codeSample, self.code))

    def _showWidgetProperties(self, widget):
        """Dump all properties of a widget."""
        props = widget.getProperties()
        keys = list(props.keys())
        keys.sort()
        lines = []
        for key in keys:
            value = props[key]
            f = getStringIO()
            pprint.pprint(value, f)
            # strip the trailing newline pprint always adds
            value = f.getvalue()[:-1]
            # indent continuation lines so they line up under '<key> = '
            valueLines = value.split('\n')
            for i in range(1, len(valueLines)):
                valueLines[i] = ' '*(len(key)+3) + valueLines[i]
            value = '\n'.join(valueLines)
            lines.append('%s = %s' % (key, value))
        text = '\n'.join(lines)
        self.story.append(Paragraph("<i>Properties of Example Widget</i>", self.bt))
        self.story.append(Paragraph("", self.bt))
        self.story.append(Preformatted(text, self.code))
class GraphHtmlDocBuilder0(HtmlDocBuilder0):
    "A class to write the skeleton of a Python source."
    fileSuffix = '.html'
    def beginModule(self, name, doc, imported):
        # Defer displaying the module header info to later...
        # (only shown once a Widget subclass is found, see beginClass)
        self.shouldDisplayModule = (name, doc, imported)
        self.hasDisplayedModule = 0
    def endModule(self, name, doc, imported):
        # Only close the module section if its header was ever displayed.
        if self.hasDisplayedModule:
            HtmlDocBuilder0.endModule(self, name, doc, imported)
    def beginClasses(self, names):
        # Defer displaying the module header info to later...
        if self.shouldDisplayModule:
            self.shouldDisplayClasses = names
    # Skip all methods.
    def beginMethod(self, name, doc, sig):
        pass
    def endMethod(self, name, doc, sig):
        pass
    def beginClass(self, name, doc, bases):
        "Append a graphic demo of a widget at the end of a class."
        aClass = eval('self.skeleton.moduleSpace.' + name)
        if issubclass(aClass, Widget):
            # flush the deferred module header, now that the module is
            # known to contain a documentable widget
            if self.shouldDisplayModule:
                modName, modDoc, imported = self.shouldDisplayModule
                self.outLines.append('<H2>%s</H2>' % modName)
                self.outLines.append('<PRE>%s</PRE>' % modDoc)
                self.shouldDisplayModule = 0
                self.hasDisplayedModule = 1
                if self.shouldDisplayClasses:
                    self.outLines.append('<H2>Classes</H2>')
                    self.shouldDisplayClasses = 0
            HtmlDocBuilder0.beginClass(self, name, doc, bases)
    def endClass(self, name, doc, bases):
        "Append a graphic demo of a widget at the end of a class."
        HtmlDocBuilder0.endClass(self, name, doc, bases)
        aClass = eval('self.skeleton.moduleSpace.' + name)
        if issubclass(aClass, Widget):
            widget = aClass()
            self._showWidgetDemoCode(widget)
            self._showWidgetDemo(widget)
            self._showWidgetProperties(widget)
    def beginFunctions(self, names):
        # Only emit a functions section if a sample function exists.
        if ' '.join(names).find(' sample') > -1:
            HtmlDocBuilder0.beginFunctions(self, names)
    # Skip non-sample functions.
    def beginFunction(self, name, doc, sig):
        "Skip function for 'uninteresting' names."
        if name[:6] == 'sample':
            HtmlDocBuilder0.beginFunction(self, name, doc, sig)
    def endFunction(self, name, doc, sig):
        "Append a drawing to the story for special function names."
        if name[:6] != 'sample':
            return
        HtmlDocBuilder0.endFunction(self, name, doc, sig)
        aFunc = eval('self.skeleton.moduleSpace.' + name)
        drawing = aFunc()
        self._showFunctionDemoCode(aFunc)
        self._showDrawingDemo(drawing, aFunc.__name__)
    def _showFunctionDemoCode(self, function):
        """Show a demo code of the function generating the drawing."""
        # Heading
        self.outLines.append('<H3>Example</H3>')
        # Sample code
        codeSample = inspect.getsource(function)
        self.outLines.append('<PRE>%s</PRE>' % codeSample)
    def _showDrawingDemo(self, drawing, name):
        """Show a graphical demo of the drawing."""
        # Add the given drawing to the story.
        # Ignored if no GD rendering available
        # or the demo method does not return a drawing.
        try:
            from reportlab.graphics import renderPM
            modName = self.skeleton.getModuleName()
            path = '%s-%s.jpg' % (modName, name)
            renderPM.drawToFile(drawing, path, fmt='JPG')
            self.outLines.append('<H3>Demo</H3>')
            self.outLines.append(makeHtmlInlineImage(path))
        except:
            if VERBOSE:
                print('caught exception in GraphHTMLDocBuilder._showDrawingDemo')
                traceback.print_exc()
            else:
                pass
    def _showWidgetDemo(self, widget):
        """Show a graphical demo of the widget."""
        # Get a demo drawing from the widget and add it to the story.
        # Ignored if no GD rendering available
        # or the demo method does not return a drawing.
        try:
            from reportlab.graphics import renderPM
            drawing = widget.demo()
            if VERIFY:
                widget.verify()
            modName = self.skeleton.getModuleName()
            path = '%s-%s.jpg' % (modName, widget.__class__.__name__)
            renderPM.drawToFile(drawing, path, fmt='JPG')
            self.outLines.append('<H3>Demo</H3>')
            self.outLines.append(makeHtmlInlineImage(path))
        except:
            if VERBOSE:
                print('caught exception in GraphHTMLDocBuilder._showWidgetDemo')
                traceback.print_exc()
            else:
                pass
    def _showWidgetDemoCode(self, widget):
        """Show a demo code of the widget."""
        # Heading
        self.outLines.append('<H3>Example Code</H3>')
        # Sample code
        codeSample = inspect.getsource(widget.__class__.demo)
        self.outLines.append('<PRE>%s</PRE>' % codeSample)
        self.outLines.append('')
    def _showWidgetProperties(self, widget):
        """Dump all properties of a widget."""
        props = widget.getProperties()
        keys = list(props.keys())
        keys.sort()
        lines = []
        for key in keys:
            value = props[key]
            # Method 3
            f = getStringIO()
            pprint.pprint(value, f)
            # strip the trailing newline pprint always adds
            value = f.getvalue()[:-1]
            # indent continuation lines so they line up under '<key> = '
            valueLines = value.split('\n')
            for i in range(1, len(valueLines)):
                valueLines[i] = ' '*(len(key)+3) + valueLines[i]
            value = '\n'.join(valueLines)
            lines.append('%s = %s' % (key, value))
        text = '\n'.join(lines)
        self.outLines.append('<H3>Properties of Example Widget</H3>')
        self.outLines.append('<PRE>%s</PRE>' % text)
        self.outLines.append('')
# Highly experimental!
class PlatypusDocBuilder0(DocBuilder0):
    "Document the skeleton of a Python module as a Platypus story."

    fileSuffix = '.pps' # A pickled Platypus story.

    def begin(self, name='', typ=''):
        """Reset the story and cache frequently used paragraph styles."""
        styleSheet = getSampleStyleSheet()
        self.code = styleSheet['Code']
        self.bt = styleSheet['BodyText']
        self.story = []

    def end(self):
        """Pickle the collected story to '<package-or-module>.pps'."""
        if self.packageName:
            self.outPath = self.packageName + self.fileSuffix
        elif self.skeleton:
            self.outPath = self.skeleton.getModuleName() + self.fileSuffix
        else:
            self.outPath = ''
        if self.outPath:
            # BUG FIX: pickle requires a binary-mode file on Python 3; this
            # was opened with 'w' (text mode, TypeError) and never closed.
            with open(self.outPath, 'wb') as f:
                pickle.dump(self.story, f)

    def beginPackage(self, name):
        DocBuilder0.beginPackage(self, name)
        self.story.append(Paragraph(name, self.bt))

    def beginModule(self, name, doc, imported):
        # module name followed by its (preformatted) docstring
        story = self.story
        bt = self.bt
        story.append(Paragraph(name, bt))
        story.append(XPreformatted(doc, bt))

    def beginClasses(self, names):
        self.story.append(Paragraph('Classes', self.bt))

    def beginClass(self, name, doc, bases):
        bt = self.bt
        story = self.story
        if bases:
            bases = [b.__name__ for b in bases] # hack
            story.append(Paragraph('%s(%s)' % (name, ', '.join(bases)), bt))
        else:
            story.append(Paragraph(name, bt))
        story.append(XPreformatted(doc, bt))

    def beginMethod(self, name, doc, sig):
        bt = self.bt
        story = self.story
        story.append(Paragraph(name+sig, bt))
        story.append(XPreformatted(doc, bt))

    def beginFunctions(self, names):
        if names:
            self.story.append(Paragraph('Functions', self.bt))

    def beginFunction(self, name, doc, sig):
        bt = self.bt
        story = self.story
        story.append(Paragraph(name+sig, bt))
        story.append(XPreformatted(doc, bt))
####################################################################
#
# Main
#
####################################################################
def printUsage():
    """graphdocpy.py - Automated documentation for the RL Graphics library.

    Usage: python graphdocpy.py [options]

    [options]
        -h          Print this help message.

        -f name     Use the document builder indicated by 'name',
                    e.g. Html, Pdf.

        -m module   Generate document for module named 'module'.
                    'module' may follow any of these forms:
                        - docpy.py
                        - docpy
                        - c:\\test\\docpy
                    and can be any of these:
                        - standard Python modules
                        - modules in the Python search path
                        - modules in the current directory

        -p package  Generate document for package named 'package'
                    (default is 'reportlab.graphics').
                    'package' may follow any of these forms:
                        - reportlab
                        - reportlab.graphics.charts
                        - c:\\test\\reportlab
                    and can be any of these:
                        - standard Python packages (?)
                        - packages in the Python search path
                        - packages in the current directory

        -s          Silent mode (default is unset).

    Examples:

        python graphdocpy.py reportlab.graphics
        python graphdocpy.py -m signsandsymbols.py -f Pdf
        python graphdocpy.py -m flags.py -f Html
        python graphdocpy.py -m barchart1.py
    """
    # NOTE: this docstring doubles as the CLI help text that main() prints
    # on -h, so its wording is user-visible output — edit with care.
# The following functions, including main(), are actually
# the same as in docpy.py (except for some defaults).
def documentModule0(pathOrName, builder, opts={}):
    """Generate documentation for one Python file in some format.

    This handles Python standard modules like string, custom modules
    on the Python search path like e.g. docpy as well as modules
    specified with their full path like C:/tmp/junk.py.

    The doc file will always be saved in the current directory with
    a basename equal to that of the module, e.g. docpy.
    (The 'opts' parameter is currently unused; kept for interface
    compatibility with documentPackage0.)
    """
    cwd = os.getcwd()
    # Append directory to Python search path if we get one.
    dirName = os.path.dirname(pathOrName)
    if dirName:
        sys.path.append(dirName)
    try:
        # Remove .py extension from module name.
        if pathOrName[-3:] == '.py':
            modname = pathOrName[:-3]
        else:
            modname = pathOrName
        # Remove directory paths from module name.
        if dirName:
            modname = os.path.basename(modname)
        # Load the module.
        try:
            module = __import__(modname)
        except Exception:
            # best-effort: report and skip modules that fail to import
            print('Failed to import %s.' % modname)
            return
        # Do the real documentation work.
        s = ModuleSkeleton0()
        s.inspect(module)
        builder.write(s)
    finally:
        # BUG FIX: the appended sys.path entry was previously leaked when
        # the import failed (the early return skipped this cleanup).
        if dirName:
            del sys.path[-1]
        os.chdir(cwd)
def _packageWalkCallback(xxx_todo_changeme, dirPath, files):
    """A callback function used when walking over a package tree.

    The first argument is a (builder, opts) tuple; its odd name is a
    2to3 conversion artifact, kept so positional callers stay unchanged.
    """
    (builder, opts) = xxx_todo_changeme
    cwd = os.getcwd()
    os.chdir(dirPath)
    # Document every .py file except package __init__ modules.
    files = [f for f in files if f != '__init__.py' and f[-3:] == '.py']
    for f in files:
        builder.indentLevel = builder.indentLevel + 1
        documentModule0(f, builder)
        builder.indentLevel = builder.indentLevel - 1
    # CD back out
    os.chdir(cwd)
def documentPackage0(pathOrName, builder, opts={}):
    """Generate documentation for one Python package in some format.

    'pathOrName' can be either a filesystem path leading to a Python
    package or a package name whose path will be resolved by importing
    the top-level module.

    The doc file will always be saved in the current directory with
    a basename equal to that of the package, e.g. reportlab.lib.
    """
    # Did we get a package path with OS-dependent separators...?
    if os.sep in pathOrName:
        path = pathOrName
        name = os.path.splitext(os.path.basename(path))[0]
    # ... or rather a package name?
    else:
        name = pathOrName
        package = __import__(name)
        # Some special care needed for dotted names:
        # __import__('a.b') returns package 'a', so resolve the submodule
        # by evaluating e.g. 'package.b' against the local 'package'.
        if '.' in name:
            subname = 'package' + name[name.find('.'):]
            package = eval(subname)
        path = os.path.dirname(package.__file__)
    cwd = os.getcwd()
    os.chdir(path)
    builder.beginPackage(name)
    if isPy3:
        # os.path.walk was removed in Python 3; emulate it with os.walk
        for dirpath, dirnames, filenames in os.walk(path):
            _packageWalkCallback((builder, opts), dirpath, dirnames + filenames)
    else:
        os.path.walk(path, _packageWalkCallback, (builder, opts))
    builder.endPackage(name)
    os.chdir(cwd)
def makeGraphicsReference(outfilename):
    "Make reportlab-graphics-reference.pdf"
    pkgName = 'reportlab.graphics'
    builder = GraphPdfDocBuilder0()
    builder.begin(name=pkgName, typ='package')
    documentPackage0(pkgName, builder, {'isSilent': 0})
    builder.end(outfilename)
    print('made graphics reference in %s' % outfilename)
def main():
    "Handle command-line options and trigger corresponding action."

    opts, args = getopt.getopt(sys.argv[1:], 'hsf:m:p:')

    # Make an options dictionary that is easier to use.
    optsDict = {}
    for k, v in opts:
        optsDict[k] = v
    hasOpt = optsDict.__contains__

    # On -h print usage and exit immediately.
    if hasOpt('-h'):
        print(printUsage.__doc__)
        sys.exit(0)

    # On -s set silent mode.
    isSilent = hasOpt('-s')

    # On -f set the appropriate DocBuilder to use or a default one.
    builder = { 'Pdf': GraphPdfDocBuilder0,
                'Html': GraphHtmlDocBuilder0,
                }[optsDict.get('-f', 'Pdf')]()

    # Set default module or package to document.
    if not hasOpt('-p') and not hasOpt('-m'):
        optsDict['-p'] = 'reportlab.graphics'

    # Save a few options for further use.
    options = {'isSilent':isSilent}

    # Now call the real documentation functions.
    if hasOpt('-m'):
        nameOrPath = optsDict['-m']
        if not isSilent:
            print("Generating documentation for module %s..." % nameOrPath)
        builder.begin(name=nameOrPath, typ='module')
        documentModule0(nameOrPath, builder, options)
    elif hasOpt('-p'):
        nameOrPath = optsDict['-p']
        if not isSilent:
            print("Generating documentation for package %s..." % nameOrPath)
        builder.begin(name=nameOrPath, typ='package')
        documentPackage0(nameOrPath, builder, options)
    builder.end()

    if not isSilent:
        print("Saved %s." % builder.outPath)
    #if doing the usual, put a copy in docs
    cwd = os.getcwd()
    if builder.outPath=='reportlab.graphics.pdf':
        import shutil
        try:
            import tools
        except ImportError: #probably running in tools/docco
            # the repository root should be two levels up; make it importable
            sys.path.insert(0, os.path.dirname(os.path.dirname(cwd)))
            import tools
        # resolve the repository's docs directory from the tools package
        topDir=tools.__path__[0]
        if not os.path.isabs(topDir): topDir=os.path.abspath(topDir)
        topDir=os.path.dirname(topDir)
        dst = os.path.join(topDir,'docs')
        if not os.path.isdir(dst):
            if os.path.basename(cwd)=='docco':
                dst=os.path.realpath(os.path.join(cwd,'..','..','docs'))
        dst = os.path.join(dst,'reportlab-graphics-reference.pdf')
        try:
            shutil.copyfile('reportlab.graphics.pdf', dst)
            if not isSilent:
                print('copied to '+dst)
        except:
            # best-effort copy; a failure is reported but not fatal
            if not isSilent:
                print('!!!!! cannot copy to '+dst)
def makeSuite():
    """Standard test-harness support: run this script as a separate process."""
    from tests.utils import ScriptThatMakesFileTest
    return ScriptThatMakesFileTest(
        'tools/docco', 'graphdocpy.py', 'reportlab.graphics.pdf')
# Script entry point: build the documentation when run directly.
if __name__ == '__main__':
    main()
| |
import unittest
import functools
import numpy
from operator import mul
import chainer
from chainer import backend
from chainer.backends import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import conv
# First product: sweep dilate/groups/cover_all/autotune at float32;
# second product: sweep dtypes (incl. float16) on a small non-contiguous case.
@testing.parameterize(*(testing.product({
    'dims': [(5,), (4, 3), (3, 4, 3)],
    'dilate': [1, 2],
    'groups': [1, 2],
    'cover_all': [True, False],
    'c_contiguous': [True],
    'x_dtype': [numpy.float32],
    'W_dtype': [numpy.float32],
    'autotune': [True, False],
}) + testing.product({
    'dims': [(4,)],
    'dilate': [1],
    'groups': [1],
    'cover_all': [False],
    'c_contiguous': [False],
    'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
    'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
    'autotune': [False],
})))
class TestConvolutionND(unittest.TestCase):
    """Forward/backward/double-backward tests for F.convolution_nd across
    backends (CPU, GPU/cuDNN, ChainerX) and parameterized configurations."""
    def setUp(self):
        # Build random inputs x, W, b plus upstream gradients (gy) and
        # double-backward seeds (ggx/ggW/ggb) for the parameterized shape.
        N = 2
        in_channels = 4
        out_channels = 2
        ndim = len(self.dims)
        ksize = (2,) * ndim
        self.stride = (1,) * ndim
        self.pad = (1,) * ndim
        self.dilate = (self.dilate,) * ndim
        W_scale = numpy.sqrt(1. / functools.reduce(mul, ksize, in_channels))
        W_shape = (out_channels, in_channels // self.groups) + ksize
        self.W = numpy.random.normal(0, W_scale, W_shape).astype(self.W_dtype)
        self.b = numpy.random.uniform(-1, 1, out_channels).astype(self.x_dtype)
        x_shape = (N, in_channels) + self.dims
        self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.x_dtype)
        # Expected output spatial sizes determine the gradient shape.
        gy_shape = (N, out_channels) + tuple(
            conv.get_conv_outsize(d, k, s, p, cover_all=self.cover_all, d=di)
            for (d, k, s, p, di)
            in zip(self.dims, ksize, self.stride, self.pad, self.dilate))
        self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(self.x_dtype)
        self.check_forward_options = {}
        self.check_backward_options = {
            'dtype': numpy.float64, 'atol': 3e-5, 'rtol': 3e-4}
        # float16 needs much looser tolerances.
        if self.x_dtype == numpy.float16 or self.W_dtype == numpy.float16:
            self.check_forward_options = {'atol': 5e-4, 'rtol': 5e-3}
            self.check_backward_options = {
                'dtype': numpy.float64, 'atol': 2 ** -4, 'rtol': 2 ** -4}
        self.ggx = numpy.random.uniform(-1, 1, self.x.shape).astype(
            self.x_dtype)
        self.ggW = numpy.random.uniform(-1, 1, self.W.shape).astype(
            self.W_dtype)
        self.ggb = numpy.random.uniform(-1, 1, self.b.shape).astype(
            self.x_dtype)
    def check_forward_consistency(
            self, transfer_func, nobias=False, use_cudnn='never'):
        # Compare CPU output against the backend selected by transfer_func.
        x_cpu = chainer.Variable(self.x)
        W_cpu = chainer.Variable(self.W)
        b_cpu = None if nobias else chainer.Variable(self.b)
        y_cpu = F.convolution_nd(
            x_cpu, W_cpu, b_cpu, stride=self.stride, pad=self.pad,
            cover_all=self.cover_all, dilate=self.dilate,
            groups=self.groups)
        x_gpu = chainer.Variable(transfer_func(self.x))
        W_gpu = chainer.Variable(transfer_func(self.W))
        b_gpu = None if nobias else chainer.Variable(transfer_func(self.b))
        with chainer.using_config('use_cudnn', use_cudnn):
            with chainer.using_config('autotune', self.autotune):
                y_gpu = F.convolution_nd(
                    x_gpu, W_gpu, b_gpu, stride=self.stride, pad=self.pad,
                    cover_all=self.cover_all, dilate=self.dilate,
                    groups=self.groups)
        testing.assert_allclose(
            y_cpu.data, y_gpu.data, **self.check_forward_options)
    def _skip_if_not_chainerx_supported(self):
        # TODO(hvy): chainerx does not support fp16 yet.
        if self.x_dtype is numpy.float16 or self.W_dtype is numpy.float16:
            raise unittest.SkipTest('Not yet supported')
    @attr.chainerx
    def test_forward_chainerx_native(self):
        self._skip_if_not_chainerx_supported()
        self.check_forward_consistency(backend.to_chainerx, nobias=False)
    @attr.chainerx
    def test_forward_chainerx_native_nobias(self):
        self._skip_if_not_chainerx_supported()
        self.check_forward_consistency(backend.to_chainerx, nobias=True)
    @attr.chainerx
    @attr.gpu
    def test_forward_chainerx_cuda(self):
        self._skip_if_not_chainerx_supported()
        self.check_forward_consistency(
            lambda xs: backend.to_chainerx(cuda.to_gpu(xs)), nobias=False)
    @attr.chainerx
    @attr.gpu
    def test_forward_chainerx_cuda_nobias(self):
        self._skip_if_not_chainerx_supported()
        self.check_forward_consistency(
            lambda xs: backend.to_chainerx(cuda.to_gpu(xs)), nobias=True)
    @attr.cudnn
    def test_forward_consistency(self):
        self.check_forward_consistency(
            cuda.to_gpu, nobias=False, use_cudnn='always')
    @attr.cudnn
    def test_forward_consistency_nobias(self):
        self.check_forward_consistency(
            cuda.to_gpu, nobias=True, use_cudnn='always')
    @attr.gpu
    def test_forward_consistency_im2col(self):
        self.check_forward_consistency(
            cuda.to_gpu, nobias=False, use_cudnn='never')
    @attr.gpu
    def test_forward_consistency_im2col_nobias(self):
        self.check_forward_consistency(
            cuda.to_gpu, nobias=True, use_cudnn='never')
    def check_forward_consistency_regression(self, nobias=False):
        # convolution_nd with 2-d inputs must match convolution_2d.
        x = chainer.Variable(self.x)
        W = chainer.Variable(self.W)
        b = None if nobias else chainer.Variable(self.b)
        with chainer.using_config('use_cudnn', 'never'):
            y_nd = F.convolution_nd(
                x, W, b, stride=self.stride, pad=self.pad,
                cover_all=self.cover_all, dilate=self.dilate,
                groups=self.groups)
            y_2d = F.convolution_2d(
                x, W, b, stride=self.stride, pad=self.pad,
                cover_all=self.cover_all, dilate=self.dilate,
                groups=self.groups)
        testing.assert_allclose(
            y_nd.data, y_2d.data, **self.check_forward_options)
    def test_forward_consistency_regression(self):
        # Regression test to convolution_2d.
        if len(self.dims) == 2:
            self.check_forward_consistency_regression(nobias=False)
    def test_forward_consistency_regression_nobias(self):
        # Regression test to convolution_2d.
        if len(self.dims) == 2:
            self.check_forward_consistency_regression(nobias=True)
    def check_backward(self, x_data, W_data, b_data, y_grad,
                       use_cudnn='never'):
        # Numerical gradient check; optionally on non-contiguous arrays.
        if not self.c_contiguous:
            x_data, W_data, b_data, y_grad = (
                testing.array._as_noncontiguous_array(
                    (x_data, W_data, b_data, y_grad)))
        args = (x_data, W_data)
        if b_data is not None:
            args += (b_data,)
        def f(*args):
            return F.convolution_nd(
                *args, stride=self.stride, pad=self.pad,
                cover_all=self.cover_all, dilate=self.dilate,
                groups=self.groups)
        with chainer.using_config('use_cudnn', use_cudnn):
            with chainer.using_config('autotune', self.autotune):
                gradient_check.check_backward(
                    f, args, y_grad, **self.check_backward_options)
    @attr.chainerx
    def test_backward_chainerx_native(self):
        self._skip_if_not_chainerx_supported()
        self.check_backward(
            backend.to_chainerx(self.x), backend.to_chainerx(self.W),
            backend.to_chainerx(self.b), backend.to_chainerx(self.gy))
    @attr.chainerx
    def test_backward_chainerx_native_nobias(self):
        self._skip_if_not_chainerx_supported()
        self.check_backward(
            backend.to_chainerx(self.x), backend.to_chainerx(self.W), None,
            backend.to_chainerx(self.gy))
    @attr.chainerx
    @attr.gpu
    def test_backward_chainerx_cuda(self):
        self._skip_if_not_chainerx_supported()
        self.check_backward(
            backend.to_chainerx(cuda.to_gpu(self.x)),
            backend.to_chainerx(cuda.to_gpu(self.W)),
            backend.to_chainerx(cuda.to_gpu(self.b)),
            backend.to_chainerx(cuda.to_gpu(self.gy)))
    @attr.chainerx
    @attr.gpu
    def test_backward_chainerx_cuda_nobias(self):
        self._skip_if_not_chainerx_supported()
        self.check_backward(
            backend.to_chainerx(cuda.to_gpu(self.x)),
            backend.to_chainerx(cuda.to_gpu(self.W)),
            None,
            backend.to_chainerx(cuda.to_gpu(self.gy)))
    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.x, self.W, self.b, self.gy)
    @condition.retry(3)
    def test_backward_cpu_nobias(self):
        self.check_backward(self.x, self.W, None, self.gy)
    @attr.cudnn
    @condition.retry(3)
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
                            cuda.to_gpu(self.b), cuda.to_gpu(self.gy),
                            use_cudnn='always')
    @attr.cudnn
    @condition.retry(3)
    def test_backward_gpu_nobias(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
                            None, cuda.to_gpu(self.gy),
                            use_cudnn='always')
    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu_im2col(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
                            cuda.to_gpu(self.b), cuda.to_gpu(self.gy),
                            use_cudnn='never')
    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu_im2col_nobias(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
                            None, cuda.to_gpu(self.gy),
                            use_cudnn='never')
    def check_double_backward(self, x_data, W_data, b_data, y_grad,
                              x_grad_grad, W_grad_grad, b_grad_grad,
                              use_cudnn='always'):
        # Second-order gradient check with fixed (looser) tolerances.
        if not self.c_contiguous:
            (x_data, W_data, b_data, y_grad, x_grad_grad, W_grad_grad,
             b_grad_grad) = testing.array._as_noncontiguous_array(
                 (x_data, W_data, b_data, y_grad, x_grad_grad, W_grad_grad,
                  b_grad_grad))
        args = (x_data, W_data)
        grad_grads = (x_grad_grad, W_grad_grad)
        if b_data is not None:
            args += (b_data,)
            grad_grads += (b_grad_grad,)
        def f(*args):
            return F.convolution_nd(
                *args, stride=self.stride, pad=self.pad,
                cover_all=self.cover_all, dilate=self.dilate,
                groups=self.groups)
        with chainer.using_config('use_cudnn', use_cudnn):
            with chainer.using_config('autotune', self.autotune):
                gradient_check.check_double_backward(
                    f, args, y_grad, grad_grads,
                    dtype='d', atol=5e-3, rtol=5e-2)
    @attr.chainerx
    def test_double_backward_chainerx_native(self):
        self._skip_if_not_chainerx_supported()
        self.check_double_backward(
            backend.to_chainerx(self.x), backend.to_chainerx(self.W),
            backend.to_chainerx(self.b), backend.to_chainerx(self.gy),
            backend.to_chainerx(self.ggx), backend.to_chainerx(self.ggW),
            backend.to_chainerx(self.ggb))
    @attr.chainerx
    def test_double_backward_chainerx_native_nobias(self):
        self._skip_if_not_chainerx_supported()
        self.check_double_backward(
            backend.to_chainerx(self.x), backend.to_chainerx(self.W), None,
            backend.to_chainerx(self.gy), backend.to_chainerx(self.ggx),
            backend.to_chainerx(self.ggW), None)
    @attr.chainerx
    @attr.gpu
    def test_double_backward_chainerx_cuda(self):
        self._skip_if_not_chainerx_supported()
        self.check_double_backward(
            backend.to_chainerx(cuda.to_gpu(self.x)),
            backend.to_chainerx(cuda.to_gpu(self.W)),
            backend.to_chainerx(cuda.to_gpu(self.b)),
            backend.to_chainerx(cuda.to_gpu(self.gy)),
            backend.to_chainerx(cuda.to_gpu(self.ggx)),
            backend.to_chainerx(cuda.to_gpu(self.ggW)),
            backend.to_chainerx(cuda.to_gpu(self.ggb)))
    @attr.chainerx
    @attr.gpu
    def test_double_backward_chainerx_cuda_nobias(self):
        self._skip_if_not_chainerx_supported()
        self.check_double_backward(
            backend.to_chainerx(cuda.to_gpu(self.x)),
            backend.to_chainerx(cuda.to_gpu(self.W)),
            None,
            backend.to_chainerx(cuda.to_gpu(self.gy)),
            backend.to_chainerx(cuda.to_gpu(self.ggx)),
            backend.to_chainerx(cuda.to_gpu(self.ggW)),
            None)
    @condition.retry(3)
    def test_double_backward_cpu(self):
        self.check_double_backward(self.x, self.W, self.b, self.gy,
                                   self.ggx, self.ggW, self.ggb,
                                   use_cudnn='always')
    @condition.retry(3)
    def test_double_backward_cpu_nobias(self):
        self.check_double_backward(self.x, self.W, None, self.gy,
                                   self.ggx, self.ggW, None,
                                   use_cudnn='always')
    def check_double_backward_gpu(self, bias=True, im2col=False):
        use_cudnn = 'never' if im2col else 'always'
        self.check_double_backward(
            cuda.to_gpu(self.x), cuda.to_gpu(self.W),
            cuda.to_gpu(self.b) if bias else None,
            cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx), cuda.to_gpu(self.ggW),
            cuda.to_gpu(self.ggb) if bias else None,
            use_cudnn=use_cudnn)
    @attr.gpu
    @condition.retry(3)
    def test_double_backward_gpu(self):
        self.check_double_backward_gpu()
    @attr.gpu
    @condition.retry(3)
    def test_double_backward_gpu_nobias(self):
        self.check_double_backward_gpu(bias=False)
    @attr.gpu
    @condition.retry(3)
    def test_double_backward_gpu_im2col(self):
        self.check_double_backward_gpu(im2col=True)
    @attr.gpu
    @condition.retry(3)
    def test_double_backward_gpu_im2col_nobias(self):
        self.check_double_backward_gpu(bias=False, im2col=True)
@testing.parameterize(*testing.product({
    'dims': [(10,), (10, 8), (10, 8, 6)],
    'use_cudnn': ['always', 'auto', 'never'],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestConvolutionNDCudnnCall(unittest.TestCase):
    """Verify cuDNN kernels are invoked exactly when they should be."""

    def setUp(self):
        ndim = len(self.dims)
        n_in, n_out = 3, 2
        kernel = (3,) * ndim
        self.stride = (2,) * ndim
        self.pad = (1,) * ndim
        self.x = cuda.cupy.random.uniform(
            -1, 1, (2, 3) + self.dims).astype(self.dtype)
        scale = numpy.sqrt(1. / functools.reduce(mul, kernel, n_in))
        self.W = cuda.cupy.random.normal(
            0, scale, (n_out, n_in) + kernel).astype(self.dtype)
        out_dims = tuple(
            conv.get_conv_outsize(d, k, s, p)
            for (d, k, s, p) in zip(self.dims, kernel, self.stride, self.pad))
        self.gy = cuda.cupy.random.uniform(
            -1, 1, (2, 2) + out_dims).astype(self.dtype)
        # cuDNN is only used for ndim > 1 and when the config allows it.
        with chainer.using_config('use_cudnn', self.use_cudnn):
            self.expect = chainer.should_use_cudnn('>=auto') and ndim > 1

    def forward(self):
        x_var = chainer.Variable(cuda.to_gpu(self.x))
        W_var = chainer.Variable(cuda.to_gpu(self.W))
        return F.convolution_nd(
            x_var, W_var, None, stride=self.stride, pad=self.pad)

    def test_call_cudnn_forward(self):
        with chainer.using_config('use_cudnn', self.use_cudnn):
            with testing.patch('cupy.cudnn.convolution_forward') as func:
                self.forward()
            self.assertEqual(func.called, self.expect)

    def test_call_cudnn_backward(self):
        with chainer.using_config('use_cudnn', self.use_cudnn):
            y = self.forward()
            y.grad = self.gy
            name = 'cupy.cudnn.convolution_backward_data'
            with testing.patch(name) as func:
                y.backward()
            self.assertEqual(func.called, self.expect)
class TestConvolutionNDarraySupplied(unittest.TestCase):
    """convolution_nd must accept raw arrays as well as Variables."""

    def setUp(self):
        dtype = numpy.float32
        batch, n_in, n_out = 2, 3, 2
        self.x_data = numpy.random.uniform(
            -1, 1, (batch, n_in, 3, 3, 3)).astype(dtype)
        self.W_data = numpy.random.uniform(
            -1, 1, (n_out, n_in, 1, 1, 1)).astype(dtype)
        self.b_data = numpy.random.uniform(-1, 1, n_out).astype(dtype)

    def check_array_supplied(self, x_ary, W_ary, b_ary):
        # The result must be identical whether raw arrays or Variables
        # are supplied.
        y_from_arrays = F.convolution_nd(x_ary, W_ary, b_ary)
        y_from_vars = F.convolution_nd(
            chainer.Variable(x_ary), chainer.Variable(W_ary),
            chainer.Variable(b_ary))
        testing.assert_allclose(y_from_arrays.data, y_from_vars.data)

    def test_array_supplied_cpu(self):
        self.check_array_supplied(self.x_data, self.W_data, self.b_data)

    @attr.gpu
    def test_array_supplied_gpu(self):
        self.check_array_supplied(cuda.to_gpu(self.x_data),
                                  cuda.to_gpu(self.W_data),
                                  cuda.to_gpu(self.b_data))
class TestConvolutionNDBackwardNoncontiguousGradOutputs(unittest.TestCase):
    # NumPy raises an error when the inputs of dot operation are not
    # contiguous. This test ensures this issue is correctly handled.
    # (https://github.com/chainer/chainer/issues/2744)
    # This test depends on that backward() of F.sum generates
    # a non-contiguous array.

    def _run(self, x_is_variable):
        """Run a tiny 1-d convolution and backprop through F.sum.

        ``x_is_variable`` selects which argument is wrapped in a Variable.
        out_channels must stay 1 so that F.sum's backward produces a
        non-contiguous gradient.
        """
        n_batches = 2
        in_channels = 3
        out_channels = 1  # important
        x = numpy.ones((n_batches, in_channels, 4), numpy.float32)
        w = numpy.ones((out_channels, in_channels, 3), numpy.float32)
        if x_is_variable:
            y = F.convolution_nd(chainer.Variable(x), w)
        else:
            y = F.convolution_nd(x, chainer.Variable(w))
        z = F.sum(y)
        z.backward()  # must not raise

    def test_1(self):
        # Variable input, raw weight.
        self._run(True)

    def test_2(self):
        # Raw input, Variable weight.
        self._run(False)
class TestConvolutionNDWrappers(unittest.TestCase):
    """convolution_1d/3d must match convolution_nd and reject wrong ndim."""

    def _get_data(self, ndim):
        dtype = numpy.float32
        n_in, n_out = 3, 2
        x = numpy.random.uniform(
            -1, 1, (2, n_in) + (3,) * ndim).astype(dtype)
        W = numpy.random.uniform(
            -1, 1, (n_out, n_in) + (1,) * ndim).astype(dtype)
        b = numpy.random.uniform(-1, 1, n_out).astype(dtype)
        return x, W, b

    def test_conv1d(self):
        x, W, b = self._get_data(1)
        testing.assert_allclose(
            F.convolution_nd(x, W, b).data, F.convolution_1d(x, W, b).data)

    def test_conv1d_invalid(self):
        x, W, b = self._get_data(2)
        with self.assertRaises(ValueError):
            F.convolution_1d(x, W, b)

    def test_conv3d(self):
        x, W, b = self._get_data(3)
        testing.assert_allclose(
            F.convolution_nd(x, W, b).data, F.convolution_3d(x, W, b).data)

    def test_conv3d_invalid(self):
        x, W, b = self._get_data(2)
        with self.assertRaises(ValueError):
            F.convolution_3d(x, W, b)
# Standard Chainer test-suite entry point: collect and run this module's tests.
testing.run_module(__name__, __file__)
| |
from django.db import models
from django.contrib.auth.models import User, Group, AbstractUser
from django.utils.translation import ugettext_lazy as _
import os
# Create your models here.
class IITGUser(models.Model):
    """Profile extending the built-in ``User`` with an institute role flag."""
    # NOTE(review): default=False on a OneToOneField is unusual -- it is not a
    # valid related instance; confirm the intent before changing it.
    user=models.OneToOneField(User, related_name='user', default=False)
    is_student = models.BooleanField(_('Is student'), default=False,
        help_text=_('Designates whether the user is a student or a professor.'))
    def __str__(self):
        return self.user.username
class StudentCycle(models.Model):
    """Bicycle pass record for a student, keyed one-to-one to the user."""
    user=models.OneToOneField(User, related_name='cycle_user')
    cycle_model=models.CharField(max_length=32, blank=False)
    cycle_color=models.CharField(max_length=32)
    cycle_pass_no=models.CharField(max_length=10)
    # Hostel of residence; blank allowed.
    hostel=models.CharField(max_length=50, blank=True,
        choices=[
            ('Manas', 'Manas'),
            ('Dihing', 'Dihing'),
            ('Kameng', 'Kameng'),
            ('Umiam', 'Umiam'),
            ('Barak', 'Barak'),
            ('Brahmaputra', 'Brahmaputra'),
            ('Kapili', 'Kapili'),
            ('Siang','Siang'),
            ('Dibang','Dibang'),
            ('Lohit','Lohit'),
            ('Subansiri','Subansiri'),
            ('Dhansiri','Dhansiri'),
        ])
    room_number=models.CharField(max_length=5)
    def __str__(self):
        return self.cycle_pass_no
class StudentVehicle(models.Model):
    """Vehicle-pass application/record for a student.

    Groups personal details, parents' contact details, vehicle details and
    the driving license, plus the security section's approval state.
    """
    # --- Personal details ---
    user = models.ForeignKey(User)
    name = models.CharField(max_length=255)
    roll_number = models.IntegerField()
    department = models.CharField(max_length=100)
    programme = models.CharField(max_length=10)
    date_of_birth = models.DateField()
    # Fixed: the choices list contained ('Siang', 'Siang') twice, which
    # produced a duplicate option in forms.
    hostel_name = models.CharField(max_length=32,choices=[
        ('Manas', 'Manas'),
        ('Dihing', 'Dihing'),
        ('Kameng', 'Kameng'),
        ('Umiam', 'Umiam'),
        ('Barak', 'Barak'),
        ('Brahmaputra', 'Brahmaputra'),
        ('Kapili', 'Kapili'),
        ('Siang','Siang'),
        ('Dibang','Dibang'),
        ('Lohit','Lohit'),
        ('Subansiri','Subansiri'),
        ('Dhansiri','Dhansiri'),
        ])
    room_number = models.CharField(max_length=5)
    mobile_number = models.IntegerField()
    user_photo = models.ImageField()
    identity_card = models.FileField(upload_to='identity_card')
    # --- Parents' contact details ---
    address_of_communication = models.TextField()
    address_of_communication_district = models.CharField(max_length=100)
    address_of_communication_state = models.CharField(max_length=100)
    address_of_communication_pincode = models.IntegerField()
    permanent_address = models.TextField()
    permanent_address_district = models.CharField(max_length=100)
    permanent_address_state = models.CharField(max_length=100)
    permanent_address_pincode = models.IntegerField()
    parents_contact_no = models.IntegerField()
    parents_emailid = models.EmailField(max_length=75)
    # --- Vehicle details ---
    vehicle_registration_number = models.CharField(max_length=100, unique=True)
    color = models.CharField(max_length=32)
    make_and_model = models.CharField(max_length=100)
    chassis_number = models.CharField(max_length=100)
    engine_number = models.CharField(max_length=100)
    registered_in_the_name_of = models.CharField(max_length=100)
    relation_with_owner = models.CharField(max_length=32)
    vehicle_insurance_no = models.CharField(max_length=100, unique=True)
    insurance_valid_upto = models.DateField()
    vehicle_registration_card = models.FileField(upload_to='vehicle_registration_card')
    vehicle_insurance = models.FileField(upload_to='vehicle_insurance')
    vehicle_photo = models.ImageField()
    # --- Driving license ---
    driving_license_number = models.CharField(max_length=15)
    driving_license_issue_date = models.DateField()
    driving_license_expiry_date = models.DateField()
    driving_license = models.FileField(upload_to='driving_license')
    declaration = models.TextField(blank=True, null=True,
        default="By submitting this form, I hereby declare that " +
        "I will be obliged to the following terms and conditions:\n\n" +
        "1) I will abide by the rules of Traffic,\n" +
        "2) I will not cause inconvenience to other road users.")
    date_of_application = models.DateTimeField(blank=True, null=True)
    registered_with_security_section = models.NullBooleanField(default=None)
    vehicle_pass_no = models.CharField(max_length=32, blank=True, null=True, unique=True)
    issue_date = models.DateField(null=True) #Vehicle Pass issue Date
    expiry_date = models.DateField(null=True) #Vehicle Pass Expiry Date
    def __str__(self):
        # vehicle_pass_no is nullable until a pass is issued; fall back to ''
        # so __str__ never returns None (which would raise TypeError).
        return self.vehicle_pass_no or ''
class EmployeeVehicle(models.Model):
    """Vehicle-pass application/record for an institute employee."""
    # --- Personal details ---
    user=models.ForeignKey(User)
    name = models.CharField(max_length=255)
    employee_no=models.IntegerField()
    department = models.CharField(max_length=100)
    date_of_birth = models.DateField()
    block_number = models.CharField(max_length=5)
    flat_number = models.CharField(max_length=5)
    mobile_number = models.IntegerField()
    user_photo = models.ImageField()
    identity_card = models.FileField(upload_to='identity_card')
    parking_slot_no =models.CharField(max_length=50)
    # --- Vehicle details ---
    # NOTE(review): unlike StudentVehicle, the registration number here is
    # not unique -- confirm whether that is intentional before constraining.
    vehicle_registration_number = models.CharField(max_length=100)
    color = models.CharField(max_length=32)
    make_and_model = models.CharField(max_length=100)
    chassis_number = models.CharField(max_length=100)
    engine_number = models.CharField(max_length=100)
    registered_in_the_name_of = models.CharField(max_length=100)
    vehicle_insurance_no = models.CharField(max_length=100, unique=True)
    insurance_valid_upto = models.DateField()
    vehicle_registration_card = models.FileField(
        upload_to='vehicle_registration_card')
    vehicle_insurance = models.FileField(upload_to='vehicle_insurance')
    vehicle_photo = models.ImageField(null=True)
    # --- Driving license ---
    driving_license_number = models.CharField(max_length=15)
    driving_license_issue_date = models.DateField()
    driving_license_expiry_date = models.DateField()
    driving_license = models.FileField(upload_to='driving_license')
    declaration = models.TextField(blank=True, null=True,
        default="By submitting this form, I hereby declare that " +
        "I will be obliged to the following terms and conditions:\n\n" +
        "1) I will abide by the rules of Traffic,\n" +
        "2) I will not cause inconvenience to other road users.")
    date_of_application = models.DateTimeField(blank=True, null=True)
    registered_with_security_section = models.NullBooleanField(default=None)
    vehicle_pass_no = models.CharField(max_length=32, blank=True, null=True, unique=True)
    issue_date = models.DateField(null=True)   # vehicle pass issue date
    expiry_date = models.DateField(null=True)  # vehicle pass expiry date
    def __str__(self):
        # vehicle_pass_no is nullable until a pass is issued; fall back to ''
        # so __str__ never returns None (which would raise TypeError).
        return self.vehicle_pass_no or ''
class Guard(models.Model):
    """
    Details of all security guards
    """
    guard_user = models.OneToOneField(User, related_name='guard_user')
    guard_phone_number=models.IntegerField()
    # Flag distinguishing security accounts from ordinary users.
    is_security=models.BooleanField(default=True)
    def __str__(self):
        return self.guard_user.username
class OnDutyGuard(models.Model):
    """Current posting of a guard; is_gate tells whether ``place`` names a
    gate or a parking area."""
    guard = models.OneToOneField('Guard', related_name='guard')
    place = models.CharField(max_length=100)
    is_gate = models.BooleanField()
class Gate(models.Model):
    """
    Entry/Exit gates for vehicles
    """
    gate_name = models.CharField(max_length=50, unique=True)
    # security_on_duty = models.ForeignKey(Guard, blank=True, null=True)
    def __str__(self):
        return self.gate_name
class ParkingSlot(models.Model):
    """
    Details of parking slot along with number of vehicles
    """
    parking_area_name = models.CharField(max_length = 100, unique=True)
    # security_on_duty = models.ForeignKey(Guard, blank=True, null=True)
    # Capacity and remaining free slots of this parking area.
    total_slots = models.IntegerField(default=0, blank=True, null=True)
    available_slots = models.IntegerField(default=0, blank=True, null=True)
    def __str__(self):
        return self.parking_area_name
class PersonPass(models.Model):
    """Gate pass issued to a person (rather than a vehicle)."""
    old_card_reference=models.CharField(max_length=10)
    pass_number=models.CharField(max_length=10, unique=True)
    name = models.CharField(max_length=255)
    user_photo = models.ImageField()
    age=models.IntegerField()
    identified_by = models.CharField(max_length=255)
    work_area = models.CharField(max_length=255)
    working_time = models.CharField(max_length=255)
    nature_of_work = models.CharField(max_length=255)
    issue_date=models.DateField()
    expiry_date=models.DateField()
    is_blocked=models.BooleanField()
    # Presumably the reason the pass was blocked -- confirm against views.
    reason=models.TextField(blank=True)
    def __str__(self):
        return self.pass_number
class SuspiciousVehicle(models.Model):
    """
    Details of suspicious vehicle
    """
    # The user who reported the vehicle.
    reporter=models.ForeignKey(User)
    vehicle_number = models.CharField(max_length=20, unique=True)
    vehicle_type = models.CharField(max_length=50, blank=True, null=True,
        choices=[
            ('bicycle', 'bicycle'),
            ('bike', 'bike'),
            ('car', 'car'),
            ('truck', 'truck'),
            ('courier', 'courier'),
            ('auto', 'auto'),
            ('other', 'other'),
        ])
    vehicle_model = models.CharField(max_length=100, blank=True, null=True)
    vehicle_image = models.ImageField(blank=True, null=True, upload_to='suspicious_image')
    remarks = models.TextField(max_length=1000, blank=True, null=True)
    def __str__(self):
        return self.vehicle_number
class ResidentLog(models.Model):
    """
    Log for residents of the campus
    """
    vehicle_pass_no = models.CharField(max_length=50)
    in_gate = models.ForeignKey(Gate, related_name='resident_in_gate', null=True)
    out_gate = models.ForeignKey(Gate, related_name='resident_out_gate', null=True)
    # Entry/exit timestamps; null until the corresponding event happens.
    in_time = models.DateTimeField(blank=True, null=True)
    out_time = models.DateTimeField(blank=True, null=True)
    def __str__(self):
        return self.vehicle_pass_no
class VisitorLog(models.Model):
    """
    Log of visitors for additional details
    """
    vehicle_number = models.CharField(max_length=20)
    driver_name = models.CharField(max_length=255, blank=True, null=True)
    license_number = models.CharField(max_length=20, blank=True, null=True)
    in_gate = models.ForeignKey(Gate, related_name='visitor_in_gate', null=True)
    place_to_visit = models.CharField(max_length=100, blank=True, null=True)
    purpose_of_visit = models.TextField(max_length=1000, blank=True, null=True)
    out_gate = models.ForeignKey(Gate, related_name='visitor_out_gate', null=True)
    # Entry/exit timestamps; null until the corresponding event happens.
    in_time = models.DateTimeField(blank=True, null=True)
    vehicle_type = models.CharField(max_length=50, blank=True, null=True)
    vehicle_model = models.CharField(max_length=100, blank=True, null=True)
    out_time = models.DateTimeField(blank=True, null=True)
    def __str__(self):
        return self.vehicle_number
class TheftReport(models.Model):
    """Theft report filed against a student's or an employee's vehicle;
    exactly one of stud_vehicle/emp_vehicle is expected to be set."""
    vehicle_pass_no = models.CharField(max_length=50, unique=True) #CHECK BETWEEN STUDENT AND EMPLOYEE VEHICLE
    reporter = models.ForeignKey(User, null=True) #VEHICLE SHOULD BE USERS
    stud_vehicle = models.ForeignKey('StudentVehicle', blank=True, null=True)
    emp_vehicle = models.ForeignKey('EmployeeVehicle', blank=True, null=True)
    # theft_date = models.DateField(blank=False, null=True)
    theft_time = models.DateTimeField(blank=False, null=True)
    theft_place = models.CharField(max_length=100, blank=False, null=True)
    remarks = models.TextField(max_length=1000, blank=True, null=True)
    # Workflow state of the report, advanced by the security section.
    status = models.CharField(max_length=100, default="Submitted", choices=[("Submitted", "Submitted"), ("Received by Security Section", "Received by Security Section"), ("Search in Progress","Search in Progress"), ("Vehicle Found","Vehicle Found"), ("Case Closed (Vehicle Not Found)","Case Closed (Vehicle Not Found)"), ("Vehicle Returned","Vehicle Returned")])
    def __str__(self):
        return self.vehicle_pass_no
class Place(models.Model):
    """A named location, flagged as inside or outside the campus."""
    place_name=models.CharField(max_length=32, unique=True)
    in_campus=models.BooleanField(default=True)
    def __str__(self):
        return self.place_name
class Day(models.Model):
    """A day of the week, used for bus-schedule availability."""
    day=models.CharField(max_length=32, unique=True)
    def __str__(self):
        return self.day
class BusTiming(models.Model):
    """A scheduled bus departure; ``bus_route`` lists all passing points."""
    bus_route = models.CharField(max_length=512)
    from_time = models.TimeField()
    #to_time = models.TimeField()
    bus_no = models.CharField(max_length=10 ,blank=False, unique=True)
    starting_point = models.ForeignKey('Place', related_name="starting_point")
    ending_point=models.ForeignKey('Place', related_name="ending_point")
    # Days of the week on which this departure runs.
    availability = models.ManyToManyField('Day')
    working_day=models.BooleanField()
    def __str__(self):
        return self.bus_no
| |
from __future__ import print_function
import datetime
import os
import re
import six
import textwrap
import time
from errata_tool import ErrataException, ErrataConnector, security, User
class Erratum(ErrataConnector):
def fmt(self, s):
# The textwrap library doesn't parse newlines, so you'll want to
# split on them first, then format each line, then join it all back
# up.
lines = s.split('\n')
page = []
for l in lines:
b = textwrap.TextWrapper(width=75, replace_whitespace=True,
break_long_words=False,
break_on_hyphens=False)
page.append(b.fill(l))
return '\n'.join(page)
    def _do_init(self):
        """Reset all erratum state to blank defaults (state NEW_FILES)."""
        self.errata_id = 0
        self._original_bugs = []
        self._cve_bugs = []
        self._original_state = 'NEW_FILES'
        self._original_json = {}
        self._product = None                # Set when you call new
        self._release = None                # Set when you call new
        self._new = False                   # set when you call create
        self._update = False                # Set to true if you update any fields
        self._format = True                 # Format fields on update (new adv.)
        self._buildschanged = False         # Set to true if you changed builds
        # These should be updated with the 'update()' method, and are provided
        # primarily for debugging/printing by user apps
        self.errata_type = None
        self.text_only = False
        self.text_only_cpe = None
        self.publish_date_override = None
        self.publish_date = None
        self.creation_date = None
        self.ship_date = None               # Set if SHIPPED_LIVE
        self.age = 0                        # Erratum age in days
        self.package_owner_email = None
        self.manager_email = None
        self.manager_id = 0
        self.product_id = 0
        self.release_id = 0
        self.qe_email = ''
        self.qe_group = ''
        self.synopsis = None
        self.topic = None
        self.description = None
        self.solution = None
        self.security_impact = None
        self.cve_names = None
        self.errata_bugs = []
        self.errata_builds = {}
        self.current_flags = []
        self.missing_prod_listings = []
        self.batch_id = None
def update(self, **kwargs):
if 'errata_type' in kwargs:
self.errata_type = kwargs['errata_type']
self._update = True
if 'security_impact' in kwargs:
self.security_impact = kwargs['security_impact']
self._update = True
if 'text_only' in kwargs:
self.text_only = kwargs['text_only']
self._update = True
if 'text_only_cpe' in kwargs:
self.text_only_cpe = kwargs['text_only_cpe']
self._update = True
if 'date' in kwargs:
try:
datetime.datetime.strptime(kwargs['date'], '%Y-%b-%d')
except ValueError:
raise ValueError(
'Date must be of the form: YYYY-MON-DD; 2015-Mar-11')
self.publish_date_override = kwargs['date']
self._update = True
if 'owner_email' in kwargs:
self.package_owner_email = kwargs['owner_email']
self._update = True
if 'manager_email' in kwargs:
self.manager_email = kwargs['manager_email']
self._update = True
if 'manager_id' in kwargs:
self.manager_id = kwargs['manager_id']
self._update = True
if 'qe_email' in kwargs:
self.qe_email = kwargs['qe_email']
self._update = True
if 'qe_group' in kwargs:
self.qe_group = kwargs['qe_group']
self._update = True
if 'synopsis' in kwargs:
self.synopsis = kwargs['synopsis']
self._update = True
if 'cve_names' in kwargs:
self.cve_names = kwargs['cve_names']
self._update = True
if 'topic' in kwargs:
self.topic = self.fmt(kwargs['topic'])
self._update = True
if 'description' in kwargs:
self.description = self.fmt(kwargs['description'])
self._update = True
if 'solution' in kwargs:
self.solution = self.fmt(kwargs['solution'])
self._update = True
    def __init__(self, **kwargs):
        """Locate an existing advisory or prepare a brand-new one.

        Lookup modes (checked in this order, each returns immediately):
          errata_id -- fetch an existing advisory by its Errata Tool id
          bug_id    -- fetch the advisory attached to this bug

        Otherwise a new, unfiled advisory is prepared; 'product' and
        'release' are then required, and any keyword accepted by update()
        may also be given.
        """
        self.ssl_verify = security.security_settings.ssl_verify()
        # Blank erratum e.g. if create is required
        self._do_init()
        if 'errata_id' in kwargs:
            self._fetch(kwargs['errata_id'])
            return
        if 'bug_id' in kwargs:
            self._fetch_by_bug(kwargs['bug_id'])
            return
        if 'product' not in kwargs:
            raise ErrataException('Creating errata requires a product')
        if 'release' not in kwargs:
            raise ErrataException('Creating errata requires a release')
        if 'format' in kwargs:
            self._format = kwargs['format']
        self._new = True
        self.errata_name = '(unassigned)'
        # New advisories always start life in NEW_FILES.
        self.errata_state = 'NEW_FILES'
        self._product = kwargs['product']
        self._release = kwargs['release']
        self.update(**kwargs)
        # Standard boilerplate solution text when the caller supplies none.
        if 'solution' not in kwargs:
            self.solution = self.fmt("Before applying this update, \
make sure all previously released errata relevant to your system \
have been applied.\n\
\n\
For details on how to apply this update, refer to:\n\
\n\
https://access.redhat.com/articles/11258")
        # errata tool defaults
        if 'errata_type' in kwargs:
            self.errata_type = kwargs['errata_type']
        else:
            self.errata_type = 'RHBA'
# Pull down the state of the erratum and store it.
def _fetch(self, errata_id):
self._new = False
self._update = False
self._buildschanged = False
self.errata_builds = {}
self.current_flags = []
try:
# TODO: remove call to /advisory/X.json once new API
# supports all the information
endpoint_list = [
'/advisory/' + str(errata_id) + '.json',
'/api/v1/erratum/' + str(errata_id),
]
# Want to ditch advisory_old eventually
advisory_old = None
advisory = None
erratum = None
for endpoint in endpoint_list:
r = self._get(endpoint)
if r is None:
continue
if advisory is None and 'erratum' in endpoint:
advisory = r
continue
# Fallthrough
if advisory_old is None:
advisory_old = r
if advisory is None:
print('do not have requested data bailing')
return None
# Short circuit to get the advisory
for key in advisory['errata']:
erratum = advisory['errata'][key]
self.errata_type = key.upper()
break
self.errata_id = erratum['id']
# NEW_FILES QE etc.
self.errata_state = erratum['status']
self._original_state = self.errata_state
self._original_json = erratum
# Check if the erratum is under embargo
self.embargoed = False
self.release_date = erratum['release_date']
if self.release_date is not None:
cur = datetime.datetime.utcnow()
cur = str(cur).split()[0]
if self.release_date > cur:
self.embargoed = True
# Target Ship date
d = erratum['publish_date_override']
if d is not None:
pd = time.strptime(str(d), '%Y-%m-%dT%H:%M:%SZ')
self.publish_date_override = time.strftime('%Y-%b-%d', pd)
# Target Ship date (immutable; e.g. from batch)
d = erratum['publish_date']
if d is not None:
pd = time.strptime(str(d), '%Y-%m-%dT%H:%M:%SZ')
self.publish_date = time.strftime('%Y-%b-%d', pd)
# Actual ship date (if in SHIPPED_LIVE)
if self.errata_state in ('SHIPPED_LIVE'):
d = erratum['actual_ship_date']
# Could be None. e.g. advisory 43686
if d:
d = time.strptime(str(d), '%Y-%m-%dT%H:%M:%SZ')
self.ship_date = time.strftime('%Y-%b-%d', d)
# File date
d = erratum['created_at']
d = time.strptime(str(d), '%Y-%m-%dT%H:%M:%SZ')
self.creation_date = time.strftime('%Y-%b-%d', d)
d = time.strftime('%Y-%b-%d', time.gmtime())
if self.ship_date is not None:
d = self.ship_date
filed = datetime.datetime.strptime(self.creation_date, '%Y-%b-%d')
ship = datetime.datetime.strptime(d, '%Y-%b-%d')
age = ship - filed
self.age = age.days
# Baseline flags.
if self.errata_state in ('QE'):
if 'sign_requested' in erratum and \
erratum['sign_requested'] == 0:
self.addFlags('request_sigs')
if 'rhnqa' in erratum and erratum['rhnqa'] == 0:
self.addFlags('needs_distqa')
if 'doc_complete' in erratum and erratum['doc_complete'] == 0:
self.addFlags('needs_docs')
if self.errata_state == 'NEW_FILES':
self.addFlags('needs_devel')
# Note: new errata return values will have other bits.
self.errata_name = erratum['fulladvisory']
# Grab immutable fields
self._product = advisory_old['product']['short_name']
self._release = advisory_old['release']['name']
# A maybe-empty list, containing eg. "rpm" or "docker"
self.content_types = erratum['content_types']
# store product and release IDs
self.product_id = advisory_old['product']['id']
self.release_id = advisory_old['release']['id']
self.package_owner_email = advisory_old['people']['package_owner']
self.reporter = advisory_old['people']['reporter']
self.qe_email = advisory_old['people']['assigned_to']
self.qe_group = advisory_old['people']['qe_group']
# XXX Errata tool doesn't report manager?
# https://bugzilla.redhat.com/show_bug.cgi?id=1664884
# self.manager_email = ???
self.manager_id = erratum.get('manager_id')
# Grab mutable errata content
self.text_only = erratum['text_only']
self.synopsis = erratum['synopsis']
content = advisory['content']['content']
self.text_only_cpe = content['text_only_cpe']
self.topic = content['topic']
self.description = content['description']
self.solution = content['solution']
self.errata_bugs = [int(b['bug']['id']) for b
in advisory['bugs']['bugs']]
self.cve_names = content['cve']
if self.cve_names == '':
self.cve_names = None
self._original_bugs = list(self.errata_bugs)
self._cache_bug_info(self._original_bugs)
# Try to check to see if we need devel assistance, qe assistance or
# rel prep assistance
if self.errata_state == 'QE':
self._check_tps()
self._check_bugs()
self._check_need_rel_prep()
# Check for security review
if 'rhsa' in advisory['errata']:
sa = advisory['errata']['rhsa']['security_approved']
self.security_impact = advisory['errata']['rhsa']['security_impact'] # NOQA
if sa is None:
self.addFlags('request_security')
elif sa is False:
self.addFlags('needs_security')
check_signatures = self.errata_state != 'NEW_FILES'
self._get_build_list(check_signatures)
self.batch_id = erratum.get('batch_id')
return
except RuntimeError:
# Requests seems to loop infinitely if this happens...
raise ErrataException('Pigeon crap. Did it forget to run kinit?')
except IndexError:
# errata_id not found
raise ErrataException('Errata ID field not found in response')
except Exception:
# Todo: better handling
raise
def _check_signature_for_build(self, build):
signed = False
url = os.path.join('/api/v1/build/', build)
nvr_json = self._get(url)
if u'rpms_signed' in nvr_json:
if nvr_json[u'rpms_signed']:
signed = True
return signed
    def _cache_bug_info(self, bug_id_list):
        """Intentional no-op; the RHOS 'shale' variant caches Bugzilla
        info here, which was omitted from this implementation."""
        # Omitted: RHOS shale's use of bz_cache here.
        pass
def metadataCdnRepos(self, enable=[], disable=[]):
"""Get or set the CDN repos for this advisory.
Note: This method applies only for advisories containing Docker images.
When called with no arguments, this method returns all available CDN
repos for advisory metadata. Otherwise you may enable or disable repos
here.
:param enable: (optional) A list of CDN repos to enable.
Example: ["rhel-7-server-rhceph-3-mon-rpms__x86_64"]
:param disable: (optional) A list of CDN repos to disable.
:returns: a list of dicts about each available repo, and whether they
are enabled or disabled.
"""
return self._cdn_repos('metadata_cdn_repos', enable, disable)
def textOnlyRepos(self, enable=[], disable=[]):
"""Get or set the text-only repos for this advisory.
Note: This method applies only for text-only advisories.
When called with no arguments, this method returns all available CDN
repos for the advisory text. Otherwise you may enable or disable repos
here.
:param enable: (optional) A list of CDN repos to enable.
Example: ["rhel-7-server-rhceph-3-mon-rpms__x86_64"]
:param disable: (optional) A list of CDN repos to disable.
:returns: a list of dicts about each available repo, and whether they
are enabled or disabled.
"""
return self._cdn_repos('text_only_repos', enable, disable)
def _cdn_repos(self, endpoint, enable=[], disable=[]):
"""Get or set the repos for this advisory.
Use this for setting repos on text-only or docker advisories.
:param endpoint: The erratum API endpoint to request.
Example: "metadata_cdn_repos" or "text_only_repos".
:param enable: (optional) A list of CDN repos to enable.
Example: ["rhel-7-server-rhceph-3-mon-rpms__x86_64"]
:param disable: (optional) A list of CDN repos to disable.
:returns: a list of dicts about each available repo, and whether they
are enabled or disabled.
"""
if endpoint not in ('metadata_cdn_repos', 'text_only_repos'):
raise ValueError('unsupported endpoint %s', endpoint)
url = '/api/v1/erratum/%d/%s' % (self.errata_id, endpoint)
if not enable and not disable:
return self._get(url)
payload = [{'enabled': True, 'repo': repo} for repo in enable]
payload += [{'enabled': False, 'repo': repo} for repo in disable]
result = self._put(url, json=payload)
# XXX we should fix error handling and return values in _put() to work
# like _get() currently does.
result.raise_for_status()
return result.json()
def _check_tps(self):
# Check for TPS failure (QE state only)
url = '/advisory/%i/tps_jobs.json' % self.errata_id
r = self._get(url)
distqa_tps = 0
distqa_passing = 0
for tps in r:
if tps['rhnqa'] is True:
distqa_tps = distqa_tps + 1
if tps['state'] == 'BAD' or \
'failed to generate' in tps['state']:
self.addFlags('tps_errors')
continue
if tps['state'] in ('BUSY', 'NOT_STARTED'):
self.addFlags('tps_wait')
continue
if tps['rhnqa'] is True:
distqa_passing = distqa_passing + 1
# Assume testing is done... ;)
if distqa_tps > 0 and distqa_passing != distqa_tps:
self.addFlags('needs_distqa')
self.need_rel_prep = False
else:
self.need_rel_prep = True
    def _check_bugs(self):
        """Intentional no-op placeholder; invoked from _fetch() when the
        advisory is in QE state."""
        pass
    def _check_need_rel_prep(self):
        """Intentional no-op; the RHOS 'shale' variant computes
        need_rel_prep here using its bz_cache."""
        # Omitted: RHOS shale's "need_rel_prep" here, uses bz_cache.
        pass
def externalTests(self, test_type=None):
"""Get active external test results for this advisory.
:param test_type: str, like "rpmdiff" or "covscan"
:returns: a possibly-empty list of dicts, one per result.
"""
tmpl = '/api/v1/external_tests/?filter[active]=true'
tmpl += '&filter[errata_id]={errata_id}'
if test_type:
tmpl += '&filter[test_type]={test_type}'
url = tmpl.format(errata_id=self.errata_id, test_type=test_type)
data = self.get_paginated_data(url)
return data
def _get_build_list(self, check_signatures=False):
# Grab build list; store on a per-key basis
# REFERENCE
# Item 5.2.10.3. GET /advisory/{id}/builds.json
# Then try to check to see if they are signed or not
# Item 5.2.2.1. GET /api/v1/build/{id_or_nvr}
url = "/advisory/%i/builds.json" % self.errata_id
product_versions = self._get(url)
have_all_sigs = True
for product_version in product_versions:
builds = []
for pv_builds in product_versions[product_version]:
for nvr, mappings in six.iteritems(pv_builds):
builds.append(nvr)
if not mappings:
self.missing_prod_listings.append(nvr)
if have_all_sigs and check_signatures:
if not self._check_signature_for_build(nvr):
self.addFlags('needs_sigs')
have_all_sigs = False
self.errata_builds[product_version] = builds
if have_all_sigs:
self.removeFlags(['request_sigs', 'needs_sigs'])
def _fetch_by_bug(self, bug_id):
# print("fetch_by_bug")
try:
url = "/bugs/%i/advisories.json" % bug_id
rj = self._get(url)
stored = False
for e in rj:
if not stored:
stored = True
self._fetch(e['id'])
else:
print('Warning: Ignoring additional erratum ' +
str(e['id']) + ' for bug ', str(bug_id))
except RuntimeError:
# Requests seems to loop infinitely if this happens...
raise ErrataException('Pigeon crap. Did it forget to run kinit?')
except IndexError:
# errata_id not found
raise ErrataException('Errata ID field not found in response')
except LookupError:
# Errata not found
pass
except Exception:
# Todo: better handling
raise
def refresh(self):
if self.errata_id != 0:
self._fetch(self.errata_id)
def reloadBuilds(self,
no_rpm_listing_only=False,
no_current_files_only=False):
val = {
'no_rpm_listing_only': int(no_rpm_listing_only),
'no_current_files_only': int(no_current_files_only),
}
url = '/api/v1/erratum/%d/reload_builds' % self.errata_id
r = self._post(url, data=val)
self._processResponse(r)
return r.json()
def setState(self, state):
if self._new:
raise ErrataException('Cannot simultaneously create and change ' +
'an erratum\'s state')
if self.errata_id == 0:
raise ErrataException('Cannot change state for uninitialized ' +
'erratum')
if self.errata_state.upper() == 'NEW_FILES':
if state.upper() == 'QE':
self.errata_state = 'QE'
elif self.errata_state.upper() == 'QE':
if state.upper() == 'NEW_FILES':
self.errata_state = 'NEW_FILES'
if state.upper() == 'REL_PREP':
self.errata_state = 'REL_PREP'
elif self.errata_state.upper() == 'REL_PREP':
if state.upper() == 'NEW_FILES':
self.errata_state = 'NEW_FILES'
if state.upper() == 'QE':
self.errata_state = 'QE'
else:
raise ErrataException('Cannot change state from ' +
self.errata_state.upper() + " to " +
state.upper())
def _addBug(self, b):
if not isinstance(b, int):
b = int(b)
if self.errata_bugs is None:
self.errata_bugs = []
self.errata_bugs.append(b)
return
if b not in self.errata_bugs:
self.errata_bugs.append(b)
def addBugs(self, buglist):
if isinstance(buglist, int):
self._addBug(buglist)
return
for b in buglist:
self._addBug(b)
def _removeBug(self, b):
if not isinstance(b, int):
b = int(b)
if b in self.errata_bugs:
self.errata_bugs.remove(b)
def removeBugs(self, buglist):
if isinstance(buglist, int):
self._removeBug(buglist)
return
for b in buglist:
self._removeBug(b)
    # Omitted: RHOS shale's syncBugs()
    def syncBugs(self):
        """Unsupported here; only the RHOS 'shale' variant implements it."""
        raise NotImplementedError('RHOS-only method')
    # Omitted: RHOS shale's findMissingBuilds()
    def findMissingBuilds(self):
        """Unsupported here; only the RHOS 'shale' variant implements it."""
        raise NotImplementedError('RHOS-only method')
def changeDocsReviewer(self, login_name):
val = {'login_name': login_name}
url = '/api/v1/erratum/%d/change_docs_reviewer' % self.errata_id
r = self._post(url, data=val)
self._processResponse(r)
def addCC(self, email):
"""Add someone to the CC list for this advisory. """
# rhbz#1572000 will add an official API for this.
val = {'id': self.errata_id, 'email': email}
url = '/carbon_copies/add_to_cc_list'
r = self._post(url, data=val)
self._processResponse(r)
#
# Flag list could be replaced with a set at some
# point.
#
# Some flags are tracked and managed here in
# errata-tool, but users can add their own as well.
#
def addFlags(self, flags):
if not isinstance(flags, list):
flags = [flags]
# Two loops intentionally. First one is for
# input validation.
for f in flags:
if not isinstance(f, str):
raise ValueError('flag ' + str(f) + ' is not a string')
for f in flags:
if f not in self.current_flags:
self.current_flags.append(f)
def removeFlags(self, flags):
if not isinstance(flags, list):
flags = [flags]
# Two loops intentionally. First one is for
# input validation.
for f in flags:
if not isinstance(f, str):
raise ValueError('flag ' + str(f) + ' is not a string')
for f in flags:
if f in self.current_flags:
self.current_flags.remove(f)
# Adding and removing builds can't be done atomically. Wondering whether
def addBuildsDirect(self, buildlist, release, **kwargs):
if 'file_types' not in kwargs:
file_types = None
else:
file_types = kwargs['file_types']
blist = []
if isinstance(buildlist, six.string_types):
blist.append(buildlist)
else:
blist = buildlist
# Adding builds
# List of dicts.
pdata = []
for b in blist:
# Avoid double-add
if release in self.errata_builds and \
b in self.errata_builds[release]:
continue
val = {}
if file_types is not None and b in file_types:
val['file_types'] = file_types[b]
val['build'] = b
val['product_version'] = release
pdata.append(val)
url = "/api/v1/erratum/%i/add_builds" % self.errata_id
r = self._post(url, json=pdata)
self._processResponse(r)
self._buildschanged = True
return
def addBuilds(self, buildlist, **kwargs):
"""Add Build(s) to erratum"""
if self._new:
raise ErrataException('Cannot add builds to unfiled erratum')
release = None
if 'release' in kwargs:
release = kwargs['release']
del kwargs['release']
if release is None and len(self.errata_builds.keys()) == 1:
release = list(self.errata_builds.keys())[0]
if release is None:
raise ErrataException('Need to specify a release')
return self.addBuildsDirect(buildlist, release, **kwargs)
def setFileInfo(self, file_info):
# XXX API broken??
if not isinstance(file_info, dict):
raise ValueError('file_info is not a dict')
if len(file_info) < 1:
return
# Get:
url = '/api/v1/erratum/%i/filemeta' % self.errata_id
r = self._get(url)
info = []
files = [k for k in file_info]
for f in r:
# print(f['file']['path'] + f['file']['id'])
fn = os.path.basename(f['file']['path'])
if fn in files:
info.append({'file': f['file']['id'],
'title': file_info[fn]['title']})
# print(info)
# Set:
# url += '?put_rank=true'
r = self._put(url, data=info)
self._processResponse(r)
def removeBuilds(self, buildlist):
"""Remove build(s) from advisory"""
if not isinstance(buildlist, (str, list)):
raise IndexError
# Removing builds
# REFERENCE
if isinstance(buildlist, six.string_types):
builds = []
if len(buildlist.strip()) == 0:
raise IndexError
builds.append(buildlist.strip())
else:
builds = buildlist
if len(builds) == 0:
raise IndexError
for b in builds:
val = {}
val['nvr'] = b
url = "/api/v1/erratum/%i/remove_build" % self.errata_id
r = self._post(url, data=val)
self._processResponse(r)
self._buildschanged = True
    def _write(self):
        """Create (POST) or update (PUT) this advisory in the Errata Tool.

        Builds the form payload from current attribute values, force-syncs
        newly added bug states through the web UI, and works around the
        API's inability to remove the last remaining bug.  Called from
        commit().
        """
        pdata = {}
        # See below for APIs used when talking to the errata tool.
        if self._new:
            # Creation requires owner, manager, product and release.
            if self.package_owner_email is None:
                raise ErrataException("Can't create erratum without " +
                                      "package owner email")
            if self.manager_email is None:
                if self.manager_id:
                    # Resolve the manager's email from the ET account id.
                    manager = User(self.manager_id)
                    self.manager_email = manager.email_address
                else:
                    raise ErrataException("Can't create erratum without " +
                                          "manager email or manager id")
            if self._product is None:
                raise ErrataException("Can't create erratum with no " +
                                      "product specified")
            if self._release is None:
                raise ErrataException("Can't create erratum with no " +
                                      "release specified")
            if self.errata_type is None:
                self.errata_type = 'RHBA'
            pdata['product'] = self._product
            pdata['release'] = self._release
        if self.package_owner_email is not None:
            pdata['advisory[package_owner_email]'] = self.package_owner_email
        if self.manager_email is not None:
            pdata['advisory[manager_email]'] = self.manager_email
        if self.qe_email is not None and self.qe_email != '':
            pdata['advisory[assigned_to_email]'] = self.qe_email
        if self.qe_group is not None and self.qe_group != '':
            pdata['advisory[quality_responsibility_name]'] = self.qe_group
        # These fields are mandatory for any write.
        if self.synopsis is None:
            raise ErrataException("Can't write erratum without synopsis")
        if self.topic is None:
            raise ErrataException("Can't write erratum without topic")
        if self.description is None:
            raise ErrataException("Can't write erratum without description")
        if self.solution is None:
            raise ErrataException("Can't write erratum without a solution")
        if self.errata_bugs is None:
            raise ErrataException("Can't write erratum without a list of " +
                                  "bugs")
        # Default from errata tool
        pdata['advisory[errata_type]'] = self.errata_type
        # POST/PUT a 1 or 0 value for this text_only boolean
        pdata['advisory[text_only]'] = int(self.text_only)
        if self.text_only_cpe:
            pdata['advisory[text_only_cpe]'] = self.text_only_cpe
        if self.publish_date_override:
            pdata['advisory[publish_date_override]'] = \
                self.publish_date_override
        # ET automagically handles the severity for the synopsis in RHSA's
        # but will still see it as a docs change if we write the same one
        # back again, so remove it.
        if self.errata_type == 'RHSA':
            severity = r'^(Low|Moderate|Important|Critical): '
            self.synopsis = re.sub(severity, "", self.synopsis)
            pdata['advisory[cve]'] = self.cve_names
            val = 'None'
            if self.security_impact is not None:
                val = self.security_impact
            pdata['advisory[security_impact]'] = val
        pdata['advisory[synopsis]'] = self.synopsis
        pdata['advisory[topic]'] = self.topic
        pdata['advisory[description]'] = self.description
        pdata['advisory[solution]'] = self.solution
        # XXX Delete all bugs is a special case
        last_bug = None
        if len(self.errata_bugs) == 0 and len(self._original_bugs) > 0:
            last_bug = self._original_bugs[0]
            self.errata_bugs = [last_bug]
        # Add back any Vulnerability bugs
        allbugs = list(set(self.errata_bugs) | set(self._cve_bugs))
        idsfixed = ' '.join(str(i) for i in allbugs)
        pdata['advisory[idsfixed]'] = idsfixed
        # Sync newly added bug states
        newbugs = list(set(allbugs) - set(self._original_bugs))
        if len(newbugs):
            # url = '/api/v1/bug/refresh'
            # print(allbugs)
            # r = self._post(url, data=newbugs)
            # self._processResponse(r)
            # ^ XXX broken
            #
            # XXX Sync bug states by force using UI
            # Note: UI limits syncs to 100 bugs per run, so split
            # up into chunks
            syncs = [newbugs[x:x + 100] for x in range(0, len(newbugs), 100)]
            bug_list = {}
            for s in syncs:
                bug_list['issue_list'] = ' '.join(str(i) for i in s)
                url = "/bugs/sync_bug_list"
                r = self._post(url, data=bug_list)
            # XXX should we process return code?
        # Push it
        if self._new:
            # REFERENCE
            # New is 'POST'
            url = "/api/v1/erratum"
            r = self._post(url, data=pdata)
            self._processResponse(r)
            rj = r.json()
            json_errata_type = self.errata_type.lower()
            self.errata_id = rj['errata'][json_errata_type]['errata_id']
            # XXX return JSON returns full advisory name but not
            # typical advisory name - e.g. RHSA-2015:19999-01, but not
            # RHSA-2015:19999, but it's close enough
            self.errata_name = rj['errata'][json_errata_type]['fulladvisory']
        else:
            # REFERENCE
            # Update is 'PUT'
            url = "/api/v1/erratum/%i" % self.errata_id
            r = self._put(url, data=pdata)
            self._processResponse(r)
        # XXX WOW VERY HACK
        # If deleting last bug...
        if last_bug is not None:
            # This doesn't work to remove the last bug, nor does setting
            # idsfixed to empty-string
            # url = "/api/v1/erratum/%i/remove_bug" % self.errata_id
            # pdata = {'bug': str(last_bug)}
            # Solution: Use hacks to pretend we're using the remove-bugs
            # web UI :(
            url = '/bugs/remove_bugs_from_errata/%i' % self.errata_id
            pdata = {}
            pdata['bug[' + str(last_bug) + ']'] = 1
            r = self._post(url, data=pdata)
            self._processResponse(r)
def _putStatus(self):
# REFERENCE
# State change is 'POST'
pdata = {}
pdata['new_state'] = self.errata_state
url = "/api/v1/erratum/%i" % self.errata_id
url += "/change_state"
r = self._post(url, data=pdata)
self._processResponse(r)
    def commit(self):
        """Flush pending changes to the Errata Tool.

        For a new advisory: create it, refresh, and return None.  For an
        existing one: apply a NEW_FILES transition first (the ET requires
        NEW_FILES before some edits), then write field/bug changes, then
        any other state transition.  Returns True when something was
        written (and the advisory was refreshed), False otherwise.
        """
        ret = False
        # Commit changes
        if self._new:
            self._write()
            self.refresh()
            # self.syncBugs() # RHOS shale only
            return
        # XXX Not atomic, but we should refresh on commit
        if self._buildschanged:
            ret = True
        try:
            # Special case:
            # If new state is 'NEW_FILES', set it before anything else
            if (self._original_state != self.errata_state and
                    self.errata_state.upper() == 'NEW_FILES'):
                self._putStatus()
                ret = True
            # Update buglist if it changed
            # Errata tool is very slow - don't PUT if it hasn't changed
            allbugs = list(set(self.errata_bugs) | set(self._cve_bugs))
            if sorted(self._original_bugs) != sorted(allbugs) \
                    or self._update:
                self._write()
                # self.syncBugs() # RHOS shale only
                ret = True
            # Perhaps someone did addbugs + setState('QE')
            if (self._original_state != self.errata_state and
                    self.errata_state.upper() != 'NEW_FILES'):
                self._putStatus()
                ret = True
        except ErrataException:
            raise
        if ret:
            self.refresh()
        return ret
def push(self, target='stage'):
"""Push an advisory to "stage", "live", or both.
:param target: A string "stage" or "live". Defaults to "stage". You
can also pass a list here, ['stage', 'live'] to do both
in one operation.
:returns: a list describing the Errata Tool's newly triggered push
tasks. Each push task includes an "id". You can query the
status of this push ID at
/api/v1/erratum/{id}/push/{push_id} .
"""
# Accept 'stage', 'live', or a set of specific options
if self.errata_id == 0:
return False
# Basic mode: 'stage' or 'live'
url = '/api/v1/erratum/' + str(self.errata_id) + '/push'
if isinstance(target, str):
if target != 'stage' and target != 'live':
raise ValueError('Wrong value for target: expected ' +
'\'stage\', \'live\', or a list')
r = self._post(url + '?defaults=' + str(target))
self._processResponse(r)
return r.json()
# Advanced mode: see ET documentation.
if not isinstance(target, list):
raise ValueError('Wrong value for target: expected ' +
'\'stage\', \'live\', or a list')
r = self._post(url, data=target)
self._processResponse(r)
return r.json()
    def dump(self):
        """Print a human-readable summary of this advisory to stdout."""
        print(self)
        print("Package Owner Email: " + self.package_owner_email)
        print("Manager Email: " + self.manager_email)
        print("QE: " + self.qe_email + " " + self.qe_group)
        print("Type: " + self.errata_type)
        if self.creation_date is not None:
            print("Created: " + self.creation_date)
        if self.errata_state == 'SHIPPED_LIVE':
            print("Shipped: " + self.ship_date)
            print("Age: " + str(self.age) + " days")
        if len(self.current_flags) > 0:
            print("Flags: " + ' '.join(self.current_flags))
        print("Synopsis: " + self.synopsis)
        if self.publish_date_override is not None:
            print('')
            print("Ship Target: {0}".format(self.publish_date_override))
        # NOTE(review): each of the next two branches may print another
        # "Ship Target" line (batch advisories can show it twice) —
        # presumably intentional, but worth confirming.
        if self.publish_date is not None:
            print('')
            print("Ship Target: {0}".format(self.publish_date))
        if self.batch_id is not None:
            print('')
            print("Batch: {0}".format(self.batch_id))
            print("Ship Target: {0}".format(self.publish_date))
        print('')
        print("Topic")
        print("=====")
        print(self.topic)
        print('')
        print("Description")
        print("===========")
        print(self.description)
        print('')
        print("Solution")
        print("========")
        print(self.solution)
def url(self):
return super(Erratum, self).canonical_url("/advisory/" +
str(self.errata_id))
def get_erratum_data(self):
"""Return the server's JSON data for this advisory.
This returns the JSON response from the Errata Tool's
REST API /advisory/<errata_id>.json and /api/v1/erratum/<errata_id>
endpoints. Use this when debugging interactions with the Errata Tool
or when passing this data on to other non-Python tools.
:returns: a dict that is simply the parsed JSON from the server.
"""
return dict(self._original_json) # shallow copy
    # Ordering and equality are defined purely by the numeric errata_id.
    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable on Python 3 unless __hash__ is defined elsewhere in this
    # class — confirm against the part of the class not shown here.
    def __lt__(self, other):
        return self.errata_id < other.errata_id
    def __gt__(self, other):
        return self.errata_id > other.errata_id
    def __eq__(self, other):
        return self.errata_id == other.errata_id
    def __le__(self, other):
        return self.errata_id <= other.errata_id
    def __ge__(self, other):
        return self.errata_id >= other.errata_id
    def __ne__(self, other):
        return self.errata_id != other.errata_id
    def __str__(self):
        """Multi-line, human-readable rendering of the advisory."""
        s = "\n  builds: \n"
        for k in self.errata_builds:
            s = s + "    " + k + "\n"
            # Builds sorted case-insensitively within each product version.
            for b in sorted(self.errata_builds[k], key=lambda x: x.lower()):
                s = s + "        " + b + "\n"
        # Optional sections are prepended, so they appear above the builds.
        if len(self.current_flags) > 0:
            s = "\n  Flags: " + ' '.join(self.current_flags) + s
        if len(self._cve_bugs) > 0:
            s = "\n  Impact: " + str(self.security_impact) + s
            s = "\n  CVE bugs: " + str(self._cve_bugs) + s
        if self.cve_names is not None:
            s = "\n  CVEs: " + str(self.cve_names) + s
        # Ship target preference: explicit override, else the (immutable)
        # publish date; a batch's publish date also wins when batched.
        pdate = self.publish_date_override
        if pdate is None and self.publish_date is not None:
            pdate = self.publish_date
        if self.batch_id is not None and self.publish_date is not None:
            pdate = self.publish_date
        return self.errata_name + ": " + self.synopsis + \
            "\n  package owner: " + self.package_owner_email + \
            "  qe: " + self.qe_email + \
            "  qe_group: " + self.qe_group + \
            "\n  url: " + \
            self.url() + \
            "\n  state: " + self.errata_state + \
            "\n  created: " + str(self.creation_date) + \
            "\n  ship target: " + str(pdate) + \
            "\n  batch_id: " + str(self.batch_id) + \
            "\n  ship date: " + str(self.ship_date) + \
            "\n  age: " + str(self.age) + " days" \
            "\n  bugs: " + str(self.errata_bugs) + \
            s
    def __int__(self):
        # int(erratum) yields the numeric advisory id.
        return self.errata_id
# ---------------------------------------------------------------------------
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest import test
import testtools
from neutron.tests.api import base
class SharedNetworksTest(base.BaseAdminNetworkTest):
@classmethod
def resource_setup(cls):
super(SharedNetworksTest, cls).resource_setup()
cls.shared_network = cls.create_shared_network()
@test.idempotent_id('6661d219-b96d-4597-ad10-55766123421a')
def test_filtering_shared_networks(self):
# this test is necessary because the 'shared' column does not actually
# exist on networks so the filter function has to translate it into
# queries against the RBAC table
self.create_network()
self._check_shared_correct(
self.client.list_networks(shared=True)['networks'], True)
self._check_shared_correct(
self.admin_client.list_networks(shared=True)['networks'], True)
self._check_shared_correct(
self.client.list_networks(shared=False)['networks'], False)
self._check_shared_correct(
self.admin_client.list_networks(shared=False)['networks'], False)
def _check_shared_correct(self, items, shared):
self.assertNotEmpty(items)
self.assertTrue(all(n['shared'] == shared for n in items))
@test.idempotent_id('6661d219-b96d-4597-ad10-51672353421a')
def test_filtering_shared_subnets(self):
# shared subnets need to be tested because their shared status isn't
# visible as a regular API attribute and it's solely dependent on the
# parent network
reg = self.create_network()
priv = self.create_subnet(reg, client=self.client)
shared = self.create_subnet(self.shared_network,
client=self.admin_client)
self.assertIn(shared, self.client.list_subnets(shared=True)['subnets'])
self.assertIn(shared,
self.admin_client.list_subnets(shared=True)['subnets'])
self.assertNotIn(priv,
self.client.list_subnets(shared=True)['subnets'])
self.assertNotIn(priv,
self.admin_client.list_subnets(shared=True)['subnets'])
self.assertIn(priv, self.client.list_subnets(shared=False)['subnets'])
self.assertIn(priv,
self.admin_client.list_subnets(shared=False)['subnets'])
self.assertNotIn(shared,
self.client.list_subnets(shared=False)['subnets'])
self.assertNotIn(shared,
self.admin_client.list_subnets(shared=False)['subnets'])
@test.idempotent_id('6661d219-b96d-4597-ad10-55766ce4abf7')
def test_create_update_shared_network(self):
shared_network = self.create_shared_network()
net_id = shared_network['id']
self.assertEqual('ACTIVE', shared_network['status'])
self.assertIsNotNone(shared_network['id'])
self.assertTrue(self.shared_network['shared'])
new_name = "New_shared_network"
body = self.admin_client.update_network(net_id, name=new_name,
admin_state_up=False,
shared=False)
updated_net = body['network']
self.assertEqual(new_name, updated_net['name'])
self.assertFalse(updated_net['shared'])
self.assertFalse(updated_net['admin_state_up'])
@test.idempotent_id('9c31fabb-0181-464f-9ace-95144fe9ca77')
def test_create_port_shared_network_as_non_admin_tenant(self):
# create a port as non admin
body = self.client.create_port(network_id=self.shared_network['id'])
port = body['port']
self.addCleanup(self.admin_client.delete_port, port['id'])
# verify the tenant id of admin network and non admin port
self.assertNotEqual(self.shared_network['tenant_id'],
port['tenant_id'])
@test.idempotent_id('3e39c4a6-9caf-4710-88f1-d20073c6dd76')
def test_create_bulk_shared_network(self):
# Creates 2 networks in one request
net_nm = [data_utils.rand_name('network'),
data_utils.rand_name('network')]
body = self.admin_client.create_bulk_network(net_nm, shared=True)
created_networks = body['networks']
for net in created_networks:
self.addCleanup(self.admin_client.delete_network, net['id'])
self.assertIsNotNone(net['id'])
self.assertTrue(net['shared'])
def _list_shared_networks(self, user):
body = user.list_networks(shared=True)
networks_list = [net['id'] for net in body['networks']]
self.assertIn(self.shared_network['id'], networks_list)
self.assertTrue(self.shared_network['shared'])
@test.idempotent_id('a064a9fd-e02f-474a-8159-f828cd636a28')
def test_list_shared_networks(self):
# List the shared networks and confirm that
# shared network extension attribute is returned for those networks
# that are created as shared
self._list_shared_networks(self.admin_client)
self._list_shared_networks(self.client)
def _show_shared_network(self, user):
body = user.show_network(self.shared_network['id'])
show_shared_net = body['network']
self.assertEqual(self.shared_network['name'], show_shared_net['name'])
self.assertEqual(self.shared_network['id'], show_shared_net['id'])
self.assertTrue(show_shared_net['shared'])
    @test.idempotent_id('e03c92a2-638d-4bfa-b50a-b1f66f087e58')
    def test_show_shared_networks_attribute(self):
        # Show a shared network and confirm that the 'shared' extension
        # attribute is returned, for both the admin and unprivileged client.
        self._show_shared_network(self.admin_client)
        self._show_shared_network(self.client)
class AllowedAddressPairSharedNetworkTest(base.BaseAdminNetworkTest):
    """Verify allowed-address-pairs are rejected on another tenant's shared net.

    The fixture network is created (shared) by the admin; the tests then try
    to attach allowed address pairs to ports on it as a regular tenant and
    expect Forbidden.
    """

    # Address pair payload used by both the create and update attempts.
    allowed_address_pairs = [{'ip_address': '1.1.1.1'}]

    @classmethod
    @test.requires_ext(extension="allowed-address-pairs", service="network")
    def skip_checks(cls):
        super(AllowedAddressPairSharedNetworkTest, cls).skip_checks()

    @classmethod
    def resource_setup(cls):
        super(AllowedAddressPairSharedNetworkTest, cls).resource_setup()
        # Admin-owned shared network with one subnet for port creation.
        cls.network = cls.create_shared_network()
        cls.create_subnet(cls.network, client=cls.admin_client)

    @test.attr(type='smoke')
    @test.idempotent_id('86c3529b-1231-40de-803c-ffffffff1fff')
    def test_create_with_address_pair_blocked_on_other_network(self):
        with testtools.ExpectedException(lib_exc.Forbidden):
            self.create_port(self.network,
                             allowed_address_pairs=self.allowed_address_pairs)

    @test.attr(type='smoke')
    @test.idempotent_id('86c3529b-1231-40de-803c-ffffffff2fff')
    def test_update_with_address_pair_blocked_on_other_network(self):
        port = self.create_port(self.network)
        with testtools.ExpectedException(lib_exc.Forbidden):
            self.update_port(
                port, allowed_address_pairs=self.allowed_address_pairs)
class RBACSharedNetworksTest(base.BaseAdminNetworkTest):
    """Tests for sharing networks to specific tenants via RBAC policies.

    Uses three credential sets: 'primary' (self.client), 'alt'
    (self.client2) and 'admin' (self.admin_client).
    """

    # Each credential set must get its own isolated tenant for the
    # visibility assertions below to be meaningful.
    force_tenant_isolation = True
    credentials = ['primary', 'alt', 'admin']

    @classmethod
    @test.requires_ext(extension="rbac-policies", service="network")
    def resource_setup(cls):
        super(RBACSharedNetworksTest, cls).resource_setup()
        # Second unprivileged client, backed by the 'alt' credentials.
        cls.client2 = cls.alt_manager.network_client

    def _make_admin_net_and_subnet_shared_to_tenant_id(self, tenant_id):
        # Create an admin-owned network + subnet and share the network to
        # tenant_id with an 'access_as_shared' RBAC policy. Returns all
        # three created resources.
        net = self.admin_client.create_network(
            name=data_utils.rand_name('test-network-'))['network']
        self.addCleanup(self.admin_client.delete_network, net['id'])
        subnet = self.create_subnet(net, client=self.admin_client)
        # network is shared to first unprivileged client by default
        pol = self.admin_client.create_rbac_policy(
            object_type='network', object_id=net['id'],
            action='access_as_shared', target_tenant=tenant_id
        )['rbac_policy']
        return {'network': net, 'subnet': subnet, 'policy': pol}

    @test.attr(type='smoke')
    @test.idempotent_id('86c3529b-1231-40de-803c-afffffff1fff')
    def test_network_only_visible_to_policy_target(self):
        net = self._make_admin_net_and_subnet_shared_to_tenant_id(
            self.client.tenant_id)['network']
        self.client.show_network(net['id'])
        with testtools.ExpectedException(lib_exc.NotFound):
            # client2 has not been granted access
            self.client2.show_network(net['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('86c3529b-1231-40de-803c-afffffff2fff')
    def test_subnet_on_network_only_visible_to_policy_target(self):
        sub = self._make_admin_net_and_subnet_shared_to_tenant_id(
            self.client.tenant_id)['subnet']
        self.client.show_subnet(sub['id'])
        with testtools.ExpectedException(lib_exc.NotFound):
            # client2 has not been granted access
            self.client2.show_subnet(sub['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('86c3529b-1231-40de-803c-afffffff2eee')
    def test_policy_target_update(self):
        res = self._make_admin_net_and_subnet_shared_to_tenant_id(
            self.client.tenant_id)
        # change the policy target to client2
        update_res = self.admin_client.update_rbac_policy(
            res['policy']['id'], target_tenant=self.client2.tenant_id)
        self.assertEqual(self.client2.tenant_id,
                         update_res['rbac_policy']['target_tenant'])
        # make sure everything else stayed the same
        res['policy'].pop('target_tenant')
        update_res['rbac_policy'].pop('target_tenant')
        self.assertEqual(res['policy'], update_res['rbac_policy'])

    @test.idempotent_id('86c3529b-1231-40de-803c-affefefef321')
    def test_duplicate_policy_error(self):
        res = self._make_admin_net_and_subnet_shared_to_tenant_id(
            self.client.tenant_id)
        # An identical (object, action, target) policy must conflict.
        with testtools.ExpectedException(lib_exc.Conflict):
            self.admin_client.create_rbac_policy(
                object_type='network', object_id=res['network']['id'],
                action='access_as_shared', target_tenant=self.client.tenant_id)

    @test.attr(type='smoke')
    @test.idempotent_id('86c3529b-1231-40de-803c-afffffff3fff')
    def test_port_presence_prevents_network_rbac_policy_deletion(self):
        res = self._make_admin_net_and_subnet_shared_to_tenant_id(
            self.client.tenant_id)
        port = self.client.create_port(network_id=res['network']['id'])['port']
        # a port on the network should prevent the deletion of a policy
        # required for it to exist
        with testtools.ExpectedException(lib_exc.Conflict):
            self.admin_client.delete_rbac_policy(res['policy']['id'])
        # a wildcard policy should allow the specific policy to be deleted
        # since it allows the remaining port
        wild = self.admin_client.create_rbac_policy(
            object_type='network', object_id=res['network']['id'],
            action='access_as_shared', target_tenant='*')['rbac_policy']
        self.admin_client.delete_rbac_policy(res['policy']['id'])
        # now that the wildcard is the only remaining policy, it should be
        # subjected to the same restriction
        with testtools.ExpectedException(lib_exc.Conflict):
            self.admin_client.delete_rbac_policy(wild['id'])
        # similarly, we can't update the policy to a different tenant
        with testtools.ExpectedException(lib_exc.Conflict):
            self.admin_client.update_rbac_policy(
                wild['id'], target_tenant=self.client2.tenant_id)
        self.client.delete_port(port['id'])
        # anchor is gone, delete should pass
        self.admin_client.delete_rbac_policy(wild['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('86c3529b-1231-40de-803c-beefbeefbeef')
    def test_tenant_can_delete_port_on_own_network(self):
        net = self.create_network()  # owned by self.client
        self.client.create_rbac_policy(
            object_type='network', object_id=net['id'],
            action='access_as_shared', target_tenant=self.client2.tenant_id)
        port = self.client2.create_port(network_id=net['id'])['port']
        # The network owner may delete ports that others created on it.
        self.client.delete_port(port['id'])

    @test.idempotent_id('f7539232-389a-4e9c-9e37-e42a129eb541')
    def test_tenant_cant_delete_other_tenants_ports(self):
        net = self.create_network()
        port = self.client.create_port(network_id=net['id'])['port']
        self.addCleanup(self.client.delete_port, port['id'])
        # client2 does not even see the port, so deletion yields NotFound.
        with testtools.ExpectedException(lib_exc.NotFound):
            self.client2.delete_port(port['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('86c3529b-1231-40de-803c-afffffff4fff')
    def test_regular_client_shares_to_another_regular_client(self):
        net = self.create_network()  # owned by self.client
        with testtools.ExpectedException(lib_exc.NotFound):
            self.client2.show_network(net['id'])
        pol = self.client.create_rbac_policy(
            object_type='network', object_id=net['id'],
            action='access_as_shared', target_tenant=self.client2.tenant_id)
        self.client2.show_network(net['id'])

        self.assertIn(pol['rbac_policy'],
                      self.client.list_rbac_policies()['rbac_policies'])
        # ensure that 'client2' can't see the policy sharing the network to it
        # because the policy belongs to 'client'
        self.assertNotIn(pol['rbac_policy']['id'],
                         [p['id']
                          for p in self.client2.list_rbac_policies()['rbac_policies']])

    @test.attr(type='smoke')
    @test.idempotent_id('bf5052b8-b11e-407c-8e43-113447404d3e')
    def test_filter_fields(self):
        net = self.create_network()
        self.client.create_rbac_policy(
            object_type='network', object_id=net['id'],
            action='access_as_shared', target_tenant=self.client2.tenant_id)
        # Each 'fields' query should restrict the response to exactly
        # those keys.
        field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
                      ('tenant_id', 'target_tenant'))
        for fields in field_args:
            res = self.client.list_rbac_policies(fields=fields)
            self.assertEqual(set(fields), set(res['rbac_policies'][0].keys()))

    @test.attr(type='smoke')
    @test.idempotent_id('86c3529b-1231-40de-803c-afffffff5fff')
    def test_policy_show(self):
        res = self._make_admin_net_and_subnet_shared_to_tenant_id(
            self.client.tenant_id)
        p1 = res['policy']
        p2 = self.admin_client.create_rbac_policy(
            object_type='network', object_id=res['network']['id'],
            action='access_as_shared',
            target_tenant='*')['rbac_policy']

        # show_rbac_policy must round-trip both the tenant-specific and the
        # wildcard policy unchanged.
        self.assertEqual(
            p1, self.admin_client.show_rbac_policy(p1['id'])['rbac_policy'])
        self.assertEqual(
            p2, self.admin_client.show_rbac_policy(p2['id'])['rbac_policy'])

    @test.attr(type='smoke')
    @test.idempotent_id('e7bcb1ea-4877-4266-87bb-76f68b421f31')
    def test_filter_policies(self):
        net = self.create_network()
        pol1 = self.client.create_rbac_policy(
            object_type='network', object_id=net['id'],
            action='access_as_shared',
            target_tenant=self.client2.tenant_id)['rbac_policy']
        pol2 = self.client.create_rbac_policy(
            object_type='network', object_id=net['id'],
            action='access_as_shared',
            target_tenant=self.client.tenant_id)['rbac_policy']
        # Filtering by id must return exactly the matching policy.
        res1 = self.client.list_rbac_policies(id=pol1['id'])['rbac_policies']
        res2 = self.client.list_rbac_policies(id=pol2['id'])['rbac_policies']
        self.assertEqual(1, len(res1))
        self.assertEqual(1, len(res2))
        self.assertEqual(pol1['id'], res1[0]['id'])
        self.assertEqual(pol2['id'], res2[0]['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('86c3529b-1231-40de-803c-afffffff6fff')
    def test_regular_client_blocked_from_sharing_anothers_network(self):
        net = self._make_admin_net_and_subnet_shared_to_tenant_id(
            self.client.tenant_id)['network']
        # A tenant may not create RBAC policies for networks it doesn't own.
        with testtools.ExpectedException(lib_exc.BadRequest):
            self.client.create_rbac_policy(
                object_type='network', object_id=net['id'],
                action='access_as_shared', target_tenant=self.client.tenant_id)

    @test.attr(type='smoke')
    @test.idempotent_id('c5f8f785-ce8d-4430-af7e-a236205862fb')
    @test.requires_ext(extension="quotas", service="network")
    def test_rbac_policy_quota(self):
        quota = self.client.show_quotas(self.client.tenant_id)['quota']
        max_policies = quota['rbac_policy']
        self.assertGreater(max_policies, 0)
        net = self.client.create_network(
            name=data_utils.rand_name('test-network-'))['network']
        self.addCleanup(self.client.delete_network, net['id'])
        # Creating max_policies + 1 policies must eventually raise Conflict.
        with testtools.ExpectedException(lib_exc.Conflict):
            for i in range(0, max_policies + 1):
                self.admin_client.create_rbac_policy(
                    object_type='network', object_id=net['id'],
                    action='access_as_shared',
                    target_tenant=str(uuid.uuid4()).replace('-', ''))

    @test.attr(type='smoke')
    @test.idempotent_id('86c3529b-1231-40de-803c-afffffff7fff')
    def test_regular_client_blocked_from_sharing_with_wildcard(self):
        net = self.create_network()
        with testtools.ExpectedException(lib_exc.Forbidden):
            self.client.create_rbac_policy(
                object_type='network', object_id=net['id'],
                action='access_as_shared', target_tenant='*')
        # ensure it works on update as well
        pol = self.client.create_rbac_policy(
            object_type='network', object_id=net['id'],
            action='access_as_shared', target_tenant=self.client2.tenant_id)
        with testtools.ExpectedException(lib_exc.Forbidden):
            self.client.update_rbac_policy(pol['rbac_policy']['id'],
                                           target_tenant='*')

    @test.attr(type='smoke')
    @test.idempotent_id('86c3529b-1231-40de-803c-aeeeeeee7fff')
    def test_filtering_works_with_rbac_records_present(self):
        resp = self._make_admin_net_and_subnet_shared_to_tenant_id(
            self.client.tenant_id)
        net = resp['network']['id']
        sub = resp['subnet']['id']
        self.admin_client.create_rbac_policy(
            object_type='network', object_id=net,
            action='access_as_shared', target_tenant='*')
        self._assert_shared_object_id_listing_presence('subnets', False, sub)
        self._assert_shared_object_id_listing_presence('subnets', True, sub)
        self._assert_shared_object_id_listing_presence('networks', False, net)
        self._assert_shared_object_id_listing_presence('networks', True, net)

    def _assert_shared_object_id_listing_presence(self, resource, shared, oid):
        # 'shared=True' listings must include oid; 'shared=False' listings
        # must exclude it.
        lister = getattr(self.admin_client, 'list_%s' % resource)
        objects = [o['id'] for o in lister(shared=shared)[resource]]
        if shared:
            self.assertIn(oid, objects)
        else:
            self.assertNotIn(oid, objects)
| |
import curses
import sys
import time
import os.path
import random
import pickle
from curses import wrapper
# Board side length in cells; persisted to ./constants/size.pkl between runs.
gamedims = 22
# Current glyph-selection indices (stored as strings); presumably indexes
# into elemArray for the alive/dead cell graphics -- confirm in elemSelector.
currPosList = ['0','2']
def main(stdscr):#{
    """Entry point (run via curses.wrapper): menu loop for the Game of Life.

    Loads the persisted board size from ./constants/size.pkl (creating the
    file with the default if missing) and writes it back on exit.
    """
    # Colour pairs: 1 normal text, 2 inverse video, 3-8 rainbow header
    # colours, 9 highlight for the current selection.
    curses.start_color()
    curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
    curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_WHITE)
    curses.init_pair(3, curses.COLOR_BLUE, curses.COLOR_WHITE)
    curses.init_pair(4, curses.COLOR_RED, curses.COLOR_WHITE)
    curses.init_pair(5, curses.COLOR_YELLOW, curses.COLOR_WHITE)
    curses.init_pair(6, curses.COLOR_MAGENTA, curses.COLOR_WHITE)
    curses.init_pair(7, curses.COLOR_CYAN, curses.COLOR_WHITE)
    curses.init_pair(8, curses.COLOR_GREEN, curses.COLOR_WHITE)
    curses.init_pair(9, curses.COLOR_BLUE, curses.COLOR_CYAN)
    stdscr.clear()
    stdscr.keypad(1)
    curses.curs_set(0)
    dims = stdscr.getmaxyx()
    # Restore the saved board size, or create the save file with the default.
    # NOTE(review): text-mode pickle file handling -- this is Python 2 code,
    # and the read handle 'f' is never closed here.
    if os.path.exists('./constants/size.pkl'):#{
        f = open('./constants/size.pkl')
        global gamedims
        gamedims = pickle.load(f)
    #}
    else:#{
        output = open('./constants/size.pkl', 'w')
        data = gamedims
        pickle.dump(data, output)
        output.close()
    #}
    while True:
        selection = makeMenu(stdscr, gamemenu)
        if selection == 1:#{
            # "Start the Game" submenu.
            sampleselection = 0
            while True:#{
                if sampleselection == curses.KEY_BACKSPACE: break
                initanswer=makeMenu(stdscr, initgame)
                if initanswer == 1:#{
                    # Custom start: fresh, empty gamedims x gamedims board.
                    boardArray=[[0 for j in range(gamedims)]for i in range(gamedims)]
                    showProgress(stdscr)
                    drawBoard(stdscr, boardArray)
                #}
                elif initanswer == 2:#{
                    # Sample patterns (placeholders: only print the choice).
                    while True:#{
                        sampleselection=makeMenu(stdscr, samples)
                        if sampleselection == 1:#{
                            print ('1')
                        #}
                        elif sampleselection == 2:#{
                            print ('2')
                        #}
                        elif sampleselection == 3:#{
                            print ('3')
                        #}
                        elif sampleselection == 4:#{
                            print ('4')
                        #}
                        elif sampleselection == 5:#{
                            print ('5')
                        #}
                        elif sampleselection == 0:
                            break
                    #}
                #}
                elif initanswer == 0:#{
                    break
                #}
            #}
        #}
        elif selection == 2:#{
            # "Options" submenu.
            while True:#{
                option = makeMenu(stdscr, optionsmenu)
                if option == 1:#{
                    # Board dimensions via the horizontal number picker;
                    # -1 means the user cancelled.
                    fieldsize=numberSelector(stdscr, sizeselector, gamedims)
                    if fieldsize != -1:#{
                        gamedims = fieldsize
                        break
                    #}
                #}
                elif option == 2:#{
                    # Cell graphics chooser.
                    characters = elemSelector(stdscr, charselector, elemArray, currPosList)
                #}
                elif option == 3:#{
                    # Rule selection (placeholders: only print the choice).
                    rulesmenu=makeMenu(stdscr, rules)
                    if rulesmenu == 1:#{
                        print ('1')
                    elif rulesmenu == 2:#{
                        print ('2')
                    #}
                #}
                elif option == 0:#{
                    break
                #}
            #}
        #}
        elif selection == 3:#{
            # "Exit" menu entry: persist the board size and quit.
            stdscr.clear()
            stdscr.refresh()
            curses.endwin()
            output = open('./constants/size.pkl', 'w')
            data = gamedims
            pickle.dump(data, output)
            output.close()
            sys.exit()
        #}
        elif selection == 0:#{
            # Backspace in the main menu: same save-and-exit path as "Exit".
            stdscr.clear()
            stdscr.refresh()
            curses.endwin()
            output = open('./constants/size.pkl', 'w')
            data = gamedims
            pickle.dump(data, output)
            output.close()
            sys.exit()
        #}
        #elif selection == ord('h'):#{
        #    stdscr.addstr(dims[0]-2,dims[1]/2-len(gamemenu[1])/2, gamemenu[1])
        #    stdscr.refresh()
        ##}
#}
def makeMenu(stdscr, L):#{
    """Draw a boxed vertical menu and return the chosen option number.

    L[0] is the header, L[1] the help line shown on 'h', L[2:] the options.
    Returns the 1-based option index on Enter, or 0 on Backspace (cancel).
    All '/' below is Python 2 integer division on screen coordinates.
    """
    pos1 = 1  # currently highlighted option (1-based)
    header = L[0]
    optCount = len(L)-2
    dims = stdscr.getmaxyx()
    # Width of the widest option (leading 0 keeps max() safe when empty).
    lenList = [0]
    for i in L[2:]:#{
        lenList.append(len(i))
    #}
    maxWidth = max(lenList)+1
    while True:#{
        # Options; the highlighted one gets '> ... <' in reverse video.
        for x in range (0,optCount):#{
            if pos1 == x+1:#{
                stdscr.addstr(dims[0]/2-optCount+2*x, dims[1]/2-maxWidth/2-2,'> '+L[x+2].center(maxWidth,' ')+' <', curses.color_pair(2))
            #}
            else:#{
                stdscr.addstr(dims[0]/2-optCount+2*x, dims[1]/2-maxWidth/2-2, L[x+2].center(maxWidth+4, ' '))
            #}
        #}
        # Top '=' border.
        for y in range (0,maxWidth+10):#{
            stdscr.addstr(dims[0]/2-optCount-2, dims[1]/2-maxWidth/2+y-3-2, '=',curses.color_pair(2))
        #}
        # Bottom border (clipped to the box width).
        stdscr.addnstr(dims[0]/2+optCount, dims[1]/2-maxWidth/2-3-2, ornament1 ,maxWidth+10,curses.color_pair(2))
        # Left and right 'I' side borders.
        for a in range (0,optCount*2+1):#{
            stdscr.addstr(dims[0]/2-optCount-1+a, dims[1]/2-maxWidth/2+maxWidth+6-2,'I',curses.color_pair(2))
            stdscr.addstr(dims[0]/2-optCount-1+a, dims[1]/2-maxWidth/2-3-2, 'I', curses.color_pair(2))
        #}
        stdscr.addstr(dims[0]/2+optCount+1, dims[1]/2-len('Press "h" for help')/2, 'Press "h" for help',curses.color_pair(2))
        # Header drawn one letter at a time in a random colour.
        for b in range(0,len(header)):#{
            stdscr.addstr(dims[0]/2-optCount-2, dims[1]/2-len(L[0])/2+b, header[b],curses.color_pair(random.randint(3,8)))
        #}
        stdscr.refresh()
        selection1=stdscr.getch()
        if selection1 == curses.KEY_UP and pos1 > 1:#{
            pos1-=1
        #}
        elif selection1 == curses.KEY_DOWN and pos1 < optCount:#{
            pos1+=1
        #}
        elif selection1 == 10:#{
            # Enter confirms the highlighted option.
            # (removed an unreachable 'break' that followed this return)
            stdscr.clear()
            stdscr.refresh()
            return pos1
        #}
        elif selection1 == curses.KEY_BACKSPACE:#{
            # Backspace cancels; 0 signals "no selection".
            stdscr.clear()
            stdscr.refresh()
            return 0
        #}
        elif selection1 == ord('h'):#{
            stdscr.addstr(dims[0]-2,dims[1]/2-len(L[1])/2, L[1])
        #}
    #}
#}
def drawBoard(stdscr, boardArray):#{
    """Draw the square board with a reverse-video frame and wait for a key.

    boardArray is a square list-of-lists of cell values; each cell is
    printed two columns wide, centred on the screen. '/' is Python 2
    integer division on coordinates throughout.
    """
    x = len(boardArray)
    dims = stdscr.getmaxyx()
    # Extra row offset used for the two bottom corners on odd board sizes.
    y= int(round(float(x)/2))-x/2
    corner1 = [dims[0]/2-x/2,dims[1]/2-x]
    corner2 = [dims[0]/2-x/2,dims[1]/2+x]
    corner3 = [dims[0]/2+x/2,dims[1]/2+x]
    corner4 = [dims[0]/2+x/2,dims[1]/2-x]
    # Frame corners.
    stdscr.addstr(corner1[0]-1, corner1[1]-1, "+", curses.A_REVERSE)
    stdscr.addstr(corner2[0]-1, corner2[1]+1, "+", curses.A_REVERSE)
    stdscr.addstr(corner3[0]+y, corner3[1]+1, "+", curses.A_REVERSE)
    stdscr.addstr(corner4[0]+y, corner4[1]-1, "+", curses.A_REVERSE)
    # Frame edges: '-' across top/bottom, '|' down the sides.
    for k in range(1,x*2+2):#{
        stdscr.addstr(corner1[0]-1,corner1[1]-1+k, "-", curses.A_REVERSE)
        stdscr.addstr(corner4[0]+y,corner4[1]-1+k, "-", curses.A_REVERSE)
        if k <= x:#{
            stdscr.addstr(corner1[0]-1+k,corner1[1]-1, "|", curses.A_REVERSE)
            stdscr.addstr(corner2[0]-1+k,corner2[1]+1, "|", curses.A_REVERSE)
        #}
    #}
    # Cell contents, two columns per cell.
    for i in range(0,len(boardArray)):#{
        for j in range(0,len(boardArray[i])):#{
            stdscr.addstr(corner1[0]+j, corner1[1]+2*i, ' '+str(boardArray[i][j]))
        #}
    #}
    stdscr.refresh()
    # Wait for any key before clearing. Renamed the local from 'input' to
    # avoid shadowing the builtin.
    key = stdscr.getch()
    stdscr.clear()
    stdscr.refresh()
#}
def makePrompt(stdscr, L):#{
    """Draw a boxed question with numbered answers; return the pressed key.

    L[0] is the question, L[1] the help text, L[2:] the answers. Returns
    the raw key code of the chosen digit ('1'..str(ansCount)), or -1 on
    Backspace. NOTE: the digit check assumes ansCount <= 9 (ord() of a
    multi-character string would raise).
    """
    question = L[0]
    header = L[1]
    ansCount = len(L)-2
    queLength = len(L[0])
    dims = stdscr.getmaxyx()
    while True:#{
        stdscr.clear()
        stdscr.box()
        stdscr.addstr(dims[0]/2-5, dims[1]/2-queLength/2, L[0])
        # Fixed layouts for two or three answers; generic column otherwise.
        if ansCount == 2:#{
            stdscr.addstr(dims[0]/2+2, dims[1]/2-len(L[2])-5, L[2]+'(1)')
            stdscr.addstr(dims[0]/2+2, dims[1]/2+5, L[3]+'(2)')
        #}
        elif ansCount == 3:#{
            stdscr.addstr(dims[0]/2+2, dims[1]/2-len(L[2])-4, L[2]+' (1)')
            stdscr.addstr(dims[0]/2+2, dims[1]/2+4, L[3]+'(2)')
            stdscr.addstr(dims[0]/2+4, dims[1]/2-len(L[4])/2, L[4]+'(3)')
        #}
        else:#{
            for x in range(1,ansCount+1):
                stdscr.addstr(dims[0]/2+2*x, dims[1]/2-len(L[x+1])/2, L[x+1]+'('+str(x)+')')
        #}
        stdscr.refresh()
        answer = stdscr.getch()
        if answer > ord('0') and answer <= ord(str(ansCount)):#{
            # (removed an unreachable 'break' that followed this return)
            stdscr.clear()
            stdscr.refresh()
            return answer
        #}
        elif answer == curses.KEY_BACKSPACE:#{
            stdscr.clear()
            stdscr.refresh()
            return -1
        #}
        elif answer == ord('h'):#{
            stdscr.addstr(dims[0]-2, dims[1]-len(L[1])-1, L[1])
            stdscr.addstr(dims[0]-2, dims[1]/2, L[2])
        #}
    #}
#}
def showProgress(stdscr):#{
    """Show a short fake 'Progress ...' animation in a bordered subwindow."""
    height, width = stdscr.getmaxyx()
    # 3x32 box, centred horizontally, one row above the vertical centre
    # (Python 2 integer division).
    box = stdscr.subwin(3, 32, height/2-1, width/2-16)
    box.border(0)
    box.addstr(1, 1, 'Progress ')
    time.sleep(0.5)
    # Fifteen dots, starting at column 10.
    for offset in range(15):#{
        box.addstr(1, 10 + offset, '.')
        box.refresh()
        time.sleep(0.01)
    #}
    box.addstr(1, 26, 'Done!')
    box.refresh()
    time.sleep(0.5)
    box.clear()
    box.refresh()
#}
def elemSelector(stdscr, L, elemArray, currPosList):#{
    """Character-selection screen for cell graphics.

    L is a menu-style list (L[0] = title); elemArray holds the selectable
    glyph lists and currPosList the current selections. Exits on Backspace.
    NOTE(review): the selection UI is drawn but nothing is changed or
    returned yet -- the arrow-key handling appears unfinished.
    """
    selections = len(elemArray)
    subject = L[0]
    dims = stdscr.getmaxyx()
    while True:#{
        # Reverse-video bands framing the selector rows ('/' is Python 2
        # integer division on all coordinates below).
        if selections == 1:#{
            for x in range(0,3):#{
                stdscr.addnstr(dims[0]/2-5+x, dims[1]/2-18*3/2, ornament2, 18*3, curses.A_REVERSE)
                stdscr.addnstr(dims[0]/2+3+x, dims[1]/2-18*3/2, ornament2, 18*3, curses.A_REVERSE)
            #}
            stdscr.addstr(dims[0]/2-5+1, dims[1]/2-len(L[0])/2, L[0], curses.A_REVERSE)
        #}
        elif selections == 2:#{
            for x in range(0,selections+1):#{
                for y in range(0,3):
                    stdscr.addnstr(dims[0]/2-selections*9/2+8*x+y, dims[1]/2-18*3/2, ornament2, 18*3, curses.A_REVERSE)
            #}
            stdscr.addstr(dims[0]/2-9+1, dims[1]/2-len(L[0])/2, L[0], curses.A_REVERSE)
        #}
        elif selections > 2:#{
            # Bug fix: this loop used the undefined name 'selection', which
            # raised NameError for more than two element groups; it now uses
            # 'selections' like the other branches.
            for x in range(0,selections+1):#{
                for y in range(0,3):#{
                    stdscr.addnstr(dims[0]/2-selections*9/2+x*8+y, dims[1]/2-18*3/2, ornament2, 18*3, curses.A_REVERSE)
                #}
            #}
            stdscr.addstr(dims[0]/2-selections*8/2+1, dims[1]/2-len(L[0])/2, L[0], curses.A_REVERSE)
        #}
        # Vertical side markers for each selector row.
        for a in range(0,selections):#{
            for y in range(0,6):#{
                stdscr.addstr(dims[0]/2-selections*9/2+3+y+a*8, dims[1]/2-16, ' ', curses.A_REVERSE)
                stdscr.addstr(dims[0]/2-selections*9/2+3+y+a*8, dims[1]/2+16, ' ', curses.A_REVERSE)
            #}
        #}
        # Tick marks above/below each row, with the centre one highlighted.
        for b in range(0,selections):#{
            stdscr.addstr(dims[0]/2-selections*9/2+5+8*b+1, dims[1]/2-13,'--- --- --- - - --- --- ---')
            stdscr.addstr(dims[0]/2-selections*9/2+5+8*b-1, dims[1]/2-13,'--- --- --- - - --- --- ---')
            stdscr.addstr(dims[0]/2-selections*9/2+5+8*b+1, dims[1]/2,'-', curses.color_pair(9))
            stdscr.addstr(dims[0]/2-selections*9/2+5+8*b-1, dims[1]/2,'-', curses.color_pair(9))
        #}
        # Renamed the local from 'input' to avoid shadowing the builtin.
        key = stdscr.getch()
        if key == curses.KEY_BACKSPACE:
            stdscr.clear()
            stdscr.refresh()
            break
    #}
#}
def numberSelector(stdscr, L, currPos):#{
    """Horizontal number picker between int(L[5]) and int(L[6]) inclusive.

    L[0] is the title, L[1] the help text shown on 'h'; currPos is the
    starting value. Returns the chosen number on Enter, or -1 on Backspace.
    '/' is Python 2 integer division on all screen coordinates below.
    """
    pos = currPos
    subject = L[0]
    numbers = int(L[6])-int(L[5])+1
    dims = stdscr.getmaxyx()
    while True:#{
        # Reverse-video bands above and below the picker row.
        for x in range(0,3):#{
            stdscr.addnstr(dims[0]/2-5+x, dims[1]/2-numbers*3/2, ornament2, (numbers)*3, curses.A_REVERSE)
            stdscr.addnstr(dims[0]/2+3+x, dims[1]/2-numbers*3/2, ornament2, (numbers)*3, curses.A_REVERSE)
        #}
        stdscr.addstr(dims[0]/2-4, dims[1]/2-len(L[0])/2, L[0], curses.A_REVERSE)
        # Tick marks; the centre pair is highlighted.
        stdscr.addstr(dims[0]/2+1, dims[1]/2-9,'-- -- -- --')
        stdscr.addstr(dims[0]/2-1, dims[1]/2-9,'-- -- -- --')
        stdscr.addstr(dims[0]/2+1, dims[1]/2-1,'--', curses.color_pair(9))
        stdscr.addstr(dims[0]/2-1, dims[1]/2-1,'--', curses.color_pair(9))
        for y in range(0,6):#{
            stdscr.addstr(dims[0]/2-2+y, dims[1]/2-12, ' ', curses.A_REVERSE)
            stdscr.addstr(dims[0]/2-2+y, dims[1]/2+12, ' ', curses.A_REVERSE)
        #}
        # Show two neighbours on each side of pos, blanking the ones that
        # would fall outside [int(L[5]), int(L[6])].
        if pos == int(L[5]):#{
            stdscr.addstr(dims[0]/2, dims[1]/2-9, '-- -- '+' '+' '+str(pos+1).rjust(2,'0')+' '+str(pos+2).rjust(2,'0'))
            stdscr.addstr(dims[0]/2, dims[1]/2-1, str(pos).rjust(2,'0'), curses.color_pair(9))
        #}
        elif pos == int(L[5])+1:#{
            stdscr.addstr(dims[0]/2, dims[1]/2-9, '-- '+str(pos-1).rjust(2,'0')+' '+' '+' '+str(pos+1).rjust(2,'0')+' '+str(pos+2).rjust(2,'0'))
            stdscr.addstr(dims[0]/2, dims[1]/2-1, str(pos).rjust(2,'0'), curses.color_pair(9))
        #}
        elif pos == int(L[6])-1:#{
            stdscr.addstr(dims[0]/2, dims[1]/2-9, str(pos-2).rjust(2,'0')+' '+str(pos-1).rjust(2,'0')+' '+' '+' '+str(pos+1).rjust(2,'0')+' --')
            stdscr.addstr(dims[0]/2, dims[1]/2-1, str(pos).rjust(2,'0'), curses.color_pair(9))
        #}
        elif pos == int(L[6]):#{
            stdscr.addstr(dims[0]/2, dims[1]/2-9, str(pos-2).rjust(2,'0')+' '+str(pos-1).rjust(2,'0')+' '+' '+' -- --')
            stdscr.addstr(dims[0]/2, dims[1]/2-1, str(pos).rjust(2,'0'), curses.color_pair(9))
        #}
        else:#{
            stdscr.addstr(dims[0]/2, dims[1]/2-9, str(pos-2).rjust(2,'0')+' '+str(pos-1).rjust(2,'0')+' '+' '+' '+str(pos+1).rjust(2,'0')+' '+str(pos+2).rjust(2,'0'))
            stdscr.addstr(dims[0]/2, dims[1]/2-1, str(pos).rjust(2,'0'), curses.color_pair(9))
        #}
        stdscr.addstr(dims[0]/2+4, dims[1]/2-len('Press "h" for help')/2, 'Press "h" for help', curses.A_REVERSE)
        # Renamed the local from 'input' to avoid shadowing the builtin.
        key = stdscr.getch()
        if key == curses.KEY_LEFT and pos > int(L[5]):#{
            pos-=1
        #}
        elif key == curses.KEY_RIGHT and pos < int(L[6]):#{
            pos+=1
        #}
        elif key == ord('h'):
            stdscr.addstr(dims[0]-2, dims[1]/2-len(L[1])/2, L[1])
        elif key == 10:#{
            # Enter confirms. (removed an unreachable 'break' after return)
            stdscr.clear()
            stdscr.refresh()
            return pos
        #}
        elif key == curses.KEY_BACKSPACE:#{
            # Backspace cancels; -1 signals "no change".
            stdscr.clear()
            stdscr.refresh()
            return -1
        #}
    #}
#}
gamemenu=['GAME OF LIFE','Arrows to select. Enter to submit. Exit with backspace','Start the Game','Options','Exit']
optionsmenu=['SETTINGS','Arrows to select. Enter to submit. Exit with backspace', 'Board dimensions', 'Cell graphics', 'Rules']
initgame=['GAMEMODE', 'Arrows to select. Enter to submit. Exit with backspace', 'Custom start', 'Select an example']
samples=['LIST OF SAMPLES','Arrows to select. Enter to submit. Exit with backspace', 'Pentadecathlon (repeats after 15 steps)', 'Pulsar (repeats after 3 steps)', 'Lightweight spaceship (LWSS, repeats never)', 'Blinker (repeats after 2 steps)', 'Toad (repeats after 2 steps)']
sizeselector=['SELECT THE SIZE OF LIFE', 'Arrows to select. Enter to submit. Exit with backspace', 'OK', 'CANCEL', 'Current size', '5', '22']
charselector=['PUT SOME MAKEUP ON DEM CELLS','Arrows to select. Enter to submit. Cancel wih backspace','Alive cells','Dead cells']
elemArray=[['+','-','*','/','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','1','2','3','4','5','6','7','8','9','0','?','!','<','>'],['O','0','o','D','C','G']]
rules=['RULES','Keys 1-3 for rules 1-3. Cancel with "Backspace"','Original rules','Rule 2','Rule 3']
ornament1='======================================================================================================='
ornament2=' '
ornament3='-------------------------------------------------------------------------------------------------------'
# curses.wrapper restores the terminal state even if main() raises.
if __name__ == '__main__':#{
    wrapper(main)
#}
| |
#!/usr/bin/python
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
##\author Derek King
##\brief Interface to Prologix GPIB-Ethernet controller
"""
Interactive interface to GPIB device using Prologix GPIB-Ethernet controller.
Usage: %(progname)s [-h] [-a address] [-u usb serial dev] [-g gpib_addr]
Options:
-a address : Use address to connect to Prologix GPIB-Ethernet controller.
Address can be IPv4 address or hostname.
-u dev : Use given usb serial device to communicate to Prologix GPIB-USB controller.
Device is usually /dev/ttyUSBX
  -g gpib  : Use GPIB address to access specific device on GPIB bus.
GPIB address is usually number 1-30.
-h : show this help
Example:
%(progname)s -a 10.0.1.197 -g 22
Interactive Usage:
Type SCPI command at prompt '>'. Pressing enter will send command to device.
To read SCPI output from previous command, don't type anything and just press enter.
Note : SCPI = Standard Commands for Programmable Instruments
Interactive Example : (Reading device identification string)
> *idn? <enter>
> << ENTER >>
Agilent Technologies,34410A,MY47007427,2.35-2.35-0.09-46-09
Interactive Example : (Voltage measurement from DMM)
> meas:volt:dc?
> << ENTER >>
-2.12071654E-04
"""
import traceback
import socket
import sys
import re
import pdb
import time
import datetime
import matplotlib
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import math
import numpy
import getopt
import pdb
# Module-wide flag: when True, the helper methods print progress/debug output.
DEBUG = True
def usage(progname):
    # Print the module docstring with progname interpolated
    # (Python 2 print statement; docstring uses %(progname)s).
    print __doc__ % vars()
# Interface to GPIB bus
class PrologixGpibEthernet:
    """Prologix GPIB-Ethernet controller reached over TCP (fixed port 1234).

    Configures the controller for manual reads ('++auto 0') with a NUL byte
    appended as the end-of-transmission marker, verifies the firmware
    banner, and flushes any stale data left in the socket.
    """

    def __init__(self, ip_address):
        # No device selected yet; select() must be called before read/write.
        self._gpib_address = None
        try :
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            port = 1234
            sock.connect((ip_address, port))
        except socket.error :
            raise RuntimeError("Error making connection to Prologix GPIB controller with address '%s'" % ip_address)
        self._sock = sock
        self.settimeout(0.1)
        # Controller setup: manual read mode, append NUL as the EOT marker.
        self._write('++auto 0\n')
        self._write('++eot_enable 1\n')
        self._write('++eot_char 0\n')
        self._write('++ver\n')
        ver = self._read("\n")
        m = re.match('Prologix GPIB-ETHERNET Controller version ([0-9.]+)', ver)
        if not m:
            raise RuntimeError("Error ++ver response is incorrect : '%s'" % ver.strip('\r\n'))
        msg = self.flush()
        if msg:
            print "Warning, flushed : ", msg

    def _read(self, eot="\x00"):
        # Accumulate socket data until the EOT character appears; returns
        # None if the read times out with nothing received.
        # NOTE(review): msg[0:end-1] also drops the byte immediately before
        # the EOT marker (presumably a trailing '\r' when eot='\n');
        # confirm this is intended for the '\x00' EOT case as well.
        sock = self._sock
        msg = ""
        try:
            while True:
                msg += sock.recv(1024)
                end = msg.find(eot)
                if end == len(msg)-1:
                    return msg[0:end-1]
                elif end != -1:
                    # Data past the EOT marker is discarded with a warning.
                    print "warning, dropping %d byte tail of message. Tail='%s'"%(len(msg)-end, msg[end:])
                    return msg[0:end-1]
        except socket.timeout:
            if len(msg) > 0:
                raise RuntimeError("Got timeout after receiving partial result : %s" % msg)
            else:
                return None

    def _write(self, line):
        self._sock.sendall(line)

    def _select(self, gpib_address):
        # Tell the controller which GPIB bus address to talk to.
        #print 'select %d' % self._gpib_address
        self._write('++addr %d\n' % self._gpib_address)

    def settimeout(self, timeout):
        """ Set read timeout in seconds """
        self._sock.settimeout(timeout)
        self._timeout = timeout

    def flush(self):
        """ Clear any read buffers out """
        # Temporarily shorten the timeout, drain everything pending, then
        # restore the caller's configured timeout.
        self._sock.settimeout(0.1)
        line = self._read()
        result = ""
        while line != None:
            result += line
            line = self._read()
        self._sock.settimeout(self._timeout)
        return result

    # Core functions : every gpib adapter should implement: read() write() select()
    def select(self, gpib_address):
        # Only re-send ++addr when the target address actually changes.
        if self._gpib_address != gpib_address:
            self._gpib_address = gpib_address
            self._select(self._gpib_address)

    def read(self):
        if self._gpib_address == None:
            raise RuntimeError("GPIB address must select()ed before read() is called\n")
        # Manual-read mode: explicitly ask the controller to read until EOI.
        self._write("++read eoi\n")
        return self._read()

    def write(self, line):
        if self._gpib_address == None:
            raise RuntimeError("GPIB address must select()ed before read() is called\n")
        # escape +, \n, \r, and \27 (ESC) chars so the controller passes
        # them through to the device instead of interpreting them
        line = line.replace('\x1B', '\x1B\x1B')
        line = line.replace('\n', '\x1B\n')
        line = line.replace('\r', '\x1B\r')
        line = line.replace('+', '\x1B+')
        self._write(line + "\n")
''' '''
def init_GPIB(self):
    '''
    Create the GPIB adapter and device handles on self.

    If the IP address is not found, run the program "GPIB Configurator"
    and look at the IP, or run "NetFinder" from Prologix.
    '''
    # NOTE(review): references GPIB_control, which is not imported in this
    # file, and takes 'self' outside any visible class -- this looks like a
    # method pasted from another module; confirm its intended home.
    self.GPIB_adapter = GPIB_control.PrologixGpibEthernet('10.1.1.113')
    read_timeout = 1.0
    if DEBUG: print "Setting adapter read timeout to %f seconds" % read_timeout
    self.GPIB_adapter.settimeout(read_timeout)
    gpib_address = int(7)#Scope over Rb exp
    if DEBUG: print "Using device GPIB address of %d" % gpib_address
    self.GPIB_device = GPIB_control.GpibDevice(self.GPIB_adapter, gpib_address)
    if DEBUG: print "Finished initialization of GPIB controller"
def get_scope_field(self,q1="Data:Source CH1",
                    q2="Data:Encdg: ASCII",
                    q3="Data:Width 2",
                    q4="Data:Start 1",
                    q5="Data:Stop 500",
                    q6="wfmpre?" ,
                    q7="curve?"):
    # Query one 500-point ASCII waveform from the scope via SCPI commands;
    # returns nothing if init_GPIB has not run (no self.GPIB_device yet).
    # NOTE(review): like init_GPIB above, this takes 'self' outside any
    # visible class -- confirm its intended home.
    e1 = time.time()
    if not hasattr(self,'GPIB_device'):
        if DEBUG: print "GPIB device not ready"
        return
    response = self.GPIB_device.converse([q1,q2,q3,q4,q5,q6,q7])
    e2 = time.time()
    if DEBUG: print "Scope communication took", e2-e1, "sec"
    ystr = response["curve?"]
    if DEBUG: print "Data:", ystr
''' '''
# Interface to specific GPIB device on GPIB bus
class GpibDevice:
    """One device (a single GPIB bus address) behind a shared GPIB adapter.

    The adapter must implement select(), read() and write(); every
    operation first selects this device's address on the bus.
    """

    def __init__(self, gpib_adapter, gpib_addr):
        # gpib_adapter: object implementing select()/read()/write()
        # gpib_addr:    bus address of this device (usually 1-30)
        self._gpib_addr = gpib_addr
        self._gpib_adapter = gpib_adapter

    def read(self):
        """Select this device on the bus, then read its pending output."""
        self._gpib_adapter.select(self._gpib_addr)
        return self._gpib_adapter.read()

    def write(self, line):
        """Select this device on the bus, then send one command line."""
        self._gpib_adapter.select(self._gpib_addr)
        self._gpib_adapter.write(line)

    def converse(self,commands):
        """Send each command and collect {command: response}.

        Accepts a single command string or a list of commands.
        """
        responses={}
        if isinstance(commands, basestring): commands=[commands]
        for command in commands:
            self.write(command)
            # Direct dict assignment; the original kept an unused loop
            # counter and built a one-entry dict per update.
            responses[command] = self.read()
        return responses
# def converse(self,commands, timeouts):
# responses={}
# if isinstance(commands, basestring): commands=[commands]
# i = 0
# for command in commands:
# self._gpib_adapter.settimeout(timeouts[i])
# self.write(command)
# responses.update({command:self.read()})
# i = i + 1
# return responses
def main(argv):
    """Capture a single waveform from the scope over GPIB and save it as
    ASCII data and a PNG figure.

    argv: sys.argv-style list. Most option handling below is commented out;
    the adapter IP and GPIB address are currently hard-coded.
    """
    progname = argv[0]
    print argv
    import getopt
    optlist,argv = getopt.gnu_getopt(argv, "a:s:g:h");
    gpib_address = 22
    read_timeout = 1.0
    adapter = None
    # for opt,arg in optlist:
    #     if (opt == "-h"):
    #         usage(progname)
    #         return 0
    #     elif (opt == "-a") :
    #         print "Connecting to Prologix GPIB Ethernet adapter using network address %s" % arg
    #         adapter = PrologixGpibEthernet(arg)
    # If the IP below is not found, run "GPIB Configuator" or Prologix
    # "NetFinder" to discover the adapter address.
    adapter = PrologixGpibEthernet('10.1.1.113')
    #
    #     elif (opt == "-u") :
    #         print "USB adapter is not supported yet..."
    #         return 1
    #     elif (opt == "-g") :
    #         gpib_address = int(arg)
    gpib_address = int(7)
    #     else :
    #         print "Internal error : opt = ", opt
    #         return 2
    # if adapter == None:
    #     usage(progname)
    #     print "Please use -a or -u options to select GPIB adapter"
    #     return 1
    print "Setting adapter read timeout to %f seconds" % read_timeout
    adapter.settimeout(read_timeout)
    print "Using device GPIB address of %d" % gpib_address
    dev = GpibDevice(adapter, gpib_address)
    ##file = open("scopetest.txt","a")
    print "File is open and am now writing to it. Ctrl-c to stop data taking."
    ##i = datetime.datetime.now()
    ##file.write("Current date & time = %s\n" % i)
    startTime = time.time()
    # Two-Gaussian model used by the (commented-out) curve_fit section below.
    def func(x, intercept, w1,a1,d1,w2,a2,d2):
        return intercept + a1*numpy.exp(-((x-d1)**2)/(2.*w1**2))+a2*numpy.exp(-((x-d2)**2)/(2.*w2**2))
    #while True:
    #query1 = "MEAS:VOLT:DC? 10,0.0001"
    #query1 = "Measurement:meas2:value?"
    #data = dev.converse([query1])
    # Configure the transfer window, then fetch preamble and curve data.
    q1="Data:Source CH3"
    q2="Data:Encdg: ASCII"
    q3="Data:Width 2"
    q4="Data:Start 1"
    q5="Data:Stop 500"
    q6="wfmpre?"
    q7="curve?"
    e1=time.time()
    response=dev.converse([q1,q2,q3,q4,q5,q6,q7])
    ystr=response["curve?"]
    '''
    response=dev.converse([q7])
    ystr=response["curve?"]
    '''
    e2=time.time()
    print "Scope communication took", e2-e1,"s"
    #pdb.set_trace()
    ydata=[int(s) for s in ystr.split(',')]
    xdata=numpy.multiply(range(len(ydata)),(10.0*10.0)/500.0) #xdata converted for 10ms/div scale
    ydata=numpy.multiply(ydata,(5.0*5.0)/2**15) #ydata converted for 5mV/div scale
    fig,ax=plt.subplots()
    ax.plot(xdata,ydata)
    '''
    popt,popv = curve_fit(func, xdata, ydata, (-12.,12.,15.,25.,6.,5.,50.))
    popt,popv = curve_fit(func, xdata, ydata, (1,1,1,1,1,1.,1))
    fit = func(xdata, *popt)
    #plt.plot(xdata, fit, 'b-')
    print "FWHM 1=",popt[1]*2*numpy.sqrt(2*numpy.log(2)), "ms", "and FWHM 2=",popt[4]*2*numpy.sqrt(2*numpy.log(2)),"ms"
    print "Temp 1 approximately:",(popt[1]*2*numpy.sqrt(2*numpy.log(2))/10.0)**2*16.3, "uK and Temp 2 approximately",(popt[4]*2*numpy.sqrt(2*numpy.log(2))/10.0)**2*16.3, "uK"
    '''
    # plt.plot(range(len(ydata)),func(range(len(ydata)), 1,1) )
    # Persist the trace as comma-separated ASCII and as a PNG figure.
    ydatasave=str(ydata).translate(None,'[]\n')
    savefile=open("Lattice0Order",'w')
    savefile.write(ydatasave)
    savefile.close()
    print "ASCII data saved"
    plt.savefig("Lattice0Order")
    print "Figure saved"
    #print str(query1 + ": " + str(data.pop(query1)) + "\n" )
    ##file.write("Time [s]: " + str(time.time()-startTime) + " "+ query1 + ": " + str(data.pop(query1)) + "\n" )
    #time.sleep(1)'''
    # import readline
    # line = raw_input("> ")
    # while True:
    #     if line:
    #         dev.write(line)
    #     else:
    #         result = dev.read()
    #         if result != None:
    #             print result
    #         else:
    #             print '<<< NO RESPONSE >>>'
    #     line = raw_input("> ")
# Script entry point: take and save a single scope trace.
if __name__ == '__main__':
    main(sys.argv)
| |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python library to communicate with Fleetspeak over grpc."""
import abc
import collections
import os
import logging
import threading
import time
from typing import Optional
from absl import flags
from concurrent import futures
import grpc
from fleetspeak.src.common.proto.fleetspeak import common_pb2
from fleetspeak.src.server.grpcservice.proto.fleetspeak_grpcservice import grpcservice_pb2_grpc
from fleetspeak.src.server.proto.fleetspeak_server import admin_pb2
from fleetspeak.src.server.proto.fleetspeak_server import admin_pb2_grpc
FLAGS = flags.FLAGS

# Command-line flags used as fallbacks by InsecureGRPCServiceClient when the
# corresponding constructor arguments are left unset.
flags.DEFINE_string(
    "fleetspeak_message_listen_address", "",
    "The address to bind to, to listen for fleetspeak messages.")
flags.DEFINE_string("fleetspeak_server", "",
                    "The address to find the fleetspeak admin server, e.g. "
                    "'localhost:8080'")
class Servicer(grpcservice_pb2_grpc.ProcessorServicer):
    """A wrapper to collect messages from incoming grpcs.

    This implementation of grpcservice_pb2_grpc.ProcessorServicer passes
    every received message to a provided callback after performing some
    basic sanity checking.

    Note that messages may be delivered twice.
    """

    def __init__(self, process_callback, service_name, **kwargs):
        """Create a Servicer.

        Args:
          process_callback: Called as process_callback(msg, context) for
            every message that passes validation, where msg is a
            common_pb2.Message and context is a grpc.ServicerContext. Must
            be thread safe.
          service_name: Name of the service we run as; used to sanity check
            the destination address of received messages.
          **kwargs: Forwarded to grpcservice_pb2_grpc.ProcessorServicer.
        """
        super(Servicer, self).__init__(**kwargs)
        self._service_name = service_name
        self._process_callback = process_callback

    def Process(self, request, context):
        """Validate one incoming message and hand it to the callback."""
        reply = common_pb2.EmptyMessage()
        if not isinstance(request, common_pb2.Message):
            logging.error("Received unexpected request type: %s",
                          request.__class__.__name__)
            context.set_code(grpc.StatusCode.UNKNOWN)
            return reply
        if request.destination.client_id:
            logging.error("Received message for client: %s",
                          request.destination.client_id)
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            return reply
        if request.destination.service_name != self._service_name:
            logging.error("Received message for unknown service: %s",
                          request.destination.service_name)
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            return reply
        self._process_callback(request, context)
        return reply
class InvalidArgument(Exception):
    """Exception indicating unexpected input (e.g. a wrong message type)."""
class NotConfigured(Exception):
    """Exception indicating that the requested operation is not configured."""
class OutgoingConnection(object):
    """An outgoing connection to Fleetspeak over grpc.

    This wraps an admin_pb2_grpc.AdminStub, providing the same interface but
    adding retry support and some sanity checks.

    See the definition of the Admin grpc service in
    server/proto/fleetspeak_server/admin.proto for full interface documentation.
    """

    # TODO: Remove retry logic when possible. I.e. when grpc supports it
    # natively - https://github.com/grpc/proposal/blob/master/A6-client-retries.md

    DEFAULT_TIMEOUT = 30  # seconds

    def __init__(self, channel, service_name, stub=None):
        """Create a Sender.

        Args:
          channel: The grpc.Channel over which we should send messages.
          service_name: The name of the service that we are running as.
          stub: If set, used instead of AdminStub(channel). Intended to ease
            unit tests.
        """
        if stub:
            self._stub = stub
        else:
            self._stub = admin_pb2_grpc.AdminStub(channel)
        self._service_name = service_name
        self._shutdown = False
        self._shutdown_cv = threading.Condition()
        # Background daemon thread pinging the server until Shutdown().
        self._keep_alive_thread = threading.Thread(target=self._KeepAliveLoop)
        self._keep_alive_thread.daemon = True
        self._keep_alive_thread.start()

    def _KeepAliveLoop(self):
        """Send a KeepAlive rpc roughly every 5s; exits on Shutdown()."""
        try:
            while True:
                with self._shutdown_cv:
                    if self._shutdown:
                        return
                    # Wakes early when Shutdown() notifies the condition.
                    self._shutdown_cv.wait(timeout=5)
                    if self._shutdown:
                        return
                try:
                    self._stub.KeepAlive(common_pb2.EmptyMessage(), timeout=1.0)
                except grpc.RpcError as e:
                    # Transient failure: log and keep the loop running.
                    logging.warning("KeepAlive rpc failed: %s", e)
        except Exception as e:  # pylint: disable=broad-except
            logging.error("Exception in KeepAlive: %s", e)

    def _RetryLoop(self, func, timeout=None):
        """Retries an operation until success or deadline.

        Args:
          func: The function to run. Must take a timeout, in seconds, as a single
            parameter. If it raises grpc.RpcError and the deadline has not been
            reached, it will be run again.
          timeout: Retries will continue until timeout seconds have passed.
        """
        timeout = timeout or self.DEFAULT_TIMEOUT
        deadline = time.time() + timeout
        sleep = 1  # backoff doubles after every failed attempt
        while True:
            try:
                return func(timeout)
            except grpc.RpcError:
                # Give up when the next sleep would overshoot the deadline.
                if time.time() + sleep > deadline:
                    raise
                time.sleep(sleep)
                sleep *= 2
                # Shrink the per-attempt timeout to the time remaining.
                timeout = deadline - time.time()

    def InsertMessage(self, message, timeout=None):
        """Inserts a message into the Fleetspeak server.

        Sets message.source, if unset.

        Args:
          message: common_pb2.Message
            The message to send.
          timeout: How many seconds to try for.

        Raises:
          grpc.RpcError: if the RPC fails.
          InvalidArgument: if message is not a common_pb2.Message.
        """
        if not isinstance(message, common_pb2.Message):
            raise InvalidArgument("Attempt to send unexpected message type: %s" %
                                  message.__class__.__name__)
        if not message.HasField("source"):
            message.source.service_name = self._service_name
        # Sometimes GRPC reports failure, even though the call succeeded. To prevent
        # retry logic from creating duplicate messages we fix the message_id.
        if not message.message_id:
            message.message_id = os.urandom(32)
        return self._RetryLoop(
            lambda t: self._stub.InsertMessage(message, timeout=t))

    def DeletePendingMessages(self, request, timeout=None):
        """Forwards a DeletePendingMessagesRequest to the server, with retries.

        Raises:
          TypeError: if request is not an admin_pb2.DeletePendingMessagesRequest.
        """
        if not isinstance(request, admin_pb2.DeletePendingMessagesRequest):
            raise TypeError("Expected fleetspeak.admin.DeletePendingMessagesRequest "
                            "as an argument.")
        return self._RetryLoop(
            lambda t: self._stub.DeletePendingMessages(request, timeout=t)
        )

    def GetPendingMessages(
            self,
            request: admin_pb2.GetPendingMessagesRequest,
            timeout: Optional[float] = None) -> admin_pb2.GetPendingMessagesResponse:
        """Forwards a GetPendingMessagesRequest to the server, with retries."""
        return self._RetryLoop(
            lambda t: self._stub.GetPendingMessages(request, timeout=t),
            timeout=timeout,
        )

    def GetPendingMessageCount(
            self,
            request: admin_pb2.GetPendingMessageCountRequest,
            timeout: Optional[float] = None,
    ) -> admin_pb2.GetPendingMessageCountResponse:
        """Forwards a GetPendingMessageCountRequest to the server, with retries."""
        return self._RetryLoop(
            lambda t: self._stub.GetPendingMessageCount(request, timeout=t),
            timeout=timeout,
        )

    def ListClients(self, request, timeout=None):
        """Provides basic information about Fleetspeak clients.

        Args:
          request: fleetspeak.admin.ListClientsRequest
          timeout: How many seconds to try for.

        Returns: fleetspeak.admin.ListClientsResponse
        """
        return self._RetryLoop(
            lambda t: self._stub.ListClients(request, timeout=t))

    def FetchClientResourceUsageRecords(self, request, timeout=None):
        """Provides resource usage metrics of a single Fleetspeak client.

        Args:
          request: fleetspeak.admin.FetchClientResourceUsageRecordsRequest
          timeout: How many seconds to try for.

        Returns: fleetspeak.admin.FetchClientResourceUsageRecordsResponse
        """
        return self._RetryLoop(
            lambda t: self._stub.FetchClientResourceUsageRecords(request, timeout=t))

    def Shutdown(self):
        """Stops the keep-alive thread and waits for it to exit."""
        with self._shutdown_cv:
            self._shutdown = True
            self._shutdown_cv.notify()
        self._keep_alive_thread.join()
class ServiceClient(object, metaclass=abc.ABCMeta):
    """Bidirectional connection to Fleetspeak.

    This abstract class can be used to represent a bidirectional connection
    with fleetspeak. Users of this library are encouraged to select (or
    provide) an implementation of this according to their grpc connection
    requirements.

    Fix: the class previously used the Python 2 spelling
    ``__metaclass__ = abc.ABCMeta``, which has no effect on Python 3, so the
    abstract methods were not actually enforced. The metaclass is now
    declared with Python 3 syntax.
    """

    @abc.abstractmethod
    def __init__(self, service_name):
        """Abstract constructor for ServiceClient.

        Args:
          service_name: string; The Fleetspeak service name to communicate
            with.
        """

    @abc.abstractmethod
    def Send(self, message):
        """Sends a message to the Fleetspeak server."""

    @abc.abstractmethod
    def Listen(self, process_callback):
        """Listens to messages from the Fleetspeak server.

        Args:
          process_callback: A callback to be executed when a message arrives
            from the Fleetspeak server. See the process argument of
            Servicer.__init__.
        """
class InsecureGRPCServiceClient(ServiceClient):
    """An insecure bidirectional connection to Fleetspeak.

    Implements ServiceClient by creating insecure grpc connections; meant
    primarily for integration testing.

    Attributes:
      outgoing: The underlying OutgoingConnection object. Present when
        configured for writing.
    """

    def __init__(self,
                 service_name,
                 fleetspeak_message_listen_address=None,
                 fleetspeak_server=None,
                 threadpool_size=5):
        """Constructor.

        Args:
          service_name: string The name of the service to communicate as.
          fleetspeak_message_listen_address: string
            The connection's read end address. Falls back to the argv flag of
            the same name; if both are unset the connection is not open for
            reading and Listen() raises NotConfigured.
          fleetspeak_server: string
            The connection's write end address. Falls back to the argv flag
            of the same name; if both are unset the connection is not open
            for writing and Send() raises NotConfigured.
          threadpool_size: int
            The number of threads to use to process messages.

        Raises:
          NotConfigured:
            If both fleetspeak_message_listen_address and fleetspeak_server
            are unset.
        """
        super(InsecureGRPCServiceClient, self).__init__(service_name)
        listen_address = fleetspeak_message_listen_address
        if listen_address is None:
            listen_address = FLAGS.fleetspeak_message_listen_address or None
        server_address = fleetspeak_server
        if server_address is None:
            server_address = FLAGS.fleetspeak_server or None
        if listen_address is None and server_address is None:
            raise NotConfigured(
                "At least one of the arguments (fleetspeak_message_listen_address, "
                "fleetspeak_server) has to be provided.")
        self._service_name = service_name
        self._listen_address = listen_address
        self._threadpool_size = threadpool_size
        if server_address is None:
            logging.info(
                "fleetspeak_server is unset, not creating outbound connection to "
                "fleetspeak.")
            self.outgoing = None
        else:
            self.outgoing = OutgoingConnection(
                grpc.insecure_channel(server_address), service_name)
            logging.info("Fleetspeak GRPCService client connected to %s",
                         server_address)

    def Send(self, message):
        """Send one message.

        Deprecated, users should migrate to call self.outgoing.InsertMessage
        directly.
        """
        if not self.outgoing:
            raise NotConfigured("Send address not provided.")
        self.outgoing.InsertMessage(message)

    def Listen(self, process):
        """Start serving; validated incoming messages go to `process`."""
        if self._listen_address is None:
            raise NotConfigured("Listen address not provided.")
        executor = futures.ThreadPoolExecutor(
            max_workers=self._threadpool_size)
        self._server = grpc.server(executor)
        self._server.add_insecure_port(self._listen_address)
        grpcservice_pb2_grpc.add_ProcessorServicer_to_server(
            Servicer(process, self._service_name), self._server)
        self._server.start()
        logging.info("Fleetspeak GRPCService client listening on %s",
                     self._listen_address)
| |
# Copyright 2015-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import copy
from datetime import datetime, timedelta
import json
import itertools
import ipaddress
import logging
import os
import random
import re
import sys
import threading
import time
from urllib import parse as urlparse
from urllib.request import getproxies
from c7n import config
from c7n.exceptions import ClientError, PolicyValidationError
# Try to play nice in a serverless environment, where we don't require yaml
try:
    import yaml
except ImportError: # pragma: no cover
    # PyYAML absent: yaml_load/yaml_dump will raise RuntimeError if called.
    SafeLoader = BaseSafeDumper = yaml = None
else:
    try:
        # Prefer the C-accelerated loader/dumper when libyaml is available.
        from yaml import CSafeLoader as SafeLoader, CSafeDumper as BaseSafeDumper
    except ImportError: # pragma: no cover
        from yaml import SafeLoader, SafeDumper as BaseSafeDumper


class SafeDumper(BaseSafeDumper or object):
    # Never emit YAML anchors/aliases for repeated objects.
    def ignore_aliases(self, data):
        return True


log = logging.getLogger('custodian.utils')
class VarsSubstitutionError(Exception):
    """Raised by load_file when {var} substitution in a policy file fails."""
    pass
def load_file(path, format=None, vars=None):
    """Load a YAML or JSON file, optionally substituting {name} variables.

    The format is inferred from a .json extension unless given explicitly;
    anything else is treated as YAML.
    """
    if format is None:
        format = 'yaml'
        _, ext = os.path.splitext(path)
        if ext[1:] == 'json':
            format = 'json'

    with open(path) as fh:
        contents = fh.read()

    if vars:
        try:
            contents = contents.format(**vars)
        except IndexError:
            raise VarsSubstitutionError(
                'Failed to substitute variable by positional argument.')
        except KeyError as e:
            raise VarsSubstitutionError(
                'Failed to substitute variables. KeyError on {}'.format(str(e)))

    if format == 'yaml':
        return yaml_load(contents)
    elif format == 'json':
        return loads(contents)
def yaml_load(value):
    """Parse a YAML document from *value* with the safe loader."""
    if yaml is not None:
        return yaml.load(value, Loader=SafeLoader)
    raise RuntimeError("Yaml not available")
def yaml_dump(value):
    """Serialize *value* as block-style YAML without anchors/aliases."""
    if yaml is not None:
        return yaml.dump(value, default_flow_style=False, Dumper=SafeDumper)
    raise RuntimeError("Yaml not available")
def loads(body):
    """Deserialize a JSON document from *body*."""
    return json.loads(body)
def dumps(data, fh=None, indent=0):
    """JSON-serialize *data* (datetimes become ISO strings via DateTimeEncoder).

    Writes to file handle *fh* when given, otherwise returns the encoded
    string.
    """
    if not fh:
        return json.dumps(data, cls=DateTimeEncoder, indent=indent)
    return json.dump(data, fh, cls=DateTimeEncoder, indent=indent)
def format_event(evt):
    """Pretty-print an event structure as two-space-indented JSON."""
    return json.dumps(evt, indent=2)
def filter_empty(d):
    """Remove falsy-valued keys from dict *d* in place and return it."""
    empty_keys = [k for k, v in d.items() if not v]
    for k in empty_keys:
        del d[k]
    return d
def type_schema(
        type_name, inherits=None, rinherit=None,
        aliases=None, required=None, **props):
    """jsonschema generation helper

    params:
     - type_name: name of the type
     - inherits: list of document fragments that are required via anyOf[$ref]
     - rinherit: use another schema as a base for this, basically work around
                 inherits issues with additionalProperties and type enums.
     - aliases: additional names this type maybe called
     - required: list of required properties, by default 'type' is required
     - props: additional key value properties
    """
    type_names = [type_name]
    if aliases:
        type_names.extend(aliases)

    if rinherit:
        s = copy.deepcopy(rinherit)
        s['properties']['type'] = {'enum': type_names}
    else:
        s = {
            'type': 'object',
            'properties': {
                'type': {'enum': type_names}}}

    # Ref based inheritance and additional properties don't mix well.
    # https://stackoverflow.com/questions/22689900/json-schema-allof-with-additionalproperties
    if not inherits:
        s['additionalProperties'] = False

    s['properties'].update(props)

    if not required:
        required = []
    if isinstance(required, list):
        # Fix: build a new list instead of appending in place, so a caller's
        # shared `required` list no longer accumulates 'type' entries across
        # calls.
        required = required + ['type']
    s['required'] = required

    if inherits:
        extended = s
        s = {'allOf': [{'$ref': i} for i in inherits]}
        s['allOf'].append(extended)
    return s
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that renders datetimes as ISO-8601 strings."""

    def default(self, obj):
        if not isinstance(obj, datetime):
            # Fall back to the stock encoder (raises TypeError).
            return json.JSONEncoder.default(self, obj)
        return obj.isoformat()
def group_by(resources, key):
    """Return a mapping of key value to resources with the corresponding value.

    Key may be specified in dotted form for nested dictionary lookup.
    """
    path = key.split('.')
    grouped = {}
    for resource in resources:
        value = resource
        for segment in path:
            value = value.get(segment)
            # Stop descending once we hit a non-dict (including None).
            if not isinstance(value, dict):
                break
        grouped.setdefault(value, []).append(resource)
    return grouped
def chunks(iterable, size=50):
    """Break an iterable into lists of at most *size* elements."""
    batch = []
    for item in iterable:
        batch.append(item)
        if len(batch) == size:
            yield batch
            batch = []
    # Trailing partial batch, if any.
    if batch:
        yield batch
def camelResource(obj):
    """Recursively TitleCase the keys of *obj*, in place.

    Some sources from apis return lowerCased keys whereas describe calls
    always return TitleCase; this converts the former to the latter.
    """
    if not isinstance(obj, dict):
        return obj
    for key in list(obj):
        value = obj.pop(key)
        obj["%s%s" % (key[0].upper(), key[1:])] = value
        if isinstance(value, dict):
            camelResource(value)
        elif isinstance(value, list):
            for element in value:
                camelResource(element)
    return obj
def get_account_id_from_sts(session):
    """Return the AWS account id for *session* via sts get-caller-identity."""
    identity = session.client('sts').get_caller_identity()
    return identity.get('Account')
def get_account_alias_from_sts(session):
    """Return the first IAM account alias, or '' when none exist."""
    response = session.client('iam').list_account_aliases()
    aliases = response.get('AccountAliases', ())
    if aliases:
        return aliases[0]
    return ''
def query_instances(session, client=None, **query):
    """Return a flat list of ec2 instances for the query."""
    if client is None:
        client = session.client('ec2')
    paginator = client.get_paginator('describe_instances')
    instances = []
    for page in paginator.paginate(**query):
        for reservation in page['Reservations']:
            instances.extend(reservation['Instances'])
    return instances
# Per-thread cache of {factory_region: {'session': ..., 'time': ...}},
# used by local_session / reset_session_cache below.
CONN_CACHE = threading.local()
def local_session(factory):
    """Cache a session thread local for up to 45m"""
    region = getattr(factory, 'region', 'global')
    cached = getattr(CONN_CACHE, region, {})
    session = cached.get('session')
    stamp = cached.get('time')
    now = time.time()
    # Reuse the cached session while it is younger than 45 minutes.
    if session is not None and stamp + (60 * 45) > now:
        return session
    session = factory()
    setattr(CONN_CACHE, region, {'session': session, 'time': now})
    return session
def reset_session_cache():
    """Drop every region entry from the thread-local session cache."""
    for name in dir(CONN_CACHE):
        if not name.startswith('_'):
            setattr(CONN_CACHE, name, {})
def annotation(i, k):
    """Return annotation *k* from resource *i*, defaulting to ()."""
    return i.get(k, ())
def set_annotation(i, k, v):
    """Append annotation value(s) *v* under key *k* on resource dict *i*.

    Values are stored as a list. Fix: an existing non-list value is now
    promoted to a list and extended; previously the new annotation was
    silently dropped in that case.

    >>> x = {}
    >>> set_annotation(x, 'marker', 'a')
    >>> annotation(x, 'marker')
    ['a']
    """
    if not isinstance(i, dict):
        raise ValueError("Can only annotate dictionaries")

    if not isinstance(v, list):
        v = [v]

    if k in i:
        ev = i.get(k)
        if isinstance(ev, list):
            ev.extend(v)
        else:
            # Promote a scalar annotation to a list and append the new values.
            i[k] = [ev] + v
    else:
        i[k] = v
def parse_s3(s3_path):
    """Split an s3:// url into (normalized_path, bucket, key_prefix).

    Raises ValueError when the path does not start with 's3://'.
    """
    if not s3_path.startswith('s3://'):
        raise ValueError("invalid s3 path")
    # First '/' after the 's3://' scheme separates bucket from key.
    ridx = s3_path.find('/', 5)
    if ridx == -1:
        ridx = None
    bucket = s3_path[5:ridx]
    s3_path = s3_path.rstrip('/')
    if ridx is None:
        key_prefix = ""
    else:
        # NOTE(review): this second find() runs on the rstrip'd path; for a
        # url with a trailing slash and no key (e.g. 's3://bucket/') it
        # returns -1 and the slice yields the tail of the bucket name --
        # looks unintended, confirm before relying on this edge case.
        key_prefix = s3_path[s3_path.find('/', 5):]
    return s3_path, bucket, key_prefix
# Regions living outside the default 'aws' partition; any region not listed
# here resolves to 'aws' (see get_partition / generate_arn).
REGION_PARTITION_MAP = {
    'us-gov-east-1': 'aws-us-gov',
    'us-gov-west-1': 'aws-us-gov',
    'cn-north-1': 'aws-cn',
    'cn-northwest-1': 'aws-cn',
    'us-isob-east-1': 'aws-iso-b',
    'us-iso-east-1': 'aws-iso'
}
def get_partition(region):
    """Return the AWS partition name for *region* ('aws' by default)."""
    return REGION_PARTITION_MAP.get(region, 'aws')
def generate_arn(
        service, resource, partition='aws',
        region=None, account_id=None, resource_type=None, separator='/'):
    """Generate an Amazon Resource Name.
    See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html.
    """
    # Regions in non-default partitions override the partition argument.
    if region and region in REGION_PARTITION_MAP:
        partition = REGION_PARTITION_MAP[region]
    if service == 's3':
        region = ''
    arn = 'arn:%s:%s:%s:%s:' % (
        partition, service, region or '', account_id or '')
    if not resource_type:
        return arn + resource
    if resource.startswith(separator):
        separator = ''
    return arn + '%s%s%s' % (resource_type, separator, resource)
def snapshot_identifier(prefix, db_identifier):
    """Return an identifier for a snapshot of a database or cluster."""
    stamp = datetime.now().strftime('%Y-%m-%d-%H-%M')
    return '%s-%s-%s' % (prefix, db_identifier, stamp)
retry_log = logging.getLogger('c7n.retry')


def get_retry(retry_codes=(), max_attempts=8, min_delay=1, log_retries=False):
    """Decorator for retry boto3 api call on transient errors.

    https://www.awsarchitectureblog.com/2015/03/backoff.html
    https://en.wikipedia.org/wiki/Exponential_backoff

    :param retry_codes: A sequence of retryable error codes.
    :param max_attempts: The max number of retries, by default the delay
           time is proportional to the max number of attempts.
    :param log_retries: Whether we should log retries, if specified
           specifies the level at which the retry should be logged.

    The maximum delay of any retry interval is derived from max_attempts.

    Returns a function for invoking aws client calls that
    retries on retryable error codes.
    """
    max_delay = max(min_delay, 2) ** max_attempts

    def _retry(func, *args, ignore_err_codes=(), **kw):
        # Walk the jittered geometric backoff schedule, retrying the call.
        for idx, delay in enumerate(
                backoff_delays(min_delay, max_delay, jitter=True)):
            try:
                return func(*args, **kw)
            except ClientError as e:
                # Errors the caller asked to ignore resolve to None.
                if e.response['Error']['Code'] in ignore_err_codes:
                    return
                elif e.response['Error']['Code'] not in retry_codes:
                    raise
                elif idx == max_attempts - 1:
                    # Out of attempts; surface the last error.
                    raise
                if log_retries:
                    retry_log.log(
                        log_retries,
                        "retrying %s on error:%s attempt:%d last delay:%0.2f",
                        func, e.response['Error']['Code'], idx, delay)
            time.sleep(delay)
    return _retry
def backoff_delays(start, stop, factor=2.0, jitter=False):
    """Yield a geometric backoff sequence from start to stop, w/ optional jitter."""
    delay = start
    while delay <= stop:
        if jitter:
            # Random value in [0, delay).
            yield delay - (delay * random.random())
        else:
            yield delay
        delay *= factor
def parse_cidr(value):
    """Process cidr ranges."""
    # Bare addresses (no '/') parse as single IPs, otherwise as networks.
    klass = ipaddress.ip_address if '/' not in value else IPv4Network
    try:
        return klass(str(value))
    except (ipaddress.AddressValueError, ValueError):
        return None
class IPv4Network(ipaddress.IPv4Network):
    """IPv4Network whose `in` operator also accepts networks.

    `net_a in net_b` tests net-to-net containment (supernet_of) in addition
    to the stock address containment.
    """

    # Override for net 2 net containment comparison
    def __contains__(self, other):
        if other is None:
            return False
        if isinstance(other, ipaddress._BaseNetwork):
            return self.supernet_of(other)
        return super(IPv4Network, self).__contains__(other)

    # Python <= 3.6 lacks supernet_of/_is_subnet_of; backfill them there.
    if (sys.version_info.major == 3 and sys.version_info.minor <= 6): # pragma: no cover

        @staticmethod
        def _is_subnet_of(a, b):
            try:
                # Always false if one is v4 and the other is v6.
                if a._version != b._version:
                    raise TypeError(f"{a} and {b} are not of the same version")
                return (b.network_address <= a.network_address and
                        b.broadcast_address >= a.broadcast_address)
            except AttributeError:
                raise TypeError(f"Unable to test subnet containment "
                                f"between {a} and {b}")

        def supernet_of(self, other):
            """Return True if this network is a supernet of other."""
            return self._is_subnet_of(other, self)
def reformat_schema(model):
    """Reformat a model's jsonschema into a more displayable format.

    Returns an explanatory string when the model has no usable schema.
    """
    if not hasattr(model, 'schema'):
        return "Model '{}' does not have a schema".format(model)
    if 'properties' not in model.schema:
        return "Schema in unexpected format."
    ret = copy.deepcopy(model.schema['properties'])
    ret.pop('type', None)
    # Fold the required-list into per-property flags.
    for key in model.schema.get('required', []):
        if key in ret:
            ret[key]['required'] = True
    return ret
# from botocore.utils avoiding runtime dependency for botocore for other providers.
# license apache 2.0
def set_value_from_jmespath(source, expression, value, is_first=True):
    """Set *value* in nested dict *source* at a dotted jmespath-like path.

    Limitations:
      * Only handles dotted lookups
      * No offsets/wildcards/slices/etc.
    """
    current_key, _, remainder = expression.partition('.')
    if not current_key:
        raise ValueError(expression)

    if remainder:
        # Create intermediate dicts for path segments that don't exist yet.
        if current_key not in source:
            source[current_key] = {}
        return set_value_from_jmespath(
            source[current_key], remainder, value, is_first=False)

    # Down to a single key: set it.
    source[current_key] = value
def format_string_values(obj, err_fallback=(IndexError, KeyError), *args, **kwargs):
    """Recursively str.format every string value in *obj*.

    Returns a new structure; strings whose substitution fails with one of
    *err_fallback* are returned unchanged.
    """
    if isinstance(obj, dict):
        return {key: format_string_values(val, *args, **kwargs)
                for key, val in obj.items()}
    if isinstance(obj, list):
        return [format_string_values(item, *args, **kwargs) for item in obj]
    if isinstance(obj, str):
        try:
            return obj.format(*args, **kwargs)
        except err_fallback:
            return obj
    return obj
def parse_url_config(url):
    """Parse *url* into a config.Bag of scheme/netloc/path/query params."""
    if url and '://' not in url:
        # A bare token like 'sqs' still parses as a scheme.
        url += "://"
    parsed = urlparse.urlparse(url)
    conf = config.Bag()
    conf['scheme'] = parsed.scheme
    conf['netloc'] = parsed.netloc
    conf['path'] = parsed.path
    for key, values in urlparse.parse_qs(parsed.query).items():
        conf[key] = values[0]
    conf['url'] = url
    return conf
def get_proxy_url(url):
    """Return the environment proxy url applying to *url*, or None."""
    proxies = getproxies()
    parts = parse_url_config(url)
    # Most-specific to least-specific proxy environment keys.
    candidates = (
        parts['scheme'] + '://' + parts['netloc'],
        parts['scheme'],
        'all://' + parts['netloc'],
        'all',
    )
    for candidate in candidates:
        if candidate in proxies:
            return proxies[candidate]
    return None
class FormatDate:
    """a datetime wrapper with extended pyformat syntax"""

    # Offset tokens like "+2d", "+30M", "+1h" embedded in a format spec.
    date_increment = re.compile(r'\+[0-9]+[Mdh]')

    def __init__(self, d=None):
        self._d = d

    @classmethod
    def utcnow(cls):
        return cls(datetime.utcnow())

    @property
    def datetime(self):
        return self._d

    def __getattr__(self, k):
        # Delegate everything else (year, strftime, ...) to the wrapped value.
        return getattr(self._d, k)

    def __format__(self, fmt=None):
        d = self._d
        offsets = self.date_increment.findall(fmt)
        for token in offsets:
            amount = float(token[1:-1])
            unit = token[-1]
            if unit == 'M':
                d = d + timedelta(minutes=amount)
            elif unit == 'h':
                d = d + timedelta(hours=amount)
            elif unit == 'd':
                d = d + timedelta(days=amount)
        if offsets:
            # Strip the offset tokens before formatting.
            fmt = self.date_increment.sub("", fmt)
        return d.__format__(fmt)
class QueryParser:
    """Validate a list of {'Name': ..., <value_key>: ...} query filters.

    Subclasses configure QuerySchema (allowed key -> expected value type, or
    tuple of allowed literal values), type_name (used in error messages),
    multi_value and value_key.
    """

    QuerySchema = {}
    type_name = ''
    multi_value = True
    value_key = 'Values'

    @classmethod
    def parse(cls, data):
        # Returns data unchanged after validation; raises
        # PolicyValidationError on the first malformed filter.
        filters = []
        if not isinstance(data, (tuple, list)):
            raise PolicyValidationError(
                "%s Query invalid format, must be array of dicts %s" % (
                    cls.type_name,
                    data))
        for d in data:
            if not isinstance(d, dict):
                raise PolicyValidationError(
                    "%s Query Filter Invalid %s" % (cls.type_name, data))
            if "Name" not in d or cls.value_key not in d:
                raise PolicyValidationError(
                    "%s Query Filter Invalid: Missing Key or Values in %s" % (
                        cls.type_name, data))
            key = d['Name']
            values = d[cls.value_key]
            if not cls.multi_value and isinstance(values, list):
                raise PolicyValidationError(
                    "%s Query Filter Invalid Key: Value:%s Must be single valued" % (
                        cls.type_name, key))
            elif not cls.multi_value:
                # Normalize a scalar to a single-element list for the checks below.
                values = [values]
            if key not in cls.QuerySchema and not key.startswith('tag:'):
                raise PolicyValidationError(
                    "%s Query Filter Invalid Key:%s Valid: %s" % (
                        cls.type_name, key, ", ".join(cls.QuerySchema.keys())))
            vtype = cls.QuerySchema.get(key)
            # NOTE(review): prefix check here is 'tag' while the one above is
            # 'tag:' -- a schema key merely starting with 'tag' would also
            # fall back to str here; confirm this asymmetry is intended.
            if vtype is None and key.startswith('tag'):
                vtype = str
            if not isinstance(values, list):
                raise PolicyValidationError(
                    "%s Query Filter Invalid Values, must be array %s" % (
                        cls.type_name, data,))
            for v in values:
                if isinstance(vtype, tuple):
                    # Tuple schema = enumeration of allowed literal values.
                    if v not in vtype:
                        raise PolicyValidationError(
                            "%s Query Filter Invalid Value: %s Valid: %s" % (
                                cls.type_name, v, ", ".join(vtype)))
                elif not isinstance(v, vtype):
                    raise PolicyValidationError(
                        "%s Query Filter Invalid Value Type %s" % (
                            cls.type_name, data,))
            filters.append(d)
        return filters
def get_annotation_prefix(s):
    """Return the custodian annotation key for *s*, e.g. 'c7n:foo'."""
    return 'c7n:{}'.format(s)
def merge_dict_list(dict_iter):
    """Merge an iterable of dictionaries; the last dict wins on key conflicts."""
    merged = {}
    for item in dict_iter:
        merged.update(item)
    return merged
def merge_dict(a, b):
    """Perform a merge of dictionaries a and b

    Any subdictionaries will be recursively merged.
    Note: for conflicting non-dict leaves the value from *b* ends up in the
    result (a's leaf is skipped in the first pass, then b's is copied in the
    second) -- the original docstring claimed a's value wins; confirm intent
    before relying on either.
    """
    d = {}
    # Pass 1: copy a's exclusive keys; recursively merge shared dict values.
    for k, v in a.items():
        if k not in b:
            d[k] = v
        elif isinstance(v, dict) and isinstance(b[k], dict):
            d[k] = merge_dict(v, b[k])
    # Pass 2: copy everything from b that is not already in the result.
    for k, v in b.items():
        if k not in d:
            d[k] = v
    return d
def select_keys(d, keys):
    """Project dict *d* onto *keys*; missing keys map to None."""
    return {k: d.get(k) for k in keys}
| |
import aeropy
import math
import numpy as np
from scipy import optimize
class shell():
    def __init__(self, geometry_parent, geometry_child, properties,
                 bc, chord=1, ndim=2):
        """
        Constant length is assumed for the structure

        A: metric tensor
        dA: metric tensor covariant derivative as a function of theta
        a: curvilinear basis vectors
        chord: length of the beam"""
        # Defining geometries
        self.g_p = geometry_parent
        self.g_c = geometry_child

        # shell thickness and width from the cross-section dimensions
        self.h = properties.dimensions[1]
        self.width = properties.dimensions[0]
        # NOTE(review): the ndim argument is ignored; dimensionality is
        # hard-coded to 2 here -- confirm whether that is intended.
        self.ndim = 2
        self.bc = bc
        self.properties = properties
        # Reference (parent) arc length, reused by calculate_chord.
        self.arc_length = self.g_p.arclength()[0]
    def calculate_chord(self, length_target = None, bounds = None):
        """Find the child chord whose arc length matches length_target
        (the parent's arc length by default) via scalar minimization."""
        def f(c_c):
            # Objective: absolute arc-length mismatch for candidate chord c_c.
            length_current, err = self.g_c.arclength(c_c)
            return abs(length_target - length_current)
        if length_target is None:
            length_target= self.arc_length
        if bounds is None:
            self.g_c.chord = optimize.minimize(f, self.g_c.chord).x[0]
        else:
            self.g_c.chord = optimize.minimize(f, self.g_c.chord,
                                               method='L-BFGS-B',
                                               bounds = bounds).x[0]
        # In case the calculated chord is really close to the original
        if abs(self.g_p.chord - self.g_c.chord) < 1e-7:
            self.g_c.chord = self.g_p.chord
    def calculate_strains(self):
        # Membrane strain gamma: half the difference of the in-plane (2x2)
        # metric tensors between child and parent configurations.
        self.gamma = 0.5*(self.g_c.A[:2, :2, :] - self.g_p.A[:2, :2, :])
    def calculate_change_curvature(self):
        # Bending measure rho: change of the curvature tensor B from parent
        # to child configuration.
        self.rho = -(self.g_c.B - self.g_p.B)
        # print('parent', self.g_p.B)
        # print('child', self.g_c.B)
        # Transverse component follows from the Poisson effect.
        self.rho[1,1,:] = -self.properties.poisson*self.rho[0,0,:]
def CauchyGreen(self):
"""From the definition of Hookian Thin homogeneous isentropic shell
(Eq. 9.98a) from Wempner's book:
- the definition uses contravariant basis vectors, but this whole
class uses covariant basis vectors. because of that all values
are inverted (coordinate system assumed orthogonal)"""
self.C = np.zeros([2,2,2,2,len(self.g_c.x1_grid)])
c0 = self.properties.young/2/(1+self.properties.poisson)
A = np.linalg.inv(self.g_c.A.T).T
for alpha in range(2):
for beta in range(2):
for gamma in range(2):
for eta in range(2):
a1 = A[alpha, gamma]*A[beta, eta]
a2 = A[alpha, eta]*A[beta, gamma]
a3 = A[alpha, beta]*A[gamma, eta]
c3 = (2*self.properties.poisson)/(1-self.properties.poisson)
self.C[alpha, beta, gamma, eta, :] = c0*(a1 + a2 + c3*a3)
# self.C[alpha, beta, gamma, eta, :] = self.properties.young
def free_energy(self):
self.phi_M = (self.h/2)*np.einsum('ijklm,ijm,klm->m',self.C,self.gamma,self.gamma)
self.phi_B = (self.h**3/24)*np.einsum('ijklm,ijm,klm->m',self.C,self.rho,self.rho)
# self.phi_B = (self.h**3/24)*self.properties.young*(self.g_c.r(self.g_c.x1_grid, 'x11')[:,2])**2
# # print all terms of phi
# c0 = self.properties.young/(1+self.properties.poisson)/(1-self.properties.poisson)
# A = np.linalg.inv(self.g_c.A.T).T
# print('a00', A[0,0]*A[0,0])
# for alpha in range(2):
# for beta in range(2):
# for gamma in range(2):
# for eta in range(2):
# print(alpha+1, beta+1, gamma+1, eta+1)
# print(self.C[alpha, beta, gamma, eta, :]*self.rho[alpha, beta]*self.rho[gamma, eta])
# print(np.einsum('ijklm,ijm,klm->m',self.C,self.rho,self.rho))
self.phi = self.phi_B + self.phi_M
def strain_energy(self):
self.U = self.width*np.trapz(self.phi, self.g_c.x1_grid)
def work(self):
energy = 0
for i in range(self.bc.concentrated_n):
# determine displacement of the tip
theta1 = self.g_p.arclength(self.bc.concentrated_x[i])[0]
x1_c = np.array(self.g_c.calculate_x1([theta1], output = True))
u = self.g_c.r(x1_c) - self.g_p.r(np.array([self.bc.concentrated_x[i]]))
# determine angle a tthe tip
dydx = self.g_c.x3(theta1, diff='x1')
phi = math.atan2(-.001*dydx,0.001)
# Calculating components of load
load_normal = self.bc.concentrated_load[i][2]*math.cos(phi) + self.bc.concentrated_load[i][0]*math.sin(phi)
loads = [0,0,0]
loads[0] = -load_normal*math.sin(phi)
loads[2] = load_normal*math.cos(phi)
# Calculating energy
energy_u = loads[0] * u[0][0]
energy_w = loads[2] * u[0][2]
# energy += energy_w + energy_u
energy += self.bc.concentrated_load[i][2]*u[0][2]
self.W = energy
self.u = u
def residual(self):
self.R = self.U - self.W
def update_parent(self):
self.g_p.calculate_x1(self.theta1, bounds = self.g_p.bounds)
self.g_p.basis()
self.g_p.basis(diff = 'theta')
self.g_p.metric_tensor()
self.g_p.metric_tensor(diff = 'theta')
self.g_p.curvature_tensor()
def update_child(self, steps=False):
self.calculate_chord(length_target = self.arc_length,
bounds = self.g_c.bounds)
self.g_c.calculate_x1(self.theta1, bounds = self.g_c.bounds)
# self.g_c.x1_grid = self.g_p.x1_grid
self.g_c.basis()
self.g_c.basis(diff = 'theta')
self.g_c.metric_tensor()
self.g_c.metric_tensor(diff = 'theta')
self.calculate_strains()
# Calculate energy
self.g_c.curvature_tensor()
self.calculate_change_curvature()
self.CauchyGreen()
self.free_energy()
self.strain_energy()
self.work()
self.residual()
def minimum_potential(self, x0=[0,0], input_function = None,
bounds = np.array([[-0.01,0.01], [-0.01,0.01]])):
def to_optimize(n_x):
x = (bounds[:,1] - bounds[:,0])*n_x + bounds[:,0]
self.g_c.D = input_function(x)
self.update_child()
print(x, self.U) #, self.W, self.U)
return self.R
if input_function is None:
input_function = lambda x:x
# With bounds
n_bounds = np.array(len(bounds)*[[0,1],])
n_x0 = (x0 - bounds[:,0])/(bounds[:,1] - bounds[:,0])
res = optimize.minimize(to_optimize, n_x0 ) #, options = {'eps':1e-7, 'ftol':1e-7})
x = (bounds[:,1] - bounds[:,0])*res.x + bounds[:,0]
self.g_c.D = input_function(x)
self.R = res.fun
self.update_child()
# theta1 = self.g_p.arclength(self.bc.concentrated_x[0])[0]
# x1_c = np.array(self.g_c.calculate_x1([theta1], output = True))
# if self.g_c.arc_length()[1] < 1e-3:
# return(x, res.fun*100)
# else:
return(x, res.fun)
def stepped_loading(self, x0=[0,0], input_function = None,
bounds = np.array([[-0.01,0.01], [-0.01,0.01]]),
N = 2):
load = self.bc.concentrated_load[0][2]
self.u0 = [[0,0,0],]
self.W0 = 0
self.load0 = [0, 0, 0]
coefficients = np.zeros([N,len(x0)])
coefficients[0,:] = x0
results = np.zeros([N,2])
arc_lengths = np.zeros([N,2])
arc_lengths[0,1] = self.g_p.arclength(1)[0]
loads = np.linspace(0,load,N)
print('loads', loads)
for i in range(1,N):
load_i = loads[i]
# print('load', i, load_i, self.u0, self.W0, self.load0)
self.bc.concentrated_load[0][2] = load_i
# print(load)
# print(self.bc.concentrated_load)
xi, residual = self.minimum_potential(x0 = x0,
input_function = input_function,
bounds = bounds)
coefficients[i,:] = xi
results[i,0] = load_i
results[i,1] = self.u[0][2]
arc_lengths[i,0] = load_i
theta1 = self.g_p.arclength(self.bc.concentrated_x[0])[0]
x1_c = np.array(self.g_c.calculate_x1([theta1], output = True))
arc_lengths[i,1] = self.g_c.arclength(x1_c)[0]
x0 = xi
print(results)
print('x0', x0)
return [coefficients, results, arc_lengths]
class design_exploration():
    """Sweep helper for exploring a structural component's design space."""
    def __init__(self, component):
        """
        :param component: structural component to explore.
            NOTE(review): currently unused -- confirm intended.
        """
        pass
    def sweep_geometries(self, geom_variables, input_function, reorder=None,
                         loading_condition = 'plane_stress'):
        """Evaluate residual and strain energy for each geometry input.

        :param geom_variables: sequence of geometry inputs to evaluate.
        :param input_function: mapping from design variables to geometry.
        :param reorder: optional shape to np.resize both result arrays into.
        :param loading_condition: loading condition label passed through.
        :return: tuple (energy_list, residual_list).

        NOTE(review): relies on ``self.residual`` and ``self.strain_energy``
        which are not defined on this class -- presumably supplied by a
        subclass or assigned externally; confirm before use.
        """
        energy_list = []
        residual_list = []
        # enumerate replaces manual indexing; local renamed from ``input``
        # so the builtin is no longer shadowed.
        for i, geom_input in enumerate(geom_variables):
            print(i)
            residual_list.append(self.residual(geom_input, input_type = 'Geometry',
                                 input_function = input_function,
                                 loading_condition = loading_condition))
            energy_list.append(self.strain_energy())
        if reorder is not None:
            residual_list = np.resize(residual_list, reorder)
            energy_list = np.resize(energy_list, reorder)
        return(energy_list, residual_list)
| |
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
# -*- test-case-name: flocker.provision.test.test_install -*-
"""
Install flocker on a remote node.
"""
import posixpath
from textwrap import dedent
from urlparse import urljoin, urlparse
from effect import Func, Effect
import yaml
from zope.interface import implementer
from characteristic import attributes
from pyrsistent import PRecord, field
from ._libcloud import INode
from ._common import PackageSource, Variants
from ._ssh import (
run, run_from_args, Run,
sudo_from_args, Sudo,
put,
run_remotely
)
from ._effect import sequence
from flocker import __version__ as version
from flocker.cli import configure_ssh
from flocker.common.version import (
get_installable_version, get_package_key_suffix, is_release,
)
# A systemctl sub-command to start or restart a service. We use restart here
# so that if it is already running it gets restart (possibly necessary to
# respect updated configuration) and because restart will also start it if it
# is not running.
START = "restart"
# Maps distribution name to the URL of the package that installs the
# ZFS-on-Linux yum repository definition for that distribution.
ZFS_REPO = {
    'centos-7': "https://s3.amazonaws.com/archive.zfsonlinux.org/"
                "epel/zfs-release.el7.noarch.rpm",
}
# Name of the S3 bucket holding ClusterHQ package archives.
ARCHIVE_BUCKET = 'clusterhq-archive'
def is_centos(distribution):
    """
    Check whether ``distribution`` names a CentOS release.
    :param bytes distribution: The name of the distribution to inspect.
    :return: ``True`` if it is a version of CentOS, ``False`` otherwise.
    """
    prefix = "centos-"
    return distribution[:len(prefix)] == prefix
def is_ubuntu(distribution):
    """
    Check whether ``distribution`` names an Ubuntu release.
    :param bytes distribution: The name of the distribution to inspect.
    :return: ``True`` if it is a version of Ubuntu, ``False`` otherwise.
    """
    prefix = "ubuntu-"
    return distribution[:len(prefix)] == prefix
def get_repository_url(distribution, flocker_version):
    """
    Return the URL for the repository of a given distribution.
    For ``yum``-using distributions this gives the URL to a package that adds
    entries to ``/etc/yum.repos.d``. For ``apt``-using distributions, this
    gives the URL for a repo containing a Packages(.gz) file.
    :param bytes distribution: The Linux distribution to get a repository for.
    :param bytes flocker_version: The version of Flocker to get a repository
        for.
    :return bytes: The URL pointing to a repository of packages.
    :raises: ``UnsupportedDistribution`` if the distribution is unsupported.
    """
    distribution_to_url = {
        # TODO instead of hardcoding keys, use the _to_Distribution map
        # and then choose the name
        'centos-7': "https://{archive_bucket}.s3.amazonaws.com/"
                    "{key}/clusterhq-release$(rpm -E %dist).noarch.rpm".format(
                        archive_bucket=ARCHIVE_BUCKET,
                        key='centos',
                        ),
        # This could hardcode the version number instead of using
        # ``lsb_release`` but that allows instructions to be shared between
        # versions, and for earlier error reporting if you try to install on a
        # separate version. The $(ARCH) part must be left unevaluated, hence
        # the backslash escapes (one to make shell ignore the $ as a
        # substitution marker, and then doubled to make Python ignore the \ as
        # an escape marker). The output of this value then goes into
        # /etc/apt/sources.list which does its own substitution on $(ARCH)
        # during a subsequent apt-get update
        'ubuntu-14.04': 'https://{archive_bucket}.s3.amazonaws.com/{key}/'
                        '$(lsb_release --release --short)/\\$(ARCH)'.format(
                            archive_bucket=ARCHIVE_BUCKET,
                            key='ubuntu' + get_package_key_suffix(
                                flocker_version),
                            ),
        'ubuntu-15.04': 'https://{archive_bucket}.s3.amazonaws.com/{key}/'
                        '$(lsb_release --release --short)/\\$(ARCH)'.format(
                            archive_bucket=ARCHIVE_BUCKET,
                            key='ubuntu' + get_package_key_suffix(
                                flocker_version),
                            ),
    }
    try:
        return distribution_to_url[distribution]
    except KeyError:
        # UnsupportedDistribution is defined later in this module; that is
        # fine because the name is only resolved at call time.
        raise UnsupportedDistribution()
def get_repo_options(flocker_version):
    """
    Get a list of options for enabling necessary yum repositories.
    Release builds need no extra repositories; development builds also
    enable the clusterhq-testing repository.
    :param bytes flocker_version: The version of Flocker to get options for.
    :return: List of bytes for enabling (or not) a testing repository.
    """
    if is_release(flocker_version):
        return []
    return ['--enablerepo=clusterhq-testing']
class UnsupportedDistribution(Exception):
    """
    Raised when an operation is attempted for a distribution that is not
    supported.
    """
@attributes(['distribution'])
class DistributionNotSupported(NotImplementedError):
    """
    Raised when the provisioning step is not supported on the given
    distribution.
    :ivar bytes distribution: The distribution that isn't supported.
    """
    def __str__(self):
        # ``distribution`` attribute is supplied by the @attributes decorator.
        return "Distribution not supported: %s" % (self.distribution,)
@implementer(INode)
class ManagedNode(PRecord):
    """
    A node managed by some other system (eg by hand or by another piece of
    orchestration software).
    """
    # Public address used to reach the node (eg over SSH).
    address = field(type=bytes, mandatory=True)
    # Optional internal/private network address, or None if there is none.
    private_address = field(type=(bytes, type(None)),
                            initial=None, mandatory=True)
    # Distribution identifier, eg b"centos-7" or b"ubuntu-14.04".
    distribution = field(type=bytes, mandatory=True)
def ensure_minimal_setup(package_manager):
    """
    Get any system into a reasonable state for installation.
    Although we could publish these commands in the docs, they add a lot
    of noise for many users. Ensure that systems have sudo enabled.
    :param bytes package_manager: The package manager (apt, dnf, yum).
    :return: a sequence of commands to run on the distribution
    :raises UnsupportedDistribution: for unknown package managers.
    """
    if package_manager in ('dnf', 'yum'):
        # Fedora/CentOS sometimes configured to require tty for sudo
        # ("sorry, you must have a tty to run sudo"). Disable that to
        # allow automated tests to run.
        # NOTE(review): the nested list passed as the 'su -c' argument
        # relies on run_from_args quoting it as a single sub-command --
        # confirm against the _ssh implementation.
        return sequence([
            run_from_args([
                'su', 'root', '-c', [package_manager, '-y', 'install', 'sudo']
            ]),
            run_from_args([
                'su', 'root', '-c', [
                    'sed', '--in-place', '-e',
                    's/Defaults.*requiretty/Defaults !requiretty/',
                    '/etc/sudoers'
                ]]),
        ])
    elif package_manager == 'apt':
        return sequence([
            run_from_args(['su', 'root', '-c', ['apt-get', 'update']]),
            run_from_args([
                'su', 'root', '-c', ['apt-get', '-y', 'install', 'sudo']
            ]),
        ])
    else:
        raise UnsupportedDistribution()
def task_cli_pkg_test():
    """
    Check that the CLI is working.
    :return: an Effect running ``flocker-deploy --version``.
    """
    return run_from_args(['flocker-deploy', '--version'])
def install_commands_yum(package_name, distribution, package_source,
                         base_url):
    """
    Install Flocker package on CentOS.
    The ClusterHQ repo is added for downloading latest releases.  If
    ``package_source`` contains a branch, then a BuildBot repo will also
    be added to the package search path, to use in-development packages.
    Note, the ClusterHQ repo is always enabled, to provide dependencies.
    :param str package_name: The name of the package to install.
    :param bytes distribution: The distribution the node is running.
    :param PackageSource package_source: The source from which to install the
        package.
    :param base_url: URL of repository, or ``None`` if we're not using
        development branch.
    :return: a sequence of commands to run on the distribution
    """
    commands = [
        # May have been previously installed by previous install run, so do
        # update instead of install:
        run(command="yum update -y " + get_repository_url(
            distribution=distribution,
            flocker_version=get_installable_version(version))),
    ]
    if base_url is not None:
        # Bytes template %-formatted with the branch repo URL (this module
        # targets Python 2 semantics).
        repo = dedent(b"""\
            [clusterhq-build]
            name=clusterhq-build
            baseurl=%s
            gpgcheck=0
            enabled=0
            """) % (base_url,)
        commands.append(put(content=repo,
                            path='/tmp/clusterhq-build.repo'))
        commands.append(run_from_args([
            'cp', '/tmp/clusterhq-build.repo',
            '/etc/yum.repos.d/clusterhq-build.repo']))
        repo_options = ['--enablerepo=clusterhq-build']
    else:
        repo_options = get_repo_options(
            flocker_version=get_installable_version(version))
    if package_source.os_version:
        # Pin an explicit package version when one was requested.
        package_name += '-%s' % (package_source.os_version,)
    # Install package and all dependencies:
    commands.append(run_from_args(
        ["yum", "install"] + repo_options + ["-y", package_name]))
    return sequence(commands)
def install_commands_ubuntu(package_name, distribution, package_source,
                            base_url):
    """
    Install Flocker package on Ubuntu.
    The ClusterHQ repo is added for downloading latest releases.  If
    ``package_source`` contains a branch, then a BuildBot repo will also
    be added to the package search path, to use in-development packages.
    Note, the ClusterHQ repo is always enabled, to provide dependencies.
    :param str package_name: The name of the package to install.
    :param bytes distribution: The distribution the node is running.
    :param PackageSource package_source: The source from which to install the
        package.
    :param base_url: URL of repository, or ``None`` if we're not using
        development branch.
    :return: a sequence of commands to run on the distribution
    """
    commands = [
        # Minimal images often have cleared apt caches and are missing
        # packages that are common in a typical release.  These commands
        # ensure that we start from a good base system with the required
        # capabilities, particularly that the add-apt-repository command
        # is available, and HTTPS URLs are supported.
        run_from_args(["apt-get", "update"]),
        run_from_args([
            "apt-get", "-y", "install", "apt-transport-https",
            "software-properties-common"]),
        # Add ClusterHQ repo for installation of Flocker packages.
        run(command='add-apt-repository -y "deb {} /"'.format(
            get_repository_url(
                distribution=distribution,
                flocker_version=get_installable_version(version))))
        ]
    if base_url is not None:
        # Add BuildBot repo for running tests
        commands.append(run_from_args([
            "add-apt-repository", "-y", "deb {} /".format(base_url)]))
        # During a release, the ClusterHQ repo may contain packages with
        # a higher version number than the Buildbot repo for a branch.
        # Use a pin file to ensure that any Buildbot repo has higher
        # priority than the ClusterHQ repo.
        buildbot_host = urlparse(package_source.build_server).hostname
        commands.append(put(dedent('''\
            Package: *
            Pin: origin {}
            Pin-Priority: 900
            '''.format(buildbot_host)), '/tmp/apt-pref'))
        commands.append(run_from_args([
            'mv', '/tmp/apt-pref', '/etc/apt/preferences.d/buildbot-900']))
    # Update to read package info from new repos
    commands.append(run_from_args(["apt-get", "update"]))
    if package_source.os_version:
        # Pin an explicit package version when one was requested.
        package_name += '=%s' % (package_source.os_version,)
    # Install package and all dependencies
    commands.append(run_from_args([
        'apt-get', '-y', '--force-yes', 'install', package_name]))
    return sequence(commands)
def task_package_install(package_name, distribution,
                         package_source=PackageSource()):
    """
    Install a Flocker package on a distribution.
    The ClusterHQ repo is added for downloading latest releases.  If
    ``package_source`` contains a branch, then a BuildBot repo will also
    be added to the package search path, to use in-development packages.
    Note, the ClusterHQ repo is always enabled, to provide dependencies.
    :param str package_name: The name of the package to install.
    :param bytes distribution: The distribution the node is running.
    :param PackageSource package_source: The source from which to install the
        package.
    :return: a sequence of commands to run on the distribution
    :raises UnsupportedDistribution: for distributions that are neither
        CentOS nor Ubuntu.
    """
    branch = package_source.branch
    if branch:
        # A development branch has been selected - add its Buildbot repo
        result_path = posixpath.join('/results/omnibus/', branch, distribution)
        base_url = urljoin(package_source.build_server, result_path)
    else:
        base_url = None
    if is_ubuntu(distribution):
        return install_commands_ubuntu(
            package_name, distribution, package_source, base_url)
    if is_centos(distribution):
        return install_commands_yum(
            package_name, distribution, package_source, base_url)
    raise UnsupportedDistribution()
def task_cli_pkg_install(distribution, package_source=PackageSource()):
    """
    Install the Flocker CLI package.
    :param bytes distribution: The distribution the node is running.
    :param PackageSource package_source: The source from which to install the
        package.
    :return: a sequence of commands to run on the distribution
    """
    commands = task_package_install("clusterhq-flocker-cli", distribution,
                                    package_source)
    # Although client testing is currently done as root, we want to use
    # sudo for better documentation output.
    # Rewrap every plain Run intent as Sudo; other intents pass through.
    return sequence([
        (Effect(Sudo(command=e.intent.command,
                     log_command_filter=e.intent.log_command_filter))
         if isinstance(e.intent, Run) else e)
        for e in commands.intent.effects])
# Packages required before pip-installing the Flocker client on apt systems.
PIP_CLI_PREREQ_APT = [
    'gcc',
    'libffi-dev',
    'libssl-dev',
    'python2.7',
    'python2.7-dev',
    'python-virtualenv',
]
# Equivalent prerequisite packages for yum/dnf systems.
PIP_CLI_PREREQ_YUM = [
    'gcc',
    'libffi-devel',
    'openssl-devel',
    'python',
    'python-devel',
    'python-virtualenv',
]
def task_cli_pip_prereqs(package_manager):
    """
    Install the pre-requisites for pip installation of the Flocker client.
    :param bytes package_manager: The package manager (apt, dnf, yum).
    :return: an Effect to install the pre-requisites.
    :raises UnsupportedDistribution: for unknown package managers.
    """
    if package_manager == 'apt':
        return sequence([
            sudo_from_args(['apt-get', 'update']),
            sudo_from_args(['apt-get', '-y', 'install'] + PIP_CLI_PREREQ_APT),
        ])
    if package_manager in ('dnf', 'yum'):
        return sudo_from_args(
            [package_manager, '-y', 'install'] + PIP_CLI_PREREQ_YUM
        )
    raise UnsupportedDistribution()
def task_cli_pip_install(
        venv_name='flocker-client', package_source=PackageSource()):
    """
    Install the Flocker client into a virtualenv using pip.
    :param bytes venv_name: Name for the virtualenv.
    :param package_source: Package source description
    :return: an Effect to install the client.
    """
    vers = package_source.version
    if vers is None:
        # Default to the running Flocker version.
        vers = version
    url = (
        'https://{bucket}.s3.amazonaws.com/{key}/'
        'Flocker-{version}-py2-none-any.whl'.format(
            bucket=ARCHIVE_BUCKET, key='python',
            version=get_installable_version(vers))
    )
    # NOTE(review): each command presumably runs in its own shell, in which
    # case 'source .../activate' would not affect the later pip commands --
    # confirm against the _ssh run implementation.
    return sequence([
        run_from_args(
            ['virtualenv', '--python=/usr/bin/python2.7', venv_name]),
        run_from_args(['source', '{}/bin/activate'.format(venv_name)]),
        run_from_args(['pip', 'install', '--upgrade', 'pip']),
        run_from_args(
            ['pip', 'install', url]),
    ])
def task_cli_pip_test(venv_name='flocker-client'):
    """
    Test the Flocker client installed in a virtualenv.
    :param bytes venv_name: Name for the virtualenv.
    :return: an Effect to test the client.
    """
    return sequence([
        run_from_args(['source', '{}/bin/activate'.format(venv_name)]),
        run_from_args(
            ['flocker-deploy', '--version']),
    ])
def task_configure_brew_path():
    """
    Configure non-interactive shell to use all paths.
    By default, OSX provides a minimal $PATH, for programs run via SSH. In
    particular /usr/local/bin (which contains `brew`) isn't in the path. This
    configures the path to have it there.
    :return: an Effect writing the ``.bashrc`` snippet.
    """
    return put(
        path='.bashrc',
        content=dedent("""\
            if [ -x /usr/libexec/path_helper ]; then
                eval `/usr/libexec/path_helper -s`
            fi
            """))
def task_test_homebrew(recipe):
    """
    The commands used to install a Homebrew recipe for Flocker and test it.
    This taps the ClusterHQ/tap tap, which means that Homebrew looks in the
    ClusterHQ/homebrew-tap GitHub repository for any recipe name given.
    :param bytes recipe: The name of a recipe in a either the official Homebrew
        tap or ClusterHQ/tap, or a URL pointing to a recipe.
    :return Effect: Commands used to install a Homebrew recipe for Flocker and
        test it.
    """
    return sequence([
        run_from_args(['brew', 'tap', 'ClusterHQ/tap']),
        run("brew update"),
        run("brew install {recipe}".format(recipe=recipe)),
        run("brew test {recipe}".format(recipe=recipe)),
    ])
def task_install_ssh_key():
    """
    Install the authorized ssh keys of the current user for root as well.
    :return: an Effect copying the keys via sudo.
    """
    return sequence([
        sudo_from_args(['cp', '.ssh/authorized_keys',
                        '/root/.ssh/authorized_keys']),
    ])
def task_upgrade_kernel(distribution):
    """
    Upgrade kernel.
    :param bytes distribution: The distribution the node is running.
    :return: a sequence of commands (empty for Ubuntu 14.04, where no
        upgrade is required).
    :raises DistributionNotSupported: for other distributions.
    """
    if is_centos(distribution):
        return sequence([
            run_from_args([
                "yum", "install", "-y", "kernel-devel", "kernel"]),
            # Flush written data to disk before the caller reboots the node.
            run_from_args(['sync']),
        ])
    elif distribution == 'ubuntu-14.04':
        # Not required.
        return sequence([])
    else:
        raise DistributionNotSupported(distribution=distribution)
def _remove_private_key(content):
"""
Remove most of the contents of a private key file for logging.
"""
prefix = '-----BEGIN PRIVATE KEY-----'
suffix = '-----END PRIVATE KEY-----'
start = content.find(prefix)
if start < 0:
# no private key
return content
# Keep prefix, subsequent newline, and 4 characters at start of key
trim_start = start + len(prefix) + 5
end = content.find(suffix, trim_start)
if end < 0:
end = len(content)
# Keep suffix and previous 4 characters and newline at end of key
trim_end = end - 5
if trim_end <= trim_start:
# strangely short key, keep all content
return content
return content[:trim_start] + '...REMOVED...' + content[trim_end:]
def task_install_control_certificates(ca_cert, control_cert, control_key):
    """
    Install certificates and private key required by the control service.
    :param FilePath ca_cert: Path to CA certificate on local machine.
    :param FilePath control_cert: Path to control service certificate on
        local machine.
    :param FilePath control_key: Path to control service private key
        local machine.
    :return: a sequence of commands uploading the files.
    """
    # Be better if permissions were correct from the start.
    # https://clusterhq.atlassian.net/browse/FLOC-1922
    return sequence([
        run('mkdir -p /etc/flocker'),
        run('chmod u=rwX,g=,o= /etc/flocker'),
        put(path="/etc/flocker/cluster.crt", content=ca_cert.getContent()),
        put(path="/etc/flocker/control-service.crt",
            content=control_cert.getContent()),
        # Redact the key material from any logged copy of this upload.
        put(path="/etc/flocker/control-service.key",
            content=control_key.getContent(),
            log_content_filter=_remove_private_key),
        ])
def task_install_node_certificates(ca_cert, node_cert, node_key):
    """
    Install certificates and private key required by a node.
    :param FilePath ca_cert: Path to CA certificate on local machine.
    :param FilePath node_cert: Path to node certificate on
        local machine.
    :param FilePath node_key: Path to node private key
        local machine.
    :return: a sequence of commands uploading the files.
    """
    # Be better if permissions were correct from the start.
    # https://clusterhq.atlassian.net/browse/FLOC-1922
    return sequence([
        run('mkdir -p /etc/flocker'),
        run('chmod u=rwX,g=,o= /etc/flocker'),
        put(path="/etc/flocker/cluster.crt", content=ca_cert.getContent()),
        put(path="/etc/flocker/node.crt",
            content=node_cert.getContent()),
        # Redact the key material from any logged copy of this upload.
        put(path="/etc/flocker/node.key",
            content=node_key.getContent(),
            log_content_filter=_remove_private_key),
        ])
def task_install_api_certificates(api_cert, api_key):
    """
    Install certificate and private key required by Docker plugin to
    access the Flocker REST API.
    :param FilePath api_cert: Path to API certificate on local machine.
    :param FilePath api_key: Path to API private key local machine.
    :return: a sequence of commands uploading the files.
    """
    # Be better if permissions were correct from the start.
    # https://clusterhq.atlassian.net/browse/FLOC-1922
    return sequence([
        run('mkdir -p /etc/flocker'),
        run('chmod u=rwX,g=,o= /etc/flocker'),
        put(path="/etc/flocker/api.crt",
            content=api_cert.getContent()),
        # Redact the key material from any logged copy of this upload.
        put(path="/etc/flocker/api.key",
            content=api_key.getContent(),
            log_content_filter=_remove_private_key),
        ])
def task_enable_docker(distribution):
    """
    Configure docker.
    We don't actually start it (or on Ubuntu, restart it) at this point
    since the certificates it relies on have yet to be installed.
    :param bytes distribution: The distribution the node is running.
    :return: a sequence of commands to run on the distribution
    :raises DistributionNotSupported: for unknown distributions.
    """
    # Use the Flocker node TLS certificate, since it's readily
    # available.
    docker_tls_options = (
        '--tlsverify --tlscacert=/etc/flocker/cluster.crt'
        ' --tlscert=/etc/flocker/node.crt --tlskey=/etc/flocker/node.key'
        ' -H=0.0.0.0:2376')
    if is_centos(distribution):
        conf_path = (
            "/etc/systemd/system/docker.service.d/01-TimeoutStartSec.conf"
        )
        return sequence([
            # Give Docker a long time to start up.  On the first start, it
            # initializes a 100G filesystem which can take a while.  The
            # default startup timeout is frequently too low to let this
            # complete.
            run("mkdir -p /etc/systemd/system/docker.service.d"),
            put(
                path=conf_path,
                content=dedent(
                    """\
                    [Service]
                    TimeoutStartSec=10min
                    """
                ),
            ),
            # Empty ExecStart= first clears the unit's inherited command
            # line before the TLS-enabled one is set.
            put(path="/etc/systemd/system/docker.service.d/02-TLS.conf",
                content=dedent(
                    """\
                    [Service]
                    ExecStart=
                    ExecStart=/usr/bin/docker daemon -H fd:// {}
                    """.format(docker_tls_options))),
            run_from_args(["systemctl", "enable", "docker.service"]),
        ])
    elif distribution == 'ubuntu-14.04':
        return sequence([
            put(path="/etc/default/docker",
                content=(
                    'DOCKER_OPTS="-H unix:///var/run/docker.sock {}"'.format(
                        docker_tls_options))),
        ])
    else:
        raise DistributionNotSupported(distribution=distribution)
def open_firewalld(service):
    """
    Open firewalld port for a service.
    Adds the service both permanently and to the running configuration.
    :param str service: Name of service.
    :return: an Effect running the firewall-cmd invocations.
    """
    # NOTE(review): '--reload' is issued before the add-service commands
    # here; confirm the intended ordering (a reload normally publishes
    # permanent changes, which at this point have not been made yet).
    return sequence([run_from_args(['firewall-cmd', '--reload'])] + [
        run_from_args(command + [service])
        for command in [['firewall-cmd', '--permanent', '--add-service'],
                        ['firewall-cmd', '--add-service']]])
def open_ufw(service):
    """
    Open ufw port for a service.
    :param str service: Name of service.
    :return: an Effect running ``ufw allow``.
    """
    return sequence([
        run_from_args(['ufw', 'allow', service])
        ])
def task_enable_flocker_control(distribution):
    """
    Enable flocker-control service.
    :param bytes distribution: The distribution the node is running.
    :return: a sequence of commands to run on the distribution
    :raises DistributionNotSupported: for unknown distributions.
    """
    if is_centos(distribution):
        return sequence([
            run_from_args(['systemctl', 'enable', 'flocker-control']),
            run_from_args(['systemctl', START, 'flocker-control']),
        ])
    elif distribution == 'ubuntu-14.04':
        # Since the flocker-control service is currently installed
        # alongside the flocker-dataset-agent service, the default control
        # service configuration does not automatically start the
        # service.  Here, we provide an override file to start it.
        return sequence([
            put(
                path='/etc/init/flocker-control.override',
                content=dedent('''\
                    start on runlevel [2345]
                    stop on runlevel [016]
                    '''),
            ),
            # Register the control service ports in /etc/services.
            run("echo 'flocker-control-api\t4523/tcp\t\t\t# Flocker Control API port' >> /etc/services"),  # noqa
            run("echo 'flocker-control-agent\t4524/tcp\t\t\t# Flocker Control Agent port' >> /etc/services"),  # noqa
            run_from_args(['service', 'flocker-control', 'start']),
        ])
    else:
        raise DistributionNotSupported(distribution=distribution)
def task_enable_docker_plugin(distribution):
    """
    Enable the Flocker Docker plugin.
    :param bytes distribution: The distribution name.
    :return: a sequence of commands to run on the distribution
    :raises DistributionNotSupported: for unknown distributions.
    """
    if is_centos(distribution):
        return sequence([
            run_from_args(['systemctl', 'enable', 'flocker-docker-plugin']),
            run_from_args(['systemctl', START, 'flocker-docker-plugin']),
            # (Re)start docker as well so it picks up the plugin.
            run_from_args(['systemctl', START, 'docker']),
        ])
    elif distribution == 'ubuntu-14.04':
        return sequence([
            run_from_args(['service', 'flocker-docker-plugin', 'restart']),
            run_from_args(['service', 'docker', 'restart']),
        ])
    else:
        raise DistributionNotSupported(distribution=distribution)
def task_open_control_firewall(distribution):
    """
    Open the firewall for flocker-control.
    Uploads a firewall service/application definition for the Docker API
    port, then opens the flocker-control and docker services.
    :param bytes distribution: The distribution the node is running.
    :return: a sequence of commands to run on the distribution
    :raises DistributionNotSupported: for unknown distributions.
    """
    if is_centos(distribution):
        upload = put(path="/usr/lib/firewalld/services/docker.xml",
                     content=dedent(
                         """\
                         <?xml version="1.0" encoding="utf-8"?>
                         <service>
                           <short>Docker API Port</short>
                           <description>The Docker API, over TLS.</description>
                           <port protocol="tcp" port="2376"/>
                         </service>
                         """))
        open_firewall = open_firewalld
    elif distribution == 'ubuntu-14.04':
        upload = put(path="/etc/ufw/applications.d/docker",
                     content=dedent(
                         """
                         [docker]
                         title=Docker API
                         description=Docker API.
                         ports=2376/tcp
                         """))
        open_firewall = open_ufw
    else:
        raise DistributionNotSupported(distribution=distribution)
    return sequence([upload] + [
        open_firewall(service)
        for service in ['flocker-control-api', 'flocker-control-agent',
                        'docker']
    ])
# Set of dataset fields which are *not* sensitive.  Only fields in this
# set are logged.  This should contain everything except usernames and
# passwords (or equivalents).  Implemented as a whitelist in case new
# security fields are added.  Consumed by _remove_dataset_fields below.
_ok_to_log = frozenset((
    'auth_plugin',
    'auth_url',
    'backend',
    'region',
    'zone',
    ))
def _remove_dataset_fields(content):
    """
    Remove non-whitelisted fields from dataset for logging.

    :param content: YAML agent configuration text.
    :return: YAML text with every dataset value outside ``_ok_to_log``
        replaced by ``'REMOVED'``.
    """
    parsed = yaml.safe_load(content)
    redacted = {}
    for key, value in parsed['dataset'].items():
        redacted[key] = value if key in _ok_to_log else 'REMOVED'
    parsed['dataset'] = redacted
    return yaml.safe_dump(parsed)
def task_configure_flocker_agent(control_node, dataset_backend,
                                 dataset_backend_configuration):
    """
    Configure the flocker agents by writing out the configuration file.
    :param bytes control_node: The address of the control agent.
    :param DatasetBackend dataset_backend: The volume backend the nodes are
        configured with.
    :param dict dataset_backend_configuration: The backend specific
        configuration options.
    :return: an Effect uploading ``/etc/flocker/agent.yml``.
    """
    # Copy before updating so the caller's dict is not mutated.
    dataset_backend_configuration = dataset_backend_configuration.copy()
    dataset_backend_configuration.update({
        u"backend": dataset_backend.name,
    })
    put_config_file = put(
        path='/etc/flocker/agent.yml',
        content=yaml.safe_dump(
            {
                "version": 1,
                "control-service": {
                    "hostname": control_node,
                    "port": 4524,
                },
                "dataset": dataset_backend_configuration,
            },
        ),
        # Keep credentials out of command logs.
        log_content_filter=_remove_dataset_fields
    )
    return sequence([put_config_file])
def task_enable_flocker_agent(distribution):
    """
    Enable the flocker agents.
    :param bytes distribution: The distribution name.
    :return: a sequence of commands to run on the distribution
    :raises DistributionNotSupported: for unknown distributions.
    """
    if is_centos(distribution):
        return sequence([
            run_from_args(['systemctl', 'enable', 'flocker-dataset-agent']),
            run_from_args(['systemctl', START, 'flocker-dataset-agent']),
            run_from_args(['systemctl', 'enable', 'flocker-container-agent']),
            run_from_args(['systemctl', START, 'flocker-container-agent']),
        ])
    elif distribution == 'ubuntu-14.04':
        return sequence([
            run_from_args(['service', 'flocker-dataset-agent', 'start']),
            run_from_args(['service', 'flocker-container-agent', 'start']),
        ])
    else:
        raise DistributionNotSupported(distribution=distribution)
def task_create_flocker_pool_file():
    """
    Create a file-back zfs pool for flocker.
    A sparse 10G file is used as the vdev backing the ``flocker`` pool.
    :return: a sequence of commands to run on the distribution
    """
    return sequence([
        run('mkdir -p /var/opt/flocker'),
        run('truncate --size 10G /var/opt/flocker/pool-vdev'),
        run('zpool create flocker /var/opt/flocker/pool-vdev'),
    ])
def task_install_zfs(distribution, variants=set()):
    """
    Install ZFS on a node.
    :param bytes distribution: The distribution the node is running.
    :param set variants: The set of variant configurations to use when
        installing (e.g. ``Variants.ZFS_TESTING``).
    :return: a sequence of commands to run on the distribution
    :raises DistributionNotSupported: for unknown distributions.
    """
    commands = []
    if distribution == 'ubuntu-14.04':
        commands += [
            # ZFS not available in base Ubuntu - add ZFS repo
            run_from_args([
                "add-apt-repository", "-y", "ppa:zfs-native/stable"]),
        ]
        commands += [
            # Update to read package info from new repos
            run_from_args([
                "apt-get", "update"]),
            # Package spl-dkms sometimes does not have libc6-dev as a
            # dependency, add it before ZFS installation requires it.
            # See https://github.com/zfsonlinux/zfs/issues/3298
            run_from_args(["apt-get", "-y", "install", "libc6-dev"]),
            run_from_args(['apt-get', '-y', 'install', 'zfsutils']),
            ]
    elif is_centos(distribution):
        commands += [
            # Install the ZFS-on-Linux repository definition first.
            run_from_args(["yum", "install", "-y", ZFS_REPO[distribution]]),
        ]
        if distribution == 'centos-7':
            commands.append(
                run_from_args(["yum", "install", "-y", "epel-release"]))
        if Variants.ZFS_TESTING in variants:
            commands += [
                run_from_args(['yum', 'install', '-y', 'yum-utils']),
                run_from_args([
                    'yum-config-manager', '--enable', 'zfs-testing'])
            ]
        commands += [
            run_from_args(['yum', 'install', '-y', 'zfs']),
        ]
    else:
        raise DistributionNotSupported(distribution)
    return sequence(commands)
def configure_zfs(node, variants):
    """
    Configure ZFS for use as a Flocker backend.

    :param INode node: The node to configure ZFS on.
    :param set variants: The set of variant configurations to use when
        installing ZFS (passed through to ``task_install_zfs``).
    :return Effect: Upgrades the kernel, reboots, installs ZFS and creates
        the backing pool, then re-configures SSH access to the node.
    """
    return sequence([
        # A new-enough kernel must be in place before ZFS is installed.
        run_remotely(
            username='root',
            address=node.address,
            commands=task_upgrade_kernel(
                distribution=node.distribution),
        ),
        # Reboot so the upgraded kernel is actually running.
        node.reboot(),
        run_remotely(
            username='root',
            address=node.address,
            commands=sequence([
                task_install_zfs(
                    distribution=node.distribution,
                    variants=variants),
                task_create_flocker_pool_file(),
            ]),
        ),
        # Wrapped in Func so SSH configuration runs lazily as an Effect.
        Effect(
            Func(lambda: configure_ssh(node.address, 22))),
    ])
def _uninstall_flocker_ubuntu1404():
    """
    Return an ``Effect`` for uninstalling the Flocker package from an Ubuntu
    14.04 machine.
    """
    apt_remove = [
        b"apt-get", b"remove", b"-y", b"--purge", b"clusterhq-python-flocker",
    ]
    return run_from_args(apt_remove)
def _uninstall_flocker_centos7():
    """
    Return an ``Effect`` for uninstalling the Flocker package from a CentOS 7
    machine.
    """
    # Remove the Flocker package itself, then the repository definition.
    packages = [b"clusterhq-python-flocker", b"clusterhq-release"]
    return sequence([
        run_from_args([b"yum", b"erase", b"-y", package])
        for package in packages
    ])
# Map of distribution name to the task that uninstalls Flocker there.
_flocker_uninstallers = {
    "ubuntu-14.04": _uninstall_flocker_ubuntu1404,
    "centos-7": _uninstall_flocker_centos7,
}
def task_uninstall_flocker(distribution):
    """
    Return an ``Effect`` for uninstalling the Flocker package from the given
    distribution.

    :param bytes distribution: The distribution the node is running.
    :raises DistributionNotSupported: If there is no uninstaller registered
        for ``distribution``.
    """
    try:
        uninstaller = _flocker_uninstallers[distribution]
    except KeyError:
        # Report unknown distributions the same way the install tasks do
        # instead of leaking a bare KeyError.
        raise DistributionNotSupported(distribution=distribution)
    return uninstaller()
def uninstall_flocker(nodes):
    """
    Return an ``Effect`` for uninstalling the Flocker package from all of the
    given nodes.
    """
    def uninstall(node):
        # Each node may run a different distribution.
        return task_uninstall_flocker(node.distribution)

    return _run_on_all_nodes(nodes, task=uninstall)
def task_install_docker(distribution):
    """
    Return an ``Effect`` for installing Docker if it is not already installed.

    The installer from ``https://get.docker.com/`` is used, so whatever
    version that script serves at run time is what gets installed.  The
    version floats deliberately:

    * Docker development moves quickly and get.docker.com does not keep old
      versions around, so pinning one is laborious.
    * Other repositories often only carry older packages, use different
      package names, and apply different system-specific patches.

    If Docker is already present, whatever version it is, the requirement is
    considered satisfied (the user is assumed to know what they're doing).
    """
    if is_centos(distribution):
        # The Docker packages don't declare all of their dependencies. They
        # seem to work on an up-to-date system, though, so make sure the system
        # is up to date.
        update = b"yum --assumeyes update && "
    else:
        update = b""
    installer = (
        b"curl https://get.docker.com/ > /tmp/install-docker.sh && "
        b"sh /tmp/install-docker.sh"
    )
    # Only run the installer when /usr/bin/docker is absent.
    guarded = b"[[ -e /usr/bin/docker ]] || { " + update + installer + b"; }"
    return run(command=guarded)
def task_install_flocker(
    distribution=None,
    package_source=PackageSource(),
):
    """
    Install flocker cluster on a distribution.

    :param bytes distribution: The distribution the node is running.
    :param PackageSource package_source: The source from which to install the
        package.
    :raises: ``UnsupportedDistribution`` if the distribution is unsupported.
    """
    # NOTE(review): the ``PackageSource()`` default is evaluated once at
    # definition time and shared across calls — fine only if PackageSource
    # instances are immutable; confirm.
    return task_package_install("clusterhq-flocker-node",
                                distribution, package_source)
# Docker images pre-pulled onto nodes for the acceptance test suite
# (see task_pull_docker_images).
ACCEPTANCE_IMAGES = [
    "postgres:latest",
    "clusterhq/mongodb:latest",
    "python:2.7-slim",
    "busybox",
]
def task_pull_docker_images(images=ACCEPTANCE_IMAGES):
    """
    Pull docker images.

    :param list images: List of images to pull. Defaults to images used in
        acceptance tests.
    """
    pulls = [run_from_args(['docker', 'pull', image]) for image in images]
    return sequence(pulls)
def task_enable_updates_testing(distribution):
    """
    Enable the distribution's proposed updates repository.

    :param bytes distribution: See func:`task_install_flocker`
    :raises DistributionNotSupported: always — no distribution currently has
        a proposed-updates repository implemented here.
    """
    raise DistributionNotSupported(distribution=distribution)
def task_enable_docker_head_repository(distribution):
    """
    Enable the distribution's repository containing in-development docker
    builds.

    :param bytes distribution: See func:`task_install_flocker`
    """
    if not is_centos(distribution):
        raise DistributionNotSupported(distribution=distribution)
    # Yum repository definition pointing at the CentOS virt7-testing builds.
    repo_definition = dedent("""\
        [virt7-testing]
        name=virt7-testing
        baseurl=http://cbs.centos.org/repos/virt7-testing/x86_64/os/
        enabled=1
        gpgcheck=0
        """)
    return sequence([
        put(content=repo_definition,
            path="/etc/yum.repos.d/virt7-testing.repo")
    ])
def provision(distribution, package_source, variants):
    """
    Provision the node for running flocker.

    This drives all the common node installation steps in:
     * http://doc-dev.clusterhq.com/gettingstarted/installation.html

    :param bytes distribution: See func:`task_install_flocker`
    :param PackageSource package_source: See func:`task_install_flocker`
    :param set variants: The set of variant configurations to use when
        provisioning
    """
    commands = []
    # Optional repository tweaks requested through variants come first.
    if Variants.DISTRO_TESTING in variants:
        commands.append(task_enable_updates_testing(distribution))
    if Variants.DOCKER_HEAD in variants:
        commands.append(task_enable_docker_head_repository(distribution))
    # Then Docker itself, the Flocker node package, its Docker plugin,
    # and finally the Docker service enablement.
    commands.extend([
        task_install_docker(distribution),
        task_install_flocker(
            package_source=package_source, distribution=distribution),
        task_package_install(
            "clusterhq-flocker-docker-plugin", distribution, package_source),
        task_enable_docker(distribution),
    ])
    return sequence(commands)
def _run_on_all_nodes(nodes, task):
    """
    Run some commands on some nodes.

    :param nodes: An iterable of ``Node`` instances where the commands should
        be run.
    :param task: A one-argument callable which is called with each ``Node``
        and should return the ``Effect`` to run on that node.
    :return: An ``Effect`` that runs the commands on a group of nodes.
    """
    effects = []
    for node in nodes:
        effects.append(
            run_remotely(
                username='root',
                address=node.address,
                commands=task(node),
            )
        )
    return sequence(effects)
def install_flocker(nodes, package_source):
    """
    Return an ``Effect`` that installs a certain version of Flocker on the
    given nodes.

    :param nodes: An iterable of ``Node`` instances on which to install
        Flocker.
    :param PackageSource package_source: The version of Flocker to install.
    :return: An ``Effect`` which installs Flocker on the nodes.
    """
    def install(node):
        # The same package source is used on every node; the distribution
        # may differ per node.
        return task_install_flocker(
            distribution=node.distribution,
            package_source=package_source,
        )

    return _run_on_all_nodes(nodes, task=install)
def configure_cluster(cluster, dataset_backend_configuration):
    """
    Configure flocker-control, flocker-dataset-agent and
    flocker-container-agent on a collection of nodes.

    :param Cluster cluster: Description of the cluster to configure.
    :param dict dataset_backend_configuration: Configuration parameters to
        supply to the dataset backend.
    """
    return sequence([
        # First bring up the control service on the control node, with the
        # cluster and control-service certificates installed.
        run_remotely(
            username='root',
            address=cluster.control_node.address,
            commands=sequence([
                task_install_control_certificates(
                    cluster.certificates.cluster.certificate,
                    cluster.certificates.control.certificate,
                    cluster.certificates.control.key),
                task_enable_flocker_control(cluster.control_node.distribution),
            ]),
        ),
        # Then configure every agent node: node and API certificates,
        # Docker, the agent configuration pointing at the control node, the
        # Docker plugin, and finally the agents themselves.  Each node is
        # paired with its certificate/key by position in the two lists.
        sequence([
            sequence([
                run_remotely(
                    username='root',
                    address=node.address,
                    commands=sequence([
                        task_install_node_certificates(
                            cluster.certificates.cluster.certificate,
                            certnkey.certificate,
                            certnkey.key),
                        task_install_api_certificates(
                            cluster.certificates.user.certificate,
                            cluster.certificates.user.key),
                        task_enable_docker(node.distribution),
                        task_configure_flocker_agent(
                            control_node=cluster.control_node.address,
                            dataset_backend=cluster.dataset_backend,
                            dataset_backend_configuration=(
                                dataset_backend_configuration
                            ),
                        ),
                        task_enable_docker_plugin(node.distribution),
                        task_enable_flocker_agent(
                            distribution=node.distribution,
                        )]),
                ),
            ]) for certnkey, node
            in zip(cluster.certificates.nodes, cluster.agent_nodes)
        ])
    ])
| |
# -*- coding: UTF-8 -*-
'''
Created on Sep 9, 2013
@author: tanel
'''
import sys
from itertools import groupby
import math
import re
import syllabifier
import yaml
import os
# Per-language configuration (phoneme classes, syllabifier settings, ...)
# shipped with the package.
# NOTE(review): yaml.load without an explicit Loader is deprecated in
# PyYAML >= 5.1 and unsafe on untrusted input; the opened file handle is
# also never closed. Consider yaml.safe_load with a `with open(...)` block.
LANGUAGES = yaml.load(open(os.path.dirname(__file__) + '/data/languages.yaml'))
# Number of neighbouring phones used as context features on each side.
LEFT_CONTEXT = 2
RIGHT_CONTEXT = 2
# Whether context positions also carry a duration-based feature.
USE_DURATION_FEATURE = True
# Whether filler (non-speech) phones are skipped in make_linear().
SKIP_FILLERS = True
def sigmoid(x):
    """
    Return the logistic sigmoid 1 / (1 + e**-x), mapping any real to (0, 1).

    Guarded against OverflowError: for very negative x, math.exp(-x)
    exceeds the float range, so the saturated value 0.0 is returned
    directly instead of raising.
    """
    if x < -709:
        # math.exp(710) overflows a double; the true value here is ~0.
        return 0.0
    return 1 / (1 + math.exp(-x))
def dur_function(dur):
    """Squash a duration (in frames) into (-1, 1) via a scaled sigmoid."""
    squashed = sigmoid(dur * 0.1)
    return (squashed - 0.5) * 2.0
def encode(alist):
    """Run-length encode *alist* into a list of (value, count) pairs."""
    encoded = []
    for value, run in groupby(alist):
        encoded.append((value, len(list(run))))
    return encoded
def syllabify(phonemes, language, nonsilence_phonemes):
    """
    Split *phonemes* into syllables using the language's syllabifier
    configuration.

    Returns a list of syllables (each a list of phonemes), or None when the
    language has no syllabifier configured or the input is a lone
    silence/filler phone.
    """
    conf = LANGUAGES[language].get('syllabifier_conf', None)
    if conf is None:
        return None
    if len(phonemes) == 1 and phonemes[0] not in nonsilence_phonemes:
        # A single non-speech phone carries no syllable structure.
        return None
    parts = syllabifier.syllabify(conf, phonemes)
    # Elements 1..3 of each tuple are concatenated (presumably
    # onset/nucleus/coda; element 0 is skipped) — TODO confirm.
    return [part[1] + part[2] + part[3] for part in parts]
def load_stress_dict(filename):
    """
    Parse a CMU-style pronunciation dictionary.

    Lines starting with ";;;" are comments.  Other lines are
    ``WORD PH0 PH1 ...`` where a trailing 1/2 on a phoneme marks
    primary/secondary stress.

    :return: dict mapping word -> list of (phonemes, stress) pairs, where
        ``phonemes`` are upper-cased with digits stripped and ``stress`` is
        a parallel list of 0/1/2 values.
    """
    result = {}
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(filename) as f:
        for l in f:
            if l.startswith(";;;"):
                # Comment line.
                continue
            ss = l.split()
            word = ss[0]
            # Strip a single trailing digit from the word — presumably an
            # alternative-pronunciation index ("WORD2" -> "WORD").
            word = re.sub(r"(\d)$", "", word)
            phonemes = [re.sub(r"\d", "", x).upper() for x in ss[1:]]
            stress = [0] * len(phonemes)
            for i, x in enumerate(ss[1:]):
                if x.endswith("1"):
                    stress[i] = 1
                if x.endswith("2"):
                    stress[i] = 2
            result.setdefault(word, []).append((phonemes, stress))
    return result
def get_stress(word, phonemes, stress_dict):
    """
    Return the stress pattern of the pronunciation of *word* whose phoneme
    sequence equals *phonemes*, or an all-zero pattern when no match exists.
    """
    for candidate_phonemes, candidate_stress in stress_dict.get(word, []):
        if candidate_phonemes == phonemes:
            return candidate_stress
    # Unknown word, or none of its pronunciations has exactly these phonemes.
    return [0] * len(phonemes)
def phone_runlengths_from_frames(frames, transitions):
    """
    Collapse a per-frame sequence of transition IDs into (phone, run_length)
    pairs.

    Phone names have any "_<suffix>" removed ("aa_B" -> "aa").  Consecutive
    frames are merged while their transition maps to the same raw phone
    name; as the original noted, two identical phones in direct succession
    therefore merge into one run (relying on final-state transitions was
    found unreliable).
    """
    runlengths = []
    run_length = 0
    previous = ""
    for frame in frames:
        current = transitions[frame][0]
        if run_length > 0 and current != previous:
            # Phone changed: flush the completed run.
            runlengths.append((previous.partition("_")[0], run_length))
            run_length = 1
        else:
            run_length += 1
        previous = current
    runlengths.append((previous.partition("_")[0], run_length))
    return runlengths
def make_local(start_frame, word_id, frames, transitions, word_list, nonsilence_phonemes, language="ESTONIAN", stress_dict=None):
    """
    Build the per-phone local feature representation for one word.

    Returns a list with one ``(features, duration)`` pair per phone, where
    ``features`` is a list of ``(name, value)`` tuples covering the phone
    identity, its phoneme classes, its syllable index (when syllabification
    succeeds), word-boundary markers and, optionally, lexical stress.
    """
    phone_rl_names = phone_runlengths_from_frames(frames, transitions)
    #print >> sys.stderr, phone_rl_names
    word = word_list[word_id]
    features_and_dur_seq = []
    syllables = syllabify([p[0] for p in phone_rl_names], language, nonsilence_phonemes)
    syllable_ids = None
    if syllables:
        # Give each phone the 1-based index of the syllable it belongs to.
        syllable_ids = []
        i = 1
        for s in syllables:
            for p in s:
                syllable_ids.append(i)
            i += 1
    i = 0
    current_start_frame = start_frame
    for (phone, dur) in phone_rl_names:
        features = []
        # Phone identity feature.
        features.append(("%s" % phone, 1))
        # One feature per phoneme class the phone is a member of.
        for (kl, phonemes) in LANGUAGES[language]["phoneme_classes"].iteritems():
            if phone in phonemes:
                features.append((kl, 1))
        if syllable_ids:
            features.append(("syllable", syllable_ids[i]))
        features_and_dur_seq.append((features, dur))
        i += 1
        current_start_frame += dur
    # Mark word boundaries, or a standalone phoneme for one-phone words.
    if len(phone_rl_names) > 1:
        features_and_dur_seq[0][0].append(("word_initial", 1))
        features_and_dur_seq[-1][0].append(("word_final", 1))
    elif phone_rl_names[0][0] in nonsilence_phonemes:
        features_and_dur_seq[0][0].append(("single_phoneme", 1))
    if stress_dict:
        # Add stress1/stress2 features for phones the stress dictionary
        # marks as stressed in a matching pronunciation.
        stress = get_stress(word, [p[0].upper() for p in phone_rl_names], stress_dict)
        for i, s in enumerate(stress):
            if s > 0:
                features_and_dur_seq[i][0].append(("stress%d" % s, 1))
    return features_and_dur_seq
def make_linear(feature_and_dur_seqs, nonsilence_phonemes, speaker_id):
    """
    Flatten per-word feature sequences into one linear sequence with
    left/right context features attached to every phone.

    :param feature_and_dur_seqs: per-word lists of (features, duration)
        pairs, as produced by ``make_local``.
    :param nonsilence_phonemes: phonemes considered actual speech; used to
        detect (and, with SKIP_FILLERS, skip) filler phones.
    :param speaker_id: opaque identifier copied into every output row.
    :return: list of (features, speaker_id, duration) tuples.
    """
    full_feature_seq = []
    local_feature_seq = []
    # First build one flat list of all phones so context lookups can cross
    # word boundaries.
    for feature_and_dur_seq in feature_and_dur_seqs:
        for (feature_set, dur) in feature_and_dur_seq:
            local_feature_seq.append((feature_set, dur))
    i = 0
    for feature_and_dur_seq in feature_and_dur_seqs:
        for (feature_list, dur) in feature_and_dur_seq:
            # A phone counts as filler unless one of its identity features
            # is a non-silence phoneme.
            is_filler = True
            tmp_feature_set = set(feature_list)
            #print >> sys.stderr, tmp_feature_set
            if SKIP_FILLERS:
                for phoneme in nonsilence_phonemes:
                    if (phoneme, 1) in tmp_feature_set:
                        is_filler = False
                        break
            if is_filler:
                i += 1
                continue
            full_feature_list = []
            full_feature_list.extend(feature_list)
            # Left context: real neighbours when available, <s> padding
            # otherwise (features starting with "_" are skipped).
            for j in range(1, LEFT_CONTEXT + 1):
                if i - j >= 0:
                    full_feature_list.extend(
                        [("pos-%d:%s" % (j, s), value) for (s, value) in local_feature_seq[i - j][0] if not s.startswith("_")])
                    if USE_DURATION_FEATURE:
                        full_feature_list.append(("pos-%d:dur" % j, dur_function(local_feature_seq[i - j][1])))
                else:
                    full_feature_list.append(("pos-%d:<s>" % j, 1))
                    if USE_DURATION_FEATURE:
                        # Padding positions get a fixed nominal duration.
                        full_feature_list.append(("pos-%d:dur" % j, dur_function(10)))
            # Right context, padded with </s> past the end of the sequence.
            for j in range(1, RIGHT_CONTEXT + 1):
                if i + j < len(local_feature_seq):
                    full_feature_list.extend(
                        [("pos+%d:%s" % (j, s), value) for (s, value) in local_feature_seq[i + j][0] if not s.startswith("_")])
                else:
                    full_feature_list.append(("pos+%d:</s>" % j, 1))
            full_feature_seq.append((full_feature_list, speaker_id, dur))
            i += 1
    return full_feature_seq
def get_context_features_and_durs(lattice, feature_and_dur_seqs):
    """
    For every arc in *lattice*, collect the features of the phones just
    before and just after the arc (crossing arc boundaries), keyed by
    signed relative position (-LEFT_CONTEXT..-1 and 1..RIGHT_CONTEXT).

    Left-context entries additionally carry a "dur" feature; positions
    beyond the utterance edges are padded with <s>/</s> markers.

    :return: a list with one ``{position: feature_list}`` dict per arc.
    """
    contexts = []
    for i, arc in enumerate(lattice.arcs):
        #print "--- processing arc", arc
        contexts_map = {}
        prev_arcs = lattice.get_previous_arcs(arc)
        if len(prev_arcs) > 0:
            # Walk backwards phone by phone, hopping to the previous arc
            # whenever the current one runs out of phones.
            prev_arc = prev_arcs[0]
            #print "prev_arc: ", prev_arc
            prev_arc_id = prev_arc.id
            index_of_prev_phone = -1
            for j in range(1, LEFT_CONTEXT + 1):
                #print "finding context", -j
                contexts_map[-j] = feature_and_dur_seqs[prev_arc_id][index_of_prev_phone][0] +\
                    [("dur", dur_function(feature_and_dur_seqs[prev_arc_id][index_of_prev_phone][1]))]
                index_of_prev_phone -= 1
                if index_of_prev_phone < -len(feature_and_dur_seqs[prev_arc_id]):
                    prev_arcs = lattice.get_previous_arcs(prev_arc)
                    if len(prev_arcs) > 0:
                        prev_arc = prev_arcs[0]
                        prev_arc_id = prev_arc.id
                        #print "new prev arc:", prev_arc
                        index_of_prev_phone = -1
                    else:
                        # No more arcs: pad the next position and stop.
                        contexts_map[-j - 1] = [("<s>", 1), ("dur", dur_function(10))]
                        break
        else:
            # No predecessor arc at all: every left position is padding.
            for j in range(1, LEFT_CONTEXT + 1):
                contexts_map[-j] = [("<s>", 1),
                                    ("dur", dur_function(10))]
        next_arcs = lattice.get_next_arcs(arc)
        if len(next_arcs) > 0:
            # Same walk, forwards, for the right context (no "dur" feature).
            next_arc = next_arcs[0]
            #print "next_arc: ", next_arc
            next_arc_id = next_arc.id
            index_of_next_phone = 0
            for j in range(1, RIGHT_CONTEXT + 1):
                #print "finding context", j
                contexts_map[j] = feature_and_dur_seqs[next_arc_id][index_of_next_phone][0]
                index_of_next_phone += 1
                if index_of_next_phone >= len(feature_and_dur_seqs[next_arc_id]):
                    next_arcs = lattice.get_next_arcs(next_arc)
                    if len(next_arcs) > 0:
                        next_arc = next_arcs[0]
                        next_arc_id = next_arc.id
                        #print "new next arc:", next_arc
                        index_of_next_phone = 0
                    else:
                        contexts_map[j + 1] = [("</s>", 1)]
                        break
        else:
            contexts_map[1] = [("</s>", 1)]
        contexts.append(contexts_map)
    return contexts
def compile_features_for_word(context, local_feature_seq):
    """
    Attach left/right context features to every phone of one word.

    :param context: ``{relative_position: feature_list}`` dict describing
        phones outside this word (see ``get_context_features_and_durs``).
    :param local_feature_seq: list of (features, duration) pairs for the
        word's own phones.
    :return: list of (full_feature_list, duration) pairs.
    """
    full_feature_seq = []
    i = 0
    for (feature_set, dur) in local_feature_seq:
        full_feature_list = []
        full_feature_list.extend(feature_set)
        for j in range(1, LEFT_CONTEXT + 1):
            delta_pos = i - j
            if delta_pos >= 0:
                # Neighbour inside the word (features starting with "_"
                # are internal and skipped).
                full_feature_list.extend(
                    [("pos-%d:%s" % (j, s), value) for (s, value) in local_feature_seq[i - j][0] if not s.startswith("_")])
                if USE_DURATION_FEATURE:
                    full_feature_list.append(("pos-%d:dur" % j, dur_function(local_feature_seq[i - j][1])))
            else:
                # Neighbour outside the word: taken from the precomputed
                # cross-word context map.
                full_feature_list.extend([("pos-%d:%s" % (j, s), value) for (s, value) in context.get(delta_pos, [])])
        for j in range(1, RIGHT_CONTEXT + 1):
            if i + j < len(local_feature_seq):
                full_feature_list.extend(
                    [("pos+%d:%s" % (j, s), value) for (s, value) in local_feature_seq[i + j][0] if not s.startswith("_")])
            else:
                # Position past the last phone of the word: map it to the
                # corresponding offset in the cross-word context.
                full_feature_list.extend(
                    [("pos+%d:%s" % (j, s), value) for (s, value) in context.get(j - (len(local_feature_seq) - i - 1), [])])
        full_feature_seq.append((full_feature_list, dur))
        i += 1
    return full_feature_seq
def read_transitions(filename):
    """
    Parse a transition-model dump (presumably Kaldi ``show-transitions``
    output — TODO confirm) into a lookup table.

    :return: a list indexed by transition ID (entry 0 is None since IDs are
        1-based) of ``(phone, to_state, is_final)`` tuples, where
        ``is_final`` marks transitions whose destination is one past the
        phone's last-seen HMM state.
    """
    # a list of transitions, add None to make it aligned with transition IDs
    transitions = [None]
    final_transition_states = {}
    print >> sys.stderr, "Reading transition model..."
    current_phone = None
    for l in open(filename):
        if l.startswith("Transition-state "):
            ss = l.split()
            current_phone = ss[4]
            hmm_state = int(ss[7])
            # Remember the last HMM state seen for this phone.
            final_transition_states[current_phone] = hmm_state
        elif l.startswith(" Transition-id = "):
            ss = l.split()
            to_state = None
            # Only "-> N" style lines carry a destination state.
            if len(ss) == 9 and ss[7] == "->":
                to_state = int(ss[8][:-1])
            transitions.append((current_phone, to_state))
        else:
            raise Exception("Unexpected line in transition model data: ", l)
    print >> sys.stderr, "Finding final states"
    # Second pass: flag transitions that leave the phone's final HMM state.
    for (i, transition) in enumerate(transitions):
        if transition is None:
            continue
        if transition[1] is not None and final_transition_states[transition[0]] + 1 == transition[1]:
            transitions[i] = (transition[0], transition[1], True)
        else:
            transitions[i] = (transition[0], transition[1], False)
    return transitions
| |
# -*- coding: utf-8 -*-
"""This module contains classes for constructing propeller navbars"""
from django.utils.safestring import mark_safe
from .utils import render_tag, add_css_class
from .text import text_concat
try: # pragma: no cover
from django.urls import reverse
except ImportError: # pragma: no cover
from django.core.urlresolvers import reverse
class NavBarLinkItem(object):
    """
    A plain link item for a navbar or a dropdown menu.

    **Parameters**:

    name
        The display name for the item. (for example: 'Home')
    url
        The address for the link item. Can be a absolute URL or a resolvable
        Django url. (for example: 'http://example.org' or 'home'). Optional.
    icon
        not yet supported
    """
    name = None
    url = None
    icon = None

    def __init__(self, name="", url=None, icon=None):
        """
        """
        self.name = name
        self.url = url
        self.icon = icon

    def get_url(self):
        """
        Resolve the ``url`` attribute into an href value.

        **Returns**

        ``javascript:void(0);`` if ``url = None``, the url itself when it is
        absolute (starts with 'http'), or the reversed Django url otherwise.
        """
        if not self.url:
            return "javascript:void(0);"
        if str(self.url).startswith('http'):
            return self.url
        return reverse(self.url)

    def as_html(self):
        """Render the item as an ``<li><a ...></a></li>`` fragment."""
        anchor = render_tag(
            'a',
            attrs={'class': 'pmd-ripple-effect', 'href': self.get_url()},
            content=mark_safe(self.name),
        )
        return '<li>' + anchor + '</li>'
class NavBarDropDownDivider(object):
    """A horizontal separator line between dropdown menu items."""

    @staticmethod
    def as_html():
        """Render the divider as an ``<li role="separator">`` element."""
        divider_attrs = {'role': 'separator', 'class': 'divider'}
        return render_tag('li', attrs=divider_attrs, )
class NavBarDropDownItem(NavBarLinkItem):
    """
    Generates a DropDown navbar item.

    **Parameters**:

    name
        The display name for the item. (for example: 'Home')
    url
        The address for the link item. Can be a absolute URL or a resolvable
        Django url. (for example: 'http://example.org' or 'home'). Optional.
    icon
        not yet supported
    items
        A list containing NavBarLinkItems and/or NavBarDropDownDivider.
        Optional.
    """
    # Kept for backward compatibility with code reading the class attribute;
    # instances always get their own list in __init__ (see below).
    items = []

    def __init__(self, name="", items=None, url=None):
        super(NavBarDropDownItem, self).__init__(name, url)
        # Always bind an instance-level list.  Previously a falsy ``items``
        # left the instance using the class-level list, which is shared by
        # every instance and would leak items appended by one into all.
        self.items = items if items is not None else []

    def as_html(self):
        """Render the dropdown toggle plus its ``<ul>`` of child items."""
        tag = 'li'
        attrs = {'class': 'dropdown pmd-dropdown'}
        content = '<a data-toggle="dropdown" class="pmd-ripple-effect dropdown-toggle" data-sidebar="true" ' \
                  'href="%s">%s<span class="caret"></span></a>' % (self.url, self.name)
        content = text_concat(content, '<ul class="dropdown-menu">')
        for itm in self.items:
            content = text_concat(content, itm.as_html())
        content = text_concat(content, '</ul>')
        return render_tag(tag, attrs=attrs, content=mark_safe(content), )
class NavBar(object):
    """
    NavBar is a class that generates a NavBar.

    **Parameters**:

    brandname
        The brand shown on the very left of the navbar.
    brandurl
        The address for the brand name. Can be a absolute URL or a resolvable Django url.
        (for example: 'http://example.org' or 'home'). Optional.
    items
        A list containing NavBarLinkItems and/or NavBarDropDownItems. Optional.
    style_inverse
        Generate a dark navbar if true (default) or a light navbar if false.
    style_static
        Sets the static style for the navbar. Static if true (default) or floating on top if false.
    """
    brandname = ""
    brandurl = None
    # NOTE(review): this class-level list is shared by all instances unless
    # an instance reassigns ``items`` (there is no __init__ copying it).
    items = []
    style_inverse = True
    style_static = True

    def get_brand_url(self):
        """
        Returns the brand url set in the attribute.

        **Returns**

        ``javascript:void(0);`` if ``brandurl = None``

        or

        an absolute URL if ``brandurl`` starts with 'http'

        or

        an relative URL if ``brandurl`` is a resolvable Django url
        """
        if self.brandurl:
            if not str(self.brandurl).startswith('http'):
                return reverse(self.brandurl)
            return self.brandurl
        return "javascript:void(0);"

    @staticmethod
    def render_toggle():
        """Returns navbar toggle as html (for responsive)"""
        tag = 'button'
        attrs = {
            'class': 'navbar-toggle collapsed',
            'type': 'button',
            'data-toggle': 'collapse',
            'aria-expanded': 'false'
        }
        # Screen-reader label plus the three "hamburger" icon bars.
        content = '<span class="sr-only">Toggle navigation</span>'
        content = text_concat(content, '<span class="icon-bar"></span>')
        content = text_concat(content, '<span class="icon-bar"></span>')
        content = text_concat(content, '<span class="icon-bar"></span>')
        return render_tag(tag, attrs=attrs, content=mark_safe(content), )

    def render_header(self):
        """Returns navbar header (toggle button plus brand link) as html"""
        tag = 'div'
        attrs = {'class': 'navbar-header'}
        content = self.render_toggle()
        content = text_concat(content, '<a href="%s" class="navbar-brand navbar-brand-custome">%s'
                                       '</a>' % (self.get_brand_url(), self.brandname))
        return render_tag(tag, attrs=attrs, content=mark_safe(content), )

    def render_items(self):
        """Returns navbar items as html (for item container)"""
        tag = 'ul'
        attrs = {'class': 'nav navbar-nav'}
        content = ''
        # Each item renders itself (link, dropdown, divider or custom HTML).
        for itm in self.items:
            content = text_concat(content, itm.as_html())
        return render_tag(tag, attrs=attrs, content=mark_safe(content), )

    def render_item_container(self):
        """Returns navbar items wrapped in the collapsible container as html"""
        tag = 'div'
        attrs = {'class': 'collapse navbar-collapse'}
        content = self.render_items()
        return render_tag(tag, attrs=attrs, content=mark_safe(content), )

    def render_content(self):
        """Returns navbar content (header + item container) as html"""
        tag = 'div'
        attrs = {'class': 'container-fluid'}
        content = self.render_header()
        content = text_concat(content, self.render_item_container())
        return render_tag(tag, attrs=attrs, content=mark_safe(content), )

    def as_html(self):
        """Returns the complete navbar as html"""
        tag = 'nav'
        # CSS classes depend on the style_* flags set on the instance/class.
        classes = 'navbar'
        if self.style_inverse:
            classes = add_css_class(classes, 'navbar-inverse')
        if self.style_static:
            classes = add_css_class(classes, 'navbar-static')
        else:
            classes = add_css_class(classes, 'navbar-top')
        classes = add_css_class(classes, 'pmd-navbar')
        classes = add_css_class(classes, 'pmd-z-depth')
        attrs = {'class': classes}
        content = self.render_content()
        content = text_concat(content, '<div class="pmd-sidebar-overlay"></div>')
        return render_tag(tag, attrs=attrs, content=mark_safe(content), )
class CustomItem(object):
    """
    A navbar item rendered from raw, caller-supplied HTML.

    Assign the markup to the ``html`` attribute (or pass it to the
    constructor); ``as_html`` emits it unchanged.
    """
    html = ""

    def __init__(self, html=""):
        self.html = html

    def as_html(self):
        """Return the stored HTML verbatim."""
        return self.html
| |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, Dict, Iterator
from unittest import mock
from unittest.mock import ANY
import pytest
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.loops import Loop, TrainingBatchLoop
from pytorch_lightning.trainer.progress import BaseProgress
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
class NestedLoop(Loop):
    """Minimal ``Loop`` subclass with two connectable child loops, used to
    verify trainer-reference propagation."""

    def __init__(self):
        super().__init__()
        # Children are attached later via ``connect``.
        self.child_loop0 = None
        self.child_loop1 = None

    @property
    def done(self) -> bool:
        # Never finishes on its own; the tests drive it manually.
        return False

    def connect(self, child0, child1):
        self.child_loop0 = child0
        self.child_loop1 = child1

    def reset(self) -> None:
        pass

    def advance(self, *args, **kwargs):
        pass
@pytest.mark.parametrize("loop_name", ["fit_loop", "validate_loop", "test_loop", "predict_loop"])
def test_connect_loops_direct(loop_name):
    """Assigning a loop to any Trainer loop attribute sets the loop's
    trainer reference."""
    nested = NestedLoop()
    assert nested.trainer is None

    trainer = Trainer()
    # Equivalent to ``trainer.<loop_name> = nested``.
    setattr(trainer, loop_name, nested)
    assert nested.trainer is trainer
def test_connect_loops_recursive():
    """The Trainer reference propagates from a parent loop into its nested
    child loops on assignment."""
    parent = NestedLoop()
    children = (NestedLoop(), NestedLoop())
    parent.connect(*children)
    assert parent.trainer is None
    assert parent.child_loop0.trainer is None

    trainer = Trainer()
    trainer.fit_loop = parent
    for child in children:
        assert child.trainer is trainer
def test_connect_subloops(tmpdir):
    """Test connecting individual subloops by calling `trainer.x.y.connect()`"""
    model = BoringModel()
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)

    epoch_loop = trainer.fit_loop.epoch_loop
    replacement = TrainingBatchLoop()
    epoch_loop.connect(batch_loop=replacement)
    assert epoch_loop.batch_loop is replacement
    # The trainer reference is only attached once fitting starts.
    assert replacement.trainer is None

    trainer.fit(model)
    assert replacement.trainer is trainer
class CustomException(Exception):
    """Raised deliberately inside loops/models below to simulate a failure."""
    pass
def test_loop_restore():
    """A loop that fails mid-run can be restored from its state dict and
    resume, ending with the same outputs as an uninterrupted run."""

    class Simple(Loop):
        def __init__(self, dataset: Iterator):
            super().__init__()
            self.iteration_count = 0
            self.dataset = dataset

        @property
        def skip(self) -> bool:
            return False

        @property
        def done(self) -> bool:
            return self.iteration_count > len(self.dataset)

        def reset(self) -> None:
            self.iter_dataset = iter(self.dataset)
            if self.restarting:
                # Fast-forward the fresh iterator to where we stopped, and
                # move past the iteration index that raised so advance()
                # does not fail again.
                for _ in range(self.iteration_count):
                    next(self.iter_dataset)
                self.iteration_count += 1
            else:
                self.outputs = []

        def advance(self) -> None:
            value = next(self.iter_dataset)

            if self.iteration_count == 5:
                # Simulate a mid-run crash.
                raise CustomException

            self.outputs.append(value)

        def on_advance_end(self) -> None:
            self.iteration_count += 1

        def state_dict(self) -> Dict:
            return {"iteration_count": self.iteration_count, "outputs": self.outputs}

        def load_state_dict(self, state_dict: Dict) -> None:
            self.iteration_count = state_dict["iteration_count"]
            self.outputs = state_dict["outputs"]

    trainer = Trainer()

    data = range(10)
    loop = Simple(data)
    loop.trainer = trainer
    try:
        loop.run()
        state_dict = {}
    except CustomException:
        # Capture the loop state at the moment of failure.
        state_dict = loop.state_dict()

    # Restore into a fresh loop instance and finish the run.
    loop = Simple(data)
    loop.trainer = trainer

    loop.load_state_dict(state_dict)
    loop.restarting = True
    loop.run()

    # The restart flag is cleared and every item was processed exactly once.
    assert not loop.restarting
    assert loop.outputs == list(range(10))
def test_loop_hierarchy():
    """Exercise state_dict/load_state_dict round-trips across a parent loop
    holding a child loop, including progress tracking and detachment."""

    @dataclass
    class SimpleProgress(BaseProgress):
        increment: int = 0

    class Simple(Loop):
        def __init__(self, a):
            super().__init__()
            self.a = a
            self.progress = SimpleProgress()

        def advance(self, *args: Any, **kwargs: Any) -> None:
            # Run the child loop when one is attached.
            loop = getattr(self, "loop_child", None)
            if not loop:
                return
            loop.run()

        def on_advance_end(self):
            self.progress.increment += 1

        @property
        def done(self) -> bool:
            return self.progress.increment > 0

        def reset(self) -> None:
            ...

        def on_save_checkpoint(self) -> Dict:
            return {"a": self.a}

        def on_load_checkpoint(self, state_dict: Dict) -> None:
            self.a = state_dict["a"]

    loop_parent = Simple(1)
    loop_child = Simple(2)
    loop_parent.loop_child = loop_child

    # check the trainer reference is propagated
    loop_parent.trainer = Trainer()
    assert loop_child.trainer is loop_parent.trainer

    # the child's state is nested under "loop_child."-prefixed keys
    state_dict = loop_parent.state_dict()
    assert state_dict == {
        "state_dict": {"a": 1},
        "progress": {"increment": 0},
        "loop_child.state_dict": {"a": 2},
        "loop_child.progress": {"increment": 0},
    }

    state_dict["loop_child.state_dict"]["a"] = 3
    # check restarting after `load_state_dict`
    loop_parent.load_state_dict(state_dict)
    assert loop_parent.restarting

    loop_parent.run()

    # check the new state after `run`
    state_dict = loop_parent.state_dict()
    assert state_dict == {
        "state_dict": {"a": 1},
        "progress": {"increment": 1},
        "loop_child.state_dict": {"a": 3},
        "loop_child.progress": {"increment": 1},
    }

    loop_parent_copy = deepcopy(loop_parent)
    assert loop_parent_copy.state_dict() == loop_parent.state_dict()

    assert loop_parent_copy.on_save_checkpoint() == state_dict["state_dict"]
    assert loop_parent_copy.loop_child.on_save_checkpoint() == state_dict["loop_child.state_dict"]

    # loading into a fresh hierarchy restores the progress counters
    loop_parent = Simple(1)
    loop_child = Simple(2)
    loop_parent.loop_child = loop_child
    loop_parent.load_state_dict(state_dict)
    assert loop_parent.progress.increment == 1
    assert loop_parent.loop_child.progress.increment == 1

    # detaching the child drops its entries from the state dict
    del loop_parent.loop_child
    state_dict = loop_parent.state_dict()
    assert state_dict == {"state_dict": {"a": 1}, "progress": {"increment": 1}}
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
@pytest.mark.parametrize("stop_epoch", (1, 2))
@pytest.mark.parametrize("stop_batch", (1, 2))
@pytest.mark.parametrize("n_dataloaders,stop_dataloader", [(2, 0), (2, 1), (3, 2)])
@RunIf(min_torch="1.7.0")
def test_loop_restart_progress_multiple_dataloaders(tmpdir, n_dataloaders, stop_dataloader, stop_epoch, stop_batch):
    """Progress tracking survives a failure during validation with several
    dataloaders: the auto-saved checkpoint holds the expected counters, and
    reloading with/without ``restart_progress`` yields the expected state."""
    n_batches = 5
    n_epochs = 3

    class ValidationModel(BoringModel):
        def __init__(self):
            super().__init__()

        def validation_step(self, batch, batch_idx, dataloader_idx):
            # Fail at one precise (epoch, batch, dataloader) coordinate.
            if self.current_epoch == stop_epoch and batch_idx == stop_batch and dataloader_idx == stop_dataloader:
                raise CustomException
            return super().validation_step(batch, batch_idx)

        def val_dataloader(self):
            return [super(ValidationModel, self).val_dataloader() for _ in range(n_dataloaders)]

    model = ValidationModel()
    model.validation_epoch_end = None
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=n_epochs,
        limit_train_batches=1,
        limit_val_batches=n_batches,
        num_sanity_val_steps=0,
    )

    # simulate a failure
    try:
        trainer.fit(model)
    except CustomException:
        pass

    # fault-tolerant training writes this checkpoint automatically
    ckpt_path = str(tmpdir / ".pl_auto_save.ckpt")
    checkpoint = torch.load(ckpt_path)["loops"]["fit_loop"]

    total_dataloader = stop_epoch * n_dataloaders + stop_dataloader
    expected = {
        "total": {"ready": total_dataloader + 1, "started": None, "processed": None, "completed": total_dataloader},
        "current": {"ready": stop_dataloader + 1, "started": None, "processed": None, "completed": stop_dataloader},
    }
    assert checkpoint["epoch_loop.val_loop.dataloader_progress"] == expected

    trainer.fit_loop.load_state_dict(checkpoint, restart_progress=False)

    # `nbe_`: non-breaking epoch, as in, no exception will be raised. `be_`: breaking epoch
    nbe_total_val_batch = stop_epoch * n_dataloaders * n_batches
    be_total_val_batch = stop_dataloader * n_batches + stop_batch
    total_val_batch = nbe_total_val_batch + be_total_val_batch
    expected = {
        "total": {
            "ready": total_val_batch + 1,
            "started": total_val_batch + 1,
            "processed": total_val_batch,
            "completed": total_val_batch,
        },
        "current": {
            "ready": stop_batch + 1,
            "started": stop_batch + 1,
            "processed": stop_batch,
            "completed": stop_batch,
        },
    }
    assert trainer.fit_loop.epoch_loop.val_loop.epoch_loop.batch_progress.state_dict() == expected

    # with restart_progress (the default) the "current" ready/started
    # counters match the completed count instead of being one ahead
    trainer.fit_loop.load_state_dict(checkpoint)
    expected = {
        "total": {
            "ready": total_val_batch + 1,
            "started": total_val_batch + 1,
            "processed": total_val_batch,
            "completed": total_val_batch,
        },
        "current": {"ready": stop_batch, "started": stop_batch, "processed": stop_batch, "completed": stop_batch},
    }
    assert trainer.fit_loop.epoch_loop.val_loop.epoch_loop.batch_progress.state_dict() == expected
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
@pytest.mark.parametrize("accumulate_grad_batches", (1, 2, 3))
@pytest.mark.parametrize("n_optimizers", (1, 3, 5))
@pytest.mark.parametrize("stop_epoch", (1, 2))
@pytest.mark.parametrize("stop_batch", (1, 2))
@pytest.mark.parametrize("stop_optimizer", (1, 2))
@RunIf(min_torch="1.7.0")
def test_loop_state_on_exception(accumulate_grad_batches, stop_epoch, stop_batch, stop_optimizer, n_optimizers, tmpdir):
    """Validate the fit-loop progress counters in the fault-tolerance checkpoint.

    Trains a model that raises ``CustomException`` at an exact
    (epoch, batch, optimizer) position, then checks that the auto-saved
    ``.pl_auto_save.ckpt`` contains exactly the optimizer-step, zero-grad,
    scheduler and batch progress counts expected for that stop point, and
    that reloading the state dict (with and without ``restart_progress``)
    restores the counters accordingly.
    """
    # If the parametrized optimizer index is out of range for this run,
    # fall back to failing on optimizer 0 instead.
    stop_optimizer = stop_optimizer if stop_optimizer < n_optimizers else 0
    n_epochs = 3
    n_batches = 3

    # Model that raises `CustomException` at the configured stop point.
    class TestModel(BoringModel):
        def __init__(self):
            super().__init__()
            if n_optimizers > 1:
                self.configure_optimizers = self.configure_optimizers_multiple

        def training_step(self, batch, batch_idx, optimizer_idx=0):
            if self.trainer.current_epoch == stop_epoch and batch_idx == stop_batch and optimizer_idx == stop_optimizer:
                raise CustomException
            return super().training_step(batch, batch_idx)

        def configure_optimizers_multiple(self):
            optimizers = [torch.optim.Adam(self.layer.parameters(), lr=0.1) for _ in range(n_optimizers)]
            lr_scheduler_0 = torch.optim.lr_scheduler.StepLR(optimizers[0], step_size=1)
            lr_scheduler_1 = torch.optim.lr_scheduler.StepLR(optimizers[1], step_size=1)
            # no scheduler for optimizer_2
            lr_schedulers = [lr_scheduler_0, {"scheduler": lr_scheduler_1, "interval": "step"}]
            return optimizers, lr_schedulers

    model = TestModel()
    model.training_epoch_end = None
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=n_epochs,
        limit_train_batches=n_batches,
        limit_val_batches=0,
        accumulate_grad_batches=accumulate_grad_batches,
        progress_bar_refresh_rate=0,
        logger=False,
        checkpoint_callback=False,
    )
    # simulate a failure
    try:
        trainer.fit(model)
    except CustomException:
        pass
    # The fault-tolerance checkpoint is written automatically on the exception.
    ckpt_path = str(tmpdir / ".pl_auto_save.ckpt")
    assert os.path.exists(ckpt_path)
    checkpoint = torch.load(ckpt_path)
    optim_progress = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop.optim_progress
    sch_progress = trainer.fit_loop.epoch_loop.scheduler_progress
    # `nbe_`: non-breaking epoch, as in, no exception will be raised. `be_`: breaking epoch
    nbe_batches_completed = stop_epoch * n_batches
    be_batches_completed = stop_batch
    be_batches_ready = stop_batch + 1
    # lightning applies leftover accumulated gradients when the epoch ends
    has_leftover_accumulation_batches = n_batches % accumulate_grad_batches != 0
    # number of batches that will call `optimizer.step()` during non-breaking and breaking epochs
    nbe_stepping_batches = nbe_batches_completed // accumulate_grad_batches
    be_stepping_batches = be_batches_completed // accumulate_grad_batches
    nbe_total_opt_steps = (nbe_stepping_batches + has_leftover_accumulation_batches) * n_optimizers
    # the breaking batch only steps if it closes an accumulation window
    does_last_be_batch_step = be_batches_ready % accumulate_grad_batches == 0 or has_leftover_accumulation_batches
    be_total_opt_steps = be_stepping_batches * n_optimizers + does_last_be_batch_step * stop_optimizer
    assert optim_progress.optimizer_steps == nbe_total_opt_steps + be_total_opt_steps
    assert optim_progress.optimizer.step.current.completed == be_total_opt_steps
    has_opt_stepped_in_be = stop_batch + 1 >= accumulate_grad_batches
    nbe_total_zero_grad = (nbe_stepping_batches + has_leftover_accumulation_batches) * n_optimizers
    does_last_be_batch_zero_grad = be_batches_completed % accumulate_grad_batches == 0
    # `max` because the first batch always zero-grads
    be_total_zero_grad = max(1, be_stepping_batches) * n_optimizers + stop_optimizer * does_last_be_batch_zero_grad
    assert optim_progress.optimizer.zero_grad.total.completed == nbe_total_zero_grad + be_total_zero_grad
    assert optim_progress.optimizer.zero_grad.current.completed == be_total_zero_grad
    nbe_sch_steps = stop_epoch
    be_sch_steps = 0  # the current epoch did not complete
    if n_optimizers > 1:
        # assumes that the scheduler config is unchanged
        # `* 1` because there is only one step-level scheduler
        nbe_sch_steps = stop_epoch + nbe_stepping_batches + has_leftover_accumulation_batches * 1
        # `0 +` for the epoch-level scheduler
        be_sch_steps = 0 + be_stepping_batches
    assert sch_progress.total.completed == nbe_sch_steps + be_sch_steps
    assert sch_progress.current.completed == be_sch_steps
    # Full expected structure of the saved fit-loop state dict.
    expected = {
        "state_dict": ANY,
        "epoch_progress": {
            "total": {
                "ready": stop_epoch + 1,
                "started": stop_epoch + 1,
                "processed": stop_epoch,
                "completed": stop_epoch,
            },
            "current": {
                "ready": stop_epoch + 1,
                "started": stop_epoch + 1,
                "processed": stop_epoch,
                "completed": stop_epoch,
            },
        },
        "epoch_loop.state_dict": ANY,
        "epoch_loop.batch_progress": {
            "total": {
                "ready": nbe_batches_completed + be_batches_completed + 1,
                "started": nbe_batches_completed + be_batches_completed + 1,
                "processed": nbe_batches_completed + be_batches_completed,
                "completed": nbe_batches_completed + be_batches_completed,
            },
            "current": {
                "ready": stop_batch + 1,
                "started": stop_batch + 1,
                "processed": stop_batch,
                "completed": stop_batch,
            },
        },
        "epoch_loop.scheduler_progress": {
            "total": {
                "ready": nbe_sch_steps + be_sch_steps,
                "started": None,
                "processed": None,
                "completed": nbe_sch_steps + be_sch_steps,
            },
            "current": {"ready": be_sch_steps, "started": None, "processed": None, "completed": be_sch_steps},
        },
        "epoch_loop.batch_loop.state_dict": ANY,
        "epoch_loop.batch_loop.optimizer_loop.state_dict": {},
        "epoch_loop.batch_loop.optimizer_loop.optim_progress": {
            "optimizer_idx": stop_optimizer,
            "optimizer": {
                "step": {
                    "total": {
                        "ready": nbe_total_opt_steps + be_total_opt_steps + has_opt_stepped_in_be,
                        "started": None,
                        "processed": None,
                        "completed": nbe_total_opt_steps + be_total_opt_steps,
                    },
                    "current": {
                        "ready": be_total_opt_steps + has_opt_stepped_in_be,
                        "started": None,
                        "processed": None,
                        "completed": be_total_opt_steps,
                    },
                },
                "zero_grad": {
                    "total": {
                        "ready": nbe_total_zero_grad + be_total_zero_grad,
                        "started": nbe_total_zero_grad + be_total_zero_grad,
                        "processed": None,
                        "completed": nbe_total_zero_grad + be_total_zero_grad,
                    },
                    "current": {
                        "ready": be_total_zero_grad,
                        "started": be_total_zero_grad,
                        "processed": None,
                        "completed": be_total_zero_grad,
                    },
                },
            },
        },
        "epoch_loop.val_loop.state_dict": ANY,
        "epoch_loop.val_loop.dataloader_progress": ANY,
        "epoch_loop.val_loop.epoch_loop.state_dict": ANY,
        "epoch_loop.val_loop.epoch_loop.batch_progress": ANY,
        "epoch_loop.val_loop._results": ANY,
        "epoch_loop._results": ANY,
    }
    assert checkpoint["loops"]["fit_loop"] == expected
    # Loading with restart_progress=False must round-trip the saved state.
    trainer.fit_loop.load_state_dict(checkpoint["loops"]["fit_loop"], restart_progress=False)
    state_dict = trainer.fit_loop.state_dict()
    # need to remove these elements for comparison; comparing with `fit_loop.state_dict()` would require the
    # fit loop to have an iterator, which is only available during training
    checkpoint["loops"]["fit_loop"]["state_dict"]["dataloader_state_dict"] = ANY
    assert state_dict == checkpoint["loops"]["fit_loop"]
    # Loading with restart (default) resets the "current"/unfinished counters.
    trainer.fit_loop.load_state_dict(checkpoint["loops"]["fit_loop"])
    state_dict = trainer.fit_loop.state_dict()
    assert state_dict != checkpoint["loops"]["fit_loop"]
    assert state_dict["epoch_progress"]["total"]["started"] == stop_epoch + 1
    assert state_dict["epoch_progress"]["current"]["started"] == stop_epoch
| |
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""NaCl SDK tool SCons."""
import __builtin__
import re
import os
import shutil
import sys
import SCons.Scanner
import SCons.Script
import subprocess
import tempfile
# Per-architecture/subarch toolchain layout for the native NaCl SDK.
# 'tooldir' is the <arch>-nacl directory inside the SDK; the '*_flag'
# entries are the extra mode flags passed to the assembler, compiler and
# linker for that subarch.  'other_libdir' names the lib directory to use
# when this subarch is built with another subarch's tooldir (e.g. x86-32
# built with the x86_64-nacl tools uses 'lib32').
NACL_TOOL_MAP = {
    'arm': {
        '32': {
            'tooldir': 'arm-nacl',
            'as_flag': '',
            'cc_flag': '',
            'ld_flag': '',
            },
        },
    'x86': {
        '32': {
            'tooldir': 'i686-nacl',
            'other_libdir': 'lib32',
            'as_flag': '--32',
            'cc_flag': '-m32',
            'ld_flag': ' -melf_i386_nacl',
            },
        '64': {
            'tooldir': 'x86_64-nacl',
            'other_libdir': 'lib64',
            'as_flag': '--64',
            'cc_flag': '-m64',
            'ld_flag': ' -melf_x86_64_nacl',
            },
        },
    }
def _StubOutEnvToolsForBuiltElsewhere(env):
"""Stub out all tools so that they point to 'true'.
Some machines have their code built by another machine, they'll therefore
run 'true' instead of running the usual build tools.
Args:
env: The SCons environment in question.
"""
assert(env.Bit('built_elsewhere'))
env.Replace(CC='true', CXX='true', LINK='true', AR='true',
RANLIB='true', AS='true', ASPP='true', LD='true',
STRIP='true', OBJDUMP='true', PNACLOPT='true',
PNACLFINALIZE='true')
def _SetEnvForNativeSdk(env, sdk_path):
  """Initialize environment according to target architecture.

  Points the env's compilers, binutils and NACL_SDK_INCLUDE/NACL_SDK_LIB
  at the native toolchain under sdk_path, choosing the tooldir that matches
  TARGET_ARCHITECTURE/TARGET_SUBARCH or, failing that, another subarch's
  tooldir driven by explicit mode flags (e.g. -m32).  Raises Exception if
  no usable tooldir exists under sdk_path.
  """
  bin_path = os.path.join(sdk_path, 'bin')
  # NOTE: attempts to eliminate this PATH setting and use
  # absolute path have been futile
  env.PrependENVPath('PATH', bin_path)
  tool_prefix = None
  tool_map = NACL_TOOL_MAP[env['TARGET_ARCHITECTURE']]
  subarch_spec = tool_map[env['TARGET_SUBARCH']]
  tooldir = subarch_spec['tooldir']
  # We need to pass it extra options for the subarch we are building.
  as_mode_flag = subarch_spec['as_flag']
  cc_mode_flag = subarch_spec['cc_flag']
  ld_mode_flag = subarch_spec['ld_flag']
  if os.path.exists(os.path.join(sdk_path, tooldir)):
    # The tooldir for the build target exists.
    # The tools there do the right thing without special options.
    tool_prefix = tooldir
    libdir = os.path.join(tooldir, 'lib')
  else:
    # We're building for a target for which there is no matching tooldir.
    # For example, for x86-32 when only <sdk_path>/x86_64-nacl/ exists.
    # Find a tooldir for a different subarch that does exist.
    others_map = tool_map.copy()
    del others_map[env['TARGET_SUBARCH']]
    for subarch, tool_spec in others_map.iteritems():
      tooldir = tool_spec['tooldir']
      if os.path.exists(os.path.join(sdk_path, tooldir)):
        # OK, this is the other subarch to use as tooldir.
        tool_prefix = tooldir
        # The lib directory may have an alternate name, i.e.
        # 'lib32' in the x86_64-nacl tooldir.
        libdir = os.path.join(tooldir, subarch_spec.get('other_libdir', 'lib'))
        break
  if tool_prefix is None:
    raise Exception("Cannot find a toolchain for %s in %s" %
                    (env['TARGET_FULLARCH'], sdk_path))
  # nacl_clang toolchains ship clang/clang++ under the same tool prefix.
  cc = 'clang' if env.Bit('nacl_clang') else 'gcc'
  cxx = 'clang++' if env.Bit('nacl_clang') else 'g++'
  env.Replace(# Replace header and lib paths.
              # where to put nacl extra sdk headers
              # TODO(robertm): switch to using the mechanism that
              # passes arguments to scons
              NACL_SDK_INCLUDE='%s/%s/include' % (sdk_path, tool_prefix),
              # where to find/put nacl generic extra sdk libraries
              NACL_SDK_LIB='%s/%s' % (sdk_path, libdir),
              # Replace the normal unix tools with the NaCl ones.
              CC=os.path.join(bin_path, '%s-%s' % (tool_prefix, cc)),
              CXX=os.path.join(bin_path, '%s-%s' % (tool_prefix, cxx)),
              AR=os.path.join(bin_path, '%s-ar' % tool_prefix),
              AS=os.path.join(bin_path, '%s-as' % tool_prefix),
              ASPP=os.path.join(bin_path, '%s-%s' % (tool_prefix, cc)),
              GDB=os.path.join(bin_path, '%s-gdb' % tool_prefix),
              # NOTE: use g++ for linking so we can handle C AND C++.
              LINK=os.path.join(bin_path, '%s-%s' % (tool_prefix, cxx)),
              # Grrr... and sometimes we really need ld.
              LD=os.path.join(bin_path, '%s-ld' % tool_prefix) + ld_mode_flag,
              RANLIB=os.path.join(bin_path, '%s-ranlib' % tool_prefix),
              NM=os.path.join(bin_path, '%s-nm' % tool_prefix),
              OBJDUMP=os.path.join(bin_path, '%s-objdump' % tool_prefix),
              STRIP=os.path.join(bin_path, '%s-strip' % tool_prefix),
              ADDR2LINE=os.path.join(bin_path, '%s-addr2line' % tool_prefix),
              BASE_LINKFLAGS=[cc_mode_flag],
              BASE_CFLAGS=[cc_mode_flag],
              BASE_CXXFLAGS=[cc_mode_flag],
              BASE_ASFLAGS=[as_mode_flag],
              BASE_ASPPFLAGS=[cc_mode_flag],
              CFLAGS=['-std=gnu99'],
              CCFLAGS=['-O3',
                       '-Werror',
                       '-Wall',
                       '-Wno-variadic-macros',
                       '-Wswitch-enum',
                       '-g',
                       '-fno-stack-protector',
                       '-fdiagnostics-show-option',
                       '-pedantic',
                       '-D__linux__',
                       ],
              ASFLAGS=[],
              )
  # NaClSdk environment seems to be inherited from the host environment.
  # On Linux host, this probably makes sense. On Windows and Mac, this
  # introduces nothing except problems.
  # For now, simply override the environment settings as in
  # <scons>/engine/SCons/Platform/posix.py
  env.Replace(LIBPREFIX='lib',
              LIBSUFFIX='.a',
              SHLIBPREFIX='$LIBPREFIX',
              SHLIBSUFFIX='.so',
              LIBPREFIXES=['$LIBPREFIX'],
              LIBSUFFIXES=['$LIBSUFFIX', '$SHLIBSUFFIX'],
              )
  # Force -fPIC when compiling for shared libraries.
  env.AppendUnique(SHCCFLAGS=['-fPIC'],
                   )
def _SetEnvForPnacl(env, root):
  """Configure env to build with the PNaCl toolchain rooted at `root`."""
  # All the PNaCl tools require Python to be in the PATH.
  arch = env['TARGET_FULLARCH']
  assert arch in ['arm', 'mips32', 'x86-32', 'x86-64']
  if env.Bit('pnacl_unsandboxed'):
    if env.Bit('host_linux'):
      arch = '%s-linux' % arch
    elif env.Bit('host_mac'):
      arch = '%s-mac' % arch
  if env.Bit('nonsfi_nacl'):
    arch += '-nonsfi'
  arch_flag = ' -arch %s' % arch
  # When generating a pexe, the linker output stays arch-neutral.
  ld_arch_flag = '' if env.Bit('pnacl_generate_pexe') else arch_flag
  llc_mtriple_flag = ''
  if env.Bit('minsfi'):
    llc_cpu = ''
    if env.Bit('build_x86_32'):
      llc_cpu = 'i686'
    elif env.Bit('build_x86_64'):
      llc_cpu = 'x86_64'
    if env.Bit('host_linux'):
      llc_mtriple_flag = ' -mtriple=%s-linux-gnu' % llc_cpu
    elif env.Bit('host_mac'):
      llc_mtriple_flag = ' -mtriple=%s-apple-darwin' % llc_cpu
  translator_root = os.path.join(os.path.dirname(root), 'pnacl_translator')
  binprefix = os.path.join(root, 'bin', 'pnacl-')
  binext = ''
  if env.Bit('host_windows'):
    binext = '.bat'
  pnacl_ar = binprefix + 'ar' + binext
  pnacl_as = binprefix + 'as' + binext
  # NOTE(review): pnacl_nm is computed but not used below; NM is left alone.
  pnacl_nm = binprefix + 'nm' + binext
  pnacl_ranlib = binprefix + 'ranlib' + binext
  # Use the standalone sandboxed translator in sbtc mode
  if env.Bit('use_sandboxed_translator'):
    pnacl_translate = os.path.join(translator_root, 'bin',
                                   'pnacl-translate' + binext)
  else:
    pnacl_translate = binprefix + 'translate' + binext
  pnacl_cc = binprefix + 'clang' + binext
  pnacl_cxx = binprefix + 'clang++' + binext
  pnacl_ld = binprefix + 'ld' + binext
  pnacl_disass = binprefix + 'dis' + binext
  pnacl_finalize = binprefix + 'finalize' + binext
  pnacl_opt = binprefix + 'opt' + binext
  pnacl_strip = binprefix + 'strip' + binext
  pnacl_llc = binprefix + 'llc' + binext
  # NOTE: XXX_flags start with space for easy concatenation
  # The flags generated here get baked into the commands (CC, CXX, LINK)
  # instead of CFLAGS etc to keep them from getting blown away by some
  # tests. Don't add flags here unless they always need to be preserved.
  pnacl_cxx_flags = ''
  pnacl_cc_flags = ' -std=gnu99'
  pnacl_ld_flags = ' ' + ' '.join(env['PNACL_BCLDFLAGS'])
  pnacl_translate_flags = ''
  pnacl_llc_flags = ''
  sdk_base = os.path.join(root, 'le32-nacl')
  bias_flags = ''
  # The supported use cases for nonpexe mode (IRT building, nonsfi) use biased
  # bitcode and native calling conventions, so inject the --target= flags to
  # get that by default. The one exception to that rule is PNaCl zerocost EH,
  # so put the flags in BASE_{C,CXX,LINK}FLAGS rather than in the commands
  # directly, so that the test can override them. In addition to using the
  # flags, we have to point NACL_SDK_{LIB,INCLUDE} to the toolchain directories
  # containing the biased bitcode libraries.
  if not env.Bit('pnacl_generate_pexe') and env['TARGET_FULLARCH'] != 'mips32':
    bias_flags = ' '.join(env.BiasedBitcodeFlags())
    archdir = {'x86-32': 'i686', 'x86-64': 'x86_64', 'arm': 'arm'}
    sdk_base = os.path.join(root, archdir[env['TARGET_FULLARCH']] + '_bc-nacl')
  if env.Bit('nacl_pic'):
    pnacl_cc_flags += ' -fPIC'
    pnacl_cxx_flags += ' -fPIC'
    # NOTE: this is a special hack for the pnacl backend which
    # does more than linking
    pnacl_ld_flags += ' -fPIC'
    pnacl_translate_flags += ' -fPIC'
  if env.Bit('minsfi'):
    pnacl_llc_flags += ' -relocation-model=pic -filetype=obj'
    pnacl_ld_flags += ' -nostdlib -Wl,-r -L' + os.path.join(root, 'usr', 'lib')
  if env.Bit('use_sandboxed_translator'):
    sb_flags = ' --pnacl-sb'
    pnacl_ld_flags += sb_flags
    pnacl_translate_flags += sb_flags
  if env.Bit('x86_64_zero_based_sandbox'):
    pnacl_translate_flags += ' -sfi-zero-based-sandbox'
  env.Replace(# Replace header and lib paths.
              NACL_SDK_INCLUDE=os.path.join(root, sdk_base, 'include'),
              NACL_SDK_LIB=os.path.join(root, sdk_base, 'lib'),
              # Remove arch-specific flags (if any)
              BASE_LINKFLAGS=bias_flags,
              BASE_CFLAGS=bias_flags,
              BASE_CXXFLAGS=bias_flags,
              BASE_ASFLAGS='',
              BASE_ASPPFLAGS='',
              # Replace the normal unix tools with the PNaCl ones.
              CC=pnacl_cc + pnacl_cc_flags,
              CXX=pnacl_cxx + pnacl_cxx_flags,
              ASPP=pnacl_cc + pnacl_cc_flags,
              LIBPREFIX="lib",
              SHLIBPREFIX="lib",
              SHLIBSUFFIX=".so",
              OBJSUFFIX=".bc",
              LINK=pnacl_cxx + ld_arch_flag + pnacl_ld_flags,
              # Although we are currently forced to produce native output
              # for LINK, we are free to produce bitcode for SHLINK
              # (SharedLibrary linking) because scons doesn't do anything
              # with shared libraries except use them with the toolchain.
              SHLINK=pnacl_cxx + ld_arch_flag + pnacl_ld_flags,
              LD=pnacl_ld,
              AR=pnacl_ar,
              AS=pnacl_as + ld_arch_flag,
              RANLIB=pnacl_ranlib,
              DISASS=pnacl_disass,
              OBJDUMP=pnacl_disass,
              STRIP=pnacl_strip,
              TRANSLATE=pnacl_translate + arch_flag + pnacl_translate_flags,
              PNACLFINALIZE=pnacl_finalize,
              PNACLOPT=pnacl_opt,
              LLC=pnacl_llc + llc_mtriple_flag + pnacl_llc_flags,
              )
  if env.Bit('built_elsewhere'):
    def FakeInstall(dest, source, env):
      print 'Not installing', dest
    _StubOutEnvToolsForBuiltElsewhere(env)
    env.Replace(INSTALL=FakeInstall)
  if env.Bit('translate_in_build_step'):
    env.Replace(TRANSLATE='true')
    env.Replace(PNACLFINALIZE='true')
def PNaClForceNative(env):
  """Switch a PNaCl (bitcode) environment over to producing native objects."""
  assert env.Bit('bitcode')
  if env.Bit('pnacl_generate_pexe'):
    # In pure-pexe mode no native compilation may ever happen; poison the
    # compiler commands so any attempt fails loudly.
    env.Replace(CC='NO-NATIVE-CC-INVOCATION-ALLOWED',
                CXX='NO-NATIVE-CXX-INVOCATION-ALLOWED')
    return

  env.Replace(OBJSUFFIX='.o', SHLIBSUFFIX='.so')
  arch_flag = ' -arch ${TARGET_FULLARCH}'
  if env.Bit('nonsfi_nacl'):
    arch_flag += '-nonsfi'
  native_flags = ' --pnacl-allow-native --pnacl-allow-translate'
  # LINK already carries an -arch flag, so it only gets the allow flags.
  env.Append(CC=arch_flag + native_flags,
             CXX=arch_flag + native_flags,
             ASPP=arch_flag + native_flags,
             LINK=native_flags)
  env['LD'] = 'NO-NATIVE-LD-INVOCATION-ALLOWED'
  env['SHLINK'] = '${LINK}'
  if env.Bit('built_elsewhere'):
    _StubOutEnvToolsForBuiltElsewhere(env)
# Get an environment for nacl-gcc when in PNaCl mode.
def PNaClGetNNaClEnv(env):
  """Return a clone of `env` reconfigured for the native nacl-gcc toolchain.

  Args:
    env: A PNaCl (bitcode) SCons environment.

  Returns:
    A cloned environment with the bitcode bit cleared and the naclsdk tool
    re-run, so native compilation flags and tools are in effect.
  """
  assert(env.Bit('bitcode'))
  assert(not env.Bit('build_mips32'))
  # This is kind of a hack. We clone the environment,
  # clear the bitcode bit, and then reload naclsdk.py
  native_env = env.Clone()
  native_env.ClearBits('bitcode')
  if env.Bit('built_elsewhere'):
    # NOTE(review): this stubs the ORIGINAL env, not the freshly cloned
    # native_env -- possibly intended to be native_env; confirm before use.
    _StubOutEnvToolsForBuiltElsewhere(env)
  else:
    native_env = native_env.Clone(tools=['naclsdk'])
    if native_env.Bit('pnacl_generate_pexe'):
      # Native invocations are forbidden when generating a pexe.
      native_env.Replace(CC='NO-NATIVE-CC-INVOCATION-ALLOWED',
                         CXX='NO-NATIVE-CXX-INVOCATION-ALLOWED')
    else:
      # These are unfortunately clobbered by running Tool.
      native_env.Replace(EXTRA_CFLAGS=env['EXTRA_CFLAGS'],
                         EXTRA_CXXFLAGS=env['EXTRA_CXXFLAGS'],
                         CCFLAGS=env['CCFLAGS'],
                         CFLAGS=env['CFLAGS'],
                         CXXFLAGS=env['CXXFLAGS'])
  return native_env
# This adds architecture specific defines for the target architecture.
# These are normally omitted by PNaCl.
# For example: __i686__, __arm__, __mips__, __x86_64__
def AddBiasForPNaCl(env, temporarily_allow=True):
  """Add architecture-specific bias defines that PNaCl normally omits.

  For example: __i686__, __arm__, __mips__, __x86_64__.

  re: the temporarily_allow flag -- that is for:
  BUG= http://code.google.com/p/nativeclient/issues/detail?id=1248
  """
  assert env.Bit('bitcode')
  if env.Bit('pnacl_generate_pexe') and not temporarily_allow:
    env.Replace(CC='NO-NATIVE-CC-INVOCATION-ALLOWED',
                CXX='NO-NATIVE-CXX-INVOCATION-ALLOWED')
    return

  # First matching build_* bit wins, mirroring the old elif chain.
  bias_flag = None
  for bit, bias_arch in (('build_arm', 'arm'),
                         ('build_x86_32', 'x86-32'),
                         ('build_x86_64', 'x86-64'),
                         ('build_mips32', 'mips32')):
    if env.Bit(bit):
      bias_flag = '--pnacl-bias=' + bias_arch
      break
  if bias_flag is None:
    raise Exception("Unknown architecture!")
  if env.Bit('nonsfi_nacl'):
    bias_flag += '-nonsfi'
  env.AppendUnique(CCFLAGS=[bias_flag], ASPPFLAGS=[bias_flag])
def ValidateSdk(env):
  """Exit with a diagnostic unless the NaCl SDK headers can be located."""
  for checkable in ['${NACL_SDK_INCLUDE}/stdio.h']:
    path = env.subst(checkable)
    # Accept the path itself or either of two alternate layouts:
    # Windows builds do not use cygwin and so cannot see the nacl
    # subdirectory when it is cygwin's symlink - try plain /include too;
    # TODO(pasko): remove the legacy nacl64/include presence test below.
    candidates = [
        path,
        re.sub(r'(nacl64|nacl)/include/([^/]*)$', r'include/\2', path),
        re.sub(r'nacl/include/([^/]*)$', r'nacl64/include/\1', path),
    ]
    if any(os.path.exists(candidate) for candidate in candidates):
      continue
    message = env.subst('''
ERROR: NativeClient toolchain does not seem present!,
Missing: %s
Configuration is:
  NACL_SDK_INCLUDE=${NACL_SDK_INCLUDE}
  NACL_SDK_LIB=${NACL_SDK_LIB}
  CC=${CC}
  CXX=${CXX}
  AR=${AR}
  AS=${AS}
  ASPP=${ASPP}
  LINK=${LINK}
  RANLIB=${RANLIB}
Run: gclient runhooks --force or build the SDK yourself.
''' % checkable)
    sys.stderr.write(message + "\n\n")
    sys.exit(-1)
def ScanLinkerScript(node, env, libpath):
  """SCons scanner for linker script files.
  This handles trivial linker scripts like those used for libc.so and libppapi.a.
  These scripts just indicate more input files to be linked in, so we want
  to produce dependencies on them.
  A typical such linker script looks like:
      /* Some comments. */
      INPUT ( foo.a libbar.a libbaz.a )
  or:
      /* GNU ld script
         Use the shared library, but some functions are only in
         the static library, so try that secondarily. */
      OUTPUT_FORMAT(elf64-x86-64)
      GROUP ( /lib/libc.so.6 /usr/lib/libc_nonshared.a
              AS_NEEDED ( /lib/ld-linux-x86-64.so.2 ) )
  Returns:
    SCons nodes for the referenced input files that can be found in
    libpath; names that cannot be resolved are silently dropped.
  """
  contents = node.get_text_contents()
  if contents.startswith('!<arch>\n') or contents.startswith('\177ELF'):
    # An archive or ELF file is not a linker script.
    return []
  comment_pattern = re.compile(r'/\*.*?\*/', re.DOTALL | re.MULTILINE)
  def remove_comments(text):
    return re.sub(comment_pattern, '', text)
  tokens = remove_comments(contents).split()
  libs = []
  while tokens:
    # NOTE: tokens are consumed from the end of the list.
    token = tokens.pop()
    if token.startswith('OUTPUT_FORMAT('):
      pass
    elif token == 'OUTPUT_FORMAT':
      # Swallow the next three tokens: '(', 'xyz', ')'
      # (bug fix: this previously deleted only two with `del tokens[0:2]`,
      # leaving the third token to be misread as a library name).
      del tokens[0:3]
    elif token in ['(', ')', 'INPUT', 'GROUP', 'AS_NEEDED']:
      pass
    else:
      libs.append(token)
  # Find those items in the library path, ignoring ones we fail to find.
  found = [SCons.Node.FS.find_file(lib, libpath) for lib in libs]
  return [lib for lib in found if lib is not None]
# This is a modified copy of the class TempFileMunge in
# third_party/scons-2.0.1/engine/SCons/Platform/__init__.py.
# It differs in using quote_for_at_file (below) in place of
# SCons.Subst.quote_spaces.
class NaClTempFileMunge(object):
  """A callable class. You can set an Environment variable to this,
  then call it with a string argument, then it will perform temporary
  file substitution on it. This is used to circumvent the long command
  line limitation.

  Example usage:
    env["TEMPFILE"] = TempFileMunge
    env["LINKCOM"] = "${TEMPFILE('$LINK $TARGET $SOURCES')}"

  By default, the name of the temporary file used begins with a
  prefix of '@'. This may be configured for other tool chains by
  setting '$TEMPFILEPREFIX'.
    env["TEMPFILEPREFIX"] = '-@'        # diab compiler
    env["TEMPFILEPREFIX"] = '-via'      # arm tool chain
  """
  def __init__(self, cmd):
    # The command template (e.g. '$LINK $TARGET $SOURCES') to expand later.
    self.cmd = cmd
  def __call__(self, target, source, env, for_signature):
    if for_signature:
      # If we're being called for signature calculation, it's
      # because we're being called by the string expansion in
      # Subst.py, which has the logic to strip any $( $) that
      # may be in the command line we squirreled away. So we
      # just return the raw command line and let the upper
      # string substitution layers do their thing.
      return self.cmd
    # Now we're actually being called because someone is actually
    # going to try to execute the command, so we have to do our
    # own expansion.
    cmd = env.subst_list(self.cmd, SCons.Subst.SUBST_CMD, target, source)[0]
    try:
      maxline = int(env.subst('$MAXLINELENGTH'))
    except ValueError:
      maxline = 2048
    length = 0
    for c in cmd:
      length += len(c)
    # Short enough command lines are executed directly, unmodified.
    if length <= maxline:
      return self.cmd
    # We do a normpath because mktemp() has what appears to be
    # a bug in Windows that will use a forward slash as a path
    # delimiter. Windows's link mistakes that for a command line
    # switch and barfs.
    #
    # We use the .lnk suffix for the benefit of the Phar Lap
    # linkloc linker, which likes to append an .lnk suffix if
    # none is given.
    (fd, tmp) = tempfile.mkstemp('.lnk', text=True)
    native_tmp = SCons.Util.get_native_path(os.path.normpath(tmp))
    if env['SHELL'] and env['SHELL'] == 'sh':
      # The sh shell will try to escape the backslashes in the
      # path, so unescape them.
      native_tmp = native_tmp.replace('\\', r'\\\\')
      # In Cygwin, we want to use rm to delete the temporary
      # file, because del does not exist in the sh shell.
      rm = env.Detect('rm') or 'del'
    else:
      # Don't use 'rm' if the shell is not sh, because rm won't
      # work with the Windows shells (cmd.exe or command.com) or
      # Windows path names.
      rm = 'del'
    prefix = env.subst('$TEMPFILEPREFIX')
    if not prefix:
      prefix = '@'
    # The @file is sometimes handled by a GNU tool itself, using
    # the libiberty/argv.c code, and sometimes handled implicitly
    # by Cygwin before the tool's own main even sees it. These
    # two treat the contents differently, so there is no single
    # perfect way to quote. The libiberty @file code uses a very
    # regular scheme: a \ in any context is always swallowed and
    # quotes the next character, whatever it is; '...' or "..."
    # quote whitespace in ... and the outer quotes are swallowed.
    # The Cygwin @file code uses a vaguely similar scheme, but its
    # treatment of \ is much less consistent: a \ outside a quoted
    # string is never stripped, and a \ inside a quoted string is
    # only stripped when it quoted something (Cygwin's definition
    # of "something" here is nontrivial). In our uses the only
    # appearances of \ we expect are in Windows-style file names.
    # Fortunately, an extra doubling of \\ that doesn't get
    # stripped is harmless in the middle of a file name.
    def quote_for_at_file(s):
      s = str(s)
      if ' ' in s or '\t' in s:
        return '"' + re.sub('([ \t"])', r'\\\1', s) + '"'
      return s.replace('\\', '\\\\')
    # Write everything but the command name itself into the @file.
    args = list(map(quote_for_at_file, cmd[1:]))
    os.write(fd, " ".join(args) + "\n")
    os.close(fd)
    # XXX Using the SCons.Action.print_actions value directly
    # like this is bogus, but expedient. This class should
    # really be rewritten as an Action that defines the
    # __call__() and strfunction() methods and lets the
    # normal action-execution logic handle whether or not to
    # print/execute the action. The problem, though, is all
    # of that is decided before we execute this method as
    # part of expanding the $TEMPFILE construction variable.
    # Consequently, refactoring this will have to wait until
    # we get more flexible with allowing Actions to exist
    # independently and get strung together arbitrarily like
    # Ant tasks. In the meantime, it's going to be more
    # user-friendly to not let obsession with architectural
    # purity get in the way of just being helpful, so we'll
    # reach into SCons.Action directly.
    if SCons.Action.print_actions:
      print("Using tempfile "+native_tmp+" for command line:\n"+
            str(cmd[0]) + " " + " ".join(args))
    return [ cmd[0], prefix + native_tmp + '\n' + rm, native_tmp ]
def generate(env):
  """SCons entry point for this tool.

  Args:
    env: The SCons environment in question.

  NOTE: SCons requires the use of this name, which fails lint.
  """
  # make these methods to the top level scons file
  env.AddMethod(ValidateSdk)
  env.AddMethod(AddBiasForPNaCl)
  env.AddMethod(PNaClForceNative)
  env.AddMethod(PNaClGetNNaClEnv)
  # Invoke the various unix tools that the NativeClient SDK resembles.
  env.Tool('g++')
  env.Tool('gcc')
  env.Tool('gnulink')
  env.Tool('ar')
  env.Tool('as')
  if env.Bit('pnacl_generate_pexe'):
    suffix = '.nonfinal.pexe'
  else:
    suffix = '.nexe'
  env.Replace(
      COMPONENT_LINKFLAGS=[''],
      COMPONENT_LIBRARY_LINK_SUFFIXES=['.pso', '.so', '.a'],
      _RPATH='',
      COMPONENT_LIBRARY_DEBUG_SUFFIXES=[],
      PROGSUFFIX=suffix,
      # adding BASE_ AND EXTRA_ flags to common command lines
      # The suggested usage pattern is:
      #   BASE_XXXFLAGS can only be set in this file
      #   EXTRA_XXXFLAGS can only be set in a ComponentXXX call
      # NOTE: we also have EXTRA_LIBS which is handles separately in
      #       site_scons/site_tools/component_builders.py
      # NOTE: the command lines were gleaned from:
      #       * ../third_party/scons-2.0.1/engine/SCons/Tool/cc.py
      #       * ../third_party/scons-2.0.1/engine/SCons/Tool/c++.py
      #       * etc.
      CCCOM='$CC $BASE_CFLAGS $CFLAGS $EXTRA_CFLAGS ' +
            '$CCFLAGS $_CCCOMCOM -c -o $TARGET $SOURCES',
      SHCCCOM='$SHCC $BASE_CFLAGS $SHCFLAGS $EXTRA_CFLAGS ' +
              '$SHCCFLAGS $_CCCOMCOM -c -o $TARGET $SOURCES',
      CXXCOM='$CXX $BASE_CXXFLAGS $CXXFLAGS $EXTRA_CXXFLAGS ' +
             '$CCFLAGS $_CCCOMCOM -c -o $TARGET $SOURCES',
      SHCXXCOM='$SHCXX $BASE_CXXFLAGS $SHCXXFLAGS $EXTRA_CXXFLAGS ' +
               '$SHCCFLAGS $_CCCOMCOM -c -o $TARGET $SOURCES',
      LINKCOM='$LINK $BASE_LINKFLAGS $LINKFLAGS $EXTRA_LINKFLAGS ' +
              '$SOURCES $_LIBDIRFLAGS $_LIBFLAGS -o $TARGET',
      SHLINKCOM='$SHLINK $BASE_LINKFLAGS $SHLINKFLAGS $EXTRA_LINKFLAGS ' +
                '$SOURCES $_LIBDIRFLAGS $_LIBFLAGS -o $TARGET',
      ASCOM='$AS $BASE_ASFLAGS $ASFLAGS $EXTRA_ASFLAGS -o $TARGET $SOURCES',
      ASPPCOM='$ASPP $BASE_ASPPFLAGS $ASPPFLAGS $EXTRA_ASPPFLAGS ' +
              '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -c -o $TARGET $SOURCES',
      # Strip doesn't seem to be a first-class citizen in SCons country,
      # so we have to add these *COM, *COMSTR manually.
      # Note: it appears we cannot add this in component_setup.py
      STRIPFLAGS=['--strip-all'],
      STRIPCOM='${STRIP} ${STRIPFLAGS}',
      TRANSLATECOM='${TRANSLATE} ${TRANSLATEFLAGS} ${SOURCES} -o ${TARGET}',
      PNACLFINALIZEFLAGS=[],
      PNACLFINALIZECOM='${PNACLFINALIZE} ${PNACLFINALIZEFLAGS} ' +
                       '${SOURCES} -o ${TARGET}',
      )
  # Windows has a small limit on the command line size. The linking and AR
  # commands can get quite large. So bring in the SCons machinery to put
  # most of a command line into a temporary file and pass it with
  # @filename, which works with gcc.
  if env['PLATFORM'] in ['win32', 'cygwin']:
    env['TEMPFILE'] = NaClTempFileMunge
    for com in ['LINKCOM', 'SHLINKCOM', 'ARCOM']:
      env[com] = "${TEMPFILE('%s')}" % env[com]
  # Get root of the SDK.
  root = env.GetToolchainDir()
  # if bitcode=1 use pnacl toolchain
  if env.Bit('bitcode'):
    _SetEnvForPnacl(env, root)
  elif env.Bit('built_elsewhere'):
    def FakeInstall(dest, source, env):
      print 'Not installing', dest
    _StubOutEnvToolsForBuiltElsewhere(env)
    env.Replace(INSTALL=FakeInstall)
  else:
    _SetEnvForNativeSdk(env, root)
  if (env.Bit('bitcode') or env.Bit('nacl_clang')) and env.Bit('build_x86'):
    # Get GDB from the nacl-gcc toolchain even when using PNaCl.
    # TODO(mseaborn): We really want the nacl-gdb binary to be in a
    # separate tarball from the nacl-gcc toolchain, then this step
    # will not be necessary.
    # See http://code.google.com/p/nativeclient/issues/detail?id=2773
    temp_env = env.Clone()
    temp_env.ClearBits('bitcode', 'nacl_clang')
    temp_root = temp_env.GetToolchainDir()
    _SetEnvForNativeSdk(temp_env, temp_root)
    env.Replace(GDB=temp_env['GDB'])
  env.Prepend(LIBPATH='${NACL_SDK_LIB}')
  # Install our scanner for (potential) linker scripts.
  # It applies to "source" files ending in .a or .so.
  # Dependency files it produces are to be found in ${LIBPATH}.
  # It is applied recursively to those dependencies in case
  # some of them are linker scripts too.
  ldscript_scanner = SCons.Scanner.Base(
      function=ScanLinkerScript,
      skeys=['.a', '.so', '.pso'],
      path_function=SCons.Scanner.FindPathDirs('LIBPATH'),
      recursive=True
      )
  env.Append(SCANNERS=ldscript_scanner)
  # Scons tests can check this version number to decide whether to
  # enable tests for toolchain bug fixes or new features. See
  # description in pnacl/build.sh.
  if 'toolchain_feature_version' in SCons.Script.ARGUMENTS:
    version = int(SCons.Script.ARGUMENTS['toolchain_feature_version'])
  else:
    version_file = os.path.join(root, 'FEATURE_VERSION')
    # There is no pnacl_newlib toolchain on ARM, only a pnacl_translator, so
    # use that if necessary. Otherwise use it if we are doing sandboxed
    # translation.
    if not os.path.exists(version_file) or env.Bit('use_sandboxed_translator'):
      version_file = os.path.join(os.path.dirname(root), 'pnacl_translator',
                                  'FEATURE_VERSION')
    if os.path.exists(version_file):
      with open(version_file, 'r') as fh:
        version = int(fh.read())
    else:
      version = 0
  env.Replace(TOOLCHAIN_FEATURE_VERSION=version)
| |
import pyCGM_Single.pyCGM as pyCGM
import pytest
import numpy as np
# Shared precision (number of decimal places) used when rounding values in
# this test module.
rounding_precision = 8
class TestUtils():
    """
    This class tests the utils functions in pyCGM.py:
    findwandmarker
    cross
    norm2d
    norm3d
    normDiv
    matrixmult
    rotmat
    """
    # Arbitrary coordinates for the thorax y and z axes. Evaluated once at
    # class-definition time and shared by every parametrize row below: the
    # functions under test must not depend on these axes, so random values
    # prove that independence.
    rand_coor = [np.random.randint(0, 10), np.random.randint(0, 10), np.random.randint(0, 10)]
    @pytest.mark.parametrize(["frame", "thorax", "expected"], [
        ({'RSHO': [428.88476562, 270.552948, 1500.73010254], 'LSHO': [68.24668121, 269.01049805, 1510.1072998]}, [[[256.23991128535846, 365.30496976939753, 1459.662169500559], rand_coor, rand_coor], [256.149810236564, 364.3090603933987, 1459.6553639290375]], [[255.92550222678443, 364.3226950497605, 1460.6297868417887], [256.42380097331767, 364.27770361353487, 1460.6165849382387]]),
        ({'RSHO': [0, 0, 1], 'LSHO': [0, 1, 0]}, [[[1, 0, 0], rand_coor, rand_coor], [0, 0, 0]], [[0, 1, 0], [0, 0, 1]]),
        ({'RSHO': [0, 1, 1], 'LSHO': [1, 1, 1]}, [[[1, 0, 0], rand_coor, rand_coor], [0, 0, 0]], [[0, 0.70710678, -0.70710678], [0, -0.70710678, 0.70710678]]),
        ({'RSHO': [0, 1, 1], 'LSHO': [1, 1, 1]}, [[[1, 0, 0], rand_coor, rand_coor], [-1, 0, 0]], [[-1, 0.70710678, -0.70710678], [-1, -0.70710678, 0.70710678]]),
        ({'RSHO': [1, 2, 1], 'LSHO': [2, 1, 2]}, [[[1, 0, 0], rand_coor, rand_coor], [0, 0, 0]], [[0, 0.4472136, -0.89442719], [0, -0.89442719, 0.4472136]]),
        ({'RSHO': [1, 2, 1], 'LSHO': [2, 2, 2]}, [[[1, 0, 0], rand_coor, rand_coor], [0, 0, 0]], [[0, 0.4472136, -0.89442719], [0, -0.70710678, 0.70710678]]),
        ({'RSHO': [1, 2, 2], 'LSHO': [2, 1, 2]}, [[[1, 0, 0], rand_coor, rand_coor], [0, 0, 0]], [[0, 0.70710678, -0.70710678], [0, -0.89442719, 0.4472136]]),
        ({'RSHO': [1, 1, 1], 'LSHO': [1, 1, 1]}, [[[1, 0, 1], rand_coor, rand_coor], [0, 0, 0]], [[0.70710678, 0, -0.70710678], [-0.70710678, 0, 0.70710678]]),
        ({'RSHO': [1, 1, 1], 'LSHO': [1, 1, 1]}, [[[1, 0, 1], rand_coor, rand_coor], [0, 0, 1]], [[0, 0, 0], [0, 0, 2]]),
        ({'RSHO': [0, 1, 0], 'LSHO': [0, 0, -1]}, [[[0, 3, 4], rand_coor, rand_coor], [0, 0, 0]], [[1, 0, 0], [-1, 0, 0]]),
        ({'RSHO': [1, 0, 0], 'LSHO': [0, 1, 0]}, [[[7, 0, 24], rand_coor, rand_coor], [0, 0, 0]], [[0, -1, 0], [-0.96, 0, 0.28]]),
        ({'RSHO': [1, 0, 0], 'LSHO': [0, 0, 1]}, [[[8, 0, 6], rand_coor, rand_coor], [8, 0, 0]], [[8, 1, 0], [8, -1, 0]])])
    def test_findwandmarker(self, frame, thorax, expected):
        r"""
        This test provides coverage of the findwandmarker function in pyCGM.py, defined as findwandmarker(frame,thorax)
        where frame is a dictionary of x, y, z positions and marker names and thorax is the thorax axis and origin.
        The function takes in the xyz position of the Right Shoulder and Left Shoulder markers, as well as the thorax
        frame, which is a list of [ xyz axis vectors, origin ]. The wand marker position is returned as a 2x3 array
        containing the right wand marker x, y, z positions (1x3) followed by the left wand marker x, y, z positions
        (1x3). The thorax axis is provided in global coordinates, which are subtracted inside the function to define
        the unit vectors.
        For the Right and Left wand markers, the function performs the same calculation, with the difference being the
        corresponding sides marker. Each wand marker is defined as the cross product between the unit vector of the
        x axis of the thorax frame, and the unit vector from the thorax frame origin to the Shoulder marker.
        Given a marker SHO, representing the right (RSHO) or left (LSHO) shoulder markers and a thorax axis TH, the
        wand marker W is defined as:
        .. math::
            W_R = (RSHO-TH_o) \times TH_x
            W_L = TH_x \times (LSHO-TH_o)
        where :math:`TH_o` is the origin of the thorax axis, :math:`TH_x` is the x unit vector of the thorax axis.
        From this calculation, it should be clear that changing the thorax y and z vectors should not have an impact
        on the results.
        This unit test ensure that:
        - The right and left markers do not impact the wand marker calculations for one another
        - The function requires global positions
        - The thorax y and z axis do not change the results
        """
        result = pyCGM.findwandmarker(frame, thorax)
        np.testing.assert_almost_equal(result, expected, rounding_precision)
    def test_findwandmarker_datatypes(self):
        """
        This test provides coverage of the findwandmarker function in pyCGM.py, defined as findwandmarker(frame,thorax)
        where frame is a dictionary of x, y, z positions and marker names and thorax is the thorax axis.
        This test checks that the resulting output from calling cross is correct when called with ints or floats.
        """
        frame_int = {'RSHO': [1, 0, 0], 'LSHO': [0, 0, 1]}
        frame_float = {'RSHO': [1.0, 0.0, 0.0], 'LSHO': [0.0, 0.0, 1.0]}
        thorax_int = [[[8, 0, 6], [0, 0, 0], [0, 0, 0]], [8, 0, 0]]
        thorax_float = [[[8.0, 0.0, 6.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [8.0, 0.0, 0.0]]
        expected = [[8, 1, 0], [8, -1, 0]]
        # Check that calling findwandmarker yields the expected results when frame and thorax consist of ints
        result_int_list = pyCGM.findwandmarker(frame_int, thorax_int)
        np.testing.assert_almost_equal(result_int_list, expected, rounding_precision)
        # Check that calling findwandmarker yields the expected results when frame and thorax consist of floats
        result_float_list = pyCGM.findwandmarker(frame_float, thorax_float)
        np.testing.assert_almost_equal(result_float_list, expected, rounding_precision)
    @pytest.mark.parametrize(["a", "b", "expected"], [
        ([0.13232936, 0.98562946, -0.10499292], [-0.99119134, 0.13101088, -0.01938735], [-0.005353527183234709, 0.10663358915485248, 0.994283972218527]),
        ([0, 0, 0], [0, 0, 0], [0, 0, 0]),
        ([1, 1, 1], [1, 1, 1], [0, 0, 0]),
        ([0, 0, -2], [0, 4, 0], [8, 0, 0]),
        ([0, 0, 4], [-0.5, 0, 0], [0, -2, 0]),
        ([-1.5, 0, 0], [0, 4, 0], [0, 0, -6]),
        ([1, 0, 1], [0, 1, 0], [-1, 0, 1]),
        ([1, 2, 3], [3, 2, 1], [-4, 8, -4]),
        ([-2, 3, 1], [4, -1, 5], [16, 14, -10])
    ])
    def test_cross(self, a, b, expected):
        """
        This test provides coverage of the cross function in pyCGM.py, defined as cross(a, b) where a and b are both 3D vectors.
        This test takes 3 parameters:
        a: 3D vector
        b: 3D vector
        expected: the expected result from calling cross on a and b. This result is the cross product of the vectors
        a and b.
        """
        result = pyCGM.cross(a, b)
        np.testing.assert_almost_equal(result, expected, rounding_precision)
    def test_cross_datatypes(self):
        """
        This test provides coverage of the cross function in pyCGM.py, defined as cross(a, b) where a and b are both 3D vectors.
        This test checks that the resulting output from calling cross is correct when called with a list of ints, a numpy
        array of ints, a list of floats, and a numpy array of floats.
        """
        A_int = [-2, 3, 1]
        A_float = [-2.0, 3.0, 1.0]
        B_int = [4, -1, 5]
        B_float = [4.0, -1.0, 5.0]
        expected = [16, 14, -10]
        # Check the calling cross on a list of ints yields the expected results
        result_int_list = pyCGM.cross(A_int, B_int)
        np.testing.assert_almost_equal(result_int_list, expected, rounding_precision)
        # Check the calling cross on a numpy array of ints yields the expected results
        result_int_nparray = pyCGM.cross(np.array(A_int, dtype='int'), np.array(B_int, dtype='int'))
        np.testing.assert_almost_equal(result_int_nparray, expected, rounding_precision)
        # Check the calling cross on a list of floats yields the expected results
        result_float_list = pyCGM.cross(A_float, B_float)
        np.testing.assert_almost_equal(result_float_list, expected, rounding_precision)
        # Check the calling cross on a numpy array of floats yields the expected results
        result_float_nparray = pyCGM.cross(np.array(A_float, dtype='float'), np.array(B_float, dtype='float'))
        np.testing.assert_almost_equal(result_float_nparray, expected, rounding_precision)
    @pytest.mark.parametrize(["v", "expected"], [
        ([-9944.089508486479, -20189.20612828088, 150.42955108569652], 22505.812344655435),
        ([0, 0, 0], 0),
        ([2, 0, 0], 2),
        ([0, 0, -1], 1),
        ([0, 3, 4], 5),
        ([-3, 0, 4], 5),
        ([6, -8, 0], 10),
        ([-5, 0, -12], 13),
        ([1, -1, np.sqrt(2)], 2)])
    def test_norm2d(self, v, expected):
        r"""
        This test provides coverage of the norm2d function in pyCGM.py, defined as norm2d(v) where v is a 3D vector.
        This test takes 2 parameters:
        v: 3D vector
        expected: the expected result from calling norm2d on v. This will be the value of the normalization of vector v,
        returned as a float.
        Given the vector v, the normalization is defined by:
        normalization = :math:`\sqrt{v_x^2 + v_y^2 + v_z^2}`
        where :math:`v_x` is the x-coordinate of the vector v
        """
        result = pyCGM.norm2d(v)
        np.testing.assert_almost_equal(result, expected, rounding_precision)
    def test_norm2d_datatypes(self):
        """
        This test provides coverage of the norm2d function in pyCGM.py, defined as norm2d(v) where v is a 3D vector.
        This test checks that the resulting output from calling norm2d is correct when called with a list of ints, a
        numpy array of ints, a list of floats, and a numpy array of floats.
        """
        v_int = [6, 0, -8]
        v_float = [6.0, 0, -8.0]
        expected = 10
        # Check the calling norm2d on a list of ints yields the expected results
        result_int_list = pyCGM.norm2d(v_int)
        np.testing.assert_almost_equal(result_int_list, expected, rounding_precision)
        # Check the calling norm2d on a numpy array of ints yields the expected results
        result_int_nparray = pyCGM.norm2d(np.array(v_int, dtype='int'))
        np.testing.assert_almost_equal(result_int_nparray, expected, rounding_precision)
        # Check the calling norm2d on a list of floats yields the expected results
        result_float_list = pyCGM.norm2d(v_float)
        np.testing.assert_almost_equal(result_float_list, expected, rounding_precision)
        # Check the calling norm2d on a numpy array of floats yields the expected results
        result_float_nparray = pyCGM.norm2d(np.array(v_float, dtype='float'))
        np.testing.assert_almost_equal(result_float_nparray, expected, rounding_precision)
    @pytest.mark.parametrize(["v", "expected"], [
        ([-212.5847168, 28.09841919, -4.15808105], np.array(214.47394390603984)),
        ([0, 0, 0], np.array(0)),
        ([2, 0, 0], np.array(2)),
        ([0, 0, -1], np.array(1)),
        ([0, 3, 4], np.array(5)),
        ([-3, 0, 4], np.array(5)),
        ([-6, 8, 0], np.array(10)),
        ([-5, 0, -12], np.array(13)),
        ([1, -1, np.sqrt(2)], np.array(2))])
    def test_norm3d(self, v, expected):
        r"""
        This test provides coverage of the norm3d function in pyCGM.py, defined as norm3d(v) where v is a 3D vector.
        This test takes 2 parameters:
        v: 3D vector
        expected: the expected result from calling norm3d on v. This will be the normalization of the vector v,
        inside of a numpy array.
        Given the vector v, the normalization is defined by:
        normalization = :math:`\sqrt{v_x^2 + v_y^2 + v_z^2}`
        where :math:`v_x` is the x-coordinate of the vector v
        """
        result = pyCGM.norm3d(v)
        np.testing.assert_almost_equal(result, expected, rounding_precision)
    def test_norm3d_datatypes(self):
        """
        This test provides coverage of the norm3d function in pyCGM.py, defined as norm3d(v) where v is a 3D vector.
        This test checks that the resulting output from calling norm3d is correct when called with a list of ints, a
        numpy array of ints, a list of floats, and a numpy array of floats.
        """
        v_int = [-6, 0, 8]
        v_float = [-6.0, 0, 8.0]
        expected = np.array(10)
        # Check the calling norm3d on a list of ints yields the expected results
        result_int_list = pyCGM.norm3d(v_int)
        np.testing.assert_almost_equal(result_int_list, expected, rounding_precision)
        # Check the calling norm3d on a numpy array of ints yields the expected results
        result_int_nparray = pyCGM.norm3d(np.array(v_int, dtype='int'))
        np.testing.assert_almost_equal(result_int_nparray, expected, rounding_precision)
        # Check the calling norm3d on a list of floats yields the expected results
        result_float_list = pyCGM.norm3d(v_float)
        np.testing.assert_almost_equal(result_float_list, expected, rounding_precision)
        # Check the calling norm3d on a numpy array of floats yields the expected results
        result_float_nparray = pyCGM.norm3d(np.array(v_float, dtype='float'))
        np.testing.assert_almost_equal(result_float_nparray, expected, rounding_precision)
    @pytest.mark.parametrize(["v", "expected"], [
        ([-212.5847168, 28.09841919, -4.15808105], np.array([-4.62150006e-03, 6.10847515e-04, -9.03948887e-05])),
        ([0, 0, 0], np.array([np.nan, np.nan, np.nan])),
        ([2, 0, 0], np.array([0.5, 0, 0])),
        ([0, 0, -1], np.array([0, 0, -1])),
        ([0, 3, 4], np.array([0, 0.12, 0.16])),
        ([-3, 0, 4], np.array([-0.12, 0, 0.16])),
        ([-6, 8, 0], np.array([-0.06, 0.08, 0])),
        ([-5, 0, -12], np.array([-0.0295858, 0, -0.07100592])),
        ([1, -1, np.sqrt(2)], np.array([0.25, -0.25, 0.35355339]))])
    def test_normDiv(self, v, expected):
        r"""
        This test provides coverage of the normDiv function in pyCGM.py, defined as normDiv(v) where v is a 3D vector.
        This test takes 2 parameters:
        v: 3D vector
        expected: the expected result from calling norm3d on v. This function returns the wrong result. It is supposed
        to return the normalization division, but in the function divides the vector by the normalization twice.
        Given the vector v, the normalization is defined by:
        normalization = :math:`\sqrt{v_x^2 + v_y^2 + v_z^2}`
        where :math:`v_x` is the x-coordinate of the vector v
        The mathematically correct result would be defined by:
        .. math::
            \[ result = [\frac{v_x}{norm}, \frac{v_y}{norm}, \frac{v_z}{norm}] \]
        But this function has an error where it divides the vector twice:
        .. math::
            \[ result = [\frac{v_x}{norm^2}, \frac{v_y}{norm^2}, \frac{v_z}{norm^2}] \]
        """
        result = pyCGM.normDiv(v)
        np.testing.assert_almost_equal(result, expected, rounding_precision)
    def test_normDiv_datatypes(self):
        """
        This test provides coverage of the normDiv function in pyCGM.py, defined as normDiv(v) where v is a 3D vector.
        This test checks that the resulting output from calling normDiv is correct when called with a list of ints, a
        numpy array of ints, a list of floats, and a numpy array of floats.
        """
        v_int = [-6, 0, 8]
        v_float = [-6.0, 0, 8.0]
        expected = np.array([-0.06, 0, 0.08])
        # Check the calling normDiv on a list of ints yields the expected results
        result_int_list = pyCGM.normDiv(v_int)
        np.testing.assert_almost_equal(result_int_list, expected, rounding_precision)
        # Check the calling normDiv on a numpy array of ints yields the expected results
        result_int_nparray = pyCGM.normDiv(np.array(v_int, dtype='int'))
        np.testing.assert_almost_equal(result_int_nparray, expected, rounding_precision)
        # Check the calling normDiv on a list of floats yields the expected results
        result_float_list = pyCGM.normDiv(v_float)
        np.testing.assert_almost_equal(result_float_list, expected, rounding_precision)
        # Check the calling normDiv on a numpy array of floats yields the expected results
        result_float_nparray = pyCGM.normDiv(np.array(v_float, dtype='float'))
        np.testing.assert_almost_equal(result_float_nparray, expected, rounding_precision)
    @pytest.mark.parametrize(["A", "B", "expected"], [
        ([[1, 0, 0], [0, 1.0, -0.0], [0, 0.0, 1.0]], [[1.0, 0, 0.0], [0, 1, 0], [-0.0, 0, 1.0]], [[1.0, 0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
        ([[1]], [[1]], [[1]]),
        # Invalid matrix dimensions
        ([[1, 2]], [[1]], [[1]]),
        ([[2], [1]], [[1, 2]], [[2, 4], [1, 2]]),
        # Invalid matrix dimensions
        ([[1, 2, 0], [0, 1, 2]], [[2, 1], [1, 4]], [[4, 9], [1, 4]]),
        ([[11, 12, 13], [14, 15, 16]], [[1, 2], [3, 4], [5, 6]], [[112, 148], [139, 184]]),
        ([[1, 2, 3], [4, 5, 6]], [[7, 8], [9, 10], [11, 12]], [[58, 64], [139, 154]])])
    def test_matrixmult(self, A, B, expected):
        """
        This test provides coverage of the matrixmult function in pyCGM.py, defined as matrixmult(a, b)
        where a and b are both lists that represent a matrix to be multiplied.
        This test takes 3 parameters:
        A: a matrix, 2D array format
        B: a matrix, 2D array format
        expected: the expected matrix from calling matrixmult on A and B. This is the result of multiplying the two
        matrices A and B. It gives the correct result for multiplying two valid matrices, but still gives a result
        in some cases when the two matrices can't be multiplied. For two matrices to be multiplied, len(A[0]) need to
        be equal to len(B), but this function gives an output even when this isn't true
        """
        result = pyCGM.matrixmult(A, B)
        np.testing.assert_almost_equal(result, expected, rounding_precision)
    def test_matrixmult_datatypes(self):
        """
        This test provides coverage of the matrixmult function in pyCGM.py, defined as matrixmult(a, b)
        where a and b are both lists that represent a matrix to be multiplied.
        This test checks that the resulting output from calling matrixmult is correct when called with a list of ints,
        a numpy array of ints, a list of floats, and a numpy array of floats.
        """
        A_int = [[1, 2, 0], [0, 1, 2]]
        B_int = [[2, 1], [1, 4]]
        A_float = [[1.0, 2.0, 0.0], [0.0, 1.0, 2.0]]
        B_float = [[2.0, 1.0], [1.0, 4.0]]
        expected = [[4, 9], [1, 4]]
        # Check the calling matrixmult on a list of ints yields the expected results
        result_int_list = pyCGM.matrixmult(A_int, B_int)
        np.testing.assert_almost_equal(result_int_list, expected, rounding_precision)
        # Check the calling matrixmult on a numpy array of ints yields the expected results
        result_int_nparray = pyCGM.matrixmult(np.array(A_int, dtype='int'), np.array(B_int, dtype='int'))
        np.testing.assert_almost_equal(result_int_nparray, expected, rounding_precision)
        # Check the calling matrixmult on a list of floats yields the expected results
        result_float_list = pyCGM.matrixmult(A_float, B_float)
        np.testing.assert_almost_equal(result_float_list, expected, rounding_precision)
        # Check the calling matrixmult on a numpy array of floats yields the expected results
        result_float_nparray = pyCGM.matrixmult(np.array(A_float, dtype='float'), np.array(B_float, dtype='float'))
        np.testing.assert_almost_equal(result_float_nparray, expected, rounding_precision)
    @pytest.mark.parametrize(["x", "y", "z", "expected"], [
        (0.0, 0.0, 180, [[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]]),
        (0, 0, 0, [[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
        (90, 0, 0, [[1, 0, 0], [0, 0, -1], [0, 1, 0]]),
        (0, 135, 0, [[-0.70710678, 0, 0.70710678], [0, 1, 0], [-0.70710678, 0, -0.70710678]]),
        (0, 0, -60, [[0.5, 0.8660254, 0], [-0.8660254, 0.5, 0], [0, 0, 1]]),
        (90, 0, 90, [[0, -1, 0], [0, 0, -1], [1, 0, 0]]),
        (0, 150, -30, [[-0.75, -0.4330127, 0.5], [-0.5, 0.8660254, 0], [-0.4330127, -0.25, -0.8660254]]),
        (90, 180, -90, [[0, -1, 0], [0, 0, 1], [-1, 0, 0]])])
    def test_rotmat(self, x, y, z, expected):
        """
        This test provides coverage of the rotmat function in pyCGM.py, defined as rotmat(x, y, z)
        where x, y, and z are all floats that represent the angle of rotation in a particular dimension.
        This test takes 4 parameters:
        x: angle to be rotated in the x axis
        y: angle to be rotated in the y axis
        z: angle to be rotated in the z axis
        expected: the expected rotation matrix from calling rotmat on x, y, and z. This will be a transformation
        matrix that can be used to perform a rotation in the x, y, and z directions at the values inputted.
        """
        result = pyCGM.rotmat(x, y, z)
        np.testing.assert_almost_equal(result, expected, rounding_precision)
    def test_rotmat_datatypes(self):
        """
        This test provides coverage of the rotmat function in pyCGM.py, defined as rotmat(x, y, z)
        where x, y, and z are all floats that represent the angle of rotation in a particular dimension.
        This test checks that the resulting output from calling rotmat is correct when called with ints or floats.
        """
        result_int = pyCGM.rotmat(0, 150, -30)
        result_float = pyCGM.rotmat(0.0, 150.0, -30.0)
        expected = [[-0.75, -0.4330127, 0.5], [-0.5, 0.8660254, 0], [-0.4330127, -0.25, -0.8660254]]
        np.testing.assert_almost_equal(result_int, expected, rounding_precision)
        np.testing.assert_almost_equal(result_float, expected, rounding_precision)
| |
# generate generates a new recipe scraper.
import ast
import requests
import sys
from recipe_scrapers._utils import get_host_name
from recipe_scrapers._abstract import HEADERS
# Placeholder identifiers used inside templates/*.py; the generator states
# below locate and replace these with the real class name and host name.
template_class_name = "Template"
template_host_name = "example.com"
def generate_scraper(class_name, host_name):
    """Render templates/scraper.py into recipe_scrapers/<module>.py.

    Walks the template's AST with a GenerateScraperState, which queues
    replacements of the placeholder class and host names, then writes the
    rewritten source to the new scraper module.
    """
    with open("templates/scraper.py") as source:
        code = source.read()

    state = GenerateScraperState(class_name, host_name, code)
    for node in ast.walk(ast.parse(code)):
        if not state.step(node):
            break

    with open(f"recipe_scrapers/{class_name.lower()}.py", "w") as target:
        target.write(state.result())
def generate_scraper_test(class_name, host_name):
    """Render templates/test_scraper.py into tests/test_<module>.py.

    Same AST-walk mechanism as generate_scraper, but driven by a
    GenerateTestScraperState which also rewrites the import line and the
    test class name.
    """
    with open("templates/test_scraper.py") as source:
        code = source.read()

    state = GenerateTestScraperState(class_name, host_name, code)
    for node in ast.walk(ast.parse(code)):
        if not state.step(node):
            break

    with open(f"tests/test_{class_name.lower()}.py", "w") as target:
        target.write(state.result())
def init_scraper(class_name):
    """Register the new scraper in recipe_scrapers/__init__.py.

    Rewrites the package __init__ in place: inserts the import line and the
    SCRAPERS dict entry at their alphabetical positions (see
    InitScraperState), then truncates any leftover tail.
    """
    with open("recipe_scrapers/__init__.py", "r+") as source:
        code = source.read()
        state = InitScraperState(class_name, code)
        for node in ast.walk(ast.parse(code)):
            if not state.step(node):
                break
        source.seek(0)
        source.write(state.result())
        source.truncate()
def generate_test_data(class_name, content):
    """Save the fetched page bytes as the new scraper's test fixture."""
    fixture_path = f"tests/test_data/{class_name.lower()}.testhtml"
    with open(fixture_path, "wb") as target:
        target.write(content)
class ScraperState:
    """Base for template-rewriting passes driven by an AST walk.

    Holds the template source, a line-offset table used to locate AST nodes
    in the raw text, and a Replacer that accumulates queued edits.
    """

    def __init__(self, code):
        self.code = code
        self.line_offsets = get_line_offsets(code)
        self.replacer = Replacer(code)

    def result(self):
        """Source text with every queued replacement applied."""
        return self.replacer.result()

    def _offset(self, node):
        """Character offset of `node` in the source (line entry + column)."""
        return self.line_offsets[node.lineno - 1] + node.col_offset

    def _replace(self, replacement_text, start, length):
        """Queue replacement of `length` characters starting at `start`."""
        self.replacer.replace(replacement_text, start, length)
class GenerateScraperState(ScraperState):
    """Rewrites the scraper template: placeholder class name and host name."""

    def __init__(self, class_name, host_name, code):
        super().__init__(code)
        self.class_name = class_name
        self.host_name = host_name

    def step(self, node):
        """Inspect one AST node; always returns True to continue the walk."""
        if isinstance(node, ast.ClassDef) and node.name == template_class_name:
            start = self.code.index(template_class_name, self._offset(node))
            self._replace(self.class_name, start, len(template_class_name))
        if isinstance(node, ast.Constant) and node.value == template_host_name:
            start = self.code.index(template_host_name, self._offset(node))
            self._replace(self.host_name, start, len(template_host_name))
        return True
class GenerateTestScraperState(ScraperState):
    """Rewrites the scraper-test template to target the generated scraper.

    Replaces the module name in the import line, the test class name, the
    scraper class reference, and the placeholder host-name constant.
    """

    def __init__(self, class_name, host_name, code):
        super().__init__(code)
        self.class_name = class_name
        self.host_name = host_name
        self.module_name = class_name.lower()
        self.template_module_name = template_class_name.lower()

    def _swap(self, old, new, search_from):
        """Queue replacement of the next occurrence of `old` after `search_from`."""
        self._replace(new, self.code.index(old, search_from), len(old))

    def step(self, node):
        """Inspect one AST node; always returns True to continue the walk."""
        if (
            isinstance(node, ast.ImportFrom)
            and node.module == f"recipe_scrapers.{self.template_module_name}"
        ):
            # Both searches start from the node offset: the lowercase module
            # name and the capitalized class name never collide.
            offset = self._offset(node)
            self._swap(self.template_module_name, self.module_name, offset)
            self._swap(template_class_name, self.class_name, offset)
        if (
            isinstance(node, ast.ClassDef)
            and node.name == f"Test{template_class_name}Scraper"
        ):
            self._swap(template_class_name, self.class_name, self._offset(node))
        if (
            isinstance(node, ast.Assign)
            and isinstance(node.value, ast.Name)
            and node.value.id == template_class_name
        ):
            self._swap(template_class_name, self.class_name, self._offset(node))
        if isinstance(node, ast.Constant) and node.value == template_host_name:
            self._swap(template_host_name, self.host_name, self._offset(node))
        return True
class InitScraperState(ScraperState):
    """Two-phase pass over recipe_scrapers/__init__.py.

    Phase "import": insert `from .<module> import <Class>` at its
    alphabetical position among the relative imports.
    Phase "init": insert `<Class>.host(): <Class>,` at its alphabetical
    position inside the SCRAPERS dict, then stop the walk.

    NOTE(review): insertion offsets rely on get_line_offsets() storing the
    position of the newline *preceding* each line (one before the true line
    start), so inserted text that begins with "\n" lands before the target
    line — confirm before changing either piece.
    Assumes the imports and SCRAPERS keys in __init__.py are already
    alphabetically sorted — TODO confirm.
    """
    def __init__(self, class_name, code):
        super().__init__(code)
        self.class_name = class_name
        self.module_name = class_name.lower()
        # Current phase: "import" until the import is placed, then "init".
        self.state = "import"
        # Most recently seen node of interest (ImportFrom or dict key Call),
        # used to append after the last entry when no later entry exists.
        self.last_node = None
    def step(self, node):
        # Dispatch to the handler for the current phase; False stops the walk.
        if self.state == "import":
            return self._import(node)
        elif self.state == "init":
            return self._init(node)
        else:
            return False
    def _import(self, node):
        if isinstance(node, ast.Module) or isinstance(node, ast.Import):
            return True
        if isinstance(node, ast.ImportFrom):
            # First relative import sorting after ours: insert just before it.
            if node.module > self.module_name and node.level > 0:
                offset = self._offset(node)
                import_statement = (
                    f"\nfrom .{self.module_name} import {self.class_name}"
                )
                self._replace(import_statement, offset, 0)
                self.state = "init"
            self.last_node = node
        elif isinstance(self.last_node, ast.ImportFrom):
            # Walk moved past the import block without finding a later
            # import: append after the last import line instead.
            offset = (
                self.line_offsets[self.last_node.lineno - 1]
                + self.last_node.end_col_offset
            )
            segment_end = self.code.index("\n", offset)
            import_statement = f"\nfrom .{self.module_name} import {self.class_name}"
            self._replace(import_statement, segment_end, 0)
            self.state = "init"
            # Current node belongs to the next phase; process it immediately.
            return self._init(node)
        return True
    def _init(self, node):
        if isinstance(node, ast.Assign):
            for target in node.targets:
                if (
                    hasattr(target, "id")
                    and target.id == "SCRAPERS"
                    and isinstance(node.value, ast.Dict)
                ):
                    # Keys are expected to be `SomeClass.host()` calls;
                    # compare by class name to keep alphabetical order.
                    for key in node.value.keys:
                        if (
                            isinstance(key, ast.Call)
                            and isinstance(key.func, ast.Attribute)
                            and isinstance(key.func.value, ast.Name)
                        ):
                            if key.func.value.id > self.class_name:
                                offset = self._offset(key)
                                init_statement = f" {self.class_name}.host(): {self.class_name},\n "
                                self._replace(init_statement, offset, 0)
                                return False
                            self.last_node = key
                    if isinstance(self.last_node, ast.Call):
                        # No later key found: append after the last entry.
                        offset = (
                            self.line_offsets[self.last_node.lineno - 1]
                            + self.last_node.end_col_offset
                        )
                        segment_end = self.code.index("\n", offset)
                        init_statement = f"\n {self.class_name}.host(): {self.class_name},"
                        self._replace(init_statement, segment_end, 0)
                        return False
        return True
class Replacer:
    """Accumulates (text, start, length) edits against an immutable source
    string and applies them all in result().

    Replacement offsets refer to positions in the ORIGINAL source; edits
    must be queued in ascending start order, since each applied edit shifts
    later offsets by the running length delta.

    Bug fix: result() previously accumulated the shift in the instance
    attribute `delta`, so a second call to result() (or queuing more edits
    and calling it again) applied stale shifts and corrupted the output.
    The shift is now a local variable, making result() idempotent.
    """

    def __init__(self, code):
        self.code = code
        # Kept for backward compatibility with any external readers; no
        # longer mutated by result().
        self.delta = 0
        self.replacements = []

    def replace(self, replacement_text, start, length):
        """Queue replacement of `length` chars at original offset `start`."""
        self.replacements.append((replacement_text, start, length))

    def result(self):
        """Return the source with all queued replacements applied.

        Safe to call multiple times; each call recomputes from the
        original source.
        """
        code = self.code
        delta = 0  # cumulative length change from edits applied so far
        for (replacement_text, start, length) in self.replacements:
            start = start + delta
            end = start + length
            code = code[:start] + replacement_text + code[end:]
            delta += len(replacement_text) - length
        return code
def get_line_offsets(code):
    """Return [0] followed by the index of every newline in `code`.

    NOTE: entry k (k >= 1) is the position of the newline *preceding* line
    k+1, i.e. one character before that line's true start. Callers
    compensate by forward-searching from the offset, and InitScraperState
    deliberately exploits the off-by-one to insert "\\n"-prefixed text
    before a line — do not "fix" this.
    """
    offsets = [0]
    position = code.find("\n")
    while position != -1:
        offsets.append(position)
        position = code.find("\n", position + 1)
    return offsets
def main():
    """CLI entry point: fetch the page, then generate the scraper module,
    its test module, the HTML fixture, and register it in the package.

    Usage: generate.py ScraperClassName url
    """
    if len(sys.argv) < 3:
        print("Usage: generate.py ScraperClassName url")
        # sys.exit instead of the site-injected exit(): exit is meant for
        # interactive use and is absent when site is disabled.
        sys.exit(1)
    class_name = sys.argv[1]
    url = sys.argv[2]
    host_name = get_host_name(url)
    # Fetch once; the raw bytes become the test fixture.
    testhtml = requests.get(url, headers=HEADERS).content
    generate_scraper(class_name, host_name)
    generate_scraper_test(class_name, host_name)
    generate_test_data(class_name, testhtml)
    init_scraper(class_name)
# Script entry point: only run the generator when executed directly.
if __name__ == "__main__":
    main()
| |
from __future__ import unicode_literals
import datetime
import os
import re
import sys
import types
from django.conf import settings
from django.http import (HttpResponse, HttpResponseServerError,
HttpResponseNotFound, HttpRequest, build_request_repr)
from django.template import Template, Context, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.utils.datastructures import MultiValueDict
from django.utils.html import escape
from django.utils.encoding import force_bytes, smart_text
from django.utils.module_loading import import_by_path
from django.utils import six
# Settings whose names match any of these substrings are treated as
# sensitive and hidden from debug error reports.
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE')
# Replacement text shown in place of any cleansed (sensitive) value.
CLEANSED_SUBSTITUTE = '********************'
def linebreak_iter(template_source):
    """Yield the start offset of each line in `template_source`.

    Yields 0 first, then the offset just past each newline, and finally
    len(template_source) + 1 as an end sentinel.
    """
    position = 0
    yield position
    while True:
        position = template_source.find('\n', position) + 1
        if position == 0:  # find() returned -1: no more newlines
            break
        yield position
    yield len(template_source) + 1
def cleanse_setting(key, value):
    """Cleanse an individual setting key/value of sensitive content.

    If the value is a dictionary, recursively cleanse the keys in
    that dictionary.
    """
    try:
        if HIDDEN_SETTINGS.search(key):
            cleansed = CLEANSED_SUBSTITUTE
        elif isinstance(value, dict):
            cleansed = dict(
                (k, cleanse_setting(k, v)) for k, v in value.items())
        else:
            cleansed = value
    except TypeError:
        # Non-string keys (e.g. ints) can't be regex-matched; keep as-is.
        cleansed = value
    if callable(cleansed):
        # Stop templates from invoking the (possibly expensive) callable.
        cleansed.do_not_call_in_templates = True
    return cleansed
def get_safe_settings():
    """Return a dict of the settings module with sensitive values blurred."""
    return dict(
        (name, cleanse_setting(name, getattr(settings, name)))
        for name in dir(settings)
        if name.isupper()  # settings are upper-case module attributes
    )
def technical_500_response(request, exc_type, exc_value, tb):
    """
    Create a technical server error response. The last three arguments are
    the values returned from sys.exc_info() and friends.
    """
    reporter = ExceptionReporter(request, exc_type, exc_value, tb)
    # AJAX clients get a plain-text traceback; browsers get the HTML page.
    if request.is_ajax():
        content = reporter.get_traceback_text()
        mime_type = 'text/plain'
    else:
        content = reporter.get_traceback_html()
        mime_type = 'text/html'
    return HttpResponseServerError(content, content_type=mime_type)
# Cache for the default exception reporter filter instance.
default_exception_reporter_filter = None
def get_exception_reporter_filter(request):
    """Return the filter attached to `request`, or the lazily-built default.

    The default filter class comes from the
    DEFAULT_EXCEPTION_REPORTER_FILTER setting and is instantiated once,
    then cached at module level.
    """
    global default_exception_reporter_filter
    if default_exception_reporter_filter is None:
        # Load the default filter for the first time and cache it.
        filter_class = import_by_path(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)
        default_exception_reporter_filter = filter_class()
    if not request:
        return default_exception_reporter_filter
    return getattr(request, 'exception_reporter_filter',
                   default_exception_reporter_filter)
class ExceptionReporterFilter(object):
    """
    Base for all exception reporter filter classes. All overridable hooks
    contain lenient default behaviors.
    """
    def get_request_repr(self, request):
        """Repr of the request with POST params run through the filter."""
        if request is None:
            return repr(None)
        return build_request_repr(
            request, POST_override=self.get_post_parameters(request))
    def get_post_parameters(self, request):
        """Default: expose POST parameters unfiltered."""
        return {} if request is None else request.POST
    def get_traceback_frame_variables(self, request, tb_frame):
        """Default: expose all frame locals unfiltered."""
        return list(six.iteritems(tb_frame.f_locals))
class SafeExceptionReporterFilter(ExceptionReporterFilter):
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
def is_active(self, request):
"""
This filter is to add safety in production environments (i.e. DEBUG
is False). If DEBUG is True then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_cleansed_multivaluedict(self, request, multivaluedict):
"""
Replaces the keys in a MultiValueDict marked as sensitive with stars.
This mitigates leaking sensitive POST parameters if something like
request.POST['nonexistent_key'] throws an exception (#21098).
"""
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
multivaluedict = multivaluedict.copy()
for param in sensitive_post_parameters:
if param in multivaluedict:
multivaluedict[param] = CLEANSED_SUBSTITUTE
return multivaluedict
def get_post_parameters(self, request):
"""
Replaces the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == '__ALL__':
# Cleanse all parameters.
for k, v in cleansed.items():
cleansed[k] = CLEANSED_SUBSTITUTE
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = CLEANSED_SUBSTITUTE
return cleansed
else:
return request.POST
def cleanse_special_types(self, request, value):
if isinstance(value, HttpRequest):
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
elif isinstance(value, MultiValueDict):
# Cleanse MultiValueDicts (request.POST is the one we usually care about)
value = self.get_cleansed_multivaluedict(request, value)
return value
    def get_traceback_frame_variables(self, request, tb_frame):
        """
        Replaces the values of variables marked as sensitive with
        stars (*********).

        Returns the frame's locals as (name, value) pairs (``dict.items()``),
        cleansed according to any sensitive_variables decorator found on the
        call stack above *tb_frame*.
        """
        # Loop through the frame's callers to see if the sensitive_variables
        # decorator was used.
        current_frame = tb_frame.f_back
        sensitive_variables = None
        while current_frame is not None:
            if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
                and 'sensitive_variables_wrapper' in current_frame.f_locals):
                # The sensitive_variables decorator was used, so we take note
                # of the sensitive variables' names.
                wrapper = current_frame.f_locals['sensitive_variables_wrapper']
                sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
                break
            current_frame = current_frame.f_back
        cleansed = {}
        if self.is_active(request) and sensitive_variables:
            if sensitive_variables == '__ALL__':
                # Cleanse all variables
                for name, value in tb_frame.f_locals.items():
                    cleansed[name] = CLEANSED_SUBSTITUTE
            else:
                # Cleanse specified variables
                for name, value in tb_frame.f_locals.items():
                    if name in sensitive_variables:
                        value = CLEANSED_SUBSTITUTE
                    else:
                        value = self.cleanse_special_types(request, value)
                    cleansed[name] = value
        else:
            # Potentially cleanse the request and any MultiValueDicts if they
            # are one of the frame variables.
            for name, value in tb_frame.f_locals.items():
                cleansed[name] = self.cleanse_special_types(request, value)
        if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper'
            and 'sensitive_variables_wrapper' in tb_frame.f_locals):
            # For good measure, obfuscate the decorated function's arguments in
            # the sensitive_variables decorator's frame, in case the variables
            # associated with those arguments were meant to be obfuscated from
            # the decorated function's frame.
            cleansed['func_args'] = CLEANSED_SUBSTITUTE
            cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
        return cleansed.items()
class ExceptionReporter(object):
    """
    A class to organize and coordinate reporting on exceptions.

    Gathers traceback frames, source context, template-debug details and the
    (filtered) request data, then renders them through the embedded 500
    templates as HTML or plain text.
    """
    def __init__(self, request, exc_type, exc_value, tb, is_email=False):
        # request may be None (errors raised outside of request handling).
        self.request = request
        self.filter = get_exception_reporter_filter(self.request)
        self.exc_type = exc_type
        self.exc_value = exc_value
        self.tb = tb
        # is_email=True tailors the templates for the error-report email
        # (interactive javascript sections are skipped).
        self.is_email = is_email
        self.template_info = None
        self.template_does_not_exist = False
        self.loader_debug_info = None
        # Handle deprecated string exceptions
        if isinstance(self.exc_type, six.string_types):
            self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
            self.exc_type = type(self.exc_value)
    def format_path_status(self, path):
        """Return a human-readable existence/readability status for *path*."""
        if not os.path.exists(path):
            return "File does not exist"
        if not os.path.isfile(path):
            return "Not a file"
        if not os.access(path, os.R_OK):
            return "File is not readable"
        return "File exists"
    def get_traceback_data(self):
        """Return a dictionary containing traceback information."""
        if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
            from django.template.loader import template_source_loaders
            self.template_does_not_exist = True
            self.loader_debug_info = []
            # If the template_source_loaders haven't been populated yet, you need
            # to provide an empty list for this for loop to not fail.
            if template_source_loaders is None:
                template_source_loaders = []
            for loader in template_source_loaders:
                try:
                    source_list_func = loader.get_template_sources
                    # NOTE: This assumes exc_value is the name of the template that
                    # the loader attempted to load.
                    template_list = [{
                        'name': t,
                        'status': self.format_path_status(t),
                    } for t in source_list_func(str(self.exc_value))]
                except AttributeError:
                    template_list = []
                loader_name = loader.__module__ + '.' + loader.__class__.__name__
                self.loader_debug_info.append({
                    'loader': loader_name,
                    'templates': template_list,
                })
        if (settings.TEMPLATE_DEBUG and
                hasattr(self.exc_value, 'django_template_source')):
            self.get_template_exception_info()
        frames = self.get_traceback_frames()
        # Escape the frame variables once here; the template renders them
        # inside an {% autoescape off %} block.
        for i, frame in enumerate(frames):
            if 'vars' in frame:
                frame['vars'] = [(k, force_escape(pprint(v))) for k, v in frame['vars']]
            frames[i] = frame
        unicode_hint = ''
        if self.exc_type and issubclass(self.exc_type, UnicodeError):
            # Show a few characters around the offending slice of the string
            # that failed to encode/decode.
            start = getattr(self.exc_value, 'start', None)
            end = getattr(self.exc_value, 'end', None)
            if start is not None and end is not None:
                unicode_str = self.exc_value.args[1]
                unicode_hint = smart_text(unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))], 'ascii', errors='replace')
        from django import get_version
        c = {
            'is_email': self.is_email,
            'unicode_hint': unicode_hint,
            'frames': frames,
            'request': self.request,
            'filtered_POST': self.filter.get_post_parameters(self.request),
            'settings': get_safe_settings(),
            'sys_executable': sys.executable,
            'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
            'server_time': datetime.datetime.now(),
            'django_version_info': get_version(),
            'sys_path': sys.path,
            'template_info': self.template_info,
            'template_does_not_exist': self.template_does_not_exist,
            'loader_debug_info': self.loader_debug_info,
        }
        # Check whether exception info is available
        if self.exc_type:
            c['exception_type'] = self.exc_type.__name__
        if self.exc_value:
            c['exception_value'] = smart_text(self.exc_value, errors='replace')
        if frames:
            c['lastframe'] = frames[-1]
        return c
    def get_traceback_html(self):
        "Return HTML version of debug 500 HTTP error page."
        t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')
        c = Context(self.get_traceback_data(), use_l10n=False)
        return t.render(c)
    def get_traceback_text(self):
        "Return plain text version of debug 500 HTTP error page."
        t = Template(TECHNICAL_500_TEXT_TEMPLATE, name='Technical 500 template')
        c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
        return t.render(c)
    def get_template_exception_info(self):
        """
        Populate self.template_info for the "error during template rendering"
        section; django_template_source holds the template origin plus the
        (start, end) character offsets of the failing token.
        """
        origin, (start, end) = self.exc_value.django_template_source
        template_source = origin.reload()
        context_lines = 10
        line = 0
        upto = 0
        source_lines = []
        before = during = after = ""
        for num, next in enumerate(linebreak_iter(template_source)):
            if start >= upto and end <= next:
                # This line contains the failing token: split it into the
                # before/during/after parts the template highlights.
                line = num
                before = escape(template_source[upto:start])
                during = escape(template_source[start:end])
                after = escape(template_source[end:next])
            source_lines.append((num, escape(template_source[upto:next])))
            upto = next
        total = len(source_lines)
        top = max(1, line - context_lines)
        bottom = min(total, line + 1 + context_lines)
        # In some rare cases, exc_value.args might be empty.
        try:
            message = self.exc_value.args[0]
        except IndexError:
            message = '(Could not get exception message)'
        self.template_info = {
            'message': message,
            'source_lines': source_lines[top:bottom],
            'before': before,
            'during': during,
            'after': after,
            'top': top,
            'bottom': bottom,
            'total': total,
            'line': line,
            'name': origin.name,
        }
    def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
        """
        Returns context_lines before and after lineno from file.
        Returns (pre_context_lineno, pre_context, context_line, post_context).
        """
        source = None
        # Prefer the module loader (works for zipped/frozen modules) and fall
        # back to reading the file from disk.
        if loader is not None and hasattr(loader, "get_source"):
            try:
                source = loader.get_source(module_name)
            except ImportError:
                pass
            if source is not None:
                source = source.splitlines()
        if source is None:
            try:
                with open(filename, 'rb') as fp:
                    source = fp.read().splitlines()
            except (OSError, IOError):
                pass
        if source is None:
            return None, [], None, []
        # If we just read the source from a file, or if the loader did not
        # apply tokenize.detect_encoding to decode the source into a Unicode
        # string, then we should do that ourselves.
        if isinstance(source[0], six.binary_type):
            encoding = 'ascii'
            for line in source[:2]:
                # File coding may be specified. Match pattern from PEP-263
                # (http://www.python.org/dev/peps/pep-0263/)
                match = re.search(br'coding[:=]\s*([-\w.]+)', line)
                if match:
                    encoding = match.group(1).decode('ascii')
                    break
            source = [six.text_type(sline, encoding, 'replace') for sline in source]
        lower_bound = max(0, lineno - context_lines)
        upper_bound = lineno + context_lines
        pre_context = source[lower_bound:lineno]
        context_line = source[lineno]
        post_context = source[lineno + 1:upper_bound]
        return lower_bound, pre_context, context_line, post_context
    def get_traceback_frames(self):
        """Build the list of frame dicts rendered by the 500 templates."""
        frames = []
        tb = self.tb
        while tb is not None:
            # Support for __traceback_hide__ which is used by a few libraries
            # to hide internal frames.
            if tb.tb_frame.f_locals.get('__traceback_hide__'):
                tb = tb.tb_next
                continue
            filename = tb.tb_frame.f_code.co_filename
            function = tb.tb_frame.f_code.co_name
            # tb_lineno is 1-based; convert to a 0-based index for slicing.
            lineno = tb.tb_lineno - 1
            loader = tb.tb_frame.f_globals.get('__loader__')
            module_name = tb.tb_frame.f_globals.get('__name__') or ''
            pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(filename, lineno, 7, loader, module_name)
            if pre_context_lineno is not None:
                frames.append({
                    'tb': tb,
                    'type': 'django' if module_name.startswith('django.') else 'user',
                    'filename': filename,
                    'function': function,
                    'lineno': lineno + 1,
                    'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
                    'id': id(tb),
                    'pre_context': pre_context,
                    'context_line': context_line,
                    'post_context': post_context,
                    'pre_context_lineno': pre_context_lineno + 1,
                })
            tb = tb.tb_next
        return frames
    def format_exception(self):
        """
        Return the same data as from traceback.format_exception.
        """
        import traceback
        frames = self.get_traceback_frames()
        tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames]
        # NOTE(review): 'list' shadows the builtin in this method.
        list = ['Traceback (most recent call last):\n']
        list += traceback.format_list(tb)
        list += traceback.format_exception_only(self.exc_type, self.exc_value)
        return list
def technical_404_response(request, exception):
    "Create a technical 404 error response. The exception should be the Http404."
    try:
        tried = exception.args[0]['tried']
    except (IndexError, TypeError, KeyError):
        tried = []
    else:
        if (not tried # empty URLconf
            or (request.path == '/'
                and len(tried) == 1 # default URLconf
                and len(tried[0]) == 1
                and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin')):
            # Fresh project (only the admin pattern, or no patterns at all):
            # show the friendly "It worked!" welcome page instead of a 404.
            return default_urlconf(request)
    urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
    if isinstance(urlconf, types.ModuleType):
        urlconf = urlconf.__name__
    t = Template(TECHNICAL_404_TEMPLATE, name='Technical 404 template')
    c = Context({
        'urlconf': urlconf,
        'root_urlconf': settings.ROOT_URLCONF,
        'request_path': request.path_info[1:], # Trim leading slash
        'urlpatterns': tried,
        # NOTE(review): force_bytes here will render as b'...' in the template
        # under Python 3 -- force_text looks intended; confirm before changing.
        'reason': force_bytes(exception, errors='replace'),
        'request': request,
        'settings': get_safe_settings(),
    })
    return HttpResponseNotFound(t.render(c), content_type='text/html')
def default_urlconf(request):
    "Create an empty URLconf 404 error response."
    # Render the static welcome page; no context variables are needed.
    template = Template(DEFAULT_URLCONF_TEMPLATE, name='Default URLconf template')
    rendered = template.render(Context({}))
    return HttpResponse(rendered, content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 0 20px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block': 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML == s1 ? s2: s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if loader_debug_info %}
<p>Django tried loading these templates, in this order:</p>
<ul>
{% for loader in loader_debug_info %}
<li>Using loader <code>{{ loader.loader }}</code>:
<ul>
{% for t in loader.templates %}<li><code>{{ t.name }}</code> ({{ t.status }})</li>{% endfor %}
</ul>
</li>
{% endfor %}
</ul>
{% else %}
<p>Django couldn't find any templates because your <code>TEMPLATE_LOADERS</code> setting is empty!</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}<span class="specific">{{ template_info.during }}</span>{{ template_info.after }}</td></tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">Switch to copy-and-paste view</a></span>{% endif %}</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">{% for line in frame.pre_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line"><li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">{% for line in frame.post_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title" value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template Loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} ({{ t.status }})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}
Traceback:
{% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}
{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public Web site">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in filtered_POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard 500 page.
</p>
</div>
{% endif %}
</body>
</html>
"""
TECHNICAL_500_TEXT_TEMPLATE = """{% load firstof from future %}{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception message supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} ({{ t.status }})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}{% if frames %}
Traceback:
{% for frame in frames %}File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
GET:{% for k, v in request.GET.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request.FILES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request.COOKIES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard 500 page.
"""
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
DEFAULT_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>Welcome to Django</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>It worked!</h1>
<h2>Congratulations on your first Django-powered page.</h2>
</div>
<div id="instructions">
<p>
Of course, you haven't actually done any work yet.
Next, start your first app by running <code>python manage.py startapp [appname]</code>.
</p>
</div>
<div id="explanation">
<p>
You're seeing this message because you have <code>DEBUG = True</code> in your
Django settings file and you haven't configured any URLs. Get to work!
</p>
</div>
</body></html>
"""
| |
import time
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from electrum_gui.qt.util import *
from electrum_gui.qt.amountedit import AmountEdit
from electrum.bitcoin import COIN
from electrum.i18n import _
from decimal import Decimal
from functools import partial
from electrum.plugins import hook
from exchange_rate import FxPlugin
from electrum.util import timestamp_to_datetime
class Plugin(FxPlugin, QObject):
    def __init__(self, parent, config, name):
        # Initialise both bases explicitly: FxPlugin provides the exchange
        # rate machinery, QObject makes self able to emit Qt signals.
        FxPlugin.__init__(self, parent, config, name)
        QObject.__init__(self)
    def connect_fields(self, window, btc_e, fiat_e, fee_e):
        """
        Wire a BTC amount edit and its fiat counterpart together so editing
        either one recomputes the other through the current exchange rate.
        fee_e may be None; when present it is cleared/refreshed alongside
        the BTC amount.
        """
        def edit_changed(edit):
            # User typed into `edit`; recompute the paired field.
            edit.setStyleSheet(BLACK_FG)
            fiat_e.is_last_edited = (edit == fiat_e)
            amount = edit.get_amount()
            rate = self.exchange_rate()
            if rate is None or amount is None:
                # No usable rate or amount: blank the opposite field(s).
                if edit is fiat_e:
                    btc_e.setText("")
                    if fee_e:
                        fee_e.setText("")
                else:
                    fiat_e.setText("")
            else:
                if edit is fiat_e:
                    # fiat -> base units (COIN presumably is the satoshis-per-BTC
                    # factor -- confirm against electrum.bitcoin).
                    btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                    if fee_e: window.update_fee()
                    btc_e.setStyleSheet(BLUE_FG)
                else:
                    # base units -> fiat display string.
                    fiat_e.setText(self.ccy_amount_str(
                        amount * Decimal(rate) / COIN, False))
                    fiat_e.setStyleSheet(BLUE_FG)
        fiat_e.textEdited.connect(partial(edit_changed, fiat_e))
        btc_e.textEdited.connect(partial(edit_changed, btc_e))
        # Start by treating the BTC field as authoritative.
        fiat_e.is_last_edited = False
@hook
def init_qt(self, gui):
for window in gui.windows:
self.on_new_window(window)
@hook
def do_clear(self, window):
window.fiat_send_e.setText('')
def on_close(self):
self.emit(SIGNAL('close_fx_plugin'))
def restore_window(self, window):
window.update_status()
window.history_list.refresh_headers()
window.fiat_send_e.hide()
window.fiat_receive_e.hide()
def on_quotes(self):
self.emit(SIGNAL('new_fx_quotes'))
def on_history(self):
self.emit(SIGNAL('new_fx_history'))
def on_fx_history(self, window):
'''Called when historical fx quotes are updated'''
window.history_list.update()
def on_fx_quotes(self, window):
'''Called when fresh spot fx quotes come in'''
window.update_status()
self.populate_ccy_combo()
# Refresh edits with the new rate
edit = window.fiat_send_e if window.fiat_send_e.is_last_edited else window.amount_e
edit.textEdited.emit(edit.text())
edit = window.fiat_receive_e if window.fiat_receive_e.is_last_edited else window.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.history_used_spot:
self.on_fx_history(window)
def on_ccy_combo_change(self):
'''Called when the chosen currency changes'''
ccy = str(self.ccy_combo.currentText())
if ccy and ccy != self.ccy:
self.set_currency(ccy)
self.hist_checkbox_update()
def hist_checkbox_update(self):
if self.hist_checkbox:
self.hist_checkbox.setEnabled(self.ccy in self.exchange.history_ccys())
self.hist_checkbox.setChecked(self.config_history())
def populate_ccy_combo(self):
# There should be at most one instance of the settings dialog
combo = self.ccy_combo
# NOTE: bool(combo) is False if it is empty. Nuts.
if combo is not None:
combo.blockSignals(True)
combo.clear()
combo.addItems(sorted(self.exchange.quotes.keys()))
combo.blockSignals(False)
combo.setCurrentIndex(combo.findText(self.ccy))
@hook
def on_new_window(self, window):
# Additional send and receive edit boxes
if not hasattr(window, 'fiat_send_e'):
send_e = AmountEdit(self.get_currency)
window.send_grid.addWidget(send_e, 4, 2, Qt.AlignLeft)
window.amount_e.frozen.connect(
lambda: send_e.setFrozen(window.amount_e.isReadOnly()))
receive_e = AmountEdit(self.get_currency)
window.receive_grid.addWidget(receive_e, 2, 2, Qt.AlignLeft)
window.fiat_send_e = send_e
window.fiat_receive_e = receive_e
self.connect_fields(window, window.amount_e, send_e, window.fee_e)
self.connect_fields(window, window.receive_amount_e, receive_e, None)
else:
window.fiat_send_e.show()
window.fiat_receive_e.show()
window.history_list.refresh_headers()
window.update_status()
window.connect(self, SIGNAL('new_fx_quotes'), lambda: self.on_fx_quotes(window))
window.connect(self, SIGNAL('new_fx_history'), lambda: self.on_fx_history(window))
window.connect(self, SIGNAL('close_fx_plugin'), lambda: self.restore_window(window))
window.connect(self, SIGNAL('refresh_headers'), window.history_list.refresh_headers)
def settings_widget(self, window):
return EnterButton(_('Settings'), partial(self.settings_dialog, window))
def settings_dialog(self, window):
d = WindowModalDialog(window, _("Exchange Rate Settings"))
layout = QGridLayout(d)
layout.addWidget(QLabel(_('Exchange rate API: ')), 0, 0)
layout.addWidget(QLabel(_('Currency: ')), 1, 0)
layout.addWidget(QLabel(_('History Rates: ')), 2, 0)
# Currency list
self.ccy_combo = QComboBox()
self.ccy_combo.currentIndexChanged.connect(self.on_ccy_combo_change)
self.populate_ccy_combo()
def on_change_ex(idx):
exchange = str(combo_ex.currentText())
if exchange != self.exchange.name():
self.set_exchange(exchange)
self.hist_checkbox_update()
def on_change_hist(checked):
if checked:
self.config.set_key('history_rates', 'checked')
self.get_historical_rates()
else:
self.config.set_key('history_rates', 'unchecked')
self.emit(SIGNAL('refresh_headers'))
def ok_clicked():
self.timeout = 0
self.ccy_combo = None
d.accept()
combo_ex = QComboBox()
combo_ex.addItems(sorted(self.exchanges.keys()))
combo_ex.setCurrentIndex(combo_ex.findText(self.config_exchange()))
combo_ex.currentIndexChanged.connect(on_change_ex)
self.hist_checkbox = QCheckBox()
self.hist_checkbox.stateChanged.connect(on_change_hist)
self.hist_checkbox_update()
ok_button = QPushButton(_("OK"))
ok_button.clicked.connect(lambda: ok_clicked())
layout.addWidget(self.ccy_combo,1,1)
layout.addWidget(combo_ex,0,1)
layout.addWidget(self.hist_checkbox,2,1)
layout.addWidget(ok_button,3,1)
return d.exec_()
def config_history(self):
return self.config.get('history_rates', 'unchecked') != 'unchecked'
def show_history(self):
return self.config_history() and self.ccy in self.exchange.history_ccys()
@hook
def history_tab_headers(self, headers):
if self.show_history():
headers.extend(['%s '%self.ccy + _('Amount'), '%s '%self.ccy + _('Balance')])
@hook
def history_tab_update_begin(self):
self.history_used_spot = False
@hook
def history_tab_update(self, tx, entry):
if not self.show_history():
return
tx_hash, conf, value, timestamp, balance = tx
if conf <= 0:
date = timestamp_to_datetime(time.time())
else:
date = timestamp_to_datetime(timestamp)
for amount in [value, balance]:
text = self.historical_value_str(amount, date)
entry.append(text)
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# Copyright (C) 2004-2005 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2006-2007 Christian Boos <cboos@neuf.fr>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from StringIO import StringIO
from datetime import datetime, timedelta
import re
from genshi.builder import tag
from trac import __version__
from trac.attachment import AttachmentModule
from trac.config import ExtensionOption
from trac.core import *
from trac.mimeview import Context
from trac.perm import IPermissionRequestor
from trac.resource import *
from trac.search import ISearchSource, search_to_sql, shorten_result
from trac.util import as_bool
from trac.util.datefmt import parse_date, utc, to_utimestamp, \
get_datetime_format_hint, format_date, \
format_datetime, from_utimestamp
from trac.util.text import CRLF
from trac.util.translation import _, tag_
from trac.ticket import Milestone, Ticket, TicketSystem, group_milestones
from trac.ticket.query import QueryModule
from trac.timeline.api import ITimelineEventProvider
from trac.web import IRequestHandler, RequestDone
from trac.web.chrome import add_link, add_notice, add_script, add_stylesheet, \
add_warning, Chrome, INavigationContributor
from trac.wiki.api import IWikiSyntaxProvider
from trac.wiki.formatter import format_to
class ITicketGroupStatsProvider(Interface):
    """Extension point interface for components able to compute
    statistics (e.g. completion progress) for a group of tickets,
    as displayed in the roadmap and milestone progress bars.
    """
    def get_ticket_group_stats(ticket_ids):
        """ Gather statistics on a group of tickets.
        This method returns a valid TicketGroupStats object.
        """
class TicketGroupStats(object):
    """Statistics for a group of tickets, divided into intervals.

    Each interval corresponds to one division of a progress bar;
    `refresh_calcs` turns the raw counts into percentages and ensures
    they sum to exactly 100 whenever any interval counts towards
    overall completion.
    """

    def __init__(self, title, unit):
        """Creates a new TicketGroupStats object.

        `title` is the display name of this group of stats (e.g.
        'ticket status').
        `unit` is the units for these stats in plural form, e.g. _('hours')
        """
        self.title = title
        self.unit = unit
        self.count = 0
        self.qry_args = {}
        self.intervals = []
        self.done_count = 0
        self.done_percent = 0

    def add_interval(self, title, count, qry_args, css_class,
                     overall_completion=None, countsToProg=0):
        """Adds a division to this stats' group's progress bar.

        `title` is the display name (eg 'closed', 'spent effort') of this
        interval that will be displayed in front of the unit name.
        `count` is the number of units in the interval.
        `qry_args` is a dict of extra params that will yield the subset of
        tickets in this interval on a query.
        `css_class` is the css class that will be used to display the
        division.
        `overall_completion` can be set to true to make this interval count
        towards overall completion of this group of tickets.
        (Warning: `countsToProg` argument will be removed in 0.12, use
        `overall_completion` instead)
        """
        # Honor the deprecated `countsToProg` flag when the caller did
        # not pass `overall_completion` explicitly.
        completion = (countsToProg if overall_completion is None
                      else overall_completion)
        self.intervals.append({
            'title': title,
            'count': count,
            'qry_args': qry_args,
            'css_class': css_class,
            'percent': None,
            'countsToProg': completion,
            'overall_completion': completion,
        })
        self.count += count

    def refresh_calcs(self):
        """Recompute every interval's percentage and the overall totals."""
        if self.count < 1:
            return
        self.done_percent = 0
        self.done_count = 0
        percent_sum = 0
        for interval in self.intervals:
            interval['percent'] = round(float(interval['count'] /
                                              float(self.count) * 100))
            percent_sum += interval['percent']
            if interval['overall_completion']:
                self.done_percent += interval['percent']
                self.done_count += interval['count']
        # Rounding can leave the percentages summing to slightly more or
        # less than 100.  Compensate by fudging a single interval: the
        # smallest non-zero one when short, the largest when overshooting.
        if self.done_count and percent_sum != 100:
            fudge = 100 - percent_sum
            candidates = sorted((iv for iv in self.intervals
                                 if iv['percent']),
                                key=lambda iv: iv['percent'],
                                reverse=fudge < 0)
            candidates[0]['percent'] += fudge
            self.done_percent += fudge
class DefaultTicketGroupStatsProvider(Component):
    """Configurable ticket group statistics provider.
    Example configuration (which is also the default):
    {{{
    [milestone-groups]
    # Definition of a 'closed' group:
    closed = closed
    # The definition consists in a comma-separated list of accepted status.
    # Also, '*' means any status and could be used to associate all remaining
    # states to one catch-all group.
    # Qualifiers for the above group (the group must have been defined first):
    closed.order = 0 # sequence number in the progress bar
    closed.query_args = group=resolution # optional extra param for the query
    closed.overall_completion = true # count for overall completion
    # Definition of an 'active' group:
    active = * # one catch-all group is allowed
    active.order = 1
    active.css_class = open # CSS class for this interval
    active.label = in progress # Displayed name for the group,
                               # needed for non-ascii group names
    # The CSS class can be one of: new (yellow), open (no color) or
    # closed (green). New styles can easily be added using the following
    # selector: `table.progress td.<class>`
    }}}
    """
    implements(ITicketGroupStatsProvider)
    # Fallback used when trac.ini has no [milestone-groups] section.
    default_milestone_groups = [
        {'name': 'closed', 'status': 'closed',
         'query_args': 'group=resolution', 'overall_completion': 'true'},
        {'name': 'active', 'status': '*', 'css_class': 'open'}
        ]
    def _get_ticket_groups(self):
        """Returns a list of dict describing the ticket groups
        in the expected order of appearance in the milestone progress bars.
        """
        if 'milestone-groups' in self.config:
            groups = {}
            order = 0
            for groupname, value in self.config.options('milestone-groups'):
                qualifier = 'status'
                # 'name.qualifier = value' options refine the group 'name';
                # a bare 'name = value' option sets its status list.
                if '.' in groupname:
                    groupname, qualifier = groupname.split('.', 1)
                group = groups.setdefault(groupname, {'name': groupname,
                                                      'order': order})
                group[qualifier] = value
                # Keep the implicit ordering monotonic even when a group
                # specifies an explicit `order` qualifier.
                order = max(order, int(group['order'])) + 1
            return [group for group in sorted(groups.values(),
                                              key=lambda g: int(g['order']))]
        else:
            return self.default_milestone_groups
    def get_ticket_group_stats(self, ticket_ids):
        """Return a `TicketGroupStats` with one interval per configured
        milestone group, counting how many of `ticket_ids` fall in each
        group's set of statuses.  Raises `TracError` on inconsistent
        group configuration (two catch-alls, or a status reused).
        """
        total_cnt = len(ticket_ids)
        all_statuses = set(TicketSystem(self.env).get_all_status())
        status_cnt = {}
        for s in all_statuses:
            status_cnt[s] = 0
        if total_cnt:
            db = self.env.get_db_cnx()
            cursor = db.cursor()
            # NOTE(review): the ids are interpolated directly into the SQL;
            # this is only safe because each one is str()-converted from a
            # ticket id -- confirm callers never pass untrusted values.
            str_ids = [str(x) for x in sorted(ticket_ids)]
            cursor.execute("SELECT status, count(status) FROM ticket "
                           "WHERE id IN (%s) GROUP BY status" %
                           ",".join(str_ids))
            for s, cnt in cursor:
                status_cnt[s] = cnt
        stat = TicketGroupStats(_('ticket status'), _('tickets'))
        remaining_statuses = set(all_statuses)
        groups = self._get_ticket_groups()
        catch_all_group = None
        # we need to go through the groups twice, so that the catch up group
        # doesn't need to be the last one in the sequence
        for group in groups:
            status_str = group['status'].strip()
            if status_str == '*':
                if catch_all_group:
                    raise TracError(_(
                        "'%(group1)s' and '%(group2)s' milestone groups "
                        "both are declared to be \"catch-all\" groups. "
                        "Please check your configuration.",
                        group1=group['name'], group2=catch_all_group['name']))
                catch_all_group = group
            else:
                group_statuses = set([s.strip()
                                      for s in status_str.split(',')]) \
                                      & all_statuses
                if group_statuses - remaining_statuses:
                    raise TracError(_(
                        "'%(groupname)s' milestone group reused status "
                        "'%(status)s' already taken by other groups. "
                        "Please check your configuration.",
                        groupname=group['name'],
                        status=', '.join(group_statuses - remaining_statuses)))
                else:
                    remaining_statuses -= group_statuses
                group['statuses'] = group_statuses
        if catch_all_group:
            catch_all_group['statuses'] = remaining_statuses
        # Second pass: build one progress-bar interval per group.
        for group in groups:
            group_cnt = 0
            query_args = {}
            for s, cnt in status_cnt.iteritems():
                if s in group['statuses']:
                    group_cnt += cnt
                    query_args.setdefault('status', []).append(s)
            # Fold the optional 'k1=v1,k2=v2' query_args string into the
            # per-interval query parameters.
            for arg in [kv for kv in group.get('query_args', '').split(',')
                        if '=' in kv]:
                k, v = [a.strip() for a in arg.split('=', 1)]
                query_args.setdefault(k, []).append(v)
            stat.add_interval(group.get('label', group['name']),
                              group_cnt, query_args,
                              group.get('css_class', group['name']),
                              as_bool(group.get('overall_completion')))
        stat.refresh_calcs()
        return stat
def get_ticket_stats(provider, tickets):
    """Compute group statistics for `tickets` using `provider`.

    `tickets` is a sequence of dicts each carrying an 'id' key, as
    produced by `get_tickets_for_milestone`.
    """
    ticket_ids = [ticket['id'] for ticket in tickets]
    return provider.get_ticket_group_stats(ticket_ids)
def get_tickets_for_milestone(env, db, milestone, field='component'):
    """Retrieve id, status and the value of `field` for every ticket of
    `milestone`, ordered by that field.

    Built-in fields are selected straight from the ticket table; any
    other field name is fetched through an outer join on ticket_custom,
    so tickets without a value for it are still returned.
    """
    cursor = db.cursor()
    builtin_fields = [f['name'] for f in TicketSystem(env).get_ticket_fields()
                      if not f.get('custom')]
    if field in builtin_fields:
        cursor.execute("SELECT id,status,%s FROM ticket WHERE milestone=%%s "
                       "ORDER BY %s" % (field, field), (milestone,))
    else:
        cursor.execute("SELECT id,status,value FROM ticket LEFT OUTER "
                       "JOIN ticket_custom ON (id=ticket AND name=%s) "
                       "WHERE milestone=%s ORDER BY value", (field, milestone))
    return [{'id': tkt_id, 'status': status, field: fieldval}
            for tkt_id, status, fieldval in cursor]
def apply_ticket_permissions(env, req, tickets):
    """Apply permissions to a set of milestone tickets as returned by
    get_tickets_for_milestone()."""
    viewable = []
    for ticket in tickets:
        if 'TICKET_VIEW' in req.perm('ticket', ticket['id']):
            viewable.append(ticket)
    return viewable
def milestone_stats_data(env, req, stat, name, grouped_by='component',
                         group=None):
    """Build the template data (stats object plus query links) for one
    milestone progress bar."""
    has_query = env[QueryModule] is not None
    def query_href(extra_args):
        # Without the query module there is nothing to link to.
        if not has_query:
            return None
        args = {'milestone': name, grouped_by: group, 'group': 'status'}
        args.update(extra_args)
        return req.href.query(args)
    data = {'stats': stat,
            'stats_href': query_href(stat.qry_args)}
    data['interval_hrefs'] = [query_href(interval['qry_args'])
                              for interval in stat.intervals]
    return data
class RoadmapModule(Component):
    """Renders the roadmap page (one progress bar per milestone) and an
    iCalendar export of milestones and the requesting user's tickets."""
    implements(INavigationContributor, IPermissionRequestor, IRequestHandler)
    stats_provider = ExtensionOption('roadmap', 'stats_provider',
                                     ITicketGroupStatsProvider,
                                     'DefaultTicketGroupStatsProvider',
        """Name of the component implementing `ITicketGroupStatsProvider`,
        which is used to collect statistics on groups of tickets for display
        in the roadmap views.""")
    # INavigationContributor methods
    def get_active_navigation_item(self, req):
        """The roadmap page highlights the 'roadmap' navigation entry."""
        return 'roadmap'
    def get_navigation_items(self, req):
        if 'ROADMAP_VIEW' in req.perm:
            yield ('mainnav', 'roadmap',
                   tag.a(_('Roadmap'), href=req.href.roadmap(), accesskey=3))
    # IPermissionRequestor methods
    def get_permission_actions(self):
        actions = ['MILESTONE_CREATE', 'MILESTONE_DELETE', 'MILESTONE_MODIFY',
                   'MILESTONE_VIEW', 'ROADMAP_VIEW']
        return ['ROADMAP_VIEW'] + [('ROADMAP_ADMIN', actions)]
    # IRequestHandler methods
    def match_request(self, req):
        return req.path_info == '/roadmap'
    def process_request(self, req):
        """Render the roadmap page, or delegate to `render_ics` when the
        'ics' format is requested."""
        req.perm.require('MILESTONE_VIEW')
        show = req.args.getlist('show')
        # Support the legacy 'show=all' query string value.
        if 'all' in show:
            show = ['completed']
        db = self.env.get_db_cnx()
        milestones = Milestone.select(self.env, 'completed' in show, db)
        if 'noduedate' in show:
            # NOTE(review): despite the flag name this *keeps* only
            # milestones that have a due date or are completed --
            # confirm the intended polarity.
            milestones = [m for m in milestones
                          if m.due is not None or m.completed]
        milestones = [m for m in milestones
                      if 'MILESTONE_VIEW' in req.perm(m.resource)]
        stats = []
        queries = []
        for milestone in milestones:
            tickets = get_tickets_for_milestone(self.env, db, milestone.name,
                                                'owner')
            tickets = apply_ticket_permissions(self.env, req, tickets)
            stat = get_ticket_stats(self.stats_provider, tickets)
            stats.append(milestone_stats_data(self.env, req, stat,
                                              milestone.name))
            #milestone['tickets'] = tickets # for the iCalendar view
        if req.args.get('format') == 'ics':
            # render_ics() sends the response itself and raises RequestDone.
            self.render_ics(req, db, milestones)
            return
        # FIXME should use the 'webcal:' scheme, probably
        username = None
        if req.authname and req.authname != 'anonymous':
            username = req.authname
        icshref = req.href.roadmap(show=show, user=username, format='ics')
        add_link(req, 'alternate', icshref, _('iCalendar'), 'text/calendar',
                 'ics')
        data = {
            'milestones': milestones,
            'milestone_stats': stats,
            'queries': queries,
            'show': show,
        }
        add_stylesheet(req, 'common/css/roadmap.css')
        return 'roadmap.html', data, None
    # Internal methods
    def render_ics(self, req, db, milestones):
        """Write an iCalendar rendering of `milestones` (VEVENTs) and of
        the requesting user's tickets in them (VTODOs) directly to the
        response, then raise `RequestDone`."""
        req.send_response(200)
        req.send_header('Content-Type', 'text/calendar;charset=utf-8')
        buf = StringIO()
        from trac.ticket import Priority
        priorities = {}
        for priority in Priority.select(self.env):
            priorities[priority.name] = float(priority.value)
        def get_priority(ticket):
            # Map the N Trac priority values linearly onto the iCalendar
            # 1..9 priority scale; returns None for unknown priorities.
            value = priorities.get(ticket['priority'])
            if value:
                return int((len(priorities) + 8 * value - 9) /
                           (len(priorities) - 1))
        def get_status(ticket):
            # Translate a ticket status/resolution into an iCalendar
            # VTODO STATUS value.
            status = ticket['status']
            if status == 'new' or status == 'reopened' and not ticket['owner']:
                return 'NEEDS-ACTION'
            elif status == 'assigned' or status == 'reopened':
                return 'IN-PROCESS'
            elif status == 'closed':
                if ticket['resolution'] == 'fixed':
                    return 'COMPLETED'
                else: return 'CANCELLED'
            else: return ''
        def escape_value(text):
            # Escape iCalendar special characters and fold newlines.
            s = ''.join(map(lambda c: (c in ';,\\') and '\\' + c or c, text))
            return '\\n'.join(re.split(r'[\r\n]+', s))
        def write_prop(name, value, params={}):
            # Emit one property, folding lines at 75 octets per RFC 2445.
            text = ';'.join([name] + [k + '=' + v for k, v in params.items()]) \
                 + ':' + escape_value(value)
            firstline = 1
            while text:
                if not firstline:
                    text = ' ' + text
                else: firstline = 0
                buf.write(text[:75] + CRLF)
                text = text[75:]
        def write_date(name, value, params={}):
            # NOTE(review): mutates its (shared) default dict, but always
            # sets the same key/value, so the mutation is benign.
            params['VALUE'] = 'DATE'
            write_prop(name, format_date(value, '%Y%m%d', req.tz), params)
        def write_utctime(name, value, params={}):
            write_prop(name, format_datetime(value, '%Y%m%dT%H%M%SZ', utc),
                       params)
        host = req.base_url[req.base_url.find('://') + 3:]
        user = req.args.get('user', 'anonymous')
        write_prop('BEGIN', 'VCALENDAR')
        write_prop('VERSION', '2.0')
        write_prop('PRODID', '-//Edgewall Software//NONSGML Trac %s//EN'
                   % __version__)
        write_prop('METHOD', 'PUBLISH')
        write_prop('X-WR-CALNAME',
                   self.env.project_name + ' - ' + _('Roadmap'))
        for milestone in milestones:
            uid = '<%s/milestone/%s@%s>' % (req.base_path, milestone.name,
                                            host)
            if milestone.due:
                write_prop('BEGIN', 'VEVENT')
                write_prop('UID', uid)
                write_utctime('DTSTAMP', milestone.due)
                write_date('DTSTART', milestone.due)
                write_prop('SUMMARY', _('Milestone %(name)s',
                                        name=milestone.name))
                write_prop('URL', req.abs_href.milestone(milestone.name))
                if milestone.description:
                    write_prop('DESCRIPTION', milestone.description)
                write_prop('END', 'VEVENT')
            tickets = get_tickets_for_milestone(self.env, db, milestone.name,
                                                field='owner')
            tickets = apply_ticket_permissions(self.env, req, tickets)
            for tkt_id in [ticket['id'] for ticket in tickets
                           if ticket['owner'] == user]:
                ticket = Ticket(self.env, tkt_id)
                write_prop('BEGIN', 'VTODO')
                write_prop('UID', '<%s/ticket/%s@%s>' % (req.base_path,
                                                         tkt_id, host))
                if milestone.due:
                    write_prop('RELATED-TO', uid)
                    write_date('DUE', milestone.due)
                write_prop('SUMMARY', _('Ticket #%(num)s: %(summary)s',
                                        num=ticket.id,
                                        summary=ticket['summary']))
                write_prop('URL', req.abs_href.ticket(ticket.id))
                write_prop('DESCRIPTION', ticket['description'])
                priority = get_priority(ticket)
                if priority:
                    write_prop('PRIORITY', unicode(priority))
                write_prop('STATUS', get_status(ticket))
                if ticket['status'] == 'closed':
                    # Use the time of the last status change as COMPLETED.
                    cursor = db.cursor()
                    cursor.execute("SELECT time FROM ticket_change "
                                   "WHERE ticket=%s AND field='status' "
                                   "ORDER BY time desc LIMIT 1",
                                   (ticket.id,))
                    row = cursor.fetchone()
                    if row:
                        write_utctime('COMPLETED', from_utimestamp(row[0]))
                write_prop('END', 'VTODO')
        write_prop('END', 'VCALENDAR')
        ics_str = buf.getvalue().encode('utf-8')
        req.send_header('Content-Length', len(ics_str))
        req.end_headers()
        req.write(ics_str)
        raise RequestDone
class MilestoneModule(Component):
implements(INavigationContributor, IPermissionRequestor, IRequestHandler,
ITimelineEventProvider, IWikiSyntaxProvider, IResourceManager,
ISearchSource)
stats_provider = ExtensionOption('milestone', 'stats_provider',
ITicketGroupStatsProvider,
'DefaultTicketGroupStatsProvider',
"""Name of the component implementing `ITicketGroupStatsProvider`,
which is used to collect statistics on groups of tickets for display
in the milestone views.""")
# INavigationContributor methods
def get_active_navigation_item(self, req):
return 'roadmap'
def get_navigation_items(self, req):
return []
# IPermissionRequestor methods
def get_permission_actions(self):
actions = ['MILESTONE_CREATE', 'MILESTONE_DELETE', 'MILESTONE_MODIFY',
'MILESTONE_VIEW']
return actions + [('MILESTONE_ADMIN', actions)]
# ITimelineEventProvider methods
def get_timeline_filters(self, req):
if 'MILESTONE_VIEW' in req.perm:
yield ('milestone', _('Milestones reached'))
def get_timeline_events(self, req, start, stop, filters):
if 'milestone' in filters:
milestone_realm = Resource('milestone')
db = self.env.get_db_cnx()
cursor = db.cursor()
# TODO: creation and (later) modifications should also be reported
cursor.execute("SELECT completed,name,description FROM milestone "
"WHERE completed>=%s AND completed<=%s",
(to_utimestamp(start), to_utimestamp(stop)))
for completed, name, description in cursor:
milestone = milestone_realm(id=name)
if 'MILESTONE_VIEW' in req.perm(milestone):
yield('milestone', from_utimestamp(completed),
'', (milestone, description)) # FIXME: author?
# Attachments
for event in AttachmentModule(self.env).get_timeline_events(
req, milestone_realm, start, stop):
yield event
def render_timeline_event(self, context, field, event):
milestone, description = event[3]
if field == 'url':
return context.href.milestone(milestone.id)
elif field == 'title':
return tag_('Milestone %(name)s completed',
name=tag.em(milestone.id))
elif field == 'description':
return format_to(self.env, None, context(resource=milestone),
description)
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'/milestone(?:/(.+))?$', req.path_info)
if match:
if match.group(1):
req.args['id'] = match.group(1)
return True
def process_request(self, req):
milestone_id = req.args.get('id')
req.perm('milestone', milestone_id).require('MILESTONE_VIEW')
add_link(req, 'up', req.href.roadmap(), _('Roadmap'))
db = self.env.get_db_cnx() # TODO: db can be removed
action = req.args.get('action', 'view')
try:
milestone = Milestone(self.env, milestone_id, db)
except ResourceNotFound:
if 'MILESTONE_CREATE' not in req.perm('milestone', milestone_id):
raise
milestone = Milestone(self.env, None, db)
milestone.name = milestone_id
action = 'edit' # rather than 'new' so that it works for POST/save
if req.method == 'POST':
if req.args.has_key('cancel'):
if milestone.exists:
req.redirect(req.href.milestone(milestone.name))
else:
req.redirect(req.href.roadmap())
elif action == 'edit':
return self._do_save(req, db, milestone)
elif action == 'delete':
self._do_delete(req, milestone)
elif action in ('new', 'edit'):
return self._render_editor(req, db, milestone)
elif action == 'delete':
return self._render_confirm(req, db, milestone)
if not milestone.name:
req.redirect(req.href.roadmap())
return self._render_view(req, db, milestone)
# Internal methods
def _do_delete(self, req, milestone):
req.perm(milestone.resource).require('MILESTONE_DELETE')
retarget_to = None
if req.args.has_key('retarget'):
retarget_to = req.args.get('target') or None
milestone.delete(retarget_to, req.authname)
add_notice(req, _('The milestone "%(name)s" has been deleted.',
name=milestone.name))
req.redirect(req.href.roadmap())
def _do_save(self, req, db, milestone):
if milestone.exists:
req.perm(milestone.resource).require('MILESTONE_MODIFY')
else:
req.perm(milestone.resource).require('MILESTONE_CREATE')
old_name = milestone.name
new_name = req.args.get('name')
milestone.description = req.args.get('description', '')
if 'due' in req.args:
due = req.args.get('duedate', '')
milestone.due = due and parse_date(due, req.tz, 'datetime') or None
else:
milestone.due = None
completed = req.args.get('completeddate', '')
retarget_to = req.args.get('target')
# Instead of raising one single error, check all the constraints and
# let the user fix them by going back to edit mode showing the warnings
warnings = []
def warn(msg):
add_warning(req, msg)
warnings.append(msg)
# -- check the name
# If the name has changed, check that the milestone doesn't already
# exist
# FIXME: the whole .exists business needs to be clarified
# (#4130) and should behave like a WikiPage does in
# this respect.
try:
new_milestone = Milestone(self.env, new_name, db)
if new_milestone.name == old_name:
pass # Creation or no name change
elif new_milestone.name:
warn(_('Milestone "%(name)s" already exists, please '
'choose another name.', name=new_milestone.name))
else:
warn(_('You must provide a name for the milestone.'))
except ResourceNotFound:
milestone.name = new_name
# -- check completed date
if 'completed' in req.args:
completed = completed and parse_date(completed, req.tz,
'datetime') or None
if completed and completed > datetime.now(utc):
warn(_('Completion date may not be in the future'))
else:
completed = None
milestone.completed = completed
if warnings:
return self._render_editor(req, db, milestone)
# -- actually save changes
if milestone.exists:
milestone.update()
# eventually retarget opened tickets associated with the milestone
if 'retarget' in req.args and completed:
@self.env.with_transaction()
def retarget(db):
cursor = db.cursor()
cursor.execute("UPDATE ticket SET milestone=%s WHERE "
"milestone=%s and status != 'closed'",
(retarget_to, old_name))
self.env.log.info('Tickets associated with milestone %s '
'retargeted to %s' % (old_name, retarget_to))
else:
milestone.insert()
add_notice(req, _('Your changes have been saved.'))
req.redirect(req.href.milestone(milestone.name))
def _render_confirm(self, req, db, milestone):
req.perm(milestone.resource).require('MILESTONE_DELETE')
milestones = [m for m in Milestone.select(self.env, db=db)
if m.name != milestone.name
and 'MILESTONE_VIEW' in req.perm(m.resource)]
data = {
'milestone': milestone,
'milestone_groups': group_milestones(milestones,
'TICKET_ADMIN' in req.perm)
}
return 'milestone_delete.html', data, None
def _render_editor(self, req, db, milestone):
# Suggest a default due time of 18:00 in the user's timezone
default_due = datetime.now(req.tz).replace(hour=18, minute=0, second=0,
microsecond=0)
if default_due <= datetime.now(utc):
default_due += timedelta(days=1)
data = {
'milestone': milestone,
'datetime_hint': get_datetime_format_hint(),
'default_due': default_due,
'milestone_groups': [],
}
if milestone.exists:
req.perm(milestone.resource).require('MILESTONE_MODIFY')
milestones = [m for m in Milestone.select(self.env, db=db)
if m.name != milestone.name
and 'MILESTONE_VIEW' in req.perm(m.resource)]
data['milestone_groups'] = group_milestones(milestones,
'TICKET_ADMIN' in req.perm)
else:
req.perm(milestone.resource).require('MILESTONE_CREATE')
Chrome(self.env).add_wiki_toolbars(req)
return 'milestone_edit.html', data, None
def _render_view(self, req, db, milestone):
milestone_groups = []
available_groups = []
component_group_available = False
ticket_fields = TicketSystem(self.env).get_ticket_fields()
# collect fields that can be used for grouping
for field in ticket_fields:
if field['type'] == 'select' and field['name'] != 'milestone' \
or field['name'] in ('owner', 'reporter'):
available_groups.append({'name': field['name'],
'label': field['label']})
if field['name'] == 'component':
component_group_available = True
# determine the field currently used for grouping
by = None
if component_group_available:
by = 'component'
elif available_groups:
by = available_groups[0]['name']
by = req.args.get('by', by)
tickets = get_tickets_for_milestone(self.env, db, milestone.name, by)
tickets = apply_ticket_permissions(self.env, req, tickets)
stat = get_ticket_stats(self.stats_provider, tickets)
context = Context.from_request(req, milestone.resource)
data = {
'context': context,
'milestone': milestone,
'attachments': AttachmentModule(self.env).attachment_data(context),
'available_groups': available_groups,
'grouped_by': by,
'groups': milestone_groups
}
data.update(milestone_stats_data(self.env, req, stat, milestone.name))
if by:
groups = []
for field in ticket_fields:
if field['name'] == by:
if 'options' in field:
groups = field['options']
if field.get('optional'):
groups.insert(0, '')
else:
cursor = db.cursor()
cursor.execute("""
SELECT DISTINCT COALESCE(%s,'') FROM ticket
ORDER BY COALESCE(%s,'')
""" % (by, by))
groups = [row[0] for row in cursor]
max_count = 0
group_stats = []
for group in groups:
values = group and (group,) or (None, group)
group_tickets = [t for t in tickets if t[by] in values]
if not group_tickets:
continue
gstat = get_ticket_stats(self.stats_provider, group_tickets)
if gstat.count > max_count:
max_count = gstat.count
group_stats.append(gstat)
gs_dict = {'name': group}
gs_dict.update(milestone_stats_data(self.env, req, gstat,
milestone.name, by, group))
milestone_groups.append(gs_dict)
for idx, gstat in enumerate(group_stats):
gs_dict = milestone_groups[idx]
percent = 1.0
if max_count:
percent = float(gstat.count) / float(max_count) * 100
gs_dict['percent_of_max_total'] = percent
add_stylesheet(req, 'common/css/roadmap.css')
add_script(req, 'common/js/folding.js')
return 'milestone_view.html', data, None
# IWikiSyntaxProvider methods
def get_wiki_syntax(self):
return []
def get_link_resolvers(self):
yield ('milestone', self._format_link)
def _format_link(self, formatter, ns, name, label):
name, query, fragment = formatter.split_link(name)
return self._render_link(formatter.context, name, label,
query + fragment)
def _render_link(self, context, name, label, extra=''):
try:
milestone = Milestone(self.env, name)
except TracError:
milestone = None
# Note: the above should really not be needed, `Milestone.exists`
# should simply be false if the milestone doesn't exist in the db
# (related to #4130)
href = context.href.milestone(name)
if milestone and milestone.exists:
if 'MILESTONE_VIEW' in context.perm(milestone.resource):
closed = milestone.is_completed and 'closed ' or ''
return tag.a(label, class_='%smilestone' % closed,
href=href + extra)
elif 'MILESTONE_CREATE' in context.perm('milestone', name):
return tag.a(label, class_='missing milestone', href=href + extra,
rel='nofollow')
return tag.a(label, class_='missing milestone')
# IResourceManager methods
def get_resource_realms(self):
yield 'milestone'
def get_resource_description(self, resource, format=None, context=None,
**kwargs):
desc = resource.id
if format != 'compact':
desc = _('Milestone %(name)s', name=resource.id)
if context:
return self._render_link(context, resource.id, desc)
else:
return desc
    def resource_exists(self, resource):
        """Return True if a milestone row named `resource.id` exists.

        >>> from trac.test import EnvironmentStub
        >>> env = EnvironmentStub()
        >>> m1 = Milestone(env)
        >>> m1.name = 'M1'
        >>> m1.insert()
        >>> MilestoneModule(env).resource_exists(Resource('milestone', 'M1'))
        True
        >>> MilestoneModule(env).resource_exists(Resource('milestone', 'M2'))
        False
        """
        db = self.env.get_read_db()
        cursor = db.cursor()
        cursor.execute("SELECT name FROM milestone WHERE name=%s",
                       (resource.id,))
        # any row at all means the milestone exists
        return bool(cursor.fetchall())
# ISearchSource methods
    def get_search_filters(self, req):
        """ISearchSource: offer a 'milestone' filter when the user may view."""
        if 'MILESTONE_VIEW' in req.perm:
            yield ('milestone', _('Milestones'))
    def get_search_results(self, req, terms, filters):
        """ISearchSource: yield matching milestones (and their attachments)
        as (href, title, date, author, excerpt) tuples."""
        if not 'milestone' in filters:
            return
        db = self.env.get_db_cnx()
        sql_query, args = search_to_sql(db, ['name', 'description'], terms)
        cursor = db.cursor()
        cursor.execute("SELECT name,due,completed,description "
                       "FROM milestone "
                       "WHERE " + sql_query, args)
        milestone_realm = Resource('milestone')
        for name, due, completed, description in cursor:
            milestone = milestone_realm(id=name)
            # per-milestone permission check; unviewable ones are skipped
            if 'MILESTONE_VIEW' in req.perm(milestone):
                # date shown: completion date, else due date, else "now"
                dt = (completed and from_utimestamp(completed) or
                      due and from_utimestamp(due) or datetime.now(utc))
                yield (get_resource_url(self.env, milestone, req.href),
                       get_resource_name(self.env, milestone), dt,
                       '', shorten_result(description, terms))
        # Attachments
        for result in AttachmentModule(self.env).get_search_results(
                req, milestone_realm, terms):
            yield result
| |
"""sstruct.py -- SuperStruct
Higher level layer on top of the struct module, enabling to
bind names to struct elements. The interface is similar to
struct, except the objects passed and returned are not tuples
(or argument lists), but dictionaries or instances.
Just like struct, we use fmt strings to describe a data
structure, except we use one line per element. Lines are
separated by newlines or semi-colons. Each line contains
either one of the special struct characters ('@', '=', '<',
'>' or '!') or a 'name:formatchar' combo (eg. 'myFloat:f').
Repetitions, as the struct module offers them, are not useful
in this context, except for fixed length strings (eg. 'myInt:5h'
is not allowed but 'myString:5s' is). The 'x' fmt character
(pad byte) is treated as 'special', since it is by definition
anonymous. Extra whitespace is allowed everywhere.
The sstruct module offers one feature that the "normal" struct
module doesn't: support for fixed point numbers. These are spelled
as "n.mF", where n is the number of bits before the point, and m
the number of bits after the point. Fixed point numbers get
converted to floats.
pack(fmt, object):
'object' is either a dictionary or an instance (or actually
anything that has a __dict__ attribute). If it is a dictionary,
its keys are used for names. If it is an instance, its
attributes are used to grab struct elements from. Returns
a string containing the data.
unpack(fmt, data, object=None)
If 'object' is omitted (or None), a new dictionary will be
returned. If 'object' is a dictionary, it will be used to add
struct elements to. If it is an instance (or in fact anything
that has a __dict__ attribute), an attribute will be added for
each struct element. In the latter two cases, 'object' itself
is returned.
unpack2(fmt, data, object=None)
Convenience function. Same as unpack, except data may be longer
than needed. The returned value is a tuple: (object, leftoverdata).
calcsize(fmt)
like struct.calcsize(), but uses our own fmt strings:
it returns the size of the data in bytes.
"""
from fontTools.misc.py23 import tobytes, tostr
from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi
import struct
import re
__version__ = "1.2"
__copyright__ = "Copyright 1998, Just van Rossum <just@letterror.com>"
class Error(Exception):
    """Raised for sstruct format-string or usage errors."""
    pass
def pack(fmt, obj):
    """Serialize `obj` (a dict, or anything with a __dict__) per `fmt`.

    Returns the packed binary data as a bytes string.
    """
    formatstring, names, fixes = getformat(fmt)
    values = obj if isinstance(obj, dict) else obj.__dict__
    elements = []
    for name in names:
        value = values[name]
        if name in fixes:
            # fixed point conversion
            value = fl2fi(value, fixes[name])
        elif isinstance(value, str):
            value = tobytes(value)
        elements.append(value)
    return struct.pack(formatstring, *elements)
def unpack(fmt, data, obj=None):
    """Deserialize `data` per `fmt` into `obj` (a new dict when None).

    Returns `obj` (the dict, or the instance whose __dict__ was filled).
    """
    if obj is None:
        obj = {}
    data = tobytes(data)
    formatstring, names, fixes = getformat(fmt)
    target = obj if isinstance(obj, dict) else obj.__dict__
    for name, value in zip(names, struct.unpack(formatstring, data)):
        if name in fixes:
            # fixed point conversion
            value = fi2fl(value, fixes[name])
        elif isinstance(value, bytes):
            try:
                value = tostr(value)
            except UnicodeDecodeError:
                pass
        target[name] = value
    return obj
def unpack2(fmt, data, obj=None):
    """Like unpack(), but `data` may be longer than needed.

    Returns a tuple (object, leftoverdata).
    """
    length = calcsize(fmt)
    return unpack(fmt, data[:length], obj), data[length:]
def calcsize(fmt):
    """Return the size in bytes of the data described by `fmt`."""
    # only the struct format string is needed; names/fixes are ignored
    formatstring = getformat(fmt)[0]
    return struct.calcsize(formatstring)
# matches "name:formatchar" (whitespace is allowed)
_elementRE = re.compile(
        r"\s*"                       # whitespace
        r"([A-Za-z_][A-Za-z_0-9]*)"  # name (python identifier)
        r"\s*:\s*"                   # whitespace : whitespace
        r"([cbBhHiIlLqQfd]|[0-9]+[ps]|"  # formatchar...
        r"([0-9]+)\.([0-9]+)(F))"    # ...formatchar (fixed point "n.mF")
        r"\s*"                       # whitespace
        r"(#.*)?$"                   # [comment] + end of string
        )

# matches the special struct fmt chars and 'x' (pad byte)
_extraRE = re.compile(r"\s*([x@=<>!])\s*(#.*)?$")

# matches an "empty" string, possibly containing whitespace and/or a comment
_emptyRE = re.compile(r"\s*(#.*)?$")

# total bit width of a fixed point number -> struct format char
_fixedpointmappings = {
        8: "b",
        16: "h",
        32: "l"}

# memoizes getformat() results, keyed by the fmt string
_formatcache = {}
def getformat(fmt):
    """Parse an sstruct fmt string; return (formatstring, names, fixes).

    `formatstring` is a plain struct-module format string, `names` lists the
    element names in order, and `fixes` maps fixed-point element names to
    their number of fractional bits.  Results are memoized in _formatcache.
    Raises Error on malformed input.
    """
    fmt = tostr(fmt, encoding="ascii")
    try:
        formatstring, names, fixes = _formatcache[fmt]
    except KeyError:
        lines = re.split("[\n;]", fmt)
        formatstring = ""
        names = []
        fixes = {}
        for line in lines:
            if _emptyRE.match(line):
                continue
            m = _extraRE.match(line)
            if m:
                formatchar = m.group(1)
                # byte-order/alignment chars are only legal at the start
                if formatchar != 'x' and formatstring:
                    raise Error("a special fmt char must be first")
            else:
                m = _elementRE.match(line)
                if not m:
                    raise Error("syntax error in fmt: '%s'" % line)
                name = m.group(1)
                names.append(name)
                formatchar = m.group(2)
                if m.group(3):
                    # fixed point
                    before = int(m.group(3))
                    after = int(m.group(4))
                    bits = before + after
                    if bits not in [8, 16, 32]:
                        raise Error("fixed point must be 8, 16 or 32 bits long")
                    formatchar = _fixedpointmappings[bits]
                    assert m.group(5) == "F"
                    fixes[name] = after
            formatstring = formatstring + formatchar
        _formatcache[fmt] = formatstring, names, fixes
    return formatstring, names, fixes
def _test():
    """Round-trip smoke test / usage demo: pack then unpack a sample struct."""
    fmt = """
        # comments are allowed
        > # big endian (see documentation for struct)
        # empty lines are allowed:

        ashort: h
        along: l
        abyte: b # a byte
        achar: c
        astr: 5s
        afloat: f; adouble: d # multiple "statements" are allowed
        afixed: 16.16F
    """

    print('size:', calcsize(fmt))

    class foo(object):
        pass

    i = foo()

    i.ashort = 0x7fff
    i.along = 0x7fffffff
    i.abyte = 0x7f
    i.achar = "a"
    i.astr = "12345"
    i.afloat = 0.5
    i.adouble = 0.5
    i.afixed = 1.5

    data = pack(fmt, i)
    print('data:', repr(data))
    print(unpack(fmt, data))
    i2 = foo()
    unpack(fmt, data, i2)
    print(vars(i2))
if __name__ == "__main__":
    # run the demo when executed directly
    _test()
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" BVT tests for routers
"""
# Import Local Modules
from marvin.codes import FAILED
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.cloudstackAPI import (stopRouter,
restartNetwork,
startRouter,
rebootRouter)
from marvin.lib.utils import (cleanup_resources,
get_process_status,
get_host_credentials)
from marvin.lib.base import (Account,
ServiceOffering,
VirtualMachine)
from marvin.lib.common import (get_domain,
get_zone,
get_template,
list_hosts,
list_routers,
list_networks,
list_zones,
list_vlan_ipranges)
from nose.plugins.attrib import attr
# Import System modules
import time
# nose: these tests may be distributed across worker processes
_multiprocess_shared_ = True
class TestRouterServices(cloudstackTestCase):
    @classmethod
    def setUpClass(cls):
        """One-time setup: create an account, service offering and a VM so
        the account's virtual router gets deployed."""
        testClient = super(TestRouterServices, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.services['mode'] = cls.zone.networktype
        template = get_template(
            cls.apiclient,
            cls.zone.id,
            cls.services["ostype"]
        )
        if template == FAILED:
            cls.fail(
                "get_template() failed to return template\
                with description %s" %
                cls.services["ostype"])
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id

        # Create an account, network, VM and IP addresses
        cls.account = Account.create(
            cls.apiclient,
            cls.services["account"],
            domainid=cls.domain.id
        )
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            cls.services["service_offerings"]["tiny"]
        )
        cls.vm_1 = VirtualMachine.create(
            cls.apiclient,
            cls.services["virtual_machine"],
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id
        )
        # resources removed by tearDownClass (VM goes with the account)
        cls.cleanup = [
            cls.account,
            cls.service_offering
        ]
        return
    @classmethod
    def tearDownClass(cls):
        """Delete the account and service offering created in setUpClass."""
        try:
            cls.apiclient = super(
                TestRouterServices,
                cls
            ).getClsTestClient().getApiClient()
            # Clean up, terminate the created templates
            cleanup_resources(cls.apiclient, cls.cleanup)

        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def setUp(self):
        """Per-test setup: refresh the API client and hypervisor info."""
        self.apiclient = self.testClient.getApiClient()
        self.hypervisor = self.testClient.getHypervisorInfo()
        return
    @attr(tags=["advanced", "basic", "sg", "smoke"], required_hardware="true")
    def test_01_router_internal_basic(self):
        """Test router internal basic zone
        """
        # Validate the following
        # 1. Router only does dhcp
        # 2. Verify that ports 67 (DHCP) and 53 (DNS) are open on UDP
        #    by checking status of dnsmasq process

        # Find router associated with user account
        if self.zone.networktype == "Basic":
            list_router_response = list_routers(
                self.apiclient,
                listall="true"
            )
        else:
            list_router_response = list_routers(
                self.apiclient,
                account=self.account.name,
                domainid=self.account.domainid
            )
        self.assertEqual(
            isinstance(list_router_response, list),
            True,
            "Check list response returns a valid list"
        )
        router = list_router_response[0]

        hosts = list_hosts(
            self.apiclient,
            zoneid=router.zoneid,
            type='Routing',
            state='Up',
            id=router.hostid
        )
        self.assertEqual(
            isinstance(hosts, list),
            True,
            "Check list host returns a valid list"
        )
        host = hosts[0]

        self.debug("Router ID: %s, state: %s" % (router.id, router.state))

        self.assertEqual(
            router.state,
            'Running',
            "Check list router response for router state"
        )

        # On vmware/hyperv the router is reached via the management server;
        # otherwise SSH goes through the host that runs the router VM.
        if self.hypervisor.lower() in ('vmware', 'hyperv'):
            result = get_process_status(
                self.apiclient.connection.mgtSvr,
                22,
                self.apiclient.connection.user,
                self.apiclient.connection.passwd,
                router.linklocalip,
                "service dnsmasq status",
                hypervisor=self.hypervisor
            )
        else:
            try:
                host.user, host.passwd = get_host_credentials(
                    self.config, host.ipaddress)
                result = get_process_status(
                    host.ipaddress,
                    22,
                    host.user,
                    host.passwd,
                    router.linklocalip,
                    "service dnsmasq status"
                )
            except KeyError:
                self.skipTest(
                    "Marvin configuration has no host credentials to\
                    check router services")
        res = str(result)
        self.debug("Dnsmasq process status: %s" % res)

        self.assertEqual(
            res.count("running"),
            1,
            "Check dnsmasq service is running or not"
        )
        return
@attr(tags=["advanced", "advancedns"], required_hardware="false")
def test_02_router_internal_adv(self):
"""Test router internal advanced zone
"""
# Validate the following
# 1. Router does dhcp, dns, gateway, LB, PF, FW
# 2. verify that dhcp, dns ports are open on UDP
# 3. dnsmasq, haproxy processes should be running
# Find router associated with user account
list_router_response = list_routers(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(list_router_response, list),
True,
"Check list response returns a valid list"
)
router = list_router_response[0]
hosts = list_hosts(
self.apiclient,
zoneid=router.zoneid,
type='Routing',
state='Up',
id=router.hostid
)
self.assertEqual(
isinstance(hosts, list),
True,
"Check list response returns a valid list"
)
host = hosts[0]
self.debug("Router ID: %s, state: %s" % (router.id, router.state))
self.assertEqual(
router.state,
'Running',
"Check list router response for router state"
)
if self.hypervisor.lower() in ('vmware', 'hyperv'):
result = get_process_status(
self.apiclient.connection.mgtSvr,
22,
self.apiclient.connection.user,
self.apiclient.connection.passwd,
router.linklocalip,
"service dnsmasq status",
hypervisor=self.hypervisor
)
else:
try:
host.user, host.passwd = get_host_credentials(
self.config, host.ipaddress)
result = get_process_status(
host.ipaddress,
22,
host.user,
host.passwd,
router.linklocalip,
"service dnsmasq status"
)
except KeyError:
self.skipTest(
"Marvin configuration has no host credentials\
to check router services")
res = str(result)
self.debug("Dnsmasq process status: %s" % res)
self.assertEqual(
res.count("running"),
1,
"Check dnsmasq service is running or not"
)
if self.hypervisor.lower() in ('vmware', 'hyperv'):
result = get_process_status(
self.apiclient.connection.mgtSvr,
22,
self.apiclient.connection.user,
self.apiclient.connection.passwd,
router.linklocalip,
"service haproxy status",
hypervisor=self.hypervisor
)
else:
try:
host.user, host.passwd = get_host_credentials(
self.config, host.ipaddress)
result = get_process_status(
host.ipaddress,
22,
host.user,
host.passwd,
router.linklocalip,
"service haproxy status"
)
except KeyError:
self.skipTest(
"Marvin configuration has no host credentials\
to check router services")
res = str(result)
self.assertEqual(
res.count("running"),
1,
"Check haproxy service is running or not"
)
self.debug("Haproxy process status: %s" % res)
return
@attr(
tags=[
"advanced",
"basic",
"advancedns",
"smoke",
"dvs"],
required_hardware="false")
def test_03_restart_network_cleanup(self):
"""Test restart network
"""
# Validate the following
# 1. When cleanup = true, router is destroyed and a new one created
# 2. New router should have the same public IP
# Find router associated with user account
if self.zone.networktype.lower() == "basic":
list_router_response = list_routers(
self.apiclient,
listall="true"
)
else:
list_router_response = list_routers(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(list_router_response, list),
True,
"Check list response returns a valid list"
)
router = list_router_response[0]
# Store old values before restart
if self.zone.networktype.lower == "basic":
old_publicip = router.guestipaddress
else:
old_publicip = router.publicip
timeout = 10
# Network should be in Implemented or Setup stage before restart
while True:
networks = list_networks(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(networks, list),
True,
"Check list response returns a valid list"
)
network = networks[0]
if network.state in ["Implemented", "Setup"]:
break
elif timeout == 0:
break
else:
time.sleep(self.services["sleep"])
timeout = timeout - 1
self.debug(
"Restarting network with ID: %s, Network state: %s" % (
network.id,
network.state
))
cmd = restartNetwork.restartNetworkCmd()
cmd.id = network.id
cmd.cleanup = True
self.apiclient.restartNetwork(cmd)
# Get router details after restart
if self.zone.networktype.lower() == "basic":
list_router_response = list_routers(
self.apiclient,
listall="true"
)
else:
list_router_response = list_routers(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(list_router_response, list),
True,
"Check list response returns a valid list"
)
router = list_router_response[0]
if self.zone.networktype.lower() == "basic":
new_publicip = router.guestipaddress
else:
new_publicip = router.publicip
self.assertEqual(
new_publicip,
old_publicip,
"Public IP of the router should remain same after network restart"
)
return
    @attr(tags=["advanced", "advancedns", "smoke", "dvs"], required_hardware="true")
    def test_04_restart_network_wo_cleanup(self):
        """Test restart network without cleanup
        """
        # Validate the following
        # 1. When cleanup = false, router is restarted and
        #    all services inside the router are restarted
        # 2. check 'uptime' to see if the actual restart happened

        timeout = 10
        # Network should be in Implemented or Setup stage before restart
        while True:
            networks = list_networks(
                self.apiclient,
                account=self.account.name,
                domainid=self.account.domainid
            )
            self.assertEqual(
                isinstance(networks, list),
                True,
                "Check list response returns a valid list"
            )
            network = networks[0]
            if network.state in ["Implemented", "Setup"]:
                break
            elif timeout == 0:
                break
            else:
                time.sleep(self.services["sleep"])
                timeout = timeout - 1

        self.debug(
            "Restarting network with ID: %s, Network state: %s" % (
                network.id,
                network.state
            ))
        cmd = restartNetwork.restartNetworkCmd()
        cmd.id = network.id
        cmd.cleanup = False
        self.apiclient.restartNetwork(cmd)

        # Get router details after restart
        list_router_response = list_routers(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.assertEqual(
            isinstance(list_router_response, list),
            True,
            "Check list response returns a valid list"
        )
        router = list_router_response[0]

        hosts = list_hosts(
            self.apiclient,
            zoneid=router.zoneid,
            type='Routing',
            state='Up',
            id=router.hostid
        )
        self.assertEqual(
            isinstance(hosts, list),
            True,
            "Check list response returns a valid list"
        )
        host = hosts[0]

        # On vmware/hyperv the router is reached via the management server;
        # otherwise SSH goes through the host that runs the router VM.
        if self.hypervisor.lower() in ('vmware', 'hyperv'):
            res = get_process_status(
                self.apiclient.connection.mgtSvr,
                22,
                self.apiclient.connection.user,
                self.apiclient.connection.passwd,
                router.linklocalip,
                "uptime",
                hypervisor=self.hypervisor
            )
        else:
            try:
                host.user, host.passwd = get_host_credentials(
                    self.config, host.ipaddress)
                res = get_process_status(
                    host.ipaddress,
                    22,
                    host.user,
                    host.passwd,
                    router.linklocalip,
                    "uptime"
                )
            except KeyError:
                self.skipTest(
                    "Marvin configuration has no host credentials\
                    to check router services")

        # res = 12:37:14 up 1 min,  0 users,  load average: 0.61, 0.22, 0.08
        # Split result to check the uptime
        result = res[0].split()
        self.debug("Router Uptime: %s" % result)
        self.assertEqual(
            str(result[1]),
            'up',
            "Check router is running or not"
        )
        # a just-restarted router reports uptime in seconds or a few minutes
        if str(result[3]) == "min,":
            self.assertEqual(
                (int(result[2]) < 3),
                True,
                "Check uptime is less than 3 mins or not"
            )
        else:
            self.assertEqual(
                str(result[3]),
                'sec,',
                "Check uptime is in seconds"
            )
        return
    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="false")
    def test_05_router_basic(self):
        """Test router basic setup
        """
        # Validate the following:
        # 1. verify that listRouters returned a 'Running' router
        # 2. router will have dns same as that seen in listZones
        # 3. router will have a guestIP and a linkLocalIp"

        list_router_response = list_routers(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.assertEqual(
            isinstance(list_router_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            len(list_router_response),
            0,
            "Check list router response"
        )
        # every router of the account must be running and match its zone DNS
        for router in list_router_response:
            self.assertEqual(
                router.state,
                'Running',
                "Check list router response for router state"
            )

            zones = list_zones(
                self.apiclient,
                id=router.zoneid
            )
            self.assertEqual(
                isinstance(zones, list),
                True,
                "Check list response returns a valid list"
            )
            zone = zones[0]

            self.assertEqual(
                router.dns1,
                zone.dns1,
                "Compare DNS1 of router and zone"
            )
            self.assertEqual(
                router.dns2,
                zone.dns2,
                "Compare DNS2 of router and zone"
            )
            self.assertEqual(
                hasattr(router, 'guestipaddress'),
                True,
                "Check whether router has guest IP field"
            )
            self.assertEqual(
                hasattr(router, 'linklocalip'),
                True,
                "Check whether router has link local IP field"
            )
        return
    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="false")
    def test_06_router_advanced(self):
        """Test router advanced setup
        """
        # Validate the following
        # 1. verify that listRouters returned a 'Running' router
        # 2. router will have dns and gateway as in listZones, listVlanIpRanges
        # 3. router will have guest,public and linklocal IPs

        list_router_response = list_routers(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.assertEqual(
            isinstance(list_router_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            len(list_router_response),
            0,
            "Check list router response"
        )
        # every router of the account must be running, match its zone's DNS
        # and its public VLAN range's gateway
        for router in list_router_response:
            self.assertEqual(
                router.state,
                'Running',
                "Check list router response for router state"
            )

            zones = list_zones(
                self.apiclient,
                id=router.zoneid
            )
            self.assertEqual(
                isinstance(zones, list),
                True,
                "Check list response returns a valid list"
            )
            zone = zones[0]

            self.assertEqual(
                router.dns1,
                zone.dns1,
                "Compare DNS1 of router and zone"
            )
            self.assertEqual(
                router.dns2,
                zone.dns2,
                "Compare DNS2 of router and zone"
            )
            self.assertEqual(
                hasattr(router, 'guestipaddress'),
                True,
                "Check whether router has guest IP field"
            )
            self.assertEqual(
                hasattr(router, 'linklocalip'),
                True,
                "Check whether router has link local IP field"
            )

            # Fetch corresponding ip ranges information from listVlanIpRanges
            ipranges_response = list_vlan_ipranges(
                self.apiclient,
                zoneid=router.zoneid
            )
            self.assertEqual(
                isinstance(ipranges_response, list),
                True,
                "Check list response returns a valid list"
            )
            iprange = ipranges_response[0]
            self.assertEqual(
                router.gateway,
                iprange.gateway,
                "Check gateway with that of corresponding IP range"
            )
        return
    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="false")
    def test_07_stop_router(self):
        """Test stop router
        """
        # Validate the following
        # 1. listRouter should report the router for the account as stopped

        list_router_response = list_routers(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.assertEqual(
            isinstance(list_router_response, list),
            True,
            "Check list response returns a valid list"
        )
        router = list_router_response[0]

        self.debug("Stopping the router with ID: %s" % router.id)
        # Stop the router
        cmd = stopRouter.stopRouterCmd()
        cmd.id = router.id
        self.apiclient.stopRouter(cmd)

        # List routers to check state of router
        router_response = list_routers(
            self.apiclient,
            id=router.id
        )
        self.assertEqual(
            isinstance(router_response, list),
            True,
            "Check list response returns a valid list"
        )
        # List router should have router in stopped state
        self.assertEqual(
            router_response[0].state,
            'Stopped',
            "Check list router response for router state"
        )
        return
    @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="false")
    def test_08_start_router(self):
        """Test start router
        """
        # Validate the following
        # 1. listRouter should report the router for the account as running
        #    again after startRouter (depends on test_07 having stopped it)

        list_router_response = list_routers(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.assertEqual(
            isinstance(list_router_response, list),
            True,
            "Check list response returns a valid list"
        )
        router = list_router_response[0]

        self.debug("Starting the router with ID: %s" % router.id)
        # Start the router
        cmd = startRouter.startRouterCmd()
        cmd.id = router.id
        self.apiclient.startRouter(cmd)

        # List routers to check state of router
        router_response = list_routers(
            self.apiclient,
            id=router.id
        )
        self.assertEqual(
            isinstance(router_response, list),
            True,
            "Check list response returns a valid list"
        )
        # List router should have router in running state
        self.assertEqual(
            router_response[0].state,
            'Running',
            "Check list router response for router state"
        )
        return
def verifyRouterResponse(self, router_response, ip):
if (router_response) and (isinstance(router_response, list)) and \
(router_response[0].state == "Running") and \
(router_response[0].publicip == ip):
return True
return False
    @attr(tags=["advanced", "advancedns", "smoke", "dvs"], required_hardware="false")
    def test_09_reboot_router(self):
        """Test reboot router
        """
        # Validate the following
        # 1. after rebootRouter, the router comes back 'Running' with the
        #    same public IP (polled with retries)

        list_router_response = list_routers(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.assertEqual(
            isinstance(list_router_response, list),
            True,
            "Check list response returns a valid list"
        )
        router = list_router_response[0]

        public_ip = router.publicip

        self.debug("Rebooting the router with ID: %s" % router.id)
        # Reboot the router
        cmd = rebootRouter.rebootRouterCmd()
        cmd.id = router.id
        self.apiclient.rebootRouter(cmd)

        # List routers to check state of router; poll up to 7 times with a
        # 10 s pause, succeeding as soon as the router is Running again
        retries_cnt = 6
        while retries_cnt >= 0:
            router_response = list_routers(
                self.apiclient,
                id=router.id
            )
            if self.verifyRouterResponse(router_response, public_ip):
                self.debug("Router is running successfully after reboot")
                return
            time.sleep(10)
            retries_cnt = retries_cnt - 1
        self.fail(
            "Router response after reboot is either is invalid\
            or in stopped state")
        return
| |
import warnings
import numpy as np
import pytest
from scipy import linalg
from sklearn.base import clone
from sklearn.model_selection import train_test_split
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import TempMemmap
from sklearn.utils.fixes import np_version, parse_version
from sklearn.exceptions import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model._least_angle import _lars_path_residues
from sklearn.linear_model import LassoLarsIC, lars_path
from sklearn.linear_model import Lars, LassoLars, LarsCV, LassoLarsCV
# TODO: use another dataset that has multiple drops
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# precomputed Gram matrix (X'X) and X'y, shared by the precomputed-path tests
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
n_samples = y.size
def test_simple():
    # Principle of Lars is to keep covariances tied and decreasing
    # also test verbose output
    from contextlib import redirect_stdout
    from io import StringIO

    # Capture the verbose output with a context manager instead of the
    # previous manual sys.stdout juggling, which redundantly restored
    # stdout both inside the try body and again in the finally clause.
    with redirect_stdout(StringIO()):
        _, _, coef_path_ = linear_model.lars_path(
            X, y, method="lar", verbose=10
        )

    for i, coef_ in enumerate(coef_path_.T):
        res = y - np.dot(X, coef_)
        cov = np.dot(X.T, res)
        C = np.max(abs(cov))
        eps = 1e-3
        # count covariances tied (within eps) to the maximum
        ocur = len(cov[C - eps < abs(cov)])
        if i < X.shape[1]:
            assert ocur == i + 1
        else:
            # no more than max_pred variables can go into the active set
            assert ocur == X.shape[1]
def test_simple_precomputed():
    # The same, with precomputed Gram matrix
    _, _, coef_path_ = linear_model.lars_path(X, y, Gram=G, method="lar")
    eps = 1e-3
    n_features = X.shape[1]
    for step, coef_ in enumerate(coef_path_.T):
        residual = y - np.dot(X, coef_)
        cov = np.dot(X.T, residual)
        C = np.max(abs(cov))
        # count covariances tied (within eps) to the maximum
        n_tied = len(cov[C - eps < abs(cov)])
        if step < n_features:
            assert n_tied == step + 1
        else:
            # no more than max_pred variables can go into the active set
            assert n_tied == n_features
def _assert_same_lars_path_result(output1, output2):
assert len(output1) == len(output2)
for o1, o2 in zip(output1, output2):
assert_allclose(o1, o2)
@pytest.mark.parametrize("method", ["lar", "lasso"])
@pytest.mark.parametrize("return_path", [True, False])
def test_lars_path_gram_equivalent(method, return_path):
    # lars_path_gram (Gram/Xy only, no raw X) must match lars_path
    _assert_same_lars_path_result(
        linear_model.lars_path_gram(
            Xy=Xy, Gram=G, n_samples=n_samples, method=method,
            return_path=return_path),
        linear_model.lars_path(
            X, y, Gram=G, method=method,
            return_path=return_path))
def test_x_none_gram_none_raises_value_error():
    # Test that lars_path with no X and Gram raises exception
    xy = np.dot(X.T, y)
    with pytest.raises(ValueError):
        linear_model.lars_path(None, y, Gram=None, Xy=xy)
def test_all_precomputed():
    # Test that lars_path with precomputed Gram and Xy gives the right answer
    G = np.dot(X.T, X)
    Xy = np.dot(X.T, y)
    # compare the full (alphas, active, coefs) outputs for both methods
    for method in "lar", "lasso":
        output = linear_model.lars_path(X, y, method=method)
        output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy,
                                            method=method)
        for expected, got in zip(output, output_pre):
            assert_array_almost_equal(expected, got)
@pytest.mark.filterwarnings('ignore: `rcond` parameter will change')
# numpy deprecation
def test_lars_lstsq():
    # Test that Lars gives least square solution at the end
    # of the path
    X1 = 3 * X  # use un-normalized dataset
    # alpha=0 means no regularization: the last path point is plain lstsq
    clf = linear_model.LassoLars(alpha=0.)
    clf.fit(X1, y)
    # Avoid FutureWarning about default value change when numpy >= 1.14
    rcond = None if np_version >= parse_version('1.14') else -1
    coef_lstsq = np.linalg.lstsq(X1, y, rcond=rcond)[0]
    assert_array_almost_equal(clf.coef_, coef_lstsq)
@pytest.mark.filterwarnings('ignore:`rcond` parameter will change')
# numpy deprecation
def test_lasso_gives_lstsq_solution():
    # Test that Lars Lasso gives least square solution at the end
    # of the path (regularization vanishes at the last alpha)
    _, _, coef_path_ = linear_model.lars_path(X, y, method='lasso')
    coef_lstsq = np.linalg.lstsq(X, y)[0]
    assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
    # Check that lars_path is robust to collinearity in input
    X = np.array([[3., 3., 1.],
                  [2., 2., 0.],
                  [1., 1., 0]])
    y = np.array([1., 0., 0])
    rng = np.random.RandomState(0)

    f = ignore_warnings
    _, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
    assert not np.isnan(coef_path_).any()
    residual = np.dot(X, coef_path_[:, -1]) - y
    assert (residual ** 2).sum() < 1.  # just make sure it's bounded

    # degenerate target (all zeros) must yield an all-zero coefficient path
    n_samples = 10
    X = rng.rand(n_samples, 5)
    y = np.zeros(n_samples)
    _, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
                                              copy_Gram=False, alpha_min=0.,
                                              method='lasso', verbose=0,
                                              max_iter=500)
    assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
    # ``return_path=False`` must return the last point of the full path
    single_alpha, _, single_coef = linear_model.lars_path(
        X, y, method="lar", return_path=False
    )
    full_alphas, _, full_path = linear_model.lars_path(X, y, method="lar")
    assert_array_almost_equal(single_coef, full_path[:, -1])
    assert single_alpha == full_alphas[-1]
def test_no_path_precomputed():
    # ``return_path=False`` with a precomputed Gram matrix must still
    # return the last point of the full path
    single_alpha, _, single_coef = linear_model.lars_path(
        X, y, method="lar", Gram=G, return_path=False
    )
    full_alphas, _, full_path = linear_model.lars_path(
        X, y, method="lar", Gram=G)
    assert_array_almost_equal(single_coef, full_path[:, -1])
    assert single_alpha == full_alphas[-1]
def test_no_path_all_precomputed():
    # Test that the ``return_path=False`` option with Gram and Xy remains
    # correct
    X, y = 3 * diabetes.data, diabetes.target
    G = np.dot(X.T, X)
    Xy = np.dot(X.T, y)
    alphas_, _, coef_path_ = linear_model.lars_path(
        X, y, method='lasso', Xy=Xy, Gram=G, alpha_min=0.9)
    alpha_, _, coef = linear_model.lars_path(
        X, y, method='lasso', Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
    # single returned point must equal the last point of the full path
    assert_array_almost_equal(coef, coef_path_[:, -1])
    assert alpha_ == alphas_[-1]
@pytest.mark.parametrize(
    'classifier',
    [linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC])
def test_lars_precompute(classifier):
    # Check for different values of precompute: all settings must produce
    # the same coefficients as an explicitly supplied Gram matrix
    G = np.dot(X.T, X)

    clf = classifier(precompute=G)
    output_1 = ignore_warnings(clf.fit)(X, y).coef_
    for precompute in [True, False, 'auto', None]:
        clf = classifier(precompute=precompute)
        output_2 = clf.fit(X, y).coef_
        assert_array_almost_equal(output_1, output_2, decimal=8)
def test_singular_matrix():
    # lars_path on a rank-deficient (singular) design must not blow up
    X_singular = np.array([[1, 1.], [1., 1.]])
    y_singular = np.array([1, 1])
    _, _, path = linear_model.lars_path(X_singular, y_singular)
    assert_array_almost_equal(path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
    # consistency test that checks that LARS Lasso is handling rank
    # deficient input data (with n_features < rank) in the same way
    # as coordinate descent Lasso
    y = [5, 0, 5]
    for X in (
            [[5, 0],
             [0, 5],
             [10, 10]],
            [[10, 10, 0],
             [1e-32, 0, 0],
             [0, 0, 1]]
    ):
        # To be able to use the coefs to compute the objective function,
        # we need to turn off normalization
        lars = linear_model.LassoLars(.1, normalize=False)
        coef_lars_ = lars.fit(X, y).coef_
        # Lasso objective: (1/2n)||y - Xw||^2 + alpha * ||w||_1, n = 3 here.
        obj_lars = (1. / (2. * 3.)
                    * linalg.norm(y - np.dot(X, coef_lars_)) ** 2
                    + .1 * linalg.norm(coef_lars_, 1))
        coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
        coef_cd_ = coord_descent.fit(X, y).coef_
        obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
                  + .1 * linalg.norm(coef_cd_, 1))
        # LARS must reach an objective no worse than CD (up to tolerance).
        assert obj_lars < obj_cd * (1. + 1e-8)
def test_lasso_lars_vs_lasso_cd():
    # Test that LassoLars and Lasso using coordinate descent give the
    # same results.
    X = 3 * diabetes.data
    alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
    lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
    for c, a in zip(lasso_path.T, alphas):
        # skip the unregularized endpoint (alpha == 0)
        if a == 0:
            continue
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert error < 0.01
    # similar test, with the classifiers
    for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
        clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
        clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
                                  normalize=False).fit(X, y)
        err = linalg.norm(clf1.coef_ - clf2.coef_)
        assert err < 1e-3
    # same test, with normalized data
    X = diabetes.data
    alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
    lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
                                  tol=1e-8)
    for c, a in zip(lasso_path.T, alphas):
        if a == 0:
            continue
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert error < 0.01
def test_lasso_lars_vs_lasso_cd_early_stopping():
    # Test that LassoLars and Lasso using coordinate descent give the
    # same results when early stopping is used.
    # (test : before, in the middle, and in the last part of the path)
    alphas_min = [10, 0.9, 1e-4]
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
        # Compare CD at the last alpha reached by the truncated LARS path.
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert error < 0.01
    # same test, with normalization
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(normalize=True, tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert error < 0.01
def test_lasso_lars_path_length():
    """Restarting LassoLars at a later alpha must truncate the path
    consistently, and alphas must be strictly decreasing."""
    full = linear_model.LassoLars().fit(X, y)
    truncated = linear_model.LassoLars(alpha=full.alphas_[2]).fit(X, y)
    assert_array_almost_equal(full.alphas_[:3], truncated.alphas_)
    # Also check that the sequence of alphas is always decreasing
    assert np.all(np.diff(full.alphas_) < 0)
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
    # Test lasso lars on a very ill-conditioned design, and check that
    # it does not blow up, and stays somewhat close to a solution given
    # by the coordinate descent solver
    # Also test that lasso_path (using lars_path output style) gives
    # the same result as lars_path and previous lasso output style
    # under these conditions.
    rng = np.random.RandomState(42)
    # Generate data: sparse ground truth with k active features out of m.
    n, m = 70, 100
    k = 5
    X = rng.randn(n, m)
    w = np.zeros((m, 1))
    i = np.arange(0, m)
    rng.shuffle(i)
    supp = i[:k]
    w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
    y = np.dot(X, w)
    sigma = 0.2
    y += sigma * rng.rand(*y.shape)
    y = y.squeeze()
    lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
    # Evaluate lasso_path on exactly the alphas LARS visited.
    _, lasso_coef2, _ = linear_model.lasso_path(X, y,
                                                alphas=lars_alphas,
                                                tol=1e-6,
                                                fit_intercept=False)
    assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
    # Create an ill-conditioned situation in which the LARS has to go
    # far in the path to converge, and check that LARS and coordinate
    # descent give the same answers
    # Note it used to be the case that Lars had to use the drop for good
    # strategy for this but this is no longer the case with the
    # equality_tolerance checks
    X = [[1e20, 1e20, 0],
         [-1e-32, 0, 0],
         [1, 1, 1]]
    y = [10, 10, 1]
    alpha = .0001

    def objective_function(coef):
        # Lasso objective: (1/2n)||y - Xw||^2 + alpha * ||w||_1
        return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
                + alpha * linalg.norm(coef, 1))
    lars = linear_model.LassoLars(alpha=alpha, normalize=False)
    # The degenerate design is expected to trigger this convergence warning.
    warning_message = (
        "Regressors in active set degenerate."
    )
    with pytest.warns(ConvergenceWarning, match=warning_message):
        lars.fit(X, y)
    lars_coef_ = lars.coef_
    lars_obj = objective_function(lars_coef_)
    coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False)
    cd_coef_ = coord_descent.fit(X, y).coef_
    cd_obj = objective_function(cd_coef_)
    # LARS must achieve an objective no worse than CD (up to tolerance).
    assert lars_obj < cd_obj * (1. + 1e-8)
def test_lars_add_features():
    """Lars on an ill-conditioned Hilbert matrix must keep coefficients
    finite (regression test for commit 6d2b4c)."""
    size = 5
    # Hilbert matrix: H[i, j] = 1 / (i + j + 1)
    hilbert = 1. / (np.arange(1, size + 1) + np.arange(size)[:, np.newaxis])
    model = linear_model.Lars(fit_intercept=False).fit(hilbert,
                                                       np.arange(size))
    assert np.all(np.isfinite(model.coef_))
def test_lars_n_nonzero_coefs(verbose=False):
    """``n_nonzero_coefs`` caps the active set and thus the path length."""
    model = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
    model.fit(X, y)
    assert len(model.coef_.nonzero()[0]) == 6
    # A path stopping at 6 non-zero coefs contains 6 + 1 alphas.
    assert len(model.alphas_) == 7
@ignore_warnings
def test_multitarget():
    # Assure that estimators receiving multidimensional y do the right thing
    # A multi-target fit must equal the per-column single-target fits.
    Y = np.vstack([y, y ** 2]).T
    n_targets = Y.shape[1]
    estimators = [
        linear_model.LassoLars(),
        linear_model.Lars(),
        # regression test for gh-1615
        linear_model.LassoLars(fit_intercept=False),
        linear_model.Lars(fit_intercept=False),
    ]
    for estimator in estimators:
        estimator.fit(X, Y)
        Y_pred = estimator.predict(X)
        # Snapshot multi-target attributes before refitting per column.
        alphas, active, coef, path = (estimator.alphas_, estimator.active_,
                                      estimator.coef_, estimator.coef_path_)
        for k in range(n_targets):
            estimator.fit(X, Y[:, k])
            y_pred = estimator.predict(X)
            assert_array_almost_equal(alphas[k], estimator.alphas_)
            assert_array_almost_equal(active[k], estimator.active_)
            assert_array_almost_equal(coef[k], estimator.coef_)
            assert_array_almost_equal(path[k], estimator.coef_path_)
            assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
    """Check that the CV-selected alpha grows as the sample count shrinks.

    This monotonicity is a property of this particular dataset and the
    chosen subset sizes, not a general guarantee.
    """
    previous_alpha = 0
    model = linear_model.LassoLarsCV()
    for n_samples in (400, 200, 100):
        subset_X = diabetes.data[:n_samples]
        subset_y = diabetes.target[:n_samples]
        model.fit(subset_X, subset_y)
        np.testing.assert_array_less(previous_alpha, model.alpha_)
        previous_alpha = model.alpha_
    assert not hasattr(model, 'n_nonzero_coefs')
def test_lars_cv_max_iter(recwarn):
    """Fitting LassoLarsCV with a small max_iter must emit no warnings,
    even with correlated features and strict floating-point error traps."""
    warnings.simplefilter('always')
    with np.errstate(divide='raise', invalid='raise'):
        X = diabetes.data
        y = diabetes.target
        rng = np.random.RandomState(42)
        x = rng.randn(len(y))
        X = diabetes.data
        X = np.c_[X, x, x]  # add correlated features
        lars_cv = linear_model.LassoLarsCV(max_iter=5, cv=5)
        lars_cv.fit(X, y)
    # Check that there is no warning in general and no ConvergenceWarning
    # in particular.
    # Materialize the string representation of the warning to get a more
    # informative error message in case of AssertionError.
    recorded_warnings = [str(w) for w in recwarn]
    assert recorded_warnings == []
def test_lasso_lars_ic():
    # Test the LassoLarsIC object by checking that
    # - some good features are selected.
    # - alpha_bic > alpha_aic
    # - n_nonzero_bic < n_nonzero_aic
    lars_bic = linear_model.LassoLarsIC('bic')
    lars_aic = linear_model.LassoLarsIC('aic')
    rng = np.random.RandomState(42)
    X = diabetes.data
    X = np.c_[X, rng.randn(X.shape[0], 5)]  # add 5 bad features
    lars_bic.fit(X, y)
    lars_aic.fit(X, y)
    nonzero_bic = np.where(lars_bic.coef_)[0]
    nonzero_aic = np.where(lars_aic.coef_)[0]
    # BIC penalizes model size more, so it should pick a larger alpha and
    # fewer features than AIC.
    assert lars_bic.alpha_ > lars_aic.alpha_
    assert len(nonzero_bic) < len(nonzero_aic)
    # BIC must not select any of the appended noise features.
    assert np.max(nonzero_bic) < diabetes.data.shape[1]
    # test error on unknown IC
    lars_broken = linear_model.LassoLarsIC('<unknown>')
    with pytest.raises(ValueError):
        lars_broken.fit(X, y)
def test_lars_path_readonly_data():
    # When using automated memory mapping on large input, the
    # fold data is in read-only mode
    # This is a non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/4597
    splitted_data = train_test_split(X, y, random_state=42)
    # TempMemmap exposes the arrays as read-only memory maps.
    with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
        # The following should not fail despite copy=False
        _lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
    # this is the main test for the positive parameter on the lars_path method
    # the estimator classes just make use of this function
    # we do the test on the diabetes dataset
    # ensure that we get negative coefficients when positive=False
    # and all positive when positive=True
    # for method 'lar' (default) and lasso

    # positive=True is only defined for the lasso variant; 'lar' must raise.
    err_msg = "Positive constraint not supported for 'lar' coding method."
    with pytest.raises(ValueError, match=err_msg):
        linear_model.lars_path(
            diabetes["data"], diabetes["target"], method="lar", positive=True
        )
    method = 'lasso'
    _, _, coefs = \
        linear_model.lars_path(X, y, return_path=True, method=method,
                               positive=False)
    assert coefs.min() < 0
    _, _, coefs = \
        linear_model.lars_path(X, y, return_path=True, method=method,
                               positive=True)
    assert coefs.min() >= 0
# now we gonna test the positive option for all estimator classes
# NOTE(review): these module-level dicts are duplicated as locals inside
# test_estimatorclasses_positive_constraint below, which shadow them — the
# module-level copies appear unused; confirm before removing.
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'LassoLars': {'alpha': 0.1},
                           'LassoLarsCV': {},
                           'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
    """``positive=True`` must be honoured by every LARS estimator class."""
    base_params = {'fit_intercept': False}
    extra_params = {'LassoLars': {'alpha': 0.1},
                    'LassoLarsCV': {},
                    'LassoLarsIC': {}}
    for name in extra_params:
        kwargs = base_params.copy()
        kwargs.update(extra_params[name])
        # Unconstrained fit produces at least one negative coefficient ...
        unconstrained = getattr(linear_model, name)(positive=False, **kwargs)
        unconstrained.fit(X, y)
        assert unconstrained.coef_.min() < 0
        # ... the constrained fit must not.
        constrained = getattr(linear_model, name)(positive=True, **kwargs)
        constrained.fit(X, y)
        assert min(constrained.coef_) >= 0
def test_lasso_lars_vs_lasso_cd_positive():
    # Test that LassoLars and Lasso using coordinate descent give the
    # same results when using the positive option
    # This test is basically a copy of the above with additional positive
    # option. However for the middle part, the comparison of coefficient values
    # for a range of alphas, we had to make an adaptations. See below.
    # not normalized data
    X = 3 * diabetes.data
    alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                   positive=True)
    lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
    for c, a in zip(lasso_path.T, alphas):
        # skip the unregularized endpoint (alpha == 0)
        if a == 0:
            continue
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert error < 0.01
    # The range of alphas chosen for coefficient comparison here is restricted
    # as compared with the above test without the positive option. This is due
    # to the circumstance that the Lars-Lasso algorithm does not converge to
    # the least-squares-solution for small alphas, see 'Least Angle Regression'
    # by Efron et al 2004. The coefficients are typically in congruence up to
    # the smallest alpha reached by the Lars-Lasso algorithm and start to
    # diverge thereafter. See
    # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
    for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
        clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
                                      normalize=False, positive=True).fit(X, y)
        clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
                                  normalize=False, positive=True).fit(X, y)
        err = linalg.norm(clf1.coef_ - clf2.coef_)
        assert err < 1e-3
    # normalized data
    X = diabetes.data
    alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                   positive=True)
    lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
                                  tol=1e-8, positive=True)
    for c, a in zip(lasso_path.T[:-1], alphas[:-1]):  # don't include alpha=0
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert error < 0.01
def test_lasso_lars_vs_R_implementation():
    # Test that sklearn LassoLars implementation agrees with the LassoLars
    # implementation available in R (lars library) under the following
    # scenarios:
    # 1) fit_intercept=False and normalize=False
    # 2) fit_intercept=True and normalize=True
    # Let's generate the data used in the bug report 7778
    y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822,
                  -19.42109366])
    x = np.array([[0.47299829, 0, 0, 0, 0],
                  [0.08239882, 0.85784863, 0, 0, 0],
                  [0.30114139, -0.07501577, 0.80895216, 0, 0],
                  [-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0],
                  [-0.69363927, 0.06754067, 0.18064514, -0.0803561,
                   0.40427291]])
    X = x.T
    ###########################################################################
    # Scenario 1: Let's compare R vs sklearn when fit_intercept=False and
    # normalize=False
    ###########################################################################
    #
    # The R result was obtained using the following code:
    #
    # library(lars)
    # model_lasso_lars = lars(X, t(y), type="lasso", intercept=FALSE,
    #                         trace=TRUE, normalize=FALSE)
    # r = t(model_lasso_lars$beta)
    #
    # Reference coefficient path from R (rows = features, cols = path steps).
    r = np.array([[0, 0, 0, 0, 0, -79.810362809499026, -83.528788732782829,
                   -83.777653739190711, -83.784156932888934,
                   -84.033390591756657],
                  [0, 0, 0, 0, -0.476624256777266, 0, 0, 0, 0,
                   0.025219751009936],
                  [0, -3.577397088285891, -4.702795355871871,
                   -7.016748621359461, -7.614898471899412, -0.336938391359179,
                   0, 0, 0.001213370600853, 0.048162321585148],
                  [0, 0, 0, 2.231558436628169, 2.723267514525966,
                   2.811549786389614, 2.813766976061531, 2.817462468949557,
                   2.817368178703816, 2.816221090636795],
                  [0, 0, -1.218422599914637, -3.457726183014808,
                   -4.021304522060710, -45.827461592423745,
                   -47.776608869312305,
                   -47.911561610746404, -47.914845922736234,
                   -48.039562334265717]])
    model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False,
                                              normalize=False)
    model_lasso_lars.fit(X, y)
    skl_betas = model_lasso_lars.coef_path_
    assert_array_almost_equal(r, skl_betas, decimal=12)
    ###########################################################################
    ###########################################################################
    # Scenario 2: Let's compare R vs sklearn when fit_intercept=True and
    # normalize=True
    #
    # Note: When normalize is equal to True, R returns the coefficients in
    # their original units, that is, they are rescaled back, whereas sklearn
    # does not do that, therefore, we need to do this step before comparing
    # their results.
    ###########################################################################
    #
    # The R result was obtained using the following code:
    #
    # library(lars)
    # model_lasso_lars2 = lars(X, t(y), type="lasso", intercept=TRUE,
    #                           trace=TRUE, normalize=TRUE)
    # r2 = t(model_lasso_lars2$beta)
    r2 = np.array([[0, 0, 0, 0, 0],
                   [0, 0, 0, 8.371887668009453, 19.463768371044026],
                   [0, 0, 0, 0, 9.901611055290553],
                   [0, 7.495923132833733, 9.245133544334507,
                    17.389369207545062, 26.971656815643499],
                   [0, 0, -1.569380717440311, -5.924804108067312,
                    -7.996385265061972]])
    model_lasso_lars2 = linear_model.LassoLars(alpha=0, normalize=True)
    model_lasso_lars2.fit(X, y)
    skl_betas2 = model_lasso_lars2.coef_path_
    # Let's rescale back the coefficients returned by sklearn before comparing
    # against the R result (read the note above)
    temp = X - np.mean(X, axis=0)
    normx = np.sqrt(np.sum(temp ** 2, axis=0))
    skl_betas2 /= normx[:, np.newaxis]
    assert_array_almost_equal(r2, skl_betas2, decimal=12)
    ###########################################################################
###########################################################################
@pytest.mark.parametrize('copy_X', [True, False])
def test_lasso_lars_copyX_behaviour(copy_X):
    """
    Test that user input regarding copy_X is not being overridden (it was until
    at least version 0.21)
    """
    lasso_lars = LassoLarsIC(copy_X=copy_X, precompute=False)
    rng = np.random.RandomState(0)
    X = rng.normal(0, 1, (100, 5))
    X_copy = X.copy()
    y = X[:, 2]
    lasso_lars.fit(X, y)
    # copy_X=True must leave X untouched; copy_X=False may modify it in place.
    assert copy_X == np.array_equal(X, X_copy)
@pytest.mark.parametrize('copy_X', [True, False])
def test_lasso_lars_fit_copyX_behaviour(copy_X):
    """
    Test that user input to .fit for copy_X overrides default __init__ value
    """
    lasso_lars = LassoLarsIC(precompute=False)
    rng = np.random.RandomState(0)
    X = rng.normal(0, 1, (100, 5))
    X_copy = X.copy()
    y = X[:, 2]
    lasso_lars.fit(X, y, copy_X=copy_X)
    # The per-fit copy_X must decide whether X was modified in place.
    assert copy_X == np.array_equal(X, X_copy)
@pytest.mark.parametrize('est', (LassoLars(alpha=1e-3), Lars()))
def test_lars_with_jitter(est):
    # Test that a small amount of jitter helps stability,
    # using example provided in issue #2746
    X = np.array([[0.0, 0.0, 0.0, -1.0, 0.0],
                  [0.0, -1.0, 0.0, 0.0, 0.0]])
    y = [-2.5, -2.5]
    expected_coef = [0, 2.5, 0, 2.5, 0]
    # set to fit_intercept to False since target is constant and we want check
    # the value of coef. coef would be all zeros otherwise.
    est.set_params(fit_intercept=False)
    est_jitter = clone(est).set_params(jitter=10e-8, random_state=0)
    est.fit(X, y)
    est_jitter.fit(X, y)
    # Jitter must change the solution materially and land on the expected one.
    assert np.mean((est.coef_ - est_jitter.coef_)**2) > .1
    np.testing.assert_allclose(est_jitter.coef_, expected_coef, rtol=1e-3)
def test_X_none_gram_not_none():
    # Supplying a precomputed Gram without X must raise a clear ValueError.
    with pytest.raises(ValueError,
                       match="X cannot be None if Gram is not None"):
        lars_path(X=None, y=[1], Gram='not None')
def test_copy_X_with_auto_gram():
    """Non-regression test for #17789: ``copy_X=True`` together with
    ``Gram='auto'`` must not overwrite the caller's X."""
    rng = np.random.RandomState(42)
    data = rng.rand(6, 6)
    target = rng.rand(6)
    snapshot = data.copy()
    linear_model.lars_path(data, target, Gram='auto', copy_X=True,
                           method='lasso')
    # X did not change
    assert_allclose(data, snapshot)
@pytest.mark.parametrize("LARS, has_coef_path, args",
                         ((Lars, True, {}),
                          (LassoLars, True, {}),
                          (LassoLarsIC, False, {}),
                          (LarsCV, True, {}),
                          # max_iter=5 is for avoiding ConvergenceWarning
                          (LassoLarsCV, True, {"max_iter": 5})))
@pytest.mark.parametrize("dtype", (np.float32, np.float64))
def test_lars_dtype_match(LARS, has_coef_path, args, dtype):
    # The test ensures that the fit method preserves input dtype
    rng = np.random.RandomState(0)
    X = rng.rand(6, 6).astype(dtype)
    y = rng.rand(6).astype(dtype)
    model = LARS(**args)
    model.fit(X, y)
    # coef_, coef_path_ (when present) and intercept_ must all keep dtype.
    assert model.coef_.dtype == dtype
    if has_coef_path:
        assert model.coef_path_.dtype == dtype
    assert model.intercept_.dtype == dtype
@pytest.mark.parametrize("LARS, has_coef_path, args",
                         ((Lars, True, {}),
                          (LassoLars, True, {}),
                          (LassoLarsIC, False, {}),
                          (LarsCV, True, {}),
                          # max_iter=5 is for avoiding ConvergenceWarning
                          (LassoLarsCV, True, {"max_iter": 5})))
def test_lars_numeric_consistency(LARS, has_coef_path, args):
    # The test ensures numerical consistency between trained coefficients
    # of float32 and float64.
    rtol = 1e-5
    atol = 1e-5
    rng = np.random.RandomState(0)
    X_64 = rng.rand(6, 6)
    y_64 = rng.rand(6)
    model_64 = LARS(**args).fit(X_64, y_64)
    model_32 = LARS(**args).fit(X_64.astype(np.float32),
                                y_64.astype(np.float32))
    # float32 fit must agree with float64 fit to within rtol/atol.
    assert_allclose(model_64.coef_, model_32.coef_, rtol=rtol, atol=atol)
    if has_coef_path:
        assert_allclose(model_64.coef_path_, model_32.coef_path_,
                        rtol=rtol, atol=atol)
    assert_allclose(model_64.intercept_, model_32.intercept_,
                    rtol=rtol, atol=atol)
| |
# coding: utf-8
from __future__ import absolute_import
from __future__ import print_function
import warnings
from ruamel.yaml.error import MarkedYAMLError, ReusedAnchorWarning
from ruamel.yaml.compat import utf8
from ruamel.yaml.events import (
StreamStartEvent, StreamEndEvent, MappingStartEvent, MappingEndEvent,
SequenceStartEvent, SequenceEndEvent, AliasEvent, ScalarEvent,
)
from ruamel.yaml.nodes import (
MappingNode, ScalarNode, SequenceNode,
)
# The imports below exist only for the mypy type comments; ``if False``
# keeps them off the runtime import path while static checkers still see them.
if False: # MYPY
    from typing import Any, Dict, Optional, List # NOQA
# Public API of this module.
__all__ = ['Composer', 'ComposerError']
class ComposerError(MarkedYAMLError):
    """Raised for composition-level problems (e.g. an undefined alias, or
    multiple documents where a single one was expected); the MarkedYAMLError
    base carries the stream positions."""
    pass
class Composer(object):
    """Composes parser events into a tree of scalar/sequence/mapping nodes,
    resolving anchors and aliases along the way.  Duplicate anchors only
    warn (ReusedAnchorWarning); the stricter error is kept commented out."""
    def __init__(self, loader=None):
        # type: (Any) -> None
        self.loader = loader
        # Back-link ourselves on the loader unless one is already attached.
        if self.loader is not None and getattr(self.loader, '_composer', None) is None:
            self.loader._composer = self
        self.anchors = {} # type: Dict[Any, Any]
    @property
    def parser(self):
        # type: () -> Any
        if hasattr(self.loader, 'typ'):
            # Bare attribute access on new-API loaders -- presumably triggers
            # lazy initialisation of the parser; TODO confirm.
            self.loader.parser # type: ignore
        return self.loader._parser # type: ignore
    @property
    def resolver(self):
        # type: () -> Any
        # assert self.loader._resolver is not None
        if hasattr(self.loader, 'typ'):
            # Same lazy-setup pattern as the ``parser`` property above.
            self.loader.resolver # type: ignore
        return self.loader._resolver # type: ignore
    def check_node(self):
        # type: () -> Any
        """Return True if another document is available in the stream."""
        # Drop the STREAM-START event.
        if self.parser.check_event(StreamStartEvent):
            self.parser.get_event()
        # If there are more documents available?
        return not self.parser.check_event(StreamEndEvent)
    def get_node(self):
        # type: () -> Any
        """Compose and return the root node of the next document (None at
        end of stream)."""
        # Get the root node of the next document.
        if not self.parser.check_event(StreamEndEvent):
            return self.compose_document()
    def get_single_node(self):
        # type: () -> Any
        """Compose exactly one document; raise ComposerError if the stream
        contains more than one."""
        # Drop the STREAM-START event.
        self.parser.get_event()
        # Compose a document if the stream is not empty.
        document = None # type: Any
        if not self.parser.check_event(StreamEndEvent):
            document = self.compose_document()
        # Ensure that the stream contains no more documents.
        if not self.parser.check_event(StreamEndEvent):
            event = self.parser.get_event()
            raise ComposerError(
                "expected a single document in the stream",
                document.start_mark, "but found another document",
                event.start_mark)
        # Drop the STREAM-END event.
        self.parser.get_event()
        return document
    def compose_document(self):
        # NOTE(review): type comment declares one argument, but the method
        # takes none besides self -- confirm before fixing.
        # type: (Any) -> Any
        """Compose one document's node tree; anchors are document-scoped,
        so the anchor map is reset afterwards."""
        # Drop the DOCUMENT-START event.
        self.parser.get_event()
        # Compose the root node.
        node = self.compose_node(None, None)
        # Drop the DOCUMENT-END event.
        self.parser.get_event()
        self.anchors = {}
        return node
    def compose_node(self, parent, index):
        # type: (Any, Any) -> Any
        """Dispatch on the next event and compose the matching node kind.

        An alias returns the previously composed anchored node; a reused
        anchor only emits a warning (see commented-out error below).
        """
        if self.parser.check_event(AliasEvent):
            event = self.parser.get_event()
            alias = event.anchor
            if alias not in self.anchors:
                raise ComposerError(
                    None, None, "found undefined alias %r"
                    % utf8(alias), event.start_mark)
            return self.anchors[alias]
        event = self.parser.peek_event()
        anchor = event.anchor
        if anchor is not None: # have an anchor
            if anchor in self.anchors:
                # raise ComposerError(
                #     "found duplicate anchor %r; first occurrence"
                #     % utf8(anchor), self.anchors[anchor].start_mark,
                #     "second occurrence", event.start_mark)
                ws = "\nfound duplicate anchor {!r}\nfirst occurrence {}\nsecond occurrence "\
                    "{}".format(
                        (anchor), self.anchors[anchor].start_mark, event.start_mark)
                warnings.warn(ws, ReusedAnchorWarning)
        self.resolver.descend_resolver(parent, index)
        if self.parser.check_event(ScalarEvent):
            node = self.compose_scalar_node(anchor)
        elif self.parser.check_event(SequenceStartEvent):
            node = self.compose_sequence_node(anchor)
        elif self.parser.check_event(MappingStartEvent):
            node = self.compose_mapping_node(anchor)
        # NOTE(review): if none of the three checks above matched, ``node``
        # would be unbound here -- presumably the parser guarantees one of
        # them at this point; confirm.
        self.resolver.ascend_resolver()
        return node
    def compose_scalar_node(self, anchor):
        # type: (Any) -> Any
        """Build a ScalarNode from the next event, resolving an implicit
        ('!' or missing) tag and registering the anchor if present."""
        event = self.parser.get_event()
        tag = event.tag
        if tag is None or tag == u'!':
            tag = self.resolver.resolve(ScalarNode, event.value, event.implicit)
        node = ScalarNode(tag, event.value,
                          event.start_mark, event.end_mark, style=event.style,
                          comment=event.comment)
        if anchor is not None:
            self.anchors[anchor] = node
        return node
    def compose_sequence_node(self, anchor):
        # type: (Any) -> Any
        """Build a SequenceNode, composing children until SEQUENCE-END.

        The anchor is registered before children are composed so that
        self-referencing aliases inside the sequence resolve.
        """
        start_event = self.parser.get_event()
        tag = start_event.tag
        if tag is None or tag == u'!':
            tag = self.resolver.resolve(SequenceNode, None, start_event.implicit)
        node = SequenceNode(tag, [],
                            start_event.start_mark, None,
                            flow_style=start_event.flow_style,
                            comment=start_event.comment, anchor=anchor)
        if anchor is not None:
            self.anchors[anchor] = node
        index = 0
        while not self.parser.check_event(SequenceEndEvent):
            node.value.append(self.compose_node(node, index))
            index += 1
        end_event = self.parser.get_event()
        # For flow sequences, carry a comment on the closing event over to
        # the node.
        if node.flow_style is True and end_event.comment is not None:
            if node.comment is not None:
                print('Warning: unexpected end_event commment in sequence '
                      'node {}'.format(node.flow_style))
            node.comment = end_event.comment
        node.end_mark = end_event.end_mark
        self.check_end_doc_comment(end_event, node)
        return node
    def compose_mapping_node(self, anchor):
        # type: (Any) -> Any
        """Build a MappingNode, composing key/value pairs until MAPPING-END.

        Duplicate keys are allowed here (the duplicate-key error is kept
        commented out); pairs are stored as a list, not a dict.
        """
        start_event = self.parser.get_event()
        tag = start_event.tag
        if tag is None or tag == u'!':
            tag = self.resolver.resolve(MappingNode, None, start_event.implicit)
        node = MappingNode(tag, [],
                           start_event.start_mark, None,
                           flow_style=start_event.flow_style,
                           comment=start_event.comment, anchor=anchor)
        if anchor is not None:
            self.anchors[anchor] = node
        while not self.parser.check_event(MappingEndEvent):
            # key_event = self.parser.peek_event()
            item_key = self.compose_node(node, None)
            # if item_key in node.value:
            #     raise ComposerError("while composing a mapping",
            #             start_event.start_mark,
            #             "found duplicate key", key_event.start_mark)
            item_value = self.compose_node(node, item_key)
            # node.value[item_key] = item_value
            node.value.append((item_key, item_value))
        end_event = self.parser.get_event()
        if node.flow_style is True and end_event.comment is not None:
            node.comment = end_event.comment
        node.end_mark = end_event.end_mark
        self.check_end_doc_comment(end_event, node)
        return node
    def check_end_doc_comment(self, end_event, node):
        # type: (Any, Any) -> None
        """Move a pre-comment found on ``end_event`` onto ``node`` as a
        third (post) entry of its comment list."""
        if end_event.comment and end_event.comment[1]:
            # pre comments on an end_event, no following to move to
            if node.comment is None:
                node.comment = [None, None]
            assert not isinstance(node, ScalarEvent)
            # this is a post comment on a mapping node, add as third element
            # in the list
            node.comment.append(end_event.comment[1])
            end_event.comment[1] = None
| |
# Copyright 2014 Uri Laserson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
countdata.py
Functions for stats and analysis of count data.
"""
import sys
import numpy as np
import scipy as sp
# import scipy.stats
# ==============================================================================
# ======================
# = Count manipulation =
# ======================
def sample2counts(sample, categories=0):
    """Return a count vector from a list of category indices.

    ``sample`` holds indices of originating categories (e.g. draws from a
    multinomial), so with 8 categories valid values are 0-7.  If
    ``categories`` exceeds the number of bins implied by the data, the
    result is zero-padded to that length; otherwise the length is inferred
    from the largest observed index (+1).
    """
    tallies = np.bincount(sample)
    shortfall = categories - len(tallies)
    # Pad only when the caller asked for more categories than observed.
    if categories > 0 and shortfall > 0:
        tallies = np.append(tallies, np.zeros(shortfall))
    return tallies
def counts2sample(counts):
    """Expand a count vector into a consistent sample of category indices.

    Returns x with ``len(x) == sum(counts)`` where index ``i`` appears
    ``counts[i]`` times, in ascending index order (the inverse of
    ``sample2counts`` up to ordering).

    Fix: the original loop used the Python-2-only ``xrange``; ``np.repeat``
    performs the same expansion portably in one vectorized call.  Counts
    are cast to integer because ``sample2counts`` may hand back a
    float-padded vector.
    """
    counts = np.asarray(counts, dtype=np.int_)
    return np.repeat(np.arange(len(counts), dtype=np.int_), counts)
# ==============================================================================
# ========================
# = Percentile functions =
# ========================
def scoreatpercentile(values, rank):
    """Return the score at percentile ``rank`` of ``values``.

    Thin wrapper around ``scipy.stats.scoreatpercentile``.  Fix: the
    module-level ``import scipy.stats`` is commented out above, so
    ``sp.stats`` is only usable if some other module already loaded the
    submodule; importing it locally makes this function self-sufficient.
    """
    import scipy.stats
    return scipy.stats.scoreatpercentile(values, rank)
def percentileofscore(values, score):
    """Return the fraction of ``values`` strictly below ``score``.

    Uses the left insertion point of ``searchsorted`` on sorted data, so
    values equal to ``score`` are not counted (unlike scipy's histogram
    variant; see the commented-out alternative below this function).

    Fix: the original called ``values.sort()``, silently reordering the
    caller's array as a side effect; we now sort a copy instead.
    """
    ordered = np.sort(np.asarray(values))
    return float(ordered.searchsorted(score)) / len(ordered)
#The scipy version does some funny histogramming thing
#def percentileofscore(values,score):
# return stats.percentileofscore(values,score,kind='weak')
# ==============================================================================
# ============
# = q-values =
# ============
def qvalues(p,lambd=np.arange(0,0.91,0.05),method='bootstrap',B=100,smoothlog = False,robust=False):
    """Compute q-values using Storey method from array of p-values.
    Adapted from his R software.

    NOTE(review): this function uses Python-2-only syntax (``raise E, msg``
    and ``print`` statements) and the ``smoothlog`` parameter is never read.
    """
    # check validity of values
    p = np.array(p)
    if np.min(p)<0 or np.max(p)>1:
        raise Exception, "p-values not in valid range"
    m = len(p)
    # pi0 estimate at each tuning value lambda
    pi0 = np.zeros(len(lambd))
    for i in np.arange(len(lambd)):
        pi0[i] = np.mean(p >= lambd[i]) / (1-lambd[i])
    if method == 'bootstrap':
        minpi0 = np.min(pi0)
        mse = np.zeros(len(lambd))
        pi0_boot = np.zeros(len(lambd))
        # Bootstrap the p-values B times and pick the lambda minimising the
        # mean squared error against the minimum pi0.
        for i in np.arange( B ):
            p_boot = p[ np.random.randint(0,m,m) ]
            for j in np.arange( len(lambd) ):
                pi0_boot[j] = np.mean(p_boot >= lambd[j]) / (1-lambd[j])
            mse += (pi0_boot - minpi0)**2
        pi0 = np.min(pi0[mse == np.min(mse)])
        # NOTE(review): np.min(pi0, 1) passes 1 as the *axis* argument, not
        # a cap at 1.0 -- Storey's R code does min(pi0, 1); likely a bug,
        # confirm against the reference implementation.
        pi0 = np.min(pi0,1)
    elif method == 'smoother':
        # TODO
        print "Not implemented yet"
        return
    if pi0 <= 0:
        raise Exception, "The estimated pi0 <=0. May be problem with pvalues."
    # calculate estimated q-values
    u = np.argsort(p)
    v = qvalrank(p)
    qvalue = pi0*m*p/v
    if robust == True:
        qvalue = pi0*m*p/(v*(1-(1-p)**m))
    # Enforce monotonicity from the largest p-value downwards, capping at 1.
    qvalue[u[m-1]] = np.min( [qvalue[u[m-1]], 1] )
    for i in np.arange(m-2,-1,-1):
        qvalue[u[i]] = np.min( [qvalue[u[i]], qvalue[u[i+1]], 1] )
    return qvalue
def qvalrank(x):
    """Rank values so each element gets the count of values <= it, with
    ties sharing the same (maximal) rank."""
    order = np.argsort(x)
    levels = np.unique(x)                 # sorted distinct values
    level_idx = levels.searchsorted(x)    # which level each element is
    level_counts = np.bincount(level_idx)
    cumulative = np.cumsum(level_counts)
    # Rank of each element of sorted(x): cumulative count of its level.
    ranks_in_order = cumulative.repeat(level_counts)
    ranks = np.zeros(len(ranks_in_order), np.int_)
    ranks[order] = ranks_in_order         # scatter back to original order
    return ranks
# ==============================================================================
# ====================
# = Compute p-values =
# ====================
def pval_KalZtest(n1, N1, n2, N2):
    """Two-sided p-value for the Kal et al. Z-test on two count proportions.

    Kal et al, 1999, Mol Biol Cell 10:1859:
        Z = (p1 - p2) / sqrt(p0 * (1 - p0) * (1/N1 + 1/N2))
    with p1 = n1/N1, p2 = n2/N2 and pooled p0 = (n1+n2)/(N1+N2).
    """
    # Both counts zero: no evidence of any difference.
    if n1 == 0 and n2 == 0:
        return 1.0
    n1, N1, n2, N2 = (np.float64(v) for v in (n1, N1, n2, N2))
    pooled = (n1 + n2) / (N1 + N2)
    rate1 = n1 / N1
    rate2 = n2 / N2
    z = (rate1 - rate2) / np.sqrt(pooled * (1 - pooled)
                                  * ((1 / N1) + (1 / N2)))
    return 2 * sp.stats.norm.cdf(-abs(z))
def pval_KalZtest_vec(n1, N1, n2, N2):
    """Vectorized Kal Z-test over paired count vectors n1, n2 with totals
    N1, N2.

    Entries where both counts are zero receive a sentinel p-value of -1;
    their pooled proportion is forced to 0.5 only to avoid a 0/0 in the Z
    computation before being overwritten.
    """
    assert n1.shape[0] == n2.shape[0]
    both_zero = (n1 == 0) & (n2 == 0)
    pooled = (n1 + n2) / (float(N1) + N2)
    pooled[both_zero] = 0.5
    rate_diff = n1 / float(N1) - n2 / float(N2)
    z = rate_diff / np.sqrt(pooled * (1. - pooled) * ((1. / N1) + (1. / N2)))
    pvals = 2 * sp.stats.norm.cdf(-1 * abs(z))
    pvals[both_zero] = -1.
    return pvals
def pval_logRatioMC(n1,N1,n2,N2):
    """TODO: unimplemented stub (returns None) -- intended single-component
    analogue of pvals_logRatioMC below."""
    pass
def pvals_logRatioMC(counts1, counts2, B=1e6, pseudocount=1, verbose=False):
    """Compute component-wise p-values of difference between two count vectors
    using Monte Carlo sampling of log ratios.
    Null hypothesis is that data is from same multinomial. Parameters estimated
    by combining both count vectors. Zeros are handled by adding pseudocount to
    each element.
    The test statistic is log Ratio, which is computed for each component.
    Two random count vectors are generated, and and component-wise log ratio
    is computed. For each component, it is recorded whether the abs random log
    ratio was greater than or less than the abs test statistic value. This is
    performed B times. The absolute value makes the test two-sided and symmetric.
    The achieved significance level (ASL) is returned for each component.

    NOTE(review): Python-2-only syntax (``raise ValueError, msg``, ``xrange``);
    also ``B`` defaults to the float 1e6, while xrange/range require an
    integer -- confirm callers pass an int.  ``np.float`` is a deprecated/
    removed alias in modern NumPy.
    """
    if len(counts1) != len(counts2): raise ValueError, "Counts vectors have different lengths."
    counts1 = np.asarray(counts1, dtype=np.float)
    counts2 = np.asarray(counts2, dtype=np.float)
    # Totals for the simulated multinomials (before pseudocounts).
    total1 = int(np.round(np.sum(counts1)))
    total2 = int(np.round(np.sum(counts2)))
    countsMLE = counts1 + counts2 + pseudocount
    counts1 = counts1 + pseudocount # note: counts1 and counts2 are changed at this point
    counts2 = counts2 + pseudocount
    normcounts1 = counts1 / np.sum(counts1)
    normcounts2 = counts2 / np.sum(counts2)
    # Observed test statistic: absolute component-wise log10 ratio.
    testabslogratios = np.abs(np.log10(normcounts2 / normcounts1))
    probvec = countsMLE / np.sum(countsMLE)
    atleastasextreme = np.zeros(len(counts1))
    for i in xrange(B):
        if verbose and i % 10 == 0:
            sys.stdout.write("%i " % i)
            sys.stdout.flush()
        randcounts1 = np.float_(np.random.multinomial(total1, probvec)) + pseudocount
        randcounts2 = np.float_(np.random.multinomial(total2, probvec)) + pseudocount
        normrandcounts1 = randcounts1 / np.sum(randcounts1)
        normrandcounts2 = randcounts2 / np.sum(randcounts2)
        randabslogratios = np.abs(np.log10(normrandcounts2 / normrandcounts1))
        atleastasextreme += np.float_(randabslogratios >= testabslogratios)
    # Achieved significance level per component.
    ASL = atleastasextreme / B
    return ASL
def pvals_counts(counts1, counts2, method='KalZtest'):
    """Compute component-wise p-values of difference between two count vectors.

    Parameters:
        counts1, counts2 : equal-length count vectors.
        method : one of
            'KalZtest'   -- per-component Kal Z-test (pval_KalZtest)
            'MonteCarlo' -- Monte Carlo log-ratio test (pvals_logRatioMC)
    Returns:
        ndarray of p-values, one per component.
    Raises:
        ValueError if the vectors have different lengths.
        Exception if ``method`` is not recognized.
    """
    if len(counts1) != len(counts2):
        raise ValueError("Counts vectors have different lengths.")
    pvals = np.zeros(len(counts1))
    N1 = np.sum(counts1)
    N2 = np.sum(counts2)
    if method == 'KalZtest':
        for i in range(len(pvals)):
            pvals[i] = pval_KalZtest(counts1[i], N1, counts2[i], N2)
    elif method == 'MonteCarlo':
        # BUG FIX: keyword was misspelled 'pseudocounts', which raised
        # TypeError -- pvals_logRatioMC's parameter is 'pseudocount'.
        pvals = pvals_logRatioMC(counts1, counts2, B=1e6, pseudocount=1)
    else:
        raise Exception(method + " is not a recognized method for computing p-values.")
    return pvals
# ==============================================================================
# ==========================
# = Random data generation =
# ==========================
def gen_rand_count_vec(numComponents, numCounts, fracNull, probvecNull, probvecAlt):
    """Placeholder: generate a random count vector (not implemented).

    NOTE(review): presumably meant to draw ``numCounts`` counts over
    ``numComponents`` components, mixing null/alternative multinomials
    according to ``fracNull`` -- confirm intended semantics before
    implementing.
    """
    pass
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
| |
'''
TST - A Ternary Search Trie
Author: Tim Henderson
Contact: tim.tadh@hackthology.com or timothy.henderson@case.edu
This File: TST Implementation.
Copyright (c) 2010, Tim Henderson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name TST nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from collections import deque, MutableMapping
from os.path import sep as PATH_SEP
import sys, itertools
# Sentinel byte appended to every key stored in the trie; marks end-of-symbol.
END = '\x00'
# Instructions (opcodes) for the small regex VM used by TST.find,
# acceptone, thompsonvm and hendersonvm below.
MATCH = 0
CHAR = 1
SPLIT = 2
JMP = 3
# Other constants
WILDCARD = 0x11FFFF  # CHAR operand meaning "any character"
INF = 0x10FFFF       # upper bound for open-ended CHAR ranges
class TST(MutableMapping):
    '''
    A TST based symbol table:
        see Algorithms in (C|C++|Java) by Robert Sedgewick Ch. 15 Section 4

    NB:
        works on byte strings. If you are using unicode strings encode to a
        byte string before passing it to this class.
        ex:
            >>> x = u'\u03A0' # capital greek letter Pi
            >>> x
            u'\u03a0'
            >>> x.encode('utf8')
            '\xce\xa0'
            >>> t[x.encode('utf8')] = 123
            >>> t
            {'\xce\xa0': 123}
            >>> t[x] = 123
            Traceback (most recent call last):
              File "<stdin>", line 1, in <module>
              File "tst.py", line 241, in __setitem__
                self.heads[ord(symbol[0])] = insert(self.heads[ord(symbol[0])], 1)
            IndexError: list index out of range
        a good python unicode resource can be found at:
            http://boodebr.org/main/python/all-about-python-and-unicode

    TST supports flexible matching using Java style glob patterns. However
    you can override this behavior, and use a non-java glob interpretation.
    The Java vs. Non-Java glob are controlled by whether or not a path
    seperator has been set. By default it is set to os.path.sep and therefore
    uses Java style matching.

    Syntax to Regular Expression:
        if sep != None:
            **    --> .*
            **SEP --> .*
            *     --> [^SEP]*
            ?     --> [^SEP]
            c     --> c  ie. any other character is just that character
        if sep == None:
            **    --> .*
            *     --> .*
            ?     --> .
            c     --> c  ie. any other character is just that character

    Examples:
        t = TST()
        Java Glob Matching:
            SEP = os.path.sep [eg. '/' on unix]
            **/*.jsp                = .*[^/]*\.jsp
            /**/*.jsp               = /.*[^/]*\.jsp
            binary/**/WEB-INF/*.jsp = binary/.*WEB-INF/[^/]*\.jsp
            ???/*.jsp               = [^/][^/][^/]/[^/]*\.jsp

        t = TST(sep=None)
        Flexible Matching:
            SEP = None
            **/*.jsp                = .*/.*\.jsp
            /**/*.jsp               = /.*/.*\.jsp
            binary/**/WEB-INF/*.jsp = binary/.*/WEB-INF/.*\.jsp
            ???/*.jsp               = .../.*\.jsp

        you can set sep to any character ex:
        t = TST(sep=':')
        Custom SEP:
            SEP = ':'
            **/*.jsp                = .*/[^:]*\.jsp
            /**/*.jsp               = /.*/[^:]*\.jsp
            binary/**/WEB-INF/*.jsp = binary/.*/WEB-INF/[^:]*\.jsp
            ???/*.jsp               = [^:][^:][^:]/[^:]*\.jsp
            */lib/*:/bin/*:**       = [^:]*/lib/[^:]*:/bin/[^:]*:.*

    NB:
        Using this flexible matching only pays off versus naive use of a regex
        loop in the case of lots of strings. For hundreds of strings this
        table may be slower than a naive loop
        eg.
            set(f for f in files if re.match(p, f))
        however for lots of files the TST will be increasingly faster
        especially if the patterns prune at the root of the string.
        eg
            sometext*
        rather than
            *sometext
        The TST may perform worse for *sometext as it will have to iterate
        through every node in the trie. A better approach than a TST in this
        case would be a Suffix Tree, however Suffix Trees are very expensive
        to build. A TST can be the basis of a Suffix Tree. See Sedgewick for
        a discussion.
    '''

    def __init__(self, *args, **kwargs):
        '''
        @params sep = the seperator for Java style glob matching pass None to
                      disable see class documentations for more info.
        @params *args, **kwargs = passed to self.update see documentation for
                update for more info (basically a copy constructor) use like
                dict().
        eg:
            t = TST({'ab':12, 'cd':34}, sep=None)
            t = TST(((k, v) for k,v in {'ab':12, 'cd':34}.iteritems()), sep=None)
        '''
        # One trie root per possible first byte; avoids a single deep root.
        self.heads = [None for x in xrange(256)]
        #self.root = None
        self.sep = kwargs.pop('sep', PATH_SEP)
        self.update(*args, **kwargs)

    def find(self, pattern):
        '''
        finds all key, value pairs matching the pattern (see class
        documentation for pattern syntax).
        @params pattern = the pattern to match
        @returns generator object of tuple(key, value) pairs
        ex:
            dict(t.find('**')) (all items in the table as a python dictionary)
        '''
        # Fast path: no glob metacharacters means an exact lookup.
        if '*' not in pattern and '?' not in pattern:
            try:
                yield pattern, self[pattern]
            except KeyError:
                return
            return
        pattern += END
        insts = list()
        p = None   # previous character
        pp = None  # character before previous
        # the query compiler: translates the glob, one character at a time,
        # into (opcode, arg1, arg2) instructions for the VM functions below.
        for ch in pattern:
            if self.sep != None and pp == '*' and p == '*' and ch == self.sep:
                # '**SEP' collapses to '**'; the separator is swallowed.
                pass
            elif ch != '*' and ch != '?':
                # Literal character: match exactly that byte.
                insts.append((CHAR, ord(ch), ord(ch)))
            elif self.sep == None and ch == '?':
                insts.append((CHAR, WILDCARD, WILDCARD))
            elif ch == '?':
                # '?' with a separator: any single char except the separator,
                # expressed as two CHAR ranges around ord(sep).
                i = len(insts)
                insts.append((SPLIT, len(insts)+1, len(insts)+3))
                insts.append((CHAR, 0, ord(self.sep)-1))
                insts.append((JMP, len(insts)+2, 0))
                insts.append((CHAR, ord(self.sep)+1, INF))
            elif self.sep == None and p == '*' and ch == '*':
                # '**' with no separator is the same as '*'.
                pass
            elif self.sep == None and ch == '*':
                i = len(insts)
                insts.append((SPLIT, len(insts)+1, len(insts)+3))
                insts.append((CHAR, WILDCARD, WILDCARD))
                insts.append((JMP, i, 0))
            elif ch == '*' and p == '*':
                # Second '*' of '**': drop the 6 instructions emitted for the
                # first '*' and emit an unrestricted '.*' loop instead.
                insts = insts[:-6]
                i = len(insts)
                insts.append((SPLIT, len(insts)+1, len(insts)+3))
                insts.append((CHAR, WILDCARD, WILDCARD))
                insts.append((JMP, i, 0))
            else:
                # Single '*' with a separator: loop over "any char but SEP".
                i = len(insts)
                insts.append((SPLIT, len(insts)+1, len(insts)+6))
                insts.append((SPLIT, len(insts)+1, len(insts)+3))
                insts.append((CHAR, 0, ord(self.sep)-1))
                insts.append((JMP, len(insts)+2, 0))
                insts.append((CHAR, ord(self.sep)+1, INF))
                insts.append((JMP, i, 0))
            pp = p
            p = ch
        insts.append((MATCH, 0, 0))
        #print insts
        # Seed with an empty generator so itertools.chain always has input.
        matches = [(x for x in [])]
        for i, h in enumerate(self.heads):
            if h == None: continue
            # Run the VM over just the head byte; only descend into subtries
            # whose first character can start a match.
            accept, clist = acceptone(insts, chr(i), 0)
            if accept:
                matches.append(hendersonvm(insts, h, 1, clist))
        for match in itertools.chain(*matches):
            yield match

    def keys(self):
        '''
        All the keys in the table sorted in byte order.
        '''
        return [k for k in self]

    def iteritems(self):
        '''
        All the items [tuple(key, value) pairs] in the table sorted in byte
        order of their keys.
        '''
        q = deque()
        for h in self.heads:
            if h == None: continue
            q.appendleft(h)
        #q.append(self.root)
        j = 0
        while q:
            n = q.pop()
            if not n: continue
            if n.accepting:
                # Strip the END sentinel before exposing the key.
                yield n.key[:-1], n.val
            # Right/middle/left push order + pop() gives byte-sorted output.
            q.append(n.r)
            q.append(n.m)
            q.append(n.l)

    def __len__(self):
        return len(self.keys())

    def __setitem__(self, symbol, obj):
        ## a modified version of the algorithm given by sedgewicks
        ## fixes some bugs
        symbol += END

        # node split: grow the trie one level where two leaf keys collide.
        def split(p, q, d):
            pd = p.key[d] # chr for p
            qd = q.key[d] # chr for q
            # get the next chr for q so we can update its ch field
            if d+1 < len(q.key): nqd = q.key[d+1]
            else: nqd = END
            t = node(qd) # the new node that will be the parent of both p, and q
            # update the char fields necessary, because if you don't they may
            # be wrong and cause problems in the regex matching.
            q.ch = nqd
            p.ch = pd
            if pd < qd: t.m = q; t.l = p
            elif pd == qd: t.m = split(p, q, d+1);
            elif pd > qd: t.m = q; t.r = p
            return t

        # recursive insert
        def insert(n, d):
            if n == None:
                # if the node is None we found the spot make a new node and
                # return it
                if d == len(symbol): ch = '\0'
                else: ch = symbol[d]
                return node(ch, key=symbol, val=obj)
            if not n.internal():
                # if it a leaf node we either have found the symbol or we need
                # to split a node.
                if len(n.key) == len(symbol) and n.key == symbol:
                    # found the symbol
                    n.val = obj
                    return n
                else:
                    # split the node
                    ch = symbol[d]
                    return split(node(ch, key=symbol, val=obj), n, d)
            # it is an internal node
            ch = symbol[d]
            if ch < n.ch: n.l = insert(n.l, d)
            elif ch == n.ch: n.m = insert(n.m, d+1) # matches current chr so d+1
            elif ch > n.ch: n.r = insert(n.r, d)
            return n

        # start at the "head" of the trie rooted at the first character of the
        # symbol.
        self.heads[ord(symbol[0])] = insert(self.heads[ord(symbol[0])], 1)
        #self.root = insert(self.root, 0)

    def __getitem__(self, symbol):
        ## an iterative version of the algorithm given by sedgewick
        ## I made it iterative because it is faster that way.
        symbol += END
        next = (self.heads[ord(symbol[0])], 1)
        #next = (self.root, 0)
        while next:
            n, d = next
            if n == None:
                raise KeyError, "Symbol '%s' is not in table." % symbol[:-1]
            if n.internal():
                ch = symbol[d]
                if ch < n.ch: next = (n.l, d); continue
                elif ch == n.ch: next = (n.m, d+1); continue
                elif ch > n.ch: next = (n.r, d); continue
            elif n.key == symbol:
                # leaf node holding the full key
                return n.val
            raise KeyError, "Symbol '%s' is not in table." % symbol[:-1]
        #should never reach ...
        raise KeyError, "Symbol '%s' is not in table." % symbol[:-1]

    def __delitem__(self, symbol):
        ## not given by sedgewick inferred by Tim Henderson
        ## the algorithm is very similar to the get algorithm.
        symbol += END

        def check(n):
            # ensure the node is valid: prune nodes that no longer hold a
            # key and have no children.
            if n == None: return None
            if not n.internal() and n.key == None:
                return None
            return n

        def remove(n, d):
            if n == None:
                raise KeyError, "Symbol '%s' is not in table." % symbol
            if n.internal():
                ch = symbol[d]
                if ch < n.ch: n.l = check(remove(n.l, d))
                elif ch == n.ch: n.m = check(remove(n.m, d+1))
                elif ch > n.ch: n.r = check(remove(n.r, d))
            else:
                if n.key == symbol:
                    return None
                else:
                    raise KeyError, "Symbol '%s' is not in table." % symbol
            return check(n)

        self.heads[ord(symbol[0])] = remove(self.heads[ord(symbol[0])], 1)
        #self.root = remove(self.root, 0)

    def __iter__(self):
        '''
        returns the keys in sorted byte order.
        '''
        q = deque()
        for h in self.heads:
            if h == None: continue
            q.appendleft(h)
        #q.append(self.root)
        j = 0
        while q:
            n = q.pop()
            if not n: continue
            if n.accepting:
                yield n.key[:-1]
            q.append(n.r)
            q.append(n.m)
            q.append(n.l)

    def __contains__(self, pattern):
        '''
        checks to see if the pattern is in the dictionary. first checks to see
        if it is a symbol name, if not checks if the pattern matches anything,
        if not returns false.
        '''
        try:
            x = self[pattern]
        except KeyError:
            try: return bool(tuple(self.find(pattern)))
            except KeyError: return False
            return False
        return True

    def __getstate__(self):
        '''Used by pickle to save the state of this table.'''
        return {'sep':self.sep, 'dict':dict(self)}

    def __setstate__(self, val):
        '''Used by pickle to restore the state of this table.'''
        self.__init__(val['dict'], sep=val['sep'])

    def __str__(self):
        return str(dict(self))

    def __repr__(self):
        return str(self)

    def dotty(self):
        '''Render the trie as a Graphviz "dot" digraph string (debug aid).'''
        header = 'digraph TST {\nrankdir=LR;\n'
        node_root = '    %s[label="%s", shape="rect"];'
        # NB: this local `node` template string shadows the module-level
        # `node` class for the rest of this method.
        node = '    %s[label="%s", shape="circle", fillcolor="#aaffff" style="filled"];'
        node_acc = '    %s[label="%s", fillcolor="#aaffaa" style="filled"];'
        edge = '    %s -> %s [label="%s"];'
        edge_nolabel = '    %s -> %s;'
        footer = '\n}\n'
        nodes = list()
        edges = list()

        def dotnode(cur, parent, ch):
            name = 'node%i' % len(nodes)
            if cur.accepting: nodes.append(node_acc % (name, cur.key[:-1]))
            elif cur.ch == '\0': nodes.append(node % (name, '\\\\0'))
            else: nodes.append(node % (name, cur.ch))
            #print ch is "", '"' + ch + '"', type(ch), ch[0], len(ch)
            if ch[-1] != "\0": edges.append(edge % (parent, name, ch))
            elif ch[-1] == "\0": edges.append(edge % (parent, name, ch[:-1]+'\\\\0'))
            else: edges.append(edge % (parent, name, '\\\\0'))
            if cur.l is not None: dotnode(cur.l, name, "<")
            if cur.m is not None: dotnode(cur.m, name, '=')
            if cur.r is not None: dotnode(cur.r, name, ">")

        root = 'node%i' % len(nodes)
        nodes.append(node_root % (root, 'heads'))
        for k in xrange(len(self.heads)):
            if self.heads[k] is None: continue
            dotnode(self.heads[k], root, chr(k))
        return (
            header + '\n'.join(nodes) + '\n' + '\n'.join(edges) + footer
        )
class node(object):
    '''A single node of a TST.

    Attributes (slots):
        ch        -- the character this node discriminates on
        key       -- the full stored symbol (END-terminated) if this node
                     terminates a key, else None
        val       -- the value bound to `key` (None for pure internal nodes)
        l, m, r   -- left (<), middle (==, advance one char) and right (>)
                     children
        accepting -- True iff this node terminates a stored symbol
    '''
    __slots__ = ['ch', 'key', 'val', 'l', 'm', 'r', 'accepting']

    def __init__(self, ch, key=None, val=None, m=None):
        self.ch = ch
        self.key = key
        self.val = val
        self.l = None
        self.m = m
        self.r = None
        # A node accepts exactly when it carries a key (idiomatic `is None`
        # replaces the original `== None` comparisons).
        self.accepting = key is not None

    def internal(self):
        # Internal nodes are those with at least one child.
        return self.l is not None or self.m is not None or self.r is not None

    def __str__(self):
        ch = self.ch
        k = self.key
        if ch == END: ch = r'\0'
        if k: k = k[:-1]
        if self.accepting: return "%s %s %s" % (ch, k, str(self.val))
        return ch

    def __getstate__(self):
        '''Used by pickle: slots-classes have no __dict__ to pickle.'''
        return dict((attr, getattr(self, attr)) for attr in self.__slots__)

    def __setstate__(self, s):
        # BUG FIX: was `s.iteritems()`, which only exists on Python 2 dicts;
        # `items()` behaves identically on both Python 2 and 3.
        for k, v in s.items():
            setattr(self, k, v)

    def __repr__(self):
        return str(self)
def acceptone(program, text, pc):
    '''
    checks one character and sees if it matches. if it does returns True and
    the next queue of program counters [pc] to execute. If it doesn't match
    returns False and None. see thompsonvm citation for a very thorough
    explanation of the theory behind this algorithm.

    @params program = list of (opcode, arg1, arg2) instructions (see the
            MATCH/CHAR/SPLIT/JMP constants)
    @params text = the text to test; callers in this file pass a single
            character (a head byte of the trie)
    @params pc = program counter to start from
    @returns (True, clist) where clist is the thread list to resume from,
             or (False, None) on no match.
    '''
    tc = 0
    cqueue, nqueue = deque(), deque()
    cqueue.append(pc)
    while cqueue:
        pc = cqueue.pop()
        inst = program[pc]
        if inst[0] == JMP:
            cqueue.append(inst[1])
        if inst[0] == SPLIT:
            # follow both branches of the nondeterministic choice
            cqueue.append(inst[1])
            cqueue.append(inst[2])
        if inst[0] == CHAR:
            if tc >= len(text): continue
            x, y = inst[1], inst[2]; c = ord(text[tc])
            # NOTE(review): `y and ...` relies on y == 0 being falsy, so a
            # range with upper bound 0 falls through to the equality tests.
            if (y and x <= c <= y) or c == x or x == WILDCARD:
                # this thread survives; resume it at the next instruction
                nqueue.append(pc+1)
        if inst[0] == MATCH:
            if tc == len(text):
                # NOTE(review): unreachable for the 1-char texts used by
                # TST.find (tc stays 0 while len(text) == 1); the [0]
                # restart list looks untested -- confirm before relying on it.
                return True, [0]
    if nqueue:
        return True, nqueue
    return False, None
def thompsonvm(program, text, tc, pc):
    '''
    A version of the Thompson Virtual Machine as defined by Russ Cox in:
        http://swtch.com/~rsc/regexp/regexp2.html
    this article provides a very thorough explanation of regular expression
    NFA matching as implemented in this module.

    This version modifies the algorithm to start at a later position in the
    string, and regular expression. Used to match the end of the string stored
    in the leaves of the TST.

    @params program = list of (opcode, arg1, arg2) instructions
    @params text = the string to match against
    @params tc = text counter (starting index into text)
    @params pc = program counter (starting instruction)
    @returns True if the program matches text[tc:] exactly, else False.
    '''
    cqueue, nqueue = deque(), deque()
    cqueue.append(pc)
    # <= so MATCH can fire after the last character has been consumed.
    while tc <= len(text):
        # run every live thread against the current character
        while cqueue:
            pc = cqueue.pop()
            inst = program[pc]
            if inst[0] == JMP:
                cqueue.append(inst[1])
            if inst[0] == SPLIT:
                cqueue.append(inst[1])
                cqueue.append(inst[2])
            if inst[0] == CHAR:
                if tc >= len(text): continue
                x, y = inst[1], inst[2]; c = ord(text[tc])
                if (y and x <= c <= y) or c == x or x == WILDCARD:
                    nqueue.append(pc+1)
            if inst[0] == MATCH:
                if tc == len(text):
                    return True
        # advance to the next character with the surviving threads
        cqueue = nqueue
        nqueue = deque()
        tc += 1
    return False
def hendersonvm(program, node, d, clist):
    '''
    A Regex Virtual Machine for matching regular expressions stored in a trie.
    Originally implemented for a N-Way trie, this version has been modified
    for use on a TST. For more details contact Tim Henderson at
    tim.tadh@gmail.com or timothy.henderson@case.edu

    Note this algorithm starts at text position d and with thread list clist.

    @params program = list of (opcode, arg1, arg2) instructions
    @params node = TST subtrie root to search under
    @params d = key depth (text position) this subtrie corresponds to
    @params clist = initial thread list (program counters), as returned by
            acceptone for the head character
    @yields (key, value) pairs for every stored symbol matching the program.
    '''
    #print clist, d

    def addthread(l, thread):
        # dedup: each pc appears at most once per generation
        if thread not in l: l.appendleft(thread)

    def addnode(d, t, node):
        # associate trie position `node` with thread `t` (d is a dict here)
        if not d.has_key(t):
            d[t] = set()
        if node not in d[t]: d[t].add(node)

    def dupnodes(d, t, t2):
        # thread t2 inherits all trie positions of thread t
        #print d
        if not d.has_key(t2):
            d[t2] = set()
        d[t2] |= d[t]

    if not program: return
    if not node: return
    nlist = deque()
    cnodes = dict()   # thread pc -> set of (trie node, depth) positions
    nnodes = dict()
    for pc in clist:
        addnode(cnodes, pc, (node,d))
    # Outer loop: one generation per consumed character; inner loop drains
    # the current generation's threads.
    while clist:
        while clist:
            pc = clist.popleft()
            inst = program[pc]
            if inst[0] == JMP:
                addthread(clist, inst[1])
                dupnodes(cnodes, pc, inst[1])
            elif inst[0] == SPLIT:
                addthread(clist, inst[2])
                addthread(clist, inst[1])
                dupnodes(cnodes, pc, inst[1])
                dupnodes(cnodes, pc, inst[2])
            elif cnodes.has_key(pc):
                # NOTE(review): this loop variable shadows the parameter `d`
                # from here on -- intentional (each position carries its own
                # depth) but easy to misread.
                for n, d in cnodes[pc]:
                    if n == None: continue
                    if inst[0] == CHAR:
                        x, y = inst[1], inst[2]; c = ord(n.ch)
                        # chars below this node's ch may live in the left
                        # subtree: keep the same thread alive there
                        if (x < c or x == WILDCARD) and n.l != None:
                            addthread(nlist, pc)
                            addnode(nnodes, pc, (n.l, d))
                        if x == c or y == c or (x < c < y) or x == WILDCARD:
                            if not n.internal():
                                # leaf: finish the match against the rest of
                                # the stored key with the plain Thompson VM
                                if thompsonvm(program, n.key, d+1, pc+1):
                                    #print n.key, inst, pc, program
                                    yield n.key[:-1], n.val
                            elif n.m != None:
                                # consume one char: advance both pc and depth
                                addthread(nlist, pc+1)
                                addnode(nnodes, pc+1, (n.m, d+1))
                        if (y > c or x == WILDCARD) and n.r != None:
                            addthread(nlist, pc)
                            addnode(nnodes, pc, (n.r, d))
                    elif inst[0] == MATCH:
                        if d == len(n.key):
                            yield n.key[:-1], n.val
        # rotate generations
        cnodes = nnodes
        nnodes = dict()
        clist = nlist
        nlist = deque()
    #print
    #print
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparse tensors."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
from tensorflow.python import tf2
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.types import internal
from tensorflow.python.util import _pywrap_utils
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
# Module-private aliases for `ops` internals used below (by
# SparseTensor.eval and SparseTensor._override_operator).
_eval_using_default_session = ops._eval_using_default_session
_override_helper = ops._override_helper
# pylint: enable=protected-access
@tf_export("sparse.SparseTensor", "SparseTensor")
class SparseTensor(internal.NativeObject, composite_tensor.CompositeTensor):
  """Represents a sparse tensor.

  TensorFlow represents a sparse tensor as three separate dense tensors:
  `indices`, `values`, and `dense_shape`. In Python, the three tensors are
  collected into a `SparseTensor` class for ease of use. If you have separate
  `indices`, `values`, and `dense_shape` tensors, wrap them in a `SparseTensor`
  object before passing to the ops below.

  Concretely, the sparse tensor `SparseTensor(indices, values, dense_shape)`
  comprises the following components, where `N` and `ndims` are the number
  of values and number of dimensions in the `SparseTensor`, respectively:

  * `indices`: A 2-D int64 tensor of shape `[N, ndims]`, which specifies the
    indices of the elements in the sparse tensor that contain nonzero values
    (elements are zero-indexed). For example, `indices=[[1,3], [2,4]]`
    specifies that the elements with indexes of [1,3] and [2,4] have nonzero
    values.

  * `values`: A 1-D tensor of any type and shape `[N]`, which supplies the
    values for each element in `indices`. For example, given `indices=[[1,3],
    [2,4]]`, the parameter `values=[18, 3.6]` specifies that element [1,3] of
    the sparse tensor has a value of 18, and element [2,4] of the tensor has a
    value of 3.6.

  * `dense_shape`: A 1-D int64 tensor of shape `[ndims]`, which specifies the
    dense_shape of the sparse tensor. Takes a list indicating the number of
    elements in each dimension. For example, `dense_shape=[3,6]` specifies a
    two-dimensional 3x6 tensor, `dense_shape=[2,3,4]` specifies a
    three-dimensional 2x3x4 tensor, and `dense_shape=[9]` specifies a
    one-dimensional tensor with 9 elements.

  The corresponding dense tensor satisfies:

  ```python
  dense.shape = dense_shape
  dense[tuple(indices[i])] = values[i]
  ```

  By convention, `indices` should be sorted in row-major order (or equivalently
  lexicographic order on the tuples `indices[i]`). This is not enforced when
  `SparseTensor` objects are constructed, but most ops assume correct ordering.
  If the ordering of sparse tensor `st` is wrong, a fixed version can be
  obtained by calling `tf.sparse.reorder(st)`.

  Example: The sparse tensor

  ```python
  SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
  ```

  represents the dense tensor

  ```python
  [[1, 0, 0, 0]
   [0, 0, 2, 0]
   [0, 0, 0, 0]]
  ```
  """

  @classmethod
  def from_value(cls, sparse_tensor_value):
    # Accepts either a SparseTensor or a SparseTensorValue (`is_sparse` is
    # defined elsewhere in this module).
    if not is_sparse(sparse_tensor_value):
      raise TypeError("Neither a SparseTensor nor SparseTensorValue: %s." %
                      sparse_tensor_value)
    return SparseTensor(
        indices=sparse_tensor_value.indices,
        values=sparse_tensor_value.values,
        dense_shape=sparse_tensor_value.dense_shape)

  def __init__(self, indices, values, dense_shape):
    """Creates a `SparseTensor`.

    Args:
      indices: A 2-D int64 tensor of shape `[N, ndims]`.
      values: A 1-D tensor of any type and shape `[N]`.
      dense_shape: A 1-D int64 tensor of shape `[ndims]`.

    Raises:
      ValueError: When building an eager SparseTensor if `dense_shape` is
        unknown or contains unknown elements (None or -1).
    """
    with ops.name_scope(None, "SparseTensor", [indices, values, dense_shape]):
      indices = ops.convert_to_tensor(
          indices, name="indices", dtype=dtypes.int64)
      # TODO(touts): Consider adding mutable_values() when 'values'
      # is a VariableOp and updating users of SparseTensor.
      values = ops.convert_to_tensor(values, name="values")
      dense_shape = ops.convert_to_tensor(
          dense_shape, name="dense_shape", dtype=dtypes.int64)
      # Static (TensorShape) view of dense_shape, used by .shape/get_shape.
      dense_shape_default = tensor_util.constant_value_as_shape(dense_shape)

    self._indices = indices
    self._values = values
    self._dense_shape = dense_shape
    self._dense_shape_default = dense_shape_default

    indices_shape = indices.shape.with_rank(2)
    values_shape = values.shape.with_rank(1)
    dense_shape_shape = dense_shape.shape.with_rank(1)

    # Assert number of rows in indices match the number of elements in values.
    indices_shape.dims[0].assert_is_compatible_with(values_shape.dims[0])
    # Assert number of columns in indices matches the number of elements in
    # dense_shape.
    indices_shape.dims[1].assert_is_compatible_with(dense_shape_shape.dims[0])

  def get_shape(self):
    """Get the `TensorShape` representing the shape of the dense tensor.

    Returns:
      A `TensorShape` object.
    """
    return self._dense_shape_default

  @property
  def indices(self):
    """The indices of non-zero values in the represented dense tensor.

    Returns:
      A 2-D Tensor of int64 with dense_shape `[N, ndims]`, where `N` is the
        number of non-zero values in the tensor, and `ndims` is the rank.
    """
    return self._indices

  @property
  def values(self):
    """The non-zero values in the represented dense tensor.

    Returns:
      A 1-D Tensor of any data type.
    """
    return self._values

  def with_values(self, new_values):
    """Returns a copy of `self` with `values` replaced by `new_values`.

    This method produces a new `SparseTensor` that has the same nonzero
    `indices` and same `dense_shape`, but updated values.

    Args:
      new_values: The values of the new `SparseTensor`. Needs to have the same
        shape as the current `.values` `Tensor`. May have a different type
        than the current `values`.

    Returns:
      A `SparseTensor` with identical indices and shape but updated values.

    Example usage:

    >>> st = tf.sparse.from_dense([[1, 0, 2, 0], [3, 0, 0, 4]])
    >>> tf.sparse.to_dense(st.with_values([10, 20, 30, 40]))  # 4 nonzero values
    <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
    array([[10,  0, 20,  0],
           [30,  0,  0, 40]], dtype=int32)>
    """
    return SparseTensor(self._indices, new_values, self._dense_shape)

  @property
  def op(self):
    """The `Operation` that produces `values` as an output."""
    return self._values.op

  @property
  def dtype(self):
    """The `DType` of elements in this tensor."""
    return self._values.dtype

  @property
  def dense_shape(self):
    """A 1-D Tensor of int64 representing the shape of the dense tensor."""
    return self._dense_shape

  @property
  def shape(self):
    """Get the `TensorShape` representing the shape of the dense tensor.

    Returns:
      A `TensorShape` object.
    """
    return self._dense_shape_default

  @property
  def graph(self):
    """The `Graph` that contains the index, value, and dense_shape tensors."""
    return self._indices.graph

  def __str__(self):
    return "SparseTensor(indices=%s, values=%s, dense_shape=%s)" % (
        self._indices, self._values, self._dense_shape)

  def eval(self, feed_dict=None, session=None):
    """Evaluates this sparse tensor in a `Session`.

    Calling this method will execute all preceding operations that
    produce the inputs needed for the operation that produces this
    tensor.

    *N.B.* Before invoking `SparseTensor.eval()`, its graph must have been
    launched in a session, and either a default session must be
    available, or `session` must be specified explicitly.

    Args:
      feed_dict: A dictionary that maps `Tensor` objects to feed values. See
        `tf.Session.run` for a description of the valid feed values.
      session: (Optional.) The `Session` to be used to evaluate this sparse
        tensor. If none, the default session will be used.

    Returns:
      A `SparseTensorValue` object.
    """
    indices, values, dense_shape = _eval_using_default_session(
        [self.indices, self.values, self.dense_shape], feed_dict, self.graph,
        session)
    return SparseTensorValue(indices, values, dense_shape)

  @staticmethod
  def _override_operator(operator, func):
    _override_helper(SparseTensor, operator, func)

  @property
  def _type_spec(self):
    return SparseTensorSpec(self.shape, self.dtype)

  def _shape_invariant_to_type_spec(self, shape):
    # From the tf.while_loop docs: "If a loop variable is a SparseTensor, the
    # shape invariant must be TensorShape([r]) where r is the rank of the dense
    # tensor represented by the sparse tensor. It means the shapes of the three
    # tensors of the SparseTensor are ([None], [None, r], [r]). NOTE: The shape
    # invariant here is the shape of the SparseTensor.dense_shape property. It
    # must be the shape of a vector.
    if shape.ndims is not None and shape.ndims != 1:
      raise ValueError("Expected a shape with 1 dimension")
    rank = tensor_shape.dimension_value(shape[0])
    return SparseTensorSpec(tensor_shape.unknown_shape(rank), self.dtype)

  def consumers(self):
    # Provided by composite_tensor.CompositeTensor.
    return self._consumers()
# Plain-value counterpart of `SparseTensor` with the same three components;
# this is what `SparseTensor.eval()` returns.
SparseTensorValue = collections.namedtuple("SparseTensorValue",
                                           ["indices", "values", "dense_shape"])
# Exported only in the TF1 API namespace; registered with pywrap so the C++
# layer can recognize SparseTensorValue instances.
tf_export(v1=["SparseTensorValue"])(SparseTensorValue)
_pywrap_utils.RegisterType("SparseTensorValue", SparseTensorValue)
@tf_export("SparseTensorSpec")
@type_spec.register("tf.SparseTensorSpec")
class SparseTensorSpec(type_spec.BatchableTypeSpec):
  """Type specification for a `tf.sparse.SparseTensor`."""
  # A SparseTensor's type is fully described by its dense shape and the dtype
  # of its values; __slots__ restricts instances to exactly that state.
  __slots__ = ["_shape", "_dtype"]
  value_type = property(lambda self: SparseTensor)
  def __init__(self, shape=None, dtype=dtypes.float32):
    """Constructs a type specification for a `tf.sparse.SparseTensor`.
    Args:
      shape: The dense shape of the `SparseTensor`, or `None` to allow any dense
        shape.
      dtype: `tf.DType` of values in the `SparseTensor`.
    """
    self._shape = tensor_shape.as_shape(shape)
    self._dtype = dtypes.as_dtype(dtype)
  def _serialize(self):
    # The (shape, dtype) pair is the spec's entire state; it is used for
    # equality/hashing and for reconstructing the spec.
    return (self._shape, self._dtype)
  @property
  def dtype(self):
    """The `tf.dtypes.DType` specified by this type for the SparseTensor."""
    return self._dtype
  @property
  def shape(self):
    """The `tf.TensorShape` specified by this type for the SparseTensor."""
    return self._shape
  @property
  def _component_specs(self):
    # Component order is [indices, values, dense_shape], matching
    # _to_components/_from_components below. The number of non-zero values is
    # unknown statically, hence num_values=None.
    rank = self._shape.ndims
    num_values = None
    return [
        tensor_spec.TensorSpec([num_values, rank], dtypes.int64),
        tensor_spec.TensorSpec([num_values], self._dtype),
        tensor_spec.TensorSpec([rank], dtypes.int64)]
  def _to_components(self, value):
    # Accept the legacy SparseTensorValue wrapper as well as SparseTensor.
    if isinstance(value, SparseTensorValue):
      value = SparseTensor.from_value(value)
    return [value.indices, value.values, value.dense_shape]
  def _from_components(self, tensor_list):
    # In TF1 (tf2 disabled) with all-numpy components, rebuild the legacy
    # SparseTensorValue; otherwise build a proper SparseTensor.
    if (all(isinstance(t, np.ndarray) for t in tensor_list) and
        not tf2.enabled()):
      return SparseTensorValue(*tensor_list)
    else:
      return SparseTensor(*tensor_list)
  # The SparseTensorSpec tensor_list encoding uses (de)serialize_sparse ops
  # to (un)box the component tensors in a way that allows for batching &
  # unbatching.
  @property
  def _flat_tensor_specs(self):
    # NOTE(mrry): The default flat shape of a boxed `SparseTensor` is `(3,)`,
    # but a `SparseTensorSpec` can also represent a batch of boxed
    # `SparseTensor` objects with shape `(..., 3)` (and batches of batches,
    # etc.), so the flat shape must be unknown.
    return [tensor_spec.TensorSpec(None, dtypes.variant)]
  def _to_tensor_list(self, value):
    # Box the whole SparseTensor into a single variant-dtype tensor.
    value = SparseTensor.from_value(value)
    return [gen_sparse_ops.serialize_sparse(
        value.indices, value.values, value.dense_shape,
        out_type=dtypes.variant)]
  def _to_batched_tensor_list(self, value):
    # serialize_many_sparse treats dimension 0 as the batch dimension, so a
    # rank-0 sparse tensor cannot be encoded this way (matches the error
    # raised when unbatching).
    dense_shape = tensor_util.constant_value_as_shape(value.dense_shape)
    if self._shape.merge_with(dense_shape).ndims == 0:
      raise ValueError(
          "Unbatching a sparse tensor is only supported for rank >= 1")
    return [gen_sparse_ops.serialize_many_sparse(
        value.indices, value.values, value.dense_shape,
        out_type=dtypes.variant)]
  def _from_compatible_tensor_list(self, tensor_list):
    # deserialize_sparse yields the (indices, values, dense_shape) triple.
    tensor_list = gen_sparse_ops.deserialize_sparse(tensor_list[0], self._dtype)
    indices, values, dense_shape = tensor_list
    rank = self._shape.ndims
    indices.set_shape([None, rank])
    # We restore the dense_shape from the SparseTypeSpec. This is necessary
    # for shape inference when using placeholder SparseTensors in function
    # tracing.
    if self._shape.is_fully_defined():
      # Fully static shape: replace dense_shape with a constant outright.
      dense_shape = ops.convert_to_tensor(
          self._shape, dtype=dtypes.int64, name="shape")
    elif (self._shape.rank is not None and
          any(dim.value is not None for dim in self._shape.dims)):
      # Partially static shape: pin only the known dimensions, keep the
      # dynamic ones from the deserialized dense_shape.
      # array_ops imports sparse_tensor.py. Local import to avoid import cycle.
      from tensorflow.python.ops import array_ops  # pylint: disable=g-import-not-at-top
      pieces = array_ops.unstack(dense_shape, num=self._shape.rank)
      for i, dim in enumerate(self._shape.dims):
        if dim.value is not None:
          pieces[i] = constant_op.constant(dim.value, dense_shape.dtype)
      dense_shape = array_ops.stack(pieces)
    else:
      dense_shape.set_shape([rank])
    return SparseTensor(indices, values, dense_shape)
  def _batch(self, batch_size):
    # Prepend a (possibly unknown) batch dimension to the dense shape.
    return SparseTensorSpec(
        tensor_shape.TensorShape([batch_size]).concatenate(self._shape),
        self._dtype)
  def _unbatch(self):
    # Drop the leading batch dimension; rank-0 specs have none to drop.
    if self._shape.ndims == 0:
      raise ValueError("Unbatching a tensor is only supported for rank >= 1")
    return SparseTensorSpec(self._shape[1:], self._dtype)
  # Legacy tf.data output_{types,shapes,classes} support.
  def _to_legacy_output_types(self):
    return self._dtype
  def _to_legacy_output_shapes(self):
    return self._shape
  def _to_legacy_output_classes(self):
    return SparseTensor
  @classmethod
  def from_value(cls, value):
    """Builds a spec whose shape/dtype match `value`."""
    if isinstance(value, SparseTensor):
      return cls(value.shape, value.dtype)
    if isinstance(value, SparseTensorValue):
      if isinstance(value.values, np.ndarray):
        # `value.values.dtype` is a NumPy dtype here; __init__ normalizes it
        # via `dtypes.as_dtype`.
        return cls(value.dense_shape, value.values.dtype)
      else:
        return cls.from_value(SparseTensor.from_value(value))
    else:
      raise TypeError("Expected SparseTensor or SparseTensorValue")
# TODO(b/133606651) Delete the SparseTensor registration when CompositeTensor
# is updated to define a _type_spec field (since registration will be
# automatic). Do *not* delete the SparseTensorValue registration.
# Both registrations route `type_spec.type_spec_from_value(...)` to
# SparseTensorSpec.from_value for (legacy and modern) sparse values.
type_spec.register_type_spec_from_value_converter(
    SparseTensor, SparseTensorSpec.from_value)
type_spec.register_type_spec_from_value_converter(
    SparseTensorValue, SparseTensorSpec.from_value)
@tf_export(v1=["convert_to_tensor_or_sparse_tensor"])
def convert_to_tensor_or_sparse_tensor(value, dtype=None, name=None):
  """Converts `value` to a `SparseTensor` or a dense `Tensor`.
  Args:
    value: A `SparseTensor`, `SparseTensorValue`, or an object whose type has a
      registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the type
      is inferred from the type of `value`.
    name: Optional name to use if a new `Tensor` is created.
  Returns:
    A `SparseTensor` or `Tensor` based on `value`.
  Raises:
    RuntimeError: If result type is incompatible with `dtype`.
  """
  if dtype is not None:
    dtype = dtypes.as_dtype(dtype)
  # Normalize the legacy wrapper to a real SparseTensor before dispatching.
  if isinstance(value, SparseTensorValue):
    value = SparseTensor.from_value(value)
  if not isinstance(value, SparseTensor):
    # Dense path: hand off to the registered Tensor conversion machinery.
    return ops.convert_to_tensor(value, dtype=dtype, name=name)
  if dtype and not dtype.is_compatible_with(value.dtype):
    raise RuntimeError("Sparse dtype: requested = %s, actual = %s" %
                       (dtype.name, value.dtype.name))
  return value
def is_sparse(x):
  """Check whether `x` is sparse.
  Check whether an object is a `tf.sparse.SparseTensor` or
  `tf.compat.v1.SparseTensorValue`.
  Args:
    x: A python object to check.
  Returns:
    `True` iff `x` is a `tf.sparse.SparseTensor` or
    `tf.compat.v1.SparseTensorValue`.
  """
  sparse_classes = (SparseTensor, SparseTensorValue)
  return isinstance(x, sparse_classes)
| |
# -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Michael Stead <michael.stead@gmail.com> #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 @tmshn <tmshn@r.recruit.co.jp> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Aaron Levine <allevin@sandia.gov> #
# Copyright 2017 Simon <spam@esemi.ru> #
# Copyright 2018 Ben Yohay <ben@lightricks.com> #
# Copyright 2018 Gilad Shefer <gshefer@redhat.com> #
# Copyright 2018 Martin Monperrus <monperrus@users.noreply.github.com> #
# Copyright 2018 Matt Babineau <9685860+babineaum@users.noreply.github.com> #
# Copyright 2018 Shinichi TAMURA <shnch.tmr@gmail.com> #
# Copyright 2018 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2018 Thibault Jamet <tjamet@users.noreply.github.com> #
# Copyright 2018 per1234 <accounts@perglass.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import datetime
import urllib
import github.GithubObject
import github.PaginatedList
import github.PullRequestMergeStatus
import github.NamedUser
import github.PullRequestPart
import github.PullRequestComment
import github.File
import github.IssueComment
import github.Commit
import github.PullRequestReview
class PullRequest(github.GithubObject.CompletableGithubObject):
    """
    This class represents PullRequests. The reference can be found here http://developer.github.com/v3/pulls/
    """
    def __repr__(self):
        return self.get__repr__({"number": self._number.value, "title": self._title.value})
    # Each property below lazily completes the object: _completeIfNotSet
    # triggers a full GET of the pull request if the attribute was not present
    # in the payload this object was built from.
    @property
    def additions(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._additions)
        return self._additions.value
    @property
    def assignee(self):
        """
        :type: :class:`github.NamedUser.NamedUser`
        """
        self._completeIfNotSet(self._assignee)
        return self._assignee.value
    @property
    def assignees(self):
        """
        :type: list of :class:`github.NamedUser.NamedUser`
        """
        self._completeIfNotSet(self._assignees)
        return self._assignees.value
    @property
    def base(self):
        """
        :type: :class:`github.PullRequestPart.PullRequestPart`
        """
        self._completeIfNotSet(self._base)
        return self._base.value
    @property
    def body(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._body)
        return self._body.value
    @property
    def changed_files(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._changed_files)
        return self._changed_files.value
    @property
    def closed_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._closed_at)
        return self._closed_at.value
    @property
    def comments(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._comments)
        return self._comments.value
    @property
    def comments_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._comments_url)
        return self._comments_url.value
    @property
    def commits(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._commits)
        return self._commits.value
    @property
    def commits_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._commits_url)
        return self._commits_url.value
    @property
    def created_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._created_at)
        return self._created_at.value
    @property
    def deletions(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._deletions)
        return self._deletions.value
    @property
    def diff_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._diff_url)
        return self._diff_url.value
    @property
    def head(self):
        """
        :type: :class:`github.PullRequestPart.PullRequestPart`
        """
        self._completeIfNotSet(self._head)
        return self._head.value
    @property
    def html_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._html_url)
        return self._html_url.value
    @property
    def id(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._id)
        return self._id.value
    @property
    def issue_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._issue_url)
        return self._issue_url.value
    @property
    def labels(self):
        """
        :type: list of :class:`github.Label.Label`
        """
        self._completeIfNotSet(self._labels)
        return self._labels.value
    @property
    def merge_commit_sha(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._merge_commit_sha)
        return self._merge_commit_sha.value
    @property
    def mergeable(self):
        """
        :type: bool
        """
        self._completeIfNotSet(self._mergeable)
        return self._mergeable.value
    @property
    def mergeable_state(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._mergeable_state)
        return self._mergeable_state.value
    @property
    def merged(self):
        """
        :type: bool
        """
        self._completeIfNotSet(self._merged)
        return self._merged.value
    @property
    def merged_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._merged_at)
        return self._merged_at.value
    @property
    def merged_by(self):
        """
        :type: :class:`github.NamedUser.NamedUser`
        """
        self._completeIfNotSet(self._merged_by)
        return self._merged_by.value
    @property
    def milestone(self):
        """
        :type: :class:`github.Milestone.Milestone`
        """
        self._completeIfNotSet(self._milestone)
        return self._milestone.value
    @property
    def number(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._number)
        return self._number.value
    @property
    def patch_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._patch_url)
        return self._patch_url.value
    @property
    def review_comment_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._review_comment_url)
        return self._review_comment_url.value
    @property
    def review_comments(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._review_comments)
        return self._review_comments.value
    @property
    def review_comments_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._review_comments_url)
        return self._review_comments_url.value
    @property
    def state(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._state)
        return self._state.value
    @property
    def title(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._title)
        return self._title.value
    @property
    def updated_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._updated_at)
        return self._updated_at.value
    @property
    def url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._url)
        return self._url.value
    @property
    def user(self):
        """
        :type: :class:`github.NamedUser.NamedUser`
        """
        self._completeIfNotSet(self._user)
        return self._user.value
    def as_issue(self):
        """
        :calls: `GET /repos/:owner/:repo/issues/:number <http://developer.github.com/v3/issues>`_
        :rtype: :class:`github.Issue.Issue`
        """
        headers, data = self._requester.requestJsonAndCheck(
            "GET",
            self.issue_url
        )
        return github.Issue.Issue(self._requester, headers, data, completed=True)
    def create_comment(self, body, commit_id, path, position):
        """
        Alias for :meth:`create_review_comment` (review comments, not issue comments).
        :calls: `POST /repos/:owner/:repo/pulls/:number/comments <http://developer.github.com/v3/pulls/comments>`_
        :param body: string
        :param commit_id: :class:`github.Commit.Commit`
        :param path: string
        :param position: integer
        :rtype: :class:`github.PullRequestComment.PullRequestComment`
        """
        return self.create_review_comment(body, commit_id, path, position)
    def create_review_comment(self, body, commit_id, path, position):
        """
        :calls: `POST /repos/:owner/:repo/pulls/:number/comments <http://developer.github.com/v3/pulls/comments>`_
        :param body: string
        :param commit_id: :class:`github.Commit.Commit`
        :param path: string
        :param position: integer
        :rtype: :class:`github.PullRequestComment.PullRequestComment`
        """
        assert isinstance(body, (str, unicode)), body
        assert isinstance(commit_id, github.Commit.Commit), commit_id
        assert isinstance(path, (str, unicode)), path
        assert isinstance(position, (int, long)), position
        post_parameters = {
            "body": body,
            "commit_id": commit_id._identity,
            "path": path,
            "position": position,
        }
        headers, data = self._requester.requestJsonAndCheck(
            "POST",
            self.url + "/comments",
            input=post_parameters
        )
        return github.PullRequestComment.PullRequestComment(self._requester, headers, data, completed=True)
    def create_issue_comment(self, body):
        """
        :calls: `POST /repos/:owner/:repo/issues/:number/comments <http://developer.github.com/v3/issues/comments>`_
        :param body: string
        :rtype: :class:`github.IssueComment.IssueComment`
        """
        assert isinstance(body, (str, unicode)), body
        post_parameters = {
            "body": body,
        }
        headers, data = self._requester.requestJsonAndCheck(
            "POST",
            self.issue_url + "/comments",
            input=post_parameters
        )
        return github.IssueComment.IssueComment(self._requester, headers, data, completed=True)
    def create_review(self, commit=github.GithubObject.NotSet, body=None, event=github.GithubObject.NotSet, comments=github.GithubObject.NotSet):
        """
        :calls: `POST /repos/:owner/:repo/pulls/:number/reviews <https://developer.github.com/v3/pulls/reviews/>`_
        :param commit: github.Commit.Commit
        :param body: string
        :param event: string
        :param comments: list
        :rtype: :class:`github.PullRequestReview.PullRequestReview`
        """
        assert commit is github.GithubObject.NotSet or isinstance(commit, github.Commit.Commit), commit
        # NOTE(review): `body` defaults to None but this assert requires a str,
        # so calling without a body raises AssertionError — confirm intended.
        assert isinstance(body, str), body
        assert event is github.GithubObject.NotSet or isinstance(event, str), event
        assert comments is github.GithubObject.NotSet or isinstance(comments, list), comments
        post_parameters = dict()
        if commit is not github.GithubObject.NotSet:
            post_parameters['commit_id'] = commit.sha
        post_parameters['body'] = body
        # GitHub requires an event; default to a plain COMMENT review.
        post_parameters['event'] = 'COMMENT' if event == github.GithubObject.NotSet else event
        if comments is github.GithubObject.NotSet:
            post_parameters['comments'] = []
        else:
            post_parameters['comments'] = comments
        headers, data = self._requester.requestJsonAndCheck(
            "POST",
            self.url + "/reviews",
            input=post_parameters
        )
        # NOTE(review): `data` here is the *review* payload, not a pull-request
        # payload — feeding it to _useAttributes looks suspicious; confirm.
        self._useAttributes(data)
        return github.PullRequestReview.PullRequestReview(self._requester, headers, data, completed=True)
    def create_review_request(self, reviewers=github.GithubObject.NotSet, team_reviewers=github.GithubObject.NotSet):
        """
        :calls: `POST /repos/:owner/:repo/pulls/:number/requested_reviewers <https://developer.github.com/v3/pulls/review_requests/>`_
        :param reviewers: list of strings
        :param team_reviewers: list of strings
        :rtype: None
        """
        post_parameters = dict()
        if reviewers is not github.GithubObject.NotSet:
            assert all(isinstance(element, (str, unicode)) for element in reviewers), reviewers
            post_parameters["reviewers"] = reviewers
        if team_reviewers is not github.GithubObject.NotSet:
            assert all(isinstance(element, (str, unicode)) for element in team_reviewers), team_reviewers
            post_parameters["team_reviewers"] = team_reviewers
        headers, data = self._requester.requestJsonAndCheck(
            "POST",
            self.url + "/requested_reviewers",
            input=post_parameters
        )
    def delete_review_request(self, reviewers=github.GithubObject.NotSet, team_reviewers=github.GithubObject.NotSet):
        """
        :calls: `DELETE /repos/:owner/:repo/pulls/:number/requested_reviewers <https://developer.github.com/v3/pulls/review_requests/>`_
        :param reviewers: list of strings
        :param team_reviewers: list of strings
        :rtype: None
        """
        post_parameters = dict()
        if reviewers is not github.GithubObject.NotSet:
            assert all(isinstance(element, (str, unicode)) for element in reviewers), reviewers
            post_parameters["reviewers"] = reviewers
        if team_reviewers is not github.GithubObject.NotSet:
            assert all(isinstance(element, (str, unicode)) for element in team_reviewers), team_reviewers
            post_parameters["team_reviewers"] = team_reviewers
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE",
            self.url + "/requested_reviewers",
            input=post_parameters
        )
    def edit(self, title=github.GithubObject.NotSet, body=github.GithubObject.NotSet, state=github.GithubObject.NotSet, base=github.GithubObject.NotSet):
        """
        :calls: `PATCH /repos/:owner/:repo/pulls/:number <http://developer.github.com/v3/pulls>`_
        :param title: string
        :param body: string
        :param state: string
        :param base: string
        :rtype: None
        """
        assert title is github.GithubObject.NotSet or isinstance(title, (str, unicode)), title
        assert body is github.GithubObject.NotSet or isinstance(body, (str, unicode)), body
        assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
        assert base is github.GithubObject.NotSet or isinstance(base, (str, unicode)), base
        post_parameters = dict()
        if title is not github.GithubObject.NotSet:
            post_parameters["title"] = title
        if body is not github.GithubObject.NotSet:
            post_parameters["body"] = body
        if state is not github.GithubObject.NotSet:
            post_parameters["state"] = state
        if base is not github.GithubObject.NotSet:
            post_parameters["base"] = base
        headers, data = self._requester.requestJsonAndCheck(
            "PATCH",
            self.url,
            input=post_parameters
        )
        # The PATCH response is the updated pull request; refresh our state.
        self._useAttributes(data)
    def get_comment(self, id):
        """
        Alias for :meth:`get_review_comment`.
        :calls: `GET /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_
        :param id: integer
        :rtype: :class:`github.PullRequestComment.PullRequestComment`
        """
        return self.get_review_comment(id)
    def get_review_comment(self, id):
        """
        :calls: `GET /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_
        :param id: integer
        :rtype: :class:`github.PullRequestComment.PullRequestComment`
        """
        assert isinstance(id, (int, long)), id
        # Review comments are addressed under the repo's /pulls collection,
        # hence the parent URL rather than this PR's URL.
        headers, data = self._requester.requestJsonAndCheck(
            "GET",
            self._parentUrl(self.url) + "/comments/" + str(id)
        )
        return github.PullRequestComment.PullRequestComment(self._requester, headers, data, completed=True)
    def get_comments(self):
        """
        Warning: this only returns review comments. For normal conversation comments, use get_issue_comments.
        :calls: `GET /repos/:owner/:repo/pulls/:number/comments <http://developer.github.com/v3/pulls/comments>`_
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
        """
        return self.get_review_comments()
    def get_review_comments(self, since=github.GithubObject.NotSet):
        """
        :calls: `GET /repos/:owner/:repo/pulls/:number/comments <http://developer.github.com/v3/pulls/comments>`_
        :param since: datetime.datetime format YYYY-MM-DDTHH:MM:SSZ
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
        """
        assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
        url_parameters = dict()
        if since is not github.GithubObject.NotSet:
            url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
        return github.PaginatedList.PaginatedList(
            github.PullRequestComment.PullRequestComment,
            self._requester,
            self.url + "/comments",
            url_parameters
        )
    def get_single_review_comments(self, id):
        """
        :calls: `GET /repos/:owner/:repo/pulls/:number/review/:id/comments <https://developer.github.com/v3/pulls/reviews/>`_
        :param id: integer
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
        """
        assert isinstance(id, (int, long)), id
        return github.PaginatedList.PaginatedList(
            github.PullRequestComment.PullRequestComment,
            self._requester,
            self.url + "/reviews/" + str(id) + "/comments",
            None
        )
    def get_commits(self):
        """
        :calls: `GET /repos/:owner/:repo/pulls/:number/commits <http://developer.github.com/v3/pulls>`_
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Commit.Commit`
        """
        return github.PaginatedList.PaginatedList(
            github.Commit.Commit,
            self._requester,
            self.url + "/commits",
            None
        )
    def get_files(self):
        """
        :calls: `GET /repos/:owner/:repo/pulls/:number/files <http://developer.github.com/v3/pulls>`_
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.File.File`
        """
        return github.PaginatedList.PaginatedList(
            github.File.File,
            self._requester,
            self.url + "/files",
            None
        )
    def get_issue_comment(self, id):
        """
        :calls: `GET /repos/:owner/:repo/issues/comments/:id <http://developer.github.com/v3/issues/comments>`_
        :param id: integer
        :rtype: :class:`github.IssueComment.IssueComment`
        """
        assert isinstance(id, (int, long)), id
        headers, data = self._requester.requestJsonAndCheck(
            "GET",
            self._parentUrl(self.issue_url) + "/comments/" + str(id)
        )
        return github.IssueComment.IssueComment(self._requester, headers, data, completed=True)
    def get_issue_comments(self):
        """
        :calls: `GET /repos/:owner/:repo/issues/:number/comments <http://developer.github.com/v3/issues/comments>`_
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.IssueComment.IssueComment`
        """
        return github.PaginatedList.PaginatedList(
            github.IssueComment.IssueComment,
            self._requester,
            self.issue_url + "/comments",
            None
        )
    def get_review(self, id):
        """
        :calls: `GET /repos/:owner/:repo/pulls/:number/reviews/:id <https://developer.github.com/v3/pulls/reviews>`_
        :param id: integer
        :rtype: :class:`github.PullRequestReview.PullRequestReview`
        """
        assert isinstance(id, (int, long)), id
        headers, data = self._requester.requestJsonAndCheck(
            "GET",
            self.url + "/reviews/" + str(id),
        )
        return github.PullRequestReview.PullRequestReview(self._requester, headers, data, completed=True)
    def get_reviews(self):
        """
        :calls: `GET /repos/:owner/:repo/pulls/:number/reviews <https://developer.github.com/v3/pulls/reviews/>`_
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestReview.PullRequestReview`
        """
        return github.PaginatedList.PaginatedList(
            github.PullRequestReview.PullRequestReview,
            self._requester,
            self.url + "/reviews",
            None,
        )
    def get_review_requests(self):
        """
        :calls: `GET /repos/:owner/:repo/pulls/:number/requested_reviewers <https://developer.github.com/v3/pulls/review_requests/>`_
        :rtype: tuple of :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser` and of :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team`
        """
        # One endpoint returns both user and team reviewers; expose them as
        # two paginated lists keyed by their list_item in the response.
        return (
            github.PaginatedList.PaginatedList(
                github.NamedUser.NamedUser,
                self._requester,
                self.url + "/requested_reviewers",
                None,
                list_item='users'
            ),
            github.PaginatedList.PaginatedList(
                github.Team.Team,
                self._requester,
                self.url + "/requested_reviewers",
                None,
                list_item='teams'
            )
        )
    def get_labels(self):
        """
        :calls: `GET /repos/:owner/:repo/issues/:number/labels <http://developer.github.com/v3/issues/labels>`_
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Label.Label`
        """
        return github.PaginatedList.PaginatedList(
            github.Label.Label,
            self._requester,
            self.issue_url + "/labels",
            None
        )
    def add_to_labels(self, *labels):
        """
        :calls: `POST /repos/:owner/:repo/issues/:number/labels <http://developer.github.com/v3/issues/labels>`_
        :param label: :class:`github.Label.Label` or string
        :rtype: None
        """
        assert all(isinstance(element, (github.Label.Label, str, unicode)) for element in labels), labels
        post_parameters = [label.name if isinstance(label, github.Label.Label) else label for label in labels]
        headers, data = self._requester.requestJsonAndCheck(
            "POST",
            self.issue_url + "/labels",
            input=post_parameters
        )
    def delete_labels(self):
        """
        :calls: `DELETE /repos/:owner/:repo/issues/:number/labels <http://developer.github.com/v3/issues/labels>`_
        :rtype: None
        """
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE",
            self.issue_url + "/labels"
        )
    def remove_from_labels(self, label):
        """
        :calls: `DELETE /repos/:owner/:repo/issues/:number/labels/:name <http://developer.github.com/v3/issues/labels>`_
        :param label: :class:`github.Label.Label` or string
        :rtype: None
        """
        assert isinstance(label, (github.Label.Label, str, unicode)), label
        if isinstance(label, github.Label.Label):
            label = label._identity
        else:
            # URL-escape bare label names (urllib.quote is the Python 2 API).
            label = urllib.quote(label)
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE",
            self.issue_url + "/labels/" + label
        )
    def set_labels(self, *labels):
        """
        :calls: `PUT /repos/:owner/:repo/issues/:number/labels <http://developer.github.com/v3/issues/labels>`_
        :param labels: list of :class:`github.Label.Label` or strings
        :rtype: None
        """
        assert all(isinstance(element, (github.Label.Label, str, unicode)) for element in labels), labels
        post_parameters = [label.name if isinstance(label, github.Label.Label) else label for label in labels]
        headers, data = self._requester.requestJsonAndCheck(
            "PUT",
            self.issue_url + "/labels",
            input=post_parameters
        )
    def is_merged(self):
        """
        :calls: `GET /repos/:owner/:repo/pulls/:number/merge <http://developer.github.com/v3/pulls>`_
        :rtype: bool
        """
        # The merge endpoint answers with 204 (merged) / 404 (not merged),
        # so the raw requestJson is used instead of requestJsonAndCheck.
        status, headers, data = self._requester.requestJson(
            "GET",
            self.url + "/merge"
        )
        return status == 204
    def merge(self, commit_message=github.GithubObject.NotSet, commit_title=github.GithubObject.NotSet, merge_method=github.GithubObject.NotSet, sha=github.GithubObject.NotSet):
        """
        :calls: `PUT /repos/:owner/:repo/pulls/:number/merge <http://developer.github.com/v3/pulls>`_
        :param commit_message: string
        :param commit_title: string
        :param merge_method: string
        :param sha: string
        :rtype: :class:`github.PullRequestMergeStatus.PullRequestMergeStatus`
        """
        assert commit_message is github.GithubObject.NotSet or isinstance(commit_message, (str, unicode)), commit_message
        assert commit_title is github.GithubObject.NotSet or isinstance(commit_title, (str, unicode)), commit_title
        assert merge_method is github.GithubObject.NotSet or isinstance(merge_method, (str, unicode)), merge_method
        assert sha is github.GithubObject.NotSet or isinstance(sha, (str, unicode)), sha
        post_parameters = dict()
        if commit_message is not github.GithubObject.NotSet:
            post_parameters["commit_message"] = commit_message
        if commit_title is not github.GithubObject.NotSet:
            post_parameters["commit_title"] = commit_title
        if merge_method is not github.GithubObject.NotSet:
            post_parameters["merge_method"] = merge_method
        if sha is not github.GithubObject.NotSet:
            post_parameters["sha"] = sha
        headers, data = self._requester.requestJsonAndCheck(
            "PUT",
            self.url + "/merge",
            input=post_parameters
        )
        return github.PullRequestMergeStatus.PullRequestMergeStatus(self._requester, headers, data, completed=True)
    def _initAttributes(self):
        # All attributes start as NotSet; properties above complete them
        # on demand.
        self._additions = github.GithubObject.NotSet
        self._assignee = github.GithubObject.NotSet
        self._assignees = github.GithubObject.NotSet
        self._base = github.GithubObject.NotSet
        self._body = github.GithubObject.NotSet
        self._changed_files = github.GithubObject.NotSet
        self._closed_at = github.GithubObject.NotSet
        self._comments = github.GithubObject.NotSet
        self._comments_url = github.GithubObject.NotSet
        self._commits = github.GithubObject.NotSet
        self._commits_url = github.GithubObject.NotSet
        self._created_at = github.GithubObject.NotSet
        self._deletions = github.GithubObject.NotSet
        self._diff_url = github.GithubObject.NotSet
        self._head = github.GithubObject.NotSet
        self._html_url = github.GithubObject.NotSet
        self._id = github.GithubObject.NotSet
        self._issue_url = github.GithubObject.NotSet
        self._labels = github.GithubObject.NotSet
        self._merge_commit_sha = github.GithubObject.NotSet
        self._mergeable = github.GithubObject.NotSet
        self._mergeable_state = github.GithubObject.NotSet
        self._merged = github.GithubObject.NotSet
        self._merged_at = github.GithubObject.NotSet
        self._merged_by = github.GithubObject.NotSet
        self._milestone = github.GithubObject.NotSet
        self._number = github.GithubObject.NotSet
        self._patch_url = github.GithubObject.NotSet
        self._review_comment_url = github.GithubObject.NotSet
        self._review_comments = github.GithubObject.NotSet
        self._review_comments_url = github.GithubObject.NotSet
        self._state = github.GithubObject.NotSet
        self._title = github.GithubObject.NotSet
        self._updated_at = github.GithubObject.NotSet
        self._url = github.GithubObject.NotSet
        self._user = github.GithubObject.NotSet
    def _useAttributes(self, attributes):
        # Populate attributes from a (possibly partial) API payload.
        if "additions" in attributes:  # pragma no branch
            self._additions = self._makeIntAttribute(attributes["additions"])
        if "assignee" in attributes:  # pragma no branch
            self._assignee = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["assignee"])
        if "assignees" in attributes:  # pragma no branch
            self._assignees = self._makeListOfClassesAttribute(github.NamedUser.NamedUser, attributes["assignees"])
        elif "assignee" in attributes:
            # Fallback for payloads that carry only the single "assignee"
            # field: synthesize a 0/1-element assignees list from it.
            if attributes["assignee"] is not None:
                self._assignees = self._makeListOfClassesAttribute(github.NamedUser.NamedUser, [attributes["assignee"]])
            else:
                self._assignees = self._makeListOfClassesAttribute(github.NamedUser.NamedUser, [])
        if "base" in attributes:  # pragma no branch
            self._base = self._makeClassAttribute(github.PullRequestPart.PullRequestPart, attributes["base"])
        if "body" in attributes:  # pragma no branch
            self._body = self._makeStringAttribute(attributes["body"])
        if "changed_files" in attributes:  # pragma no branch
            self._changed_files = self._makeIntAttribute(attributes["changed_files"])
        if "closed_at" in attributes:  # pragma no branch
            self._closed_at = self._makeDatetimeAttribute(attributes["closed_at"])
        if "comments" in attributes:  # pragma no branch
            self._comments = self._makeIntAttribute(attributes["comments"])
        if "comments_url" in attributes:  # pragma no branch
            self._comments_url = self._makeStringAttribute(attributes["comments_url"])
        if "commits" in attributes:  # pragma no branch
            self._commits = self._makeIntAttribute(attributes["commits"])
        if "commits_url" in attributes:  # pragma no branch
            self._commits_url = self._makeStringAttribute(attributes["commits_url"])
        if "created_at" in attributes:  # pragma no branch
            self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
        if "deletions" in attributes:  # pragma no branch
            self._deletions = self._makeIntAttribute(attributes["deletions"])
        if "diff_url" in attributes:  # pragma no branch
            self._diff_url = self._makeStringAttribute(attributes["diff_url"])
        if "head" in attributes:  # pragma no branch
            self._head = self._makeClassAttribute(github.PullRequestPart.PullRequestPart, attributes["head"])
        if "html_url" in attributes:  # pragma no branch
            self._html_url = self._makeStringAttribute(attributes["html_url"])
        if "id" in attributes:  # pragma no branch
            self._id = self._makeIntAttribute(attributes["id"])
        if "issue_url" in attributes:  # pragma no branch
            self._issue_url = self._makeStringAttribute(attributes["issue_url"])
        if "labels" in attributes:  # pragma no branch
            self._labels = self._makeListOfClassesAttribute(github.Label.Label, attributes["labels"])
        if "merge_commit_sha" in attributes:  # pragma no branch
            self._merge_commit_sha = self._makeStringAttribute(attributes["merge_commit_sha"])
        if "mergeable" in attributes:  # pragma no branch
            self._mergeable = self._makeBoolAttribute(attributes["mergeable"])
        if "mergeable_state" in attributes:  # pragma no branch
            self._mergeable_state = self._makeStringAttribute(attributes["mergeable_state"])
        if "merged" in attributes:  # pragma no branch
            self._merged = self._makeBoolAttribute(attributes["merged"])
        if "merged_at" in attributes:  # pragma no branch
            self._merged_at = self._makeDatetimeAttribute(attributes["merged_at"])
        if "merged_by" in attributes:  # pragma no branch
            self._merged_by = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["merged_by"])
        if "milestone" in attributes:  # pragma no branch
            self._milestone = self._makeClassAttribute(github.Milestone.Milestone, attributes["milestone"])
        if "number" in attributes:  # pragma no branch
            self._number = self._makeIntAttribute(attributes["number"])
        if "patch_url" in attributes:  # pragma no branch
            self._patch_url = self._makeStringAttribute(attributes["patch_url"])
        if "review_comment_url" in attributes:  # pragma no branch
            self._review_comment_url = self._makeStringAttribute(attributes["review_comment_url"])
        if "review_comments" in attributes:  # pragma no branch
            self._review_comments = self._makeIntAttribute(attributes["review_comments"])
        if "review_comments_url" in attributes:  # pragma no branch
            self._review_comments_url = self._makeStringAttribute(attributes["review_comments_url"])
        if "state" in attributes:  # pragma no branch
            self._state = self._makeStringAttribute(attributes["state"])
        if "title" in attributes:  # pragma no branch
            self._title = self._makeStringAttribute(attributes["title"])
        if "updated_at" in attributes:  # pragma no branch
            self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
        if "url" in attributes:  # pragma no branch
            self._url = self._makeStringAttribute(attributes["url"])
        if "user" in attributes:  # pragma no branch
            self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"])
| |
import base64
import os
import re
import logging
from datetime import datetime
from uuid import uuid4
from django.conf import settings
fmt = getattr(settings, 'LOG_FORMAT', None)
lvl = getattr(settings, 'LOG_LEVEL', logging.DEBUG)
logging.basicConfig(format=fmt, level=lvl)
from watson import search as watson
from auditlog.registry import auditlog
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.core.validators import RegexValidator
from django.db import models
from django.db.models import Q
from django.utils.timezone import now
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToCover
from django.utils import timezone
from pytz import all_timezones
from tagging.registry import register as tag_register
from multiselectfield import MultiSelectField
import hashlib
class System_Settings(models.Model):
    """Global application settings: deduplication, JIRA/Slack/HipChat/mail
    integration, severity naming, URL prefix and time zone.

    NOTE(review): other code fetches this via ``System_Settings.objects.get()``,
    which assumes exactly one row exists -- confirm one is seeded on install.
    """
    enable_deduplication = models.BooleanField(
        default=False,
        blank=False,
        verbose_name='Deduplicate findings',
        help_text='With this setting turned on, Dojo deduplicates findings by comparing endpoints, '
                  'cwe fields, and titles. '
                  'If two findings share a URL and have the same CWE or title, Dojo marks the '
                  'less recent finding as a duplicate. When deduplication is enabled, a list of '
                  'deduplicated findings is added to the engagement view.')
    # NOTE(review): field name is misspelled ("dupulicates"); renaming would
    # require a schema migration and touching every caller, so it is only
    # documented here.
    delete_dupulicates = models.BooleanField(default=False, blank=False)
    # NOTE(review): the concatenated help_text is missing spaces between the
    # string fragments ("...ifa single...", "...will bedeleted."); left
    # byte-identical because strings are runtime-visible text.
    max_dupes = models.IntegerField(blank=True, null=True, verbose_name='Max Duplicates',
                                    help_text='When enabled, if'
                                              'a single issue reaches the maximum number of duplicates, the oldest will be'
                                              'deleted.')
    enable_jira = models.BooleanField(default=False, verbose_name='Enable JIRA integration', blank=False)
    # Slack notification settings (token obtained from api.slack.com).
    enable_slack_notifications = models.BooleanField(default=False, verbose_name='Enable Slack notifications', blank=False)
    slack_channel = models.CharField(max_length=100, default='', blank=True)
    slack_token = models.CharField(max_length=100, default='', blank=True, help_text='Token required for interacting with Slack. Get one at https://api.slack.com/tokens')
    slack_username = models.CharField(max_length=100, default='', blank=True)
    # HipChat notification settings.
    enable_hipchat_notifications = models.BooleanField(default=False, verbose_name='Enable HipChat notifications', blank=False)
    hipchat_site = models.CharField(max_length=100, default='', blank=True, help_text='The full fqdn of your hipchat site, e.g. "yoursite.hipchat.com"')
    hipchat_channel = models.CharField(max_length=100, default='', blank=True)
    hipchat_token = models.CharField(max_length=100, default='', blank=True, help_text='Token required for interacting with HipChat. Get one at https://patriktest.hipchat.com/addons/')
    # E-mail notification settings.
    enable_mail_notifications = models.BooleanField(default=False, blank=False)
    mail_notifications_from = models.CharField(max_length=200, default='from@example.com', blank=True)
    mail_notifications_to = models.CharField(max_length=200, default='', blank=True)
    # When on, severities display as S0..S4 instead of Critical..Info
    # (see Finding.severity_display).
    s_finding_severity_naming = models.BooleanField(
        default=False,
        blank=False,
        help_text='With this setting turned on, Dojo will display S0, S1, S2, etc '
                  'in most places, whereas if turned off Critical, High, Medium, etc will be displayed.')
    false_positive_history = models.BooleanField(default=False)
    url_prefix = models.CharField(max_length=300, default='', blank=True)
    team_name = models.CharField(max_length=100, default='', blank=True)
    time_zone = models.CharField(max_length=50,
                                 choices=[(tz, tz) for tz in all_timezones],
                                 default='UTC', blank=False)
def get_current_date():
    """Return today's date in the currently active time zone.

    Used as the ``default`` callable for several DateField definitions.
    """
    current = timezone.now()
    return current.date()
def get_current_datetime():
    """Return the current timezone-aware datetime.

    Used as the ``default`` callable for several DateTimeField definitions.
    """
    moment = timezone.now()
    return moment
class Dojo_User(User):
    """Proxy of ``django.contrib.auth.models.User`` adding display helpers
    for convenience and the UI (no extra database table)."""

    class Meta:
        proxy = True

    def get_full_name(self):
        """Return ``"first last (username)"``, stripped of surrounding spaces."""
        display = '%s %s (%s)' % (self.first_name,
                                  self.last_name,
                                  self.username)
        return display.strip()

    def __unicode__(self):
        return self.get_full_name()
class UserContactInfo(models.Model):
    """Optional per-user contact details (phone numbers, chat handles),
    attached one-to-one to the auth User."""
    user = models.OneToOneField(User)
    title = models.CharField(blank=True, null=True, max_length=150)
    # Validator shared by both phone fields: E.164-ish, up to 15 digits.
    phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$',
                                 message="Phone number must be entered in the format: '+999999999'. "
                                         "Up to 15 digits allowed.")
    phone_number = models.CharField(validators=[phone_regex], blank=True, max_length=15,
                                    help_text="Phone number must be entered in the format: '+999999999'. "
                                              "Up to 15 digits allowed.")
    cell_number = models.CharField(validators=[phone_regex], blank=True, max_length=15,
                                   help_text="Phone number must be entered in the format: '+999999999'. "
                                             "Up to 15 digits allowed.")
    twitter_username = models.CharField(blank=True, null=True, max_length=150)
    github_username = models.CharField(blank=True, null=True, max_length=150)
    slack_username = models.CharField(blank=True, null=True, max_length=150)
    hipchat_username = models.CharField(blank=True, null=True, max_length=150)
class Contact(models.Model):
    """External contact person (e.g. an engagement requester)."""
    name = models.CharField(max_length=100)
    email = models.EmailField()
    team = models.CharField(max_length=100)
    is_admin = models.BooleanField(default=False)
    is_globally_read_only = models.BooleanField(default=False)
    # NOTE(review): no default and no auto_now -- callers must set this
    # explicitly before save or an IntegrityError results; confirm intent.
    updated = models.DateTimeField(editable=False)
class Product_Type(models.Model):
    """Category grouping Products, with health/severity roll-up helpers."""
    name = models.CharField(max_length=300)
    critical_product = models.BooleanField(default=False)
    key_product = models.BooleanField(default=False)

    def critical_present(self):
        """Return True if any Critical finding exists under this type.

        Falls through (returns None) otherwise, matching the historical
        truthy/falsy contract.
        """
        # exists() issues a cheap EXISTS query instead of a full COUNT.
        if Finding.objects.filter(test__engagement__product__prod_type=self,
                                  severity='Critical').exists():
            return True

    def high_present(self):
        """Return True if any High finding exists under this type (else None)."""
        if Finding.objects.filter(test__engagement__product__prod_type=self,
                                  severity='High').exists():
            return True

    def calc_health(self):
        """Heuristic health score, clamped to the range 5..100.

        Starts at 100; any Critical finding drops it to 40, then -5 per extra
        Critical; any High drops a clean score to 60, then -2 per extra High.
        """
        # Hoist the counts: every .count() call is a separate database query,
        # and the original evaluated each queryset's count twice.
        high_count = Finding.objects.filter(test__engagement__product__prod_type=self,
                                            severity='High').count()
        critical_count = Finding.objects.filter(test__engagement__product__prod_type=self,
                                                severity='Critical').count()
        health = 100
        if critical_count > 0:
            health = 40
            health = health - ((critical_count - 1) * 5)
        if high_count > 0:
            if health == 100:
                health = 60
            health = health - ((high_count - 1) * 2)
        if health < 5:
            return 5
        else:
            return health

    def findings_count(self):
        """Count open (unmitigated, verified, non-duplicate, in-scope)
        findings of severity Low..Critical under this type."""
        return Finding.objects.filter(mitigated__isnull=True,
                                      verified=True,
                                      false_p=False,
                                      duplicate=False,
                                      out_of_scope=False,
                                      test__engagement__product__prod_type=self).filter(Q(severity="Critical") |
                                                                                        Q(severity="High") |
                                                                                        Q(severity="Medium") |
                                                                                        Q(severity="Low")).count()

    def products_count(self):
        """Number of Products assigned to this type."""
        return Product.objects.filter(prod_type=self).count()

    def __unicode__(self):
        return self.name

    def get_breadcrumbs(self):
        """Single-entry breadcrumb list pointing at this type's edit view."""
        bc = [{'title': self.__unicode__(),
               'url': reverse('edit_product_type', args=(self.id,))}]
        return bc
class Product_Line(models.Model):
    """Simple named/described product line (no relations defined here)."""
    name = models.CharField(max_length=300)
    description = models.CharField(max_length=2000)

    def __unicode__(self):
        return self.name
class Report_Type(models.Model):
    """Lookup table of report type names (referenced by Engagement)."""
    name = models.CharField(max_length=300)
class Test_Type(models.Model):
    """Lookup table of test type names (referenced by Test)."""
    name = models.CharField(max_length=200)

    def __unicode__(self):
        return self.name

    def get_breadcrumbs(self):
        """Single breadcrumb entry; test types have no dedicated view (url None)."""
        return [{'title': self.__unicode__(), 'url': None}]
class Product(models.Model):
    """An application/system under test; owns Engagements and roll-up metrics."""
    name = models.CharField(max_length=300)
    description = models.CharField(max_length=4000)
    '''
    The following three fields are deprecated and no longer in use.
    They remain in model for backwards compatibility and will be removed
    in a future release. prod_manager, tech_contact, manager
    The admin script migrate_product_contacts should be used to migrate data from
    these fields to their replacements. ./manage.py migrate_product_contacts
    '''
    prod_manager = models.CharField(default=0, max_length=200)  # unused
    tech_contact = models.CharField(default=0, max_length=200)  # unused
    manager = models.CharField(default=0, max_length=200)  # unused
    product_manager = models.ForeignKey(Dojo_User, null=True, blank=True, related_name='product_manager')
    technical_contact = models.ForeignKey(Dojo_User, null=True, blank=True, related_name='technical_contact')
    team_manager = models.ForeignKey(Dojo_User, null=True, blank=True, related_name='team_manager')
    created = models.DateTimeField(editable=False, null=True, blank=True)
    prod_type = models.ForeignKey(Product_Type, related_name='prod_type',
                                  null=True, blank=True)
    updated = models.DateTimeField(editable=False, null=True, blank=True)
    tid = models.IntegerField(default=0, editable=False)
    authorized_users = models.ManyToManyField(User, blank=True)

    def __unicode__(self):
        return self.name

    class Meta:
        ordering = ('name',)

    @property
    def findings_count(self):
        """Count of open, verified, non-duplicate, in-scope findings."""
        return Finding.objects.filter(mitigated__isnull=True,
                                      verified=True,
                                      false_p=False,
                                      duplicate=False,
                                      out_of_scope=False,
                                      test__engagement__product=self).count()

    @property
    def endpoint_count(self):
        """Number of distinct hosts (port stripped) among endpoints attached
        to active, verified, unmitigated findings of this product."""
        endpoints = Endpoint.objects.filter(finding__test__engagement__product=self,
                                            finding__active=True,
                                            finding__verified=True,
                                            finding__mitigated__isnull=True)
        # A set gives O(1) dedup; the original also built an unused id list.
        hosts = set()
        for e in endpoints:
            if ":" in e.host:
                hosts.add(e.host[:e.host.index(':')])
            else:
                hosts.add(e.host)
        return len(hosts)

    def open_findings(self, start_date=None, end_date=None):
        """Per-severity counts of open findings dated within [start_date, end_date].

        Returns {} when either bound is missing; otherwise a dict with keys
        'Critical', 'High', 'Medium', 'Low' and 'Total'.
        """
        if start_date is None or end_date is None:
            return {}
        # One parameterized query per severity replaces four copy-pasted blocks.
        counts = {}
        for severity in ('Critical', 'High', 'Medium', 'Low'):
            counts[severity] = Finding.objects.filter(test__engagement__product=self,
                                                      mitigated__isnull=True,
                                                      verified=True,
                                                      false_p=False,
                                                      duplicate=False,
                                                      out_of_scope=False,
                                                      severity=severity,
                                                      date__range=[start_date,
                                                                   end_date]).count()
        counts['Total'] = (counts['Critical'] + counts['High'] +
                           counts['Medium'] + counts['Low'])
        return counts

    def get_breadcrumbs(self):
        """Single-entry breadcrumb list pointing at this product's view."""
        bc = [{'title': self.__unicode__(),
               'url': reverse('view_product', args=(self.id,))}]
        return bc
class ScanSettings(models.Model):
    """Per-product configuration for scheduled scans (targets, cadence,
    notification e-mail, protocol)."""
    product = models.ForeignKey(Product, default=1, editable=False)
    addresses = models.TextField(default="none")
    user = models.ForeignKey(User, editable=False)
    date = models.DateTimeField(editable=False, blank=True,
                                default=get_current_datetime)
    frequency = models.CharField(max_length=10000, null=True,
                                 blank=True)
    email = models.CharField(max_length=512)
    protocol = models.CharField(max_length=10, default='TCP')

    def addresses_as_list(self):
        """Split the comma-separated ``addresses`` field into stripped entries."""
        if not self.addresses:
            return []
        return [entry.strip() for entry in self.addresses.split(',')]

    def get_breadcrumbs(self):
        """Product breadcrumbs plus a trailing 'Scan Settings' crumb."""
        crumbs = self.product.get_breadcrumbs()
        crumbs.append({'title': "Scan Settings",
                       'url': reverse('view_scan_settings',
                                      args=(self.product.id, self.id,))})
        return crumbs
"""
Modified by Fatimah and Micheal
removed ip_scans field
"""
class Scan(models.Model):
    """A single run executed under a ScanSettings configuration."""
    scan_settings = models.ForeignKey(ScanSettings, default=1, editable=False)
    date = models.DateTimeField(editable=False, blank=True,
                                default=get_current_datetime)
    protocol = models.CharField(max_length=10, default='TCP')
    status = models.CharField(max_length=10, default='Pending', editable=False)
    # Marks this scan as the baseline other scans are compared against.
    baseline = models.BooleanField(default=False,
                                   verbose_name="Current Baseline")

    def __unicode__(self):
        # e.g. "TCP Scan 2017-01-01 00:00:00"
        return self.scan_settings.protocol + " Scan " + str(self.date)

    def get_breadcrumbs(self):
        """Scan-settings breadcrumbs plus a crumb for this scan's view."""
        bc = self.scan_settings.get_breadcrumbs()
        bc += [{'title': self.__unicode__(),
                'url': reverse('view_scan', args=(self.id,))}]
        return bc
"""
Modified by Fatimah and Micheal
Changed services from a ManyToMany field to a formatted string
"port,protocol,status"
Added scan_id
"""
class IPScan(models.Model):
    """Per-address result of a Scan; ``services`` holds a formatted
    "port,protocol,status" string (see module note above this class)."""
    address = models.TextField(editable=False, default="none")
    services = models.CharField(max_length=800, null=True)
    scan = models.ForeignKey(Scan, default=1, editable=False)
class Engagement_Type(models.Model):
    """Lookup table of engagement type names (referenced by Engagement)."""
    name = models.CharField(max_length=200)
class Engagement(models.Model):
    """A testing engagement against a Product: scheduling, workflow flags
    (threat model / API test / pen test / check list), status and progress."""
    name = models.CharField(max_length=300, null=True, blank=True)
    description = models.CharField(max_length=2000, null=True, blank=True)
    version = models.CharField(max_length=100, null=True, blank=True)
    eng_type = models.ForeignKey(Engagement_Type, null=True, blank=True)
    first_contacted = models.DateField(null=True, blank=True)
    # Start/end are required; __unicode__ relies on target_start being set.
    target_start = models.DateField(null=False, blank=False)
    target_end = models.DateField(null=False, blank=False)
    lead = models.ForeignKey(User, editable=True, null=True)
    requester = models.ForeignKey(Contact, null=True, blank=True)
    reason = models.CharField(max_length=2000, null=True, blank=True)
    report_type = models.ForeignKey(Report_Type, null=True, blank=True)
    product = models.ForeignKey(Product)
    updated = models.DateTimeField(editable=False, null=True, blank=True)
    active = models.BooleanField(default=True, editable=False)
    test_strategy = models.URLField(editable=True, blank=True, null=True)
    # Which testing activities this engagement includes.
    threat_model = models.BooleanField(default=True)
    api_test = models.BooleanField(default=True)
    pen_test = models.BooleanField(default=True)
    check_list = models.BooleanField(default=True)
    status = models.CharField(editable=True, max_length=2000, default='',
                              null=True,
                              choices=(('In Progress', 'In Progress'),
                                       ('On Hold', 'On Hold'),
                                       ('Completed', 'Completed')))
    # Workflow step marker; advanced by views, not edited directly.
    progress = models.CharField(max_length=100,
                                default='threat_model', editable=False)
    tmodel_path = models.CharField(max_length=1000, default='none',
                                   editable=False, blank=True, null=True)
    risk_path = models.CharField(max_length=1000, default='none',
                                 editable=False, blank=True, null=True)
    risk_acceptance = models.ManyToManyField("Risk_Acceptance",
                                             default=None,
                                             editable=False,
                                             blank=True)
    done_testing = models.BooleanField(default=False, editable=False)

    class Meta:
        # Most recently started engagements first.
        ordering = ['-target_start']

    def __unicode__(self):
        return "Engagement: %s (%s)" % (self.name if self.name else '',
                                        self.target_start.strftime(
                                            "%b %d, %Y"))

    def get_breadcrumbs(self):
        """Product breadcrumbs plus a crumb for this engagement's view."""
        bc = self.product.get_breadcrumbs()
        bc += [{'title': self.__unicode__(),
                'url': reverse('view_engagement', args=(self.id,))}]
        return bc
class CWE(models.Model):
    """Common Weakness Enumeration entry (number, description, reference URL)."""
    url = models.CharField(max_length=1000)
    description = models.CharField(max_length=2000)
    number = models.IntegerField()
class Endpoint(models.Model):
    """A network endpoint (protocol://host:port/path?query#fragment) that
    findings can be attached to."""
    protocol = models.CharField(null=True, blank=True, max_length=10,
                                help_text="The communication protocol such as 'http', 'ftp', etc.")
    host = models.CharField(null=True, blank=True, max_length=500,
                            help_text="The host name or IP address, you can also include the port number. For example"
                                      "'127.0.0.1', '127.0.0.1:8080', 'localhost', 'yourdomain.com'.")
    fqdn = models.CharField(null=True, blank=True, max_length=500)
    port = models.IntegerField(null=True, blank=True, help_text="The network port associated with the endpoint.")
    path = models.CharField(null=True, blank=True, max_length=500,
                            help_text="The location of the resource, it should start with a '/'. For example"
                                      "/endpoint/420/edit")
    query = models.CharField(null=True, blank=True, max_length=5000,
                             help_text="The query string, the question mark should be omitted."
                                       "For example 'group=4&team=8'")
    fragment = models.CharField(null=True, blank=True, max_length=500,
                                help_text="The fragment identifier which follows the hash mark. The hash mark should "
                                          "be omitted. For example 'section-13', 'paragraph-2'.")
    product = models.ForeignKey(Product, null=True, blank=True, )

    class Meta:
        ordering = ['product', 'protocol', 'host', 'path', 'query', 'fragment']

    def __unicode__(self):
        # Rebuild a URL string from the parts, mirroring urlparse.urlunsplit
        # semantics ('//' prefix only for schemes that use a netloc).
        from urlparse import uses_netloc
        netloc = self.host
        fqdn = self.fqdn
        port = self.port
        scheme = self.protocol
        url = self.path if self.path else ''
        query = self.query
        fragment = self.fragment
        if port:
            netloc += ':%s' % port
        if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
            if url and url[:1] != '/': url = '/' + url
            if scheme and scheme in uses_netloc and url[:2] != '//':
                url = '//' + (netloc or '') + url
            else:
                url = (netloc or '') + url
        if scheme:
            url = scheme + ':' + url
        if query:
            url = url + '?' + query
        if fragment:
            url = url + '#' + fragment
        return url

    def __eq__(self, other):
        # Endpoints compare equal when their rendered URLs match.
        # NOTE(review): no matching __hash__ override; fine on Python 2 where
        # the inherited __hash__ is kept.
        if isinstance(other, Endpoint):
            return self.__unicode__() == other.__unicode__()
        else:
            return NotImplemented

    def finding_count(self):
        """Count active, verified, in-scope findings on any endpoint sharing
        this endpoint's host (with or without a port suffix)."""
        host = self.host_no_port
        # Bug fix: escape the host before embedding it in the regex -- hosts
        # contain '.' (any-char in a regex), which made the match too broad.
        endpoints = Endpoint.objects.filter(host__regex="^" + re.escape(host) + ":?",
                                            product=self.product).distinct()
        findings = Finding.objects.filter(endpoints__in=endpoints,
                                          active=True,
                                          verified=True,
                                          out_of_scope=False).distinct()
        return findings.count()

    def active_findings(self):
        """Open (active, verified, unmitigated, non-duplicate) findings on any
        endpoint sharing this endpoint's host, ordered by severity."""
        host = self.host_no_port
        # Same regex-escaping fix as finding_count.
        endpoints = Endpoint.objects.filter(host__regex="^" + re.escape(host) + ":?",
                                            product=self.product).distinct()
        return Finding.objects.filter(endpoints__in=endpoints,
                                      active=True,
                                      verified=True,
                                      mitigated__isnull=True,
                                      false_p=False,
                                      duplicate=False).distinct().order_by('numerical_severity')

    def get_breadcrumbs(self):
        """Product breadcrumbs plus a crumb for this endpoint's view."""
        bc = self.product.get_breadcrumbs()
        bc += [{'title': self.host_no_port,
                'url': reverse('view_endpoint', args=(self.id,))}]
        return bc

    @staticmethod
    def from_uri(uri):
        # NOTE(review): stub -- returns an empty Endpoint regardless of uri.
        return Endpoint()

    @property
    def host_no_port(self):
        """The host with any ':port' suffix stripped."""
        if ":" in self.host:
            return self.host[:self.host.index(":")]
        else:
            return self.host
class Notes(models.Model):
    """A timestamped, authored note attachable to tests/findings/etc."""
    entry = models.CharField(max_length=2400)
    date = models.DateTimeField(null=False, editable=False,
                                default=get_current_datetime)
    author = models.ForeignKey(User, editable=False)

    class Meta:
        # Newest notes first.
        ordering = ['-date']

    def __unicode__(self):
        return self.entry
class Development_Environment(models.Model):
    """Lookup table of environments a Test can target (e.g. dev, staging)."""
    name = models.CharField(max_length=200)

    def __unicode__(self):
        return self.name

    def get_breadcrumbs(self):
        """Single breadcrumb entry pointing at this environment's edit view."""
        crumb = {"title": self.__unicode__(),
                 "url": reverse("edit_dev_env", args=(self.id,))}
        return [crumb]
class Test(models.Model):
    """A single test (of a given Test_Type) performed within an Engagement."""
    engagement = models.ForeignKey(Engagement, editable=False)
    lead = models.ForeignKey(User, editable=True, null=True)
    test_type = models.ForeignKey(Test_Type)
    target_start = models.DateTimeField()
    target_end = models.DateTimeField()
    estimated_time = models.TimeField(null=True, blank=True, editable=False)
    actual_time = models.TimeField(null=True, blank=True, editable=False, )
    percent_complete = models.IntegerField(null=True, blank=True,
                                           editable=True)
    notes = models.ManyToManyField(Notes, blank=True,
                                   editable=False)
    environment = models.ForeignKey(Development_Environment, null=True,
                                    blank=False)

    def __unicode__(self):
        # e.g. "Pen Test (Jan 01, 2017)"
        return "%s (%s)" % (self.test_type,
                            self.target_start.strftime("%b %d, %Y"))

    def get_breadcrumbs(self):
        """Engagement breadcrumbs plus a crumb for this test's view."""
        bc = self.engagement.get_breadcrumbs()
        bc += [{'title': self.__unicode__(),
                'url': reverse('view_test', args=(self.id,))}]
        return bc

    def verified_finding_count(self):
        """Number of verified findings recorded against this test."""
        return Finding.objects.filter(test=self, verified=True).count()
class VA(models.Model):
    """Vulnerability-assessment job record (address, requester, result test)."""
    address = models.TextField(editable=False, default="none")
    user = models.ForeignKey(User, editable=False)
    result = models.ForeignKey(Test, editable=False, null=True, blank=True)
    # True once the job has completed (assumption based on usage; TODO confirm).
    status = models.BooleanField(default=False, editable=False)
    start = models.CharField(max_length=100)
class Finding(models.Model):
    """A single security finding recorded against a Test.

    Carries triage state (active/verified/false_p/duplicate/out_of_scope),
    the review workflow flags, JIRA lookup helpers and report-formatting
    helpers. ``save()`` may enqueue async deduplication / false-positive
    history tasks depending on System_Settings.
    """
    title = models.TextField(max_length=1000)
    date = models.DateField(default=get_current_date)
    cwe = models.IntegerField(default=0, null=True, blank=True)
    url = models.TextField(null=True, blank=True, editable=False)
    severity = models.CharField(max_length=200)
    description = models.TextField()
    mitigation = models.TextField()
    impact = models.TextField()
    endpoints = models.ManyToManyField(Endpoint, blank=True, )
    # Scratch attributes populated by importers before the instance is saved.
    # NOTE(review): unsaved_endpoints is a shared class-level mutable default;
    # callers presumably reassign rather than mutate it -- confirm.
    unsaved_endpoints = []
    unsaved_request = None
    unsaved_response = None
    unsaved_tags = None
    references = models.TextField(null=True, blank=True, db_column="refs")
    test = models.ForeignKey(Test, editable=False)
    # TODO: Will be deprecated soon
    is_template = models.BooleanField(default=False)
    active = models.BooleanField(default=True)
    verified = models.BooleanField(default=True)
    false_p = models.BooleanField(default=False, verbose_name="False Positive")
    duplicate = models.BooleanField(default=False)
    duplicate_finding = models.ForeignKey('self', editable=False, null=True, related_name='original_finding', blank=True)
    # NOTE(review): null=True has no effect on a ManyToManyField.
    duplicate_list = models.ManyToManyField("self", editable=False, null=True, blank=True)
    out_of_scope = models.BooleanField(default=False)
    # Peer-review workflow.
    under_review = models.BooleanField(default=False)
    review_requested_by = models.ForeignKey(Dojo_User, null=True, blank=True, related_name='review_requested_by')
    reviewers = models.ManyToManyField(Dojo_User, blank=True)
    # Defect-tracking review workflow.
    under_defect_review = models.BooleanField(default=False)
    defect_review_requested_by = models.ForeignKey(Dojo_User, null=True, blank=True, related_name='defect_review_requested_by')
    thread_id = models.IntegerField(default=0, editable=False)
    mitigated = models.DateTimeField(editable=False, null=True, blank=True)
    mitigated_by = models.ForeignKey(User, null=True, editable=False, related_name="mitigated_by")
    reporter = models.ForeignKey(User, editable=False, related_name='reporter')
    notes = models.ManyToManyField(Notes, blank=True,
                                   editable=False)
    # 'S0'..'S4' mirror of severity, used for ordering (see Meta).
    numerical_severity = models.CharField(max_length=4)
    last_reviewed = models.DateTimeField(null=True, editable=False)
    last_reviewed_by = models.ForeignKey(User, null=True, editable=False, related_name='last_reviewed_by')
    images = models.ManyToManyField('FindingImage', blank=True)
    # Static-analysis / issue-tracker source details.
    issue_id = models.TextField(null=True, blank=True)
    line_number = models.TextField(null=True, blank=True)
    sourcefilepath = models.TextField(null=True, blank=True)
    sourcefile = models.TextField(null=True, blank=True)
    param = models.TextField(null=True, blank=True)
    payload = models.TextField(null=True, blank=True)
    # alter table dojo_finding add column function longtext;
    function = models.TextField(null=True, blank=True)
    # Severity rank lookup (0 = most severe).
    SEVERITIES = {'Info': 4, 'Low': 3, 'Medium': 2,
                  'High': 1, 'Critical': 0}

    class Meta:
        ordering = ('numerical_severity', '-date', 'title')

    def get_hash_code(self):
        """SHA-256 of title+description, used by deduplication.

        NOTE(review): Python 2 only -- sha256() needs bytes on Python 3.
        """
        hash_string = self.title + self.description
        return hashlib.sha256(hash_string).hexdigest()

    @staticmethod
    def get_numerical_severity(severity):
        """Map a severity label to its sortable 'S0'..'S4' code ('S4' fallback)."""
        if severity == 'Critical':
            return 'S0'
        elif severity == 'High':
            return 'S1'
        elif severity == 'Medium':
            return 'S2'
        elif severity == 'Low':
            return 'S3'
        else:
            return 'S4'

    def __unicode__(self):
        return self.title

    def status(self):
        """Human-readable comma-joined status summary (e.g. 'Active, Verified')."""
        status = []
        if self.active:
            status += ['Active']
        else:
            status += ['Inactive']
        if self.verified:
            status += ['Verified']
        if self.mitigated:
            status += ['Mitigated']
        if self.false_p:
            status += ['False Positive']
        if self.out_of_scope:
            status += ['Out Of Scope']
        if self.duplicate:
            status += ['Duplicate']
        if len(self.risk_acceptance_set.all()) > 0:
            status += ['Accepted']
        # Unreachable in practice: the active/inactive branch always adds one
        # entry; kept as a safety net.
        if not len(status):
            status += ['Initial']
        return ", ".join([str(s) for s in status])

    def age(self):
        """Days from the finding date to mitigation (or to today), floored at 0."""
        if self.mitigated:
            days = (self.mitigated.date() - datetime.combine(self.date, datetime.min.time()).date()).days
        else:
            days = (get_current_date() - datetime.combine(self.date, datetime.min.time()).date()).days
        return days if days > 0 else 0

    def jira(self):
        """Return the linked JIRA_Issue, or None if none exists.

        NOTE(review): the bare except also hides unexpected errors
        (e.g. multiple rows) -- deliberate best-effort lookup, kept as-is.
        """
        try:
            jissue = JIRA_Issue.objects.get(finding=self)
        except:
            jissue = None
            pass
        return jissue

    def jira_conf(self):
        """Return the JIRA_Conf for this finding's product, or None."""
        try:
            jpkey = JIRA_PKey.objects.get(product=self.test.engagement.product)
            jconf = jpkey.conf
        except:
            jconf = None
            pass
        return jconf

    def long_desc(self):
        """Render the finding as JIRA-style wiki markup (*bold* headers).

        NOTE(review): concatenating self.references raises TypeError when it
        is None; clean() backfills it, but only if full_clean ran -- confirm.
        """
        long_desc = ''
        long_desc += '*' + self.title + '*\n\n'
        long_desc += '*Severity:* ' + self.severity + '\n\n'
        long_desc += '*Systems*: \n'
        for e in self.endpoints.all():
            long_desc += str(e) + '\n\n'
        long_desc += '*Description*: \n' + self.description + '\n\n'
        long_desc += '*Mitigation*: \n' + self.mitigation + '\n\n'
        long_desc += '*Impact*: \n' + self.impact + '\n\n'
        long_desc += '*References*:' + self.references
        return long_desc

    def save(self, *args, **kwargs):
        """Persist, then optionally enqueue dedupe / false-positive tasks."""
        super(Finding, self).save(*args, **kwargs)
        # NOTE(review): hash_code is not a model field; this assignment after
        # save() only sets an in-memory attribute and is never persisted --
        # confirm that is intentional.
        self.hash_code = self.get_hash_code()
        # Assumes exactly one System_Settings row exists (objects.get()).
        system_settings = System_Settings.objects.get()
        if system_settings.enable_deduplication:
            # Local imports avoid a circular import with dojo.tasks.
            from dojo.tasks import async_dedupe
            async_dedupe.delay(self, *args, **kwargs)
        if system_settings.false_positive_history:
            from dojo.tasks import async_false_history
            async_false_history.delay(self, *args, **kwargs)

    def clean(self):
        """Backfill empty title/large-text fields with placeholder text."""
        no_check = ["test", "reporter"]
        bigfields = ["description", "mitigation", "references", "impact", "url"]
        for field_obj in self._meta.fields:
            field = field_obj.name
            if field not in no_check:
                val = getattr(self, field)
                if not val and field == "title":
                    setattr(self, field, "No title given")
                if not val and field in bigfields:
                    setattr(self, field, "No %s given" % field)

    def severity_display(self):
        """Severity label honoring the S0..S4 naming system setting;
        falls back to the plain label if settings are unreadable."""
        try:
            system_settings = System_Settings.objects.get()
            if system_settings.s_finding_severity_naming:
                return self.numerical_severity
            else:
                return self.severity
        except:
            return self.severity

    def get_breadcrumbs(self):
        """Test breadcrumbs plus a crumb for this finding's view."""
        bc = self.test.get_breadcrumbs()
        bc += [{'title': self.__unicode__(),
                'url': reverse('view_finding', args=(self.id,))}]
        return bc

    # def get_request(self):
    #     if self.burprawrequestresponse_set.count() > 0:
    #         reqres = BurpRawRequestResponse.objects.get(finding=self)
    #         return base64.b64decode(reqres.burpRequestBase64)
    #
    # def get_response(self):
    #     if self.burprawrequestresponse_set.count() > 0:
    #         reqres = BurpRawRequestResponse.objects.get(finding=self)
    #         res = base64.b64decode(reqres.burpResponseBase64)
    #         # Removes all blank lines
    #         res = re.sub(r'\n\s*\n', '\n', res)
    #         return res
# Give the auto-generated Finding<->Endpoint through-model rows a readable
# unicode representation ("Endpoint: <host>").
Finding.endpoints.through.__unicode__ = lambda x: "Endpoint: " + x.endpoint.host
class Stub_Finding(models.Model):
    """A quick 'potential finding' stub: minimal fields, promoted to a full
    Finding later."""
    title = models.TextField(max_length=1000, blank=False, null=False)
    date = models.DateField(default=get_current_date, blank=False, null=False)
    severity = models.CharField(max_length=200, blank=True, null=True)
    description = models.TextField(blank=True, null=True)
    test = models.ForeignKey(Test, editable=False)
    reporter = models.ForeignKey(User, editable=False)

    class Meta:
        ordering = ('-date', 'title')

    def __unicode__(self):
        return self.title

    def get_breadcrumbs(self):
        """Test breadcrumbs plus a 'Potential Finding' crumb for this stub."""
        bc = self.test.get_breadcrumbs()
        bc += [{'title': "Potential Finding: " + self.__unicode__(),
                'url': reverse('view_potential_finding', args=(self.id,))}]
        return bc
class Finding_Template(models.Model):
    """Reusable template with prefilled fields for creating Findings."""
    title = models.TextField(max_length=1000)
    cwe = models.IntegerField(default=None, null=True, blank=True)
    severity = models.CharField(max_length=200, null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    mitigation = models.TextField(null=True, blank=True)
    impact = models.TextField(null=True, blank=True)
    references = models.TextField(null=True, blank=True, db_column="refs")
    numerical_severity = models.CharField(max_length=4, null=True, blank=True, editable=False)
    # Same severity-rank lookup as Finding.SEVERITIES.
    SEVERITIES = {'Info': 4, 'Low': 3, 'Medium': 2,
                  'High': 1, 'Critical': 0}

    class Meta:
        ordering = ['-cwe']

    def __unicode__(self):
        return self.title

    def get_breadcrumbs(self):
        """Single-entry breadcrumb list pointing at this template's view."""
        bc = [{'title': self.__unicode__(),
               'url': reverse('view_template', args=(self.id,))}]
        return bc
class Check_List(models.Model):
    """Per-engagement security checklist: a pass/fail value per category plus
    the findings backing each category."""
    session_management = models.CharField(max_length=50, default='none')
    session_issues = models.ManyToManyField(Finding,
                                            related_name='session_issues',
                                            blank=True)
    encryption_crypto = models.CharField(max_length=50, default='none')
    crypto_issues = models.ManyToManyField(Finding,
                                           related_name='crypto_issues',
                                           blank=True)
    configuration_management = models.CharField(max_length=50, default='')
    config_issues = models.ManyToManyField(Finding,
                                           related_name='config_issues',
                                           blank=True)
    authentication = models.CharField(max_length=50, default='none')
    auth_issues = models.ManyToManyField(Finding,
                                         related_name='auth_issues',
                                         blank=True)
    authorization_and_access_control = models.CharField(max_length=50,
                                                        default='none')
    author_issues = models.ManyToManyField(Finding,
                                           related_name='author_issues',
                                           blank=True)
    data_input_sanitization_validation = models.CharField(max_length=50,
                                                          default='none')
    data_issues = models.ManyToManyField(Finding, related_name='data_issues',
                                         blank=True)
    sensitive_data = models.CharField(max_length=50, default='none')
    sensitive_issues = models.ManyToManyField(Finding,
                                              related_name='sensitive_issues',
                                              blank=True)
    other = models.CharField(max_length=50, default='none')
    other_issues = models.ManyToManyField(Finding, related_name='other_issues',
                                          blank=True)
    engagement = models.ForeignKey(Engagement, editable=False,
                                   related_name='eng_for_check')

    @staticmethod
    def get_status(pass_fail):
        """Map a 'Pass'/'Fail' value to a Bootstrap CSS class name."""
        if pass_fail == 'Pass':
            return 'success'
        elif pass_fail == 'Fail':
            return 'danger'
        else:
            return 'warning'

    def get_breadcrumb(self):
        """Engagement breadcrumbs plus a 'Check List' crumb.

        Bug fix: Engagement defines get_breadcrumbs() (plural); the previous
        call to self.engagement.get_breadcrumb() raised AttributeError.
        """
        bc = self.engagement.get_breadcrumbs()
        bc += [{'title': "Check List",
                'url': reverse('complete_checklist', args=(self.engagement.id,))}]
        return bc
class BurpRawRequestResponse(models.Model):
    """Raw Burp HTTP request/response pair, stored base64-encoded and linked
    to a finding."""
    finding = models.ForeignKey(Finding, blank=True, null=True)
    burpRequestBase64 = models.BinaryField()
    burpResponseBase64 = models.BinaryField()

    def get_request(self):
        """Decode and return the stored raw HTTP request."""
        return base64.b64decode(self.burpRequestBase64)

    def get_response(self):
        """Decode the stored raw HTTP response with blank lines collapsed."""
        decoded = base64.b64decode(self.burpResponseBase64)
        # Removes all blank lines
        return re.sub(r'\n\s*\n', '\n', decoded)
class Risk_Acceptance(models.Model):
    """An accepted-risk record: an uploaded acceptance file plus the findings
    it covers. Linked from Engagement.risk_acceptance (M2M)."""
    path = models.FileField(upload_to='risk/%Y/%m/%d',
                            editable=False, null=False,
                            blank=False, verbose_name="Risk Acceptance File")
    accepted_findings = models.ManyToManyField(Finding)
    reporter = models.ForeignKey(User, editable=False)
    notes = models.ManyToManyField(Notes, editable=False)
    created = models.DateTimeField(null=False, editable=False,
                                   default=now)

    def __unicode__(self):
        return "Risk Acceptance added on %s" % self.created.strftime(
            "%b %d, %Y")

    def filename(self):
        """Base name of the uploaded file.

        NOTE(review): a FileField attribute is a FieldFile and never None, so
        the 'is not None' guard is always true; an unset file yields '' anyway.
        """
        return os.path.basename(self.path.name) \
            if self.path is not None else ''

    def get_breadcrumbs(self):
        """Breadcrumbs of the first linked engagement plus this record's view.

        NOTE(review): assumes at least one engagement references this record;
        engagement_set.first() returning None would raise here -- confirm.
        """
        bc = self.engagement_set.first().get_breadcrumbs()
        bc += [{'title': self.__unicode__(),
                'url': reverse('view_risk', args=(self.engagement_set.first().product.id, self.id,))}]
        return bc
class Report(models.Model):
    """A generated report: requested options, async task id, status and the
    resulting file."""
    name = models.CharField(max_length=200)
    type = models.CharField(max_length=100, default='Finding')
    format = models.CharField(max_length=15, default='AsciiDoc')
    requester = models.ForeignKey(User)
    # Id of the async generation task producing the file.
    task_id = models.CharField(max_length=50)
    file = models.FileField(upload_to='reports/%Y/%m/%d', verbose_name='Report File', null=True)
    status = models.CharField(max_length=10, default='requested')
    options = models.TextField()
    datetime = models.DateTimeField(auto_now_add=True)
    done_datetime = models.DateTimeField(null=True)

    def __unicode__(self):
        return self.name

    def get_url(self):
        """URL from which the finished report can be downloaded."""
        return reverse('download_report', args=(self.id,))

    class Meta:
        # Newest reports first.
        ordering = ['-datetime']
class FindingImage(models.Model):
    """An image attached to a finding, with derived JPEG renditions generated
    on demand by django-imagekit."""
    image = models.ImageField(upload_to='finding_images', null=True)
    # Derived sizes (ImageSpecField renders these lazily; not DB columns).
    image_thumbnail = ImageSpecField(source='image',
                                     processors=[ResizeToCover(100, 100)],
                                     format='JPEG',
                                     options={'quality': 70})
    image_small = ImageSpecField(source='image',
                                 processors=[ResizeToCover(640, 480)],
                                 format='JPEG',
                                 options={'quality': 100})
    image_medium = ImageSpecField(source='image',
                                  processors=[ResizeToCover(800, 600)],
                                  format='JPEG',
                                  options={'quality': 100})
    image_large = ImageSpecField(source='image',
                                 processors=[ResizeToCover(1024, 768)],
                                 format='JPEG',
                                 options={'quality': 100})

    def __unicode__(self):
        return self.image.name
class FindingImageAccessToken(models.Model):
    """This will allow reports to request the images without exposing the media root to the world without
    authentication"""
    user = models.ForeignKey(User, null=False, blank=False)
    image = models.ForeignKey(FindingImage, null=False, blank=False)
    token = models.CharField(max_length=255)
    size = models.CharField(max_length=9,
                            choices=(
                                ('small', 'Small'),
                                ('medium', 'Medium'),
                                ('large', 'Large'),
                                ('thumbnail', 'Thumbnail'),
                                ('original', 'Original')),
                            default='medium')

    def save(self, *args, **kwargs):
        """Generate a token on first save, then persist normally."""
        if not self.token:
            # Fix: store the string form explicitly instead of assigning a raw
            # UUID object to a CharField and relying on backend coercion.
            self.token = str(uuid4())
        return super(FindingImageAccessToken, self).save(*args, **kwargs)
class JIRA_Conf(models.Model):
    """Connection settings and severity-to-priority mapping for a JIRA server."""
    url = models.URLField(max_length=2000, verbose_name="JIRA URL")
    # product = models.ForeignKey(Product)
    username = models.CharField(max_length=2000)
    password = models.CharField(max_length=2000)
    # project_key = models.CharField(max_length=200,null=True, blank=True)
    # enabled = models.BooleanField(default=True)
    default_issue_type = models.CharField(max_length=9,
                                          choices=(
                                              ('Task', 'Task'),
                                              ('Story', 'Story'),
                                              ('Epic', 'Epic'),
                                              ('Spike', 'Spike'),
                                              ('Bug', 'Bug')),
                                          default='Bug')
    # Numeric JIRA identifiers: the "Epic Name" field id and the workflow
    # transition keys used to open/close issues (presumably; confirm against
    # the JIRA API calls that consume them).
    epic_name_id = models.IntegerField()
    open_status_key = models.IntegerField()
    close_status_key = models.IntegerField()
    # JIRA priority name configured for each finding severity level.
    low_mapping_severity = models.CharField(max_length=200)
    medium_mapping_severity = models.CharField(max_length=200)
    high_mapping_severity = models.CharField(max_length=200)
    critical_mapping_severity = models.CharField(max_length=200)
    finding_text = models.TextField(null=True, blank=True)

    def __unicode__(self):
        return self.url + " | " + self.username

    def get_priority(self, status):
        # Translate a finding severity into the configured JIRA priority;
        # unknown severities map to 'N/A'.
        if status == 'Low':
            return self.low_mapping_severity
        elif status == 'Medium':
            return self.medium_mapping_severity
        elif status == 'High':
            return self.high_mapping_severity
        elif status == 'Critical':
            return self.critical_mapping_severity
        else:
            return 'N/A'
class JIRA_Issue(models.Model):
    """Links a Finding or an Engagement to its JIRA issue (id and key)."""
    jira_id = models.CharField(max_length=200)
    jira_key = models.CharField(max_length=200)
    finding = models.OneToOneField(Finding, null=True, blank=True)
    engagement = models.OneToOneField(Engagement, null=True, blank=True)
class JIRA_Clone(models.Model):
    """Maps a JIRA issue id to the id of its cloned issue."""
    jira_id = models.CharField(max_length=200)
    jira_clone_id = models.CharField(max_length=200)
class JIRA_Details_Cache(models.Model):
    """Locally cached status/resolution details for a JIRA issue."""
    jira_id = models.CharField(max_length=200)
    jira_key = models.CharField(max_length=200)
    jira_status = models.CharField(max_length=200)
    jira_resolution = models.CharField(max_length=200)
class JIRA_PKey(models.Model):
    """Per-product JIRA project settings (project key and push behaviour)."""
    project_key = models.CharField(max_length=200, blank=True)
    product = models.ForeignKey(Product)
    conf = models.ForeignKey(JIRA_Conf, verbose_name="JIRA Configuration", null=True, blank=True)
    component = models.CharField(max_length=200, blank=True)
    push_all_issues = models.BooleanField(default=False, blank=True)
    enable_engagement_epic_mapping = models.BooleanField(default=False, blank=True)
    push_notes = models.BooleanField(default=False, blank=True)
# Delivery channels available for each notification type.
NOTIFICATION_CHOICES = (("slack", "slack"), ("hipchat", "hipchat"), ("mail", "mail"), ("alert", "alert"))


class Notifications(models.Model):
    """Selected delivery channels per event type, optionally tied to a user.

    ``user`` may be NULL; each event field defaults to the 'alert' channel.
    """
    engagement_added = MultiSelectField(choices=NOTIFICATION_CHOICES, default='alert', blank=True)
    test_added = MultiSelectField(choices=NOTIFICATION_CHOICES, default='alert', blank=True)
    results_added = MultiSelectField(choices=NOTIFICATION_CHOICES, default='alert', blank=True)
    report_created = MultiSelectField(choices=NOTIFICATION_CHOICES, default='alert', blank=True)
    jira_update = MultiSelectField(choices=NOTIFICATION_CHOICES, default='alert', blank=True)
    upcoming_engagement = MultiSelectField(choices=NOTIFICATION_CHOICES, default='alert', blank=True)
    user_mentioned = MultiSelectField(choices=NOTIFICATION_CHOICES, default='alert', blank=True)
    other = MultiSelectField(choices=NOTIFICATION_CHOICES, default='alert', blank=True)
    user = models.ForeignKey(User, default=None, null=True, editable=False)
class Tool_Type(models.Model):
    """A category of external tool that can be configured."""
    name = models.CharField(max_length=200)
    description = models.CharField(max_length=2000, null=True)

    class Meta:
        ordering = ['name']

    def __unicode__(self):
        return self.name
class Tool_Configuration(models.Model):
    """Connection and credential settings for one external tool instance.

    Exactly one of the credential fields (api_key, username/password, ssh)
    is expected to be relevant, depending on ``authentication_type``.
    """
    name = models.CharField(max_length=200, null=False)
    description = models.CharField(max_length=2000, null=True, blank=True)
    url = models.URLField(max_length=2000, null=True)
    tool_type = models.ForeignKey(Tool_Type, related_name='tool_type')
    authentication_type = models.CharField(max_length=15,
                                           choices=(
                                               ('API', 'API Key'),
                                               ('Password', 'Username/Password'),
                                               ('SSH', 'SSH')),
                                           null=True, blank=True)
    username = models.CharField(max_length=200, null=True, blank=True)
    password = models.CharField(max_length=600, null=True, blank=True)
    auth_title = models.CharField(max_length=200, null=True, blank=True, verbose_name="Title for SSH/API Key")
    ssh = models.CharField(max_length=6000, null=True, blank=True)
    api_key = models.CharField(max_length=600, null=True, blank=True, verbose_name="API Key")

    class Meta:
        ordering = ['name']

    def __unicode__(self):
        return self.name
class Tool_Product_Settings(models.Model):
    """Associates a configured tool with a product (plus per-product details)."""
    name = models.CharField(max_length=200, null=False)
    description = models.CharField(max_length=2000, null=True, blank=True)
    url = models.URLField(max_length=2000, null=True, blank=True)
    product = models.ForeignKey(Product, default=1, editable=False)
    tool_configuration = models.ForeignKey(Tool_Configuration, null=False, related_name='tool_configuration')
    # Identifier of the matching project inside the external tool.
    tool_project_id = models.CharField(max_length=200, null=True, blank=True)
    notes = models.ManyToManyField(Notes, blank=True, editable=False)

    class Meta:
        ordering = ['name']
class Tool_Product_History(models.Model):
    """One historical scan run of a tool/product settings entry."""
    product = models.ForeignKey(Tool_Product_Settings, editable=False)
    last_scan = models.DateTimeField(null=False, editable=False, default=now)
    # NOTE(review): field name is misspelled ("succesfull"); renaming it
    # requires a database migration, so it is left as-is here.
    succesfull = models.BooleanField(default=True, verbose_name="Succesfully")
    configuration_details = models.CharField(max_length=2000, null=True, blank=True)
class Alerts(models.Model):
    """An in-application alert shown to a user, newest first."""
    title = models.CharField(max_length=100, default='', null=False)
    description = models.CharField(max_length=2000, null=True)
    url = models.URLField(max_length=2000, null=True)
    # Which subsystem raised the alert.
    source = models.CharField(max_length=100, default='Generic')
    # CSS icon class used when rendering the alert.
    icon = models.CharField(max_length=25, default='icon-user-check')
    user_id = models.ForeignKey(User, null=True, editable=False)
    created = models.DateTimeField(null=False, editable=False, default=now)

    class Meta:
        # Newest alerts first.
        ordering = ['-created']
class Cred_User(models.Model):
    """A stored login credential for an application under test."""
    name = models.CharField(max_length=200, null=False)
    username = models.CharField(max_length=200, null=False)
    password = models.CharField(max_length=600, null=False)
    role = models.CharField(max_length=200, null=False)
    # How the target application performs login.
    authentication = models.CharField(max_length=15,
                                      choices=(
                                          ('Form', 'Form Authentication'),
                                          ('SSO', 'SSO Redirect')),
                                      default='Form')
    # Optional HTTP-level authentication scheme.
    http_authentication = models.CharField(max_length=15,
                                           choices=(
                                               ('Basic', 'Basic'),
                                               ('NTLM', 'NTLM')),
                                           null=True, blank=True)
    description = models.CharField(max_length=2000, null=True, blank=True)
    url = models.URLField(max_length=2000, null=False)
    environment = models.ForeignKey(Development_Environment, null=False)
    # Regexes used to recognise successful login / logout pages.
    login_regex = models.CharField(max_length=200, null=True, blank=True)
    logout_regex = models.CharField(max_length=200, null=True, blank=True)
    notes = models.ManyToManyField(Notes, blank=True, editable=False)
    is_valid = models.BooleanField(default=True, verbose_name="Login is valid")
    # selenium_script = models.CharField(max_length=1000, default='none',
    #                                    editable=False, blank=True, null=True, verbose_name="Selenium Script File")

    class Meta:
        ordering = ['name']

    def __unicode__(self):
        return self.name + " (" + self.role + ")"
class Cred_Mapping(models.Model):
    """Attaches a credential to a product, finding, engagement or test.

    Only one of the four optional foreign keys is expected to be set per row.
    """
    cred_id = models.ForeignKey(Cred_User, null=False, related_name="cred_user", verbose_name="Credential")
    product = models.ForeignKey(Product, null=True, blank=True, related_name="product")
    finding = models.ForeignKey(Finding, null=True, blank=True, related_name="finding")
    engagement = models.ForeignKey(Engagement, null=True, blank=True, related_name="engagement")
    test = models.ForeignKey(Test, null=True, blank=True, related_name="test")
    is_authn_provider = models.BooleanField(default=False, verbose_name="Authentication Provider")
    url = models.URLField(max_length=2000, null=True, blank=True)

    def __unicode__(self):
        return self.cred_id.name + " (" + self.cred_id.role + ")"
# Register for automatic logging to database (django-auditlog): every change
# to these models is recorded in the audit log.
auditlog.register(Dojo_User)
auditlog.register(Endpoint)
auditlog.register(Engagement)
auditlog.register(Finding)
auditlog.register(Product)
auditlog.register(Test)
auditlog.register(Risk_Acceptance)
auditlog.register(Finding_Template)
auditlog.register(Cred_User)

# Register tagging for models (these models can carry user-defined tags).
tag_register(Product)
tag_register(Test)
tag_register(Finding)
tag_register(Engagement)
tag_register(Endpoint)
tag_register(Finding_Template)

# Expose models in the Django admin site.
admin.site.register(Test)
admin.site.register(Finding)
admin.site.register(FindingImage)
admin.site.register(FindingImageAccessToken)
admin.site.register(Stub_Finding)
admin.site.register(Engagement)
admin.site.register(Risk_Acceptance)
admin.site.register(Check_List)
admin.site.register(Test_Type)
admin.site.register(Endpoint)
admin.site.register(Product)
admin.site.register(Product_Type)
admin.site.register(Dojo_User)
admin.site.register(UserContactInfo)
admin.site.register(Notes)
admin.site.register(Report)
admin.site.register(Scan)
admin.site.register(ScanSettings)
admin.site.register(IPScan)
admin.site.register(Alerts)
admin.site.register(JIRA_Issue)
admin.site.register(Tool_Configuration)
admin.site.register(Tool_Product_Settings)
admin.site.register(Tool_Type)
admin.site.register(Cred_User)
admin.site.register(Cred_Mapping)
admin.site.register(System_Settings)

# Register models for full-text search (django-watson).
watson.register(Product)
watson.register(Test)
watson.register(Finding)
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the 'orm' app.

    Adds two columns: ProjectLayer.optional (boolean, default True) and
    ProjectTarget.task (nullable CharField). The ``models`` dict below is the
    auto-generated frozen ORM snapshot used by South and must not be edited
    by hand.
    """

    def forwards(self, orm):
        # Adding field 'ProjectLayer.optional'
        db.add_column(u'orm_projectlayer', 'optional',
                      self.gf('django.db.models.fields.BooleanField')(default=True),
                      keep_default=False)

        # Adding field 'ProjectTarget.task'
        db.add_column(u'orm_projecttarget', 'task',
                      self.gf('django.db.models.fields.CharField')(max_length=100, null=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'ProjectLayer.optional'
        db.delete_column(u'orm_projectlayer', 'optional')

        # Deleting field 'ProjectTarget.task'
        db.delete_column(u'orm_projecttarget', 'task')

    # Frozen model definitions (auto-generated by South; do not edit).
    models = {
        u'orm.build': {
            'Meta': {'object_name': 'Build'},
            'bitbake_version': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'build_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'completed_on': ('django.db.models.fields.DateTimeField', [], {}),
            'cooker_log_path': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'distro': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'distro_version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'errors_no': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'machine': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'outcome': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Project']", 'null': 'True'}),
            'started_on': ('django.db.models.fields.DateTimeField', [], {}),
            'timespent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'warnings_no': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'orm.helptext': {
            'Meta': {'object_name': 'HelpText'},
            'area': ('django.db.models.fields.IntegerField', [], {}),
            'build': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'helptext_build'", 'to': u"orm['orm.Build']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        u'orm.layer': {
            'Meta': {'object_name': 'Layer'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'layer_index_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'local_path': ('django.db.models.fields.FilePathField', [], {'max_length': '255'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'orm.layer_version': {
            'Meta': {'object_name': 'Layer_Version'},
            'branch': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'build': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'layer_version_build'", 'to': u"orm['orm.Build']"}),
            'commit': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'layer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'layer_version_layer'", 'to': u"orm['orm.Layer']"}),
            'priority': ('django.db.models.fields.IntegerField', [], {})
        },
        u'orm.logmessage': {
            'Meta': {'object_name': 'LogMessage'},
            'build': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Build']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'lineno': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'message': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
            'pathname': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'}),
            'task': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Task']", 'null': 'True', 'blank': 'True'})
        },
        u'orm.package': {
            'Meta': {'object_name': 'Package'},
            'build': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Build']"}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'installed_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
            'installed_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'license': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'recipe': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Recipe']", 'null': 'True'}),
            'revision': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'section': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
            'size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'summary': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
        },
        u'orm.package_dependency': {
            'Meta': {'object_name': 'Package_Dependency'},
            'dep_type': ('django.db.models.fields.IntegerField', [], {}),
            'depends_on': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'package_dependencies_target'", 'to': u"orm['orm.Package']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'package_dependencies_source'", 'to': u"orm['orm.Package']"}),
            'target': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Target']", 'null': 'True'})
        },
        u'orm.package_file': {
            'Meta': {'object_name': 'Package_File'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'buildfilelist_package'", 'to': u"orm['orm.Package']"}),
            'path': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'}),
            'size': ('django.db.models.fields.IntegerField', [], {})
        },
        u'orm.project': {
            'Meta': {'object_name': 'Project'},
            'branch': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'short_description': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
        },
        u'orm.projectlayer': {
            'Meta': {'object_name': 'ProjectLayer'},
            'commit': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
            'dirpath': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
            'giturl': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'optional': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Project']"})
        },
        u'orm.projecttarget': {
            'Meta': {'object_name': 'ProjectTarget'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Project']"}),
            'target': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'task': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
        },
        u'orm.projectvariable': {
            'Meta': {'object_name': 'ProjectVariable'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Project']"}),
            'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        u'orm.recipe': {
            'Meta': {'object_name': 'Recipe'},
            'bugtracker': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'file_path': ('django.db.models.fields.FilePathField', [], {'max_length': '255'}),
            'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'layer_version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'recipe_layer_version'", 'to': u"orm['orm.Layer_Version']"}),
            'license': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'section': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'summary': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
        },
        u'orm.recipe_dependency': {
            'Meta': {'object_name': 'Recipe_Dependency'},
            'dep_type': ('django.db.models.fields.IntegerField', [], {}),
            'depends_on': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'r_dependencies_depends'", 'to': u"orm['orm.Recipe']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'recipe': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'r_dependencies_recipe'", 'to': u"orm['orm.Recipe']"})
        },
        u'orm.target': {
            'Meta': {'object_name': 'Target'},
            'build': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Build']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'is_image': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'license_manifest_path': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}),
            'target': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'orm.target_file': {
            'Meta': {'object_name': 'Target_File'},
            'directory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'directory_set'", 'null': 'True', 'to': u"orm['orm.Target_File']"}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'inodetype': ('django.db.models.fields.IntegerField', [], {}),
            'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'path': ('django.db.models.fields.FilePathField', [], {'max_length': '100'}),
            'permission': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'size': ('django.db.models.fields.IntegerField', [], {}),
            'sym_target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'symlink_set'", 'null': 'True', 'to': u"orm['orm.Target_File']"}),
            'target': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Target']"})
        },
        u'orm.target_image_file': {
            'Meta': {'object_name': 'Target_Image_File'},
            'file_name': ('django.db.models.fields.FilePathField', [], {'max_length': '254'}),
            'file_size': ('django.db.models.fields.IntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'target': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Target']"})
        },
        u'orm.target_installed_package': {
            'Meta': {'object_name': 'Target_Installed_Package'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'buildtargetlist_package'", 'to': u"orm['orm.Package']"}),
            'target': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Target']"})
        },
        u'orm.task': {
            'Meta': {'ordering': "('order', 'recipe')", 'unique_together': "(('build', 'recipe', 'task_name'),)", 'object_name': 'Task'},
            'build': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'task_build'", 'to': u"orm['orm.Build']"}),
            'cpu_usage': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2'}),
            'disk_io': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'elapsed_time': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'line_number': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'logfile': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'}),
            'message': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
            'order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'outcome': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
            'path_to_sstate_obj': ('django.db.models.fields.FilePathField', [], {'max_length': '500', 'blank': 'True'}),
            'recipe': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'build_recipe'", 'to': u"orm['orm.Recipe']"}),
            'script_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'source_url': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'}),
            'sstate_checksum': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'sstate_result': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'task_executed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'task_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'work_directory': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'})
        },
        u'orm.task_dependency': {
            'Meta': {'object_name': 'Task_Dependency'},
            'depends_on': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'task_dependencies_depends'", 'to': u"orm['orm.Task']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'task_dependencies_task'", 'to': u"orm['orm.Task']"})
        },
        u'orm.variable': {
            'Meta': {'object_name': 'Variable'},
            'build': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variable_build'", 'to': u"orm['orm.Build']"}),
            'changed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'human_readable_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'variable_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'variable_value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        u'orm.variablehistory': {
            'Meta': {'object_name': 'VariableHistory'},
            'file_name': ('django.db.models.fields.FilePathField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'line_number': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'operation': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'value': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'variable': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vhistory'", 'to': u"orm['orm.Variable']"})
        }
    }

    complete_apps = ['orm']
| |
import math
import time
import itertools
from src.Surface import Surface
from shapely import geometry
from src.Bucket import Bucket
class Road(Surface):
    """
    One of the essential building blocks of the traffic environment. A road has a number of lanes and connects to
    an intersection on each of its ends. Road supports the tick-tock simulation methods, requesting next locations
    from vehicles driving on it and handling moving them to their next locations. Road handles transferring vehicles
    to and from adjacent intersections.
    """

    # Width of a single lane, in the simulation's distance units.
    lane_width = 10
    def __init__(self, anchor_corner, length, inbound_lanes, outbound_lanes, orientation, speed_limit):
        """
        :param anchor_corner: [double, double] global coordinate of the road's anchor corner
        :param length: double
        :param inbound_lanes: int
        :param outbound_lanes: int
        :param orientation: double (IN RADIANS!)
        :param speed_limit: int
        """
        self.anchor = anchor_corner
        self.length = length
        self.inbound_lanes = inbound_lanes
        self.outbound_lanes = outbound_lanes
        self.lanes = inbound_lanes + outbound_lanes
        self.width = self.lanes * self.lane_width
        self.orientation = orientation
        self.speed_limit = speed_limit
        # Bucket length equals 10 seconds of travel at the speed limit
        # (0.2778 presumably converts km/h to m/s — confirm the units).
        self.bucket_length = 0.2778 * self.speed_limit * 10
        # Adjacent intersections are wired up after construction.
        self.initial_intersection = None
        self.terminal_intersection = None
        self.vehicles = []
        self.bucket_list = self.initialize_buckets(road_length=self.length,
                                                   road_width=self.width,
                                                   bucket_length=self.bucket_length,
                                                   inbound_lanes=self.inbound_lanes,
                                                   outbound_lanes=self.outbound_lanes)
        self.surface = self.generate_surface()
        self.next_locations = []  # Prevents conflicts with cars being moved onto roads between tick and tock.
        self.name = None
        self.reporter = None
def tick(self, ticktime_ms):
"""
Performs the vehicle next location getting tick
:param ticktime_ms:
:return:
"""
self.next_locations = self.request_next_locations(ticktime_ms)
return
def tock_positions(self):
"""
Performs the vehicle position updating tock
:return:
"""
self.update_positions()
return
def tock_crashes(self):
"""
Performs the crash detecting and handling tock, files the timestep report with the reporter
:return:
"""
crashes = self.process_collisions()
if self.reporter is not None:
number_of_vehicles = len(self.vehicles)
if len(self.vehicles) > 0:
avg_speed = sum([math.sqrt(v.vx ** 2 + v.vy ** 2) for v in self.vehicles]) / len(self.vehicles)
else:
avg_speed = "NAN"
self.reporter.add_info_road(self.name, number_of_vehicles, avg_speed, crashes)
return
def initialize_buckets(self, road_length, road_width, bucket_length, inbound_lanes, outbound_lanes):
"""
Creates a list of buckets of length equal to 10 seconds of travel at the speed limit
to populate the length of the road.
:param road_length:
:param road_width:
:param bucket_length:
:param inbound_lanes:
:param outbound_lanes:
:return:
"""
number_of_buckets = math.ceil(road_length / bucket_length)
bucket_list = []
for i in range(number_of_buckets):
if i == 0:
head = Bucket(initial_x = i * self.bucket_length, length=bucket_length,
inbound_lanes=inbound_lanes, outbound_lanes=outbound_lanes)
tail = head
bucket_list.append(head)
else:
next = Bucket(initial_x = i * self.bucket_length, length=bucket_length,
inbound_lanes=inbound_lanes, outbound_lanes=outbound_lanes)
next.set_previous_bucket(tail)
tail.set_next_bucket(next)
tail = next
bucket_list.append(tail)
return bucket_list
def generate_surface(self):
"""
Generates the shapely Polygon storing the surface of the road.
:return:
"""
# Points proceed clockwise around the rectangle from the anchor point
# [x, y] formatting
first = self.anchor
second = [first[0] + self.length * math.cos(self.orientation), first[1] + self.length * math.sin(self.orientation)]
third = [second[0] + self.width * math.cos(self.orientation + math.pi / 2), second[1] + self.width * math.sin(self.orientation + math.pi / 2)]
fourth = [first[0] + self.width * math.cos(self.orientation + math.pi / 2), first[1] + self.width * math.sin(self.orientation + math.pi / 2)]
# Reference : https://toblerity.org/shapely/manual.html#polygons
return geometry.Polygon([first, second, third, fourth])
def request_next_locations(self, ticktime_ms):
"""
Produces the next intended location of each car.
:param ticktime_ms:
:return:
"""
current_time = time.time()*1000
next_locations = [[vehicle.compute_next_location(ticktime_ms), vehicle] for vehicle in self.vehicles]
return next_locations
def update_positions(self):
# Update the location of each vehicle by updating it directly or transferring it to a neighboring intersection
for intended_location, vehicle in self.next_locations:
if self.is_local_on_road(intended_location):
vehicle.update_location(intended_location[0], intended_location[1])
self.appropriately_bucket(vehicle, intended_location)
else:
global_location = self.local_to_global_location_conversion(intended_location)
self.transfer(vehicle, global_location)
# Reset the list of cars intending to move
self.next_locations = []
return
def is_local_on_road(self, location):
"""
Takes a local coordinate and returns whether or not it is on the road
:param location:
:return:
"""
location = self.local_to_global_location_conversion(location)
return self.surface.contains(geometry.Point(location[0], location[1]))
def is_global_on_road(self, location):
"""
Takes a global coordinate and returns whether or not it is on the road
:param location:
:return:
"""
return self.surface.contains(geometry.Point(location[0], location[1]))
def local_to_global_location_conversion(self, location):
"""
Turn a local coordinate into its corresponding global coordinate
:param location:
:return:
"""
x = self.anchor[0] + location[0] * math.cos(self.orientation) + location[1] * math.cos(self.orientation + math.pi / 2)
y = self.anchor[1] + location[0] * math.sin(self.orientation) + location[1] * math.sin(self.orientation + math.pi / 2)
return (x, y)
def global_to_local_location_conversion(self, location):
"""
Turn a global coordinate into its corresponding local coordinate
:param location:
:return:
"""
# Recenter so that the anchor is the origin
relative_x = location[0] - self.anchor[0]
relative_y = location[1] - self.anchor[1]
# Rotate counterclockwise by the orientation
local_x = relative_x * math.cos(-self.orientation) - relative_y * math.sin(-self.orientation)
local_y = relative_y * math.cos(-self.orientation) + relative_x * math.sin(-self.orientation)
return (local_x, local_y)
def which_neighbor(self, location):
"""
Takes a global coordinate and returns which, if any of the neighboring intersections contain that coordinate
:param location:
:return:
"""
if self.initial_intersection.is_global_in_intersection(location):
return self.initial_intersection, "initial"
elif self.terminal_intersection.is_global_in_intersection(location):
return self.terminal_intersection, "terminal"
else:
raise ValueError("No neighbor contains that location.")
return
    def transfer(self, vehicle, location):
        """
        Takes a vehicle and a global location and attempts to relocate the vehicle to that location.

        Hands the vehicle to whichever neighboring intersection contains the
        location; in every case the vehicle leaves this road's vehicle list.
        :param vehicle: vehicle currently on this road
        :param location: global coordinate the vehicle is moving to
        :return:
        """
        try:
            # Side is "initial" / "terminal"
            neighbor, side = self.which_neighbor(location)
            vehicle.last_road = self
            # Consume the current leg of the vehicle's navigation plan.
            vehicle.navlist = vehicle.navlist[1:]
            if len(vehicle.navlist) > 0:
                # NOTE(review): intersections accept (vehicle, location, road,
                # side) - a different signature than this class's own
                # accept_transfer - confirm against the intersection class.
                neighbor.accept_transfer(vehicle, location, self, side)
            # A vehicle with an empty navlist has arrived and simply leaves
            # the simulation here.
            self.vehicles.remove(vehicle)
        except ValueError:
            # which_neighbor found no intersection containing the location.
            print("A vehicle couldn't be transferred because it requested an invalid destination.")
            self.vehicles.remove(vehicle)
        return
def accept_transfer(self, vehicle, location):
"""
Takes a vehicle and a global coordinate and places the vehicle onto the road at the local coordinate
corresponding to the global coordinate
:param vehicle:
:param location:
:return:
"""
local_location = self.global_to_local_location_conversion(location)
vehicle.transfer_to_road(self, local_location)
self.appropriately_bucket(vehicle, local_location)
self.vehicles.append(vehicle)
return
def appropriately_bucket(self, vehicle, location):
"""
Takes a vehicle and a local location and ensures that the vehicle is in the bucket corresponding to the location
:param vehicle:
:param location:
:return:
"""
# Remove the vehicle from its current bucket if it exists
if vehicle.get_bucket() is not None:
vehicle.get_bucket().remove(vehicle)
# And place it into the new bucket in which it belongs
bucket = self.bucket_list[math.floor(location[0] / self.bucket_length)]
bucket.add(vehicle)
# And inform the vehicle which bucket it is now in
vehicle.set_bucket(bucket)
return
def process_collisions(self):
"""
Locates those vehicles which have been in a collision and informs them of that fact.
:return:
"""
count = 0
for bucket in self.bucket_list:
preceding = [] if bucket.get_previous_bucket() == None else bucket.get_previous_bucket().get_vehicles()
following = [] if bucket.get_next_bucket() == None else bucket.get_next_bucket().get_vehicles()
current = bucket.get_vehicles()
vehicle_list = preceding + current + following
vehicle_pairs = list(itertools.combinations(vehicle_list, 2))
for (v1, v2) in vehicle_pairs:
if self.have_collided(v1, v2):
count += 1
# I am assuming that vehicles will want to know which vehicle they collided with.
v1.collided(v2)
v2.collided(v1)
# Collided vehicles are simply removed from the road (tow trucks are fast in this universe)
if v1 in self.vehicles:
self.vehicles.remove(v1)
if v2 in self.vehicles:
self.vehicles.remove(v2)
if v1 in preceding:
bucket.get_previous_bucket().remove(v1)
elif v1 in following:
bucket.get_next_bucket().remove(v1)
else:
bucket.remove(v1)
if v2 in preceding:
bucket.get_previous_bucket().remove(v2)
elif v2 in following:
bucket.get_next_bucket().remove(v2)
else:
bucket.remove(v2)
return count
def add_neighboring_intersection(self, intersection, end):
"""
Takes an intersection and an associated end of the road and adds that intersection at that road.
:param intersection:
:param end:
:return:
"""
if end == "initial":
self.initial_intersection = intersection
elif end == "terminal":
self.terminal_intersection = intersection
else:
raise ValueError("Intersection added to an end other than 'initial' or 'terminal'")
return
    def set_name(self, name):
        # Human-readable identifier for this road.
        self.name = name

    def get_name(self):
        # Return the road's display name.
        return self.name

    def set_reporter(self, reporter):
        # Object used elsewhere to record simulation statistics.
        self.reporter = reporter
| |
'''
PipelineEnrichmentGSEA.py
=============================================
:Tags: Python
Usage
-----
This pipeline is a wrapper of script runGSEA.py (enrichment analysis
by using GSEA. Further
description is provided
below.)
To run this pipeline, one needs to specify required parameters in
pipeline.ini file (configuration file).
This pipeline entails steps:
-----------
First step: Preprocessing of gene list(expression data set)
----------- Note: 1. Input gene list should be tab delimited file.
a. First line of dataset will be considered as
header. Suffix of file name should be ".gene.tsv"
b. Gene ids within gene list and gene set should be the same
2. Annotations from a Database:(to convert genelists)
a. AnnotationSets are predominantly generated from a database using an
AnnotationParser method.
b. The Database is generated using the pipeline pipeline_geneinfo.py.
This database is required to run pipeline_enrichment.
Input gene list is translated into required id type.
(Available options are specified in .ini file), sorts
the gene list on the basis of provided ranking metric.
It also removes all duplicate ids and generates report.
A summary of preprocessing steps of the gene list is provided and lists
of duplicate gene ids that were discarded is also listed.
A new gene list file (after preprocessing is created in a folder
that has the same name as gene list file name. This new file is used
for further analysis.
------------
Second step: Call runGSEA.py script file for enrichment analysis
-----------
This script will perform the enrichment analysis, by using gene set enrichment analysis
(GSEA) and leading edge analysis.
"Leading edge are defined as genes that are common to multiple
significantly enriched gene sets and coordinately enriched
in a phenotypic comparison of interest.They represent a rich
source of biologically important genes."
-----
It takes two input files:
1. Ranked list of genes (Preprocessed Expression data set file,
created by first step of pipeline).
2. Gene set
- A gene sets file defines one or more gene sets. For each gene
set,the file contains the gene set name and the list of genes in
that gene set. A gene sets file is a tab-delimited text file in
gmx or gmt format. For descriptions and examples of each file
format please refer to:
http://software.broadinstitute.org/cancer/software/gsea/wiki/index.php/Data_formats
- The Molecular Signatures Database (MSigDB)
(http://software.broadinstitute.org/gsea/msigdb/index.jsp)
is a collection of annotated gene sets, which can be used for gene
set enrichment analysis.OR you can create your own gene set in gmt
or gmx format.
3. Rest of the parameters can be specified in to pipeline.ini configuration
file. Every parameter is set to a default value.
This script will summarize the analysis in the following format:
1. GSEA Statistics
2. GSEA Report
3. Leading edge analysis report
example
-------
The way the test is ran:
cgat runGSEA -f "Expression_data.tsv" -g "Gene_set.gmt" -n 10 -d 1 -l 4
Default run conditions:
cgat runGSEA -f "Expression_data.tsv" -g "Gene_set.gmt"
--------------
GSEA Statistics
---------------
It includes following statistics for GSEA(for each phenotype):
- Enrichment Score (ES)
- Normalized Enrichment Score (NES)
- False Discovery Rate (FDR)
- Nominal P Value
--------------
GSEA reports
--------------
- Global Statistics and Plots include:
a) Enrichment plot,
b) Three separate bar plots that provide a quick overview of top 20 (this number is user defined)
enriched upregulated, downregulated and overall enriched genesets on the basis of their FDR values.
c) Global distribution of normalized enrichment score
d) Global distribution of normalized enrichment score with corresponding FDR q values and p values.
- Reports:
1 - Enrichment in Phenotype (of up and downregulated genes)
This report provides summary of enrichment analysis of each phenotype.
It includes details of which genesets are up and downregulated and a summary
of significant enriched gensets on the basis of FDR and p values.)
2 - Gene Set Details
This report provides summary of preprocessing steps of the genesets provided and
lists gene sets that were used in the analysis and which ones were discarded due to set thresholds
3 - Detailed Enrichment Results
This report provides detail statistics of each geneset(for each phenotype). Three reports are
generated: reports for upregulated genesets, downregulated genesets, and enriched genesets organised
on the basis of their FDR values.
By default, enrichment plot for top 20 gene sets will be reported.
----------------------------
Leading edge analysis report
----------------------------
It will report graphs that help you visualize the overlap between the selected leading edge subsets. It also
summarises the analysis in the form of reports. By default top 10 enriched genesets will be used for leading edge analysis.
- Leading edge plots include:
a) Heat Map(unclustered)
This provides an overview of overlap between leading edge subsets
b) Heat Map(clustered)
This heat map will be generated after hierarchical clustering of leading edge subset. It will
show you clustered genes among subsets
c) Set-to-Set Heat Map
This plot help you to visualize intensity of overlap between subsets (i.e. the extent of overlap between two genesets)
d) Dendrogram to illustrate the arrangement of the clusters produced by hierarchical clustering.
- Reports:
1- Leading_edge_summary_report: summary of genesets and corresponding enrichment statistics that were used for the leading edge analysis.
2- Leading edge matrix (gmx) file provides detailed information on leading edge analysis genesets
(i.e. participating genes in each gene set).
3- Leading edge (gct,cluster format) files for unclustered and clustered gene set. It is a boolean matrix.
that can be used as an input into other resources for additional analysis as this is ideal format for cluster representation
(in GSEA)
For details on the algorithm please refer to
Subramanian, Tamayo, et al. (2005, PNAS 102, 15545-15550)
and
Mootha, Lindgren, et al. (2003, Nat Genet 34, 267-273).
=============================================
'''
import pandas as pd
import sqlite3
import os
import CGAT.Experiment as E
import numpy as np
import csv
import os
def getTables(dbname):
    '''
    Retrieve the names of all tables in the database, grouped by annotation.

    Table names are expected to look like "ensemblg2<type>$<name>"; the
    returned dict maps <type> -> list of <name> strings.
    '''
    dbh = sqlite3.connect(dbname)
    c = dbh.cursor()
    statement = "SELECT name FROM sqlite_master WHERE type='table'"
    c.execute(statement)
    tables = c.fetchall()
    # Fix: dropped the leftover debug print(tables); E.info below already
    # logs each table through the pipeline logger.
    c.close()
    dbh.close()
    D = {}
    for t in tables:
        # Strip the fixed "ensemblg2" prefix and split "<type>$<name>".
        tname = t[0].replace("ensemblg2", "").split("$")
        E.info(tname)
        ttype = tname[0]
        D.setdefault(ttype, []).append(tname[1])
    return D
def list_Duplicates(seq):
    '''Return the indices of all items that already appeared earlier in *seq*.'''
    seen = set()
    duplicate_positions = []
    for position, item in enumerate(seq):
        if item in seen:
            duplicate_positions.append(position)
        else:
            seen.add(item)
    return duplicate_positions
def read_Expression_data(filename):
    '''
    Read a tab-delimited gene list with a single header line.

    Column 0 is the gene id and column 1 a numeric ranking value.

    :returns: tuple ``(ids, values)`` - the ids as a list and the values as
        a numpy array, both sorted by value in descending order.
    '''
    with open(filename, "r") as f:
        lines = list(csv.reader(f, delimiter="\t"))
    lines.pop(0)  # discard the header row
    e_id = [item[0] for item in lines]
    # Fix: np.float was removed in NumPy 1.20+; use the concrete dtype.
    value_arr = np.array([item[1] for item in lines], dtype=np.float64)
    # Negating the values makes argsort produce a descending order.
    ind_sort = np.argsort(-value_arr)
    express_value = value_arr[ind_sort]
    express_id = [e_id[i] for i in ind_sort]
    return express_id, express_value
def readDBTable(dbname, tablename):
    '''
    Reads the specified table from the specified database.
    Returns a list of tuples representing each row.

    NOTE(review): tablename is interpolated directly into the SQL statement
    (sqlite placeholders cannot name identifiers), so it must come from a
    trusted source such as getTables().
    '''
    dbh = sqlite3.connect(dbname)
    c = dbh.cursor()
    statement = "SELECT * FROM %s" % tablename
    E.warn(statement)
    c.execute(statement)
    allresults = c.fetchall()
    c.close()
    dbh.close()
    return allresults
def getDBColumnNames(dbname, tablename):
    '''Return the column names of *tablename* in the sqlite database *dbname*.'''
    connection = sqlite3.connect(dbname)
    frame = pd.read_sql('SELECT * FROM %s' % tablename, connection)
    connection.close()
    return frame.columns
def translateGenelist(dbname, genelist, idtype, id_conversion):
    '''
    Translates a list of gene names from idtype to id_conversion based
    on the database table. This table needs to exist in the database.

    Ids absent from the conversion table are silently dropped; ids with
    several table rows map to the first row, as before.
    '''
    # Conversion tables always put "ensemblg" first in the name, so pick
    # the orientation that exists.
    if(id_conversion == "ensemblg"):
        tab = "%s2%s$geneid" % (id_conversion, idtype)
    else:
        tab = "%s2%s$geneid" % (idtype, id_conversion)
    trans = pd.DataFrame(readDBTable(dbname, tab))
    trans.columns = getDBColumnNames(dbname, tab)
    # The column that is not the target id type is the one we match on.
    mergeon = set(trans.columns)
    mergeon.remove(id_conversion)
    mergeon = list(mergeon)[0]
    database_list = trans[mergeon].tolist()
    # Fix: map each id to its first occurrence once, instead of a linear
    # list.index() scan per query id (was O(len(genelist) * len(table))).
    first_index = {}
    for pos, key in enumerate(database_list):
        if key not in first_index:
            first_index[key] = pos
    index_in_db = [first_index[x] for x in genelist if x in first_index]
    newgenelist = trans[id_conversion][index_in_db]
    return list(newgenelist.values)
def create_File_after_preprocessing(file_ids, values, index_to_kept, outfile):
    '''Write the kept (id, value) rows to *outfile* under a two-column header.

    :param file_ids: all gene ids read from the input
    :param values: ranking values aligned with file_ids
    :param index_to_kept: indices of the rows to keep (duplicates removed)
    :param outfile: path of the tab-separated output file
    '''
    # Fix: use a context manager so the handle is closed even on error.
    with open(outfile, "w") as f:
        f.write("Gene Id\tValues\n")
        for i in index_to_kept:
            f.write(file_ids[i] + "\t" + str(values[i]) + "\n")
    return
def generate_Report_of_preprocessing(file_ids, index_to_remove, report_file):
    '''Write a short report describing how many duplicate ids were dropped.

    :param file_ids: all gene ids read from the input
    :param index_to_remove: indices of duplicate rows (from list_Duplicates)
    :param report_file: path of the plain-text report to write
    '''
    # Fix: one context-managed handle instead of a duplicated f.close()
    # in each branch.
    with open(report_file, "w") as f:
        if(len(index_to_remove) > 0):
            f.write("There were duplicate row identifiers in the ranked list." +
                    "One id was choosen. Details are below:\n# of row ids" +
                    " in original dataset:" + str(len(file_ids)) + "\n# of row UNIQUE ids"
                    " in original dataset:" + str((len(file_ids) - len(index_to_remove))) +
                    "\n The duplicates were:\n")
            for i in index_to_remove:
                f.write(file_ids[i] + "\n")
        else:
            f.write("There were no duplicate row identifiers in the ranked list." +
                    "Details are below:\n# of row ids" +
                    " in original dataset:" + str(len(file_ids)) + "\n# of row UNIQUE ids"
                    " in original dataset:" + str((len(file_ids) - len(index_to_remove))) + "\n")
    return
def preprocess_ExpressionData(
        filename,
        dbname,
        idtype,
        id_conversion,
        outfile):
    '''
    Preprocess an expression dataset.

    Reads the ranked gene list, translates ids from *idtype* to
    *id_conversion* via the annotation database when they differ, drops
    duplicate ids, and writes the cleaned list to *outfile* plus a
    "<name>_preprocessing_report.txt" report in the working directory.

    NOTE(review): assumes the input filename carries exactly two suffixes
    (e.g. "name.gene.tsv") - the three-way split below raises otherwise.
    '''
    filename_base = os.path.basename(filename)
    part1, suff1, suff2 = filename_base.split('.')
    report_file = "_".join([part1, "preprocessing_report.txt"])
    file_ids, values = read_Expression_data(filename)
    if(idtype != id_conversion):
        E.warn("It should not come here")
        E.warn(idtype)
        E.warn(id_conversion)
        file_ids = translateGenelist(dbname, file_ids, idtype, id_conversion)
    '''Remove duplicats.'''
    # Keep every index by default; prune the duplicate positions below.
    index_to_kept = list(range(len(file_ids)))
    index_to_remove = list_Duplicates(file_ids)
    if(len(index_to_remove) > 0):
        index_to_kept = [i for j, i in enumerate(
            index_to_kept) if j not in index_to_remove]
    generate_Report_of_preprocessing(file_ids, index_to_remove, report_file)
    create_File_after_preprocessing(file_ids, values, index_to_kept, outfile)
    return
# preprocess_ExpressionData("Expression_data_test.gene.tsv","/ifs/projects/reshma/DATABASE/human_db_110817","entrez","ensemblg","Expression_data_test")
| |
# shamelessly copied from pliExpertInfo (Vali, Mirakels, Littlesat)
from enigma import iServiceInformation, iPlayableService
from Components.Converter.Converter import Converter
from Components.Element import cached
from Components.config import config
from Tools.Transponder import ConvertToHumanReadable
from Tools.GetEcmInfo import GetEcmInfo
from Poll import Poll
def addspace(text):
    """Append a single trailing space to non-empty text; leave falsy text unchanged."""
    return text + " " if text else text
class MetrixExtraInfo(Poll, Converter, object):
	def __init__(self, type):
		"""Poll-driven converter rendering service/crypto/transponder info.

		:param type: renderer type string supplied by the skin (e.g.
			"CryptoBar", "All", "TransponderInfo"); also matched against
			ca_table entries by the boolean interface.
		"""
		Converter.__init__(self, type)
		Poll.__init__(self)
		self.type = type
		self.poll_interval = 1000  # refresh once per second
		self.poll_enabled = True
		# Columns: (caid range start, caid range end, system name,
		# short badge, always show the badge even when unavailable).
		self.caid_data = (
			( "0x100", "0x1ff", "Seca", "S", True ),
			( "0x500", "0x5ff", "Via", "V", True ),
			( "0x600", "0x6ff", "Irdeto", "I", True ),
			( "0x900", "0x9ff", "NDS", "Nd", True ),
			( "0xb00", "0xbff", "Conax", "Co", True ),
			( "0xd00", "0xdff", "CryptoW", "Cw", True ),
			( "0xe00", "0xeff", "PowerVU", "P", False ),
			("0x1700", "0x17ff", "Beta", "B", True ),
			("0x1800", "0x18ff", "Nagra", "N", True ),
			("0x2600", "0x2600", "Biss", "Bi", False ),
			("0x4ae0", "0x4ae1", "Dre", "D", False ),
			("0x4aee", "0x4aee", "BulCrypt", "B1", False ),
			("0x5581", "0x5581", "BulCrypt", "B2", False )
		)
		# Columns: (converter type string, short badge, True = test the
		# selected caid / False = test the available caids) - see getBool().
		self.ca_table = (
			("CryptoCaidSecaAvailable", "S", False),
			("CryptoCaidViaAvailable", "V", False),
			("CryptoCaidIrdetoAvailable", "I", False),
			("CryptoCaidNDSAvailable", "Nd", False),
			("CryptoCaidConaxAvailable", "Co", False),
			("CryptoCaidCryptoWAvailable", "Cw", False),
			("CryptoCaidPowerVUAvailable", "P", False),
			("CryptoCaidBetaAvailable", "B", False),
			("CryptoCaidNagraAvailable", "N", False),
			("CryptoCaidBissAvailable", "Bi", False),
			("CryptoCaidDreAvailable", "D", False),
			("CryptoCaidBulCrypt1Available","B1", False),
			("CryptoCaidBulCrypt2Available","B2", False),
			("CryptoCaidSecaSelected", "S", True),
			("CryptoCaidViaSelected", "V", True),
			("CryptoCaidIrdetoSelected", "I", True),
			("CryptoCaidNDSSelected", "Nd", True),
			("CryptoCaidConaxSelected", "Co", True),
			("CryptoCaidCryptoWSelected", "Cw", True),
			("CryptoCaidPowerVUSelected", "P", True),
			("CryptoCaidBetaSelected", "B", True),
			("CryptoCaidNagraSelected", "N", True),
			("CryptoCaidBissSelected", "Bi", True),
			("CryptoCaidDreSelected", "D", True),
			("CryptoCaidBulCrypt1Selected", "B1", True),
			("CryptoCaidBulCrypt2Selected", "B2", True),
		)
		self.ecmdata = GetEcmInfo()
		# Cached frontend data; refreshed when evNewProgramInfo fires.
		self.feraw = self.fedata = self.updateFEdata = None
def getCryptoInfo(self, info):
if (info.getInfo(iServiceInformation.sIsCrypted) == 1):
data = self.ecmdata.getEcmData()
self.current_source = data[0]
self.current_caid = data[1]
self.current_provid = data[2]
self.current_ecmpid = data[3]
else:
self.current_source = ""
self.current_caid = "0"
self.current_provid = "0"
self.current_ecmpid = "0"
	def createCryptoBar(self, info):
		"""Build the multi-system crypto bar: one colored badge per CA system.

		Badge color: green-ish when the caid range matches the currently
		selected ecm caid, yellow-ish when any receivable caid falls in the
		range (checked last, so it wins), grey otherwise.  Grey badges are
		emitted only for systems flagged "always show" in caid_data.
		"""
		res = ""
		available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
		for caid_entry in self.caid_data:
			if int(self.current_caid, 16) >= int(caid_entry[0], 16) and int(self.current_caid, 16) <= int(caid_entry[1], 16):
				color="\c0000??00"
			else:
				color = "\c007?7?7?"
			try:
				for caid in available_caids:
					if caid >= int(caid_entry[0], 16) and caid <= int(caid_entry[1], 16):
						color="\c00????00"
			except:
				# available_caids may be None for FTA services.
				pass
			if color != "\c007?7?7?" or caid_entry[4]:
				if res: res += " "
				res += color + caid_entry[3]
		res += "\c00??????"  # reset to the default color
		return res
def createCryptoSeca(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int(self.current_caid, 16) >= int('0x100', 16) and int(self.current_caid, 16) <= int('0x1ff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if caid >= int('0x100', 16) and caid <= int('0x1ff', 16):
color="\c00eeee00"
except:
pass
res = color + 'S'
res += "\c00??????"
return res
def createCryptoVia(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int(self.current_caid, 16) >= int('0x500', 16) and int(self.current_caid, 16) <= int('0x5ff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if caid >= int('0x500', 16) and caid <= int('0x5ff', 16):
color="\c00eeee00"
except:
pass
res = color + 'V'
res += "\c00??????"
return res
def createCryptoIrdeto(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int(self.current_caid, 16) >= int('0x600', 16) and int(self.current_caid, 16) <= int('0x6ff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if caid >= int('0x600', 16) and caid <= int('0x6ff', 16):
color="\c00eeee00"
except:
pass
res = color + 'I'
res += "\c00??????"
return res
def createCryptoNDS(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int(self.current_caid, 16) >= int('0x900', 16) and int(self.current_caid, 16) <= int('0x9ff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if caid >= int('0x900', 16) and caid <= int('0x9ff', 16):
color="\c00eeee00"
except:
pass
res = color + 'NDS'
res += "\c00??????"
return res
def createCryptoConax(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int(self.current_caid, 16) >= int('0xb00', 16) and int(self.current_caid, 16) <= int('0xbff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if caid >= int('0xb00', 16) and caid <= int('0xbff', 16):
color="\c00eeee00"
except:
pass
res = color + 'CO'
res += "\c00??????"
return res
def createCryptoCryptoW(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int(self.current_caid, 16) >= int('0xd00', 16) and int(self.current_caid, 16) <= int('0xdff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if caid >= int('0xd00', 16) and caid <= int('0xdff', 16):
color="\c00eeee00"
except:
pass
res = color + 'CW'
res += "\c00??????"
return res
def createCryptoPowerVU(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int(self.current_caid, 16) >= int('0xe00', 16) and int(self.current_caid, 16) <= int('0xeff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if caid >= int('0xe00', 16) and caid <= int('0xeff', 16):
color="\c00eeee00"
except:
pass
res = color + 'P'
res += "\c00??????"
return res
def createCryptoBeta(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int(self.current_caid, 16) >= int('0x1700', 16) and int(self.current_caid, 16) <= int('0x17ff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if caid >= int('0x1700', 16) and caid <= int('0x17ff', 16):
color="\c00eeee00"
except:
pass
res = color + 'B'
res += "\c00??????"
return res
def createCryptoNagra(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int(self.current_caid, 16) >= int('0x1800', 16) and int(self.current_caid, 16) <= int('0x18ff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if caid >= int('0x1800', 16) and caid <= int('0x18ff', 16):
color="\c00eeee00"
except:
pass
res = color + 'N'
res += "\c00??????"
return res
def createCryptoBiss(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int(self.current_caid, 16) >= int('0x2600', 16) and int(self.current_caid, 16) <= int('0x26ff', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if caid >= int('0x2600', 16) and caid <= int('0x26ff', 16):
color="\c00eeee00"
except:
pass
res = color + 'BI'
res += "\c00??????"
return res
def createCryptoDre(self, info):
available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
if int(self.current_caid, 16) >= int('0x4ae0', 16) and int(self.current_caid, 16) <= int('0x4ae1', 16):
color="\c004c7d3f"
else:
color = "\c009?9?9?"
try:
for caid in available_caids:
if caid >= int('0x4ae0', 16) and caid <= int('0x4ae1', 16):
color="\c00eeee00"
except:
pass
res = color + 'DC'
res += "\c00??????"
return res
	def createCryptoSpecial(self, info):
		"""Return 'Name:caid:provid:sid:ecmpid' for the active CA system,
		'FTA' as the name when no caid_data range matches, '' on error."""
		caid_name = "FTA"
		try:
			for caid_entry in self.caid_data:
				if int(self.current_caid, 16) >= int(caid_entry[0], 16) and int(self.current_caid, 16) <= int(caid_entry[1], 16):
					caid_name = caid_entry[2]
					break
			return caid_name + ":%04x:%04x:%04x:%04x" % (int(self.current_caid,16), int(self.current_provid,16), info.getInfo(iServiceInformation.sSID), int(self.current_ecmpid,16))
		except:
			# current_* attributes may be unset/malformed before
			# getCryptoInfo has run.
			pass
		return ""
	def createResolution(self, info):
		"""Return e.g. '1920x1080i25'; '' when no video width is reported."""
		xres = info.getInfo(iServiceInformation.sVideoWidth)
		if xres == -1:
			return ""
		yres = info.getInfo(iServiceInformation.sVideoHeight)
		# sProgressive: 0 = interlaced "i", 1 = progressive "p", -1 -> "".
		mode = ("i", "p", "")[info.getInfo(iServiceInformation.sProgressive)]
		# NOTE(review): relies on Python 2 integer division to round the
		# frame rate to whole fps - confirm before porting to Python 3.
		fps = str((info.getInfo(iServiceInformation.sFrameRate) + 500) / 1000)
		return str(xres) + "x" + str(yres) + mode + fps
	def createVideoCodec(self, info):
		# Map sVideoType to a codec label; -1 (unknown) indexes the final "".
		return ("MPEG2", "MPEG4", "MPEG1", "MPEG4-II", "VC1", "VC1-SM", "")[info.getInfo(iServiceInformation.sVideoType)]
def createPIDInfo(self, info):
vpid = info.getInfo(iServiceInformation.sVideoPID)
apid = info.getInfo(iServiceInformation.sAudioPID)
pcrpid = info.getInfo(iServiceInformation.sPCRPID)
sidpid = info.getInfo(iServiceInformation.sSID)
if vpid < 0 : vpid = 0
if apid < 0 : apid = 0
if pcrpid < 0 : pcrpid = 0
if sidpid < 0 : sidpid = 0
return "Pids:%04d:%04d:%04d:%05d" % (vpid, apid, pcrpid, sidpid)
	def createTransponderInfo(self, fedata, feraw):
		# One-line transponder summary: system, frequency, polarization,
		# symbol rate, FEC, modulation and orbital position.
		return addspace(self.createTunerSystem(fedata)) + addspace(self.createFrequency(fedata)) + addspace(self.createPolarization(fedata)) \
			+ addspace(self.createSymbolRate(fedata)) + addspace(self.createFEC(fedata)) + addspace(self.createModulation(fedata)) \
			+ self.createOrbPos(feraw)
def createFrequency(self, fedata):
frequency = fedata.get("frequency")
if frequency:
return str(frequency / 1000)
return ""
def createSymbolRate(self, fedata):
symbolrate = fedata.get("symbol_rate")
if symbolrate:
return str(symbolrate / 1000)
return ""
def createPolarization(self, fedata):
polarization = fedata.get("polarization_abbreviation")
if polarization:
return polarization
return ""
def createFEC(self, fedata):
fec = fedata.get("fec_inner")
if fec:
return fec
return ""
def createModulation(self, fedata):
modulation = fedata.get("modulation")
if modulation:
return modulation
return ""
def createTunerType(self, feraw):
tunertype = feraw.get("tuner_type")
if tunertype:
return tunertype
return ""
def createTunerSystem(self, fedata):
tunersystem = fedata.get("system")
if tunersystem:
return tunersystem
return ""
def createOrbPos(self, feraw):
orbpos = feraw.get("orbital_position")
if orbpos > 1800:
return str((float(3600 - orbpos)) / 10.0) + "\xc2\xb0 W"
elif orbpos > 0:
return str((float(orbpos)) / 10.0) + "\xc2\xb0 E"
return ""
def createOrbPosOrTunerSystem(self, fedata,feraw):
orbpos = self.createOrbPos(feraw)
if orbpos is not "":
return orbpos
return self.createTunerSystem(fedata)
def createTransponderName(self,feraw):
orb_pos = ""
orbpos = feraw.get("orbital_position")
if orbpos > 1800:
if orbpos == 3590:
orb_pos = 'Thor/Intelsat'
elif orbpos == 3560:
orb_pos = 'Amos (4'
elif orbpos == 3550:
orb_pos = 'Atlantic Bird'
elif orbpos == 3530:
orb_pos = 'Nilesat/Atlantic Bird'
elif orbpos == 3520:
orb_pos = 'Atlantic Bird'
elif orbpos == 3475:
orb_pos = 'Atlantic Bird'
elif orbpos == 3460:
orb_pos = 'Express'
elif orbpos == 3450:
orb_pos = 'Telstar'
elif orbpos == 3420:
orb_pos = 'Intelsat'
elif orbpos == 3380:
orb_pos = 'Nss'
elif orbpos == 3355:
orb_pos = 'Intelsat'
elif orbpos == 3325:
orb_pos = 'Intelsat'
elif orbpos == 3300:
orb_pos = 'Hispasat'
elif orbpos == 3285:
orb_pos = 'Intelsat'
elif orbpos == 3170:
orb_pos = 'Intelsat'
elif orbpos == 3150:
orb_pos = 'Intelsat'
elif orbpos == 3070:
orb_pos = 'Intelsat'
elif orbpos == 3045:
orb_pos = 'Intelsat'
elif orbpos == 3020:
orb_pos = 'Intelsat 9'
elif orbpos == 2990:
orb_pos = 'Amazonas'
elif orbpos == 2900:
orb_pos = 'Star One'
elif orbpos == 2880:
orb_pos = 'AMC 6 (72'
elif orbpos == 2875:
orb_pos = 'Echostar 6'
elif orbpos == 2860:
orb_pos = 'Horizons'
elif orbpos == 2810:
orb_pos = 'AMC5'
elif orbpos == 2780:
orb_pos = 'NIMIQ 4'
elif orbpos == 2690:
orb_pos = 'NIMIQ 1'
elif orbpos == 3592:
orb_pos = 'Thor/Intelsat'
elif orbpos == 2985:
orb_pos = 'Echostar 3,12'
elif orbpos == 2830:
orb_pos = 'Echostar 8'
elif orbpos == 2630:
orb_pos = 'Galaxy 19'
elif orbpos == 2500:
orb_pos = 'Echostar 10,11'
elif orbpos == 2502:
orb_pos = 'DirectTV 5'
elif orbpos == 2410:
orb_pos = 'Echostar 7 Anik F3'
elif orbpos == 2391:
orb_pos = 'Galaxy 23'
elif orbpos == 2390:
orb_pos = 'Echostar 9'
elif orbpos == 2412:
orb_pos = 'DirectTV 7S'
elif orbpos == 2310:
orb_pos = 'Galaxy 27'
elif orbpos == 2311:
orb_pos = 'Ciel 2'
elif orbpos == 2120:
orb_pos = 'Echostar 2'
else:
orb_pos = str((float(3600 - orbpos)) / 10.0) + "W"
elif orbpos > 0:
if orbpos == 192:
orb_pos = 'Astra 1F'
elif orbpos == 130:
orb_pos = 'Hot Bird 6,7A,8'
elif orbpos == 235:
orb_pos = 'Astra 1E'
elif orbpos == 1100:
orb_pos = 'BSat 1A,2A'
elif orbpos == 1101:
orb_pos = 'N-Sat 110'
elif orbpos == 1131:
orb_pos = 'KoreaSat 5'
elif orbpos == 1440:
orb_pos = 'SuperBird 7,C2'
elif orbpos == 1006:
orb_pos = 'AsiaSat 2'
elif orbpos == 1030:
orb_pos = 'Express A2'
elif orbpos == 1056:
orb_pos = 'Asiasat 3S'
elif orbpos == 1082:
orb_pos = 'NSS 11'
elif orbpos == 881:
orb_pos = 'ST1'
elif orbpos == 900:
orb_pos = 'Yamal 201'
elif orbpos == 917:
orb_pos = 'Mesat'
elif orbpos == 950:
orb_pos = 'Insat 4B'
elif orbpos == 951:
orb_pos = 'NSS 6'
elif orbpos == 765:
orb_pos = 'Telestar'
elif orbpos == 785:
orb_pos = 'ThaiCom 5'
elif orbpos == 800:
orb_pos = 'Express'
elif orbpos == 830:
orb_pos = 'Insat 4A'
elif orbpos == 850:
orb_pos = 'Intelsat 709'
elif orbpos == 750:
orb_pos = 'Abs'
elif orbpos == 720:
orb_pos = 'Intelsat'
elif orbpos == 705:
orb_pos = 'Eutelsat W5'
elif orbpos == 685:
orb_pos = 'Intelsat'
elif orbpos == 620:
orb_pos = 'Intelsat 902'
elif orbpos == 600:
orb_pos = 'Intelsat 904'
elif orbpos == 570:
orb_pos = 'Nss'
elif orbpos == 530:
orb_pos = 'Express AM22'
elif orbpos == 480:
orb_pos = 'Eutelsat 2F2'
elif orbpos == 450:
orb_pos = 'Intelsat'
elif orbpos == 420:
orb_pos = 'Turksat 2A'
elif orbpos == 400:
orb_pos = 'Express AM1'
elif orbpos == 390:
orb_pos = 'Hellas Sat 2'
elif orbpos == 380:
orb_pos = 'Paksat 1'
elif orbpos == 360:
orb_pos = 'Eutelsat Sesat'
elif orbpos == 335:
orb_pos = 'Astra 1M'
elif orbpos == 330:
orb_pos = 'Eurobird 3'
elif orbpos == 328:
orb_pos = 'Galaxy 11'
elif orbpos == 315:
orb_pos = 'Astra 5A'
elif orbpos == 310:
orb_pos = 'Turksat'
elif orbpos == 305:
orb_pos = 'Arabsat'
elif orbpos == 285:
orb_pos = 'Eurobird 1'
elif orbpos == 284:
orb_pos = 'Eurobird/Astra'
elif orbpos == 282:
orb_pos = 'Eurobird/Astra'
elif orbpos == 1220:
orb_pos = 'AsiaSat'
elif orbpos == 1380:
orb_pos = 'Telstar 18'
elif orbpos == 260:
orb_pos = 'Badr 3/4'
elif orbpos == 255:
orb_pos = 'Eurobird 2'
elif orbpos == 215:
orb_pos = 'Eutelsat'
elif orbpos == 216:
orb_pos = 'Eutelsat W6'
elif orbpos == 210:
orb_pos = 'AfriStar 1'
elif orbpos == 160:
orb_pos = 'Eutelsat W2'
elif orbpos == 100:
orb_pos = 'Eutelsat W1'
elif orbpos == 90:
orb_pos = 'Eurobird 9'
elif orbpos == 70:
orb_pos = 'Eutelsat W3A'
elif orbpos == 50:
orb_pos = 'Sirius 4'
elif orbpos == 48:
orb_pos = 'Sirius 4'
elif orbpos == 30:
orb_pos = 'Telecom 2'
else:
orb_pos = str((float(orbpos)) / 10.0) + "E"
return orb_pos
	def createProviderName(self,info):
		# Provider string as reported by the service information.
		return info.getInfoString(iServiceInformation.sProvider)
@cached
def getText(self):
service = self.source.service
if service is None:
return ""
info = service and service.info()
if not info:
return ""
if self.type == "CryptoInfo":
self.getCryptoInfo(info)
if int(config.usage.show_cryptoinfo.getValue()) > 0:
return addspace(self.createCryptoBar(info)) + self.createCryptoSpecial(info)
else:
return addspace(self.createCryptoBar(info)) + addspace(self.current_source) + self.createCryptoSpecial(info)
if self.type == "CryptoBar":
if int(config.usage.show_cryptoinfo.getValue()) > 0:
self.getCryptoInfo(info)
return self.createCryptoBar(info)
else:
return ""
if self.type == "CryptoSeca":
if int(config.usage.show_cryptoinfo.getValue()) > 0:
self.getCryptoInfo(info)
return self.createCryptoSeca(info)
else:
return ""
if self.type == "CryptoVia":
if int(config.usage.show_cryptoinfo.getValue()) > 0:
self.getCryptoInfo(info)
return self.createCryptoVia(info)
else:
return ""
if self.type == "CryptoIrdeto":
if int(config.usage.show_cryptoinfo.getValue()) > 0:
self.getCryptoInfo(info)
return self.createCryptoIrdeto(info)
else:
return ""
if self.type == "CryptoNDS":
if int(config.usage.show_cryptoinfo.getValue()) > 0:
self.getCryptoInfo(info)
return self.createCryptoNDS(info)
else:
return ""
if self.type == "CryptoConax":
if int(config.usage.show_cryptoinfo.getValue()) > 0:
self.getCryptoInfo(info)
return self.createCryptoConax(info)
else:
return ""
if self.type == "CryptoCryptoW":
if int(config.usage.show_cryptoinfo.getValue()) > 0:
self.getCryptoInfo(info)
return self.createCryptoCryptoW(info)
else:
return ""
if self.type == "CryptoBeta":
if int(config.usage.show_cryptoinfo.getValue()) > 0:
self.getCryptoInfo(info)
return self.createCryptoBeta(info)
else:
return ""
if self.type == "CryptoNagra":
if int(config.usage.show_cryptoinfo.getValue()) > 0:
self.getCryptoInfo(info)
return self.createCryptoNagra(info)
else:
return ""
if self.type == "CryptoBiss":
if int(config.usage.show_cryptoinfo.getValue()) > 0:
self.getCryptoInfo(info)
return self.createCryptoBiss(info)
else:
return ""
if self.type == "CryptoDre":
if int(config.usage.show_cryptoinfo.getValue()) > 0:
self.getCryptoInfo(info)
return self.createCryptoDre(info)
else:
return ""
if self.type == "CryptoSpecial":
if int(config.usage.show_cryptoinfo.getValue()) > 0:
self.getCryptoInfo(info)
return self.createCryptoSpecial(info)
else:
return ""
if self.type == "ResolutionString":
return self.createResolution(info)
if self.type == "VideoCodec":
return self.createVideoCodec(info)
if self.updateFEdata:
feinfo = service.frontendInfo()
if feinfo:
self.feraw = feinfo.getAll(True)
if self.feraw:
self.fedata = ConvertToHumanReadable(self.feraw)
feraw = self.feraw
fedata = self.fedata
if not feraw or not fedata:
return ""
if self.type == "All":
self.getCryptoInfo(info)
if int(config.usage.show_cryptoinfo.getValue()) > 0:
return addspace(self.createProviderName(info)) + self.createTransponderInfo(fedata,feraw) + "\n"\
+ addspace(self.createCryptoBar(info)) + addspace(self.createCryptoSpecial(info)) + "\n"\
+ addspace(self.createPIDInfo(info)) + addspace(self.createVideoCodec(info)) + self.createResolution(info)
else:
return addspace(self.createProviderName(info)) + self.createTransponderInfo(fedata,feraw) + "\n" \
+ addspace(self.createCryptoBar(info)) + self.current_source + "\n" \
+ addspace(self.createCryptoSpecial(info)) + addspace(self.createVideoCodec(info)) + self.createResolution(info)
if self.type == "ServiceInfo":
return addspace(self.createProviderName(info)) + addspace(self.createTunerSystem(fedata)) + addspace(self.createFrequency(fedata)) + addspace(self.createPolarization(fedata))\
+ addspace(self.createSymbolRate(fedata)) + addspace(self.createFEC(fedata)) + addspace(self.createModulation(fedata)) + addspace(self.createOrbPos(feraw))\
+ addspace(self.createVideoCodec(info)) + self.createResolution(info)
if self.type == "TransponderInfo2line":
return addspace(self.createProviderName(info)) + addspace(self.createTunerSystem(fedata)) + addspace(self.createTransponderName(feraw)) + '\n'\
+ self.createFrequency(fedata) + addspace(" MHz") + addspace(self.createPolarization(fedata))\
+ addspace(self.createSymbolRate(fedata)) + self.createModulation(fedata) + '-' + addspace(self.createFEC(fedata))
if self.type == "TransponderInfo":
return self.createTransponderInfo(fedata,feraw)
if self.type == "TransponderFrequency":
return self.createFrequency(fedata)
if self.type == "TransponderSymbolRate":
return self.createSymbolRate(fedata)
if self.type == "TransponderPolarization":
return self.createPolarization(fedata)
if self.type == "TransponderFEC":
return self.createFEC(fedata)
if self.type == "TransponderModulation":
return self.createModulation(fedata)
if self.type == "OrbitalPosition":
return self.createOrbPos(feraw)
if self.type == "TunerType":
return self.createTunerType(feraw)
if self.type == "TunerSystem":
return self.createTunerSystem(fedata)
if self.type == "OrbitalPositionOrTunerSystem":
return self.createOrbPosOrTunerSystem(fedata,feraw)
if self.type == "PIDInfo":
return self.createPIDInfo(info)
return _("invalid type")
text = property(getText)
@cached
def getBool(self):
    """Return True when the current service is encrypted and its CAID
    matches the CAID range this converter instance was configured for
    (selected by self.type via self.ca_table)."""
    service = self.source.service
    info = service and service.info()
    if not info:
        return False
    request_caid = None
    # ca_table rows appear to be (type, caid, selected) -- TODO confirm
    for x in self.ca_table:
        if x[0] == self.type:
            request_caid = x[1]
            request_selected = x[2]
            break
    if request_caid is None:
        return False
    if info.getInfo(iServiceInformation.sIsCrypted) != 1:
        # Service is not encrypted at all.
        return False
    data = self.ecmdata.getEcmData()
    if data is None:
        return False
    current_caid = data[1]
    available_caids = info.getInfoObject(iServiceInformation.sCAIDs)
    for caid_entry in self.caid_data:
        if caid_entry[3] == request_caid:
            if(request_selected):
                # caid_entry[0]/[1] are hex-string bounds of the CAID range;
                # current_caid is a hex string from the ECM data.
                if int(current_caid, 16) >= int(caid_entry[0], 16) and int(current_caid, 16) <= int(caid_entry[1], 16):
                    return True
            else: # request available
                try:
                    # available_caids may be None or non-iterable; the bare
                    # except keeps this best-effort.
                    # NOTE(review): consider narrowing to (TypeError,) here.
                    for caid in available_caids:
                        if caid >= int(caid_entry[0], 16) and caid <= int(caid_entry[1], 16):
                            return True
                except:
                    pass
    return False
boolean = property(getBool)
def changed(self, what):
    """Forward change events to Converter.changed while tracking whether
    frontend data should be refreshed on the next poll."""
    if what[0] == self.CHANGED_SPECIFIC:
        self.updateFEdata = False
        if what[1] == iPlayableService.evNewProgramInfo:
            # New program info: fetch fresh frontend data on next poll.
            self.updateFEdata = True
        if what[1] == iPlayableService.evEnd:
            # Service stopped: drop any cached frontend data.
            self.feraw = self.fedata = None
        Converter.changed(self, what)
    elif what[0] == self.CHANGED_POLL and self.updateFEdata is not None:
        self.updateFEdata = False
        Converter.changed(self, what)
| |
# -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Utility classes for Mutagen.
You should not rely on the interfaces here being stable. They are
intended for internal use in Mutagen only.
"""
import sys
import struct
import codecs
import errno
import decimal
from io import BytesIO
from collections import namedtuple
from contextlib import contextmanager
from functools import wraps
from fnmatch import fnmatchcase
_DEFAULT_BUFFER_SIZE = 2 ** 18
def endswith(text, end):
    """Return True if *text* ends with *end*.

    Useful for paths, which can be either str or bytes; the suffix is
    coerced (via ASCII) to match the type of *text*.
    """
    if isinstance(text, str) and not isinstance(end, str):
        end = end.decode("ascii")
    elif not isinstance(text, str) and not isinstance(end, bytes):
        end = end.encode("ascii")
    return text.endswith(end)
def reraise(tp, value, tb):
    """Raise an exception of type *tp* built from *value*, attached to
    the traceback *tb*."""
    exc = tp(value)
    raise exc.with_traceback(tb)
def bchr(x):
    """Return a single byte whose ordinal is *x* (0-255)."""
    return bytes((x,))
def iterbytes(b):
    """Iterate over *b* one byte at a time, yielding length-1 bytes."""
    for v in b:
        yield bytes((v,))
def intround(value):
    """Given a float returns a rounded int. Should give the same result on
    both Py2/3
    """
    # Banker's rounding via decimal, which is exact for binary floats.
    as_decimal = decimal.Decimal.from_float(value)
    return int(as_decimal.to_integral_value(decimal.ROUND_HALF_EVEN))
def is_fileobj(fileobj):
    """Returns:
        bool: if an argument passed ot mutagen should be treated as a
            file object
    """
    # Path-like values (str/bytes, or anything with __fspath__) are
    # filenames; everything else is assumed to be a file object.
    if isinstance(fileobj, (str, bytes)):
        return False
    return not hasattr(fileobj, "__fspath__")
def verify_fileobj(fileobj, writable=False):
    """Verifies that the passed fileobj is a file like object which
    we can use.

    Args:
        writable (bool): verify that the file object is writable as well
    Raises:
        ValueError: In case the object is not a file object that is readable
            (or writable if required) or is not opened in bytes mode.
    """
    try:
        # Zero-length read: cheap probe that never consumes data.
        data = fileobj.read(0)
    except Exception:
        # Distinguish "has no read() at all" from "read() raised".
        if not hasattr(fileobj, "read"):
            raise ValueError("%r not a valid file object" % fileobj)
        raise ValueError("Can't read from file object %r" % fileobj)
    if not isinstance(data, bytes):
        # Text-mode files return str from read(); we need binary mode.
        raise ValueError(
            "file object %r not opened in binary mode" % fileobj)
    if writable:
        try:
            # Zero-length write: probe writability without changing the file.
            fileobj.write(b"")
        except Exception:
            if not hasattr(fileobj, "write"):
                raise ValueError("%r not a valid file object" % fileobj)
            raise ValueError("Can't write to file object %r" % fileobj)
def verify_filename(filename):
    """Checks of the passed in filename has the correct type.

    Raises:
        ValueError: if not a filename
    """
    # Anything that looks like a file object (see is_fileobj) is rejected.
    if is_fileobj(filename):
        raise ValueError("%r not a filename" % filename)
def fileobj_name(fileobj):
    """
    Returns:
        text: A potential filename for a file object. Always a valid
            path type, but might be empty or non-existent.
    """
    name = getattr(fileobj, "name", u"")
    # Some objects expose a non-path "name" (e.g. an int fd); stringify it.
    return name if isinstance(name, (str, bytes)) else str(name)
def loadfile(method=True, writable=False, create=False):
    """A decorator for functions taking a `filething` as a first argument.

    Passes a FileThing instance as the first argument to the wrapped function.

    Args:
        method (bool): If the wrapped functions is a method
        writable (bool): If a filename is passed opens the file readwrite, if
            passed a file object verifies that it is writable.
        create (bool): If passed a filename that does not exist will create
            a new empty file.
    """

    def convert_file_args(args, kwargs):
        # The target may be passed positionally or as filename=/fileobj=
        # keyword arguments; remaining args/kwargs pass through untouched.
        filething = args[0] if args else None
        filename = kwargs.pop("filename", None)
        fileobj = kwargs.pop("fileobj", None)
        return filething, filename, fileobj, args[1:], kwargs

    def wrap(func):
        # Variant used when the wrapped callable is a method (gets self).
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            filething, filename, fileobj, args, kwargs = \
                convert_file_args(args, kwargs)
            with _openfile(self, filething, filename, fileobj,
                           writable, create) as h:
                return func(self, h, *args, **kwargs)

        # Variant used for plain functions (no instance to update).
        @wraps(func)
        def wrapper_func(*args, **kwargs):
            filething, filename, fileobj, args, kwargs = \
                convert_file_args(args, kwargs)
            with _openfile(None, filething, filename, fileobj,
                           writable, create) as h:
                return func(h, *args, **kwargs)

        return wrapper if method else wrapper_func

    return wrap
def convert_error(exc_src, exc_dest):
    """A decorator for reraising exceptions with a different type.
    Mostly useful for IOError.

    Args:
        exc_src (type): The source exception type
        exc_dest (type): The target exception type.
    """

    def wrap(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except exc_dest:
                # Already the target type (matters when exc_dest is a
                # subclass of exc_src): pass it through unchanged.
                raise
            except exc_src as err:
                # Re-raise as exc_dest while preserving the traceback.
                reraise(exc_dest, err, sys.exc_info()[2])

        return wrapper

    return wrap
# The handle passed to loadfile()-wrapped functions by _openfile().
FileThing = namedtuple("FileThing", ["fileobj", "filename", "name"])
"""filename is None if the source is not a filename. name is a filename which
can be used for file type detection
"""
@contextmanager
def _openfile(instance, filething, filename, fileobj, writable, create):
    """yields a FileThing

    Args:
        filething: Either a file name, a file object or None
        filename: Either a file name or None
        fileobj: Either a file object or None
        writable (bool): if the file should be opened
        create (bool): if the file should be created if it doesn't exist.
            implies writable
    Raises:
        MutagenError: In case opening the file failed
        TypeError: in case neither a file name or a file object is passed
    """

    assert not create or writable

    # to allow stacked context managers, just pass the result through
    if isinstance(filething, FileThing):
        filename = filething.filename
        fileobj = filething.fileobj
        filething = None

    if filething is not None:
        # Decide whether filething is a file object or a (path-like) name.
        if is_fileobj(filething):
            fileobj = filething
        elif hasattr(filething, "__fspath__"):
            filename = filething.__fspath__()
            if not isinstance(filename, (bytes, str)):
                raise TypeError("expected __fspath__() to return a filename")
        else:
            filename = filething

    if instance is not None:
        # XXX: take "not writable" as loading the file..
        if not writable:
            instance.filename = filename
        elif filename is None:
            # Saving without an explicit target: reuse the loaded filename.
            filename = getattr(instance, "filename", None)

    if fileobj is not None:
        verify_fileobj(fileobj, writable=writable)
        yield FileThing(fileobj, filename, filename or fileobj_name(fileobj))
    elif filename is not None:
        verify_filename(filename)

        inmemory_fileobj = False
        try:
            fileobj = open(filename, "rb+" if writable else "rb")
        except IOError as e:
            if writable and e.errno == errno.EOPNOTSUPP:
                # Some file systems (gvfs over fuse) don't support opening
                # files read/write. To make things still work read the whole
                # file into an in-memory file like object and write it back
                # later.
                # https://github.com/quodlibet/mutagen/issues/300
                try:
                    with open(filename, "rb") as fileobj:
                        fileobj = BytesIO(fileobj.read())
                except IOError as e2:
                    raise MutagenError(e2)
                inmemory_fileobj = True
            elif create and e.errno == errno.ENOENT:
                assert writable
                try:
                    fileobj = open(filename, "wb+")
                except IOError as e2:
                    raise MutagenError(e2)
            else:
                raise MutagenError(e)

        with fileobj as fileobj:
            yield FileThing(fileobj, filename, filename)

            # If we fell back to an in-memory copy above, flush it to disk
            # after the wrapped function has finished writing.
            if inmemory_fileobj:
                assert writable
                data = fileobj.getvalue()
                try:
                    with open(filename, "wb") as fileobj:
                        fileobj.write(data)
                except IOError as e:
                    raise MutagenError(e)
    else:
        raise TypeError("Missing filename or fileobj argument")
class MutagenError(Exception):
    """Base class for all custom exceptions in mutagen

    .. versionadded:: 1.25
    """

    # Report as "mutagen.MutagenError" in tracebacks and pickles.
    __module__ = "mutagen"
def total_ordering(cls):
    """Adds all possible ordering methods to a class.

    Needs a working __eq__ and __lt__ and will supply the rest.
    """
    assert "__eq__" in cls.__dict__
    assert "__lt__" in cls.__dict__

    def __le__(self, other):
        return self == other or self < other

    def __gt__(self, other):
        return not (self == other or self < other)

    def __ge__(self, other):
        return not self < other

    def __ne__(self, other):
        return not self.__eq__(other)

    cls.__le__ = __le__
    cls.__gt__ = __gt__
    cls.__ge__ = __ge__
    cls.__ne__ = __ne__
    return cls
def hashable(cls):
    """Makes sure the class is hashable.

    Needs a working __eq__ and __hash__ and will add a __ne__.
    """
    assert cls.__dict__["__hash__"] is not None
    assert "__eq__" in cls.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    cls.__ne__ = __ne__
    return cls
def enum(cls):
    """A decorator for creating an int enum class.

    Makes the values a subclass of the type and implements repr/str.
    The new class will be a subclass of int.

    Args:
        cls (type): The class to convert to an enum
    Returns:
        type: A new class

    ::

        @enum
        class Foo(object):
            FOO = 1
            BAR = 2
    """
    assert cls.__bases__ == (object,)

    attrs = dict(cls.__dict__)
    new_type = type(cls.__name__, (int,), attrs)
    new_type.__module__ = cls.__module__

    # ALL_CAPS int attributes become instances; remember value -> name.
    value_to_name = {}
    for name, value in attrs.items():
        if name.upper() == name and isinstance(value, int):
            setattr(new_type, name, new_type(value))
            value_to_name[value] = name

    def __str__(self):
        try:
            return "%s.%s" % (type(self).__name__, value_to_name[self])
        except KeyError:
            return "%d" % int(self)

    def __repr__(self):
        try:
            return "<%s.%s: %d>" % (
                type(self).__name__, value_to_name[self], int(self))
        except KeyError:
            return "%d" % int(self)

    new_type.__str__ = __str__
    new_type.__repr__ = __repr__
    return new_type
def flags(cls):
    """A decorator for creating an int flags class.

    Makes the values a subclass of the type and implements repr/str.
    The new class will be a subclass of int.

    Args:
        cls (type): The class to convert to an flags
    Returns:
        type: A new class

    ::

        @flags
        class Foo(object):
            FOO = 1
            BAR = 2
    """
    assert cls.__bases__ == (object,)

    attrs = dict(cls.__dict__)
    new_type = type(cls.__name__, (int,), attrs)
    new_type.__module__ = cls.__module__

    # ALL_CAPS int attributes become instances; remember value -> name.
    value_to_name = {}
    for name, value in attrs.items():
        if name.upper() == name and isinstance(value, int):
            setattr(new_type, name, new_type(value))
            value_to_name[value] = name

    def __str__(self):
        remaining = int(self)
        parts = []
        # Name each known bit that is set; whatever is left stays numeric.
        for bit, name in value_to_name.items():
            if remaining & bit:
                parts.append("%s.%s" % (type(self).__name__, name))
                remaining &= ~bit
        if remaining != 0 or not parts:
            parts.append(str(remaining))
        return " | ".join(parts)

    def __repr__(self):
        return "<%s: %d>" % (str(self), int(self))

    new_type.__str__ = __str__
    new_type.__repr__ = __repr__
    return new_type
@total_ordering
class DictMixin(object):
    """Implement the dict API using keys() and __*item__ methods.

    Similar to UserDict.DictMixin, this takes a class that defines
    __getitem__, __setitem__, __delitem__, and keys(), and turns it
    into a full dict-like object.

    UserDict.DictMixin is not suitable for this purpose because it's
    an old-style class.

    This class is not optimized for very large dictionaries; many
    functions have linear memory requirements. I recommend you
    override some of these functions if speed is required.
    """

    def __iter__(self):
        return iter(self.keys())

    def __has_key(self, key):
        # EAFP membership test: a missing key surfaces as KeyError.
        try:
            self[key]
        except KeyError:
            return False
        else:
            return True

    __contains__ = __has_key

    def values(self):
        return [self[k] for k in self.keys()]

    def items(self):
        return list(zip(self.keys(), self.values()))

    def clear(self):
        # Iterate over a snapshot since we mutate while deleting.
        for key in list(self.keys()):
            self.__delitem__(key)

    def pop(self, key, *args):
        if len(args) > 1:
            raise TypeError("pop takes at most two arguments")
        try:
            value = self[key]
        except KeyError:
            if args:
                # A default was supplied; return it instead of raising.
                return args[0]
            else:
                raise
        del(self[key])
        return value

    def popitem(self):
        # for/else: the else only fires when keys() is empty.
        for key in self.keys():
            break
        else:
            raise KeyError("dictionary is empty")
        return key, self.pop(key)

    def update(self, other=None, **kwargs):
        """Update from a mapping or an iterable of (key, value) pairs,
        then from keyword arguments — same contract as dict.update."""
        if other is None:
            other = {}
        try:
            for key, value in other.items():
                self.__setitem__(key, value)
        except AttributeError:
            # *other* has no items(): treat it as (key, value) pairs.
            for key, value in other:
                self[key] = value
        # Bug fix: keyword arguments now apply on top of *other*;
        # previously they were silently dropped when *other* was given,
        # unlike dict.update(mapping, **kwargs).
        for key, value in kwargs.items():
            self[key] = value

    def setdefault(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return default

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def __repr__(self):
        return repr(dict(self.items()))

    def __eq__(self, other):
        return dict(self.items()) == other

    def __lt__(self, other):
        return dict(self.items()) < other

    # Defining __eq__ would otherwise make instances unhashable.
    __hash__ = object.__hash__

    def __len__(self):
        return len(self.keys())
class DictProxy(DictMixin):
    """A DictMixin whose storage is a private plain dict."""

    def __init__(self, *args, **kwargs):
        # Name-mangled (_DictProxy__dict) so subclasses can't clash with it.
        self.__dict = {}
        super(DictProxy, self).__init__(*args, **kwargs)

    def __getitem__(self, key):
        return self.__dict[key]

    def __setitem__(self, key, value):
        self.__dict[key] = value

    def __delitem__(self, key):
        del(self.__dict[key])

    def keys(self):
        return self.__dict.keys()
def _fill_cdata(cls):
    """Add struct pack/unpack functions to *cls* as staticmethods.

    For every size/signedness/endianness combination this registers
    e.g. ``uint32_le(data)``, ``uint32_le_from(data, offset=0)``,
    ``to_uint32_le(value)`` plus ``*_min``/``*_max`` bound constants,
    under both the C-style name (uint, ushort, ...) and the bit-width
    name (uint32, uint16, ...).
    """

    def make_unpack(s):
        # struct.unpack returns a 1-tuple here; expose the bare value.
        def unpack(data):
            return s.unpack(data)[0]
        return unpack

    def make_unpack_from(s):
        # Returns (value, new_offset) so callers can chain reads.
        def unpack_from(data, offset=0):
            return s.unpack_from(data, offset)[0], offset + s.size
        return unpack_from

    funcs = {}
    for key, name in [("b", "char"), ("h", "short"),
                      ("i", "int"), ("q", "longlong")]:
        for echar, esuffix in [("<", "le"), (">", "be")]:
            esuffix = "_" + esuffix
            for unsigned in [True, False]:
                s = struct.Struct(echar + (key.upper() if unsigned else key))
                # Bug fix: the old code built an unpack_from wrapper and
                # immediately overwrote it; only one is created now.
                unpack = make_unpack(s)
                unpack_from = make_unpack_from(s)
                pack = s.pack
                prefix = "u" if unsigned else ""
                if s.size == 1:
                    # Single bytes have no endianness; drop the suffix.
                    esuffix = ""
                bits = str(s.size * 8)
                if unsigned:
                    max_ = 2 ** (s.size * 8) - 1
                    min_ = 0
                else:
                    max_ = 2 ** (s.size * 8 - 1) - 1
                    min_ = - 2 ** (s.size * 8 - 1)
                funcs["%s%s_min" % (prefix, name)] = min_
                funcs["%s%s_max" % (prefix, name)] = max_
                funcs["%sint%s_min" % (prefix, bits)] = min_
                funcs["%sint%s_max" % (prefix, bits)] = max_
                funcs["%s%s%s" % (prefix, name, esuffix)] = unpack
                funcs["%sint%s%s" % (prefix, bits, esuffix)] = unpack
                funcs["%s%s%s_from" % (prefix, name, esuffix)] = unpack_from
                funcs["%sint%s%s_from" % (prefix, bits, esuffix)] = unpack_from
                funcs["to_%s%s%s" % (prefix, name, esuffix)] = pack
                funcs["to_%sint%s%s" % (prefix, bits, esuffix)] = pack

    for key, func in funcs.items():
        setattr(cls, key, staticmethod(func))
class cdata(object):
    """C character buffer to Python numeric type conversions.

    For each size/sign/endianness:
    uint32_le(data)/to_uint32_le(num)/uint32_le_from(data, offset=0)
    """

    error = struct.error

    # 256-entry table mapping each byte to its bit-reversed counterpart.
    bitswap = b''.join(
        bchr(sum(((val >> i) & 1) << (7 - i) for i in range(8)))
        for val in range(256))

    # test_bit(value, n) -> True if bit n of value is set.
    test_bit = staticmethod(lambda value, n: bool((value >> n) & 1))


# Attach the numeric helpers advertised in the class docstring.
_fill_cdata(cdata)
def get_size(fileobj):
    """Returns the size of the file.

    The position when passed in will be preserved if no error occurs.

    Args:
        fileobj (fileobj)
    Returns:
        int: The size of the file
    Raises:
        IOError
    """
    saved = fileobj.tell()
    try:
        # Seek to EOF and read off the position; tell() is portable across
        # file-like objects whose seek() returns None.
        fileobj.seek(0, 2)
        return fileobj.tell()
    finally:
        fileobj.seek(saved, 0)
def read_full(fileobj, size):
    """Like fileobj.read but raises IOError if not all requested data is
    returned.

    If you want to distinguish IOError and the EOS case, better handle
    the error yourself instead of using this.

    Args:
        fileobj (fileobj)
        size (int): amount of bytes to read
    Raises:
        ValueError: In case size is negative
        IOError: In case read fails or not enough data is read
    """
    if size < 0:
        raise ValueError("size must not be negative")

    data = fileobj.read(size)
    if len(data) != size:
        # Bug fix: include the sizes; a bare IOError() is hard to debug.
        raise IOError(
            "short read: wanted %d bytes, got %d" % (size, len(data)))
    return data
def seek_end(fileobj, offset):
    """Like fileobj.seek(-offset, 2), but will not try to go beyond the start

    Needed since file objects from BytesIO will not raise IOError and
    file objects from open() will raise IOError if going to a negative offset.
    To make things easier for custom implementations, instead of allowing
    both behaviors, we just don't do it.

    Args:
        fileobj (fileobj)
        offset (int): how many bytes away from the end backwards to seek to
    Raises:
        IOError
    """
    if offset < 0:
        raise ValueError

    # Determine the size while restoring the current position (inlined
    # from the small get_size helper).
    saved = fileobj.tell()
    try:
        fileobj.seek(0, 2)
        size = fileobj.tell()
    finally:
        fileobj.seek(saved, 0)

    if size < offset:
        # The file is shorter than the requested offset: clamp to start.
        fileobj.seek(0, 0)
    else:
        fileobj.seek(-offset, 2)
def resize_file(fobj, diff, BUFFER_SIZE=_DEFAULT_BUFFER_SIZE):
    """Resize a file by `diff`.

    New space will be filled with zeros.

    Args:
        fobj (fileobj)
        diff (int): amount of size to change
    Raises:
        IOError
    """
    fobj.seek(0, 2)
    filesize = fobj.tell()

    if diff < 0:
        if filesize + diff < 0:
            # Cannot shrink below zero length.
            raise ValueError
        # truncate flushes internally
        fobj.truncate(filesize + diff)
    elif diff > 0:
        try:
            # Append zeros in chunks so memory use stays bounded.
            while diff:
                addsize = min(BUFFER_SIZE, diff)
                fobj.write(b"\x00" * addsize)
                diff -= addsize
            fobj.flush()
        except IOError as e:
            if e.errno == errno.ENOSPC:
                # To reduce the chance of corrupt files in case of missing
                # space try to revert the file expansion back. Of course
                # in reality every in-file-write can also fail due to COW etc.
                # Note: IOError gets also raised in flush() due to buffering
                fobj.truncate(filesize)
            raise
def move_bytes(fobj, dest, src, count, BUFFER_SIZE=_DEFAULT_BUFFER_SIZE):
    """Moves data around using read()/write().

    Args:
        fileobj (fileobj)
        dest (int): The destination offset
        src (int): The source offset
        count (int) The amount of data to move
    Raises:
        IOError: In case an operation on the fileobj fails
        ValueError: In case invalid parameters were given
    """
    if dest < 0 or src < 0 or count < 0:
        raise ValueError

    fobj.seek(0, 2)
    filesize = fobj.tell()

    if max(dest, src) + count > filesize:
        raise ValueError("area outside of file")

    if src > dest:
        # Moving towards the front: copy chunks front-to-back so that
        # not-yet-read source bytes are never overwritten.
        moved = 0
        while count - moved:
            this_move = min(BUFFER_SIZE, count - moved)
            fobj.seek(src + moved)
            buf = fobj.read(this_move)
            fobj.seek(dest + moved)
            fobj.write(buf)
            moved += this_move
        fobj.flush()
    else:
        # Moving towards the back: copy chunks back-to-front, for the
        # same overlap-safety reason.
        while count:
            this_move = min(BUFFER_SIZE, count)
            fobj.seek(src + count - this_move)
            buf = fobj.read(this_move)
            fobj.seek(count + dest - this_move)
            fobj.write(buf)
            count -= this_move
        fobj.flush()
def insert_bytes(fobj, size, offset, BUFFER_SIZE=_DEFAULT_BUFFER_SIZE):
    """Insert size bytes of empty space starting at offset.

    fobj must be an open file object, open rb+ or
    equivalent.

    Args:
        fobj (fileobj)
        size (int): The amount of space to insert
        offset (int): The offset at which to insert the space
    Raises:
        IOError
    """
    if size < 0 or offset < 0:
        raise ValueError

    fobj.seek(0, 2)
    filesize = fobj.tell()
    movesize = filesize - offset

    if movesize < 0:
        # offset lies beyond the end of the file.
        raise ValueError

    # Grow the file first, then shift the tail right to open the gap.
    resize_file(fobj, size, BUFFER_SIZE)
    move_bytes(fobj, offset + size, offset, movesize, BUFFER_SIZE)
def delete_bytes(fobj, size, offset, BUFFER_SIZE=_DEFAULT_BUFFER_SIZE):
    """Delete size bytes of empty space starting at offset.

    fobj must be an open file object, open rb+ or
    equivalent.

    Args:
        fobj (fileobj)
        size (int): The amount of space to delete
        offset (int): The start of the space to delete
    Raises:
        IOError
    """
    if size < 0 or offset < 0:
        raise ValueError

    fobj.seek(0, 2)
    filesize = fobj.tell()
    movesize = filesize - offset - size

    if movesize < 0:
        # The deleted span would extend past the end of the file.
        raise ValueError

    # Shift the tail left over the deleted span, then shrink the file.
    move_bytes(fobj, offset, offset + size, movesize, BUFFER_SIZE)
    resize_file(fobj, -size, BUFFER_SIZE)
def resize_bytes(fobj, old_size, new_size, offset):
    """Resize an area in a file adding and deleting at the end of it.

    Does nothing if no resizing is needed.

    Args:
        fobj (fileobj)
        old_size (int): The area starting at offset
        new_size (int): The new size of the area
        offset (int): The start of the area
    Raises:
        IOError
    """
    diff = new_size - old_size
    if diff < 0:
        # The area shrinks: drop bytes from its tail.
        delete_bytes(fobj, -diff, offset + new_size)
    elif diff > 0:
        # The area grows: open a gap right after its current end.
        insert_bytes(fobj, diff, offset + old_size)
def dict_match(d, key, default=None):
    """Like __getitem__ but works as if the keys() are all filename patterns.

    Returns the value of any dict key that matches the passed key.

    Args:
        d (dict): A dict with filename patterns as keys
        key (str): A key potentially matching any of the keys
        default (object): The object to return if no pattern matched the
            passed in key
    Returns:
        object: The dict value where the dict key matched the passed in key.
            Or default if there was no match.
    """
    # An exact hit wins, unless the key itself contains pattern syntax.
    if "[" not in key and key in d:
        return d[key]
    for pattern, value in d.items():
        if fnmatchcase(key, pattern):
            return value
    return default
def encode_endian(text, encoding, errors="strict", le=True):
    """Like text.encode(encoding) but always returns little endian/big endian
    BOMs instead of the system one.

    Args:
        text (text)
        encoding (str)
        errors (str)
        le (boolean): if little endian
    Returns:
        bytes
    Raises:
        UnicodeEncodeError
        LookupError
    """
    # Normalize the codec name so "UTF16", "utf_16" etc. all compare equal.
    name = codecs.lookup(encoding).name

    if name == "utf-16":
        if le:
            bom, codec = codecs.BOM_UTF16_LE, "utf-16-le"
        else:
            bom, codec = codecs.BOM_UTF16_BE, "utf-16-be"
    elif name == "utf-32":
        if le:
            bom, codec = codecs.BOM_UTF32_LE, "utf-32-le"
        else:
            bom, codec = codecs.BOM_UTF32_BE, "utf-32-be"
    else:
        # Non-UTF-16/32 codecs have no BOM to force.
        bom, codec = b"", name

    return bom + text.encode(codec, errors)
def decode_terminated(data, encoding, strict=True):
    """Returns the decoded data until the first NULL terminator
    and all data after it.

    Args:
        data (bytes): data to decode
        encoding (str): The codec to use
        strict (bool): If True will raise ValueError in case no NULL is found
            but the available data decoded successfully.
    Returns:
        Tuple[`text`, `bytes`]: A tuple containing the decoded text and the
            remaining data after the found NULL termination.
    Raises:
        UnicodeError: In case the data can't be decoded.
        LookupError:In case the encoding is not found.
        ValueError: In case the data isn't null terminated (even if it is
            encoded correctly) except if strict is False, then the decoded
            string will be returned anyway.
    """
    codec_info = codecs.lookup(encoding)

    # normalize encoding name so we can compare by name
    encoding = codec_info.name

    # fast path
    if encoding in ("utf-8", "iso8859-1"):
        # Single-byte terminator: a plain b"\x00" scan is safe here.
        index = data.find(b"\x00")
        if index == -1:
            # make sure we raise UnicodeError first, like in the slow path
            res = data.decode(encoding), b""
            if strict:
                raise ValueError("not null terminated")
            else:
                return res
        return data[:index].decode(encoding), data[index + 1:]

    # slow path
    # Multi-byte codecs (e.g. UTF-16) can contain 0x00 inside characters,
    # so decode incrementally and look for a decoded U+0000 instead.
    decoder = codec_info.incrementaldecoder()
    r = []
    for i, b in enumerate(iterbytes(data)):
        c = decoder.decode(b)
        if c == u"\x00":
            return u"".join(r), data[i + 1:]
        r.append(c)
    else:
        # make sure the decoder is finished
        r.append(decoder.decode(b"", True))
        if strict:
            raise ValueError("not null terminated")
    return u"".join(r), b""
class BitReaderError(Exception):
    """Raised by BitReader when the underlying file runs out of data."""
    pass
class BitReader(object):
    """Read an MSB-first bit stream from a binary file object."""

    def __init__(self, fileobj):
        self._fileobj = fileobj
        # Pending bits, right-aligned in an int.
        self._buffer = 0
        # Number of valid bits currently in _buffer.
        self._bits = 0
        self._pos = fileobj.tell()

    def bits(self, count):
        """Reads `count` bits and returns an uint, MSB read first.

        May raise BitReaderError if not enough data could be read or
        IOError by the underlying file object.
        """
        if count < 0:
            raise ValueError
        if count > self._bits:
            # Refill: read just enough whole bytes to satisfy the request.
            n_bytes = (count - self._bits + 7) // 8
            data = self._fileobj.read(n_bytes)
            if len(data) != n_bytes:
                raise BitReaderError("not enough data")
            for b in bytearray(data):
                self._buffer = (self._buffer << 8) | b
            self._bits += n_bytes * 8

        self._bits -= count
        value = self._buffer >> self._bits
        # Keep only the bits that were not consumed.
        self._buffer &= (1 << self._bits) - 1
        assert self._bits < 8
        return value

    def bytes(self, count):
        """Returns a bytearray of length `count`. Works unaligned."""
        if count < 0:
            raise ValueError

        # fast path
        if self._bits == 0:
            data = self._fileobj.read(count)
            if len(data) != count:
                raise BitReaderError("not enough data")
            return data

        return bytes(bytearray(self.bits(8) for _ in range(count)))

    def skip(self, count):
        """Skip `count` bits.

        Might raise BitReaderError if there wasn't enough data to skip,
        but might also fail on the next bits() instead.
        """
        if count < 0:
            raise ValueError

        if count <= self._bits:
            self.bits(count)
        else:
            # Drop the buffered bits, seek over whole bytes, then consume
            # the remaining sub-byte amount.
            count -= self.align()
            n_bytes = count // 8
            self._fileobj.seek(n_bytes, 1)
            count -= n_bytes * 8
            self.bits(count)

    def get_position(self):
        """Returns the amount of bits read or skipped so far"""
        return (self._fileobj.tell() - self._pos) * 8 - self._bits

    def align(self):
        """Align to the next byte, returns the amount of bits skipped"""
        bits = self._bits
        self._buffer = 0
        self._bits = 0
        return bits

    def is_aligned(self):
        """If we are currently aligned to bytes and nothing is buffered"""
        return self._bits == 0
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for iBMC Power interface."""
from unittest import mock
from oslo_utils import importutils
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers.modules.ibmc import mappings
from ironic.drivers.modules.ibmc import utils
from ironic.tests.unit.drivers.modules.ibmc import base
constants = importutils.try_import('ibmc_client.constants')
ibmc_client = importutils.try_import('ibmc_client')
ibmc_error = importutils.try_import('ibmc_client.exceptions')
@mock.patch('oslo_utils.eventletutils.EventletEvent.wait',
            lambda *args, **kwargs: None)
class IBMCPowerTestCase(base.IBMCTestCase):
    """Tests for the iBMC power interface: properties, validation and
    get/set power state (the event wait is patched out so state-polling
    loops don't sleep)."""

    def test_get_properties(self):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            properties = task.driver.get_properties()
            for prop in utils.COMMON_PROPERTIES:
                self.assertIn(prop, properties)

    @mock.patch.object(utils, 'parse_driver_info', autospec=True)
    def test_validate(self, mock_parse_driver_info):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.driver.power.validate(task)
            mock_parse_driver_info.assert_called_once_with(task.node)

    @mock.patch.object(ibmc_client, 'connect', spec=object)
    def test_get_power_state(self, connect_ibmc):
        conn = self.mock_ibmc_conn(connect_ibmc)
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            # Every iBMC power state must map to the expected ironic state.
            expected_values = mappings.GET_POWER_STATE_MAP
            for current, expected in expected_values.items():
                # Mock
                conn.system.get.return_value = mock.Mock(
                    power_state=current
                )
                # Asserts
                self.assertEqual(expected,
                                 task.driver.power.get_power_state(task))
                conn.system.get.assert_called_once()
                connect_ibmc.assert_called_once_with(**self.ibmc)
                # Reset Mock
                conn.system.get.reset_mock()
                connect_ibmc.reset_mock()

    @mock.patch.object(ibmc_client, 'connect', spec=object)
    def test_set_power_state(self, connect_ibmc):
        conn = self.mock_ibmc_conn(connect_ibmc)
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            state_mapping = mappings.SET_POWER_STATE_MAP
            for (expect_state, reset_type) in state_mapping.items():
                if expect_state in (states.POWER_OFF, states.SOFT_POWER_OFF):
                    final = constants.SYSTEM_POWER_STATE_OFF
                    transient = constants.SYSTEM_POWER_STATE_ON
                else:
                    final = constants.SYSTEM_POWER_STATE_ON
                    transient = constants.SYSTEM_POWER_STATE_OFF
                # Mocks
                # The node reaches *final* on the 4th status poll.
                mock_system_get_results = (
                    [mock.Mock(power_state=transient)] * 3
                    + [mock.Mock(power_state=final)])
                conn.system.get.side_effect = mock_system_get_results

                task.driver.power.set_power_state(task, expect_state)

                # Asserts
                connect_ibmc.assert_called_with(**self.ibmc)
                conn.system.reset.assert_called_once_with(reset_type)
                self.assertEqual(4, conn.system.get.call_count)

                # Reset Mocks
                # TODO(Qianbiao.NG) why reset_mock does not reset call_count
                connect_ibmc.reset_mock()
                conn.system.get.reset_mock()
                conn.system.reset.reset_mock()

    @mock.patch.object(ibmc_client, 'connect', spec=object)
    def test_set_power_state_not_reached(self, connect_ibmc):
        conn = self.mock_ibmc_conn(connect_ibmc)
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.config(power_state_change_timeout=2, group='conductor')
            state_mapping = mappings.SET_POWER_STATE_MAP
            for (expect_state, reset_type) in state_mapping.items():
                if expect_state in (states.POWER_OFF, states.SOFT_POWER_OFF):
                    final = constants.SYSTEM_POWER_STATE_OFF
                    transient = constants.SYSTEM_POWER_STATE_ON
                else:
                    final = constants.SYSTEM_POWER_STATE_ON
                    transient = constants.SYSTEM_POWER_STATE_OFF
                # Mocks
                # The state does not settle before the 2s timeout expires.
                mock_system_get_results = (
                    [mock.Mock(power_state=transient)] * 5
                    + [mock.Mock(power_state=final)])
                conn.system.get.side_effect = mock_system_get_results

                self.assertRaises(exception.PowerStateFailure,
                                  task.driver.power.set_power_state,
                                  task, expect_state)

                # Asserts
                connect_ibmc.assert_called_with(**self.ibmc)
                conn.system.reset.assert_called_once_with(reset_type)

                # Reset Mocks
                connect_ibmc.reset_mock()
                conn.system.get.reset_mock()
                conn.system.reset.reset_mock()

    @mock.patch.object(ibmc_client, 'connect', spec=object)
    def test_set_power_state_fail(self, connect_ibmc):
        conn = self.mock_ibmc_conn(connect_ibmc)
        # Mocks
        # The BMC rejects the reset request outright.
        conn.system.reset.side_effect = (
            ibmc_error.IBMCClientError
        )
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            # Asserts
            self.assertRaisesRegex(
                exception.IBMCError, 'set iBMC power state',
                task.driver.power.set_power_state, task, states.POWER_ON)
            connect_ibmc.assert_called_with(**self.ibmc)
            conn.system.reset.assert_called_once_with(constants.RESET_ON)

    @mock.patch.object(ibmc_client, 'connect', spec=object)
    def test_set_power_state_timeout(self, connect_ibmc):
        conn = self.mock_ibmc_conn(connect_ibmc)
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.config(power_state_change_timeout=2, group='conductor')
            # Mocks
            # Node stays OFF for every poll, so POWER_ON is never reached.
            conn.system.get.side_effect = (
                [mock.Mock(power_state=constants.SYSTEM_POWER_STATE_OFF)] * 3
            )
            # Asserts
            self.assertRaisesRegex(
                exception.PowerStateFailure,
                'Failed to set node power state to power on',
                task.driver.power.set_power_state, task, states.POWER_ON)
            connect_ibmc.assert_called_with(**self.ibmc)
            conn.system.reset.assert_called_once_with(constants.RESET_ON)

    def test_get_supported_power_states(self):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            supported_power_states = (
                task.driver.power.get_supported_power_states(task))
            self.assertEqual(sorted(list(mappings.SET_POWER_STATE_MAP)),
                             sorted(supported_power_states))
@mock.patch('oslo_utils.eventletutils.EventletEvent.wait',
            lambda *args, **kwargs: None)
class IBMCPowerRebootTestCase(base.IBMCTestCase):
    """Tests for the iBMC power interface reboot path (the event wait is
    patched out so state-polling loops don't sleep)."""

    @mock.patch.object(ibmc_client, 'connect', spec=object)
    def test_reboot(self, connect_ibmc):
        conn = self.mock_ibmc_conn(connect_ibmc)
        # A powered-off node gets powered on; a powered-on node gets a
        # forced restart.
        expected_values = [
            (constants.SYSTEM_POWER_STATE_OFF, constants.RESET_ON),
            (constants.SYSTEM_POWER_STATE_ON,
             constants.RESET_FORCE_RESTART)
        ]

        # for (expect_state, reset_type) in state_mapping.items():
        for current, reset_type in expected_values:
            mock_system_get_results = [
                # Initial state
                mock.Mock(power_state=current),
                # Transient state - powering off
                mock.Mock(power_state=constants.SYSTEM_POWER_STATE_OFF),
                # Final state - down powering off
                mock.Mock(power_state=constants.SYSTEM_POWER_STATE_ON)
            ]
            conn.system.get.side_effect = mock_system_get_results

            with task_manager.acquire(self.context, self.node.uuid,
                                      shared=False) as task:
                task.driver.power.reboot(task)

                # Asserts
                connect_ibmc.assert_called_with(**self.ibmc)
                conn.system.reset.assert_called_once_with(reset_type)

                # Reset Mocks
                connect_ibmc.reset_mock()
                conn.system.get.reset_mock()
                conn.system.reset.reset_mock()

    @mock.patch.object(ibmc_client, 'connect', spec=object)
    def test_reboot_not_reached(self, connect_ibmc):
        conn = self.mock_ibmc_conn(connect_ibmc)
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            # Mocks
            # The node never leaves the OFF state after the reset.
            conn.system.get.return_value = mock.Mock(
                power_state=constants.SYSTEM_POWER_STATE_OFF)

            self.assertRaisesRegex(
                exception.PowerStateFailure,
                'Failed to set node power state to power on',
                task.driver.power.reboot, task)

            # Asserts
            connect_ibmc.assert_called_with(**self.ibmc)
            conn.system.reset.assert_called_once_with(constants.RESET_ON)

    @mock.patch.object(ibmc_client, 'connect', spec=object)
    def test_reboot_fail(self, connect_ibmc):
        conn = self.mock_ibmc_conn(connect_ibmc)
        # Mocks
        # The BMC rejects the reset request for a powered-on node.
        conn.system.reset.side_effect = (
            ibmc_error.IBMCClientError
        )
        conn.system.get.return_value = mock.Mock(
            power_state=constants.SYSTEM_POWER_STATE_ON
        )
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            # Asserts
            self.assertRaisesRegex(
                exception.IBMCError, 'reboot iBMC',
                task.driver.power.reboot, task)
            connect_ibmc.assert_called_with(**self.ibmc)
            conn.system.get.assert_called_once()
            conn.system.reset.assert_called_once_with(
                constants.RESET_FORCE_RESTART)

    @mock.patch.object(ibmc_client, 'connect', spec=object)
    def test_reboot_timeout(self, connect_ibmc):
        conn = self.mock_ibmc_conn(connect_ibmc)
        # Mocks
        # Node stays OFF on every poll, so the reboot never completes.
        conn.system.get.side_effect = [mock.Mock(
            power_state=constants.SYSTEM_POWER_STATE_OFF
        )] * 5
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.config(power_state_change_timeout=2, group='conductor')
            # Asserts
            self.assertRaisesRegex(
                exception.PowerStateFailure,
                'Failed to set node power state to power on',
                task.driver.power.reboot, task)

            # Asserts
            connect_ibmc.assert_called_with(**self.ibmc)
            conn.system.reset.assert_called_once_with(
                constants.RESET_ON)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
#from morphforge.morphology.core import MorphPath
from morphforge.core import LocMgr
from morphforge.morphology.visitor import SectionIndexerDF
#from morphforge.simulation.neuron.objects.neuronrecordable import NEURONRecordableOnLocation
from random import randint
import pylab
class SummariserOptions(object):
    """Flags selecting which sections a simulation summary should include.

    Every option defaults to on; callers switch individual attributes off.
    """

    # Names of all boolean option attributes set on each instance.
    _FLAG_NAMES = (
        'include_overview',
        'include_overview_neuron_to_neuron_diagram',
        'include_overview_individual_neuron_table',
        'include_overview_individual_synapse_table',
        'include_overview_individual_stimuli',
        'include_details',
        'include_details_indvidual_neuron',
        'include_details_indvidual_neuron_morphology',
        'include_details_indvidual_neuron_morphology_mpl',
    )

    def __init__(self):
        # Enable every section by default.
        for flag in self._FLAG_NAMES:
            setattr(self, flag, True)
"""
DocumentLayout:
Root:
/Simulation Overview
/Overview Diagram
/Input Summary
/KeyTraces
/Details:
/Entire Population1
/Cells:
/Population1
/Cell1, Cell2
/Population2
/Cell1, Cell2
/Chemical Synapses:
All Details: (By type)
By-Presynaptic
By-Post Synaptic
/Gap Junctions
All Details
/Stimulations
/Current Clamps
/Voltage Clamps
/Channel Dynamics
/Synaptic Dynamics
/Platform Information
"""
from morphforge.simulation.base import Simulation
# from morphforge.management import PluginMgr
try:
import mredoc as mrd
except ImportError:
print 'Unable to import mredoc, you will be unable to produce pdf/html summaries'
class SummariserObject(object):
    """Abstract interface for objects able to build a summary document."""

    @classmethod
    def build(cls, obj):
        """Build and return a summary for *obj*; subclasses must override."""
        raise NotImplementedError()
class SummariserLibrary(object):
    """Registry mapping channel base-classes to their summariser classes."""

    # Shared, class-level registry: base-class -> summariser class.
    summarisers = {}

    @classmethod
    def register_summariser(cls, channel_baseclass, summariser_class):
        """Register *summariser_class* as the summariser for *channel_baseclass*."""
        # Add it to the dictionary of summarisers:
        cls.summarisers[channel_baseclass] = summariser_class

    @classmethod
    def get_summarisier(cls, obj):
        """Return the registered summariser for *obj*, or None if none matches.

        Raises AssertionError when more than one registered base-class
        matches, since the choice would be ambiguous.
        """
        # .items() works on both Python 2 and 3; the original used the
        # Python-2-only iteritems().
        possible_summarisers = [summarisier
                                for (chl_type, summarisier) in cls.summarisers.items()
                                if issubclass(type(obj), chl_type)]
        if not possible_summarisers:
            return None
        if len(possible_summarisers) == 1:
            return possible_summarisers[0]
        # Fixed typo in the message ('to many' -> 'too many').
        assert False, 'I have too many options for summarising: ' \
            + str(obj)
class _DotSummaryUtils(object):
    """Helpers for writing pydot graphs out to files in the tmp directory."""

    @classmethod
    def save_dot(cls, graph, format, **kwargs):
        """Render *graph* to a file in the tmp dir and return the filename."""
        # Imported lazily to avoid a circular/heavy import at module load.
        from morphforge.core import ObjectLabeller
        # Unique name per graph object so repeated calls do not clobber files.
        name = ObjectLabeller.get_next_unamed_object_name(type(graph))
        tmp_dir = LocMgr.get_tmp_path()
        fname = '%s/dotout_%s.%s' % (tmp_dir, name, format)
        # NOTE(review): the filename extension honours *format*, but
        # write_pdf always emits PDF data regardless — confirm callers only
        # ever pass format='pdf'.
        graph.write_pdf(fname, **kwargs)
        return fname
class SimulationMRedoc(object):
    """Builds an mredoc document summarising a Simulation.

    The document contains an overview (populations, diagram, single cells,
    stimuli) followed by per-cell, per-population, channel and synaptic
    template details.
    """

    @classmethod
    def build(cls, obj, options=None):
        """Build a summary document for *obj*.

        *obj* may be a Simulation or a SimulationResult (in which case the
        result traces are appended as a figure). Returns an mredoc Section.
        """
        from morphforge.simulation.base import SimulationResult
        from morphforge.stdimports import TagViewer

        result = None
        if isinstance(obj, SimulationResult):
            # Summarise the underlying simulation; keep the result around
            # so its traces can be plotted at the end.
            result = obj
            obj = obj.simulation
        elif isinstance(obj, Simulation):
            pass
        elif isinstance(obj, list):
            # BUG-FIX: the original called isinstance(list) with a single
            # argument, which raises TypeError at runtime. Lists remain
            # unsupported.
            assert False
        else:
            assert False, "Unexpected object passed to SimulationMRedoc: %s" % (obj)

        if options is None:
            options = SummariserOptions()

        sim_redoc = SimulationMRedoc(obj, options=options).mredoc

        if result is None:
            return sim_redoc
        else:
            return mrd.Section('Simulation Summary',
                               sim_redoc,
                               mrd.Section("Results",
                                           mrd.Image(TagViewer(result).fig.fig, fig_size=(6, 3), max_font_size=8, subplots_adjust={'left': 0.25, 'right': 0.95}),
                                           )
                               )

    def __init__(self, obj, options):
        """Build the document immediately; the result is in self.mredoc."""
        assert isinstance(obj, Simulation)
        self.sim = obj
        self.options = options
        self.mredoc = self.build_simulation()

    def build_simulation(self):
        """Top-level section: table of contents, overview, then details."""
        title = 'Simulation Summary: %s' % self.sim._sim_desc_str()
        return mrd.Section(title,
                           mrd.TableOfContents(),
                           self.build_simulation_overview(),
                           self.build_simulation_details(),
                           #PluginMgr.summarise_all(),
                           )

    def build_simulation_overview(self):
        """Overview page: populations, diagram, and single-cell summary."""
        return mrd.SectionNewPage('Simulation Overview',
                                  self.build_population_overview(),
                                  self.build_population_complete_dot(),
                                  self.build_singlecell_overview())

    def build_simulation_details(self):
        """Detail pages: cells, populations, channels, synaptic templates."""
        return mrd.SectionNewPage("Simulation Details",
                                  self.build_singlecell_details(),
                                  self.build_population_details(),
                                  self.build_details_channels(),
                                  self.build_details_synaptic_templates(),
                                  )

    # Overview in terms of populations
    # -------------------------------------
    # The details of the simulation:
    def build_population_overview(self):
        """Tables of neuron and synapse populations, plus the diagram."""
        if not self.sim.neuron_populations:
            return None
        table = mrd.VerticalColTable(
            "Population | Size | Type """,
            [(pop.pop_name, len(pop), ",".join(pop.cell_types)) for pop in self.sim.neuron_populations]
        )
        table2 = mrd.VerticalColTable(
            "Population | Size | Type """,
            [(pop.synapse_pop_name, len(pop), ",".join(pop.synapse_types)) for pop in self.sim.synapse_populations]
        )
        return mrd.Section("Population Overview",
                           table, table2,
                           #self.build_population_overview_dot(),
                           self.build_population_complete_dot()
                           )

    def build_population_complete_dot(self):
        """Graphviz diagram of the whole simulation (cells, synapses, stimuli)."""
        return DOTWriter(self.sim).build_population_complete_dot()

    @classmethod
    def _build_population_cell_table(cls, population):
        """Cell table for one population (populations iterate their cells)."""
        return cls._build_cell_table(cell_list=population)

    @classmethod
    def _build_cell_table(cls, cell_list):
        """One-row-per-cell table: surface area, sections, regions, connectivity."""
        table = mrd.VerticalColTable('Name|SA(um2)|\#sections(\#segs)|Regions(SA(um2):nseg)|\#Pre/post-synapse|\#Gap-juncs|Chls',
                                     [(cell.name,
                                       #cell.cell_type_str,
                                       "%.0f" % (cell.morphology.surface_area),
                                       "%d:%d" % (len(cell.morphology), cell.segmenter.get_num_segment_total(cell)),
                                       " ".join(["%s(%d:%d)" % (rgn.name, rgn.surface_area, cell.segmenter.get_num_segment_region(rgn)) for rgn in cell.morphology.regions]),
                                       "%d %d" % (len(cell.presynaptic_connections), len(cell.postsynaptic_connections)),
                                       "%d" % len(cell.electrical_connections),
                                       " ".join([chl.name for chl in cell.biophysics.get_all_channels_applied_to_cell()]),
                                       ) for cell in cell_list])
        return table

    @classmethod
    def _build_synapse_table(cls, synapses_list):
        """One-row-per-synapse table: trigger, postsynaptic cell, receptor."""
        table = mrd.VerticalColTable('Name|Trigger| PostSynaptic Cell|Receptor',
                                     [(syn.name,
                                       '%s' % (syn.get_trigger().get_summary_string()),
                                       '%s' % (syn.get_postsynaptic_mechanism().cell_location.get_location_description_str()),
                                       '%s' % (syn.get_postsynaptic_mechanism().get_summary_description()),
                                       ) for syn in synapses_list])
        return table

    def build_population_details(self):
        """Details section covering every neuron population."""
        return mrd.Section('Population Details:',
                           *[self._build_population_details(pop) for pop in self.sim.neuron_populations]
                           )

    def _build_population_details(self, pop):
        """Details for a single population: summary table plus each neuron."""
        return mrd.Section('Population: %s' % pop.pop_name,
                           self._build_population_cell_table(pop),
                           *[self.build_neuron_details(nrn) for nrn in pop]
                           )

    # --------------------------------------------------------
    # Single Cell Overview:
    # --------------------------------------------------------
    # The details of the simulation:
    def build_singlecell_overview(self):
        """Overview of cells/synapses/clamps not belonging to any population."""
        if self.sim.are_all_cells_in_pops:
            return None
        return mrd.HierachyScope(
            self._build_singlecell_overview_cells(),
            self._build_singlecell_overview_synapses(),
            self._build_singlecell_overview_iclamps(),
            self._build_singlecell_overview_vclamps())

    def _build_singlecell_overview_cells(self):
        """Table of the simulation's individual cells."""
        return mrd.Section('Individual Cells',
                           self._build_cell_table(cell_list=self.sim.cells))

    def _build_singlecell_overview_synapses(self):
        """Table of the simulation's individual synapses."""
        return mrd.Section('Individual Synapses',
                           self._build_synapse_table(synapses_list=self.sim.synapses)
                           )

    # Stim Tables:
    def _build_singlecell_overview_stimtable(self, stims):
        """Shared name/location/description table for stimuli."""
        data = [(stim.name,
                 stim.location_summary_str,
                 stim.get_summary_description(),
                 ) for stim in stims]
        tbl = mrd.VerticalColTable('Name|Location|Description', data)
        return tbl

    def _build_singlecell_overview_iclamps(self):
        """Current-clamp stimulus table."""
        return mrd.Section('Current Clamps',
                           self._build_singlecell_overview_stimtable(stims=self.sim.current_clamps))

    def _build_singlecell_overview_vclamps(self):
        """Voltage-clamp stimulus table."""
        return mrd.Section('Voltage Clamps',
                           self._build_singlecell_overview_stimtable(stims=self.sim.voltage_clamps))

    def build_singlecell_details(self):
        """Full details section for every cell in the simulation."""
        sub_sections = [self.build_neuron_details(nrn) for nrn in self.sim.cells]
        return mrd.Section('Single Cell Details', *sub_sections)

    @classmethod
    def _build_details_channel(cls, chl):
        """Summary of one channel; placeholder when no summariser registered."""
        sumcls = SummariserLibrary.get_summarisier(chl)
        if not sumcls:
            return mrd.Section('Summary of channel: %s <!! Summariser Missing !!>' % chl.name,
                               mrd.Paragraph('<Summariser Missing for type: %s>' % type(chl))
                               )
        return sumcls.build(chl)

    def build_details_channels(self):
        """Channel summaries, ordered by channel name for stable output."""
        channels = sorted(self.sim.get_all_channels(), key=lambda i: i.name)
        return mrd.SectionNewPage('Channels Details',
                                  *[self._build_details_channel(chl) for chl in channels]
                                  )

    def _build_details_synaptic_templ(self, syn_tmpl):
        """Summary of one synaptic template; placeholder if no summariser."""
        #return 'Blah: %s' % syn.name
        sumcls = SummariserLibrary.get_summarisier(syn_tmpl)
        if not sumcls:
            return mrd.Section('Summary of synaptic-template: %s <!! Summariser Missing !!>' % syn_tmpl.name,
                               mrd.Paragraph('<Summariser Missing for type: %s>' % type(syn_tmpl))
                               )
        return sumcls.build(syn_tmpl)

    def build_details_synaptic_templates(self):
        """Summaries for all postsynaptic templates in the simulation."""
        synaptic_templates = self.sim.postsynaptic_templates
        return mrd.SectionNewPage('Synaptic Template Details',
                                  *[self._build_details_synaptic_templ(syntemplate) for syntemplate in synaptic_templates]
                                  )

    # Individual Neuron details:
    # -------------------------------
    def _create_neuron_details_1_morphology(self, nrn):
        """Section/region tables for a neuron's morphology, optionally a figure."""
        morph = nrn.morphology
        section_indexer = SectionIndexerDF(morph)

        section_table = mrd.VerticalColTable(
            'ID|Tags|Lateral Surface Area (um2)|Region|nseg|L|diam (prox/dist)',
            [(section_indexer[sec],
              sec.idtag,
              '%.0f' % sec.area,
              (sec.region.name if sec.region else ''),
              nrn.cell_segmenter.get_num_segments(sec),
              sec.length,
              # Radii are stored; double to report diameters.
              '%.1f/%.1f' % (sec.p_r * 2., sec.d_r * 2.)
              ) for sec in morph],
            caption='%s:Morphology (Sections)' % nrn.name)

        region_table = mrd.VerticalColTable(
            'Region|Surface Area|\#Sections',
            [(rgn.name, rgn.surface_area, len(rgn)) for rgn in nrn.morphology.regions],
            caption='%s:Morphology (Regions)' % nrn.name
        )

        child_sections = [section_table, region_table]

        # Include a picture with matplotlib?
        if self.options.include_details_indvidual_neuron_morphology_mpl:
            from morphforge.morphology.ui import MatPlotLibViewer
            fig = MatPlotLibViewer(nrn.morphology, fig_kwargs={'figsize': (4, 4)}).fig
            child_sections.append(mrd.Image(fig))

        return mrd.HierachyScope(*child_sections) #section_table, region_table, mrd.Image(fig), )

    def _create_neuron_details_2b_pta(self, nrn):
        """Table of passive-property targetters applied to the neuron."""
        passives = nrn.biophysics.get_applied_passives()
        return mrd.VerticalColTable(
            'PassiveProp|Priority|Targetter|Value',
            [(pta.passiveproperty,
              pta.targetter.get_priority(),
              pta.targetter.get_description(),
              str(pta.value),
              ) for pta in passives],
            caption='%s:Passive Properties' % nrn.name)

    def _create_neuron_details_2_mta(self, nrn):
        """Table of mechanism-targetter-applicators (channels) on the neuron."""
        channels = nrn.biophysics.get_applied_mtas()
        return mrd.VerticalColTable(
            'Mechanism|Priority|Targetter|Applicator',
            [('%s ' % (mta.channel.name, ),
              mta.targetter.get_priority(),
              mta.targetter.get_description(),
              mta.applicator.get_description(),
              ) for mta in channels],
            caption='%s:Channels' % nrn.name)

    # NOTE(review): the four tables below are emitted empty — presumably
    # placeholders awaiting implementation.
    def _create_neuron_details_3a_presynapses(self, nrn):
        """Placeholder table for presynaptic connections."""
        return mrd.VerticalColTable('Type|Distance From Soma', [],
                                    caption='%s:Presynaptic Connections'
                                    % nrn.name)

    def _create_neuron_details_3b_postsynapses(self, nrn):
        """Placeholder table for postsynaptic connections."""
        return mrd.VerticalColTable('Type|Distance From Soma', [],
                                    caption='%s:Postsynaptic Connections'
                                    % nrn.name)

    def _create_neuron_details_3c_gapjunctions(self, nrn):
        """Placeholder table for gap junctions."""
        return mrd.VerticalColTable('Type|Distance From Soma', [],
                                    caption='%s:Gap Junctions'
                                    % nrn.name)

    def _create_neuron_details_4_stimulation(self, nrn):
        """Placeholder table for stimulation applied to the neuron."""
        return mrd.VerticalColTable('Type|Distance From Soma', [],
                                    caption='%s:Stimulation' % nrn.name)

    def build_neuron_details(self, neuron):
        """Full details section for one neuron: morphology, biophysics, wiring."""
        #return mrd.SectionNewPage(
        return mrd.Section(
            'Neuron:%s' % neuron.name,
            self._create_neuron_details_1_morphology(neuron),
            self._create_neuron_details_2b_pta(neuron),
            self._create_neuron_details_2_mta(neuron),
            self._create_neuron_details_3a_presynapses(neuron),
            self._create_neuron_details_3b_postsynapses(neuron),
            self._create_neuron_details_3c_gapjunctions(neuron),
            self._create_neuron_details_4_stimulation(neuron),
        )

    # -------------------------------
# -------------------------------
def build_connectivity_graph(synapse_pop, size=0.75):
    """Scatter-plot the pre/post connectivity of a synapse population.

    Each synapse becomes one marker at (presynaptic index, postsynaptic
    index). Populations without a presynaptic side (spike-time driven) are
    collapsed onto index 0. Returns the created pylab figure, scaled so the
    longer axis is *size* inches.
    """
    import pylab
    from matplotlib.ticker import MaxNLocator

    prepop = synapse_pop.presynaptic_population
    #if prepop:
    #    prepop_lut = prepop.build_cell_to_index_lut()
    postpop = synapse_pop.postsynaptic_population
    #postpop_lut = postpop.build_cell_to_index_lut()

    connectivity = list()
    for syn in synapse_pop:
        if prepop:
            pre_index = syn.get_presynaptic_cell().index_in_pop
        else:
            # No presynaptic population (e.g. spike-time trigger): plot on
            # a single pseudo-row.
            pre_index = 0
        post_index = syn.get_postsynaptic_cell().index_in_pop
        connectivity.append((pre_index, post_index))

    prepop_len = (len(prepop) if prepop else 1)
    postpop_len = len(postpop)

    # Scale the figure so the larger population spans *size* inches.
    max_len = max((prepop_len, postpop_len))
    figsize_raw = (size * (float(prepop_len) / max_len), size * (float(postpop_len) / max_len))
    figsize = figsize_raw #figsize_raw[0]+0.75, figsize_raw[1]+0.75
    # (removed stray debug 'print figsize' left over from development)

    fig = pylab.figure(figsize=figsize, dpi=400)
    #ax = fig.add_subplot(1, 1, 1, aspect='equal')
    ax = fig.add_axes([0, 0, 1, 1], aspect='equal')
    xpts, ypts = zip(*connectivity)
    ax.scatter(xpts, ypts, marker='s', s=7, edgecolors='none')
    #ax.axis('equal')
    ax.set_xlim(-0.5, prepop_len - 0.5)
    ax.set_ylim(-0.5, postpop_len - 0.5)

    # At most 3 ticks per axis; labels hidden (the plot is used as a glyph).
    ax.xaxis.set_major_locator(MaxNLocator(min(prepop_len, 3)))
    ax.yaxis.set_major_locator(MaxNLocator(min(postpop_len, 3)))
    ax.axes.get_xaxis().set_ticklabels([])
    ax.axes.get_yaxis().set_ticklabels([])

    #pylab.suptitle('Connectivity: %d synapses'%len(synapse_pop))
    #pylab.show()
    return fig
class DOTWriter(object):
def __init__(self, sim):
self.sim = sim
def build_population_complete_dot(self):
fig_count = 0
fig_out = '/tmp/dotimages/'
import pydot
graph = pydot.Dot('graphname', graph_type='digraph', size='7,7' , ratio='compress', compound='true', splines='true', sep='0.3' )
size = '0.55'
fontsize = '6'
kwargs_general = {
'fontsize': fontsize,
'fixedsize': 'True',
'width': size,
'height': size,
'fontname':'Helvetica'
}
cell_size='0.25'
kwargs_cell = { 'shape':'circle', 'fillcolor':'#80b3ffff', 'color':'#0066ffff', 'style':'filled', 'penwidth':'1', 'width':cell_size, 'height':cell_size }
kwargs_cc = {'shape':'circle', 'style':'filled', 'width':'0.05', }
kwargs_pop = {'style':'filled', 'color':'lightgrey', 'nodesep':'100' }
kwargs_synpop = {'shape':'none', 'fixedsize':'false' }
kwargs_synpop_img = {'shape':'square', 'labelloc':'b', 'scale':'false', 'fixedsize': 'true'}
kwargs_synpop_edge = {'penwidth':'3', 'color':'green', 'minlen':'50' }
kwargs_syn_post = { 'color':'darkgreen', 'penwidth':'1', 'arrowhead':'"tee"' }
kwargs_syn_pre_times = { 'color':'darkgreen',
'penwidth':'1',
'arrowhead':'"tee"' ,
'shape':'circle',
'color':'lightsalmon',
'style':'filled',
'width': '0.25',
'height': '0.25',
'fontsize': fontsize,
'fixedsize': 'True',
'fontname':'Helvetica'
}
# Map Simulation objects into dot objects:
obj2nodedict = {}
subgraphs = []
# Populations become subgraphs:
for population in self.sim.neuron_populations:
n = pydot.Cluster(population.pop_name, label=population.pop_name, **dict(kwargs_general.items() + kwargs_pop.items() ))
subgraphs.append(n)
obj2nodedict[population] = n
# Cells into Nodes
for cell in self.sim.cells:
n = pydot.Node(
cell.name,
label=cell.name if cell.population is None else '<%d>' % cell.index_in_pop,
**dict(kwargs_general.items()+ kwargs_cell.items())
)
obj2nodedict[cell] = n
if cell.population:
obj2nodedict[cell.population].add_node(n)
else:
graph.add_node(n)
for sg in subgraphs:
graph.add_subgraph(sg)
del subgraphs
# Synapse Populations are turned into a node, with edges from pre and
# to the post synaptic population:
for synpopindex, synpop in enumerate(self.sim.synapse_populations):
synpopcluster = pydot.Cluster('SynpopCluster'+synpop.synapse_pop_name)
# Create the connectivity graph:
connectivity_graph_figure = build_connectivity_graph(synpop)
fname = fig_out + '/synpop%d.png' % synpopindex
pylab.savefig(fname, dpi=400, bb_inches='tight')
n = pydot.Node(synpop.synapse_pop_name+'im', label='', image=fname, **dict(kwargs_general.items() + kwargs_synpop_img.items()))
synpopcluster.add_node(n)
label=''
label+= synpop.synapse_pop_name
len_prepop = len(synpop.presynaptic_population) if synpop.presynaptic_population else 1
pc_conn = 100. * len(synpop) / (len_prepop * len(synpop.postsynaptic_population))
#print pc_conn
#pc_conn=50.
#label+= '\\nType: %s'% (synpop.type)
label+= '\\nSynapses: %d (%d%%)'% (len(synpop), pc_conn )
#label= synpop.synapse_pop_name
n = pydot.Node(synpop.synapse_pop_name+'cap', label='"%s"'%label, **dict(kwargs_general.items() + kwargs_synpop.items()))
synpopcluster.add_node(n)
obj2nodedict[synpop] = synpopcluster
graph.add_subgraph(synpopcluster)
# Connect to pre- and post- synaptic pops:
post_pop = synpop.postsynaptic_population
e = pydot.Edge(synpopcluster.get_name(), obj2nodedict[post_pop].get_name(), **dict(kwargs_general.items() + kwargs_synpop_edge.items() ))
graph.add_edge(e)
pre_pop = synpop.presynaptic_population
if pre_pop is not None:
e = pydot.Edge( obj2nodedict[pre_pop].get_name(), synpopcluster.get_name(), **dict(kwargs_general.items() + kwargs_synpop_edge.items() ))
graph.add_edge(e)
else:
print 'NONE'
for (i, synapse) in enumerate(self.sim.synapses):
if synapse.population:
continue
pre_cell = synapse.get_presynaptic_cell()
post_cell = synapse.get_postsynaptic_cell()
if not pre_cell:
pre_n = pydot.Node(name='SpikeTimes%d' % i,
label='Spike-times',
**kwargs_syn_pre_times
)
graph.add_node(pre_n)
else:
pre_n = obj2nodedict[pre_cell]
post_n = obj2nodedict[post_cell]
syn_name = '%s' % synapse.name
e = pydot.Edge(pre_n, post_n, label=syn_name,
**dict(kwargs_general.items() + kwargs_syn_post.items() ) )
graph.add_edge(e)
stims = {}
# Simulations:
for cclamp in self.sim.current_clamps:
label = '"IClamp: %s\\n %s"' % (cclamp.name,
cclamp.location_summary_dot_str)
n = pydot.Node(
cclamp.name,
label=label,
**dict(kwargs_general.items()+ kwargs_cc.items())
)
stims[cclamp] = n
graph.add_node(n)
# Make the edge:
cell_node = obj2nodedict[cclamp.cell]
e = pydot.Edge(n, cell_node, label='', color='red', arrowhead='vee', arrowsize='0.75') # **kwargs)
graph.add_edge(e)
fname = _DotSummaryUtils.save_dot(graph, format='pdf', prog='fdp')
return mrd.Section(
'Diagram Overview',
mrd.Figure(mrd.Image(fname))
)
| |
from datetime import datetime, timedelta, timezone
import logging
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.db import models
from itunesiap.receipt import MissingFieldError
from itunesiap.exceptions import InvalidReceipt
import itunesiap
logger = logging.getLogger(__name__)
# FIXME: If someone unsubscribes, keep them off.
# FIXME: THIS IS TEMPORARY UNTIL WE FIX PAYMENT BUGS IN NEXT BIG UPDATE.
GRACE_PERIOD = timedelta(days=365 * 6)
def _receipt_date_to_datetime(receipt_date):
return datetime.fromtimestamp(int(receipt_date) / 1000.0)
class OutOfDateReceiptError(ValueError):
    """Raised when a submitted receipt is older than the one on record."""
def user_is_active_subscriber(user, with_grace_period=True):
    """Return True if *user* currently counts as an active subscriber.

    Anonymous users never are; usernames in
    settings.FREE_SUBSCRIPTION_USERNAMES always are. Otherwise the user's
    active Subscription row must exist and be unexpired, optionally padded
    by GRACE_PERIOD.
    """
    if not user.is_authenticated:
        return False

    if user.username in settings.FREE_SUBSCRIPTION_USERNAMES:
        return True

    try:
        subscription = Subscription.objects.get(
            subscriber=user,
            active=True,
        )
    except Subscription.DoesNotExist:
        return False

    # Customers on earlier versions of Manabi had bugs with subscriptions.
    # Anyone whose row predates this cutoff is grandfathered in.
    # NOTE(review): created_at is auto_now_add; if Django USE_TZ is on this
    # compares aware vs. naive datetimes and would raise — confirm.
    if subscription.created_at < datetime(year=2019, month=6, day=1):
        return True

    expires_date = subscription.expires_date
    if with_grace_period:
        expires_date += GRACE_PERIOD
    # NOTE(review): compares against naive utcnow(); assumes expires_date is
    # stored as naive UTC — confirm against the USE_TZ setting.
    unexpired = expires_date >= datetime.utcnow()
    return unexpired
class SubscriptionManager(models.Manager):
    """Manager encapsulating iTunes receipt verification and subscription
    create/update logic."""

    def subscribe(
        self, user, original_transaction_id, expires_date,
        is_trial_period=False,
        itunes_receipt=None,
        receipt_info=None,
    ):
        """Create or extend *user*'s subscription.

        An existing row is only updated when it is inactive or its expiry
        would move forward — stale receipts never shorten a subscription.
        """
        subscription, created = Subscription.objects.get_or_create(
            subscriber=user,
            defaults={
                'expires_date': expires_date,
                'original_transaction_id': original_transaction_id,
                'latest_itunes_receipt': itunes_receipt,
            },
        )
        # NOTE(review): on create, is_trial_period and receipt_info are not
        # stored — confirm that is intended.
        if not created and (
            not subscription.active
            or subscription.expires_date.replace(tzinfo=timezone.utc)
            < expires_date
        ):
            subscription.active = True
            subscription.expires_date = expires_date
            subscription.is_trial_period = is_trial_period
            if itunes_receipt is not None:
                subscription.latest_itunes_receipt = itunes_receipt
            if receipt_info is not None:
                # ._ is itunesiap's accessor for the raw response dict.
                subscription.latest_receipt_info = receipt_info._
            subscription.save()

    def process_itunes_receipt(
        self, user, itunes_receipt, log_purchase=True,
    ):
        '''
        Subscribes if valid.

        If log_purchase is True, will log only if the receipt didn't already
        exist.

        Will raise InvalidReceipt error if invalid (it handles the case of
        receipt being valid but expiring, where apple returns InvalidReceipt
        with status 21006).
        '''
        try:
            # env.review tries production first, then falls back to sandbox.
            response = itunesiap.verify(
                itunes_receipt,
                password=settings.ITUNES_SHARED_SECRET,
                exclude_old_transactions=True,
                env=itunesiap.env.review)
        except InvalidReceipt as receipt_error:
            if receipt_error.status == 21006:
                # Expired but valid receipt.
                response = receipt_error
            else:
                # Log the failed receipt for later replay/debugging.
                InAppPurchaseLogItem.objects.get_or_create(
                    subscriber=user,
                    itunes_receipt=itunes_receipt)
                raise receipt_error from None
        except Exception as e:
            InAppPurchaseLogItem.objects.get_or_create(
                subscriber=user,
                itunes_receipt=itunes_receipt)
            raise e from None

        try:
            latest_receipt_info = response.latest_receipt_info
        except MissingFieldError as e:
            InAppPurchaseLogItem.objects.get_or_create(
                subscriber=user,
                itunes_receipt=itunes_receipt)
            if getattr(response, 'status', None) == 21006:
                # Expired but valid receipt: mark any matching subscription
                # inactive with Apple's expiry date.
                Subscription.objects.filter(
                    subscriber=user,
                    original_transaction_id=response.receipt.original_transaction_id,
                ).update(
                    expires_date=response.receipt.expires_date,
                    active=False,
                    is_trial_period=False,
                    latest_receipt_info=response.receipt._,
                )
                return
            else:
                # Empty in_app array means no purchases made.
                if len(response.receipt.in_app) == 0:
                    return
                else:
                    raise e from None

        # Apple may return a list of transactions; the newest is last.
        if isinstance(latest_receipt_info, list):
            latest_receipt_info = latest_receipt_info[-1]

        original_transaction_id = (
            latest_receipt_info['original_transaction_id'])
        is_trial_period = (
            latest_receipt_info['is_trial_period'].lower() == 'true')

        if log_purchase:
            log_item, created = InAppPurchaseLogItem.objects.get_or_create(
                subscriber=user,
                itunes_receipt=itunes_receipt,
                original_transaction_id=original_transaction_id,
                defaults={'receipt_info': latest_receipt_info._},
            )
            if not created and log_item.receipt_info != latest_receipt_info:
                log_item.receipt_info = latest_receipt_info._
                log_item.save()

        self.model.objects.subscribe(
            user,
            original_transaction_id,
            latest_receipt_info.expires_date,
            is_trial_period=is_trial_period,
            itunes_receipt=itunes_receipt,
            receipt_info=latest_receipt_info,
        )

        logger.info('Processed iTunes receipt')

    def process_itunes_subscription_update_notification(self, notification):
        '''
        Handle an App Store server-to-server notification, updating any
        matching Subscription rows according to notification_type.

        Will raise InvalidReceipt error if invalid.
        '''
        shared_secret = notification['password']
        receipt_info = notification['unified_receipt']['latest_receipt_info'][0]
        notification_type = notification['notification_type']

        logger.info(
            'Notification type received',
            extra={'notification_type': notification_type})

        original_transaction_id = receipt_info.get('original_transaction_id')

        # Log first so even rejected notifications leave an audit trail.
        log_item = SubscriptionUpdateNotificationLogItem.objects.create(
            production_environment=(notification['environment'] == 'PROD'),
            notification_type=notification_type,
            itunes_receipt=notification['unified_receipt']['latest_receipt'],
            receipt_info=receipt_info,
            original_transaction_id=original_transaction_id)

        if shared_secret not in [
            settings.ITUNES_SHARED_SECRET,
            settings.MANABI_SPECIFIC_ITUNES_SHARED_SECRET,
        ]:
            logger.warning('Invalid iTunes shared secret')
            raise PermissionDenied('Invalid iTunes shared secret.')

        if notification['environment'] == 'PROD':
            environment = itunesiap.env.production
        else:
            environment = itunesiap.env.sandbox

        skip_updating_subscription = False
        subscriptions = []
        if notification_type in [
            'DID_RECOVER', 'DID_RENEW', 'INTERACTIVE_RENEWAL',
        ]:
            # Subscription is (back) in good standing: verify, reactivate.
            receipt = notification['unified_receipt']['latest_receipt']
            itunesiap.verify(
                receipt,
                password=shared_secret,
                exclude_old_transactions=True)
            subscriptions = Subscription.objects.filter(
                original_transaction_id=original_transaction_id)
            for subscription in subscriptions:
                subscription.active = True
        elif notification_type == 'CANCEL':
            # BUG-FIX: was itunes_receipt[...], but no such name exists in
            # this method (NameError); the receipt lives on *notification*.
            receipt = notification['unified_receipt']['latest_receipt']
            itunesiap.verify(
                receipt,
                password=shared_secret,
                exclude_old_transactions=True)
            subscriptions = Subscription.objects.filter(
                original_transaction_id=original_transaction_id)
            for subscription in subscriptions:
                subscription.active = False
        elif notification_type == 'DID_CHANGE_RENEWAL_PREF':
            # Customer changed the plan that takes affect at the next
            # subscription renewal. Current active plan is not affected.
            skip_updating_subscription = True
        elif notification_type == 'DID_FAIL_TO_RENEW':
            # Indicates a subscription that failed to renew due to a billing
            # issue. Check is_in_billing_retry_period to know the current
            # retry status of the subscription, and grace_period_expires_date
            # to know the new service expiration date if the subscription is
            # in a billing grace period.
            expiration_intent = notification.get('expiration_intent')
            if expiration_intent in ["1", "3"]:
                # Customer canceled their subscription,
                # or customer did not agree to a recent price increase.
                receipt = notification['unified_receipt']['latest_receipt']
                # BUG-FIX: was assigned to `subscription` (singular), so
                # `subscriptions` stayed empty and nothing was deactivated.
                subscriptions = Subscription.objects.filter(
                    original_transaction_id=original_transaction_id)
                for subscription in subscriptions:
                    subscription.active = False
            else:
                skip_updating_subscription = True
        elif notification_type == 'INITIAL_BUY':
            # Doesn't have an original_transaction_id yet so it's useless.
            # See https://forums.developer.apple.com/thread/98799
            skip_updating_subscription = True
        elif notification_type == 'DID_CHANGE_RENEWAL_STATUS':
            # Indicates a change in the subscription renewal status.
            skip_updating_subscription = True

        if not skip_updating_subscription:
            for subscription in subscriptions:
                subscription.sandbox = environment == itunesiap.env.sandbox
                subscription.latest_itunes_receipt = receipt
                subscription.expires_date = _receipt_date_to_datetime(
                    receipt_info['expires_date_ms'])
                subscription.is_trial_period = (
                    receipt_info['is_trial_period'].lower() == 'true')
                subscription.save()

        logger.info('Processed iTunes subscription update notification')
        log_item.processed = True
        log_item.save()
class Subscription(models.Model):
    """A user's iTunes auto-renewing subscription state (one row per user)."""

    objects = SubscriptionManager()

    # One subscription per user (OneToOne).
    subscriber = models.OneToOneField(
        User, models.CASCADE, db_index=True, editable=False)
    expires_date = models.DateTimeField()
    active = models.BooleanField(default=True, blank=True)
    is_trial_period = models.BooleanField(default=False, blank=True)
    # True when the backing receipt came from Apple's sandbox environment.
    sandbox = models.BooleanField(default=False, blank=True)
    # Apple's stable identifier for the original purchase transaction.
    original_transaction_id = models.CharField(max_length=300)
    latest_itunes_receipt = models.TextField(blank=True)
    latest_receipt_info = JSONField(editable=False, blank=True, null=True)
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    modified_at = models.DateTimeField(auto_now=True, editable=False)

    class Meta:
        ordering = ['-created_at']

    def refresh_receipt_with_apple(self):
        '''
        Gets the latest receipt info from Apple and processes it.
        '''
        itunes_receipt = self.latest_itunes_receipt
        if not itunes_receipt:
            # Fall back to the most recently logged purchase receipt for
            # this original transaction.
            latest_log_item = (InAppPurchaseLogItem.objects
                .filter(original_transaction_id=self.original_transaction_id)
                .order_by('-created_at')
                .first())
            # NOTE(review): raises AttributeError when no log item exists —
            # confirm that callers guarantee one.
            itunes_receipt = latest_log_item.itunes_receipt
        Subscription.objects.process_itunes_receipt(
            self.subscriber, itunes_receipt, log_purchase=True)
class InAppPurchaseLogItem(models.Model):
    """Audit log of every iTunes receipt submitted for a user, successful
    or not; also used to replay failed transactions."""

    itunes_receipt = models.TextField(editable=False)
    receipt_info = JSONField(editable=False, blank=True, null=True)
    subscriber = models.ForeignKey(User, models.CASCADE, editable=False)
    # Blank when verification failed before an ID could be extracted.
    original_transaction_id = models.CharField(max_length=300, blank=True)
    created_at = models.DateTimeField(auto_now_add=True, editable=False)

    class Meta:
        ordering = ['-created_at']

    def process_itunes_receipt(self):
        '''
        Useful for replaying a transaction that failed due to a bug.
        '''
        Subscription.objects.process_itunes_receipt(
            self.subscriber, self.itunes_receipt, log_purchase=True)
class SubscriptionUpdateNotificationLogItem(models.Model):
    """Audit log of App Store server-to-server subscription notifications.

    Rows are created before validation; `processed` flips to True only once
    the notification has been fully handled.
    """

    notification_type = models.CharField(max_length=40, blank=True)
    production_environment = models.BooleanField(default=True)
    itunes_receipt = models.TextField(editable=False)
    receipt_info = JSONField(editable=False)
    original_transaction_id = models.CharField(max_length=300, blank=True)
    processed = models.BooleanField(default=False)
    created_at = models.DateTimeField(auto_now_add=True, editable=False)

    class Meta:
        ordering = ['-created_at']
class PurchasedSubscriptionProduct:
    """Pairs a user with the iTunes receipt backing their subscription."""

    def __init__(self, user, itunes_receipt):
        self._user = user
        self.itunes_receipt = itunes_receipt

    def subscription_is_active(self):
        # Delegates to the module-level helper; grace period applies
        # (its default with_grace_period=True).
        return user_is_active_subscriber(self._user)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.