repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
simon-pepin/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Ghosh <satrajit.ghosh@gmail.com>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity;
# [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
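# Worked example (values for illustration only): with max_features=0.5
# and n_features_=10, the branch above gives max(1, int(0.5 * 10)) == 5
# features per split; any float <= 0.0 falls through to max_features = 0,
# which fails the bounds check below.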
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
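# e.g. min_samples_split=2 combined with min_samples_leaf=5 is raised to
# 10 here, since a split is only admissible if each child can still hold
# at least min_samples_leaf samples.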
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
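# A minimal usage sketch of fit (illustrative data only; the concrete
# estimators defined below supply the constructor arguments this base
# class expects):
#
#   clf = DecisionTreeClassifier(max_depth=2, random_state=0)
#   clf.fit([[0, 0], [1, 1], [1, 0]], [0, 1, 1],
#           sample_weight=[1.0, 2.0, 1.0])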
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predicted values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Return the index of the leaf that each sample ends up in.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
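# For example (leaf ids depend on the fitted tree; X is any validated
# input of the shape documented above):
#
#   leaves = clf.apply(X)        # shape (n_samples,)
#   leaves == leaves[0]          # samples routed to the same leaf as x_0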
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
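# A short usage sketch (single-output case; illustrative data):
#
#   proba = clf.predict_proba(X)   # shape (n_samples, n_classes)
#   proba.sum(axis=1)              # rows sum to 1, except all-zero rows,
#                                  # which the normalizer guard above leaves at 0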
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
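# A minimal sketch of the intended ensemble usage (the docstrings above warn
# against standalone use; BaggingRegressor is one suitable wrapper, and
# X_train / y_train are placeholder names):
#
#   from sklearn.ensemble import BaggingRegressor
#   model = BaggingRegressor(ExtraTreeRegressor(random_state=0),
#                            n_estimators=10, random_state=0)
#   model.fit(X_train, y_train)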
| bsd-3-clause |
JunHe77/bigtop | bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/service_check.py | 10 | 6677 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management import *
import sys
import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
import re
import subprocess
from ambari_commons import os_utils
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.get_user_call_output import get_user_call_output
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
CURL_CONNECTION_TIMEOUT = '5'
class ServiceCheck(Script):
def service_check(self, env):
pass
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class ServiceCheckWindows(ServiceCheck):
def service_check(self, env):
import params
env.set_params(params)
yarn_exe = os_utils.quote_path(os.path.join(params.yarn_home, "bin", "yarn.cmd"))
run_yarn_check_cmd = "cmd /C %s node -list" % yarn_exe
component_type = 'rm'
if params.hadoop_ssl_enabled:
component_address = params.rm_webui_https_address
else:
component_address = params.rm_webui_address
#temp_dir = os.path.abspath(os.path.join(params.hadoop_home, os.pardir)), "/tmp"
temp_dir = os.path.join(os.path.dirname(params.hadoop_home), "temp")
validateStatusFileName = "validateYarnComponentStatusWindows.py"
validateStatusFilePath = os.path.join(temp_dir, validateStatusFileName)
python_executable = sys.executable
validateStatusCmd = "%s %s %s -p %s -s %s" % (python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled)
if params.security_enabled:
kinit_cmd = "%s -kt %s %s;" % (params.kinit_path_local, params.smoke_user_keytab, params.smokeuser)
smoke_cmd = kinit_cmd + ' ' + validateStatusCmd
else:
smoke_cmd = validateStatusCmd
File(validateStatusFilePath,
content=StaticFile(validateStatusFileName)
)
Execute(smoke_cmd,
tries=3,
try_sleep=5,
logoutput=True
)
Execute(run_yarn_check_cmd, logoutput=True)
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class ServiceCheckDefault(ServiceCheck):
def service_check(self, env):
import params
env.set_params(params)
params.HdfsResource(format("/user/{smokeuser}"),
type="directory",
action="create_on_execute",
owner=params.smokeuser,
mode=params.smoke_hdfs_user_mode,
)
if params.stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted_major):
path_to_distributed_shell_jar = format("{stack_root}/current/hadoop-yarn-client/hadoop-yarn-applications-distributedshell.jar")
else:
path_to_distributed_shell_jar = "/usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell*.jar"
yarn_distributed_shell_check_params = ["yarn org.apache.hadoop.yarn.applications.distributedshell.Client",
"-shell_command", "ls", "-num_containers", "{number_of_nm}",
"-jar", "{path_to_distributed_shell_jar}", "-timeout", "300000",
"--queue", "{service_check_queue_name}"]
yarn_distributed_shell_check_cmd = format(" ".join(yarn_distributed_shell_check_params))
if params.security_enabled:
kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
smoke_cmd = format("{kinit_cmd} {yarn_distributed_shell_check_cmd}")
else:
smoke_cmd = yarn_distributed_shell_check_cmd
return_code, out = shell.checked_call(smoke_cmd,
path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
user=params.smokeuser,
)
m = re.search("appTrackingUrl=(.*),\s", out)
app_url = m.group(1)
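# Illustrative output line (the real text comes from the YARN client);
# the regex above captures the tracking URL up to the trailing comma:
#   "... appTrackingUrl=http://rm-host:8088/proxy/application_1_0001/, ..."
# so app_url would be "http://rm-host:8088/proxy/application_1_0001/" and
# the loop below pulls out the "application_1_0001" component.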
splitted_app_url = str(app_url).split('/')
for item in splitted_app_url:
if "application" in item:
application_name = item
for rm_webapp_address in params.rm_webapp_addresses_list:
info_app_url = params.scheme + "://" + rm_webapp_address + "/ws/v1/cluster/apps/" + application_name
get_app_info_cmd = "curl --negotiate -u : -ksL --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + info_app_url
return_code, stdout, _ = get_user_call_output(get_app_info_cmd,
user=params.smokeuser,
path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
)
# Handle HDP<2.2.8.1 where RM doesn't do automatic redirection from standby to active
if stdout.startswith("This is standby RM. Redirecting to the current active RM:"):
Logger.info(format("Skipped checking of {rm_webapp_address} since returned '{stdout}'"))
continue
try:
json_response = json.loads(stdout)
except Exception as e:
raise Fail(format("Response from YARN API was not a valid JSON. Response: {stdout}"))
if json_response is None or 'app' not in json_response or \
'state' not in json_response['app'] or 'finalStatus' not in json_response['app']:
raise Fail("Application " + app_url + " returns invalid data.")
if json_response['app']['state'] != "FINISHED" or json_response['app']['finalStatus'] != "SUCCEEDED":
raise Fail("Application " + app_url + " state/status is not valid. Should be FINISHED/SUCCEEDED.")
if __name__ == "__main__":
ServiceCheck().execute()
| apache-2.0 |
RackSec/ansible | lib/ansible/modules/windows/win_stat.py | 16 | 7006 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub, actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_stat
version_added: "1.7"
short_description: returns information about a Windows file
description:
- Returns information about a Windows file
options:
path:
description:
- The full path of the file/object to get the facts of; both forward and
back slashes are accepted.
required: yes
get_md5:
description:
- Whether to return the checksum of the file. Between Ansible 1.9
and 2.2 this was no longer an MD5, but a SHA1 instead. As of Ansible
2.3 this is back to an MD5. Will return None if the host is unable to
use the specified algorithm.
- This option is deprecated in Ansible 2.3 and is replaced with
C(checksum_algorithm=md5).
required: no
default: True
get_checksum:
description:
- Whether to return a checksum of the file (default sha1)
required: no
default: True
version_added: "2.1"
checksum_algorithm:
description:
- Algorithm to determine checksum of file. Will throw an error if
the host is unable to use the specified algorithm.
required: no
default: sha1
choices: ['md5', 'sha1', 'sha256', 'sha384', 'sha512']
version_added: "2.3"
author: "Chris Church (@cchurch)"
'''
EXAMPLES = r'''
- name: Obtain information about a file
win_stat:
path: C:\foo.ini
register: file_info
# Obtain information about a folder
- win_stat:
path: C:\bar
register: folder_info
# Get MD5 checksum of a file
- win_stat:
path: C:\foo.ini
get_checksum: yes
checksum_algorithm: md5
register: md5_checksum
- debug:
var: md5_checksum.stat.checksum
# Get SHA1 checksum of file
- win_stat:
path: C:\foo.ini
get_checksum: yes
register: sha1_checksum
- debug:
var: sha1_checksum.stat.checksum
# Get SHA256 checksum of file
- win_stat:
path: C:\foo.ini
get_checksum: yes
checksum_algorithm: sha256
register: sha256_checksum
- debug:
var: sha256_checksum.stat.checksum
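# Get SHA512 checksum of file (sha512 is one of the supported
# checksum_algorithm choices documented above)
- win_stat:
path: C:\foo.ini
get_checksum: yes
checksum_algorithm: sha512
register: sha512_checksum
- debug:
var: sha512_checksum.stat.checksum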
'''
RETURN = r'''
changed:
description: Whether anything was changed
returned: always
type: boolean
sample: True
stat:
description: dictionary containing all the stat data
returned: success
type: complex
contains:
attributes:
description: attributes of the file at path in raw form
returned: success, path exists
type: string
sample: "Archive, Hidden"
checksum:
description: The checksum of a file based on checksum_algorithm specified
returned: success, path exist, path is a file, get_checksum == True
checksum_algorithm specified is supported
type: string
sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98
creationtime:
description: the create time of the file represented in seconds since epoch
returned: success, path exists
type: float
sample: 1477984205.15
extension:
description: the extension of the file at path
returned: success, path exists, path is a file
type: string
sample: ".ps1"
isarchive:
description: if the path is ready for archiving or not
returned: success, path exists
type: boolean
sample: True
isdir:
description: if the path is a directory or not
returned: success, path exists
type: boolean
sample: True
ishidden:
description: if the path is hidden or not
returned: success, path exists
type: boolean
sample: True
islnk:
description: if the path is a symbolic link or junction or not
returned: success, path exists
type: boolean
sample: True
isreadonly:
description: if the path is read only or not
returned: success, path exists
type: boolean
sample: True
isshared:
description: if the path is shared or not
returned: success, path exists
type: boolean
sample: True
lastaccesstime:
description: the last access time of the file represented in seconds since epoch
returned: success, path exists
type: float
sample: 1477984205.15
lastwritetime:
description: the last modification time of the file represented in seconds since epoch
returned: success, path exists
type: float
sample: 1477984205.15
lnk_source:
description: the target of the symbolic link, will return null if not a link or the link is broken
returned: success, path exists, file is a symbolic link
type: string
sample: C:\temp
md5:
description: The MD5 checksum of a file (Between Ansible 1.9 and 2.2 this was returned as a SHA1 hash)
returned: success, path exist, path is a file, get_md5 == True, md5 is supported
type: string
sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98
owner:
description: the owner of the file
returned: success, path exists
type: string
sample: BUILTIN\Administrators
path:
description: the full absolute path to the file
returned: success, path exists
type: string
sample: C:\foo.ini
sharename:
description: the name of share if folder is shared
returned: success, path exists, file is a directory and isshared == True
type: string
sample: file-share
size:
description: the size in bytes of a file or folder
returned: success, path exists, file is not a link
type: int
sample: 1024
'''
| gpl-3.0 |
bcl/pykickstart | pykickstart/handlers/f24.py | 3 | 5208 | #
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
__all__ = ["F24Handler"]
from pykickstart import commands
from pykickstart.base import BaseHandler
from pykickstart.version import F24
class F24Handler(BaseHandler):
version = F24
commandMap = {
"auth": commands.authconfig.FC3_Authconfig,
"authconfig": commands.authconfig.FC3_Authconfig,
"autopart": commands.autopart.F23_AutoPart,
"autostep": commands.autostep.FC3_AutoStep,
"bootloader": commands.bootloader.F21_Bootloader,
"btrfs": commands.btrfs.F23_BTRFS,
"cdrom": commands.cdrom.FC3_Cdrom,
"clearpart": commands.clearpart.F21_ClearPart,
"cmdline": commands.displaymode.FC3_DisplayMode,
"device": commands.device.F24_Device,
"deviceprobe": commands.deviceprobe.FC3_DeviceProbe,
"dmraid": commands.dmraid.F24_DmRaid,
"driverdisk": commands.driverdisk.F14_DriverDisk,
"eula": commands.eula.F20_Eula,
"fcoe": commands.fcoe.F13_Fcoe,
"firewall": commands.firewall.F20_Firewall,
"firstboot": commands.firstboot.FC3_Firstboot,
"graphical": commands.displaymode.FC3_DisplayMode,
"group": commands.group.F12_Group,
"halt": commands.reboot.F23_Reboot,
"harddrive": commands.harddrive.FC3_HardDrive,
"ignoredisk": commands.ignoredisk.F14_IgnoreDisk,
"install": commands.install.F20_Install,
"iscsi": commands.iscsi.F17_Iscsi,
"iscsiname": commands.iscsiname.FC6_IscsiName,
"keyboard": commands.keyboard.F18_Keyboard,
"lang": commands.lang.F19_Lang,
"liveimg": commands.liveimg.F19_Liveimg,
"logging": commands.logging.FC6_Logging,
"logvol": commands.logvol.F23_LogVol,
"mediacheck": commands.mediacheck.FC4_MediaCheck,
"method": commands.method.F19_Method,
"multipath": commands.multipath.F24_MultiPath,
"network": commands.network.F24_Network,
"nfs": commands.nfs.FC6_NFS,
"ostreesetup": commands.ostreesetup.F21_OSTreeSetup,
"part": commands.partition.F23_Partition,
"partition": commands.partition.F23_Partition,
"poweroff": commands.reboot.F23_Reboot,
"raid": commands.raid.F23_Raid,
"realm": commands.realm.F19_Realm,
"reboot": commands.reboot.F23_Reboot,
"repo": commands.repo.F21_Repo,
"reqpart": commands.reqpart.F23_ReqPart,
"rescue": commands.rescue.F10_Rescue,
"rootpw": commands.rootpw.F18_RootPw,
"selinux": commands.selinux.FC3_SELinux,
"services": commands.services.FC6_Services,
"shutdown": commands.reboot.F23_Reboot,
"skipx": commands.skipx.FC3_SkipX,
"sshpw": commands.sshpw.F24_SshPw,
"sshkey": commands.sshkey.F22_SshKey,
"text": commands.displaymode.FC3_DisplayMode,
"timezone": commands.timezone.F23_Timezone,
"updates": commands.updates.F7_Updates,
"upgrade": commands.upgrade.F20_Upgrade,
"url": commands.url.F18_Url,
"user": commands.user.F24_User,
"vnc": commands.vnc.F9_Vnc,
"volgroup": commands.volgroup.F21_VolGroup,
"xconfig": commands.xconfig.F14_XConfig,
"zerombr": commands.zerombr.F9_ZeroMbr,
"zfcp": commands.zfcp.F14_ZFCP,
}
dataMap = {
"BTRFSData": commands.btrfs.F23_BTRFSData,
"DriverDiskData": commands.driverdisk.F14_DriverDiskData,
"DeviceData": commands.device.F8_DeviceData,
"DmRaidData": commands.dmraid.FC6_DmRaidData,
"FcoeData": commands.fcoe.F13_FcoeData,
"GroupData": commands.group.F12_GroupData,
"IscsiData": commands.iscsi.F17_IscsiData,
"LogVolData": commands.logvol.F23_LogVolData,
"MultiPathData": commands.multipath.FC6_MultiPathData,
"NetworkData": commands.network.F22_NetworkData,
"PartData": commands.partition.F23_PartData,
"RaidData": commands.raid.F23_RaidData,
"RepoData": commands.repo.F21_RepoData,
"SshPwData": commands.sshpw.F24_SshPwData,
"SshKeyData": commands.sshkey.F22_SshKeyData,
"UserData": commands.user.F19_UserData,
"VolGroupData": commands.volgroup.F21_VolGroupData,
"ZFCPData": commands.zfcp.F14_ZFCPData,
}
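# A minimal usage sketch (assumes pykickstart's standard entry points in
# pykickstart.parser and pykickstart.version; the path is a placeholder):
#
#   from pykickstart.parser import KickstartParser
#   from pykickstart.version import makeVersion
#
#   handler = makeVersion(F24)        # returns an F24Handler instance
#   parser = KickstartParser(handler)
#   parser.readKickstart("/path/to/ks.cfg")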
| gpl-2.0 |
yohanko88/gem5-DC | src/arch/x86/isa/insts/simd64/floating_point/arithmetic/__init__.py | 91 | 2470 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
categories = ["addition",
"subtraction",
"multiplication",
"accumulation",
"reciprocal_estimation",
"reciprocal_square_root"]
microcode = '''
# 64 bit multimedia instructions
'''
for category in categories:
exec "import %s as cat" % category
microcode += cat.microcode
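# For a category such as "addition", the Python 2 exec/import above expands to:
#   import addition as cat
#   microcode += cat.microcode
# i.e. each category module in this package contributes its microcode string.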
| bsd-3-clause |
codefarmer-cyk/linux | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
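# Worked round trip with the default zoom of 0.5 set above:
#   us_to_px(2000) == 2000 / 10**3 * 0.5 == 1 px
#   px_to_us(1) == (1 / 0.5) * 10**3 == 2000 us
# so the two conversions are exact inverses for a fixed zoom.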
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r, g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
hsolbrig/SNOMEDToOWL | SNOMEDCTToOWL/RF2Files/Transitive.py | 1 | 2725 | from typing import Dict, Set
from SNOMEDCTToOWL.SNOMEDToOWLConstants import RelationshipFilePrefix
class Transitive:
relationship_prefix = RelationshipFilePrefix
def __init__(self):
self._children = {} # parent -> set(children) Dict[int, Set[int]]
self._parents = {} # child -> set(parents) Dict[int, Set[int]]
self.__desc_cache = {} # parent -> set(descendants)
self.__ancestor_cache = {} # child -> set(ancestors)
@classmethod
def filtr(cls, fname: str) -> bool:
"""
Return true if this is a computed relationship file. Transitivity is always based on computed
:param fname: file name to test
:return: true if it should be processed
"""
return fname.startswith(cls.relationship_prefix)
def add(self, row: Dict) -> None:
"""
Add an RF2 relationship row to the Transitive file
:param row: row to add -- already tested for active
"""
child = int(row["sourceId"])
parent = int(row["destinationId"])
self._children.setdefault(parent, set()).add(child)
self._parents.setdefault(child, set()).add(parent)
def descendants_of(self, parent: int) -> Set[int]:
"""
Return all descendants of parent
:param parent: parent concept
:return: set of concepts
"""
return self._children.get(parent, set())\
.union(*[self.descendants_of(x) for x in self._children.get(parent, set())])
def is_descendant_of(self, desc: int, parent: int) -> bool:
"""
Determine whether desc is a descendant of parent
:param desc: descendant to test
:param parent: parent concept
:return: True or False
"""
if parent not in self.__desc_cache:
self.__desc_cache[parent] = self.descendants_of(parent)
return desc in self.__desc_cache[parent]
def is_descendant_or_self_of(self, desc: int, parent: int) -> bool:
"""
Determine whether desc is a descendant of the parent or is the parent itself
:param desc: descendant to test
:param parent: parent concept
:return: True or False
"""
return self.is_descendant_of(desc, parent) or desc == parent
def ancestors_of(self, child: int) -> Set[int]:
return self._parents.get(child, set())\
.union(*[self.ancestors_of(x) for x in self._parents.get(child, set())])
def is_ancestor_of(self, ancestor: int, child: int) -> bool:
if child not in self.__ancestor_cache:
self.__ancestor_cache[child] = self.ancestors_of(child)
return ancestor in self.__ancestor_cache[child]
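# A minimal usage sketch (hypothetical concept ids chosen only to show the
# transitive closure; real ids come from active RF2 relationship rows):
#
#   t = Transitive()
#   t.add({"sourceId": "2", "destinationId": "1"})   # 2 is-a 1
#   t.add({"sourceId": "3", "destinationId": "2"})   # 3 is-a 2
#   t.descendants_of(1)          # -> {2, 3}
#   t.is_ancestor_of(1, 3)       # -> True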
| apache-2.0 |
ConservationInternational/ldmp-qgis-plugin | LDMP/processing_provider/carbon.py | 1 | 8231 | import numpy as np
from osgeo import gdal, osr
from qgis import processing
from qgis.core import (QgsGeometry,
QgsProcessing,
QgsProcessingAlgorithm,
QgsProcessingException,
QgsProcessingParameterFile,
QgsProcessingParameterFileDestination,
QgsProcessingParameterNumber,
QgsProcessingOutputString,
QgsProcessingOutputNumber)
from qgis.PyQt.QtCore import QCoreApplication
from LDMP import log
from LDMP.summary import calc_cell_area
class TCSummary(QgsProcessingAlgorithm):
"""
Used for summarizing results of output of the carbon change analysis.
"""
def tr(self, string):
return QCoreApplication.translate('processing\\carbon', string)
def createInstance(self):
# Must return a new copy of your algorithm.
return TCSummary()
def name(self):
return 'carbon_summary'
def displayName(self):
return self.tr('Carbon change summary')
def group(self):
return self.tr('Carbon change')
def groupId(self):
return 'trendsearth'
def shortHelpString(self):
return self.tr('Summarize output of a carbon change analysis')
def initAlgorithm(self, config=None):
# Inputs
self.addParameter(
QgsProcessingParameterFile(
'INPUT',
self.tr('Input carbon analysis file')
)
)
self.addParameter(
QgsProcessingParameterNumber(
'YEAR_START',
self.tr('Starting year')
)
)
self.addParameter(
QgsProcessingParameterNumber(
'YEAR_END',
self.tr('Ending year')
)
)
# Outputs
self.addOutput(
QgsProcessingOutputString(
'FOREST_LOSS',
self.tr('Forest loss per year in hectares')
)
)
self.addOutput(
QgsProcessingOutputString(
'CARBON_LOSS',
self.tr('Carbon loss per year in tonnes of C')
)
)
self.addOutput(
QgsProcessingOutputNumber(
'CARBON_INITIAL',
self.tr('Initial tonnes of C')
)
)
self.addOutput(
QgsProcessingOutputNumber(
'AREA_FOREST',
self.tr('Area of forest in hectares')
)
)
self.addOutput(
QgsProcessingOutputNumber(
'AREA_NON_FOREST',
self.tr('Area of non-forest in hectares')
)
)
self.addOutput(
QgsProcessingOutputNumber(
'AREA_MISSING',
self.tr('Area of missing data in hectares')
)
)
self.addOutput(
QgsProcessingOutputNumber(
'AREA_WATER',
self.tr('Area of water in hectares')
)
)
self.addOutput(
QgsProcessingOutputNumber(
'AREA_SITE',
self.tr('Area of site in hectares')
)
)
def processAlgorithm(self, parameters, context, feedback):
src_file = self.parameterAsFile(parameters,'INPUT', context)
year_start = self.parameterAsInt(parameters,'YEAR_START', context)
year_end = self.parameterAsInt(parameters,'YEAR_END', context)
src_ds = gdal.Open(src_file)
band_f_loss = src_ds.GetRasterBand(1)
band_tc = src_ds.GetRasterBand(2)
block_sizes = band_f_loss.GetBlockSize()
xsize = band_f_loss.XSize
ysize = band_f_loss.YSize
n_out_bands = 1
x_block_size = block_sizes[0]
y_block_size = block_sizes[1]
src_gt = src_ds.GetGeoTransform()
# Width of cells in longitude
long_width = src_gt[1]
# Set initial lat to the top left corner latitude
lat = src_gt[3]
# Width of cells in latitude
pixel_height = src_gt[5]
area_missing = 0
area_non_forest = 0
area_water = 0
area_site = 0
initial_forest_area = 0
initial_carbon_total = 0
forest_loss = np.zeros(year_end - year_start)
carbon_loss = np.zeros(year_end - year_start)
blocks = 0
for y in range(0, ysize, y_block_size):
if y + y_block_size < ysize:
rows = y_block_size
else:
rows = ysize - y
for x in range(0, xsize, x_block_size):
if feedback.isCanceled():
log("Processing of {} killed by user after processing {} out of {} blocks.".format(src_file, y, ysize))
break
feedback.setProgress(100 * (float(y) + (float(x)/xsize)*y_block_size) / ysize)
if x + x_block_size < xsize:
cols = x_block_size
else:
cols = xsize - x
f_loss_array = band_f_loss.ReadAsArray(x, y, cols, rows)
tc_array = band_tc.ReadAsArray(x, y, cols, rows)
# Calculate cell area for each horizontal line
cell_areas = np.array([calc_cell_area(lat + pixel_height*n, lat + pixel_height*(n + 1), long_width) for n in range(rows)])
cell_areas.shape = (cell_areas.size, 1)
# Make an array of the same size as the input arrays containing
# the area of each cell (which is identical for all cells in a
# given row - cell areas only vary among rows)
cell_areas_array = np.repeat(cell_areas, cols, axis=1)
initial_forest_pixels = (f_loss_array == 0) | (f_loss_array > (year_start - 2000))
# The site area includes everything that isn't masked
area_missing = area_missing + np.sum(((f_loss_array == -32768) | (tc_array == -32768)) * cell_areas_array)
area_water = area_water + np.sum((f_loss_array == -2) * cell_areas_array)
area_non_forest = area_non_forest + np.sum((f_loss_array == -1) * cell_areas_array)
area_site = area_site + np.sum((f_loss_array != -32767) * cell_areas_array)
initial_forest_area = initial_forest_area + np.sum(initial_forest_pixels * cell_areas_array)
initial_carbon_total = initial_carbon_total + np.sum(initial_forest_pixels * tc_array * (tc_array >= 0) * cell_areas_array)
for n in range(year_end - year_start):
# Note the codes are year - 2000
forest_loss[n] = forest_loss[n] + np.sum((f_loss_array == year_start - 2000 + n + 1) * cell_areas_array)
# Check units here - is tc_array in per m or per ha?
carbon_loss[n] = carbon_loss[n] + np.sum((f_loss_array == year_start - 2000 + n + 1) * tc_array * (tc_array >= 0) * cell_areas_array)
blocks += 1
lat += pixel_height * rows
feedback.setProgress(100)
if feedback.isCanceled():
return {}
else:
# Convert all area tables from square meters into hectares
forest_loss = forest_loss * 1e-4
# Note that carbon is scaled by 10
carbon_loss = carbon_loss * 1e-4 / 10
area_missing = area_missing * 1e-4
area_water = area_water * 1e-4
area_non_forest = area_non_forest * 1e-4
area_site = area_site * 1e-4
initial_forest_area = initial_forest_area * 1e-4
# Note that carbon is scaled by 10
initial_carbon_total = initial_carbon_total * 1e-4 / 10
return {'FOREST_LOSS': np.array2string(forest_loss),
'CARBON_LOSS': np.array2string(carbon_loss),
'CARBON_INITIAL': initial_carbon_total,
'AREA_FOREST': initial_forest_area,
'AREA_NON_FOREST': area_non_forest,
'AREA_WATER': area_water,
'AREA_MISSING': area_missing,
'AREA_SITE': area_site}
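# Minimal usage sketch, assuming the algorithm is registered under a provider
# id of 'trendsearth' (only the groupId is shown in this file, so the actual
# provider id may differ; paths and years are placeholders):
#
#   from qgis import processing
#   results = processing.run('trendsearth:carbon_summary', {
#       'INPUT': '/path/to/carbon_analysis.tif',
#       'YEAR_START': 2005,
#       'YEAR_END': 2015,
#   })
#   print(results['CARBON_LOSS'])  # tonnes of C lost per year, as a string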
| gpl-2.0 |
hasgeek/funnel | migrations/versions/887db555cca9_adding_uuid_to_commentset.py | 1 | 1769 | """Adding uuid to commentset.
Revision ID: 887db555cca9
Revises: 222b78a8508d
Create Date: 2020-05-08 19:16:15.324555
"""
from uuid import uuid4
from alembic import op
from sqlalchemy.sql import column, table
from sqlalchemy_utils import UUIDType
import sqlalchemy as sa
from progressbar import ProgressBar
import progressbar.widgets
# revision identifiers, used by Alembic.
revision = '887db555cca9'
down_revision = '222b78a8508d'
branch_labels = None
depends_on = None
commentset = table(
'commentset', column('id', sa.Integer()), column('uuid', UUIDType(binary=False))
)
def get_progressbar(label, maxval):
return ProgressBar(
maxval=maxval,
widgets=[
label,
': ',
progressbar.widgets.Percentage(),
' ',
progressbar.widgets.Bar(),
' ',
progressbar.widgets.ETA(),
' ',
],
)
def upgrade():
conn = op.get_bind()
op.add_column(
'commentset', sa.Column('uuid', UUIDType(binary=False), nullable=True)
)
count = conn.scalar(sa.select([sa.func.count('*')]).select_from(commentset))
progress = get_progressbar("Commentsets", count)
progress.start()
items = conn.execute(sa.select([commentset.c.id]))
for counter, item in enumerate(items):
conn.execute(
sa.update(commentset).where(commentset.c.id == item.id).values(uuid=uuid4())
)
progress.update(counter)
progress.finish()
op.alter_column('commentset', 'uuid', nullable=False)
op.create_unique_constraint('commentset_uuid_key', 'commentset', ['uuid'])
def downgrade():
op.drop_constraint('commentset_uuid_key', 'commentset', type_='unique')
op.drop_column('commentset', 'uuid')
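# Typical invocation through the standard Alembic CLI (not part of this file):
#
#   alembic upgrade 887db555cca9    # adds commentset.uuid and backfills it
#   alembic downgrade 222b78a8508d  # drops the unique constraint and column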
| agpl-3.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/test/test_MimeWriter.py | 138 | 7593 | """Test program for MimeWriter module.
The test program was too big to comfortably fit in the MimeWriter
class, so it's here in its own file.
This should generate Barry's example, modulo some quotes and newlines.
"""
import unittest, StringIO
from test.test_support import run_unittest, import_module
import_module("MimeWriter", deprecated=True)
from MimeWriter import MimeWriter
SELLER = '''\
INTERFACE Seller-1;
TYPE Seller = OBJECT
DOCUMENTATION "A simple Seller interface to test ILU"
METHODS
price():INTEGER,
END;
'''
BUYER = '''\
class Buyer:
def __setup__(self, maxprice):
self._maxprice = maxprice
def __main__(self, kos):
"""Entry point upon arrival at a new KOS."""
broker = kos.broker()
# B4 == Barry's Big Bass Business :-)
seller = broker.lookup('Seller_1.Seller', 'B4')
if seller:
price = seller.price()
print 'Seller wants $', price, '... '
if price > self._maxprice:
print 'too much!'
else:
print "I'll take it!"
else:
print 'no seller found here'
''' # Don't ask why this comment is here
STATE = '''\
# instantiate a buyer instance and put it in a magic place for the KOS
# to find.
__kp__ = Buyer()
__kp__.__setup__(500)
'''
SIMPLE_METADATA = [
("Interpreter", "python"),
("Interpreter-Version", "1.3"),
("Owner-Name", "Barry Warsaw"),
("Owner-Rendezvous", "bwarsaw@cnri.reston.va.us"),
("Home-KSS", "kss.cnri.reston.va.us"),
("Identifier", "hdl://cnri.kss/my_first_knowbot"),
("Launch-Date", "Mon Feb 12 16:39:03 EST 1996"),
]
COMPLEX_METADATA = [
("Metadata-Type", "complex"),
("Metadata-Key", "connection"),
("Access", "read-only"),
("Connection-Description", "Barry's Big Bass Business"),
("Connection-Id", "B4"),
("Connection-Direction", "client"),
]
EXTERNAL_METADATA = [
("Metadata-Type", "complex"),
("Metadata-Key", "generic-interface"),
("Access", "read-only"),
("Connection-Description", "Generic Interface for All Knowbots"),
("Connection-Id", "generic-kp"),
("Connection-Direction", "client"),
]
OUTPUT = '''\
From: bwarsaw@cnri.reston.va.us
Date: Mon Feb 12 17:21:48 EST 1996
To: kss-submit@cnri.reston.va.us
MIME-Version: 1.0
Content-Type: multipart/knowbot;
boundary="801spam999";
version="0.1"
This is a multi-part message in MIME format.
--801spam999
Content-Type: multipart/knowbot-metadata;
boundary="802spam999"
--802spam999
Content-Type: message/rfc822
KP-Metadata-Type: simple
KP-Access: read-only
KPMD-Interpreter: python
KPMD-Interpreter-Version: 1.3
KPMD-Owner-Name: Barry Warsaw
KPMD-Owner-Rendezvous: bwarsaw@cnri.reston.va.us
KPMD-Home-KSS: kss.cnri.reston.va.us
KPMD-Identifier: hdl://cnri.kss/my_first_knowbot
KPMD-Launch-Date: Mon Feb 12 16:39:03 EST 1996
--802spam999
Content-Type: text/isl
KP-Metadata-Type: complex
KP-Metadata-Key: connection
KP-Access: read-only
KP-Connection-Description: Barry's Big Bass Business
KP-Connection-Id: B4
KP-Connection-Direction: client
INTERFACE Seller-1;
TYPE Seller = OBJECT
DOCUMENTATION "A simple Seller interface to test ILU"
METHODS
price():INTEGER,
END;
--802spam999
Content-Type: message/external-body;
access-type="URL";
URL="hdl://cnri.kss/generic-knowbot"
Content-Type: text/isl
KP-Metadata-Type: complex
KP-Metadata-Key: generic-interface
KP-Access: read-only
KP-Connection-Description: Generic Interface for All Knowbots
KP-Connection-Id: generic-kp
KP-Connection-Direction: client
--802spam999--
--801spam999
Content-Type: multipart/knowbot-code;
boundary="803spam999"
--803spam999
Content-Type: text/plain
KP-Module-Name: BuyerKP
class Buyer:
def __setup__(self, maxprice):
self._maxprice = maxprice
def __main__(self, kos):
"""Entry point upon arrival at a new KOS."""
broker = kos.broker()
# B4 == Barry's Big Bass Business :-)
seller = broker.lookup('Seller_1.Seller', 'B4')
if seller:
price = seller.price()
print 'Seller wants $', price, '... '
if price > self._maxprice:
print 'too much!'
else:
print "I'll take it!"
else:
print 'no seller found here'
--803spam999--
--801spam999
Content-Type: multipart/knowbot-state;
boundary="804spam999"
KP-Main-Module: main
--804spam999
Content-Type: text/plain
KP-Module-Name: main
# instantiate a buyer instance and put it in a magic place for the KOS
# to find.
__kp__ = Buyer()
__kp__.__setup__(500)
--804spam999--
--801spam999--
'''
class MimewriterTest(unittest.TestCase):
def test(self):
buf = StringIO.StringIO()
# Toplevel headers
toplevel = MimeWriter(buf)
toplevel.addheader("From", "bwarsaw@cnri.reston.va.us")
toplevel.addheader("Date", "Mon Feb 12 17:21:48 EST 1996")
toplevel.addheader("To", "kss-submit@cnri.reston.va.us")
toplevel.addheader("MIME-Version", "1.0")
# Toplevel body parts
f = toplevel.startmultipartbody("knowbot", "801spam999",
[("version", "0.1")], prefix=0)
f.write("This is a multi-part message in MIME format.\n")
# First toplevel body part: metadata
md = toplevel.nextpart()
md.startmultipartbody("knowbot-metadata", "802spam999")
# Metadata part 1
md1 = md.nextpart()
md1.addheader("KP-Metadata-Type", "simple")
md1.addheader("KP-Access", "read-only")
m = MimeWriter(md1.startbody("message/rfc822"))
for key, value in SIMPLE_METADATA:
m.addheader("KPMD-" + key, value)
m.flushheaders()
del md1
# Metadata part 2
md2 = md.nextpart()
for key, value in COMPLEX_METADATA:
md2.addheader("KP-" + key, value)
f = md2.startbody("text/isl")
f.write(SELLER)
del md2
# Metadata part 3
md3 = md.nextpart()
f = md3.startbody("message/external-body",
[("access-type", "URL"),
("URL", "hdl://cnri.kss/generic-knowbot")])
m = MimeWriter(f)
for key, value in EXTERNAL_METADATA:
md3.addheader("KP-" + key, value)
md3.startbody("text/isl")
# Phantom body doesn't need to be written
md.lastpart()
# Second toplevel body part: code
code = toplevel.nextpart()
code.startmultipartbody("knowbot-code", "803spam999")
# Code: buyer program source
buyer = code.nextpart()
buyer.addheader("KP-Module-Name", "BuyerKP")
f = buyer.startbody("text/plain")
f.write(BUYER)
code.lastpart()
# Third toplevel body part: state
state = toplevel.nextpart()
state.addheader("KP-Main-Module", "main")
state.startmultipartbody("knowbot-state", "804spam999")
# State: a bunch of assignments
st = state.nextpart()
st.addheader("KP-Module-Name", "main")
f = st.startbody("text/plain")
f.write(STATE)
state.lastpart()
# End toplevel body parts
toplevel.lastpart()
self.assertEqual(buf.getvalue(), OUTPUT)
def test_main():
run_unittest(MimewriterTest)
if __name__ == '__main__':
test_main()
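# Minimal MimeWriter sketch illustrating the nesting protocol exercised above
# (MimeWriter is deprecated; this mirrors the startmultipartbody/nextpart/
# lastpart calls used by the test):
#
#   buf = StringIO.StringIO()
#   w = MimeWriter(buf)
#   w.addheader("MIME-Version", "1.0")
#   w.startmultipartbody("mixed", "boundary123", prefix=0)
#   part = w.nextpart()
#   part.startbody("text/plain").write("hello\n")
#   w.lastpart()
#   print buf.getvalue()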
| mit |
jahangir091/geodash26 | geonode/geoserver/management/__init__.py | 6 | 1812 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf import settings
from django.db.models import signals
from django.utils.translation import ugettext_noop as _
import logging
logger = logging.getLogger(__name__)
if "notification" in settings.INSTALLED_APPS:
import notification
def create_notice_types(app, created_models, verbosity, **kwargs):
notification.models.NoticeType.create("layer_uploaded", _("Layer Uploaded"), _("A layer was uploaded"))
notification.models.NoticeType.create("layer_comment", _("Comment on Layer"), _("A layer was commented on"))
notification.models.NoticeType.create("layer_rated", _("Rating for Layer"), _("A rating was given to a layer"))
signals.post_migrate.connect(create_notice_types, sender=notification)
logger.info("Notifications Configured for geonode.layers.managment.commands")
else:
logger.info("Skipping creation of NoticeTypes for geonode.layers.management.commands, since notification app was \
not found.")
| gpl-3.0 |
brigittebigi/proceed | proceed/src/wxgui/frames/import_wizard.py | 1 | 18211 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# ___ __ ___ ___ ____ ____ __
# | \ | \ | | / | | | \ Automatic
# |__/ |__/ | | | |__ |__ | | Conference
# | |\_ | | | | | | | Proceedings
# | | \ |___| \___ |___ |___ |__/ Generator
# ==========================================================
#
# http://www.lpl-aix.fr/~bigi/
#
# ---------------------------------------------------------------------------
# developed at:
#
# Laboratoire Parole et Langage
#
# Copyright (C) 2013-2014 Brigitte Bigi
#
# Use of this software is governed by the GPL, v3
# This banner notice must not be removed
# ---------------------------------------------------------------------------
#
# Proceed is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Proceed is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Proceed. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
__docformat__ = "epytext"
# ---------------------------------------------------------------------------
import wx
import wx.lib.newevent
import wx.wizard
import logging
import os.path
import sys
sys.path.append( os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname( os.path.abspath(__file__))))), "src") )
from wxgui.cutils.imageutils import spBitmap
from wxgui.sp_consts import HEADER_FONTSIZE
from wxgui.sp_consts import FRAME_STYLE
from wxgui.sp_consts import FRAME_TITLE
from wxgui.sp_icons import IMPORT_EXPORT_ICON
from wxgui.sp_icons import GRID_ICON
from wxgui.sp_icons import TEX_ICON
from wxgui.sp_icons import WWW_ICON
from DataIO.Read.reader import Reader
from DataIO.Write.writer import Writer
from structs.prefs import Preferences
from structs.abstracts_themes import all_themes
from wxgui.frames.processprogress import ProcessProgressDialog
# ---------------------------------------------------------------------------
ImportFinishedEvent, EVT_IMPORT_WIZARD_FINISHED = wx.lib.newevent.NewEvent()
ImportFinishedCommandEvent, EVT_IMPORT_WIZARD_FINISHED_COMMAND = wx.lib.newevent.NewCommandEvent()
# ---------------------------------------------------------------------------
class ImportWizard( wx.wizard.Wizard ):
def __init__(self, parent):
wx.wizard.Wizard.__init__(self, parent, -1, title=FRAME_TITLE+" - Import", style=FRAME_STYLE)
self.output = ""
self.page0 = InputPage(self)
self.page0.SetName("input")
self.page1 = OutputPage(self)
self.page1.SetName("output")
self.page2 = LatexPage(self)
self.page2.SetName("latex")
wx.wizard.WizardPageSimple.Chain(self.page0, self.page1)
wx.wizard.WizardPageSimple.Chain(self.page1, self.page2)
self.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGED, self.onPageChanged)
self.Bind(wx.wizard.EVT_WIZARD_FINISHED, self.onFinished)
wx.CallAfter(self.SetSize,(520,440))
self.RunWizard(self.page0)
self.Destroy()
#----------------------------------------------------------------------
def onPageChanged(self, event):
""""""
page = event.GetPage()
if page.GetName() == "output":
if not os.path.exists(self.page0.urlFld.GetValue()):
wx.MessageBox("A valid input file name is required.", 'Info', wx.OK | wx.ICON_INFORMATION)
self.RunWizard(self.page0)
return
else:
p = ProcessProgressDialog(self)
p.Show()
arguments = {}
arguments['readername'] = self.page0.confname
arguments['filename'] = self.page0.urlFld.GetValue()
arguments['authorsfilename'] = self.page0.urlauthFld.GetValue()
arguments['progress'] = p
try:
self.reader = Reader( arguments )
p.close()
except Exception as e:
wx.MessageBox("Error while reading file:\n%s"%str(e), 'Info', wx.OK | wx.ICON_INFORMATION)
self.Destroy()
elif page.GetName() == "latex":
# if not self.page1.urlFld.GetValue().strip():
# wx.MessageBox("A directory is required.", 'Info', wx.OK | wx.ICON_INFORMATION)
# self.RunWizard(self.page1)
# return
self.output = self.page1.urlFld.GetValue().strip()
if not os.path.exists( self.output ):
try:
os.mkdir( self.output )
except Exception as e:
wx.MessageBox("Error while creating output directory:\n%s"%str(e), 'Info', wx.OK | wx.ICON_INFORMATION)
self.RunWizard(self.page1)
return
try:
self.writer = Writer( self.reader.docs )
self.writer.set_status( self.page1.status )
if self.page1.exportcsv:
self.writer.writeCSV( self.output )
if self.page1.exporthtml:
self.writer.writeHTML( self.output )
except Exception as e:
wx.MessageBox("Error while creating output files:\n%s"%str(e), 'Info', wx.OK | wx.ICON_INFORMATION)
self.RunWizard(self.page1)
return
#----------------------------------------------------------------------
def onFinished(self, event):
""""""
if self.page2.export is True:
# Create preferences
prefs = Preferences()
theme = all_themes.get_theme( self.page2.theme )
prefs.SetTheme( theme )
prefs.SetValue('COMPILER', 'str', self.page2.compiler.strip())
# Write as LaTeX in the same dir as proceed CSV files
p = ProcessProgressDialog(self)
p.Show()
self.writer.set_progress(p)
self.writer.writeLaTeX_as_Dir( self.output, prefs )
self.writer.set_progress(None)
p.close()
evt = ImportFinishedEvent(path=self.output)
evt.SetEventObject(self)
wx.PostEvent(self.GetParent(), evt)
#----------------------------------------------------------------------
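# Hedged sketch of how a parent window can react to the custom event posted
# above (binder names match the module-level NewEvent() definitions; the
# handler name is illustrative):
#
#   self.Bind(EVT_IMPORT_WIZARD_FINISHED, self.onImportFinished)
#
#   def onImportFinished(self, evt):
#       print evt.path  # output directory chosen in the wizard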
# ----------------------------------------------------------------------------
class InputPage(wx.wizard.WizardPageSimple):
""" Parameters for the input data. """
def __init__(self, parent):
"""
Constructor.
"""
wx.wizard.WizardPageSimple.__init__(self, parent)
sizer = wx.BoxSizer(wx.VERTICAL)
self.dirname = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
title_layout = wx.BoxSizer(wx.HORIZONTAL)
bmp = wx.BitmapButton(self, bitmap=spBitmap(IMPORT_EXPORT_ICON, 32), style=wx.NO_BORDER)
font = wx.Font(HEADER_FONTSIZE, wx.MODERN, wx.NORMAL, wx.BOLD, False, u'Consolas')
title_label = wx.StaticText(self, label="File to import and related information:", style=wx.ALIGN_CENTER)
title_label.SetFont( font )
title_layout.Add(bmp, flag=wx.TOP|wx.RIGHT|wx.ALIGN_RIGHT, border=5)
title_layout.Add(title_label, flag=wx.EXPAND|wx.ALL|wx.ALIGN_CENTER_VERTICAL, border=5)
sizer.Add(title_layout, 0, flag=wx.ALL, border=0)
sizer.Add((-1, 10))
# --------- Conference web site
confnames = ['sciencesconf', 'easychair']
self.confname = 'sciencesconf'
readername = wx.RadioBox(self, label=" The file to import comes from: ", size=(410,-1), choices=confnames, majorDimension=1)
readername.SetSelection( 0 )
readername.Bind(wx.EVT_RADIOBOX, self.onConfName)
sizer.Add(readername, 0, flag=wx.ALL, border=0)
sizer.Add((-1, 10))
# --------- Input file name
hBox = wx.BoxSizer(wx.HORIZONTAL)
hBox.Add(wx.StaticText(self, label="File name:", size=(100,30)), flag=wx.TOP|wx.ALIGN_CENTER_VERTICAL, border=5)
self.urlFld = wx.TextCtrl(self, size=(300,30))
hBox.Add(self.urlFld, 1, flag=wx.LEFT, border=2)
checkBtn = wx.Button(self, -1, "Choose...", size=(80,30))
checkBtn.Bind(wx.EVT_BUTTON, lambda evt, temp="input": self.onOpen(evt, temp) )
hBox.Add(checkBtn, 0, flag=wx.LEFT, border=10)
sizer.Add(hBox, flag=wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP)
sizer.Add((-1, 10))
# --------- Input file name for authors
hBox = wx.BoxSizer(wx.HORIZONTAL)
self.authtext = wx.StaticText(self, label="Authors file:", size=(100,30))
hBox.Add(self.authtext, flag=wx.TOP|wx.ALIGN_CENTER_VERTICAL, border=5)
self.urlauthFld = wx.TextCtrl(self, size=(300,30))
hBox.Add(self.urlauthFld, 1, flag=wx.LEFT, border=2)
self.checkauthBtn = wx.Button(self, -1, "Choose...", size=(80,30))
self.checkauthBtn.Bind(wx.EVT_BUTTON, lambda evt, temp="author": self.onOpen(evt, temp) )
hBox.Add(self.checkauthBtn, 0, flag=wx.LEFT, border=10)
sizer.Add(hBox, flag=wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP)
self.enable()
self.Layout()
self.SetSizerAndFit(sizer)
def onOpen(self, event, temp):
filename = self.file_open()
if filename:
if temp == "input":
self.urlFld.SetValue(filename)
else:
self.urlauthFld.SetValue(filename)
def onConfName(self, event):
o = event.GetEventObject()
self.confname = o.GetStringSelection()
self.enable()
def enable(self):
if self.confname == 'easychair':
self.authtext.SetForegroundColour( wx.Colour(180,80,80))
self.checkauthBtn.Enable(True)
else:
self.authtext.SetForegroundColour( wx.Colour(128,128,128))
self.checkauthBtn.Enable(False)
def file_open(self):
with wx.FileDialog(self, "Choose a file to import", self.dirname,
"", "*.*", wx.OPEN) as dlg:
if dlg.ShowModal() == wx.ID_OK:
directory, filename = dlg.GetDirectory(), dlg.GetFilename()
return os.path.join(directory, filename)
return None
# ----------------------------------------------------------------------------
class OutputPage(wx.wizard.WizardPageSimple):
""" Parameters for the output data. """
def __init__(self, parent):
"""
Constructor.
"""
wx.wizard.WizardPageSimple.__init__(self, parent)
self.urlFld = ""
self.dirname = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
sizer = wx.BoxSizer(wx.VERTICAL)
title_layout = wx.BoxSizer(wx.HORIZONTAL)
bmp = wx.BitmapButton(self, bitmap=spBitmap(GRID_ICON, 32), style=wx.NO_BORDER)
font = wx.Font(HEADER_FONTSIZE, wx.MODERN, wx.NORMAL, wx.BOLD, False, u'Consolas')
title_label = wx.StaticText(self, label="Where to save:", style=wx.ALIGN_CENTER)
title_label.SetFont( font )
title_layout.Add(bmp, flag=wx.TOP|wx.RIGHT|wx.ALIGN_RIGHT, border=5)
title_layout.Add(title_label, flag=wx.EXPAND|wx.ALL|wx.ALIGN_CENTER_VERTICAL, border=5)
sizer.Add(title_layout, 0, flag=wx.ALL, border=0)
sizer.Add((-1, 10))
# --------- Output directory
hBox = wx.BoxSizer(wx.HORIZONTAL)
hBox.Add(wx.StaticText(self, label="Directory:", size=(100,30)), flag=wx.TOP|wx.ALIGN_CENTER_VERTICAL, border=5)
self.urlFld = wx.TextCtrl(self, size=(300,30))
hBox.Add(self.urlFld, 1, flag=wx.LEFT, border=2)
checkBtn = wx.Button(self, -1, "Choose...", size=(80,30))
checkBtn.Bind(wx.EVT_BUTTON, self.onDirectory )
hBox.Add(checkBtn, 0, flag=wx.LEFT, border=10)
sizer.Add(hBox, flag=wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP)
sizer.Add((-1, 10))
self.SetSizer(sizer)
# ---------- Status
allstatus = ['init papers (status=0)', 'only accepted papers (status=1)']
self.status = 1
statusradio = wx.RadioBox(self, label=" Choose papers to save: ", size=(410,-1), choices=allstatus, majorDimension=1)
statusradio.SetSelection( 1 )
statusradio.Bind(wx.EVT_RADIOBOX, self.onStatus)
sizer.Add(statusradio, 0, flag=wx.ALL, border=0)
sizer.Add((-1, 20))
# ----------CSV
self.exportcsv = True
cbp = wx.CheckBox(self, label="Save as CSV files for Proceed", size=(300,-1))
cbp.SetValue(True)
cbp.Bind(wx.EVT_CHECKBOX, self.onExportAsCSV)
sizer.Add(cbp, 0, flag=wx.LEFT, border=0)
sizer.Add((-1, 10))
# ----------HTML
self.exporthtml = False
cbp = wx.CheckBox(self, label="Save the list of papers in HTML", size=(300,-1))
cbp.SetValue(False)
cbp.Bind(wx.EVT_CHECKBOX, self.onExportAsHTML)
sizer.Add(cbp, 0, flag=wx.LEFT, border=0)
self.SetSizerAndFit(sizer)
def onDirectory(self, event):
with wx.DirDialog(self, "Choose a directory to save in", self.dirname, style=wx.DD_CHANGE_DIR) as dlg:
if dlg.ShowModal() == wx.ID_OK:
self.urlFld.SetValue( dlg.GetPath() )
def onStatus(self, event):
o = event.GetEventObject()
self.status = o.GetSelection()
def onExportAsCSV(self, event):
o = event.GetEventObject()
self.exportcsv = bool( o.GetValue() )
def onExportAsHTML(self, event):
o = event.GetEventObject()
self.exporthtml = bool( o.GetValue() )
# ----------------------------------------------------------------------------
class LatexPage(wx.wizard.WizardPageSimple):
""" Process the data. """
def __init__(self, parent):
"""
Constructor.
"""
wx.wizard.WizardPageSimple.__init__(self, parent)
self.urlFld = ""
self.dirname = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
sizer = wx.BoxSizer(wx.VERTICAL)
title_layout = wx.BoxSizer(wx.HORIZONTAL)
bmp = wx.BitmapButton(self, bitmap=spBitmap(GRID_ICON, 32), style=wx.NO_BORDER)
font = wx.Font(HEADER_FONTSIZE, wx.MODERN, wx.NORMAL, wx.BOLD, False, u'Consolas')
title_label = wx.StaticText(self, label="Save abstracts as LaTeX...", style=wx.ALIGN_CENTER)
title_label.SetFont( font )
title_layout.Add(bmp, flag=wx.TOP|wx.RIGHT|wx.ALIGN_RIGHT, border=5)
title_layout.Add(title_label, flag=wx.EXPAND|wx.ALL|wx.ALIGN_CENTER_VERTICAL, border=5)
sizer.Add(title_layout, 0, flag=wx.ALL, border=0)
sizer.Add((-1, 10))
# ----------CHECK
self.export = False
cbp = wx.CheckBox(self, label="Create each abstract as a LaTeX file", size=(300,-1))
cbp.SetValue(False)
cbp.Bind(wx.EVT_CHECKBOX, self.onExport)
sizer.Add(cbp, 0, flag=wx.LEFT, border=0)
sizer.Add((-1, 10))
# ------------- Theme
self.theme = 'basic'
thlist = sorted(all_themes.get_themes().keys())
self.themeradio = wx.RadioBox(self, label=" Choose a style: ", size=(410,-1), choices=thlist, majorDimension=1)
self.themeradio.SetSelection( thlist.index( 'basic' ) )
self.themeradio.Bind(wx.EVT_RADIOBOX, self.onTheme)
sizer.Add(self.themeradio, 0, flag=wx.LEFT, border=40)
sizer.Add((-1, 10))
# ------------- Compiler
self.compilers = ['pdflatex', 'xetex']
self.compiler = 'pdflatex'
self.comradio = wx.RadioBox(self, label=" Choose the LaTeX compiler: ", size=(410,-1), choices=self.compilers, majorDimension=1)
self.comradio.SetSelection( 0 )
self.comradio.Bind(wx.EVT_RADIOBOX, self.onCompiler)
sizer.Add(self.comradio, 0, flag=wx.LEFT, border=40)
sizer.Add((-1, 10))
# ------------- PDF
self.pdf = True
self.cbp = wx.CheckBox(self, label="Compile the LaTeX files", size=(300,-1))
self.cbp.SetValue(True)
self.cbp.Bind(wx.EVT_CHECKBOX, self.onPDFChange)
sizer.Add(self.cbp, 0, flag=wx.LEFT, border=40)
self.enable(False)
self.SetSizerAndFit(sizer)
def onCompiler(self, event):
o = event.GetEventObject()
self.compiler = o.GetStringSelection()
def onTheme(self, event):
o = event.GetEventObject()
self.theme = o.GetStringSelection()
def onPDFChange(self, event):
o = event.GetEventObject()
self.pdf = bool( o.GetValue() )
def onExport(self, event):
o = event.GetEventObject()
self.export = bool( o.GetValue() )
self.enable(self.export)
def enable(self, value):
if value is False:
self.themeradio.SetForegroundColour(wx.Colour(128,128,128))
self.comradio.SetForegroundColour(wx.Colour(128,128,128))
else:
self.themeradio.SetForegroundColour(wx.Colour(80,80,200))
self.comradio.SetForegroundColour(wx.Colour(80,80,200))
for i in range(len(all_themes.get_themes().keys())):
self.themeradio.EnableItem(i,value)
for i in range(len(self.compilers)):
self.comradio.EnableItem(i,value)
self.cbp.Enable(value)
# ----------------------------------------------------------------------------
if __name__ == "__main__":
app = wx.App(False)
ImportWizard(None)
app.MainLoop()
#----------------------------------------------------------------------
| gpl-3.0 |
welshjf/python-bitcoinlib | bitcoin/base58.py | 24 | 4154 | # Copyright (C) 2011 Sam Rushing
# Copyright (C) 2013-2014 The python-bitcoinlib developers
#
# This file is part of python-bitcoinlib.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoinlib, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
"""Base58 encoding and decoding"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
_bchr = chr
_bord = ord
if sys.version > '3':
long = int
_bchr = lambda x: bytes([x])
_bord = lambda x: x
import binascii
import bitcoin.core
B58_DIGITS = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
class Base58Error(Exception):
pass
class InvalidBase58Error(Base58Error):
"""Raised on generic invalid base58 data, such as bad characters.
Checksum failures raise Base58ChecksumError specifically.
"""
pass
def encode(b):
"""Encode bytes to a base58-encoded string"""
# Convert big-endian bytes to integer
n = int('0x0' + binascii.hexlify(b).decode('utf8'), 16)
# Divide that integer into base58 digits
res = []
while n > 0:
n, r = divmod(n, 58)
res.append(B58_DIGITS[r])
res = ''.join(res[::-1])
# Encode leading zeros as base58 zeros
czero = b'\x00'
if sys.version > '3':
# In Python3 indexing a bytes returns numbers, not characters.
czero = 0
pad = 0
for c in b:
if c == czero:
pad += 1
else:
break
return B58_DIGITS[0] * pad + res
def decode(s):
"""Decode a base58-encoding string, returning bytes"""
if not s:
return b''
# Convert the string to an integer
n = 0
for c in s:
n *= 58
if c not in B58_DIGITS:
raise InvalidBase58Error('Character %r is not a valid base58 character' % c)
digit = B58_DIGITS.index(c)
n += digit
# Convert the integer to bytes
h = '%x' % n
if len(h) % 2:
h = '0' + h
res = binascii.unhexlify(h.encode('utf8'))
# Add padding back.
pad = 0
for c in s[:-1]:
if c == B58_DIGITS[0]: pad += 1
else: break
return b'\x00' * pad + res
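# Worked round-trip example (hand-checkable: the leading zero byte maps to
# the base58 zero digit '1', and the value 0x01 encodes as '2'):
#
#   encode(b'\x00\x01')  # -> '12'
#   decode('12')         # -> b'\x00\x01'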
class Base58ChecksumError(Base58Error):
"""Raised on Base58 checksum errors"""
pass
class CBase58Data(bytes):
"""Base58-encoded data
Includes a version and checksum.
"""
def __new__(cls, s):
k = decode(s)
verbyte, data, check0 = k[0:1], k[1:-4], k[-4:]
check1 = bitcoin.core.Hash(verbyte + data)[:4]
if check0 != check1:
raise Base58ChecksumError('Checksum mismatch: expected %r, calculated %r' % (check0, check1))
return cls.from_bytes(data, _bord(verbyte[0]))
def __init__(self, s):
"""Initialize from base58-encoded string
Note: subclasses put your initialization routines here, but ignore the
argument - that's handled by __new__(), and .from_bytes() will call
__init__() with None in place of the string.
"""
@classmethod
def from_bytes(cls, data, nVersion):
"""Instantiate from data and nVersion"""
if not (0 <= nVersion <= 255):
raise ValueError('nVersion must be in range 0 to 255 inclusive; got %d' % nVersion)
self = bytes.__new__(cls, data)
self.nVersion = nVersion
return self
def to_bytes(self):
"""Convert to bytes instance
Note that it's the data represented that is converted; the checkum and
nVersion is not included.
"""
return b'' + self
def __str__(self):
"""Convert to string"""
vs = _bchr(self.nVersion) + self
check = bitcoin.core.Hash(vs)[0:4]
return encode(vs + check)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, str(self))
__all__ = (
'B58_DIGITS',
'Base58Error',
'InvalidBase58Error',
'encode',
'decode',
'Base58ChecksumError',
'CBase58Data',
)
| lgpl-3.0 |
philips/shortbread | Godeps/_workspace/src/github.com/libgit2/git2go/vendor/libgit2/tests/generate.py | 27 | 7418 | #!/usr/bin/env python
#
# Copyright (c) Vicent Marti. All rights reserved.
#
# This file is part of clar, distributed under the ISC license.
# For full terms see the included COPYING file.
#
from __future__ import with_statement
from string import Template
import re, fnmatch, os, codecs, pickle
class Module(object):
class Template(object):
def __init__(self, module):
self.module = module
def _render_callback(self, cb):
if not cb:
return ' { NULL, NULL }'
return ' { "%s", &%s }' % (cb['short_name'], cb['symbol'])
class DeclarationTemplate(Template):
def render(self):
out = "\n".join("extern %s;" % cb['declaration'] for cb in self.module.callbacks) + "\n"
if self.module.initialize:
out += "extern %s;\n" % self.module.initialize['declaration']
if self.module.cleanup:
out += "extern %s;\n" % self.module.cleanup['declaration']
return out
class CallbacksTemplate(Template):
def render(self):
out = "static const struct clar_func _clar_cb_%s[] = {\n" % self.module.name
out += ",\n".join(self._render_callback(cb) for cb in self.module.callbacks)
out += "\n};\n"
return out
class InfoTemplate(Template):
def render(self):
return Template(
r"""
{
"${clean_name}",
${initialize},
${cleanup},
${cb_ptr}, ${cb_count}, ${enabled}
}"""
).substitute(
clean_name = self.module.clean_name(),
initialize = self._render_callback(self.module.initialize),
cleanup = self._render_callback(self.module.cleanup),
cb_ptr = "_clar_cb_%s" % self.module.name,
cb_count = len(self.module.callbacks),
enabled = int(self.module.enabled)
)
def __init__(self, name):
self.name = name
self.mtime = 0
self.enabled = True
self.modified = False
def clean_name(self):
return self.name.replace("_", "::")
def _skip_comments(self, text):
SKIP_COMMENTS_REGEX = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE)
def _replacer(match):
s = match.group(0)
return "" if s.startswith('/') else s
return re.sub(SKIP_COMMENTS_REGEX, _replacer, text)
def parse(self, contents):
TEST_FUNC_REGEX = r"^(void\s+(test_%s__(\w+))\(\s*void\s*\))\s*\{"
contents = self._skip_comments(contents)
regex = re.compile(TEST_FUNC_REGEX % self.name, re.MULTILINE)
self.callbacks = []
self.initialize = None
self.cleanup = None
for (declaration, symbol, short_name) in regex.findall(contents):
data = {
"short_name" : short_name,
"declaration" : declaration,
"symbol" : symbol
}
if short_name == 'initialize':
self.initialize = data
elif short_name == 'cleanup':
self.cleanup = data
else:
self.callbacks.append(data)
return self.callbacks != []
def refresh(self, path):
self.modified = False
try:
st = os.stat(path)
# Not modified
if st.st_mtime == self.mtime:
return True
self.modified = True
self.mtime = st.st_mtime
with open(path) as fp:
raw_content = fp.read()
except IOError:
return False
return self.parse(raw_content)
class TestSuite(object):
def __init__(self, path):
self.path = path
def should_generate(self, path):
if not os.path.isfile(path):
return True
if any(module.modified for module in self.modules.values()):
return True
return False
def find_modules(self):
modules = []
for root, _, files in os.walk(self.path):
module_root = root[len(self.path):]
module_root = [c for c in module_root.split(os.sep) if c]
tests_in_module = fnmatch.filter(files, "*.c")
for test_file in tests_in_module:
full_path = os.path.join(root, test_file)
module_name = "_".join(module_root + [test_file[:-2]])
modules.append((full_path, module_name))
return modules
def load_cache(self):
path = os.path.join(self.path, '.clarcache')
cache = {}
try:
fp = open(path, 'rb')
cache = pickle.load(fp)
fp.close()
except (IOError, ValueError):
pass
return cache
def save_cache(self):
path = os.path.join(self.path, '.clarcache')
with open(path, 'wb') as cache:
pickle.dump(self.modules, cache)
def load(self, force = False):
module_data = self.find_modules()
self.modules = {} if force else self.load_cache()
for path, name in module_data:
if name not in self.modules:
self.modules[name] = Module(name)
if not self.modules[name].refresh(path):
del self.modules[name]
def disable(self, excluded):
for exclude in excluded:
for module in self.modules.values():
name = module.clean_name()
if name.startswith(exclude):
module.enabled = False
module.modified = True
def suite_count(self):
return len(self.modules)
def callback_count(self):
return sum(len(module.callbacks) for module in self.modules.values())
def write(self):
output = os.path.join(self.path, 'clar.suite')
if not self.should_generate(output):
return False
with open(output, 'w') as data:
for module in self.modules.values():
t = Module.DeclarationTemplate(module)
data.write(t.render())
for module in self.modules.values():
t = Module.CallbacksTemplate(module)
data.write(t.render())
suites = "static struct clar_suite _clar_suites[] = {" + ','.join(
Module.InfoTemplate(module).render() for module in sorted(self.modules.values(), key=lambda module: module.name)
) + "\n};\n"
data.write(suites)
data.write("static const size_t _clar_suite_count = %d;\n" % self.suite_count())
data.write("static const size_t _clar_callback_count = %d;\n" % self.callback_count())
suite.save_cache()
return True
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-f', '--force', action='store_true', dest='force', default=False)
parser.add_option('-x', '--exclude', dest='excluded', action='append', default=[])
options, args = parser.parse_args()
for path in args or ['.']:
suite = TestSuite(path)
suite.load(options.force)
suite.disable(options.excluded)
if suite.write():
print("Written `clar.suite` (%d tests in %d suites)" % (suite.callback_count(), suite.suite_count()))
| apache-2.0 |
mch/python-ant | src/ant/core/message.py | 1 | 18462 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2011, Martín Raúl Villalba
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
##############################################################################
# pylint: disable=missing-docstring
from __future__ import division, absolute_import, print_function, unicode_literals
from struct import pack, unpack
from six import with_metaclass
from ant.core import constants
from ant.core.constants import MESSAGE_TX_SYNC, RESPONSE_NO_ERROR
from ant.core.exceptions import MessageError
class MessageType(type):
def __init__(cls, name, bases, dict_):
super(MessageType, cls).__init__(name, bases, dict_)
type_ = cls.type
if type_ is not None:
cls.TYPES[type_] = cls
def __call__(cls, *args, **kwargs):
if cls.type is not None:
return super(MessageType, cls).__call__(*args, **kwargs)
type_ = kwargs.get('type')
if type_ is None:
raise RuntimeError("'Message' cannot be untyped")
del kwargs['type']
msgType = cls.TYPES.get(type_)
if msgType is not None:
return msgType(*args, **kwargs)
if 0x00 <= type_ <= 0xFF:
msg = super(MessageType, cls).__call__(*args, **kwargs)
msg.type = type_
return msg
else:
raise MessageError('Could not set type (type out of range).',
internal=Message.CORRUPTED)
MSG_HEADER_SIZE = 3
MSG_FOOTER_SIZE = 1
class Message(with_metaclass(MessageType)):
TYPES = {}
type = None
INCOMPLETE = 'incomplete'
CORRUPTED = 'corrupted'
MALFORMED = 'malformed'
def __init__(self, payload=None):
self._payload = None
self.payload = payload if payload is not None else bytearray()
@property
def payload(self):
return self._payload
@payload.setter
def payload(self, payload):
if len(payload) > 9:
raise MessageError('Could not set payload (payload too long).',
internal=Message.MALFORMED)
self._payload = payload
@property
def checksum(self):
checksum = MESSAGE_TX_SYNC ^ len(self._payload) ^ self.type
for byte in self._payload:
checksum ^= byte
return checksum
def encode(self):
raw, payload = bytearray(len(self)), self._payload
raw[0:MSG_HEADER_SIZE] = (MESSAGE_TX_SYNC, len(payload), self.type)
raw[MSG_HEADER_SIZE:-MSG_FOOTER_SIZE] = payload
raw[-1] = self.checksum
return raw
@classmethod
def decode(cls, raw):
raw = bytearray(raw)
if len(raw) < 5:
raise MessageError('Could not decode. Message length should be >=5 bytes but was %d.' % len(raw),
internal=Message.INCOMPLETE)
sync, length, type_ = raw[:MSG_HEADER_SIZE]
if sync != MESSAGE_TX_SYNC:
raise MessageError('Could not decode. Expected TX sync but got 0x%.2x.' % sync,
internal=Message.CORRUPTED)
if len(raw) < (length + MSG_HEADER_SIZE + MSG_FOOTER_SIZE):
raise MessageError('Could not decode. Message length should be %d but was %d.' %
(length + MSG_HEADER_SIZE + MSG_FOOTER_SIZE, len(raw)),
internal=Message.INCOMPLETE)
msg = Message(type=type_) # pylint: disable=unexpected-keyword-arg
msg.payload = raw[MSG_HEADER_SIZE:length + MSG_HEADER_SIZE]
if msg.checksum != raw[length + MSG_HEADER_SIZE]:
raise MessageError('Could not decode. Checksum should be 0x%.2x but was 0x%.2x.' %
(raw[length + MSG_HEADER_SIZE], msg.checksum),
internal=Message.CORRUPTED)
return msg
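# Hedged round-trip sketch (decode() dispatches through the metaclass to the
# subclass registered for the wire type):
#
#   raw = ChannelOpenMessage(number=0).encode()
#   msg = Message.decode(raw)
#   assert isinstance(msg, ChannelOpenMessage)
#   assert msg.encode() == raw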
def __len__(self):
return len(self._payload) + MSG_HEADER_SIZE + MSG_FOOTER_SIZE
def __str__(self, data=None):
rawstr = '<' + self.__class__.__name__
if data is not None:
rawstr += ': ' + data
return rawstr + '>'
class ChannelMessage(Message):
def __init__(self, payload=b'', number=0x00):
super(ChannelMessage, self).__init__(bytearray(1) + payload)
self.channelNumber = number
@property
def channelNumber(self):
return self._payload[0]
@channelNumber.setter
def channelNumber(self, number):
if (number > 0xFF) or (number < 0x00):
raise MessageError('Could not set channel number. Should be 0 to 255 but was %s.' % number)
self._payload[0] = number
def __str__(self, data=None):
rawstr = "C(%d)" % self.channelNumber
if data is not None:
rawstr += ': ' + data
return super(ChannelMessage, self).__str__(data=rawstr)
# Config messages
class ChannelUnassignMessage(ChannelMessage):
type = constants.MESSAGE_CHANNEL_UNASSIGN
def __init__(self, number=0x00):
super(ChannelUnassignMessage, self).__init__(number=number)
class ChannelAssignMessage(ChannelMessage):
type = constants.MESSAGE_CHANNEL_ASSIGN
def __init__(self, number=0x00, channelType=0x00, network=0x00):
super(ChannelAssignMessage, self).__init__(payload=bytearray(2), number=number)
self.channelType = channelType
self.networkNumber = network
@property
def channelType(self):
return self._payload[1]
@channelType.setter
def channelType(self, type_):
self._payload[1] = type_
@property
def networkNumber(self):
return self._payload[2]
@networkNumber.setter
def networkNumber(self, number):
self._payload[2] = number
class ChannelIDMessage(ChannelMessage):
type = constants.MESSAGE_CHANNEL_ID
def __init__(self, number=0x00, device_number=0x0000, device_type=0x00,
trans_type=0x00):
super(ChannelIDMessage, self).__init__(payload=bytearray(4), number=number)
self.deviceNumber = device_number
self.deviceType = device_type
self.transmissionType = trans_type
@property
def deviceNumber(self):
return unpack(b'<H', bytes(self._payload[1:3]))[0]
@deviceNumber.setter
def deviceNumber(self, device_number):
self._payload[1:3] = pack(b'<H', device_number)
@property
def deviceType(self):
return self._payload[3]
@deviceType.setter
def deviceType(self, device_type):
self._payload[3] = device_type
@property
def transmissionType(self):
return self._payload[4]
@transmissionType.setter
def transmissionType(self, trans_type):
self._payload[4] = trans_type
class ChannelPeriodMessage(ChannelMessage):
type = constants.MESSAGE_CHANNEL_PERIOD
def __init__(self, number=0x00, period=8192):
super(ChannelPeriodMessage, self).__init__(payload=bytearray(2), number=number)
self.channelPeriod = period
@property
def channelPeriod(self):
return unpack('<H', bytes(self._payload[1:3]))[0]
@channelPeriod.setter
def channelPeriod(self, period):
self._payload[1:3] = pack('<H', period)
class ChannelSearchTimeoutMessage(ChannelMessage):
type = constants.MESSAGE_CHANNEL_SEARCH_TIMEOUT
def __init__(self, number=0x00, timeout=0xFF):
super(ChannelSearchTimeoutMessage, self).__init__(payload=bytearray(1),
number=number)
self.timeout = timeout
@property
def timeout(self):
return self._payload[1]
@timeout.setter
def timeout(self, timeout):
self._payload[1] = timeout
class ChannelFrequencyMessage(ChannelMessage):
type = constants.MESSAGE_CHANNEL_FREQUENCY
def __init__(self, number=0x00, frequency=66):
super(ChannelFrequencyMessage, self).__init__(payload=bytearray(1), number=number)
self.frequency = frequency
@property
def frequency(self):
return self._payload[1]
@frequency.setter
def frequency(self, frequency):
self._payload[1] = frequency
class ChannelTXPowerMessage(ChannelMessage):
type = constants.MESSAGE_CHANNEL_TX_POWER
def __init__(self, number=0x00, power=0x00):
super(ChannelTXPowerMessage, self).__init__(payload=bytearray(1), number=number)
self.power = power
@property
def power(self):
return self._payload[1]
@power.setter
def power(self, power):
self._payload[1] = power
class NetworkKeyMessage(Message):
type = constants.MESSAGE_NETWORK_KEY
def __init__(self, number=0x00, key=b'\x00' * 8):
super(NetworkKeyMessage, self).__init__(payload=bytearray(9))
self.number = number
self.key = key
@property
def number(self):
return self._payload[0]
@number.setter
def number(self, number):
self._payload[0] = number
@property
def key(self):
return self._payload[1:]
@key.setter
def key(self, key):
self._payload[1:] = key
class TXPowerMessage(Message):
type = constants.MESSAGE_TX_POWER
def __init__(self, power=0x00):
super(TXPowerMessage, self).__init__(payload=bytearray(2))
self.power = power
@property
def power(self):
return self._payload[1]
@power.setter
def power(self, power):
self._payload[1] = power
# Control messages
class SystemResetMessage(Message):
type = constants.MESSAGE_SYSTEM_RESET
def __init__(self):
super(SystemResetMessage, self).__init__(payload=bytearray(1))
class ChannelOpenMessage(ChannelMessage):
type = constants.MESSAGE_CHANNEL_OPEN
def __init__(self, number=0x00):
super(ChannelOpenMessage, self).__init__(number=number)
class ChannelCloseMessage(ChannelMessage):
type = constants.MESSAGE_CHANNEL_CLOSE
def __init__(self, number=0x00):
super(ChannelCloseMessage, self).__init__(number=number)
class ChannelRequestMessage(ChannelMessage):
type = constants.MESSAGE_CHANNEL_REQUEST
def __init__(self, number=0x00, messageID=constants.MESSAGE_CHANNEL_STATUS):
super(ChannelRequestMessage, self).__init__(payload=bytearray(1), number=number)
self.messageID = messageID
@property
def messageID(self):
return self._payload[1]
@messageID.setter
def messageID(self, messageID):
if (messageID > 0xFF) or (messageID < 0x00):
raise MessageError('Could not set message ID. Should be 0 to 255 but was %s.' % messageID)
self._payload[1] = messageID
# Data messages
class ChannelBroadcastDataMessage(ChannelMessage):
type = constants.MESSAGE_CHANNEL_BROADCAST_DATA
def __init__(self, number=0x00, data=b'\x00' * 7):
super(ChannelBroadcastDataMessage, self).__init__(payload=data, number=number)
@property
def data(self):
return self._payload[1:9]
class ChannelAcknowledgedDataMessage(ChannelMessage):
type = constants.MESSAGE_CHANNEL_ACKNOWLEDGED_DATA
def __init__(self, number=0x00, data=b'\x00' * 7):
super(ChannelAcknowledgedDataMessage, self).__init__(payload=data, number=number)
@property
def data(self):
return self._payload[1:9]
class ChannelBurstDataMessage(ChannelMessage):
type = constants.MESSAGE_CHANNEL_BURST_DATA
def __init__(self, number=0x00, data=b'\x00' * 7):
super(ChannelBurstDataMessage, self).__init__(payload=data, number=number)
@property
def data(self):
return self._payload[1:9]
# Channel event messages
class ChannelEventResponseMessage(ChannelMessage):
type = constants.MESSAGE_CHANNEL_EVENT
def __init__(self, number=0x00, message_id=0x00, message_code=0x00):
super(ChannelEventResponseMessage, self).__init__(payload=bytearray(2),
number=number)
self.messageID = message_id
self.messageCode = message_code
@property
def messageID(self):
return self._payload[1]
@messageID.setter
def messageID(self, message_id):
if (message_id > 0xFF) or (message_id < 0x00):
raise MessageError('Could not set message ID. Should be 0 to 255 but was %s.' % message_id)
self._payload[1] = message_id
@property
def messageCode(self):
return self._payload[2]
@messageCode.setter
def messageCode(self, message_code):
if (message_code > 0xFF) or (message_code < 0x00):
raise MessageError('Could not set message code. Should be 0 to 255 but was %s.' % message_code)
self._payload[2] = message_code
def __str__(self): # pylint: disable=W0221
msgCode = self.messageCode
if self.messageID != 1:
return "<ChannelResponse: '%s' on C(%d): %s>" % (
self.TYPES[self.messageID].__name__, self.channelNumber,
'OK' if msgCode == RESPONSE_NO_ERROR else '0x%.2x' % msgCode)
return "<ChannelEvent: C(%d): 0x%.2x>" % (self.channelNumber, msgCode)
# Requested response messages
class ChannelStatusMessage(ChannelMessage):
type = constants.MESSAGE_CHANNEL_STATUS
def __init__(self, number=0x00, status=0x00):
super(ChannelStatusMessage, self).__init__(payload=bytearray(1), number=number)
self.status = status
@property
def status(self):
return self._payload[1]
@status.setter
def status(self, status):
if (status > 0xFF) or (status < 0x00):
raise MessageError('Could not set channel status. Should be 0 to 255 but was %s.' % status)
self._payload[1] = status
class VersionMessage(Message):
type = constants.MESSAGE_VERSION
def __init__(self, version=b'\x00' * 9):
super(VersionMessage, self).__init__(payload=bytearray(9))
self.version = version
@property
def version(self):
return self._payload
@version.setter
def version(self, version):
if len(version) != 9:
raise MessageError('Could not set ANT version (expected 9 bytes).')
self.payload = bytearray(version)
class StartupMessage(Message):
type = constants.MESSAGE_STARTUP
def __init__(self, startupMessage=0x00):
super(StartupMessage, self).__init__(payload=bytearray(1))
self.startupMessage = startupMessage
@property
def startupMessage(self):
return self._payload[0]
@startupMessage.setter
def startupMessage(self, startupMessage):
if (startupMessage > 0xFF) or (startupMessage < 0x00):
raise MessageError('Could not set start-up message. Should be 0 to 255 but was %s.' % startupMessage)
self._payload[0] = startupMessage
class CapabilitiesMessage(Message):
type = constants.MESSAGE_CAPABILITIES
def __init__(self, max_channels=0x00, max_nets=0x00, std_opts=0x00,
adv_opts=0x00, adv_opts2=0x00):
super(CapabilitiesMessage, self).__init__(payload=bytearray(4))
self.maxChannels = max_channels
self.maxNetworks = max_nets
self.stdOptions = std_opts
self.advOptions = adv_opts
if adv_opts2 is not None:
self.advOptions2 = adv_opts2
@property
def maxChannels(self):
return self._payload[0]
@maxChannels.setter
def maxChannels(self, num):
if (num > 0xFF) or (num < 0x00):
raise MessageError('Could not set max channels. Should be 0 to 255 but was %s.' % num)
self._payload[0] = num
@property
def maxNetworks(self):
return self._payload[1]
@maxNetworks.setter
def maxNetworks(self, num):
if (num > 0xFF) or (num < 0x00):
raise MessageError('Could not set max networks. Should be 0 to 255 but was %s.' % num)
self._payload[1] = num
@property
def stdOptions(self):
return self._payload[2]
@stdOptions.setter
def stdOptions(self, num):
if (num > 0xFF) or (num < 0x00):
raise MessageError('Could not set std options. Should be 0 to 255 but was %s.' % num)
self._payload[2] = num
@property
def advOptions(self):
return self._payload[3]
@advOptions.setter
def advOptions(self, num):
if (num > 0xFF) or (num < 0x00):
raise MessageError('Could not set adv options. Should be 0 to 255 but was %s.' % num)
self._payload[3] = num
@property
def advOptions2(self):
return self._payload[4] if len(self._payload) == 5 else 0x00
@advOptions2.setter
def advOptions2(self, num):
if (num > 0xFF) or (num < 0x00):
raise MessageError('Could not set adv options 2. Should be 0 to 255 but was %s.' % num)
if len(self._payload) == 4:
self._payload.append(0)
self._payload[4] = num
class SerialNumberMessage(Message):
type = constants.MESSAGE_SERIAL_NUMBER
def __init__(self, serial=b'\x00' * 4):
super(SerialNumberMessage, self).__init__()
self.serialNumber = serial
@property
def serialNumber(self):
return self._payload
@serialNumber.setter
def serialNumber(self, serial):
if len(serial) != 4:
raise MessageError('Could not set serial number (expected 4 bytes).')
self.payload = bytearray(serial)
| mit |
plasticantifork/PS2Devs | retweet.py | 1 | 1421 | #!/usr/bin/python
import tweepy
import ConfigParser
import sys, os
config = ConfigParser.SafeConfigParser()
config.read(os.path.join(sys.path[0], 'config'))
auth = tweepy.OAuthHandler(config.get('auth','consumer_key'), config.get('auth','consumer_secret'))
auth.set_access_token(config.get('auth','access_token'), config.get('auth','access_token_secret'))
api = tweepy.API(auth)
twitterQuery = config.get('search','query')
try:
with open(os.path.join(sys.path[0], 'lastTweetId'), 'r') as f:
sinceId = f.read()
except IOError:
sinceId = ''
timelineIterator = tweepy.Cursor(api.search, q=twitterQuery, since_id=sinceId).items()
timeline = []
for status in timelineIterator:
timeline.append(status)
try:
lastTweetId = timeline[0].id
except IndexError:
lastTweetId = sinceId
rtCounter = 0
errCounter = 0
timeline.reverse()
for status in timeline:
try:
print '(%(date)s) %(name)s: %(message)s' % \
{ 'date' : status.created_at,
'name' : status.author.screen_name.encode('utf-8'),
'message' : status.text.encode('utf-8') }
api.retweet(status.id)
rtCounter += 1
except tweepy.error.TweepError as e:
errCounter += 1
print e
continue
if errCounter != 0:
print '%d errors occurred' % errCounter
with open(os.path.join(sys.path[0], 'lastTweetId'), 'w') as file:
file.write(str(lastTweetId))
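# Example `config` file layout this script expects (section and option names
# taken from the config.get() calls above; values are placeholders):
#
#   [auth]
#   consumer_key = YOUR_CONSUMER_KEY
#   consumer_secret = YOUR_CONSUMER_SECRET
#   access_token = YOUR_ACCESS_TOKEN
#   access_token_secret = YOUR_ACCESS_TOKEN_SECRET
#
#   [search]
#   query = #PS2 -filter:retweets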
| mit |
rcbops/python-django-buildpackage | django/contrib/staticfiles/handlers.py | 160 | 2359 | import urllib
from urlparse import urlparse
from django.conf import settings
from django.core.handlers.wsgi import WSGIHandler
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
class StaticFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to the static files directory, as
defined by the STATIC_URL setting, and serves those files.
"""
def __init__(self, application, base_dir=None):
self.application = application
if base_dir:
self.base_dir = base_dir
else:
self.base_dir = self.get_base_dir()
self.base_url = urlparse(self.get_base_url())
super(StaticFilesHandler, self).__init__()
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
utils.check_settings()
return settings.STATIC_URL
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return (self.base_url[2] != path and
path.startswith(self.base_url[2]) and not self.base_url[1])
def file_path(self, url):
"""
Returns the relative path to the media file on disk for the given URL.
"""
relative_url = url[len(self.base_url[2]):]
return urllib.url2pathname(relative_url)
def serve(self, request):
"""
Actually serves the request path.
"""
return serve(request, self.file_path(request.path), insecure=True)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404, e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
return super(StaticFilesHandler, self).get_response(request)
def __call__(self, environ, start_response):
if not self._should_handle(environ['PATH_INFO']):
return self.application(environ, start_response)
return super(StaticFilesHandler, self).__call__(environ, start_response)
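# Hedged wiring sketch: the development server stacks this handler around the
# regular WSGI application so STATIC_URL requests are served from STATIC_ROOT:
#
#   from django.core.handlers.wsgi import WSGIHandler
#   application = StaticFilesHandler(WSGIHandler())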
| bsd-3-clause |
carhaas/cdec-semparse | realtime/realtime.py | 5 | 3404 | #!/usr/bin/env python
import argparse
import logging
import sys
import threading
import time
import rt
ABOUT = '''Realtime adaptive translation with cdec (See README.md)
Code by Michael Denkowski
Citation:
@InProceedings{realtime,
author = {Michael Denkowski and Chris Dyer and Alon Lavie},
title = {Learning from Post-Editing: Online Model Adaptation for Statistical Machine Translation},
booktitle = {Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics}
year = {2014},
}
'''
class Parser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write(ABOUT)
self.print_help()
sys.stderr.write('\n{}\n'.format(message))
sys.exit(2)
def handle_line(translator, line, output, ctx_name):
res = translator.command_line(line, ctx_name)
if res:
output.write('{}\n'.format(res))
output.flush()
def test1(translator, input, output, ctx_name):
inp = open(input)
out = open(output, 'w')
for line in inp:
handle_line(translator, line.strip(), out, ctx_name)
    inp.close()
    out.close()
def debug(translator, input):
# Test 1: multiple contexts
threads = []
for i in range(4):
t = threading.Thread(target=test1, args=(translator, input, '{}.out.{}'.format(input, i), str(i)))
threads.append(t)
t.start()
time.sleep(30)
# Test 2: flood
out = open('{}.out.flood'.format(input), 'w')
inp = open(input)
while True:
line = inp.readline()
if not line:
break
line = line.strip()
t = threading.Thread(target=handle_line, args=(translator, line.strip(), out, None))
threads.append(t)
t.start()
time.sleep(1)
translator.drop_ctx(None)
# Join test threads
for t in threads:
t.join()
def main():
parser = Parser()
parser.add_argument('-c', '--config', required=True, help='Config directory')
parser.add_argument('-s', '--state', help='Load state file to default context (saved incremental data)')
parser.add_argument('-n', '--normalize', help='Normalize text (tokenize, translate, detokenize)', action='store_true')
parser.add_argument('-T', '--temp', help='Temp directory (default /tmp)', default='/tmp')
parser.add_argument('-a', '--cache', help='Grammar cache size (default 5)', default='5')
parser.add_argument('-v', '--verbose', help='Info to stderr', action='store_true')
parser.add_argument('-D', '--debug-test', help='Run debug tests on input file')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.INFO)
with rt.RealtimeTranslator(args.config, tmpdir=args.temp, cache_size=int(args.cache), norm=args.normalize) as translator:
# Debugging
if args.debug_test:
debug(translator, args.debug_test)
return
# Load state if given
if args.state:
            translator.load_state(args.state)
# Read lines and commands
while True:
line = sys.stdin.readline()
if not line:
break
line = line.strip()
res = translator.command_line(line)
if res:
sys.stdout.write('{}\n'.format(res))
sys.stdout.flush()
if __name__ == '__main__':
main()
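# --- Hedged example (editor's addition): typical invocation ---
# A sketch only; the config directory name is an assumption. Source sentences
# and commands are read line by line from stdin (see main() above):
#
#   python realtime.py -c my_config.d -v < input.src > output.trg
#
# With -D FILE, the same translator instead runs the threaded debug tests.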
| apache-2.0 |
minhphung171093/OpenERP_V7 | openerp/addons/portal/wizard/share_wizard.py | 13 | 10414 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
UID_ROOT = SUPERUSER_ID
SHARED_DOCS_MENU = "Documents"
SHARED_DOCS_CHILD_MENU = "Shared Documents"
class share_wizard_portal(osv.TransientModel):
"""Inherited share wizard to automatically create appropriate
menus in the selected portal upon sharing with a portal group."""
_inherit = "share.wizard"
def _user_type_selection(self, cr, uid, context=None):
selection = super(share_wizard_portal, self)._user_type_selection(cr, uid, context=context)
selection.extend([('existing',_('Users you already shared with')),
('groups',_('Existing Groups (e.g Portal Groups)'))])
return selection
_columns = {
'user_ids': fields.many2many('res.users', 'share_wizard_res_user_rel', 'share_id', 'user_id', 'Existing users', domain=[('share', '=', True)]),
'group_ids': fields.many2many('res.groups', 'share_wizard_res_group_rel', 'share_id', 'group_id', 'Existing groups', domain=[('share', '=', False)]),
}
def _check_preconditions(self, cr, uid, wizard_data, context=None):
if wizard_data.user_type == 'existing':
self._assert(wizard_data.user_ids,
_('Please select at least one user to share with'),
context=context)
elif wizard_data.user_type == 'groups':
self._assert(wizard_data.group_ids,
_('Please select at least one group to share with'),
context=context)
return super(share_wizard_portal, self)._check_preconditions(cr, uid, wizard_data, context=context)
def _create_or_get_submenu_named(self, cr, uid, parent_menu_id, menu_name, context=None):
if context is None:
context = {}
Menus = self.pool.get('ir.ui.menu')
if not parent_menu_id and context.get('group_id'):
cxt = dict(context)
cxt['ir.ui.menu.full_list'] = True
parent_menu_ids = Menus.search(cr, SUPERUSER_ID,
[('groups_id', 'in', [context.get('group_id')]), ('parent_id', '=', False)], limit=1, context=cxt)
parent_menu_id = parent_menu_ids and parent_menu_ids[0] or False
if not parent_menu_id:
return False
parent_menu = Menus.browse(cr, uid, parent_menu_id) # No context
menu_id = None
max_seq = 10
for child_menu in parent_menu.child_id:
max_seq = max(max_seq, child_menu.sequence)
if child_menu.name == menu_name:
menu_id = child_menu.id
break
if not menu_id:
# not found, create it
menu_id = Menus.create(cr, UID_ROOT,
{'name': menu_name,
'parent_id': parent_menu.id,
'sequence': max_seq + 10, # at the bottom
})
return menu_id
def _sharing_root_menu_id(self, cr, uid, portal, context=None):
"""Create or retrieve root ID of sharing menu in portal menu
:param portal: browse_record of shared group, constructed with a context WITHOUT language
"""
if context is None:
context = {}
ctx = dict(context, group_id=portal.id)
parent_menu_id = self._create_or_get_submenu_named(cr, uid, False, SHARED_DOCS_MENU, context=ctx)
if parent_menu_id:
child_menu_id = self._create_or_get_submenu_named(cr, uid, parent_menu_id, SHARED_DOCS_CHILD_MENU, context=context)
return child_menu_id
def _create_shared_data_menu(self, cr, uid, wizard_data, portal, context=None):
"""Create sharing menus in portal menu according to share wizard options.
:param wizard_data: browse_record of share.wizard
:param portal: browse_record of shared group, constructed with a context WITHOUT language
"""
root_menu_id = self._sharing_root_menu_id(cr, uid, portal, context=context)
if not root_menu_id:
# no specific parent menu, cannot create the sharing menu at all.
return
# Create the shared action and menu
action_def = self._shared_action_def(cr, uid, wizard_data, context=None)
action_id = self.pool.get('ir.actions.act_window').create(cr, UID_ROOT, action_def)
menu_data = {'name': action_def['name'],
'sequence': 10,
'action': 'ir.actions.act_window,'+str(action_id),
'parent_id': root_menu_id,
'icon': 'STOCK_JUSTIFY_FILL'}
menu_id = self.pool.get('ir.ui.menu').create(cr, UID_ROOT, menu_data)
return menu_id
def _create_share_users_group(self, cr, uid, wizard_data, context=None):
# Override of super() to handle the possibly selected "existing users"
# and "existing groups".
# In both cases, we call super() to create the share group, but when
# sharing with existing groups, we will later delete it, and copy its
# access rights and rules to the selected groups.
super_result = super(share_wizard_portal,self)._create_share_users_group(cr, uid, wizard_data, context=context)
# For sharing with existing groups, we don't create a share group, instead we'll
# alter the rules of the groups so they can see the shared data
if wizard_data.group_ids:
# get the list of portals and the related groups to install their menus.
res_groups = self.pool.get('res.groups')
all_portal_group_ids = res_groups.search(cr, UID_ROOT, [('is_portal', '=', True)])
# populate result lines with the users of each group and
# setup the menu for portal groups
for group in wizard_data.group_ids:
if group.id in all_portal_group_ids:
self._create_shared_data_menu(cr, uid, wizard_data, group, context=context)
for user in group.users:
new_line = {'user_id': user.id,
'newly_created': False}
wizard_data.write({'result_line_ids': [(0,0,new_line)]})
selected_group_ids = [x.id for x in wizard_data.group_ids]
res_groups.write(cr, SUPERUSER_ID, selected_group_ids, {'implied_ids': [(4, super_result[0])]})
elif wizard_data.user_ids:
# must take care of existing users, by adding them to the new group, which is super_result[0],
# and adding the shortcut
selected_user_ids = [x.id for x in wizard_data.user_ids]
self.pool.get('res.users').write(cr, UID_ROOT, selected_user_ids, {'groups_id': [(4, super_result[0])]})
self._setup_action_and_shortcut(cr, uid, wizard_data, selected_user_ids, make_home=False, context=context)
# populate the result lines for existing users too
for user in wizard_data.user_ids:
new_line = { 'user_id': user.id,
'newly_created': False}
wizard_data.write({'result_line_ids': [(0,0,new_line)]})
return super_result
def copy_share_group_access_and_delete(self, cr, wizard_data, share_group_id, context=None):
# In the case of sharing with existing groups, the strategy is to copy
# access rights and rules from the share group, so that we can
if not wizard_data.group_ids: return
Groups = self.pool.get('res.groups')
Rules = self.pool.get('ir.rule')
Rights = self.pool.get('ir.model.access')
share_group = Groups.browse(cr, UID_ROOT, share_group_id)
share_rule_ids = [r.id for r in share_group.rule_groups]
for target_group in wizard_data.group_ids:
# Link the rules to the group. This is appropriate because as of
# v6.1, the algorithm for combining them will OR the rules, hence
# extending the visible data.
Rules.write(cr, UID_ROOT, share_rule_ids, {'groups': [(4,target_group.id)]})
_logger.debug("Linked sharing rules from temporary sharing group to group %s", target_group)
# Copy the access rights. This is appropriate too because
# groups have the UNION of all permissions granted by their
# access right lines.
for access_line in share_group.model_access:
Rights.copy(cr, UID_ROOT, access_line.id, default={'group_id': target_group.id})
_logger.debug("Copied access rights from temporary sharing group to group %s", target_group)
# finally, delete it after removing its users
Groups.write(cr, UID_ROOT, [share_group_id], {'users': [(6,0,[])]})
Groups.unlink(cr, UID_ROOT, [share_group_id])
_logger.debug("Deleted temporary sharing group %s", share_group_id)
def _finish_result_lines(self, cr, uid, wizard_data, share_group_id, context=None):
super(share_wizard_portal,self)._finish_result_lines(cr, uid, wizard_data, share_group_id, context=context)
self.copy_share_group_access_and_delete(cr, wizard_data, share_group_id, context=context)
share_wizard_portal()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
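# --- Hedged note (editor's addition): the ORM command tuples used above ---
# write() calls on relational fields follow the OpenERP 7 ORM convention:
#   (0, 0, values)  create a record from `values` and link it (result_line_ids)
#   (4, id)         link an existing record (implied_ids, groups_id, groups)
#   (6, 0, [ids])   replace all links with `ids`; (6, 0, []) unlinks everything,
#                   as done before deleting the temporary share group.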
| agpl-3.0 |
GustavoHennig/ansible | hacking/tests/gen_distribution_version_testcase.py | 58 | 1877 | #!/usr/bin/env python
"""
This script generates test cases for test_distribution_version.py.
To do so, it outputs the relevant files from /etc/*release, the output of platform.dist() and the current ansible_facts regarding the distribution version.
This assumes a working ansible version in the path.
"""
import platform
import os.path
import subprocess
import json
import sys
filelist = [
'/etc/oracle-release',
'/etc/slackware-version',
'/etc/redhat-release',
'/etc/vmware-release',
'/etc/openwrt_release',
'/etc/system-release',
'/etc/alpine-release',
'/etc/release',
'/etc/arch-release',
'/etc/os-release',
'/etc/SuSE-release',
'/etc/gentoo-release',
'/etc/os-release',
'/etc/lsb-release',
'/etc/altlinux-release',
'/etc/os-release',
'/etc/coreos/update.conf',
]
fcont = {}
for f in filelist:
if os.path.exists(f):
s = os.path.getsize(f)
if s > 0 and s < 10000:
with open(f) as fh:
fcont[f] = fh.read()
dist = platform.dist()
facts = ['distribution', 'distribution_version', 'distribution_release', 'distribution_major_version', 'os_family']
try:
ansible_out = subprocess.check_output(
['ansible', 'localhost', '-m', 'setup'])
except subprocess.CalledProcessError as e:
print("ERROR: ansible run failed, output was: \n")
print(e.output)
sys.exit(e.returncode)
parsed = json.loads(ansible_out[ansible_out.index('{'):])
ansible_facts = {}
for fact in facts:
try:
ansible_facts[fact] = parsed['ansible_facts']['ansible_'+fact]
    except KeyError:
ansible_facts[fact] = "N/A"
nicename = ansible_facts['distribution'] + ' ' + ansible_facts['distribution_version']
output = {
'name': nicename,
'input': fcont,
'platform.dist': dist,
'result': ansible_facts,
}
print(json.dumps(output, indent=4))
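# --- Hedged example (editor's addition): shape of the emitted test case ---
# Illustrative only; the concrete values below are assumptions.
# {
#     "name": "Ubuntu 16.04",
#     "input": {"/etc/os-release": "NAME=\"Ubuntu\"\n..."},
#     "platform.dist": ["Ubuntu", "16.04", "xenial"],
#     "result": {"distribution": "Ubuntu",
#                "distribution_version": "16.04", ...}
# }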
| gpl-3.0 |
barnsnake351/neutron | neutron/tests/tempest/common/generator/valid_generator.py | 34 | 2931 | # Copyright 2014 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import six
import neutron.tests.tempest.common.generator.base_generator as base
LOG = logging.getLogger(__name__)
class ValidTestGenerator(base.BasicGeneratorSet):
@base.generator_type("string")
@base.simple_generator
def generate_valid_string(self, schema):
size = schema.get("minLength", 1)
# TODO(dkr mko): handle format and pattern
return "x" * size
@base.generator_type("integer")
@base.simple_generator
def generate_valid_integer(self, schema):
# TODO(dkr mko): handle multipleOf
if "minimum" in schema:
minimum = schema["minimum"]
if "exclusiveMinimum" not in schema:
return minimum
else:
return minimum + 1
if "maximum" in schema:
maximum = schema["maximum"]
if "exclusiveMaximum" not in schema:
return maximum
else:
return maximum - 1
return 0
@base.generator_type("object")
@base.simple_generator
def generate_valid_object(self, schema):
obj = {}
for k, v in six.iteritems(schema["properties"]):
obj[k] = self.generate_valid(v)
return obj
def generate(self, schema):
schema_type = schema["type"]
if isinstance(schema_type, list):
if "integer" in schema_type:
schema_type = "integer"
else:
raise Exception("non-integer list types not supported")
result = []
if schema_type not in self.types_dict:
raise TypeError("generator (%s) doesn't support type: %s"
% (self.__class__.__name__, schema_type))
for generator in self.types_dict[schema_type]:
ret = generator(schema)
if ret is not None:
if isinstance(ret, list):
result.extend(ret)
elif isinstance(ret, tuple):
result.append(ret)
else:
raise Exception("generator (%s) returns invalid result: %s"
% (generator, ret))
return result
def generate_valid(self, schema):
return self.generate(schema)[0][1]
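# --- Hedged example (editor's addition): generating valid values ---
# A minimal sketch; the expected results in the comments are illustrative
# readings of the generators above, not recorded outputs.
#
# gen = ValidTestGenerator()
# gen.generate_valid({"type": "integer", "minimum": 5})    # -> 5
# gen.generate_valid({"type": "string", "minLength": 3})   # -> "xxx"
# gen.generate_valid({"type": "object",
#                     "properties": {"name": {"type": "string"}}})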
| apache-2.0 |
ufal/lindat-kontext | lib/conclib.py | 2 | 20954 | # Copyright (c) 2003-2014 Pavel Rychly, Vojtech Kovar, Milos Jakubicek, Milos Husak, Vit Baisa
# Copyright (c) 2014 Institute of the Czech National Corpus
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
import os
import sys
import time
import logging
try:
import cPickle as pickle
except ImportError:
import pickle
import manatee
import settings
from translation import ugettext as _
from pyconc import PyConc
from kwiclib import tokens2strclass
from l10n import import_string
import plugins
from concworker import GeneralWorker
import corplib
TASK_TIME_LIMIT = settings.get_int('calc_backend', 'task_time_limit', 300)
class ConcCalculationControlException(Exception):
pass
def _wait_for_conc(cache_map, q, subchash, minsize):
"""
Called by webserver process (i.e. not by the background worker).
Waits in a loop until a minimal acceptable cached concordance occurs
(i.e. in general this does not wait for the complete concordance -
the fact depends on the 'minisize' parameter; if -1 then only whole conc. is
accepted).
arguments:
cache_map -- a CacheMapping instance
q -- a query list
subchash -- a hash of a subcorpus (if any)
minsize -- what intermediate concordance size we will wait for (-1 => whole conc.)
"""
hard_limit = 70 # num iterations to wait for at least something
i = 1
while _min_conc_unfinished(cache_map, q, subchash, minsize) and i < hard_limit:
time.sleep(i * 0.1)
i += 1
if not os.path.isfile(cache_map.cache_file_path(subchash, q)) and i >= hard_limit:
raise ConcCalculationControlException('Hard limit for intermediate concordance exceeded.')
def _min_conc_unfinished(cache_map, q, subchash, minsize):
"""
Return True if a specific concordance calculation
has not reached a minimal viewable size yet.
Otherwise it returns False (= we can show a partial
result).
In case the calculation finished due to an error
the function throws a ConcCalculationControlException.
"""
status = cache_map.get_calc_status(subchash, q)
if status is None:
cache_map.del_full_entry(subchash, q)
raise ConcCalculationControlException(
'Missing status information (invalid cache entry has been removed).')
status.test_error()
return not status.has_some_result(minsize=minsize)
def _contains_shuffle_seq(q_ops):
"""
Tests whether the provided query sequence contains a subsequence
of 'shuffle' operation (e.g. on ['foo', 'bar', 'f', 'f', 'something'] returns True)
"""
prev_shuffle = False
for item in q_ops:
if item == 'f':
if prev_shuffle:
return True
else:
prev_shuffle = True
else:
prev_shuffle = False
return False
def _cancel_async_task(cache_map, subchash, q):
cachefile = cache_map.cache_file_path(subchash, q)
status = cache_map.get_calc_status(subchash, q)
backend = settings.get('calc_backend', 'type')
if backend == 'multiprocessing':
logging.getLogger(__name__).warning('Unable to cancel async task in multiprocessing mode')
elif backend in ('celery', 'konserver') and status:
import bgcalc
try:
if status.task_id:
app = bgcalc.calc_backend_client(settings)
app.control.revoke(status.task_id, terminate=True, signal='SIGKILL')
except IOError:
pass
cache_map.del_entry(subchash, q)
_del_silent(cachefile)
def _del_silent(path):
try:
os.remove(path)
except (OSError, TypeError):
pass
def _get_cached_conc(corp, subchash, q, minsize):
"""
Loads a concordance from cache. The function
tries to find at least a sublist of 'q' (starting
from zero) to avoid full concordance search if
possible.
arguments:
corp -- a respective manatee.Corpus object
subchash -- a subcorpus hash (generated by PyConc)
q -- a query representation list
minsize -- a minimum concordance size to return immediately (synchronously)
returns:
a 2-tuple [an index within 'q' where to start with non-cached results], [a concordance instance]
"""
start_time = time.time()
q = tuple(q)
cache_map = plugins.runtime.CONC_CACHE.instance.get_mapping(corp)
cache_map.refresh_map()
calc_status = cache_map.get_calc_status(subchash, q)
if calc_status:
if calc_status.error is None:
corp_mtime = corplib.corp_mtime(corp)
if calc_status.created - corp_mtime < 0:
logging.getLogger(__name__).warning(
'Removed outdated cache file (older than corpus indices)')
cache_map.del_full_entry(subchash, q)
else:
logging.getLogger(__name__).warning(
'Removed failed calculation cache record (error: {0}'.format(calc_status.error))
cache_map.del_full_entry(subchash, q)
if _contains_shuffle_seq(q):
srch_from = 1
else:
srch_from = len(q)
ans = (0, None)
# try to find the most complete cached operation
# (e.g. query + filter + sample)
for i in range(srch_from, 0, -1):
cachefile = cache_map.cache_file_path(subchash, q[:i])
if cachefile:
try:
_wait_for_conc(cache_map=cache_map, subchash=subchash, q=q[:i], minsize=minsize)
except ConcCalculationControlException as ex:
_cancel_async_task(cache_map, subchash, q[:i])
logging.getLogger(__name__).warning(
'Removed broken concordance cache record. Original error: %s' % (ex,))
continue
conccorp = corp
for qq in reversed(q[:i]): # find the right main corp, if aligned
if qq.startswith('x-'):
conccorp = manatee.Corpus(qq[2:])
break
conc = None
try:
if not _min_conc_unfinished(cache_map=cache_map, subchash=subchash, q=q[:i], minsize=minsize):
conc = PyConc(conccorp, 'l', cachefile, orig_corp=corp)
except (ConcCalculationControlException, manatee.FileAccessError) as ex:
logging.getLogger(__name__).error(
'Failed to join unfinished calculation: {0}'.format(ex))
_cancel_async_task(cache_map, subchash, q[:i])
continue
ans = (i, conc)
break
logging.getLogger(__name__).debug('get_cached_conc(%s, [%s]) -> %s, %01.4f'
% (corp.corpname, ','.join(q), 'hit' if ans[1] else 'miss',
time.time() - start_time))
return ans
def _get_async_conc(corp, user_id, q, save, subchash, samplesize, fullsize, minsize):
"""
Note: 'save' argument is present because of bonito-open-3.45.11 compatibility but it is
currently not used ----- TODO remove it
"""
backend = settings.get('calc_backend', 'type')
if backend == 'multiprocessing':
from concworker import mp
mp.create_task(user_id, corp, subchash, q, samplesize).start()
elif backend in ('celery', 'konserver'):
import bgcalc
app = bgcalc.calc_backend_client(settings)
ans = app.send_task('worker.conc_register', (user_id, corp.corpname, getattr(corp, 'subcname', None),
subchash, q, samplesize, TASK_TIME_LIMIT), time_limit=10) # register should be fast
ans.get() # = wait for task registration
else:
raise ValueError('Unknown concordance calculation backend: %s' % (backend,))
cache_map = plugins.runtime.CONC_CACHE.instance.get_mapping(corp)
try:
_wait_for_conc(cache_map=cache_map, subchash=subchash, q=q, minsize=minsize)
except Exception as e:
_cancel_async_task(cache_map, subchash, q)
raise e
return PyConc(corp, 'l', cache_map.cache_file_path(subchash, q))
def _get_sync_conc(worker, corp, q, save, subchash, samplesize):
status = worker.create_new_calc_status()
conc = worker.compute_conc(corp, q, samplesize)
conc.sync() # wait for the computation to finish
status.finished = True
status.concsize = conc.size()
if save:
cache_map = plugins.runtime.CONC_CACHE.instance.get_mapping(corp)
cachefile, stored_pidfile = cache_map.add_to_map(
subchash, q[:1], conc.size(), calc_status=status)
conc.save(cachefile)
# update size in map file
cache_map.add_to_map(subchash, q[:1], conc.size())
return conc
def get_conc(corp, user_id, minsize=None, q=None, fromp=0, pagesize=0, async=0, save=0, samplesize=0):
"""
corp -- a respective manatee.Corpus object
user_id -- database user ID
minsize -- a min size of concordance to return immediately (even if it is not finished yet)
(-1 => whole concordance)
q -- a tuple/list containing an extended query representation
(e.g. ['aword,[] within <doc id="foo" />', 'p0 ...'])
fromp -- a page offset
pagesize -- a page size (in lines, related to 'fromp')
async -- if 1 then KonText spawns an asynchronous process to calculate the concordance
and will provide results as they are ready
save -- specifies whether to use a caching mechanism
samplesize -- ?
"""
if not q:
return None
q = tuple(q)
if not minsize:
if len(q) > 1: # subsequent concordance processing by its methods
# needs whole concordance
minsize = -1
else:
minsize = fromp * pagesize
subchash = getattr(corp, 'subchash', None)
conc = None
fullsize = -1
# try to locate concordance in cache
if save:
calc_from, conc = _get_cached_conc(corp, subchash, q, minsize)
if calc_from == len(q):
save = 0
if not conc and q[0][0] == 'R': # online sample
q_copy = list(q)
q_copy[0] = q[0][1:]
q_copy = tuple(q_copy)
t, c = _get_cached_conc(corp, subchash, q_copy, -1)
if c:
fullsize = c.fullsize()
else:
calc_from = 1
async = 0
worker = GeneralWorker()
# cache miss or not used
if not conc:
calc_from = 1
if async and len(q) == 1: # asynchronous processing
conc = _get_async_conc(corp=corp, user_id=user_id, q=q, save=save, subchash=subchash,
samplesize=samplesize, fullsize=fullsize, minsize=minsize)
else:
conc = _get_sync_conc(worker=worker, corp=corp, q=q, save=save, subchash=subchash,
samplesize=samplesize)
# save additional concordance actions to cache (e.g. sample)
for act in range(calc_from, len(q)):
command, args = q[act][0], q[act][1:]
conc.exec_command(command, args)
if command in 'gae': # user specific/volatile actions, cannot save
save = 0
if save:
cache_map = plugins.runtime.CONC_CACHE.instance.get_mapping(corp)
cachefile, stored_status = cache_map.add_to_map(subchash, q[:act + 1], conc.size(),
calc_status=worker.create_new_calc_status())
if stored_status and not stored_status.finished:
_wait_for_conc(cache_map=cache_map, subchash=subchash, q=q[:act + 1], minsize=-1)
elif not stored_status:
conc.save(cachefile)
cache_map.update_calc_status(
subchash, q[:act + 1], dict(finished=True, concsize=conc.size()))
return conc
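# --- Hedged example (editor's addition): a typical get_conc() call ---
# A sketch only: `corp` must come from corplib.CorpusManager (so subcorpus
# attributes like `subchash` are set), and the query tuple below is an
# illustrative extended query, not project test data.
#
# conc = get_conc(corp, user_id=1, q=('aword,[word="dog"]',),
#                 fromp=1, pagesize=40, async=0, save=1)
# print conc.size()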
def conc_is_sorted(q):
ans = True
for item in q:
if item[0] in ('r', 'f'):
ans = False
elif item[0] in ('s', ):
ans = True
return ans
def get_conc_desc(corpus, q=None, subchash=None, translate=True, skip_internals=True):
"""
arguments:
corpus -- an extended version (corpname attribute must be present) of
manatee.Corpus object as provided by corplib.CorpusManager.get_Corpus
q -- tuple/list of query elements
subchash -- hashed subcorpus name as provided by corplib.CorpusManager.get_Corpus
translate -- if True then all the messages are translated according to the current
thread's locale information
"""
cache_map = plugins.runtime.CONC_CACHE.instance.get_mapping(corpus)
q = tuple(q)
def get_size(pos):
return cache_map.get_stored_size(subchash, q[:pos + 1])
def is_aligned_op(query_items, pos):
return (query_items[pos].startswith('x-') and query_items[pos + 1] == 'p0 0 1 []' and
query_items[pos + 2].startswith('x-'))
def detect_internal_op(qx, pos):
if pos > len(qx) - 3 or not skip_internals:
return False, get_size(pos)
align_end = 0
for j in range(pos, len(qx) - 2, 3):
if is_aligned_op(qx, j):
align_end = j + 2
return align_end > 0, get_size(align_end)
if q is None:
q = []
    def _t(s): return _(s) if translate else s  # identity when translation is disabled
desctext = {'q': _t('Query'),
'a': _t('Query'),
'r': _t('Random sample'),
's': _t('Sort'),
'f': _t('Shuffle'),
                'D': _t('Remove nested matches'),
                'F': _t('First hits in documents'),
'n': _t('Negative filter'),
'N': _t('Negative filter (excluding KWIC)'),
'p': _t('Positive filter'),
'P': _t('Positive filter (excluding KWIC)'),
'x': _t('Switch KWIC'),
}
desc = []
i = 0
while i < len(q):
is_align_op, size = detect_internal_op(q, i)
# in case of aligned corpus (= 3 operations) we update previous
# user operation and ignore the rest (as it is just an internal operation
# a common user does not understand).
if is_align_op:
last_user_op_idx = i - 1
while is_align_op:
if last_user_op_idx >= 0:
tmp = desc[last_user_op_idx]
desc[last_user_op_idx] = tmp[:4] + (size,) + tmp[-1:]
i += 3 # ignore aligned corpus operation, i is now the next valid operation
is_align_op, size = detect_internal_op(q, i)
if i > len(q) - 1:
break
size = get_size(i)
opid = q[i][0]
args = q[i][1:]
url1 = [('q', qi) for qi in q[:i]]
url2 = [('q', qi) for qi in q[:i + 1]]
op = desctext.get(opid)
if opid == 's' and args[0] != '*' and i > 0:
sortopt = {'-1<0': 'left context',
'0<0~': 'node',
'1>0~': 'right context'}
sortattrs = args.split()
if len(sortattrs) > 2:
op = 'Multilevel Sort'
args = '%s in %s' % (sortattrs[0].split('/')[0],
sortopt.get(sortattrs[1][:4], sortattrs[1]))
url1.append(('skey', {'-1': 'lc', '0<': 'kw', '1>': 'rc'}.get(sortattrs[1][:2], '')))
elif opid == 'f':
size = ''
args = _('enabled')
elif opid == 'X': # aligned corpora changes (<= orig_size) total size
desc[-1] = desc[-1][:4] + (size,) + desc[-1][5:]
if op:
desc.append((op, args, url1, url2, size, opid))
i += 1
return desc
def get_full_ref(corp, pos):
corpus_encoding = corp.get_conf('ENCODING')
data = {}
refs = [(n == '#' and ('#', str(pos)) or
(n, corp.get_attr(n).pos2str(pos)))
for n in corp.get_conf('FULLREF').split(',') if n != settings.get('corpora', 'speech_segment_struct_attr')]
data['Refs'] = [{'name': n == '#' and _('Token number') or corp.get_conf(n + '.LABEL') or n,
'val': import_string(v, corpus_encoding)} for n, v in refs]
for n, v in refs:
data[n.replace('.', '_')] = import_string(v, corpus_encoding)
return data
def get_detail_context(corp, pos, hitlen=1, detail_left_ctx=40, detail_right_ctx=40,
attrs=None, structs='', detail_ctx_incr=60):
data = {}
corpus_encoding = corp.get_conf('ENCODING')
wrapdetail = corp.get_conf('WRAPDETAIL')
if wrapdetail:
data['wrapdetail'] = '<%s>' % wrapdetail
        if wrapdetail not in structs.split(','):
data['deletewrap'] = True
structs = wrapdetail + ',' + structs
else:
data['wrapdetail'] = ''
try:
maxdetail = int(corp.get_conf('MAXDETAIL'))
if maxdetail == 0:
maxdetail = int(corp.get_conf('MAXCONTEXT'))
if maxdetail == 0:
maxdetail = sys.maxint
    except Exception:
maxdetail = 0
if maxdetail:
if detail_left_ctx > maxdetail:
detail_left_ctx = maxdetail
if detail_right_ctx > maxdetail:
detail_right_ctx = maxdetail
if detail_left_ctx > pos:
detail_left_ctx = pos
query_attrs = 'word' if attrs is None else ','.join(attrs)
cr = manatee.CorpRegion(corp, query_attrs, structs)
region_left = tokens2strclass(cr.region(pos - detail_left_ctx, pos))
region_kwic = tokens2strclass(cr.region(pos, pos + hitlen))
region_right = tokens2strclass(cr.region(pos + hitlen,
pos + hitlen + detail_right_ctx))
for seg in region_left + region_kwic + region_right:
seg['str'] = import_string(seg['str'].replace(
'===NONE===', ''), from_encoding=corpus_encoding)
for seg in region_kwic:
if not seg['class']:
seg['class'] = 'coll'
data['content'] = region_left + region_kwic + region_right
refbase = [('pos', pos)]
if hitlen != 1:
refbase.append(('hitlen', hitlen))
data['expand_left_args'] = dict(refbase + [('detail_left_ctx', detail_left_ctx + detail_ctx_incr),
('detail_right_ctx', detail_right_ctx)])
data['expand_right_args'] = dict(refbase + [('detail_left_ctx', detail_left_ctx),
('detail_right_ctx', detail_right_ctx + detail_ctx_incr)])
data['righttoleft'] = corp.get_conf('RIGHTTOLEFT')
data['pos'] = pos
data['maxdetail'] = maxdetail
return data
def fcs_scan(corpname, scan_query, max_ter, start):
"""
aux function for federated content search: operation=scan
"""
if not scan_query:
raise Exception(7, 'scan_query', 'Mandatory parameter not supplied')
query = scan_query.replace('+', ' ') # convert URL spaces
exact_match = False
    if 'exact' in query.lower() and '=' not in query:  # e.g. lemma EXACT "dog"
        pos = query.lower().index('exact')  # first occurrence of EXACT
        query = query[:pos] + '=' + query[pos + 5:]  # replace the first EXACT with '='
exact_match = True
corp = manatee.Corpus(corpname)
attrs = corp.get_conf('ATTRLIST').split(',') # list of available attrs
try:
if '=' in query:
attr, value = query.split('=')
attr = attr.strip()
value = value.strip()
else: # must be in format attr = value
raise Exception
if '"' in attr:
raise Exception
if '"' in value:
if value[0] == '"' and value[-1] == '"':
value = value[1:-1].strip()
else:
raise Exception
except Exception:
raise Exception(10, scan_query, 'Query syntax error')
    if attr not in attrs:
raise Exception(16, attr, 'Unsupported index')
if exact_match:
wlpattern = '^' + value + '$'
else:
wlpattern = '.*' + value + '.*'
wl = corplib.wordlist(corp, wlattr=attr, wlpat=wlpattern, wlsort='f')
return [(d['str'], d['freq']) for d in wl][start:][:max_ter]
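# --- Hedged note (editor's addition): scan_query formats accepted above ---
# Illustrative inputs; attribute and value are assumptions:
#   'word=dog'           substring match on attribute `word` (pattern .*dog.*)
#   'lemma="dog"'        quoted value, still a substring match
#   'lemma exact "dog"'  EXACT is rewritten to '=' and anchors the pattern
#                        as ^dog$
# '+' characters in scan_query are first converted back to spaces.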
def sort_line_groups(conc, group_ids):
ids = manatee.IntVector()
strs = manatee.StrVector()
for g in group_ids:
ids.append(g)
strs.append('%05d' % g)
conc.linegroup_sort(ids, strs)
| gpl-2.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Pygments-2.0.2/pygments/styles/colorful.py | 135 | 2778 | # -*- coding: utf-8 -*-
"""
pygments.styles.colorful
~~~~~~~~~~~~~~~~~~~~~~~~
A colorful style, inspired by CodeRay.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class ColorfulStyle(Style):
"""
A colorful style, inspired by CodeRay.
"""
default_style = ""
styles = {
Whitespace: "#bbbbbb",
Comment: "#888",
Comment.Preproc: "#579",
Comment.Special: "bold #cc0000",
Keyword: "bold #080",
Keyword.Pseudo: "#038",
Keyword.Type: "#339",
Operator: "#333",
Operator.Word: "bold #000",
Name.Builtin: "#007020",
Name.Function: "bold #06B",
Name.Class: "bold #B06",
Name.Namespace: "bold #0e84b5",
Name.Exception: "bold #F00",
Name.Variable: "#963",
Name.Variable.Instance: "#33B",
Name.Variable.Class: "#369",
Name.Variable.Global: "bold #d70",
Name.Constant: "bold #036",
Name.Label: "bold #970",
Name.Entity: "bold #800",
Name.Attribute: "#00C",
Name.Tag: "#070",
Name.Decorator: "bold #555",
String: "bg:#fff0f0",
String.Char: "#04D bg:",
String.Doc: "#D42 bg:",
String.Interpol: "bg:#eee",
String.Escape: "bold #666",
String.Regex: "bg:#fff0ff #000",
String.Symbol: "#A60 bg:",
String.Other: "#D20",
Number: "bold #60E",
Number.Integer: "bold #00D",
Number.Float: "bold #60E",
Number.Hex: "bold #058",
Number.Oct: "bold #40E",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#A00000",
Generic.Inserted: "#00A000",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #c65d09",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "#F00 bg:#FAA"
}
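# --- Hedged example (editor's addition): using this style ---
# A minimal sketch with the public Pygments API; commented out so importing
# the style module stays side-effect free.
#
# from pygments import highlight
# from pygments.lexers import PythonLexer
# from pygments.formatters import HtmlFormatter
# print(highlight('print(42)', PythonLexer(),
#                 HtmlFormatter(style='colorful')))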
| mit |
pyspeckit/pyspeckit | pyspeckit/cubes/mapplot.py | 4 | 16759 | """
MapPlot
-------
Make plots of the cube and interactively connect them to spectrum plotting.
This is really an interactive component of the package; nothing in here is
meant for publication-quality plots, but more for user interactive analysis.
That said, the plotter makes use of `APLpy <https://github.com/aplpy/aplpy>`_,
so it is possible to make publication-quality plots.
:author: Adam Ginsburg
:date: 03/17/2011
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function
import matplotlib
import matplotlib.figure
import numpy as np
import copy
import itertools
import six
try:
import astropy.wcs as pywcs
import astropy.io.fits as pyfits
pywcsOK = True
except ImportError:
try:
import pyfits
import pywcs
pywcsOK = True
except ImportError:
pywcsOK = False
try:
import aplpy
icanhasaplpy = True
except Exception:  # aplpy fails with generic exceptions instead of ImportError
icanhasaplpy = False
from . import cubes
class MapPlotter(object):
"""
Class to plot a spectrum
See `mapplot` for use documentation; this docstring is only for
initialization.
"""
def __init__(self, Cube=None, figure=None, doplot=False, **kwargs):
"""
Create a map figure for future plotting
"""
import matplotlib.pyplot
self._pyplot = matplotlib.pyplot
# figure out where to put the plot
if isinstance(figure,matplotlib.figure.Figure):
self.figure = figure
elif type(figure) is int:
self.figure = self._pyplot.figure(figure)
else:
self.figure = None
self.axis = None
self.FITSFigure = None
self._click_marks = []
self._circles = []
self._clickX = None
self._clickY = None
self.overplot_colorcycle = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y'])
self.overplot_linestyle = '-'
self.Cube = Cube
if self.Cube is not None:
self.header = cubes.flatten_header(self.Cube.header, delete=True)
if pywcsOK:
self.wcs = pywcs.WCS(self.header)
if doplot: self.mapplot(**kwargs)
def __call__(self, **kwargs):
""" see mapplot """
return self.mapplot(**kwargs)
def mapplot(self, convention='calabretta', colorbar=True, useaplpy=True,
vmin=None, vmax=None, cmap=None, plotkwargs={}, **kwargs):
"""
Plot up a map based on an input data cube.
The map to be plotted is selected using `makeplane`.
The `estimator` keyword argument is passed to that function.
The plotted map, once shown, is interactive. You can click on it with any
of the three mouse buttons.
Button 1 or keyboard '1':
Plot the selected pixel's spectrum in another window. Mark the
clicked pixel with an 'x'
Button 2 or keyboard 'o':
Overplot a second (or third, fourth, fifth...) spectrum in the
external plot window
Button 3:
Disconnect the interactive viewer
You can also click-and-drag with button 1 to average over a circular
region. This same effect can be achieved by using the 'c' key to
set the /c/enter of a circle and the 'r' key to set its /r/adius (i.e.,
hover over the center and press 'c', then hover some distance away and
press 'r').
Parameters
----------
convention : 'calabretta' or 'griesen'
The default projection to assume for Galactic data when plotting
with aplpy.
colorbar : bool
Whether to show a colorbar
plotkwargs : dict, optional
A dictionary of keyword arguments to pass to aplpy.show_colorscale
or matplotlib.pyplot.imshow
useaplpy : bool
Use aplpy if a FITS header is available
vmin, vmax: float or None
Override values for the vmin/vmax values. Will be automatically
determined if left as None
.. todo:
Allow mapplot in subfigure
"""
if (self.figure is None):
self.figure = self._pyplot.figure()
elif (not self._pyplot.fignum_exists(self.figure.number)):
self.figure = self._pyplot.figure()
else:
self._disconnect()
self.figure.clf()
# this is where the map is created; everything below this is just plotting
self.makeplane(**kwargs)
        # have to pop out estimator so that kwargs can be passed to imshow
if 'estimator' in kwargs:
kwargs.pop('estimator')
# Below here is all plotting stuff
        if vmin is None: vmin = self.plane[self.plane == self.plane].min()  # NaN-safe min
        if vmax is None: vmax = self.plane[self.plane == self.plane].max()  # NaN-safe max
if icanhasaplpy and useaplpy:
self.fitsfile = pyfits.PrimaryHDU(data=self.plane,header=self.header)
self.FITSFigure = aplpy.FITSFigure(self.fitsfile,figure=self.figure,convention=convention)
self.FITSFigure.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap, **plotkwargs)
if hasattr(self.FITSFigure, '_ax1'):
self.axis = self.FITSFigure._ax1
else:
self.axis = self.FITSFigure.ax
if colorbar:
try:
self.FITSFigure.add_colorbar()
except Exception as ex:
print("ERROR: Could not create colorbar! Error was %s" % str(ex))
self._origin = 0 # FITS convention
# TODO: set _origin to 1 if using PIXEL units, not real wcs
else:
self.axis = self.figure.add_subplot(111)
if hasattr(self,'colorbar') and self.colorbar is not None:
if self.colorbar.ax in self.axis.figure.axes:
self.axis.figure.delaxes(self.colorbar.ax)
self.axis.imshow(self.plane, vmin=vmin, vmax=vmax, cmap=cmap, **plotkwargs)
if colorbar:
try:
self.colorbar = self._pyplot.colorbar(self.axis.images[0])
except Exception as ex:
print("ERROR: Could not create colorbar! Error was %s" % str(ex))
self._origin = 0 # normal convention
self.canvas = self.axis.figure.canvas
self._connect()
def _connect(self):
""" Connect click, click up (release click), and key press to events """
self.clickid = self.canvas.callbacks.connect('button_press_event',self.click)
self.clickupid = self.canvas.callbacks.connect('button_release_event',self.plot_spectrum)
self.keyid = self.canvas.callbacks.connect('key_press_event',self.plot_spectrum)
def _disconnect(self):
""" Disconnect click, click up (release click), and key press from events """
if hasattr(self,'canvas'):
self.canvas.mpl_disconnect(self.clickid)
self.canvas.mpl_disconnect(self.clickupid)
self.canvas.mpl_disconnect(self.keyid)
def makeplane(self, estimator=np.nanmean):
"""
Create a "plane" view of the cube, either by slicing or projecting it
or by showing a slice from the best-fit model parameter cube.
Parameters
----------
estimator : [ function | 'max' | 'int' | FITS filename | integer | slice ]
A non-pythonic, non-duck-typed variable. If it's a function, apply that function
along the cube's spectral axis to obtain an estimate (e.g., mean, min, max, etc.).
'max' will do the same thing as passing np.max
'int' will attempt to integrate the image (which is why I didn't duck-type)
(integrate means sum and multiply by dx)
a .fits filename will be read using pyfits (so you can make your own cover figure)
an integer will get the n'th slice in the parcube if it exists
If it's a slice, slice the input data cube along the Z-axis with this slice
"""
# THIS IS A HACK!!! isinstance(a function, function) must be a thing...
FUNCTION = type(np.max)
        # estimator is NOT duck-typed
        self.plane = None  # so the ValueError below fires if no branch matches
        if type(estimator) is FUNCTION:
self.plane = estimator(self.Cube.cube,axis=0)
elif isinstance(estimator, six.string_types):
if estimator == 'max':
self.plane = self.Cube.cube.max(axis=0)
elif estimator == 'int':
dx = np.abs(self.Cube.xarr[1:] - self.Cube.xarr[:-1])
dx = np.concatenate([dx,[dx[-1]]])
self.plane = (self.Cube.cube * dx[:,np.newaxis,np.newaxis]).sum(axis=0)
elif estimator[-5:] == ".fits":
self.plane = pyfits.getdata(estimator)
elif type(estimator) is slice:
self.plane = self.Cube.cube[estimator,:,:]
elif type(estimator) is int:
if hasattr(self.Cube,'parcube'):
self.plane = self.Cube.parcube[estimator,:,:]
if self.plane is None:
raise ValueError("Invalid estimator %s" % (str(estimator)))
if np.sum(np.isfinite(self.plane)) == 0:
raise ValueError("Map is all NaNs or infs. Check your estimator or your input cube.")
def click(self,event):
"""
Record location of downclick
"""
if event.inaxes:
self._clickX = np.round(event.xdata) - self._origin
self._clickY = np.round(event.ydata) - self._origin
def plot_spectrum(self, event, plot_fit=True):
"""
Connects map cube to Spectrum...
"""
self.event = event
if event.inaxes:
clickX = np.round(event.xdata) - self._origin
clickY = np.round(event.ydata) - self._origin
# grab toolbar info so that we don't do anything if a tool is selected
tb = self.canvas.toolbar
if tb.mode != '':
return
elif event.key is not None:
if event.key == 'c':
self._center = (clickX-1,clickY-1)
self._remove_circle()
self._add_click_mark(clickX,clickY,clear=True)
elif event.key == 'r':
x,y = self._center
self._add_circle(x,y,clickX,clickY)
self.circle(x,y,clickX-1,clickY-1)
elif event.key == 'o':
clickX,clickY = round(clickX),round(clickY)
print("OverPlotting spectrum from point %i,%i" % (clickX-1,clickY-1))
color = next(self.overplot_colorcycle)
self._add_click_mark(clickX,clickY,clear=False, color=color)
self.Cube.plot_spectrum(clickX-1,clickY-1,clear=False, color=color, linestyle=self.overplot_linestyle)
elif event.key in ('1','2'):
event.button = int(event.key)
event.key = None
self.plot_spectrum(event)
elif (hasattr(event,'button') and event.button in (1,2)
and not (self._clickX == clickX and self._clickY == clickY)):
if event.button == 1:
self._remove_circle()
clear=True
color = 'k'
linestyle = 'steps-mid'
else:
color = next(self.overplot_colorcycle)
linestyle = self.overplot_linestyle
clear=False
rad = ( (self._clickX-clickX)**2 + (self._clickY-clickY)**2 )**0.5
print("Plotting circle from point %i,%i to %i,%i (r=%f)" % (self._clickX,self._clickY,clickX,clickY,rad))
self._add_circle(self._clickX,self._clickY,clickX,clickY)
self.circle(self._clickX,self._clickY,clickX,clickY,clear=clear,linestyle=linestyle,color=color)
elif hasattr(event,'button') and event.button is not None:
if event.button==1:
clickX,clickY = round(clickX),round(clickY)
print("Plotting spectrum from point %i,%i" % (clickX,clickY))
self._remove_circle()
self._add_click_mark(clickX,clickY,clear=True)
self.Cube.plot_spectrum(clickX,clickY,clear=True)
if plot_fit: self.Cube.plot_fit(clickX, clickY, silent=True)
elif event.button==2:
clickX,clickY = round(clickX),round(clickY)
print("OverPlotting spectrum from point %i,%i" % (clickX,clickY))
color = next(self.overplot_colorcycle)
self._add_click_mark(clickX,clickY,clear=False, color=color)
self.Cube.plot_spectrum(clickX,clickY,clear=False, color=color, linestyle=self.overplot_linestyle)
elif event.button==3:
print("Disconnecting GAIA-like tool")
self._disconnect()
else:
print("Call failed for some reason: ")
print("event: ",event)
else:
pass
# never really needed... warn("Click outside of axes")
def _add_click_mark(self,x,y,clear=False,color='k'):
"""
Add an X at some position
"""
if clear:
self._clear_click_marks()
if self.FITSFigure is not None:
label = 'xmark%i' % (len(self._click_marks)+1)
x,y = self.FITSFigure.pixel2world(x,y)
self.FITSFigure.show_markers(x,y,marker='x',c=color,layer=label)
self._click_marks.append( label )
else:
self._click_marks.append( self.axis.plot(x,y,'kx') )
self.refresh()
def _clear_click_marks(self):
"""
Remove all marks added by previous clicks
"""
if self.FITSFigure is not None:
for mark in self._click_marks:
if mark in self.FITSFigure._layers:
self.FITSFigure.remove_layer(mark)
else:
for mark in self._click_marks:
self._click_marks.remove(mark)
if mark in self.axis.lines:
self.axis.lines.remove(mark)
self.refresh()
def _add_circle(self,x,y,x2,y2,**kwargs):
"""
"""
if self.FITSFigure is not None:
x,y = self.FITSFigure.pixel2world(x,y)
x2,y2 = self.FITSFigure.pixel2world(x2,y2)
r = (np.linalg.norm(np.array([x,y])-np.array([x2,y2])))
#self.FITSFigure.show_markers(x,y,s=r,marker='o',facecolor='none',edgecolor='black',layer='circle')
layername = "circle%02i" % len(self._circles)
self.FITSFigure.show_circles(x,y,r,edgecolor='black',facecolor='none',layer=layername,**kwargs)
self._circles.append(layername)
else:
r = np.linalg.norm(np.array([x,y])-np.array([x2,y2]))
circle = matplotlib.patches.Circle([x,y],radius=r,**kwargs)
self._circles.append( circle )
self.axis.patches.append(circle)
self.refresh()
def _remove_circle(self):
"""
"""
if self.FITSFigure is not None:
for layername in self._circles:
if layername in self.FITSFigure._layers:
self.FITSFigure.remove_layer(layername)
else:
for circle in self._circles:
if circle in self.axis.patches:
self.axis.patches.remove(circle)
self._circles.remove(circle)
self.refresh()
def refresh(self):
if self.axis is not None:
self.axis.figure.canvas.draw()
def circle(self,x1,y1,x2,y2,**kwargs):
"""
Plot the spectrum of a circular aperture
"""
r = (np.linalg.norm(np.array([x1,y1])-np.array([x2,y2])))
self.Cube.plot_apspec([x1,y1,r],**kwargs)
#self.Cube.data = cubes.extract_aperture( self.Cube.cube, [x1,y1,r] , coordsys=None )
#self.Cube.plotter()
def copy(self, parent=None):
"""
Create a copy of the map plotter with blank (uninitialized) axis & figure
[ parent ]
A spectroscopic axis instance that is the parent of the specfit
instance. This needs to be specified at some point, but defaults
to None to prevent overwriting a previous plot.
"""
newmapplot = copy.copy(self)
newmapplot.Cube = parent
newmapplot.axis = None
newmapplot.figure = None
return newmapplot
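# --- Hedged example (editor's addition): typical interactive use ---
# A sketch only; the FITS filename is an assumption. pyspeckit's Cube attaches
# a MapPlotter to itself as `cube.mapplot`, so calling it invokes mapplot().
#
# import pyspeckit
# cube = pyspeckit.Cube('mycube.fits')
# cube.mapplot(estimator='max')  # peak map; click to plot spectra,
#                                # click-and-drag for circular apertures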
| mit |
laszlocsomor/tensorflow | tensorflow/python/kernel_tests/sparsemask_op_test.py | 133 | 1835 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SparseMaskTest(test.TestCase):
def testBasic(self):
values = np.random.rand(4, 4).astype(np.single)
indices = np.array([0, 2, 3, 4], dtype=np.int32)
mask_indices = np.array([0], dtype=np.int32)
out_values = values[1:, :]
out_indices = np.array([2, 3, 4], dtype=np.int32)
with self.test_session() as sess:
values_tensor = ops.convert_to_tensor(values)
indices_tensor = ops.convert_to_tensor(indices)
mask_indices_tensor = ops.convert_to_tensor(mask_indices)
t = ops.IndexedSlices(values_tensor, indices_tensor)
masked_t = array_ops.sparse_mask(t, mask_indices_tensor)
tf_out_values, tf_out_indices = sess.run(
[masked_t.values, masked_t.indices])
self.assertAllEqual(tf_out_values, out_values)
self.assertAllEqual(tf_out_indices, out_indices)
if __name__ == "__main__":
test.main()
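# --- Hedged note (editor's addition): what testBasic exercises ---
# array_ops.sparse_mask drops the rows of an IndexedSlices whose indices
# appear in mask_indices: masking index 0 above removes the first row, so the
# result keeps indices [2, 3, 4] and values[1:, :].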
| apache-2.0 |
hsgui/interest-only | deeplearning/reinforcementlearning/keyboardAgents.py | 46 | 3045 | # keyboardAgents.py
# -----------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from game import Agent
from game import Directions
import random
class KeyboardAgent(Agent):
"""
An agent controlled by the keyboard.
"""
# NOTE: Arrow keys also work.
WEST_KEY = 'a'
EAST_KEY = 'd'
NORTH_KEY = 'w'
SOUTH_KEY = 's'
STOP_KEY = 'q'
def __init__( self, index = 0 ):
self.lastMove = Directions.STOP
self.index = index
self.keys = []
def getAction( self, state):
from graphicsUtils import keys_waiting
from graphicsUtils import keys_pressed
keys = keys_waiting() + keys_pressed()
if keys != []:
self.keys = keys
legal = state.getLegalActions(self.index)
move = self.getMove(legal)
if move == Directions.STOP:
# Try to move in the same direction as before
if self.lastMove in legal:
move = self.lastMove
if (self.STOP_KEY in self.keys) and Directions.STOP in legal: move = Directions.STOP
if move not in legal:
move = random.choice(legal)
self.lastMove = move
return move
def getMove(self, legal):
move = Directions.STOP
if (self.WEST_KEY in self.keys or 'Left' in self.keys) and Directions.WEST in legal: move = Directions.WEST
if (self.EAST_KEY in self.keys or 'Right' in self.keys) and Directions.EAST in legal: move = Directions.EAST
if (self.NORTH_KEY in self.keys or 'Up' in self.keys) and Directions.NORTH in legal: move = Directions.NORTH
if (self.SOUTH_KEY in self.keys or 'Down' in self.keys) and Directions.SOUTH in legal: move = Directions.SOUTH
return move
class KeyboardAgent2(KeyboardAgent):
"""
A second agent controlled by the keyboard.
"""
# NOTE: Arrow keys also work.
WEST_KEY = 'j'
EAST_KEY = "l"
NORTH_KEY = 'i'
SOUTH_KEY = 'k'
STOP_KEY = 'u'
def getMove(self, legal):
move = Directions.STOP
if (self.WEST_KEY in self.keys) and Directions.WEST in legal: move = Directions.WEST
if (self.EAST_KEY in self.keys) and Directions.EAST in legal: move = Directions.EAST
if (self.NORTH_KEY in self.keys) and Directions.NORTH in legal: move = Directions.NORTH
if (self.SOUTH_KEY in self.keys) and Directions.SOUTH in legal: move = Directions.SOUTH
return move
| gpl-2.0 |
blakfeld/ansible | examples/scripts/yaml_to_ini.py | 175 | 7609 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible import errors
from ansible import utils
import os
import yaml
import sys
class InventoryParserYaml(object):
''' Host inventory parser for ansible '''
def __init__(self, filename=C.DEFAULT_HOST_LIST):
sys.stderr.write("WARNING: YAML inventory files are deprecated in 0.6 and will be removed in 0.7, to migrate" +
" download and run https://github.com/ansible/ansible/blob/devel/examples/scripts/yaml_to_ini.py\n")
fh = open(filename)
data = fh.read()
fh.close()
self._hosts = {}
self._parse(data)
def _make_host(self, hostname):
if hostname in self._hosts:
return self._hosts[hostname]
else:
host = Host(hostname)
self._hosts[hostname] = host
return host
# see file 'test/yaml_hosts' for syntax
def _parse(self, data):
# FIXME: refactor into subfunctions
all = Group('all')
ungrouped = Group('ungrouped')
all.add_child_group(ungrouped)
self.groups = dict(all=all, ungrouped=ungrouped)
grouped_hosts = []
yaml = utils.parse_yaml(data)
# first add all groups
for item in yaml:
if type(item) == dict and 'group' in item:
group = Group(item['group'])
for subresult in item.get('hosts',[]):
if type(subresult) in [ str, unicode ]:
host = self._make_host(subresult)
group.add_host(host)
grouped_hosts.append(host)
elif type(subresult) == dict:
host = self._make_host(subresult['host'])
vars = subresult.get('vars',{})
if type(vars) == list:
for subitem in vars:
for (k,v) in subitem.items():
host.set_variable(k,v)
elif type(vars) == dict:
for (k,v) in subresult.get('vars',{}).items():
host.set_variable(k,v)
else:
raise errors.AnsibleError("unexpected type for variable")
group.add_host(host)
grouped_hosts.append(host)
vars = item.get('vars',{})
if type(vars) == dict:
for (k,v) in item.get('vars',{}).items():
group.set_variable(k,v)
elif type(vars) == list:
for subitem in vars:
if type(subitem) != dict:
raise errors.AnsibleError("expected a dictionary")
for (k,v) in subitem.items():
group.set_variable(k,v)
self.groups[group.name] = group
all.add_child_group(group)
# add host definitions
for item in yaml:
if type(item) in [ str, unicode ]:
host = self._make_host(item)
if host not in grouped_hosts:
ungrouped.add_host(host)
elif type(item) == dict and 'host' in item:
host = self._make_host(item['host'])
vars = item.get('vars', {})
if type(vars)==list:
varlist, vars = vars, {}
for subitem in varlist:
vars.update(subitem)
for (k,v) in vars.items():
host.set_variable(k,v)
groups = item.get('groups', {})
if type(groups) in [ str, unicode ]:
groups = [ groups ]
if type(groups)==list:
for subitem in groups:
if subitem in self.groups:
group = self.groups[subitem]
else:
group = Group(subitem)
self.groups[group.name] = group
all.add_child_group(group)
group.add_host(host)
grouped_hosts.append(host)
if host not in grouped_hosts:
ungrouped.add_host(host)
        # make sure ungrouped.hosts is the complement of grouped_hosts
        ungrouped.hosts = [host for host in ungrouped.hosts if host not in grouped_hosts]
if __name__ == "__main__":
if len(sys.argv) != 2:
print "usage: yaml_to_ini.py /path/to/ansible/hosts"
sys.exit(1)
result = ""
original = sys.argv[1]
yamlp = InventoryParserYaml(filename=sys.argv[1])
dirname = os.path.dirname(original)
group_names = [ g.name for g in yamlp.groups.values() ]
for group_name in sorted(group_names):
record = yamlp.groups[group_name]
if group_name == 'all':
continue
hosts = record.hosts
result = result + "[%s]\n" % record.name
for h in hosts:
result = result + "%s\n" % h.name
result = result + "\n"
groupfiledir = os.path.join(dirname, "group_vars")
if not os.path.exists(groupfiledir):
print "* creating: %s" % groupfiledir
os.makedirs(groupfiledir)
groupfile = os.path.join(groupfiledir, group_name)
print "* writing group variables for %s into %s" % (group_name, groupfile)
groupfh = open(groupfile, 'w')
groupfh.write(yaml.dump(record.get_variables()))
groupfh.close()
for (host_name, host_record) in yamlp._hosts.iteritems():
hostfiledir = os.path.join(dirname, "host_vars")
if not os.path.exists(hostfiledir):
print "* creating: %s" % hostfiledir
os.makedirs(hostfiledir)
hostfile = os.path.join(hostfiledir, host_record.name)
print "* writing host variables for %s into %s" % (host_record.name, hostfile)
hostfh = open(hostfile, 'w')
hostfh.write(yaml.dump(host_record.get_variables()))
hostfh.close()
# also need to keep a hash of variables per each host
# and variables per each group
# and write those to disk
newfilepath = os.path.join(dirname, "hosts.new")
fdh = open(newfilepath, 'w')
fdh.write(result)
fdh.close()
print "* COMPLETE: review your new inventory file and replace your original when ready"
print "* new inventory file saved as %s" % newfilepath
print "* edit group specific variables in %s/group_vars/" % dirname
print "* edit host specific variables in %s/host_vars/" % dirname
# now need to write this to disk as (oldname).new
# and inform the user
| gpl-3.0 |
isandlaTech/cohorte-devtools | org.cohorte.eclipse.runner.basic/files/jython/Lib/encodings/hex_codec.py | 528 | 2309 | """ Python 'hex_codec' Codec - 2-digit hex content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs, binascii
### Codec APIs
def hex_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = binascii.b2a_hex(input)
return (output, len(input))
def hex_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = binascii.a2b_hex(input)
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input,errors='strict'):
return hex_encode(input,errors)
def decode(self, input,errors='strict'):
return hex_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
assert self.errors == 'strict'
return binascii.b2a_hex(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
assert self.errors == 'strict'
return binascii.a2b_hex(input)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='hex',
encode=hex_encode,
decode=hex_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
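### Usage sketch (illustrative; assumes the codec is registered as 'hex',
### as it is via the stdlib encodings search function on Python 2):
#
# import codecs
# assert codecs.encode('abc', 'hex') == '616263'
# assert codecs.decode('616263', 'hex') == 'abc'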
| apache-2.0 |
rubyinhell/brython | www/src/Lib/sre_parse.py | 630 | 29657 | #
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
import sys
from sre_constants import *
from _sre import MAXREPEAT
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = set("0123456789")
OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")
WHITESPACE = set(" \t\n\r\v\f")
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"a": SRE_FLAG_ASCII,
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
class Pattern:
# master pattern object. keeps track of global attributes
def __init__(self):
self.flags = 0
self.open = []
self.groups = 1
self.groupdict = {}
def opengroup(self, name=None):
gid = self.groups
self.groups = gid + 1
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error("redefinition of group name %s as group %d; "
"was group %d" % (repr(name), gid, ogid))
self.groupdict[name] = gid
self.open.append(gid)
return gid
def closegroup(self, gid):
self.open.remove(gid)
def checkgroup(self, gid):
return gid < self.groups and gid not in self.open
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, pattern, data=None):
self.pattern = pattern
if data is None:
data = []
self.data = data
self.width = None
def __iter__(self):
return iter(self.data)
def dump(self, level=0):
nl = 1
seqtypes = (tuple, list)
for op, av in self.data:
print(level*" " + op, end=' '); nl = 0
if op == "in":
# member sublanguage
print(); nl = 1
for op, a in av:
print((level+1)*" " + op, a)
elif op == "branch":
print(); nl = 1
i = 0
for a in av[1]:
if i > 0:
print(level*" " + "or")
a.dump(level+1); nl = 1
i = i + 1
elif isinstance(av, seqtypes):
for a in av:
if isinstance(a, SubPattern):
if not nl: print()
a.dump(level+1); nl = 1
else:
print(a, end=' ') ; nl = 0
else:
print(av, end=' ') ; nl = 0
if not nl: print()
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
if isinstance(index, slice):
return SubPattern(self.pattern, self.data[index])
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
if self.width:
return self.width
lo = hi = 0
UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
for op, av in self.data:
if op is BRANCH:
i = sys.maxsize
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is CALL:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[1].getwidth()
lo = lo + i
hi = hi + j
elif op in REPEATCODES:
i, j = av[2].getwidth()
lo = lo + int(i) * av[0]
hi = hi + int(j) * av[1]
elif op in UNITCODES:
lo = lo + 1
hi = hi + 1
elif op == SUCCESS:
break
self.width = int(min(lo, sys.maxsize)), int(min(hi, sys.maxsize))
return self.width
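# e.g. for the pattern "ab?" getwidth() reports (1, 2): the literal 'a'
# always consumes one character, the optional 'b' at most one more.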
class Tokenizer:
def __init__(self, string):
self.istext = isinstance(string, str)
self.string = string
self.index = 0
self.__next()
def __next(self):
if self.index >= len(self.string):
self.next = None
return
char = self.string[self.index:self.index+1]
# Special case for the str8, since indexing returns an integer
# XXX This is only needed for test_bug_926075 in test_re.py
if char and not self.istext:
char = chr(char[0])
if char == "\\":
try:
c = self.string[self.index + 1]
except IndexError:
raise error("bogus escape (end of line)")
if not self.istext:
c = chr(c)
char = char + c
self.index = self.index + len(char)
self.next = char
def match(self, char, skip=1):
if char == self.next:
if skip:
self.__next()
return 1
return 0
def get(self):
this = self.next
self.__next()
return this
def getwhile(self, n, charset):
result = ''
for _ in range(n):
c = self.next
if c not in charset:
break
result += c
self.__next()
return result
def tell(self):
return self.index, self.next
def seek(self, index):
self.index, self.next = index
def isident(char):
return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
def isdigit(char):
return "0" <= char <= "9"
def isname(name):
# check that group name is a valid string
if not isident(name[0]):
return False
for char in name[1:]:
if not isident(char) and not isdigit(char):
return False
return True
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code and code[0] == IN:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape (exactly two digits)
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise ValueError
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise ValueError
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c in OCTDIGITS:
# octal escape (up to three digits)
escape += source.getwhile(2, OCTDIGITS)
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise ValueError
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise ValueError
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c == "0":
# octal escape
escape += source.getwhile(2, OCTDIGITS)
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape = escape + source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
# not an octal escape, so this is a group reference
group = int(escape[1:])
if group < state.groups:
if not state.checkgroup(group):
raise error("cannot refer to open group")
return GROUPREF, group
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
def _parse_sub(source, state, nested=1):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
while 1:
itemsappend(_parse(source, state))
if sourcematch("|"):
continue
if not nested:
break
if not source.next or sourcematch(")", 0):
break
else:
raise error("pattern not properly closed")
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
subpatternappend = subpattern.append
# check if all items share a common prefix
while 1:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
for item in items:
del item[0]
subpatternappend(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
for item in items:
if len(item) != 1 or item[0][0] != LITERAL:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
set = []
setappend = set.append
for item in items:
setappend(item[0])
subpatternappend((IN, set))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
def _parse_sub_cond(source, state, condgroup):
item_yes = _parse(source, state)
if source.match("|"):
item_no = _parse(source, state)
if source.match("|"):
raise error("conditional backref with more than two branches")
else:
item_no = None
if source.next and not source.match(")", 0):
raise error("pattern not properly closed")
subpattern = SubPattern(state)
subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
return subpattern
_PATTERNENDERS = set("|)")
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
# parse a simple pattern
subpattern = SubPattern(state)
# precompute constants into local variables
subpatternappend = subpattern.append
sourceget = source.get
sourcematch = source.match
_len = len
PATTERNENDERS = _PATTERNENDERS
ASSERTCHARS = _ASSERTCHARS
LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
REPEATCODES = _REPEATCODES
while 1:
if source.next in PATTERNENDERS:
break # end of subpattern
this = sourceget()
if this is None:
break # end of pattern
if state.flags & SRE_FLAG_VERBOSE:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while 1:
this = sourceget()
if this in (None, "\n"):
break
continue
if this and this[0] not in SPECIAL_CHARS:
subpatternappend((LITERAL, ord(this)))
elif this == "[":
# character set
set = []
setappend = set.append
## if sourcematch(":"):
## pass # handle character classes
if sourcematch("^"):
setappend((NEGATE, None))
# check remaining characters
start = set[:]
while 1:
this = sourceget()
if this == "]" and set != start:
break
elif this and this[0] == "\\":
code1 = _class_escape(source, this)
elif this:
code1 = LITERAL, ord(this)
else:
raise error("unexpected end of regular expression")
if sourcematch("-"):
# potential range
this = sourceget()
if this == "]":
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
setappend((LITERAL, ord("-")))
break
elif this:
if this[0] == "\\":
code2 = _class_escape(source, this)
else:
code2 = LITERAL, ord(this)
if code1[0] != LITERAL or code2[0] != LITERAL:
raise error("bad character range")
lo = code1[1]
hi = code2[1]
if hi < lo:
raise error("bad character range")
setappend((RANGE, (lo, hi)))
else:
raise error("unexpected end of regular expression")
else:
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
# XXX: <fl> should move set optimization to compiler!
if _len(set)==1 and set[0][0] is LITERAL:
subpatternappend(set[0]) # optimization
elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
subpatternappend((NOT_LITERAL, set[1][1])) # optimization
else:
# XXX: <fl> should add charmap optimization here
subpatternappend((IN, set))
elif this and this[0] in REPEAT_CHARS:
# repeat previous item
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
if source.next == "}":
subpatternappend((LITERAL, ord(this)))
continue
here = source.tell()
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo = lo + source.get()
if sourcematch(","):
while source.next in DIGITS:
hi = hi + sourceget()
else:
hi = lo
if not sourcematch("}"):
subpatternappend((LITERAL, ord(this)))
source.seek(here)
continue
if lo:
min = int(lo)
if min >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if hi:
max = int(hi)
if max >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if max < min:
raise error("bad repeat interval")
else:
raise error("not supported")
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or (_len(item) == 1 and item[0][0] == AT):
raise error("nothing to repeat")
if item[0][0] in REPEATCODES:
raise error("multiple repeat")
if sourcematch("?"):
subpattern[-1] = (MIN_REPEAT, (min, max, item))
else:
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpatternappend((ANY, None))
elif this == "(":
group = 1
name = None
condgroup = None
if sourcematch("?"):
group = 0
# options
if sourcematch("P"):
# python extensions
if sourcematch("<"):
# named group: skip forward to end of name
name = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ">":
break
name = name + char
group = 1
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name")
elif sourcematch("="):
# named backreference
name = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ")":
break
name = name + char
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name")
gid = state.groupdict.get(name)
if gid is None:
raise error("unknown group name")
subpatternappend((GROUPREF, gid))
continue
else:
char = sourceget()
if char is None:
raise error("unexpected end of pattern")
raise error("unknown specifier: ?P%s" % char)
elif sourcematch(":"):
# non-capturing group
group = 2
elif sourcematch("#"):
# comment
while 1:
if source.next is None or source.next == ")":
break
sourceget()
if not sourcematch(")"):
raise error("unbalanced parenthesis")
continue
elif source.next in ASSERTCHARS:
# lookahead assertions
char = sourceget()
dir = 1
if char == "<":
if source.next not in LOOKBEHINDASSERTCHARS:
raise error("syntax error")
dir = -1 # lookbehind
char = sourceget()
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if char == "=":
subpatternappend((ASSERT, (dir, p)))
else:
subpatternappend((ASSERT_NOT, (dir, p)))
continue
elif sourcematch("("):
# conditional backreference group
condname = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ")":
break
condname = condname + char
group = 2
if not condname:
raise error("missing group name")
if isname(condname):
condgroup = state.groupdict.get(condname)
if condgroup is None:
raise error("unknown group name")
else:
try:
condgroup = int(condname)
except ValueError:
raise error("bad character in group name")
else:
# flags
if source.next not in FLAGS:
raise error("unexpected end of pattern")
while source.next in FLAGS:
state.flags = state.flags | FLAGS[sourceget()]
if group:
# parse group contents
if group == 2:
# anonymous group
group = None
else:
group = state.opengroup(name)
if condgroup:
p = _parse_sub_cond(source, state, condgroup)
else:
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if group is not None:
state.closegroup(group)
subpatternappend((SUBPATTERN, (group, p)))
else:
while 1:
char = sourceget()
if char is None:
raise error("unexpected end of pattern")
if char == ")":
break
raise error("unknown extension")
elif this == "^":
subpatternappend((AT, AT_BEGINNING))
elif this == "$":
subpattern.append((AT, AT_END))
elif this and this[0] == "\\":
code = _escape(source, this, state)
subpatternappend(code)
else:
raise error("parser error")
return subpattern
def fix_flags(src, flags):
# Check and fix flags according to the type of pattern (str or bytes)
if isinstance(src, str):
if not flags & SRE_FLAG_ASCII:
flags |= SRE_FLAG_UNICODE
elif flags & SRE_FLAG_UNICODE:
raise ValueError("ASCII and UNICODE flags are incompatible")
else:
if flags & SRE_FLAG_UNICODE:
raise ValueError("can't use UNICODE flag with a bytes pattern")
return flags
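# e.g. fix_flags('a', 0) implicitly adds SRE_FLAG_UNICODE for str
# patterns, while fix_flags(b'a', SRE_FLAG_UNICODE) raises ValueError.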
def parse(str, flags=0, pattern=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if pattern is None:
pattern = Pattern()
pattern.flags = flags
pattern.str = str
p = _parse_sub(source, pattern, 0)
p.pattern.flags = fix_flags(str, p.pattern.flags)
tail = source.get()
if tail == ")":
raise error("unbalanced parenthesis")
elif tail:
raise error("bogus characters at end of regular expression")
if flags & SRE_FLAG_DEBUG:
p.dump()
if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
# the VERBOSE flag was switched on inside the pattern. to be
# on the safe side, we'll parse the whole thing again...
return parse(str, p.pattern.flags)
return p
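# Illustrative output (exact constant reprs vary across versions):
#
#   >>> parse('a+b')
#   [('max_repeat', (1, MAXREPEAT, [('literal', 97)])), ('literal', 98)]
#
# i.e. a SubPattern of (opcode, argument) tuples ready for sre_compile.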
def parse_template(source, pattern):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
sget = s.get
p = []
a = p.append
def literal(literal, p=p, pappend=a):
if p and p[-1][0] is LITERAL:
p[-1] = LITERAL, p[-1][1] + literal
else:
pappend((LITERAL, literal))
# the tokenizer decodes bytes templates as latin-1, so chr() builds
# characters for both str and bytes input (re-encoded to latin-1 below)
makechar = chr
while 1:
this = sget()
if this is None:
break # end of replacement string
if this and this[0] == "\\":
# group
c = this[1:2]
if c == "g":
name = ""
if s.match("<"):
while 1:
char = sget()
if char is None:
raise error("unterminated group name")
if char == ">":
break
name = name + char
if not name:
raise error("missing group name")
try:
index = int(name)
if index < 0:
raise error("negative group number")
except ValueError:
if not isname(name):
raise error("bad character in group name")
try:
index = pattern.groupindex[name]
except KeyError:
raise IndexError("unknown group name")
a((MARK, index))
elif c == "0":
if s.next in OCTDIGITS:
this = this + sget()
if s.next in OCTDIGITS:
this = this + sget()
literal(makechar(int(this[1:], 8) & 0xff))
elif c in DIGITS:
isoctal = False
if s.next in DIGITS:
this = this + sget()
if (c in OCTDIGITS and this[2] in OCTDIGITS and
s.next in OCTDIGITS):
this = this + sget()
isoctal = True
literal(makechar(int(this[1:], 8) & 0xff))
if not isoctal:
a((MARK, int(this[1:])))
else:
try:
this = makechar(ESCAPES[this][1])
except KeyError:
pass
literal(this)
else:
literal(this)
# convert template to groups and literals lists
i = 0
groups = []
groupsappend = groups.append
literals = [None] * len(p)
if isinstance(source, str):
encode = lambda x: x
else:
# The tokenizer implicitly decodes bytes objects as latin-1, we must
# therefore re-encode the final representation.
encode = lambda x: x.encode('latin-1')
for c, s in p:
if c is MARK:
groupsappend((i, s))
# literal[i] is already None
else:
literals[i] = encode(s)
i = i + 1
return groups, literals
def expand_template(template, match):
g = match.group
sep = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = s = g(group)
if s is None:
raise error("unmatched group")
except IndexError:
raise error("invalid group reference")
return sep.join(literals)
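# Sketch of how the two template helpers combine (illustrative only):
#
#   >>> import re
#   >>> m = re.match(r'(\w+)-(\w+)', 'foo-bar')
#   >>> template = parse_template(r'\2/\1', m.re)
#   >>> expand_template(template, m)
#   'bar/foo'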
| bsd-3-clause |
JanDintel/ansible | lib/ansible/template/safe_eval.py | 14 | 4160 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import sys
from six.moves import builtins
from ansible import constants as C
from ansible.plugins import filter_loader, test_loader
def safe_eval(expr, locals={}, include_exceptions=False):
'''
This is intended for allowing things like:
with_items: a_list_variable
Where Jinja2 would return a string but we do not want to allow it to
call functions (outside of Jinja2, where the env is constrained). If
the input data to this function came from an untrusted (remote) source,
it should first be run through _clean_data_struct() to ensure the data
is further sanitized prior to evaluation.
Based on:
http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
'''
# this is the whitelist of AST nodes we are going to
# allow in the evaluation. Any node type other than
# those listed here will raise an exception in our custom
# visitor class defined below.
SAFE_NODES = set(
(
ast.Add,
ast.BinOp,
ast.Call,
ast.Compare,
ast.Dict,
ast.Div,
ast.Expression,
ast.List,
ast.Load,
ast.Mult,
ast.Num,
ast.Name,
ast.Str,
ast.Sub,
ast.Tuple,
ast.UnaryOp,
)
)
# AST node types were expanded after 2.6
if not sys.version.startswith('2.6'):
# set.union() returns a new set and would be discarded here; update()
# mutates SAFE_NODES in place so ast.Set is actually whitelisted
SAFE_NODES.update(
set(
(ast.Set,)
)
)
filter_list = []
for filter in filter_loader.all():
filter_list.extend(filter.filters().keys())
test_list = []
for test in test_loader.all():
test_list.extend(test.tests().keys())
CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
if type(node) not in SAFE_NODES:
raise Exception("invalid expression (%s)" % expr)
elif isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Name) and inside_call:
if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:
raise Exception("invalid function: %s" % node.id)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(child_node, inside_call)
if not isinstance(expr, basestring):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (expr, None)
return expr
cnv = CleansingNodeVisitor()
try:
parsed_tree = ast.parse(expr, mode='eval')
cnv.visit(parsed_tree)
compiled = compile(parsed_tree, expr, 'eval')
result = eval(compiled, {}, dict(locals))
if include_exceptions:
return (result, None)
else:
return result
except SyntaxError as e:
# special handling for syntax errors, we just return
# the expression string back as-is
if include_exceptions:
return (expr, None)
return expr
except Exception as e:
if include_exceptions:
return (expr, e)
return expr
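# Illustrative behaviour (assumes an Ansible runtime where the filter and
# test plugin loaders resolve; outputs are sketches, not guarantees):
#
#   safe_eval("[1, 2] + [3]")                 -> [1, 2, 3]
#   safe_eval("__import__('os')")             -> "__import__('os')" (blocked,
#                                                expression returned as-is)
#   safe_eval("1 +", include_exceptions=True) -> ("1 +", None) on SyntaxError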
| gpl-3.0 |
TheWardoctor/Wardoctors-repo | script.module.urlresolver/lib/urlresolver/plugins/vidup_org.py | 6 | 1066 | """
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __generic_resolver__ import GenericResolver
class VidUpResolver(GenericResolver):
name = "vidup.org"
domains = ["vidup.org"]
pattern = '(?://|\.)(vidup\.org)/(?:embed\.php\?file=)?([0-9a-zA-Z]+)'
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, 'http://{host}/embed.php?file={media_id}')
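# Illustrative match for the pattern above (hypothetical URL):
#   re.search(pattern, 'http://vidup.org/embed.php?file=abc123').groups()
#   -> ('vidup.org', 'abc123')
# get_url() then rebuilds 'http://vidup.org/embed.php?file=abc123'.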
| apache-2.0 |
nuuuboo/odoo | addons/stock_landed_costs/stock_landed_costs.py | 77 | 19591 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.exceptions import Warning
from openerp.tools import float_compare, float_round
from openerp.tools.translate import _
import product
class stock_landed_cost(osv.osv):
_name = 'stock.landed.cost'
_description = 'Stock Landed Cost'
_inherit = 'mail.thread'
_track = {
'state': {
'stock_landed_costs.mt_stock_landed_cost_open': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'done',
},
}
def _total_amount(self, cr, uid, ids, name, args, context=None):
result = {}
for cost in self.browse(cr, uid, ids, context=context):
total = 0.0
for line in cost.cost_lines:
total += line.price_unit
result[cost.id] = total
return result
def _get_cost_line(self, cr, uid, ids, context=None):
cost_to_recompute = []
for line in self.pool.get('stock.landed.cost.lines').browse(cr, uid, ids, context=context):
cost_to_recompute.append(line.cost_id.id)
return cost_to_recompute
def get_valuation_lines(self, cr, uid, ids, picking_ids=None, context=None):
picking_obj = self.pool.get('stock.picking')
lines = []
if not picking_ids:
return lines
for picking in picking_obj.browse(cr, uid, picking_ids):
for move in picking.move_lines:
#it doesn't make sense to make a landed cost for a product that isn't set as being valuated in real time at real cost
if move.product_id.valuation != 'real_time' or move.product_id.cost_method != 'real':
continue
total_cost = 0.0
total_qty = move.product_qty
weight = move.product_id and move.product_id.weight * move.product_qty
volume = move.product_id and move.product_id.volume * move.product_qty
for quant in move.quant_ids:
total_cost += quant.cost
vals = dict(product_id=move.product_id.id, move_id=move.id, quantity=move.product_uom_qty, former_cost=total_cost * total_qty, weight=weight, volume=volume)
lines.append(vals)
if not lines:
raise osv.except_osv(_('Error!'), _('The selected picking does not contain any move that would be impacted by landed costs. Landed costs are only possible for products configured in real time valuation with real price costing method. Please make sure it is the case, or you selected the correct picking'))
return lines
_columns = {
'name': fields.char('Name', track_visibility='always', readonly=True, copy=False),
'date': fields.date('Date', required=True, states={'done': [('readonly', True)]}, track_visibility='onchange', copy=False),
'picking_ids': fields.many2many('stock.picking', string='Pickings', states={'done': [('readonly', True)]}, copy=False),
'cost_lines': fields.one2many('stock.landed.cost.lines', 'cost_id', 'Cost Lines', states={'done': [('readonly', True)]}, copy=True),
'valuation_adjustment_lines': fields.one2many('stock.valuation.adjustment.lines', 'cost_id', 'Valuation Adjustments', states={'done': [('readonly', True)]}),
'description': fields.text('Item Description', states={'done': [('readonly', True)]}),
'amount_total': fields.function(_total_amount, type='float', string='Total', digits_compute=dp.get_precision('Account'),
store={
'stock.landed.cost': (lambda self, cr, uid, ids, c={}: ids, ['cost_lines'], 20),
'stock.landed.cost.lines': (_get_cost_line, ['price_unit', 'quantity', 'cost_id'], 20),
}, track_visibility='always'
),
'state': fields.selection([('draft', 'Draft'), ('done', 'Posted'), ('cancel', 'Cancelled')], 'State', readonly=True, track_visibility='onchange', copy=False),
'account_move_id': fields.many2one('account.move', 'Journal Entry', readonly=True, copy=False),
'account_journal_id': fields.many2one('account.journal', 'Account Journal', required=True, states={'done': [('readonly', True)]}),
}
_defaults = {
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'stock.landed.cost'),
'state': 'draft',
'date': fields.date.context_today,
}
def _create_accounting_entries(self, cr, uid, line, move_id, qty_out, context=None):
product_obj = self.pool.get('product.template')
cost_product = line.cost_line_id and line.cost_line_id.product_id
if not cost_product:
return False
accounts = product_obj.get_product_accounts(cr, uid, line.product_id.product_tmpl_id.id, context=context)
debit_account_id = accounts['property_stock_valuation_account_id']
already_out_account_id = accounts['stock_account_output']
credit_account_id = line.cost_line_id.account_id.id or cost_product.property_account_expense.id or cost_product.categ_id.property_account_expense_categ.id
if not credit_account_id:
raise osv.except_osv(_('Error!'), _('Please configure Stock Expense Account for product: %s.') % (cost_product.name))
return self._create_account_move_line(cr, uid, line, move_id, credit_account_id, debit_account_id, qty_out, already_out_account_id, context=context)
def _create_account_move_line(self, cr, uid, line, move_id, credit_account_id, debit_account_id, qty_out, already_out_account_id, context=None):
"""
Generate the account.move.line values to track the landed cost.
Afterwards, for the goods that are already out of stock, we should create the out moves
"""
aml_obj = self.pool.get('account.move.line')
base_line = {
'name': line.name,
'move_id': move_id,
'product_id': line.product_id.id,
'quantity': line.quantity,
}
debit_line = dict(base_line, account_id=debit_account_id)
credit_line = dict(base_line, account_id=credit_account_id)
diff = line.additional_landed_cost
if diff > 0:
debit_line['debit'] = diff
credit_line['credit'] = diff
else:
# negative cost, reverse the entry
debit_line['credit'] = -diff
credit_line['debit'] = -diff
aml_obj.create(cr, uid, debit_line, context=context)
aml_obj.create(cr, uid, credit_line, context=context)
#Create account move lines for quants already out of stock
if qty_out > 0:
debit_line = dict(debit_line,
name=(line.name + ": " + str(qty_out) + _(' already out')),
quantity=qty_out,
account_id=already_out_account_id)
credit_line = dict(credit_line,
name=(line.name + ": " + str(qty_out) + _(' already out')),
quantity=qty_out,
account_id=debit_account_id)
diff = diff * qty_out / line.quantity
if diff > 0:
debit_line['debit'] = diff
credit_line['credit'] = diff
else:
# negative cost, reverse the entry
debit_line['credit'] = -diff
credit_line['debit'] = -diff
aml_obj.create(cr, uid, debit_line, context=context)
aml_obj.create(cr, uid, credit_line, context=context)
return True
def _create_account_move(self, cr, uid, cost, context=None):
vals = {
'journal_id': cost.account_journal_id.id,
'period_id': self.pool.get('account.period').find(cr, uid, cost.date, context=context)[0],
'date': cost.date,
'ref': cost.name
}
return self.pool.get('account.move').create(cr, uid, vals, context=context)
def _check_sum(self, cr, uid, landed_cost, context=None):
"""
Will check if each cost line its valuation lines sum to the correct amount
and if the overall total amount is correct also
"""
costcor = {}
tot = 0
for valuation_line in landed_cost.valuation_adjustment_lines:
if costcor.get(valuation_line.cost_line_id):
costcor[valuation_line.cost_line_id] += valuation_line.additional_landed_cost
else:
costcor[valuation_line.cost_line_id] = valuation_line.additional_landed_cost
tot += valuation_line.additional_landed_cost
prec = self.pool['decimal.precision'].precision_get(cr, uid, 'Account')
# float_compare returns 0 for equal amounts
res = not bool(float_compare(tot, landed_cost.amount_total, precision_digits=prec))
for costl in costcor.keys():
if float_compare(costcor[costl], costl.price_unit, precision_digits=prec):
res = False
return res
def button_validate(self, cr, uid, ids, context=None):
quant_obj = self.pool.get('stock.quant')
for cost in self.browse(cr, uid, ids, context=context):
if cost.state != 'draft':
raise Warning(_('Only draft landed costs can be validated'))
if not cost.valuation_adjustment_lines or not self._check_sum(cr, uid, cost, context=context):
raise osv.except_osv(_('Error!'), _('You cannot validate a landed cost which has no valid valuation lines.'))
move_id = self._create_account_move(cr, uid, cost, context=context)
quant_dict = {}
for line in cost.valuation_adjustment_lines:
if not line.move_id:
continue
per_unit = line.final_cost / line.quantity
diff = per_unit - line.former_cost_per_unit
quants = [quant for quant in line.move_id.quant_ids]
for quant in quants:
if quant.id not in quant_dict:
quant_dict[quant.id] = quant.cost + diff
else:
quant_dict[quant.id] += diff
for key, value in quant_dict.items():
quant_obj.write(cr, uid, key, {'cost': value}, context=context)
qty_out = 0
for quant in line.move_id.quant_ids:
if quant.location_id.usage != 'internal':
qty_out += quant.qty
self._create_accounting_entries(cr, uid, line, move_id, qty_out, context=context)
self.write(cr, uid, cost.id, {'state': 'done', 'account_move_id': move_id}, context=context)
return True
def button_cancel(self, cr, uid, ids, context=None):
cost = self.browse(cr, uid, ids, context=context)
if cost.state == 'done':
raise Warning(_('Validated landed costs cannot be cancelled, '
'but you could create negative landed costs to reverse them'))
return cost.write({'state': 'cancel'})
def unlink(self, cr, uid, ids, context=None):
# cancel or raise first
self.button_cancel(cr, uid, ids, context)
return super(stock_landed_cost, self).unlink(cr, uid, ids, context=context)
def compute_landed_cost(self, cr, uid, ids, context=None):
line_obj = self.pool.get('stock.valuation.adjustment.lines')
unlink_ids = line_obj.search(cr, uid, [('cost_id', 'in', ids)], context=context)
line_obj.unlink(cr, uid, unlink_ids, context=context)
digits = dp.get_precision('Product Price')(cr)
towrite_dict = {}
for cost in self.browse(cr, uid, ids, context=None):
if not cost.picking_ids:
continue
picking_ids = [p.id for p in cost.picking_ids]
total_qty = 0.0
total_cost = 0.0
total_weight = 0.0
total_volume = 0.0
total_line = 0.0
vals = self.get_valuation_lines(cr, uid, [cost.id], picking_ids=picking_ids, context=context)
for v in vals:
for line in cost.cost_lines:
v.update({'cost_id': cost.id, 'cost_line_id': line.id})
self.pool.get('stock.valuation.adjustment.lines').create(cr, uid, v, context=context)
total_qty += v.get('quantity', 0.0)
total_cost += v.get('former_cost', 0.0)
total_weight += v.get('weight', 0.0)
total_volume += v.get('volume', 0.0)
total_line += 1
for line in cost.cost_lines:
value_split = 0.0
for valuation in cost.valuation_adjustment_lines:
value = 0.0
if valuation.cost_line_id and valuation.cost_line_id.id == line.id:
if line.split_method == 'by_quantity' and total_qty:
per_unit = (line.price_unit / total_qty)
value = valuation.quantity * per_unit
elif line.split_method == 'by_weight' and total_weight:
per_unit = (line.price_unit / total_weight)
value = valuation.weight * per_unit
elif line.split_method == 'by_volume' and total_volume:
per_unit = (line.price_unit / total_volume)
value = valuation.volume * per_unit
elif line.split_method == 'equal':
value = (line.price_unit / total_line)
elif line.split_method == 'by_current_cost_price' and total_cost:
per_unit = (line.price_unit / total_cost)
value = valuation.former_cost * per_unit
else:
value = (line.price_unit / total_line)
if digits:
value = float_round(value, precision_digits=digits[1], rounding_method='UP')
value = min(value, line.price_unit - value_split)
value_split += value
if valuation.id not in towrite_dict:
towrite_dict[valuation.id] = value
else:
towrite_dict[valuation.id] += value
if towrite_dict:
for key, value in towrite_dict.items():
line_obj.write(cr, uid, key, {'additional_landed_cost': value}, context=context)
return True
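# Worked example for the split methods above (illustrative figures):
# a 100.0 cost line spread over two valuation lines with quantities 3 and 7
#   by_quantity -> per_unit = 100 / 10, the lines receive 30.0 and 70.0
#   equal       -> each of the 2 lines receives 100 / 2 = 50.0
# Each value is rounded UP and capped so the split never exceeds price_unit.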
class stock_landed_cost_lines(osv.osv):
_name = 'stock.landed.cost.lines'
_description = 'Stock Landed Cost Lines'
def onchange_product_id(self, cr, uid, ids, product_id=False, context=None):
result = {}
if not product_id:
return {'value': {'quantity': 0.0, 'price_unit': 0.0}}
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
result['name'] = product.name
result['split_method'] = product.split_method
result['price_unit'] = product.standard_price
result['account_id'] = product.property_account_expense and product.property_account_expense.id or product.categ_id.property_account_expense_categ.id
return {'value': result}
_columns = {
'name': fields.char('Description'),
'cost_id': fields.many2one('stock.landed.cost', 'Landed Cost', required=True, ondelete='cascade'),
'product_id': fields.many2one('product.product', 'Product', required=True),
'price_unit': fields.float('Cost', required=True, digits_compute=dp.get_precision('Product Price')),
'split_method': fields.selection(product.SPLIT_METHOD, string='Split Method', required=True),
'account_id': fields.many2one('account.account', 'Account', domain=[('type', '<>', 'view'), ('type', '<>', 'closed')]),
}
class stock_valuation_adjustment_lines(osv.osv):
_name = 'stock.valuation.adjustment.lines'
_description = 'Stock Valuation Adjustment Lines'
def _amount_final(self, cr, uid, ids, name, args, context=None):
result = {}
for line in self.browse(cr, uid, ids, context=context):
result[line.id] = {
'former_cost_per_unit': 0.0,
'final_cost': 0.0,
}
result[line.id]['former_cost_per_unit'] = (line.former_cost / line.quantity if line.quantity else 1.0)
result[line.id]['final_cost'] = (line.former_cost + line.additional_landed_cost)
return result
def _get_name(self, cr, uid, ids, name, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = line.product_id.code or line.product_id.name or ''
if line.cost_line_id:
res[line.id] += ' - ' + line.cost_line_id.name
return res
_columns = {
'name': fields.function(_get_name, type='char', string='Description', store=True),
'cost_id': fields.many2one('stock.landed.cost', 'Landed Cost', required=True, ondelete='cascade'),
'cost_line_id': fields.many2one('stock.landed.cost.lines', 'Cost Line', readonly=True),
'move_id': fields.many2one('stock.move', 'Stock Move', readonly=True),
'product_id': fields.many2one('product.product', 'Product', required=True),
'quantity': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'weight': fields.float('Weight', digits_compute=dp.get_precision('Product Unit of Measure')),
'volume': fields.float('Volume', digits_compute=dp.get_precision('Product Unit of Measure')),
'former_cost': fields.float('Former Cost', digits_compute=dp.get_precision('Product Price')),
'former_cost_per_unit': fields.function(_amount_final, multi='cost', string='Former Cost(Per Unit)', type='float', digits_compute=dp.get_precision('Account'), store=True),
'additional_landed_cost': fields.float('Additional Landed Cost', digits_compute=dp.get_precision('Product Price')),
'final_cost': fields.function(_amount_final, multi='cost', string='Final Cost', type='float', digits_compute=dp.get_precision('Account'), store=True),
}
_defaults = {
'quantity': 1.0,
'weight': 1.0,
'volume': 1.0,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
haad/ansible | lib/ansible/modules/network/ordnance/ordnance_facts.py | 43 | 8505 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ordnance_facts
version_added: "2.3"
author: "Alexander Turner (alex.turner@ordnance.io)"
short_description: Collect facts from Ordnance Virtual Routers over SSH
description:
- Collects a base set of device facts from an Ordnance Virtual
router over SSH. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: '!config'
"""
EXAMPLES = """
---
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: RouterName
password: ordnance
transport: cli
---
# Collect all facts from the device
- ordnance_facts:
gather_subset: all
provider: "{{ cli }}"
# Collect only the config and default facts
- ordnance_facts:
gather_subset:
- config
provider: "{{ cli }}"
# Do not collect hardware facts
- ordnance_facts:
gather_subset:
- "!hardware"
provider: "{{ cli }}"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the virtual router
returned: always
type: list
# config
ansible_net_config:
description: The current active config from the virtual router
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the virtual router
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the virtual router
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the virtual router
returned: when interfaces is configured
type: dict
"""
import re
import traceback
from ansible.module_utils.network.common.network import NetworkModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import zip
from ansible.module_utils._text import to_native
class FactsBase(object):
def __init__(self, module):
self.module = module
self.facts = dict()
self.failed_commands = list()
def run(self, cmd):
try:
return self.module.cli(cmd)[0]
except Exception:
self.failed_commands.append(cmd)
class Config(FactsBase):
def populate(self):
data = self.run('show running-config')
if data:
self.facts['config'] = data
class Interfaces(FactsBase):
def populate(self):
self.facts['all_ipv4_addresses'] = list()
self.facts['all_ipv6_addresses'] = list()
data = self.run('show interfaces')
if data:
interfaces = self.parse_interfaces(data)
self.facts['interfaces'] = self.populate_interfaces(interfaces)
data = self.run('show ipv6 interface')
if data:
data = self.parse_interfaces(data)
self.populate_ipv6_interfaces(data)
def populate_interfaces(self, interfaces):
facts = dict()
for key, value in iteritems(interfaces):
intf = dict()
intf['description'] = self.parse_description(value)
intf['macaddress'] = self.parse_macaddress(value)
ipv4 = self.parse_ipv4(value)
intf['ipv4'] = ipv4
if ipv4:
self.add_ip_address(ipv4['address'], 'ipv4')
intf['duplex'] = self.parse_duplex(value)
intf['operstatus'] = self.parse_operstatus(value)
intf['type'] = self.parse_type(value)
facts[key] = intf
return facts
def populate_ipv6_interfaces(self, data):
for key, value in iteritems(data):
self.facts['interfaces'][key]['ipv6'] = list()
addresses = re.findall(r'\s+(.+), subnet', value, re.M)
subnets = re.findall(r', subnet is (.+)$', value, re.M)
for addr, subnet in zip(addresses, subnets):
ipv6 = dict(address=addr.strip(), subnet=subnet.strip())
self.add_ip_address(addr.strip(), 'ipv6')
self.facts['interfaces'][key]['ipv6'].append(ipv6)
def add_ip_address(self, address, family):
if family == 'ipv4':
self.facts['all_ipv4_addresses'].append(address)
else:
self.facts['all_ipv6_addresses'].append(address)
def parse_interfaces(self, data):
parsed = dict()
key = ''
for line in data.split('\n'):
if len(line) == 0:
continue
elif line[0] == ' ':
parsed[key] += '\n%s' % line
else:
match = re.match(r'^(\S+)', line)
if match:
key = match.group(1)
parsed[key] = line
return parsed
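# Illustrative: the block
#   'Gi0 is up, line protocol is up\n  Description: uplink'
# parses to {'Gi0': 'Gi0 is up, line protocol is up\n  Description: uplink'},
# i.e. one entry per interface with continuation lines folded back in.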
def parse_description(self, data):
match = re.search(r'Description: (.+)$', data, re.M)
if match:
return match.group(1)
def parse_macaddress(self, data):
match = re.search(r'address is (\S+)', data)
if match:
return match.group(1)
def parse_ipv4(self, data):
match = re.search(r'Internet address is (\S+)', data)
if match:
addr, masklen = match.group(1).split('/')
return dict(address=addr, masklen=int(masklen))
def parse_duplex(self, data):
match = re.search(r'(\w+) Duplex', data, re.M)
if match:
return match.group(1)
def parse_operstatus(self, data):
match = re.search(r'^(?:.+) is (.+),', data, re.M)
if match:
return match.group(1)
FACT_SUBSETS = dict(
interfaces=Interfaces,
config=Config,
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
module = NetworkModule(argument_spec=spec, supports_check_mode=True)
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Bad subset: %s' % subset)
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module))
failed_commands = list()
try:
for inst in instances:
inst.populate()
failed_commands.extend(inst.failed_commands)
facts.update(inst.facts)
except Exception as exc:
module.fail_json(msg=to_native(exc), exception=traceback.format_exc())
ansible_facts = dict()
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
module.exit_json(ansible_facts=ansible_facts, failed_commands=failed_commands)
if __name__ == '__main__':
main()
| gpl-3.0 |
dakrauth/picker | picker/migrations/0003_auto_20180801_0800.py | 1 | 5687 | # Generated by Django 2.0.7 on 2018-08-01 12:00
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import picker.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('picker', '0002_auto_20160720_0917'),
]
operations = [
migrations.CreateModel(
name='PickerFavorite',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='PickerGrouping',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=75, unique=True)),
('status', models.CharField(choices=[('ACTV', 'Active'), ('IDLE', 'Inactive')], default='ACTV', max_length=4)),
],
),
migrations.CreateModel(
name='PickerMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('ACTV', 'Active'), ('IDLE', 'Inactive'), ('SUSP', 'Suspended'), ('MNGT', 'Manager')], default='ACTV', max_length=4)),
('autopick', models.CharField(choices=[('NONE', 'None'), ('RAND', 'Random')], default='RAND', max_length=4)),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='members', to='picker.PickerGrouping')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='picker_memberships', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='league',
name='current_season',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='league',
name='slug',
field=models.SlugField(default=picker.models.temp_slug),
),
migrations.AddField(
model_name='pickset',
name='is_winner',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='game',
name='category',
field=models.CharField(choices=[('REG', 'Regular Season'), ('POST', 'Post Season'), ('PRE', 'Pre Season'), ('FRND', 'Friendly')], default='REG', max_length=4),
),
migrations.AlterField(
model_name='game',
name='status',
field=models.CharField(choices=[('U', 'Unplayed'), ('T', 'Tie'), ('H', 'Home Win'), ('A', 'Away Win'), ('X', 'Cancelled')], default='U', max_length=1),
),
migrations.AlterField(
model_name='game',
name='tv',
field=models.CharField(blank=True, max_length=8, verbose_name='TV'),
),
migrations.AlterField(
model_name='gameset',
name='byes',
field=models.ManyToManyField(blank=True, related_name='bye_set', to='picker.Team', verbose_name='Bye Teams'),
),
migrations.AlterField(
model_name='league',
name='logo',
field=models.ImageField(blank=True, null=True, upload_to='picker/logos'),
),
migrations.AlterField(
model_name='pickset',
name='strategy',
field=models.CharField(choices=[('USER', 'User'), ('RAND', 'Random'), ('HOME', 'Home Team'), ('BEST', 'Best Record')], default='USER', max_length=4),
),
migrations.AlterField(
model_name='playoffpicks',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='preference',
name='autopick',
field=models.CharField(choices=[('NONE', 'None'), ('RAND', 'Random')], default='RAND', max_length=4),
),
migrations.AlterField(
model_name='team',
name='logo',
field=models.ImageField(blank=True, null=True, upload_to='picker/logos'),
),
migrations.AlterUniqueTogether(
name='preference',
unique_together=set(),
),
migrations.AddField(
model_name='pickergrouping',
name='leagues',
field=models.ManyToManyField(blank=True, to='picker.League'),
),
migrations.AddField(
model_name='pickerfavorite',
name='league',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='picker.League'),
),
migrations.AddField(
model_name='pickerfavorite',
name='team',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='picker.Team'),
),
migrations.AddField(
model_name='pickerfavorite',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.RemoveField(
model_name='preference',
name='favorite_team',
),
migrations.RemoveField(
model_name='preference',
name='league',
),
migrations.RemoveField(
model_name='preference',
name='status',
),
]
| mit |
cloudControl/libcloud | libcloud/common/aws.py | 12 | 14885 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from datetime import datetime
import hashlib
import hmac
import time
from hashlib import sha256
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.common.base import ConnectionUserAndKey, XmlResponse, BaseDriver
from libcloud.common.types import InvalidCredsError, MalformedResponseError
from libcloud.utils.py3 import b, httplib, urlquote
from libcloud.utils.xml import findtext, findall
__all__ = [
'AWSBaseResponse',
'AWSGenericResponse',
'AWSTokenConnection',
'SignedAWSConnection',
'AWSRequestSignerAlgorithmV2',
'AWSRequestSignerAlgorithmV4',
'AWSDriver'
]
DEFAULT_SIGNATURE_VERSION = '2'
class AWSBaseResponse(XmlResponse):
namespace = None
def _parse_error_details(self, element):
"""
Parse code and message from the provided error element.
:return: ``tuple`` with two elements: (code, message)
:rtype: ``tuple``
"""
code = findtext(element=element, xpath='Code',
namespace=self.namespace)
message = findtext(element=element, xpath='Message',
namespace=self.namespace)
return code, message
class AWSGenericResponse(AWSBaseResponse):
# There are multiple error messages in AWS, but they all have an Error node
# with Code and Message child nodes. Xpath to select them
# None if the root node *is* the Error node
xpath = None
# This dict maps <Error><Code>CodeName</Code></Error> to a specific
# exception class that is raised immediately.
# If a custom exception class is not defined, errors are accumulated and
# returned from the parse_error method.
    exceptions = {}
def success(self):
return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]
def parse_error(self):
context = self.connection.context
status = int(self.status)
# FIXME: Probably ditch this as the forbidden message will have
# corresponding XML.
if status == httplib.FORBIDDEN:
if not self.body:
raise InvalidCredsError(str(self.status) + ': ' + self.error)
else:
raise InvalidCredsError(self.body)
try:
body = ET.XML(self.body)
except Exception:
raise MalformedResponseError('Failed to parse XML',
body=self.body,
driver=self.connection.driver)
if self.xpath:
errs = findall(element=body, xpath=self.xpath,
namespace=self.namespace)
else:
errs = [body]
msgs = []
for err in errs:
code, message = self._parse_error_details(element=err)
exceptionCls = self.exceptions.get(code, None)
if exceptionCls is None:
msgs.append('%s: %s' % (code, message))
continue
# Custom exception class is defined, immediately throw an exception
params = {}
if hasattr(exceptionCls, 'kwargs'):
for key in exceptionCls.kwargs:
if key in context:
params[key] = context[key]
raise exceptionCls(value=message, driver=self.connection.driver,
**params)
return "\n".join(msgs)
class AWSTokenConnection(ConnectionUserAndKey):
def __init__(self, user_id, key, secure=True,
host=None, port=None, url=None, timeout=None, token=None,
retry_delay=None, backoff=None):
self.token = token
super(AWSTokenConnection, self).__init__(user_id, key, secure=secure,
host=host, port=port, url=url,
timeout=timeout,
retry_delay=retry_delay,
backoff=backoff)
def add_default_params(self, params):
# Even though we are adding it to the headers, we need it here too
# so that the token is added to the signature.
if self.token:
params['x-amz-security-token'] = self.token
return super(AWSTokenConnection, self).add_default_params(params)
def add_default_headers(self, headers):
if self.token:
headers['x-amz-security-token'] = self.token
return super(AWSTokenConnection, self).add_default_headers(headers)
class AWSRequestSigner(object):
"""
Class which handles signing the outgoing AWS requests.
"""
def __init__(self, access_key, access_secret, version, connection):
"""
:param access_key: Access key.
:type access_key: ``str``
:param access_secret: Access secret.
:type access_secret: ``str``
:param version: API version.
:type version: ``str``
:param connection: Connection instance.
:type connection: :class:`Connection`
"""
self.access_key = access_key
self.access_secret = access_secret
self.version = version
# TODO: Remove cycling dependency between connection and signer
self.connection = connection
def get_request_params(self, params, method='GET', path='/'):
return params
def get_request_headers(self, params, headers, method='GET', path='/'):
return params, headers
class AWSRequestSignerAlgorithmV2(AWSRequestSigner):
def get_request_params(self, params, method='GET', path='/'):
params['SignatureVersion'] = '2'
params['SignatureMethod'] = 'HmacSHA256'
params['AWSAccessKeyId'] = self.access_key
params['Version'] = self.version
params['Timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%SZ',
time.gmtime())
params['Signature'] = self._get_aws_auth_param(
params=params,
secret_key=self.access_secret,
path=path)
return params
def _get_aws_auth_param(self, params, secret_key, path='/'):
"""
Creates the signature required for AWS, per
http://bit.ly/aR7GaQ [docs.amazonwebservices.com]:
StringToSign = HTTPVerb + "\n" +
ValueOfHostHeaderInLowercase + "\n" +
HTTPRequestURI + "\n" +
CanonicalizedQueryString <from the preceding step>
"""
connection = self.connection
keys = list(params.keys())
keys.sort()
pairs = []
for key in keys:
value = str(params[key])
pairs.append(urlquote(key, safe='') + '=' +
urlquote(value, safe='-_~'))
qs = '&'.join(pairs)
hostname = connection.host
if (connection.secure and connection.port != 443) or \
(not connection.secure and connection.port != 80):
hostname += ':' + str(connection.port)
string_to_sign = '\n'.join(('GET', hostname, path, qs))
b64_hmac = base64.b64encode(
hmac.new(b(secret_key), b(string_to_sign),
digestmod=sha256).digest()
)
return b64_hmac.decode('utf-8')
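def _demo_v2_string_to_sign():
    # Editor-added illustrative sketch, not part of libcloud: shows the
    # Signature V2 string-to-sign layout built above, using hypothetical
    # host, path, secret and parameter values.
    params = {'Action': 'DescribeInstances', 'Version': '2013-10-15'}
    pairs = [urlquote(k, safe='') + '=' + urlquote(str(params[k]), safe='-_~')
             for k in sorted(params)]
    string_to_sign = '\n'.join(('GET', 'ec2.example.com', '/',
                                '&'.join(pairs)))
    b64_hmac = base64.b64encode(
        hmac.new(b('hypothetical-secret'), b(string_to_sign),
                 digestmod=sha256).digest())
    return b64_hmac.decode('utf-8')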
class AWSRequestSignerAlgorithmV4(AWSRequestSigner):
def get_request_params(self, params, method='GET', path='/'):
params['Version'] = self.version
return params
def get_request_headers(self, params, headers, method='GET', path='/'):
now = datetime.utcnow()
headers['X-AMZ-Date'] = now.strftime('%Y%m%dT%H%M%SZ')
headers['Authorization'] = \
self._get_authorization_v4_header(params=params, headers=headers,
dt=now, method=method, path=path)
return params, headers
def _get_authorization_v4_header(self, params, headers, dt, method='GET',
path='/'):
assert method == 'GET', 'AWS Signature V4 not implemented for ' \
'other methods than GET'
credentials_scope = self._get_credential_scope(dt=dt)
signed_headers = self._get_signed_headers(headers=headers)
signature = self._get_signature(params=params, headers=headers,
dt=dt, method=method, path=path)
return 'AWS4-HMAC-SHA256 Credential=%(u)s/%(c)s, ' \
'SignedHeaders=%(sh)s, Signature=%(s)s' % {
'u': self.access_key,
'c': credentials_scope,
'sh': signed_headers,
's': signature
}
def _get_signature(self, params, headers, dt, method, path):
key = self._get_key_to_sign_with(dt)
string_to_sign = self._get_string_to_sign(params=params,
headers=headers, dt=dt,
method=method, path=path)
return _sign(key=key, msg=string_to_sign, hex=True)
def _get_key_to_sign_with(self, dt):
return _sign(
_sign(
_sign(
_sign(('AWS4' + self.access_secret),
dt.strftime('%Y%m%d')),
self.connection.driver.region_name),
self.connection.service_name),
'aws4_request')
def _get_string_to_sign(self, params, headers, dt, method, path):
canonical_request = self._get_canonical_request(params=params,
headers=headers,
method=method,
path=path)
return '\n'.join(['AWS4-HMAC-SHA256',
dt.strftime('%Y%m%dT%H%M%SZ'),
self._get_credential_scope(dt),
_hash(canonical_request)])
def _get_credential_scope(self, dt):
return '/'.join([dt.strftime('%Y%m%d'),
self.connection.driver.region_name,
self.connection.service_name,
'aws4_request'])
def _get_signed_headers(self, headers):
return ';'.join([k.lower() for k in sorted(headers.keys())])
def _get_canonical_headers(self, headers):
return '\n'.join([':'.join([k.lower(), v.strip()])
for k, v in sorted(headers.items())]) + '\n'
def _get_payload_hash(self):
return _hash('')
def _get_request_params(self, params):
# For self.method == GET
return '&'.join(["%s=%s" %
(urlquote(k, safe=''), urlquote(str(v), safe='~'))
for k, v in sorted(params.items())])
def _get_canonical_request(self, params, headers, method, path):
return '\n'.join([
method,
path,
self._get_request_params(params),
self._get_canonical_headers(headers),
self._get_signed_headers(headers),
self._get_payload_hash()
])
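def _demo_v4_signing_key():
    # Editor-added illustrative sketch, not part of libcloud: unrolls the
    # chained HMAC-SHA256 key derivation performed by _get_key_to_sign_with
    # above, using hypothetical secret, date, region and service values.
    k_date = _sign('AWS4' + 'hypothetical-secret', '20140101')
    k_region = _sign(k_date, 'us-east-1')
    k_service = _sign(k_region, 'ec2')
    return _sign(k_service, 'aws4_request')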
class SignedAWSConnection(AWSTokenConnection):
def __init__(self, user_id, key, secure=True, host=None, port=None,
url=None, timeout=None, token=None, retry_delay=None,
backoff=None, signature_version=DEFAULT_SIGNATURE_VERSION):
super(SignedAWSConnection, self).__init__(user_id=user_id, key=key,
secure=secure, host=host,
port=port, url=url,
timeout=timeout, token=token,
retry_delay=retry_delay,
backoff=backoff)
self.signature_version = str(signature_version)
if self.signature_version == '2':
signer_cls = AWSRequestSignerAlgorithmV2
elif signature_version == '4':
signer_cls = AWSRequestSignerAlgorithmV4
else:
raise ValueError('Unsupported signature_version: %s' %
(signature_version))
self.signer = signer_cls(access_key=self.user_id,
access_secret=self.key,
version=self.version,
connection=self)
def add_default_params(self, params):
params = self.signer.get_request_params(params=params,
method=self.method,
path=self.action)
return params
def pre_connect_hook(self, params, headers):
params, headers = self.signer.get_request_headers(params=params,
headers=headers,
method=self.method,
path=self.action)
return params, headers
def _sign(key, msg, hex=False):
if hex:
return hmac.new(b(key), b(msg), hashlib.sha256).hexdigest()
else:
return hmac.new(b(key), b(msg), hashlib.sha256).digest()
def _hash(msg):
return hashlib.sha256(b(msg)).hexdigest()
class AWSDriver(BaseDriver):
def __init__(self, key, secret=None, secure=True, host=None, port=None,
api_version=None, region=None, token=None, **kwargs):
self.token = token
super(AWSDriver, self).__init__(key, secret=secret, secure=secure,
host=host, port=port,
api_version=api_version, region=region,
token=token, **kwargs)
def _ex_connection_class_kwargs(self):
kwargs = super(AWSDriver, self)._ex_connection_class_kwargs()
kwargs['token'] = self.token
return kwargs
| apache-2.0 |
rahimnathwani/ud858 | Lesson_5/00_Conference_Central/main.py | 39 | 1248 | #!/usr/bin/env python
"""
main.py -- Udacity conference server-side Python App Engine
HTTP controller handlers for memcache & task queue access
$Id$
created by wesc on 2014 may 24
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from conference import ConferenceApi
class SetAnnouncementHandler(webapp2.RequestHandler):
def get(self):
"""Set Announcement in Memcache."""
# TODO 1
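        # Editor-added sketch (illustrative only; the course leaves TODO 1 as
        # an exercise): a typical solution delegates to a ConferenceApi
        # helper that builds the announcement string and stores it in
        # memcache, e.g.
        #     ConferenceApi._cacheAnnouncement()
        # The helper name above is an assumption, not part of this file.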
class SendConfirmationEmailHandler(webapp2.RequestHandler):
def post(self):
"""Send email confirming Conference creation."""
mail.send_mail(
'noreply@%s.appspotmail.com' % (
app_identity.get_application_id()), # from
self.request.get('email'), # to
'You created a new Conference!', # subj
            'Hi, you have created the following '      # body
            'conference:\r\n\r\n%s' % self.request.get(
'conferenceInfo')
)
app = webapp2.WSGIApplication([
('/crons/set_announcement', SetAnnouncementHandler),
('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
], debug=True)
| gpl-3.0 |
rockyzhang/zhangyanhit-python-for-android-mips | python3-alpha/python3-src/Lib/test/profilee.py | 398 | 3041 | """
Input for test_profile.py and test_cprofile.py.
IMPORTANT: This stuff is touchy. If you modify anything above the
test class you'll have to regenerate the stats by running the two
test files.
*ALL* NUMBERS in the expected output are relevant. If you change
the formatting of pstats, please don't just regenerate the expected
output without checking very carefully that not a single number has
changed.
"""
import sys
# In order to have reproducible time, we simulate a timer in the global
# variable 'TICKS', which represents simulated time in milliseconds.
# (We can't use a helper function increment the timer since it would be
# included in the profile and would appear to consume all the time.)
TICKS = 42000
def timer():
return TICKS
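# Editor-added note (illustrative): the test drivers are expected to plug the
# simulated clock in roughly like
#     import profile
#     p = profile.Profile(timer)   # profile.Profile accepts a timer function
#     p.runctx('testfunc()', globals(), locals())
# so every increment of TICKS shows up as deterministic elapsed time. Per the
# warning above, adding real code to this module would shift the line numbers
# baked into the expected pstats output.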
def testfunc():
# 1 call
# 1000 ticks total: 270 ticks local, 730 ticks in subfunctions
global TICKS
TICKS += 99
helper() # 300
helper() # 300
TICKS += 171
factorial(14) # 130
def factorial(n):
# 23 calls total
# 170 ticks total, 150 ticks local
# 3 primitive calls, 130, 20 and 20 ticks total
# including 116, 17, 17 ticks local
global TICKS
if n > 0:
TICKS += n
return mul(n, factorial(n-1))
else:
TICKS += 11
return 1
def mul(a, b):
# 20 calls
# 1 tick, local
global TICKS
TICKS += 1
return a * b
def helper():
# 2 calls
    # 300 ticks total: 20 ticks local, 280 ticks in subfunctions
global TICKS
TICKS += 1
helper1() # 30
TICKS += 2
helper1() # 30
TICKS += 6
helper2() # 50
TICKS += 3
helper2() # 50
TICKS += 2
helper2() # 50
TICKS += 5
helper2_indirect() # 70
TICKS += 1
def helper1():
# 4 calls
# 30 ticks total: 29 ticks local, 1 tick in subfunctions
global TICKS
TICKS += 10
hasattr(C(), "foo") # 1
TICKS += 19
lst = []
lst.append(42) # 0
sys.exc_info() # 0
def helper2_indirect():
helper2() # 50
factorial(3) # 20
def helper2():
# 8 calls
    # 50 ticks total: 39 ticks local, 11 ticks in subfunctions
global TICKS
TICKS += 11
hasattr(C(), "bar") # 1
TICKS += 13
subhelper() # 10
TICKS += 15
def subhelper():
# 8 calls
# 10 ticks total: 8 ticks local, 2 ticks in subfunctions
global TICKS
TICKS += 2
for i in range(2): # 0
try:
C().foo # 1 x 2
except AttributeError:
TICKS += 3 # 3 x 2
class C:
def __getattr__(self, name):
# 28 calls
# 1 tick, local
global TICKS
TICKS += 1
raise AttributeError
| apache-2.0 |
yask123/django | tests/servers/tests.py | 257 | 5907 | # -*- encoding: utf-8 -*-
"""
Tests for django.core.servers.
"""
from __future__ import unicode_literals
import contextlib
import os
import socket
from django.core.exceptions import ImproperlyConfigured
from django.test import LiveServerTestCase, override_settings
from django.utils._os import upath
from django.utils.http import urlencode
from django.utils.six import text_type
from django.utils.six.moves.urllib.error import HTTPError
from django.utils.six.moves.urllib.request import urlopen
from .models import Person
TEST_ROOT = os.path.dirname(upath(__file__))
TEST_SETTINGS = {
'MEDIA_URL': '/media/',
'MEDIA_ROOT': os.path.join(TEST_ROOT, 'media'),
'STATIC_URL': '/static/',
'STATIC_ROOT': os.path.join(TEST_ROOT, 'static'),
}
@override_settings(ROOT_URLCONF='servers.urls', **TEST_SETTINGS)
class LiveServerBase(LiveServerTestCase):
available_apps = [
'servers',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
]
fixtures = ['testdata.json']
def urlopen(self, url):
return urlopen(self.live_server_url + url)
class LiveServerAddress(LiveServerBase):
"""
Ensure that the address set in the environment variable is valid.
Refs #2879.
"""
@classmethod
def setUpClass(cls):
# Backup original environment variable
address_predefined = 'DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ
old_address = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS')
# Just the host is not accepted
cls.raises_exception('localhost', ImproperlyConfigured)
# The host must be valid
cls.raises_exception('blahblahblah:8081', socket.error)
# The list of ports must be in a valid format
cls.raises_exception('localhost:8081,', ImproperlyConfigured)
cls.raises_exception('localhost:8081,blah', ImproperlyConfigured)
cls.raises_exception('localhost:8081-', ImproperlyConfigured)
cls.raises_exception('localhost:8081-blah', ImproperlyConfigured)
cls.raises_exception('localhost:8081-8082-8083', ImproperlyConfigured)
# Restore original environment variable
if address_predefined:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = old_address
else:
del os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS']
# put it in a list to prevent descriptor lookups in test
cls.live_server_url_test = [cls.live_server_url]
@classmethod
def tearDownClass(cls):
# skip it, as setUpClass doesn't call its parent either
pass
@classmethod
def raises_exception(cls, address, exception):
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = address
try:
super(LiveServerAddress, cls).setUpClass()
raise Exception("The line above should have raised an exception")
except exception:
pass
finally:
super(LiveServerAddress, cls).tearDownClass()
def test_live_server_url_is_class_property(self):
self.assertIsInstance(self.live_server_url_test[0], text_type)
self.assertEqual(self.live_server_url_test[0], self.live_server_url)
class LiveServerViews(LiveServerBase):
def test_404(self):
"""
Ensure that the LiveServerTestCase serves 404s.
Refs #2879.
"""
try:
self.urlopen('/')
except HTTPError as err:
self.assertEqual(err.code, 404, 'Expected 404 response')
else:
self.fail('Expected 404 response')
def test_view(self):
"""
Ensure that the LiveServerTestCase serves views.
Refs #2879.
"""
with contextlib.closing(self.urlopen('/example_view/')) as f:
self.assertEqual(f.read(), b'example view')
def test_static_files(self):
"""
Ensure that the LiveServerTestCase serves static files.
Refs #2879.
"""
with contextlib.closing(self.urlopen('/static/example_static_file.txt')) as f:
self.assertEqual(f.read().rstrip(b'\r\n'), b'example static file')
def test_no_collectstatic_emulation(self):
"""
Test that LiveServerTestCase reports a 404 status code when HTTP client
tries to access a static file that isn't explicitly put under
STATIC_ROOT.
"""
try:
self.urlopen('/static/another_app/another_app_static_file.txt')
except HTTPError as err:
self.assertEqual(err.code, 404, 'Expected 404 response')
else:
            self.fail('Expected 404 response')
def test_media_files(self):
"""
Ensure that the LiveServerTestCase serves media files.
Refs #2879.
"""
with contextlib.closing(self.urlopen('/media/example_media_file.txt')) as f:
self.assertEqual(f.read().rstrip(b'\r\n'), b'example media file')
def test_environ(self):
with contextlib.closing(self.urlopen('/environ_view/?%s' % urlencode({'q': 'тест'}))) as f:
self.assertIn(b"QUERY_STRING: 'q=%D1%82%D0%B5%D1%81%D1%82'", f.read())
class LiveServerDatabase(LiveServerBase):
def test_fixtures_loaded(self):
"""
Ensure that fixtures are properly loaded and visible to the
live server thread.
Refs #2879.
"""
with contextlib.closing(self.urlopen('/model_view/')) as f:
self.assertEqual(f.read().splitlines(), [b'jane', b'robert'])
def test_database_writes(self):
"""
Ensure that data written to the database by a view can be read.
Refs #2879.
"""
self.urlopen('/create_model_instance/')
self.assertQuerysetEqual(
Person.objects.all().order_by('pk'),
['jane', 'robert', 'emily'],
lambda b: b.name
)
| bsd-3-clause |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/newrelic-2.46.0.37/newrelic/hooks/memcache_memcache.py | 5 | 2121 | import newrelic.api.memcache_trace
def instrument(module):
if hasattr(module.Client, 'add'):
newrelic.api.memcache_trace.wrap_memcache_trace(
module, 'Client.add', 'add')
if hasattr(module.Client, 'append'):
newrelic.api.memcache_trace.wrap_memcache_trace(
module, 'Client.append', 'replace')
if hasattr(module.Client, 'cas'):
newrelic.api.memcache_trace.wrap_memcache_trace(
module, 'Client.cas', 'replace')
if hasattr(module.Client, 'decr'):
newrelic.api.memcache_trace.wrap_memcache_trace(
module, 'Client.decr', 'decr')
if hasattr(module.Client, 'delete'):
newrelic.api.memcache_trace.wrap_memcache_trace(
module, 'Client.delete', 'delete')
if hasattr(module.Client, 'delete_multi'):
newrelic.api.memcache_trace.wrap_memcache_trace(
module, 'Client.delete_multi', 'delete')
if hasattr(module.Client, 'get'):
newrelic.api.memcache_trace.wrap_memcache_trace(
module, 'Client.get', 'get')
if hasattr(module.Client, 'gets'):
newrelic.api.memcache_trace.wrap_memcache_trace(
module, 'Client.gets', 'get')
if hasattr(module.Client, 'get_multi'):
newrelic.api.memcache_trace.wrap_memcache_trace(
module, 'Client.get_multi', 'get')
if hasattr(module.Client, 'incr'):
newrelic.api.memcache_trace.wrap_memcache_trace(
module, 'Client.incr', 'incr')
if hasattr(module.Client, 'prepend'):
newrelic.api.memcache_trace.wrap_memcache_trace(
module, 'Client.prepend', 'replace')
if hasattr(module.Client, 'replace'):
newrelic.api.memcache_trace.wrap_memcache_trace(
module, 'Client.replace', 'replace')
if hasattr(module.Client, 'set'):
newrelic.api.memcache_trace.wrap_memcache_trace(
module, 'Client.set', 'set')
if hasattr(module.Client, 'set_multi'):
newrelic.api.memcache_trace.wrap_memcache_trace(
module, 'Client.set_multi', 'set')
| agpl-3.0 |
harshilasu/GraphicMelon | y/google-cloud-sdk/.install/.backup/lib/apiclient/oauth.py | 12 | 14238 | # Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth.
"""
import copy
import httplib2
import logging
import oauth2 as oauth
import urllib
import urlparse
from oauth2client.client import Credentials
from oauth2client.client import Flow
from oauth2client.client import Storage
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
class Error(Exception):
"""Base error for this module."""
pass
class RequestError(Error):
"""Error occurred during request."""
pass
class MissingParameter(Error):
pass
class CredentialsInvalidError(Error):
pass
def _abstract():
raise NotImplementedError('You need to override this function')
def _oauth_uri(name, discovery, params):
"""Look up the OAuth URI from the discovery
document and add query parameters based on
params.
name - The name of the OAuth URI to lookup, one
of 'request', 'access', or 'authorize'.
discovery - Portion of discovery document the describes
the OAuth endpoints.
params - Dictionary that is used to form the query parameters
for the specified URI.
"""
if name not in ['request', 'access', 'authorize']:
raise KeyError(name)
keys = discovery[name]['parameters'].keys()
query = {}
for key in keys:
if key in params:
query[key] = params[key]
return discovery[name]['url'] + '?' + urllib.urlencode(query)
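# Editor-added illustrative sketch (hypothetical discovery snippet): given
#     discovery = {'request': {'url': 'https://example.com/request',
#                              'parameters': {'scope': {}}}}
# the call _oauth_uri('request', discovery, {'scope': 'mail', 'junk': 1})
# returns 'https://example.com/request?scope=mail' -- parameters not listed
# in the discovery document are silently dropped.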
class OAuthCredentials(Credentials):
"""Credentials object for OAuth 1.0a
"""
def __init__(self, consumer, token, user_agent):
"""
consumer - An instance of oauth.Consumer.
token - An instance of oauth.Token constructed with
the access token and secret.
user_agent - The HTTP User-Agent to provide for this application.
"""
self.consumer = consumer
self.token = token
self.user_agent = user_agent
self.store = None
# True if the credentials have been revoked
self._invalid = False
@property
def invalid(self):
"""True if the credentials are invalid, such as being revoked."""
return getattr(self, "_invalid", False)
def set_store(self, store):
"""Set the storage for the credential.
Args:
store: callable, a callable that when passed a Credential
will store the credential back to where it came from.
This is needed to store the latest access_token if it
has been revoked.
"""
self.store = store
def __getstate__(self):
"""Trim the state down to something that can be pickled."""
d = copy.copy(self.__dict__)
del d['store']
return d
def __setstate__(self, state):
"""Reconstitute the state of the object from being pickled."""
self.__dict__.update(state)
self.store = None
def authorize(self, http):
"""Authorize an httplib2.Http instance with these Credentials
Args:
http - An instance of httplib2.Http
or something that acts like it.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = credentials.authorize(h)
You can't create a new OAuth
    subclass of httplib2.Authentication because
it never gets passed the absolute URI, which is
needed for signing. So instead we have to overload
'request' with a closure that adds in the
Authorization header and then calls the original version
of 'request()'.
"""
request_orig = http.request
signer = oauth.SignatureMethod_HMAC_SHA1()
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
"""Modify the request headers to add the appropriate
Authorization header."""
response_code = 302
http.follow_redirects = False
while response_code in [301, 302]:
req = oauth.Request.from_consumer_and_token(
self.consumer, self.token, http_method=method, http_url=uri)
req.sign_request(signer, self.consumer, self.token)
if headers is None:
headers = {}
headers.update(req.to_header())
if 'user-agent' in headers:
headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
else:
headers['user-agent'] = self.user_agent
resp, content = request_orig(uri, method, body, headers,
redirections, connection_type)
response_code = resp.status
if response_code in [301, 302]:
uri = resp['location']
# Update the stored credential if it becomes invalid.
if response_code == 401:
logging.info('Access token no longer valid: %s' % content)
self._invalid = True
if self.store is not None:
self.store(self)
raise CredentialsInvalidError("Credentials are no longer valid.")
return resp, content
http.request = new_request
return http
class TwoLeggedOAuthCredentials(Credentials):
"""Two Legged Credentials object for OAuth 1.0a.
The Two Legged object is created directly, not from a flow. Once you
  authorize an httplib2.Http instance you can change the requestor and that
  change will propagate to the authorized httplib2.Http instance. For example:
http = httplib2.Http()
http = credentials.authorize(http)
credentials.requestor = 'foo@example.info'
http.request(...)
credentials.requestor = 'bar@example.info'
http.request(...)
"""
def __init__(self, consumer_key, consumer_secret, user_agent):
"""
Args:
consumer_key: string, An OAuth 1.0 consumer key
consumer_secret: string, An OAuth 1.0 consumer secret
user_agent: string, The HTTP User-Agent to provide for this application.
"""
self.consumer = oauth.Consumer(consumer_key, consumer_secret)
self.user_agent = user_agent
self.store = None
# email address of the user to act on the behalf of.
self._requestor = None
@property
def invalid(self):
"""True if the credentials are invalid, such as being revoked.
Always returns False for Two Legged Credentials.
"""
return False
def getrequestor(self):
return self._requestor
def setrequestor(self, email):
self._requestor = email
requestor = property(getrequestor, setrequestor, None,
'The email address of the user to act on behalf of')
def set_store(self, store):
"""Set the storage for the credential.
Args:
store: callable, a callable that when passed a Credential
will store the credential back to where it came from.
This is needed to store the latest access_token if it
has been revoked.
"""
self.store = store
def __getstate__(self):
"""Trim the state down to something that can be pickled."""
d = copy.copy(self.__dict__)
del d['store']
return d
def __setstate__(self, state):
"""Reconstitute the state of the object from being pickled."""
self.__dict__.update(state)
self.store = None
def authorize(self, http):
"""Authorize an httplib2.Http instance with these Credentials
Args:
http - An instance of httplib2.Http
or something that acts like it.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = credentials.authorize(h)
You can't create a new OAuth
    subclass of httplib2.Authentication because
it never gets passed the absolute URI, which is
needed for signing. So instead we have to overload
'request' with a closure that adds in the
Authorization header and then calls the original version
of 'request()'.
"""
request_orig = http.request
signer = oauth.SignatureMethod_HMAC_SHA1()
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
"""Modify the request headers to add the appropriate
Authorization header."""
response_code = 302
http.follow_redirects = False
while response_code in [301, 302]:
# add in xoauth_requestor_id=self._requestor to the uri
if self._requestor is None:
raise MissingParameter(
'Requestor must be set before using TwoLeggedOAuthCredentials')
parsed = list(urlparse.urlparse(uri))
q = parse_qsl(parsed[4])
q.append(('xoauth_requestor_id', self._requestor))
parsed[4] = urllib.urlencode(q)
uri = urlparse.urlunparse(parsed)
req = oauth.Request.from_consumer_and_token(
self.consumer, None, http_method=method, http_url=uri)
req.sign_request(signer, self.consumer, None)
if headers is None:
headers = {}
headers.update(req.to_header())
if 'user-agent' in headers:
headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
else:
headers['user-agent'] = self.user_agent
resp, content = request_orig(uri, method, body, headers,
redirections, connection_type)
response_code = resp.status
if response_code in [301, 302]:
uri = resp['location']
if response_code == 401:
logging.info('Access token no longer valid: %s' % content)
# Do not store the invalid state of the Credentials because
# being 2LO they could be reinstated in the future.
raise CredentialsInvalidError("Credentials are invalid.")
return resp, content
http.request = new_request
return http
class FlowThreeLegged(Flow):
"""Does the Three Legged Dance for OAuth 1.0a.
"""
def __init__(self, discovery, consumer_key, consumer_secret, user_agent,
**kwargs):
"""
discovery - Section of the API discovery document that describes
the OAuth endpoints.
consumer_key - OAuth consumer key
consumer_secret - OAuth consumer secret
user_agent - The HTTP User-Agent that identifies the application.
**kwargs - The keyword arguments are all optional and required
parameters for the OAuth calls.
"""
self.discovery = discovery
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.user_agent = user_agent
self.params = kwargs
self.request_token = {}
required = {}
for uriinfo in discovery.itervalues():
for name, value in uriinfo['parameters'].iteritems():
if value['required'] and not name.startswith('oauth_'):
required[name] = 1
for key in required.iterkeys():
if key not in self.params:
raise MissingParameter('Required parameter %s not supplied' % key)
def step1_get_authorize_url(self, oauth_callback='oob'):
"""Returns a URI to redirect to the provider.
oauth_callback - Either the string 'oob' for a non-web-based application,
or a URI that handles the callback from the authorization
server.
If oauth_callback is 'oob' then pass in the
generated verification code to step2_exchange,
otherwise pass in the query parameters received
at the callback uri to step2_exchange.
"""
consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)
client = oauth.Client(consumer)
headers = {
'user-agent': self.user_agent,
'content-type': 'application/x-www-form-urlencoded'
}
body = urllib.urlencode({'oauth_callback': oauth_callback})
uri = _oauth_uri('request', self.discovery, self.params)
resp, content = client.request(uri, 'POST', headers=headers,
body=body)
if resp['status'] != '200':
logging.error('Failed to retrieve temporary authorization: %s', content)
raise RequestError('Invalid response %s.' % resp['status'])
self.request_token = dict(parse_qsl(content))
auth_params = copy.copy(self.params)
auth_params['oauth_token'] = self.request_token['oauth_token']
return _oauth_uri('authorize', self.discovery, auth_params)
def step2_exchange(self, verifier):
"""Exhanges an authorized request token
for OAuthCredentials.
Args:
verifier: string, dict - either the verifier token, or a dictionary
of the query parameters to the callback, which contains
the oauth_verifier.
Returns:
The Credentials object.
"""
if not (isinstance(verifier, str) or isinstance(verifier, unicode)):
verifier = verifier['oauth_verifier']
token = oauth.Token(
self.request_token['oauth_token'],
self.request_token['oauth_token_secret'])
token.set_verifier(verifier)
consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)
client = oauth.Client(consumer, token)
headers = {
'user-agent': self.user_agent,
'content-type': 'application/x-www-form-urlencoded'
}
uri = _oauth_uri('access', self.discovery, self.params)
resp, content = client.request(uri, 'POST', headers=headers)
if resp['status'] != '200':
logging.error('Failed to retrieve access token: %s', content)
raise RequestError('Invalid response %s.' % resp['status'])
oauth_params = dict(parse_qsl(content))
token = oauth.Token(
oauth_params['oauth_token'],
oauth_params['oauth_token_secret'])
return OAuthCredentials(consumer, token, self.user_agent)
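# Editor-added usage sketch (hypothetical discovery document and keys): the
# three-legged dance above is driven roughly as
#     flow = FlowThreeLegged(discovery, 'consumer-key', 'consumer-secret',
#                            'my-agent/1.0', scope='mail')
#     url = flow.step1_get_authorize_url()      # redirect the user here
#     credentials = flow.step2_exchange(verifier_from_callback)
#     http = credentials.authorize(httplib2.Http())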
| gpl-3.0 |
cloud9UG/odoo | addons/l10n_in_hr_payroll/report/report_hr_yearly_salary_detail.py | 374 | 6855 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import datetime
from openerp.report import report_sxw
from openerp.osv import osv
class employees_yearly_salary_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(employees_yearly_salary_report, self).__init__(cr, uid, name, context)
self.localcontext.update({
'time': time,
'get_employee': self.get_employee,
'get_employee_detail': self.get_employee_detail,
'cal_monthly_amt': self.cal_monthly_amt,
'get_periods': self.get_periods,
'get_total': self.get_total,
'get_allow': self.get_allow,
'get_deduct': self.get_deduct,
})
self.context = context
def get_periods(self, form):
self.mnths = []
# Get start year-month-date and end year-month-date
first_year = int(form['date_from'][0:4])
last_year = int(form['date_to'][0:4])
first_month = int(form['date_from'][5:7])
last_month = int(form['date_to'][5:7])
no_months = (last_year-first_year) * 12 + last_month - first_month + 1
current_month = first_month
current_year = first_year
# Get name of the months from integer
mnth_name = []
for count in range(0, no_months):
m = datetime.date(current_year, current_month, 1).strftime('%b')
mnth_name.append(m)
self.mnths.append(str(current_month) + '-' + str(current_year))
if current_month == 12:
current_month = 0
current_year = last_year
current_month = current_month + 1
for c in range(0, (12-no_months)):
mnth_name.append('')
self.mnths.append('')
return [mnth_name]
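    # Editor-added illustrative note: for date_from='2013-11-01' and
    # date_to='2014-02-28' the loop above yields no_months == 4, month names
    # ['Nov', 'Dec', 'Jan', 'Feb'] and self.mnths == ['11-2013', '12-2013',
    # '1-2014', '2-2014'], both padded with '' up to twelve entries.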
def get_employee(self, form):
return self.pool.get('hr.employee').browse(self.cr,self.uid, form.get('employee_ids', []), context=self.context)
def get_employee_detail(self, form, obj):
self.allow_list = []
self.deduct_list = []
self.total = 0.00
gross = False
net = False
payslip_lines = self.cal_monthly_amt(form, obj.id)
for line in payslip_lines:
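            # Note (editor-added): the unusual loop below rebinds line[0] to
            # each salary-category row of the current payslip block in turn.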
for line[0] in line:
if line[0][0] == "Gross":
gross = line[0]
elif line[0][0] == "Net":
net = line[0]
elif line[0][13] > 0.0 and line[0][0] != "Net":
self.total += line[0][len(line[0])-1]
self.allow_list.append(line[0])
elif line[0][13] < 0.0:
self.total += line[0][len(line[0])-1]
self.deduct_list.append(line[0])
if gross:
self.allow_list.append(gross)
if net:
self.deduct_list.append(net)
return None
def cal_monthly_amt(self, form, emp_id):
category_obj = self.pool.get('hr.salary.rule.category')
result = []
res = []
salaries = {}
self.cr.execute('''SELECT rc.code, pl.name, sum(pl.total), \
to_char(date_to,'mm-yyyy') as to_date FROM hr_payslip_line as pl \
LEFT JOIN hr_salary_rule_category AS rc on (pl.category_id = rc.id) \
LEFT JOIN hr_payslip as p on pl.slip_id = p.id \
LEFT JOIN hr_employee as emp on emp.id = p.employee_id \
WHERE p.employee_id = %s \
GROUP BY rc.parent_id, pl.sequence, pl.id, pl.category_id,pl.name,p.date_to,rc.code \
ORDER BY pl.sequence, rc.parent_id''',(emp_id,))
salary = self.cr.fetchall()
for category in salary:
if category[0] not in salaries:
salaries.setdefault(category[0], {})
salaries[category[0]].update({category[1]: {category[3]: category[2]}})
elif category[1] not in salaries[category[0]]:
salaries[category[0]].setdefault(category[1], {})
salaries[category[0]][category[1]].update({category[3]: category[2]})
else:
salaries[category[0]][category[1]].update({category[3]: category[2]})
category_ids = category_obj.search(self.cr,self.uid, [], context=self.context)
categories = category_obj.read(self.cr, self.uid, category_ids, ['code'], context=self.context)
for code in map(lambda x: x['code'], categories):
if code in salaries:
res = self.salary_list(salaries[code])
result.append(res)
return result
def salary_list(self, salaries):
cat_salary_all = []
for category_name,amount in salaries.items():
cat_salary = []
total = 0.0
cat_salary.append(category_name)
for mnth in self.mnths:
                if mnth != 'None':
if len(mnth) != 7:
mnth = '0' + str(mnth)
if mnth in amount and amount[mnth]:
cat_salary.append(amount[mnth])
total += amount[mnth]
else:
cat_salary.append(0.00)
else:
cat_salary.append('')
cat_salary.append(total)
cat_salary_all.append(cat_salary)
return cat_salary_all
def get_allow(self):
return self.allow_list
def get_deduct(self):
return self.deduct_list
def get_total(self):
return self.total
class wrapped_report_payslip(osv.AbstractModel):
_name = 'report.l10n_in_hr_payroll.report_hryearlysalary'
_inherit = 'report.abstract_report'
_template = 'l10n_in_hr_payroll.report_hryearlysalary'
_wrapped_report_class = employees_yearly_salary_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
apradiznewcyberproject/support-tools | googlecode-issues-exporter/generate_user_map.py | 151 | 3446 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for generating a user mapping from Google Code user to BitBucket user.
"""
import argparse
import json
import sys
import issues
class OptionalMap(dict):
"""Dictionary that returns the key for missing items. """
def __missing__(self, key):
"""Implements the dict interface. """
return key
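def _demo_optional_map():
  # Editor-added illustrative sketch (hypothetical user names): unknown
  # Google Code users pass through unchanged while known ones are remapped.
  users = OptionalMap({'alice@gc': 'alice@bitbucket'})
  assert users['alice@gc'] == 'alice@bitbucket'
  assert users['bob@gc'] == 'bob@gc'  # __missing__ returns the key itself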
def addIfNotPresent(users, user):
"""Adds a user if it is not already set."""
if user not in users:
users[user] = user
def _CreateUsersDict(issue_data, project_name):
"""Extract users from list of issues into a dict.
Args:
issue_data: Issue data
project_name: The name of the project being exported.
Returns:
Dict of users associated with a list of issues
"""
users = {}
for issue in issue_data:
googlecode_issue = issues.GoogleCodeIssue(
issue, project_name, OptionalMap())
reporting_user = googlecode_issue.GetAuthor()
addIfNotPresent(users, reporting_user)
assignee_user = googlecode_issue.GetOwner()
addIfNotPresent(users, assignee_user)
googlecode_comments = googlecode_issue.GetComments()
for comment in googlecode_comments:
googlecode_comment = issues.GoogleCodeComment(googlecode_issue, comment)
commenting_user = googlecode_comment.GetAuthor()
addIfNotPresent(users, commenting_user)
return {
"users": users
}
def Generate(issue_file_path, project_name):
"""Generates a user map for the specified issues. """
issue_data = None
  with open(issue_file_path) as user_file:
    user_data = json.load(user_file)
user_projects = user_data["projects"]
for project in user_projects:
if project_name in project["name"]:
issue_data = project["issues"]["items"]
break
if issue_data is None:
raise issues.ProjectNotFoundError(
"Project %s not found" % project_name)
users = _CreateUsersDict(issue_data, project_name)
with open("users.json", "w") as users_file:
user_json = json.dumps(users, sort_keys=True, indent=4,
separators=(",", ": "), ensure_ascii=False)
users_file.write(unicode(user_json))
print "\nCreated file users.json.\n"
def main(args):
"""The main function.
Args:
args: The command line arguments.
Raises:
issues.ProjectNotFoundError: The user passed in an invalid project name.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--issue_file_path", required=True,
help="The path to the file containing the issues from"
"Google Code.")
parser.add_argument("--project_name", required=True,
help="The name of the Google Code project you wish to"
"export")
parsed_args, _ = parser.parse_known_args(args)
Generate(parsed_args.issue_file_path, parsed_args.project_name)
if __name__ == "__main__":
main(sys.argv)
| apache-2.0 |
jasonbot/django | tests/null_fk/models.py | 282 | 1422 | """
Regression tests for proper working of ForeignKey(null=True).
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class SystemDetails(models.Model):
details = models.TextField()
class SystemInfo(models.Model):
system_details = models.ForeignKey(SystemDetails, models.CASCADE)
system_name = models.CharField(max_length=32)
class Forum(models.Model):
system_info = models.ForeignKey(SystemInfo, models.CASCADE)
forum_name = models.CharField(max_length=32)
@python_2_unicode_compatible
class Post(models.Model):
forum = models.ForeignKey(Forum, models.SET_NULL, null=True)
title = models.CharField(max_length=32)
def __str__(self):
return self.title
@python_2_unicode_compatible
class Comment(models.Model):
post = models.ForeignKey(Post, models.SET_NULL, null=True)
comment_text = models.CharField(max_length=250)
class Meta:
ordering = ('comment_text',)
def __str__(self):
return self.comment_text
# Ticket 15823
class Item(models.Model):
title = models.CharField(max_length=100)
class PropertyValue(models.Model):
label = models.CharField(max_length=100)
class Property(models.Model):
item = models.ForeignKey(Item, models.CASCADE, related_name='props')
key = models.CharField(max_length=100)
value = models.ForeignKey(PropertyValue, models.SET_NULL, null=True)
| bsd-3-clause |
rahul67/hue | desktop/core/ext-py/pysaml2-2.4.0/src/saml2/extension/mdattr.py | 34 | 2430 | #!/usr/bin/env python
#
# Generated Mon May 2 14:23:34 2011 by parse_xsd.py version 0.4.
#
import saml2
from saml2 import SamlBase
from saml2 import saml
NAMESPACE = 'urn:oasis:names:tc:SAML:metadata:attribute'
class EntityAttributesType_(SamlBase):
"""The urn:oasis:names:tc:SAML:metadata:attribute:EntityAttributesType element """
c_tag = 'EntityAttributesType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Attribute'] = ('attribute', [saml.Attribute])
c_cardinality['attribute'] = {"min":0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Assertion'] = ('assertion', [saml.Assertion])
c_cardinality['assertion'] = {"min":0}
c_child_order.extend(['attribute', 'assertion'])
def __init__(self,
attribute=None,
assertion=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.attribute=attribute or []
self.assertion=assertion or []
def entity_attributes_type__from_string(xml_string):
return saml2.create_class_from_xml_string(EntityAttributesType_, xml_string)
class EntityAttributes(EntityAttributesType_):
"""The urn:oasis:names:tc:SAML:metadata:attribute:EntityAttributes element """
c_tag = 'EntityAttributes'
c_namespace = NAMESPACE
c_children = EntityAttributesType_.c_children.copy()
c_attributes = EntityAttributesType_.c_attributes.copy()
c_child_order = EntityAttributesType_.c_child_order[:]
c_cardinality = EntityAttributesType_.c_cardinality.copy()
def entity_attributes_from_string(xml_string):
return saml2.create_class_from_xml_string(EntityAttributes, xml_string)
ELEMENT_FROM_STRING = {
EntityAttributes.c_tag: entity_attributes_from_string,
EntityAttributesType_.c_tag: entity_attributes_type__from_string,
}
ELEMENT_BY_TAG = {
'EntityAttributes': EntityAttributes,
'EntityAttributesType': EntityAttributesType_,
}
def factory(tag, **kwargs):
return ELEMENT_BY_TAG[tag](**kwargs)
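# Editor-added usage sketch (illustrative): elements can be built generically
# by tag name, e.g.
#     ea = factory('EntityAttributes', attribute=[saml.Attribute()])
# which is equivalent to EntityAttributes(attribute=[saml.Attribute()]).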
| apache-2.0 |
vlegoff/tsunami | src/primaires/joueur/commandes/montrer/niveaux.py | 1 | 2438 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module contenant la commande 'montrer niveaux'."""
from primaires.interpreteur.masque.parametre import Parametre
from primaires.perso.montrer.niveaux import MontrerNiveaux
class PrmNiveaux(Parametre):
"""Commande 'montrer niveaux'."""
def __init__(self):
"""Constructeur du paramètre"""
Parametre.__init__(self, "niveaux", "levels")
self.tronquer = True
self.schema = "<nom_joueur>"
self.aide_courte = "affiche le niveaux d'un joueur"
self.aide_longue = \
"Cette commande montre les niveaux d'un joueur. Vous devez " \
"simplement préciser le nom du joueur en paramètre."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
joueur = dic_masques["nom_joueur"].joueur
personnage << MontrerNiveaux.montrer(joueur)
| bsd-3-clause |
code-sauce/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 88 | 31139 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert (isinstance(n_classes, dict))
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
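# Editor-added illustrative note: for x_shape=[100, 8], y_shape=[100],
# n_classes=3 and batch_size=32 the function above returns
# input_shape=[32, 8], output_shape=[32, 3] and batch_size=32; with
# n_classes=None the output shape would be [32].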
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
    y: numpy, pandas or Dask array or dictionary of aforementioned. Also
      supports iterables.
    n_classes: number of classes. Must be None or same type as y. In case `y`
      is `dict` (or an iterable which returns dict), it must be such that
      `n_classes[key] = n_classes for y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
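# Editor-added usage sketch (hypothetical in-memory arrays):
#     feeder = setup_train_data_feeder(x, y, n_classes=3, batch_size=32)
# returns a DataFeeder (or a DaskDataFeeder for dask inputs); passing
# iterators for both x and y returns a StreamingDataFeeder instead.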
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
    for k in list(chunk.keys()):
      chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
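# Editor-added illustrative note: _batch_data(iter(range(5)), batch_size=2)
# yields np.matrix chunks of sizes 2, 2 and 1; iterators of dicts are
# batched per key in the same way.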
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
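# Editor-added illustrative note: for an ndarray x with len(x) == 100 and
# batch_size == 32 the function above returns ceil(100 / 32) == 4 slices
# (three of 32 rows, one of 4); with batch_size=None it returns [x].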
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element of `a` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
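# Editor-added illustrative note: _access(np.array([10, 20, 30]), 1) == 20;
# for a pandas Series or DataFrame the same call selects by position through
# .iloc even when the index is not integer based.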
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
      x: One feature sample which can be either an Nd numpy matrix of shape
        `[n_samples, n_features, ...]` or a dictionary of Nd numpy matrices.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of dtypes).
      output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
    self._y = None if y is None else (
        dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())])
        if y_is_dict else check_array(y, y.dtype))
# self.n_classes is not None means we're converting raw target indices to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
    self._input_dtype = (
        dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())])
        if x_is_dict else _check_dtype(self._x.dtype))
# note: self._output_dtype = np.float32 when y is None
    self._output_dtype = (
        dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())])
        if y_is_dict else
        _check_dtype(self._y.dtype) if y is not None else np.float32)
# For dict labels with one-hot conversion requested, outputs become float32.
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
num_samples = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
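# --- Illustrative sketch (editor's addition). A hedged, minimal example of
# the intended DataFeeder flow: construct the feeder, build placeholders,
# then drain mini-batches until the epoch limit raises StopIteration. The
# shapes and the commented-out session call are assumptions for
# illustration only.
def _demo_data_feeder():
  x = np.random.rand(100, 3).astype(np.float32)
  y = np.random.randint(0, 2, size=100)
  feeder = DataFeeder(x, y, n_classes=2, batch_size=32, epochs=1)
  inp, out = feeder.input_builder()  # placeholders wired into the graph
  feed_fn = feeder.get_feed_dict_fn()
  try:
    while True:
      feed_dict = feed_fn()  # maps placeholder names to one mini-batch
      # session.run(train_op, feed_dict=feed_dict)  # hypothetical train step
  except StopIteration:
    pass  # raised after `epochs` complete passes over the data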
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
The streaming data feeder allows data to be read as it arrives from disk
or elsewhere. It is common to have these iterators rotate infinitely over
the dataset, to allow control of how much to learn on the trainer side.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator, each element of which is one feature sample. A sample can
be an Nd numpy matrix or a dictionary of Nd numpy matrices.
y: iterator, each element of which is one label sample. A sample can be
an Nd numpy matrix or a dictionary of Nd numpy matrices holding one or
many class/regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
batch_size: Mini batch size to accumulate samples in one batch. If set
to `None`, the iterator is assumed to return already-batched elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of labels).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
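# --- Illustrative sketch (editor's addition). StreamingDataFeeder consumes
# iterators instead of in-memory arrays; the generators below are
# hypothetical stand-ins for data arriving from disk.
def _demo_streaming_feeder():
  def x_iter():
    while True:  # rotate infinitely, as the class docstring suggests
      yield np.random.rand(3).astype(np.float32)
  def y_iter():
    while True:
      yield np.int64(np.random.randint(0, 2))  # numpy scalar, so .shape exists
  feeder = StreamingDataFeeder(x_iter(), y_iter(), n_classes=2, batch_size=16)
  # Placeholders must be built first (input_builder is inherited from
  # DataFeeder) before the feed function can be called:
  feeder.input_builder()
  feed_dict = feeder.get_feed_dict_fn()()  # one assembled mini-batch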
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
x: iterator that, for each element, returns features.
y: iterator that, for each element, returns 1 or many classes /
regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
random_state: random state for RNG. Note that it will mutate, so use an
int value for this if you want consistent-sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapping default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
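# --- Illustrative sketch (editor's addition). DaskDataFeeder samples
# batches out of on-disk dask DataFrames; the CSV paths below are
# hypothetical, and placeholders are passed in explicitly (unlike the
# other feeders in this module).
def _demo_dask_feeder(input_placeholder, output_placeholder):
  import dask.dataframe as dd  # pylint: disable=g-import-not-at-top
  x = dd.read_csv('features.csv')  # hypothetical feature file
  y = dd.read_csv('labels.csv')    # hypothetical single-column label file
  feeder = DaskDataFeeder(x, y, n_classes=2, batch_size=64)
  feed_fn = feeder.get_feed_dict_fn(input_placeholder, output_placeholder)
  return feed_fn()  # {input_name: batch_x, output_name: one_hot_batch_y}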
| apache-2.0 |
cryvate/project-euler | project_euler/library/number_theory/pells_equation.py | 1 | 1029 | from .continued_fractions import convergents_sqrt
from typing import Generator, Tuple
def solve_pells_equation(n: int) -> Generator[Tuple[int, int], None, None]:
convergents = convergents_sqrt(n)
for convergent in convergents:
h = convergent.numerator
k = convergent.denominator
if h ** 2 - n * (k ** 2) == 1:
break
x, y = h, k
while True:
yield x, y
x, y = h * x + n * k * y, h * y + k * x
def solve_negative_pells_equation(n: int) -> \
Generator[Tuple[int, int], None, None]:
convergents = convergents_sqrt(n)
for convergent in convergents:
h = convergent.numerator
k = convergent.denominator
if h ** 2 - n * (k ** 2) == -1:
break
if h ** 2 - n * (k ** 2) == 1:
raise ValueError(f"Equation x^2 - {n}y^2 = -1 has no solution")
x, y = h, k
while True:
yield x, y
# Apply the fundamental solution twice: composing two norm -1 solutions
# yields a norm +1 solution, so the second application returns to -1.
x, y = h * x + n * k * y, h * y + k * x
x, y = h * x + n * k * y, h * y + k * x
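# Illustrative usage (editor's addition): the smallest solutions of
# x^2 - 2*y^2 = 1 are (3, 2), (17, 12), (99, 70), ...
if __name__ == '__main__':
    from itertools import islice
    for x, y in islice(solve_pells_equation(2), 3):
        assert x * x - 2 * y * y == 1
        print(x, y)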
| mit |
Yunchao/hackingchicago | Server/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/common.py | 1292 | 20063 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
"""
Given a list of qualified targets, return the qualified targets for the
specified |target|.
"""
return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further (to handle cases like ../cwd), make it relative to cwd)
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
@memoize
def RelativePath(path, relative_to, follow_path_symlink=True):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# If |follow_symlink_path| is true (default) and |path| is a symlink, then
# this method returns a path to the real file represented by |path|. If it is
# false, this method returns a path to the symlink. If |path| is not a
# symlink, this option has no effect.
# Convert to normalized (and therefore absolute paths).
if follow_path_symlink:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
relative_to = os.path.realpath(relative_to)
# On Windows, we can't create a relative path to a different drive, so just
# use the absolute path.
if sys.platform == 'win32':
if (os.path.splitdrive(path)[0].lower() !=
os.path.splitdrive(relative_to)[0].lower()):
return path
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
@memoize
def InvertRelativePath(path, toplevel_dir=None):
"""Given a path like foo/bar that is relative to toplevel_dir, return
the inverse relative path back to the toplevel_dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
should always produce the empty string, unless the path contains symlinks.
"""
if not path:
return path
toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
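# An illustrative check (editor's addition): arguments containing shell
# metacharacters are double-quoted, and quote/backslash/backtick characters
# are backslash-escaped, per the rules documented above. The _Demo name is
# hypothetical.
def _DemoEncodePOSIXShell():
  assert EncodePOSIXShellArgument('simple') == 'simple'
  assert EncodePOSIXShellArgument('has space') == '"has space"'
  assert EncodePOSIXShellArgument('say "hi"') == '"say \\"hi\\""'
  assert EncodePOSIXShellList(['a b', 'c']) == '"a b" c'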
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer(object):
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
def EnsureDirExists(path):
"""Make sure the directory for |path| exists."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
if sys.platform.startswith('openbsd'):
return 'openbsd'
if sys.platform.startswith('netbsd'):
return 'netbsd'
if sys.platform.startswith('aix'):
return 'aix'
return 'linux'
def CopyTool(flavor, out_path):
"""Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
# aix and solaris just need flock emulation. mac and win use more complicated
# support scripts.
prefix = {
'aix': 'flock',
'solaris': 'flock',
'mac': 'mac',
'win': 'win'
}.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
# The second argument is an addition that causes a pylint warning.
def pop(self, last=True): # pylint: disable=W0221
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
# Extensions to the recipe.
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
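# An illustrative sketch (editor's addition): OrderedSet keeps first-
# insertion order, unlike a plain set. The _Demo name is hypothetical.
def _DemoOrderedSet():
  s = OrderedSet(['b', 'a', 'b', 'c'])
  assert list(s) == ['b', 'a', 'c']   # duplicates dropped, order preserved
  s.discard('a')
  s.update(['a'])
  assert list(s) == ['b', 'c', 'a']   # re-added items move to the end
  assert s.pop() == 'a'               # pop() takes from the end by default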
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
r"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
A list containing all of the nodes in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
return re.findall(r'\$\(([^)]+)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
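# A runnable version of the docstring example above (editor's addition),
# using the corrected regular expression. The _Demo name is hypothetical.
def _DemoTopologicallySorted():
  graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
  def GetEdges(node):
    return re.findall(r'\$\(([^)]+)\)', graph[node])
  assert TopologicallySorted(graph.keys(), GetEdges) == ['a', 'c', 'b']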
def CrossCompileRequested():
# TODO: figure out how to not build extra host objects in the
# non-cross-compile case when this is enabled, and enable unconditionally.
return (os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
| mit |
kernel-sanders/arsenic-mobile | Dependencies/Twisted-13.0.0/twisted/python/win32.py | 36 | 5436 | # -*- test-case-name: twisted.python.test.test_win32 -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Win32 utilities.
See also twisted.python.shortcut.
@var O_BINARY: the 'binary' mode flag on Windows, or 0 on other platforms, so it
may safely be OR'ed into a mask for os.open.
"""
from __future__ import division, absolute_import
import re
import os
try:
import win32api
import win32con
except ImportError:
pass
from twisted.python.runtime import platform
# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/debug/base/system_error_codes.asp
ERROR_FILE_NOT_FOUND = 2
ERROR_PATH_NOT_FOUND = 3
ERROR_INVALID_NAME = 123
ERROR_DIRECTORY = 267
O_BINARY = getattr(os, "O_BINARY", 0)
class FakeWindowsError(OSError):
"""
Stand-in for sometimes-builtin exception on platforms for which it
is missing.
"""
try:
WindowsError = WindowsError
except NameError:
WindowsError = FakeWindowsError
# XXX fix this to use python's builtin _winreg?
def getProgramsMenuPath():
"""
Get the path to the Programs menu.
Probably will break on non-US Windows.
@return: the filesystem location of the common Start Menu->Programs.
@rtype: L{str}
"""
if not platform.isWindows():
return "C:\\Windows\\Start Menu\\Programs"
keyname = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders'
hShellFolders = win32api.RegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE,
keyname, 0, win32con.KEY_READ)
return win32api.RegQueryValueEx(hShellFolders, 'Common Programs')[0]
def getProgramFilesPath():
"""Get the path to the Program Files folder."""
keyname = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion'
currentV = win32api.RegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE,
keyname, 0, win32con.KEY_READ)
return win32api.RegQueryValueEx(currentV, 'ProgramFilesDir')[0]
_cmdLineQuoteRe = re.compile(r'(\\*)"')
_cmdLineQuoteRe2 = re.compile(r'(\\+)\Z')
def cmdLineQuote(s):
"""
Internal method for quoting a single command-line argument.
@param s: an unquoted string that you want to quote so that something that
does cmd.exe-style unquoting will interpret it as a single argument,
even if it contains spaces.
@type s: C{str}
@return: a quoted string.
@rtype: C{str}
"""
quote = ((" " in s) or ("\t" in s) or ('"' in s) or s == '') and '"' or ''
return quote + _cmdLineQuoteRe2.sub(r"\1\1", _cmdLineQuoteRe.sub(r'\1\1\\"', s)) + quote
def quoteArguments(arguments):
"""
Quote an iterable of command-line arguments for passing to CreateProcess or
a similar API. This allows the list passed to C{reactor.spawnProcess} to
match the child process's C{sys.argv} properly.
@param arguments: an iterable of C{str}, each unquoted.
@return: a single string, with the given sequence quoted as necessary.
"""
return ' '.join([cmdLineQuote(a) for a in arguments])
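# An illustrative check (editor's addition): quoting matches cmd.exe-style
# unquoting, including the empty string, which must be passed as "". The
# _demo name is hypothetical.
def _demoQuoteArguments():
    assert cmdLineQuote('simple') == 'simple'
    assert cmdLineQuote('two words') == '"two words"'
    assert cmdLineQuote('') == '""'
    assert quoteArguments(['a b', 'c']) == '"a b" c'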
class _ErrorFormatter(object):
"""
Formatter for Windows error messages.
@ivar winError: A callable which takes one integer error number argument
and returns an L{exceptions.WindowsError} instance for that error (like
L{ctypes.WinError}).
@ivar formatMessage: A callable which takes one integer error number
argument and returns a C{str} giving the message for that error (like
L{win32api.FormatMessage}).
@ivar errorTab: A mapping from integer error numbers to C{str} messages
which correspond to those errors (like L{socket.errorTab}).
"""
def __init__(self, WinError, FormatMessage, errorTab):
self.winError = WinError
self.formatMessage = FormatMessage
self.errorTab = errorTab
def fromEnvironment(cls):
"""
Get as many of the platform-specific error translation objects as
possible and return an instance of C{cls} created with them.
"""
try:
from ctypes import WinError
except ImportError:
WinError = None
try:
from win32api import FormatMessage
except ImportError:
FormatMessage = None
try:
from socket import errorTab
except ImportError:
errorTab = None
return cls(WinError, FormatMessage, errorTab)
fromEnvironment = classmethod(fromEnvironment)
def formatError(self, errorcode):
"""
Returns the string associated with a Windows error message, such as the
ones found in socket.error.
Attempts direct lookup against the win32 API (via ctypes and then
pywin32 if available), then in the error table in the socket module,
then finally defaulting to C{os.strerror}.
@param errorcode: the Windows error code
@type errorcode: C{int}
@return: The error message string
@rtype: C{str}
"""
if self.winError is not None:
return self.winError(errorcode).strerror
if self.formatMessage is not None:
return self.formatMessage(errorcode)
if self.errorTab is not None:
result = self.errorTab.get(errorcode)
if result is not None:
return result
return os.strerror(errorcode)
formatError = _ErrorFormatter.fromEnvironment().formatError
| gpl-3.0 |
amyvmiwei/libgit2 | tests/generate.py | 71 | 7484 | #!/usr/bin/env python
#
# Copyright (c) Vicent Marti. All rights reserved.
#
# This file is part of clar, distributed under the ISC license.
# For full terms see the included COPYING file.
#
from __future__ import with_statement
from string import Template
import re, fnmatch, os, codecs, pickle
class Module(object):
class Template(object):
def __init__(self, module):
self.module = module
def _render_callback(self, cb):
if not cb:
return ' { NULL, NULL }'
return ' { "%s", &%s }' % (cb['short_name'], cb['symbol'])
class DeclarationTemplate(Template):
def render(self):
out = "\n".join("extern %s;" % cb['declaration'] for cb in self.module.callbacks) + "\n"
if self.module.initialize:
out += "extern %s;\n" % self.module.initialize['declaration']
if self.module.cleanup:
out += "extern %s;\n" % self.module.cleanup['declaration']
return out
class CallbacksTemplate(Template):
def render(self):
out = "static const struct clar_func _clar_cb_%s[] = {\n" % self.module.name
out += ",\n".join(self._render_callback(cb) for cb in self.module.callbacks)
out += "\n};\n"
return out
class InfoTemplate(Template):
def render(self):
return Template(
r"""
{
"${clean_name}",
${initialize},
${cleanup},
${cb_ptr}, ${cb_count}, ${enabled}
}"""
).substitute(
clean_name = self.module.clean_name(),
initialize = self._render_callback(self.module.initialize),
cleanup = self._render_callback(self.module.cleanup),
cb_ptr = "_clar_cb_%s" % self.module.name,
cb_count = len(self.module.callbacks),
enabled = int(self.module.enabled)
)
def __init__(self, name):
self.name = name
self.mtime = 0
self.enabled = True
self.modified = False
def clean_name(self):
return self.name.replace("_", "::")
def _skip_comments(self, text):
SKIP_COMMENTS_REGEX = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE)
def _replacer(match):
s = match.group(0)
return "" if s.startswith('/') else s
return re.sub(SKIP_COMMENTS_REGEX, _replacer, text)
def parse(self, contents):
TEST_FUNC_REGEX = r"^(void\s+(test_%s__(\w+))\s*\(\s*void\s*\))\s*\{"
contents = self._skip_comments(contents)
regex = re.compile(TEST_FUNC_REGEX % self.name, re.MULTILINE)
self.callbacks = []
self.initialize = None
self.cleanup = None
for (declaration, symbol, short_name) in regex.findall(contents):
data = {
"short_name" : short_name,
"declaration" : declaration,
"symbol" : symbol
}
if short_name == 'initialize':
self.initialize = data
elif short_name == 'cleanup':
self.cleanup = data
else:
self.callbacks.append(data)
return self.callbacks != []
def refresh(self, path):
self.modified = False
try:
st = os.stat(path)
# Not modified
if st.st_mtime == self.mtime:
return True
self.modified = True
self.mtime = st.st_mtime
with codecs.open(path, encoding='utf-8') as fp:
raw_content = fp.read()
except IOError:
return False
return self.parse(raw_content)
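# An illustrative sketch (editor's addition): parse() discovers test
# callbacks from `void test_<module>__<name>(void)` definitions at the
# start of a line. The C snippet below is hypothetical.
def _demo_module_parse():
    m = Module('buf')
    src = ("void test_buf__initialize(void) {\n"
           "}\n"
           "void test_buf__append(void) {\n"
           "}\n")
    assert m.parse(src)
    assert m.initialize['symbol'] == 'test_buf__initialize'
    assert [cb['short_name'] for cb in m.callbacks] == ['append']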
class TestSuite(object):
def __init__(self, path):
self.path = path
def should_generate(self, path):
if not os.path.isfile(path):
return True
if any(module.modified for module in self.modules.values()):
return True
return False
def find_modules(self):
modules = []
for root, _, files in os.walk(self.path):
module_root = root[len(self.path):]
module_root = [c for c in module_root.split(os.sep) if c]
tests_in_module = fnmatch.filter(files, "*.c")
for test_file in tests_in_module:
full_path = os.path.join(root, test_file)
module_name = "_".join(module_root + [test_file[:-2]]).replace("-", "_")
modules.append((full_path, module_name))
return modules
def load_cache(self):
path = os.path.join(self.path, '.clarcache')
cache = {}
try:
fp = open(path, 'rb')
cache = pickle.load(fp)
fp.close()
except (IOError, ValueError):
pass
return cache
def save_cache(self):
path = os.path.join(self.path, '.clarcache')
with open(path, 'wb') as cache:
pickle.dump(self.modules, cache)
def load(self, force = False):
module_data = self.find_modules()
self.modules = {} if force else self.load_cache()
for path, name in module_data:
if name not in self.modules:
self.modules[name] = Module(name)
if not self.modules[name].refresh(path):
del self.modules[name]
def disable(self, excluded):
for exclude in excluded:
for module in self.modules.values():
name = module.clean_name()
if name.startswith(exclude):
module.enabled = False
module.modified = True
def suite_count(self):
return len(self.modules)
def callback_count(self):
return sum(len(module.callbacks) for module in self.modules.values())
def write(self):
output = os.path.join(self.path, 'clar.suite')
if not self.should_generate(output):
return False
with open(output, 'w') as data:
for module in self.modules.values():
t = Module.DeclarationTemplate(module)
data.write(t.render())
for module in self.modules.values():
t = Module.CallbacksTemplate(module)
data.write(t.render())
suites = "static struct clar_suite _clar_suites[] = {" + ','.join(
Module.InfoTemplate(module).render() for module in sorted(self.modules.values(), key=lambda module: module.name)
) + "\n};\n"
data.write(suites)
data.write("static const size_t _clar_suite_count = %d;\n" % self.suite_count())
data.write("static const size_t _clar_callback_count = %d;\n" % self.callback_count())
self.save_cache()
return True
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-f', '--force', action="store_true", dest='force', default=False)
parser.add_option('-x', '--exclude', dest='excluded', action='append', default=[])
options, args = parser.parse_args()
for path in args or ['.']:
suite = TestSuite(path)
suite.load(options.force)
suite.disable(options.excluded)
if suite.write():
print("Written `clar.suite` (%d tests in %d suites)" % (suite.callback_count(), suite.suite_count()))
| lgpl-2.1 |
DaniilLeksin/gc | wx/tools/Editra/src/syntax/_perl.py | 4 | 6638 | ###############################################################################
# Name: perl.py #
# Purpose: Define Perl syntax for highlighting and other features #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2008 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
FILE: perl.py
AUTHOR: Cody Precord
@summary: Lexer configuration module for Perl.
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: _perl.py 66108 2010-11-10 21:04:54Z CJP $"
__revision__ = "$Revision: 66108 $"
#-----------------------------------------------------------------------------#
# Imports
import wx
import wx.stc as stc
# Local Imports
import synglob
import syndata
#-----------------------------------------------------------------------------#
#---- Keyword Specifications ----#
# Perl Keywords
PERL_KW = (0, "if elseif unless else switch eq ne gt lt ge le cmp not and or "
"xor while for foreach do until continue defined undef and or "
"not bless ref BEGIN END my local our goto return last next redo "
"chomp chop chr crypt index lc lcfirst length org pack reverse "
"rindex sprintf substr uc ucfirst pos quotemet split study abs "
"atan2 cos exp hex int log oct rand sin sqrt srand spice unshift "
"shift push pop split join reverse grep map sort unpack each "
"exists keys values tie tied untie carp confess croak dbmclose "
"dbmopen die syscall binmode close closedir eof fileno getc "
"lstat print printf readdir readline readpipe rewinddir select "
"stat tell telldir write fcntl flock ioctl open opendir read "
"seek seekdir sysopen sysread sysseek syswrite truncate pack vec "
"chdir chmod chown chroot glob link mkdir readlink rename rmdir "
"symlink umask ulink utime caller dump eval exit wanarray "
"import alarm exec fork getpgrp getppid getpriority kill pipe "
"setpgrp setpriority sleep system times wait waitpid accept "
"bind connect getpeername getsockname getsockopt listen recv "
"send setsockopt shutdown socket socketpair msgctl msgget msgrcv "
"msgsnd semctl semget semop shmctl shmget shmread shmwrite "
"endhostent endnetent endprooent endservent gethostbyaddr "
"gethostbyname gethostent getnetbyaddr getnetbyname getnetent "
"getprotobyname getprotobynumber getprotoent getervbyname time "
"getservbyport getservent sethostent setnetent setprotoent "
"setservent getpwuid getpwnam getpwent setpwent endpwent "
"getgrgid getlogin getgrnam setgrent endgrent gtime localtime "
"times warn formline reset scalar delete prototype lock new "
"NULL __FILE__ __LINE__ __PACKAGE__ __DATA__ __END__ AUTOLOAD "
"BEGIN CORE DESTROY END EQ GE GT INIT LE LT NE CHECK use sub "
"elsif require getgrent ")
#---- Syntax Style Specs ----#
SYNTAX_ITEMS = [ (stc.STC_PL_DEFAULT, 'default_style'),
(stc.STC_PL_ARRAY, 'array_style'),
(stc.STC_PL_BACKTICKS, 'btick_style'),
(stc.STC_PL_CHARACTER, 'char_style'),
(stc.STC_PL_COMMENTLINE, 'comment_style'),
(stc.STC_PL_DATASECTION, 'default_style'), # STYLE ME
(stc.STC_PL_ERROR, 'error_style'),
(stc.STC_PL_HASH, 'global_style'),
(stc.STC_PL_HERE_DELIM, 'here_style'),
(stc.STC_PL_HERE_Q, 'here_style'),
(stc.STC_PL_HERE_QQ, 'here_style'),
(stc.STC_PL_HERE_QX, 'here_style'),
(stc.STC_PL_IDENTIFIER, 'default_style'),
(stc.STC_PL_LONGQUOTE, 'default_style'), # STYLE ME
(stc.STC_PL_NUMBER, 'number_style'),
(stc.STC_PL_OPERATOR, 'operator_style'),
(stc.STC_PL_POD, 'comment_style'),
(stc.STC_PL_PREPROCESSOR, 'pre_style' ),
(stc.STC_PL_PUNCTUATION, 'default_style'), # STYLE ME
(stc.STC_PL_REGEX, 'regex_style'),
(stc.STC_PL_REGSUBST, 'regex_style'),
(stc.STC_PL_SCALAR, 'scalar_style'),
(stc.STC_PL_STRING, 'string_style'),
(stc.STC_PL_STRING_Q, 'string_style'),
(stc.STC_PL_STRING_QQ, 'string_style'),
(stc.STC_PL_STRING_QR, 'string_style'),
(stc.STC_PL_STRING_QW, 'string_style'),
(stc.STC_PL_STRING_QX, 'string_style'),
(stc.STC_PL_SYMBOLTABLE, 'default_style'), # STYLE ME
(stc.STC_PL_WORD, 'keyword_style') ]
if wx.VERSION >= (2, 9, 0, 0, ''):
SYNTAX_ITEMS.append((stc.STC_PL_FORMAT, 'default_style')) #TODO
SYNTAX_ITEMS.append((stc.STC_PL_FORMAT_IDENT, 'default_style')) #TODO
SYNTAX_ITEMS.append((stc.STC_PL_SUB_PROTOTYPE, 'default_style')) #TODO
#---- Extra Properties ----#
FOLD = ("fold", "1")
FLD_COMPACT = ("fold.compact", "1")
FLD_COMMENT = ("fold.comment", "1")
FLD_POD = ("fold.perl.pod", "1")
FLD_PKG = ("fold.perl.package", "1")
#-----------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
"""SyntaxData object for Perl"""
def __init__(self, langid):
super(SyntaxData, self).__init__(langid)
# Setup
self.SetLexer(stc.STC_LEX_PERL)
def GetKeywords(self):
"""Returns Specified Keywords List """
return [PERL_KW]
def GetSyntaxSpec(self):
"""Syntax Specifications """
return SYNTAX_ITEMS
def GetProperties(self):
"""Returns a list of Extra Properties to set """
return [FOLD]
def GetCommentPattern(self):
"""Returns a list of characters used to comment a block of code """
return [u'#']
#---- Syntax Modules Internal Functions ----#
def KeywordString(option=0):
"""Returns the specified Keyword String
@note: not used by most modules
"""
if option == synglob.ID_LANG_PERL:
return PERL_KW[1]
else:
return u''
#---- End Syntax Modules Internal Functions ----#
#-----------------------------------------------------------------------------#
| apache-2.0 |
mozilla/stoneridge | python/src/Lib/plat-irix5/FL.py | 132 | 5600 | # Constants used by the FORMS library (module fl).
# This corresponds to "forms.h".
# Recommended use: import FL; ... FL.NORMAL_BOX ... etc.
# Alternate use: from FL import *; ... NORMAL_BOX ... etc.
from warnings import warnpy3k
warnpy3k("the FL module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
_v20 = 1
_v21 = 1
##import fl
##try:
## _v20 = (fl.get_rgbmode is not None)
##except:
## _v20 = 0
##del fl
NULL = 0
FALSE = 0
TRUE = 1
EVENT = -1
LABEL_SIZE = 64
if _v20:
SHORTCUT_SIZE = 32
PLACE_FREE = 0
PLACE_SIZE = 1
PLACE_ASPECT = 2
PLACE_MOUSE = 3
PLACE_CENTER = 4
PLACE_POSITION = 5
FL_PLACE_FULLSCREEN = 6
FIND_INPUT = 0
FIND_AUTOMATIC = 1
FIND_MOUSE = 2
BEGIN_GROUP = 10000
END_GROUP = 20000
ALIGN_TOP = 0
ALIGN_BOTTOM = 1
ALIGN_LEFT = 2
ALIGN_RIGHT = 3
ALIGN_CENTER = 4
NO_BOX = 0
UP_BOX = 1
DOWN_BOX = 2
FLAT_BOX = 3
BORDER_BOX = 4
SHADOW_BOX = 5
FRAME_BOX = 6
ROUNDED_BOX = 7
RFLAT_BOX = 8
RSHADOW_BOX = 9
TOP_BOUND_COL = 51
LEFT_BOUND_COL = 55
BOT_BOUND_COL = 40
RIGHT_BOUND_COL = 35
COL1 = 47
MCOL = 49
LCOL = 0
BOUND_WIDTH = 3.0
DRAW = 0
PUSH = 1
RELEASE = 2
ENTER = 3
LEAVE = 4
MOUSE = 5
FOCUS = 6
UNFOCUS = 7
KEYBOARD = 8
STEP = 9
MOVE = 10
FONT_NAME = 'Helvetica'
FONT_BOLDNAME = 'Helvetica-Bold'
FONT_ITALICNAME = 'Helvetica-Oblique'
FONT_FIXEDNAME = 'Courier'
FONT_ICONNAME = 'Icon'
SMALL_FONT = 8.0
NORMAL_FONT = 11.0
LARGE_FONT = 20.0
NORMAL_STYLE = 0
BOLD_STYLE = 1
ITALIC_STYLE = 2
FIXED_STYLE = 3
ENGRAVED_STYLE = 4
ICON_STYLE = 5
BITMAP = 3
NORMAL_BITMAP = 0
BITMAP_BOXTYPE = NO_BOX
BITMAP_COL1 = 0
BITMAP_COL2 = COL1
BITMAP_LCOL = LCOL
BITMAP_ALIGN = ALIGN_BOTTOM
BITMAP_MAXSIZE = 128*128
BITMAP_BW = BOUND_WIDTH
BOX = 1
BOX_BOXTYPE = UP_BOX
BOX_COL1 = COL1
BOX_LCOL = LCOL
BOX_ALIGN = ALIGN_CENTER
BOX_BW = BOUND_WIDTH
BROWSER = 71
NORMAL_BROWSER = 0
SELECT_BROWSER = 1
HOLD_BROWSER = 2
MULTI_BROWSER = 3
BROWSER_BOXTYPE = DOWN_BOX
BROWSER_COL1 = COL1
BROWSER_COL2 = 3
BROWSER_LCOL = LCOL
BROWSER_ALIGN = ALIGN_BOTTOM
BROWSER_SLCOL = COL1
BROWSER_BW = BOUND_WIDTH
BROWSER_LINELENGTH = 128
BROWSER_MAXLINE = 512
BUTTON = 11
NORMAL_BUTTON = 0
PUSH_BUTTON = 1
RADIO_BUTTON = 2
HIDDEN_BUTTON = 3
TOUCH_BUTTON = 4
INOUT_BUTTON = 5
RETURN_BUTTON = 6
if _v20:
HIDDEN_RET_BUTTON = 7
BUTTON_BOXTYPE = UP_BOX
BUTTON_COL1 = COL1
BUTTON_COL2 = COL1
BUTTON_LCOL = LCOL
BUTTON_ALIGN = ALIGN_CENTER
BUTTON_MCOL1 = MCOL
BUTTON_MCOL2 = MCOL
BUTTON_BW = BOUND_WIDTH
if _v20:
CHART = 4
BAR_CHART = 0
HORBAR_CHART = 1
LINE_CHART = 2
FILLED_CHART = 3
SPIKE_CHART = 4
PIE_CHART = 5
SPECIALPIE_CHART = 6
CHART_BOXTYPE = BORDER_BOX
CHART_COL1 = COL1
CHART_LCOL = LCOL
CHART_ALIGN = ALIGN_BOTTOM
CHART_BW = BOUND_WIDTH
CHART_MAX = 128
CHOICE = 42
NORMAL_CHOICE = 0
CHOICE_BOXTYPE = DOWN_BOX
CHOICE_COL1 = COL1
CHOICE_COL2 = LCOL
CHOICE_LCOL = LCOL
CHOICE_ALIGN = ALIGN_LEFT
CHOICE_BW = BOUND_WIDTH
CHOICE_MCOL = MCOL
CHOICE_MAXITEMS = 128
CHOICE_MAXSTR = 64
CLOCK = 61
SQUARE_CLOCK = 0
ROUND_CLOCK = 1
CLOCK_BOXTYPE = UP_BOX
CLOCK_COL1 = 37
CLOCK_COL2 = 42
CLOCK_LCOL = LCOL
CLOCK_ALIGN = ALIGN_BOTTOM
CLOCK_TOPCOL = COL1
CLOCK_BW = BOUND_WIDTH
COUNTER = 25
NORMAL_COUNTER = 0
SIMPLE_COUNTER = 1
COUNTER_BOXTYPE = UP_BOX
COUNTER_COL1 = COL1
COUNTER_COL2 = 4
COUNTER_LCOL = LCOL
COUNTER_ALIGN = ALIGN_BOTTOM
if _v20:
COUNTER_BW = BOUND_WIDTH
else:
DEFAULT = 51
RETURN_DEFAULT = 0
ALWAYS_DEFAULT = 1
DIAL = 22
NORMAL_DIAL = 0
LINE_DIAL = 1
DIAL_BOXTYPE = NO_BOX
DIAL_COL1 = COL1
DIAL_COL2 = 37
DIAL_LCOL = LCOL
DIAL_ALIGN = ALIGN_BOTTOM
DIAL_TOPCOL = COL1
DIAL_BW = BOUND_WIDTH
FREE = 101
NORMAL_FREE = 1
SLEEPING_FREE = 2
INPUT_FREE = 3
CONTINUOUS_FREE = 4
ALL_FREE = 5
INPUT = 31
NORMAL_INPUT = 0
if _v20:
FLOAT_INPUT = 1
INT_INPUT = 2
HIDDEN_INPUT = 3
if _v21:
MULTILINE_INPUT = 4
SECRET_INPUT = 5
else:
ALWAYS_INPUT = 1
INPUT_BOXTYPE = DOWN_BOX
INPUT_COL1 = 13
INPUT_COL2 = 5
INPUT_LCOL = LCOL
INPUT_ALIGN = ALIGN_LEFT
INPUT_TCOL = LCOL
INPUT_CCOL = 4
INPUT_BW = BOUND_WIDTH
INPUT_MAX = 128
LIGHTBUTTON = 12
LIGHTBUTTON_BOXTYPE = UP_BOX
LIGHTBUTTON_COL1 = 39
LIGHTBUTTON_COL2 = 3
LIGHTBUTTON_LCOL = LCOL
LIGHTBUTTON_ALIGN = ALIGN_CENTER
LIGHTBUTTON_TOPCOL = COL1
LIGHTBUTTON_MCOL = MCOL
LIGHTBUTTON_BW1 = BOUND_WIDTH
LIGHTBUTTON_BW2 = BOUND_WIDTH/2.0
LIGHTBUTTON_MINSIZE = 12.0
MENU = 41
TOUCH_MENU = 0
PUSH_MENU = 1
MENU_BOXTYPE = BORDER_BOX
MENU_COL1 = 55
MENU_COL2 = 37
MENU_LCOL = LCOL
MENU_ALIGN = ALIGN_CENTER
MENU_BW = BOUND_WIDTH
MENU_MAX = 300
POSITIONER = 23
NORMAL_POSITIONER = 0
POSITIONER_BOXTYPE = DOWN_BOX
POSITIONER_COL1 = COL1
POSITIONER_COL2 = 1
POSITIONER_LCOL = LCOL
POSITIONER_ALIGN = ALIGN_BOTTOM
POSITIONER_BW = BOUND_WIDTH
ROUNDBUTTON = 13
ROUNDBUTTON_BOXTYPE = NO_BOX
ROUNDBUTTON_COL1 = 7
ROUNDBUTTON_COL2 = 3
ROUNDBUTTON_LCOL = LCOL
ROUNDBUTTON_ALIGN = ALIGN_CENTER
ROUNDBUTTON_TOPCOL = COL1
ROUNDBUTTON_MCOL = MCOL
ROUNDBUTTON_BW = BOUND_WIDTH
SLIDER = 21
VALSLIDER = 24
VERT_SLIDER = 0
HOR_SLIDER = 1
VERT_FILL_SLIDER = 2
HOR_FILL_SLIDER = 3
VERT_NICE_SLIDER = 4
HOR_NICE_SLIDER = 5
SLIDER_BOXTYPE = DOWN_BOX
SLIDER_COL1 = COL1
SLIDER_COL2 = COL1
SLIDER_LCOL = LCOL
SLIDER_ALIGN = ALIGN_BOTTOM
SLIDER_BW1 = BOUND_WIDTH
SLIDER_BW2 = BOUND_WIDTH*0.75
SLIDER_FINE = 0.05
SLIDER_WIDTH = 0.08
TEXT = 2
NORMAL_TEXT = 0
TEXT_BOXTYPE = NO_BOX
TEXT_COL1 = COL1
TEXT_LCOL = LCOL
TEXT_ALIGN = ALIGN_LEFT
TEXT_BW = BOUND_WIDTH
TIMER = 62
NORMAL_TIMER = 0
VALUE_TIMER = 1
HIDDEN_TIMER = 2
TIMER_BOXTYPE = DOWN_BOX
TIMER_COL1 = COL1
TIMER_COL2 = 1
TIMER_LCOL = LCOL
TIMER_ALIGN = ALIGN_CENTER
TIMER_BW = BOUND_WIDTH
TIMER_BLINKRATE = 0.2
| mpl-2.0 |
paulgradie/SeqPyPlot | main_app/seqpyplot/parsers/htseq_parser.py | 1 | 2244 | """
Read a directory of expression counts in ht-seq format. Each sample
should be an individual file in the directory. File names and
sample order are specified in the config file (order is determined
by order IN the config.)
This class is intended to return the raw dataframe of samples with
missing sample columns as NaN.
"""
import pandas as pd
from pathos.multiprocessing import ProcessPool
import pathlib
try:
from functools import reduce # for py3 compatibility
except ImportError:
pass
class HtSeqParser(object):
def __init__(self, nodes=2):
self.nodes = nodes
def parse_data(self, data_paths, sample_names):
"""
Read the input files from the config file and load in to a
pandas dataframe.
        params
            data_paths: list of file paths specified in the config.
                Returned from config parse.
            sample_names: list of sample names specified in the config.
                Returned from config parse.
        """
output = self.load_data(data_paths, sample_names)
data, ercc_df = (self.merge_dfs(output)
.pipe(self.df_cleanup)
.pipe(self.split_on_ercc))
return data, ercc_df
def load_data(self, data_paths, sample_names):
" Multiprocess load of files in to a list of dfs "
pool = ProcessPool(nodes=self.nodes)
dfs = pool.map(self.load_func, zip(data_paths, sample_names))
return dfs
@staticmethod
def load_func(data_tuple):
path, sample_name = data_tuple
return pd.read_csv(path, sep='\t', names=['gene', sample_name])
def merge_dfs(self, dfs):
return reduce(lambda x, y: pd.merge(x, y, on='gene', how='outer'), dfs)
def df_cleanup(self, df_old):
" Clean away unwanted columns, reset index, and fillna "
df = df_old.copy()
        df = df[~df['gene'].str.startswith('__')]
df.set_index('gene', inplace=True)
        # Note: fills missing cells with the string 'Nan', not numpy.nan.
        df.fillna(value='Nan', inplace=True)
return df
def split_on_ercc(self, df):
" Extract the ERCC data "
ercc_cols = df.index.str.startswith('ERCC-')
ercc_df = df[ercc_cols]
data = df[~ercc_cols]
return data, ercc_df
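if __name__ == '__main__':
    # Minimal usage sketch (hypothetical file paths and sample names, not
    # part of the original module): load two count files on two workers.
    parser = HtSeqParser(nodes=2)
    data, ercc_df = parser.parse_data(
        data_paths=['counts/ctrl.txt', 'counts/treated.txt'],
        sample_names=['ctrl', 'treated'])
    print(data.head())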
| gpl-3.0 |
livoras/feifanote | tasks/component/make-component.py | 2 | 2058 | # -*- coding: utf-8 -*-
import sys
import os
import shutil
import time
import re
component_dir = None
component_name = sys.argv[1]
target_dir = None
def init_component_dir():
global component_dir
component_dir = os.path.abspath(__file__) \
.replace(__file__.replace('/', '\\'), '')[:-1]
component_dir = os.path.sep.join([component_dir, 'src', 'components']) + os.path.sep
def make_component():
fixtures_dir = os.path.dirname(__file__) + '/fixtures'
target_dir = make_target_dir()
for root, dirs, files in os.walk(fixtures_dir):
target_root = target_dir + root.replace(fixtures_dir, '')
for dir_name in dirs:
os.mkdir(os.path.sep.join([target_root, dir_name.format(name=component_name)]))
for file_name in files:
target_file = os.path.sep.join([target_root, file_name.format(name=component_name)])
print target_file + ' ======> created!'
with open(target_file, 'w') as f:
with open(os.path.sep.join([root, file_name]), 'r') as t:
tpl = t.read()
f.write(tpl.format(name=component_name, initial_style='{\n\n}'))
t.close()
f.close()
def make_target_dir():
global target_dir
target_dir = component_dir + component_name
try:
os.mkdir(target_dir)
    except OSError:
        print 'Error: ==> %s component already exists.' % component_name
exit()
return target_dir
def append_script():
with open('index.html', 'r') as index:
file_strs = index.readlines()
for i, line in enumerate(file_strs):
if re.match('\<!--components end--\>', line):
tpl = file_strs[i - 1].replace('<!--', '').replace('-->', '')
file_strs.insert(i - 1, tpl.format(name=component_name))
print 'index.html script ======> wrote'
break
index.close()
with open('index.html', 'w') as index:
index.writelines(file_strs)
index.close()
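# append_script assumes index.html keeps a commented script template on the
# line directly above an end marker, e.g. (hypothetical):
#
#   <!--<script src="src/components/{name}/{name}.js"></script>-->
#   <!--components end-->
#
# The template is un-commented, formatted with the component name, and
# inserted just above the template line.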
if __name__ == '__main__':
init_component_dir()
make_component()
append_script()
# time.sleep(3)
# shutil.rmtree(target_dir)
| mit |
airbnb/airflow | tests/api_connexion/endpoints/test_import_error_endpoint.py | 7 | 9329 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from parameterized import parameterized
from airflow.api_connexion.exceptions import EXCEPTIONS_LINK_MAP
from airflow.models.errors import ImportError # pylint: disable=redefined-builtin
from airflow.security import permissions
from airflow.utils import timezone
from airflow.utils.session import provide_session
from airflow.www import app
from tests.test_utils.api_connexion_utils import assert_401, create_user, delete_user
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_import_errors
class TestBaseImportError(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
with conf_vars({("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}):
cls.app = app.create_app(testing=True) # type:ignore
create_user(
cls.app, # type: ignore
username="test",
role_name="Test",
permissions=[(permissions.ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR)],
)
create_user(cls.app, username="test_no_permissions", role_name="TestNoPermissions") # type: ignore
@classmethod
def tearDownClass(cls) -> None:
delete_user(cls.app, username="test") # type: ignore
delete_user(cls.app, username="test_no_permissions") # type: ignore
def setUp(self) -> None:
super().setUp()
self.client = self.app.test_client() # type:ignore
self.timestamp = "2020-06-10T12:00"
clear_db_import_errors()
def tearDown(self) -> None:
clear_db_import_errors()
@staticmethod
def _normalize_import_errors(import_errors):
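        # Remap DB-assigned ids to a deterministic 1..N sequence so that
        # assertions do not depend on the autoincrement state of the table.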
for i, import_error in enumerate(import_errors, 1):
import_error["import_error_id"] = i
class TestGetImportErrorEndpoint(TestBaseImportError):
@provide_session
def test_response_200(self, session):
import_error = ImportError(
filename="Lorem_ipsum.py",
stacktrace="Lorem ipsum",
timestamp=timezone.parse(self.timestamp, timezone="UTC"),
)
session.add(import_error)
session.commit()
response = self.client.get(
f"/api/v1/importErrors/{import_error.id}", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
response_data = response.json
response_data["import_error_id"] = 1
self.assertEqual(
{
"filename": "Lorem_ipsum.py",
"import_error_id": 1,
"stack_trace": "Lorem ipsum",
"timestamp": "2020-06-10T12:00:00+00:00",
},
response_data,
)
def test_response_404(self):
response = self.client.get("/api/v1/importErrors/2", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 404
self.assertEqual(
{
"detail": "The ImportError with import_error_id: `2` was not found",
"status": 404,
"title": "Import error not found",
"type": EXCEPTIONS_LINK_MAP[404],
},
response.json,
)
@provide_session
def test_should_raises_401_unauthenticated(self, session):
import_error = ImportError(
filename="Lorem_ipsum.py",
stacktrace="Lorem ipsum",
timestamp=timezone.parse(self.timestamp, timezone="UTC"),
)
session.add(import_error)
session.commit()
response = self.client.get(f"/api/v1/importErrors/{import_error.id}")
assert_401(response)
def test_should_raise_403_forbidden(self):
response = self.client.get(
"/api/v1/importErrors", environ_overrides={'REMOTE_USER': "test_no_permissions"}
)
assert response.status_code == 403
class TestGetImportErrorsEndpoint(TestBaseImportError):
@provide_session
def test_get_import_errors(self, session):
import_error = [
ImportError(
filename="Lorem_ipsum.py",
stacktrace="Lorem ipsum",
timestamp=timezone.parse(self.timestamp, timezone="UTC"),
)
for _ in range(2)
]
session.add_all(import_error)
session.commit()
response = self.client.get("/api/v1/importErrors", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
response_data = response.json
self._normalize_import_errors(response_data['import_errors'])
self.assertEqual(
{
"import_errors": [
{
"filename": "Lorem_ipsum.py",
"import_error_id": 1,
"stack_trace": "Lorem ipsum",
"timestamp": "2020-06-10T12:00:00+00:00",
},
{
"filename": "Lorem_ipsum.py",
"import_error_id": 2,
"stack_trace": "Lorem ipsum",
"timestamp": "2020-06-10T12:00:00+00:00",
},
],
"total_entries": 2,
},
response_data,
)
@provide_session
def test_should_raises_401_unauthenticated(self, session):
import_error = [
ImportError(
filename="Lorem_ipsum.py",
stacktrace="Lorem ipsum",
timestamp=timezone.parse(self.timestamp, timezone="UTC"),
)
for _ in range(2)
]
session.add_all(import_error)
session.commit()
response = self.client.get("/api/v1/importErrors")
assert_401(response)
class TestGetImportErrorsEndpointPagination(TestBaseImportError):
@parameterized.expand(
[
# Limit test data
("/api/v1/importErrors?limit=1", ["/tmp/file_1.py"]),
("/api/v1/importErrors?limit=100", [f"/tmp/file_{i}.py" for i in range(1, 101)]),
# Offset test data
("/api/v1/importErrors?offset=1", [f"/tmp/file_{i}.py" for i in range(2, 102)]),
("/api/v1/importErrors?offset=3", [f"/tmp/file_{i}.py" for i in range(4, 104)]),
# Limit and offset test data
("/api/v1/importErrors?offset=3&limit=3", [f"/tmp/file_{i}.py" for i in [4, 5, 6]]),
]
)
@provide_session
def test_limit_and_offset(self, url, expected_import_error_ids, session):
import_errors = [
ImportError(
filename=f"/tmp/file_{i}.py",
stacktrace="Lorem ipsum",
timestamp=timezone.parse(self.timestamp, timezone="UTC"),
)
for i in range(1, 110)
]
session.add_all(import_errors)
session.commit()
response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
        filenames = [error["filename"] for error in response.json["import_errors"]]
        self.assertEqual(filenames, expected_import_error_ids)
@provide_session
def test_should_respect_page_size_limit_default(self, session):
import_errors = [
ImportError(
filename=f"/tmp/file_{i}.py",
stacktrace="Lorem ipsum",
timestamp=timezone.parse(self.timestamp, timezone="UTC"),
)
for i in range(1, 110)
]
session.add_all(import_errors)
session.commit()
response = self.client.get("/api/v1/importErrors", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
self.assertEqual(len(response.json['import_errors']), 100)
@provide_session
@conf_vars({("api", "maximum_page_limit"): "150"})
def test_should_return_conf_max_if_req_max_above_conf(self, session):
import_errors = [
ImportError(
filename=f"/tmp/file_{i}.py",
stacktrace="Lorem ipsum",
timestamp=timezone.parse(self.timestamp, timezone="UTC"),
)
for i in range(200)
]
session.add_all(import_errors)
session.commit()
response = self.client.get(
"/api/v1/importErrors?limit=180", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
self.assertEqual(len(response.json['import_errors']), 150)
| apache-2.0 |
onitake/ansible | lib/ansible/modules/network/f5/bigip_monitor_tcp.py | 4 | 20485 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_monitor_tcp
short_description: Manages F5 BIG-IP LTM tcp monitors
description: Manages F5 BIG-IP LTM tcp monitors via the iControl REST API.
version_added: 1.4
options:
name:
description:
- Monitor name.
required: True
parent:
description:
- The parent template of this monitor template. Once this value has
been set, it cannot be changed. By default, this value is the C(tcp)
parent on the C(Common) partition.
default: /Common/tcp
description:
description:
- The description of the monitor.
version_added: 2.7
send:
description:
- The send string for the monitor call.
receive:
description:
- The receive string for the monitor call.
ip:
description:
- IP address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'.
- If this value is an IP address, and the C(type) is C(tcp) (the default),
then a C(port) number must be specified.
port:
description:
- Port address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'. Note that if specifying an IP address, a value between 1 and 65535
        must be specified.
- This argument is not supported for TCP Echo types.
interval:
description:
- The interval specifying how frequently the monitor instance of this
template will run. If this parameter is not provided when creating
a new monitor, then the default value will be 5. This value B(must)
be less than the C(timeout) value.
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second. If this parameter is not
provided when creating a new monitor, then the default value will be 16.
time_until_up:
description:
- Specifies the amount of time in seconds after the first successful
response before a node will be marked up. A value of 0 will cause a
node to be marked up immediately after a valid response is received
from the node. If this parameter is not provided when creating
a new monitor, then the default value will be 0.
partition:
description:
- Device partition to manage resources on.
default: Common
version_added: 2.5
state:
description:
- When C(present), ensures that the monitor exists.
- When C(absent), ensures the monitor is removed.
default: present
choices:
- present
- absent
version_added: 2.5
notes:
- Requires BIG-IP software version >= 12
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create TCP Monitor
bigip_monitor_tcp:
state: present
server: lb.mydomain.com
user: admin
password: secret
name: my_tcp_monitor
send: tcp string to send
receive: tcp string to receive
delegate_to: localhost
- name: Remove TCP Monitor
bigip_monitor_tcp:
state: absent
server: lb.mydomain.com
user: admin
password: secret
name: my_tcp_monitor
delegate_to: localhost
'''
RETURN = r'''
parent:
description: New parent template of the monitor.
returned: changed
type: string
sample: tcp
send:
description: The new send string for this monitor.
returned: changed
type: string
sample: tcp string to send
description:
description: The description of the monitor.
returned: changed
type: str
sample: Important Monitor
receive:
description: The new receive string for this monitor.
returned: changed
type: string
sample: tcp string to receive
ip:
description: The new IP of IP/port definition.
returned: changed
type: string
sample: 10.12.13.14
port:
description: The new port of IP/port definition.
returned: changed
type: string
  sample: 80
interval:
description: The new interval in which to run the monitor check.
returned: changed
type: int
sample: 2
timeout:
description: The new timeout in which the remote system must respond to the monitor.
returned: changed
type: int
sample: 10
time_until_up:
description: The new time in which to mark a system as up after first successful response.
returned: changed
type: int
sample: 2
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.ipaddress import is_valid_ip
from library.module_utils.network.f5.compare import cmp_str_with_none
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
from ansible.module_utils.network.f5.compare import cmp_str_with_none
class Parameters(AnsibleF5Parameters):
api_map = {
'timeUntilUp': 'time_until_up',
'defaultsFrom': 'parent',
'recv': 'receive',
}
api_attributes = [
'timeUntilUp',
'defaultsFrom',
'interval',
'timeout',
'recv',
'send',
'destination',
'description',
]
returnables = [
'parent',
'send',
'receive',
'ip',
'port',
'interval',
'timeout',
'time_until_up',
'description',
]
updatables = [
'destination',
'send',
'receive',
'interval',
'timeout',
'time_until_up',
'description',
]
@property
def interval(self):
if self._values['interval'] is None:
return None
        if not 1 <= int(self._values['interval']) <= 86400:
raise F5ModuleError(
"Interval value must be between 1 and 86400"
)
return int(self._values['interval'])
@property
def timeout(self):
if self._values['timeout'] is None:
return None
return int(self._values['timeout'])
@property
def ip(self):
if self._values['ip'] is None:
return None
if self._values['ip'] in ['*', '0.0.0.0']:
return '*'
if is_valid_ip(self._values['ip']):
return self._values['ip']
else:
raise F5ModuleError(
"The provided 'ip' parameter is not an IP address."
)
@property
def time_until_up(self):
if self._values['time_until_up'] is None:
return None
return int(self._values['time_until_up'])
@property
def parent(self):
if self._values['parent'] is None:
return None
result = fq_name(self.partition, self._values['parent'])
return result
@property
def port(self):
if self._values['port'] is None:
return None
elif self._values['port'] == '*':
return '*'
return int(self._values['port'])
@property
def destination(self):
if self.ip is None and self.port is None:
return None
destination = '{0}:{1}'.format(self.ip, self.port)
return destination
@destination.setter
def destination(self, value):
ip, port = value.split(':')
self._values['ip'] = ip
self._values['port'] = port
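    # For example (hypothetical values): ip='10.1.1.1' and port=80 combine
    # into the destination '10.1.1.1:80'; the fully wildcarded form is '*:*'.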
@property
def type(self):
return 'tcp'
class ApiParameters(Parameters):
@property
def description(self):
if self._values['description'] in [None, 'none']:
return None
return self._values['description']
class ModuleParameters(Parameters):
@property
def description(self):
if self._values['description'] is None:
return None
elif self._values['description'] in ['none', '']:
return ''
return self._values['description']
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
result = self.__default(param)
return result
@property
def parent(self):
if self.want.parent != self.have.parent:
raise F5ModuleError(
"The parent monitor cannot be changed"
)
@property
def destination(self):
if self.want.ip is None and self.want.port is None:
return None
if self.want.port is None:
self.want.update({'port': self.have.port})
if self.want.ip is None:
self.want.update({'ip': self.have.ip})
if self.want.port in [None, '*'] and self.want.ip != '*':
raise F5ModuleError(
"Specifying an IP address requires that a port number be specified"
)
if self.want.destination != self.have.destination:
return self.want.destination
@property
def interval(self):
if self.want.timeout is not None and self.want.interval is not None:
if self.want.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.timeout is not None:
if self.have.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.interval is not None:
if self.want.interval >= self.have.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
if self.want.interval != self.have.interval:
return self.want.interval
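    # The three branches above validate interval < timeout against whichever
    # of the desired (want) or current (have) values will remain in effect.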
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def description(self):
return cmp_str_with_none(self.want.description, self.have.description)
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
self._set_default_creation_values()
if self.module.check_mode:
return True
self.create_on_device()
return True
def _set_default_creation_values(self):
if self.want.timeout is None:
self.want.update({'timeout': 16})
if self.want.interval is None:
self.want.update({'interval': 5})
if self.want.time_until_up is None:
self.want.update({'time_until_up': 0})
if self.want.ip is None:
self.want.update({'ip': '*'})
if self.want.port is None:
self.want.update({'port': '*'})
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent=dict(default='/Common/tcp'),
description=dict(),
send=dict(),
receive=dict(),
ip=dict(),
port=dict(),
interval=dict(type='int'),
timeout=dict(type='int'),
time_until_up=dict(type='int'),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| gpl-3.0 |
dmlc/xgboost | tests/python/test_with_pandas.py | 1 | 10402 | # -*- coding: utf-8 -*-
import numpy as np
import xgboost as xgb
import testing as tm
import pytest
try:
import pandas as pd
except ImportError:
pass
pytestmark = pytest.mark.skipif(**tm.no_pandas())
dpath = 'demo/data/'
rng = np.random.RandomState(1994)
class TestPandas:
def test_pandas(self):
df = pd.DataFrame([[1, 2., True], [2, 3., False]],
columns=['a', 'b', 'c'])
dm = xgb.DMatrix(df, label=pd.Series([1, 2]))
assert dm.feature_names == ['a', 'b', 'c']
assert dm.feature_types == ['int', 'float', 'i']
assert dm.num_row() == 2
assert dm.num_col() == 3
np.testing.assert_array_equal(dm.get_label(), np.array([1, 2]))
# overwrite feature_names and feature_types
dm = xgb.DMatrix(df, label=pd.Series([1, 2]),
feature_names=['x', 'y', 'z'],
feature_types=['q', 'q', 'q'])
assert dm.feature_names == ['x', 'y', 'z']
assert dm.feature_types == ['q', 'q', 'q']
assert dm.num_row() == 2
assert dm.num_col() == 3
# incorrect dtypes
df = pd.DataFrame([[1, 2., 'x'], [2, 3., 'y']],
columns=['a', 'b', 'c'])
with pytest.raises(ValueError):
xgb.DMatrix(df)
# numeric columns
df = pd.DataFrame([[1, 2., True], [2, 3., False]])
dm = xgb.DMatrix(df, label=pd.Series([1, 2]))
assert dm.feature_names == ['0', '1', '2']
assert dm.feature_types == ['int', 'float', 'i']
assert dm.num_row() == 2
assert dm.num_col() == 3
np.testing.assert_array_equal(dm.get_label(), np.array([1, 2]))
df = pd.DataFrame([[1, 2., 1], [2, 3., 1]], columns=[4, 5, 6])
dm = xgb.DMatrix(df, label=pd.Series([1, 2]))
assert dm.feature_names == ['4', '5', '6']
assert dm.feature_types == ['int', 'float', 'int']
assert dm.num_row() == 2
assert dm.num_col() == 3
df = pd.DataFrame({'A': ['X', 'Y', 'Z'], 'B': [1, 2, 3]})
dummies = pd.get_dummies(df)
# B A_X A_Y A_Z
# 0 1 1 0 0
# 1 2 0 1 0
# 2 3 0 0 1
result, _, _ = xgb.data._transform_pandas_df(dummies,
enable_categorical=False)
exp = np.array([[1., 1., 0., 0.],
[2., 0., 1., 0.],
[3., 0., 0., 1.]])
np.testing.assert_array_equal(result, exp)
dm = xgb.DMatrix(dummies)
assert dm.feature_names == ['B', 'A_X', 'A_Y', 'A_Z']
assert dm.feature_types == ['int', 'int', 'int', 'int']
assert dm.num_row() == 3
assert dm.num_col() == 4
df = pd.DataFrame({'A=1': [1, 2, 3], 'A=2': [4, 5, 6]})
dm = xgb.DMatrix(df)
assert dm.feature_names == ['A=1', 'A=2']
assert dm.feature_types == ['int', 'int']
assert dm.num_row() == 3
assert dm.num_col() == 2
df_int = pd.DataFrame([[1, 1.1], [2, 2.2]], columns=[9, 10])
dm_int = xgb.DMatrix(df_int)
df_range = pd.DataFrame([[1, 1.1], [2, 2.2]], columns=range(9, 11, 1))
dm_range = xgb.DMatrix(df_range)
assert dm_int.feature_names == ['9', '10'] # assert not "9 "
assert dm_int.feature_names == dm_range.feature_names
# test MultiIndex as columns
df = pd.DataFrame(
[
(1, 2, 3, 4, 5, 6),
(6, 5, 4, 3, 2, 1)
],
columns=pd.MultiIndex.from_tuples((
('a', 1), ('a', 2), ('a', 3),
('b', 1), ('b', 2), ('b', 3),
))
)
dm = xgb.DMatrix(df)
assert dm.feature_names == ['a 1', 'a 2', 'a 3', 'b 1', 'b 2', 'b 3']
assert dm.feature_types == ['int', 'int', 'int', 'int', 'int', 'int']
assert dm.num_row() == 2
assert dm.num_col() == 6
def test_slice(self):
rng = np.random.RandomState(1994)
rows = 100
X = rng.randint(3, 7, size=rows)
X = pd.DataFrame({'f0': X})
y = rng.randn(rows)
ridxs = [1, 2, 3, 4, 5, 6]
m = xgb.DMatrix(X, y)
sliced = m.slice(ridxs)
assert m.feature_types == sliced.feature_types
def test_pandas_categorical(self):
rng = np.random.RandomState(1994)
rows = 100
X = rng.randint(3, 7, size=rows)
X = pd.Series(X, dtype="category")
X = pd.DataFrame({'f0': X})
y = rng.randn(rows)
m = xgb.DMatrix(X, y, enable_categorical=True)
assert m.feature_types[0] == 'categorical'
def test_pandas_sparse(self):
import pandas as pd
rows = 100
X = pd.DataFrame(
{"A": pd.arrays.SparseArray(np.random.randint(0, 10, size=rows)),
"B": pd.arrays.SparseArray(np.random.randn(rows)),
"C": pd.arrays.SparseArray(np.random.permutation(
[True, False] * (rows // 2)))}
)
y = pd.Series(pd.arrays.SparseArray(np.random.randn(rows)))
dtrain = xgb.DMatrix(X, y)
booster = xgb.train({}, dtrain, num_boost_round=4)
predt_sparse = booster.predict(xgb.DMatrix(X))
predt_dense = booster.predict(xgb.DMatrix(X.sparse.to_dense()))
np.testing.assert_allclose(predt_sparse, predt_dense)
def test_pandas_label(self):
# label must be a single column
df = pd.DataFrame({'A': ['X', 'Y', 'Z'], 'B': [1, 2, 3]})
with pytest.raises(ValueError):
xgb.data._transform_pandas_df(df, False, None, None, 'label', 'float')
        # label must be of a supported dtype
df = pd.DataFrame({'A': np.array(['a', 'b', 'c'], dtype=object)})
with pytest.raises(ValueError):
xgb.data._transform_pandas_df(df, False, None, None, 'label', 'float')
df = pd.DataFrame({'A': np.array([1, 2, 3], dtype=int)})
result, _, _ = xgb.data._transform_pandas_df(df, False, None, None,
'label', 'float')
np.testing.assert_array_equal(result, np.array([[1.], [2.], [3.]],
dtype=float))
dm = xgb.DMatrix(np.random.randn(3, 2), label=df)
assert dm.num_row() == 3
assert dm.num_col() == 2
def test_pandas_weight(self):
kRows = 32
kCols = 8
X = np.random.randn(kRows, kCols)
y = np.random.randn(kRows)
w = np.random.uniform(size=kRows).astype(np.float32)
w_pd = pd.DataFrame(w)
data = xgb.DMatrix(X, y, w_pd)
assert data.num_row() == kRows
assert data.num_col() == kCols
np.testing.assert_array_equal(data.get_weight(), w)
def test_cv_as_pandas(self):
dm = xgb.DMatrix(dpath + 'agaricus.txt.train')
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic', 'eval_metric': 'error'}
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10)
assert isinstance(cv, pd.DataFrame)
exp = pd.Index([u'test-error-mean', u'test-error-std',
u'train-error-mean', u'train-error-std'])
assert len(cv.columns.intersection(exp)) == 4
# show progress log (result is the same as above)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
verbose_eval=True)
assert isinstance(cv, pd.DataFrame)
exp = pd.Index([u'test-error-mean', u'test-error-std',
u'train-error-mean', u'train-error-std'])
assert len(cv.columns.intersection(exp)) == 4
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
verbose_eval=True, show_stdv=False)
assert isinstance(cv, pd.DataFrame)
exp = pd.Index([u'test-error-mean', u'test-error-std',
u'train-error-mean', u'train-error-std'])
assert len(cv.columns.intersection(exp)) == 4
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic', 'eval_metric': 'auc'}
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, as_pandas=True)
assert 'eval_metric' in params
assert 'auc' in cv.columns[0]
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic', 'eval_metric': ['auc']}
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, as_pandas=True)
assert 'eval_metric' in params
assert 'auc' in cv.columns[0]
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic', 'eval_metric': ['auc']}
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
as_pandas=True, early_stopping_rounds=1)
assert 'eval_metric' in params
assert 'auc' in cv.columns[0]
assert cv.shape[0] < 10
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic'}
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
as_pandas=True, metrics='auc')
assert 'auc' in cv.columns[0]
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic'}
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
as_pandas=True, metrics=['auc'])
assert 'auc' in cv.columns[0]
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic', 'eval_metric': ['auc']}
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
as_pandas=True, metrics='error')
assert 'eval_metric' in params
assert 'auc' not in cv.columns[0]
assert 'error' in cv.columns[0]
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
as_pandas=True, metrics=['error'])
assert 'eval_metric' in params
assert 'auc' not in cv.columns[0]
assert 'error' in cv.columns[0]
params = list(params.items())
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
as_pandas=True, metrics=['error'])
assert isinstance(params, list)
assert 'auc' not in cv.columns[0]
assert 'error' in cv.columns[0]
| apache-2.0 |
welshjf/bitnomon | bitnomon/formatting.py | 1 | 1555 | # Copyright 2015 Jacob Welsh
#
# This file is part of Bitnomon; see the README for license information.
"""Text/number formatting"""
class ByteCountFormatter(object):
#pylint: disable=too-few-public-methods
"""Human-readable display of byte counts in various formats.
    By default, the formatter uses SI and bytes, so 1000 => "1.00 kB". All
combinations of (byte, bit) x (SI, binary) are supported, though you
probably shouldn't use bits with the binary prefixes.
Attributes:
unit_bits True for bits or False for bytes
prefix_si True for SI or False for binary prefixes
"""
SI_prefixes = ('k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
binary_prefixes = ('Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
def __init__(self):
self.unit_bits = False
self.prefix_si = True
def __call__(self, count):
"""Formats a byte count using the configured settings."""
if self.unit_bits:
count *= 8
unit = 'b'
else:
unit = 'B'
if self.prefix_si:
factor = 1000.
prefixes = self.SI_prefixes
else:
factor = 1024.
prefixes = self.binary_prefixes
if abs(count) < factor:
return u'%d %c' % (count, unit)
size = float(count)
prefix_index = 0
while abs(size) >= factor and prefix_index < len(prefixes):
size /= factor
prefix_index += 1
return u'%.2f %s%c' % (size, prefixes[prefix_index-1], unit)
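if __name__ == '__main__':
    # Quick usage sketch (not part of the original module): the same count
    # rendered as SI bytes and then as binary bits.
    fmt = ByteCountFormatter()
    print(fmt(1536))  # 1.54 kB
    fmt.unit_bits = True
    fmt.prefix_si = False
    print(fmt(1536))  # 12.00 Kib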
| apache-2.0 |
SaschaMester/delicium | tools/python/google/platform_utils_mac.py | 183 | 5676 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Platform-specific utility methods shared by several scripts."""
import os
import subprocess
import google.path_utils
class PlatformUtility(object):
def __init__(self, base_dir):
"""Args:
base_dir: the base dir for running tests.
"""
self._base_dir = base_dir
self._httpd_cmd_string = None # used for starting/stopping httpd
self._bash = "/bin/bash"
def _UnixRoot(self):
"""Returns the path to root."""
return "/"
def GetFilesystemRoot(self):
"""Returns the root directory of the file system."""
return self._UnixRoot()
def GetTempDirectory(self):
"""Returns the file system temp directory
Note that this does not use a random subdirectory, so it's not
intrinsically secure. If you need a secure subdir, use the tempfile
package.
"""
return os.getenv("TMPDIR", "/tmp")
def FilenameToUri(self, path, use_http=False, use_ssl=False, port=8000):
"""Convert a filesystem path to a URI.
Args:
path: For an http URI, the path relative to the httpd server's
DocumentRoot; for a file URI, the full path to the file.
use_http: if True, returns a URI of the form http://127.0.0.1:8000/.
If False, returns a file:/// URI.
use_ssl: if True, returns HTTPS URL (https://127.0.0.1:8000/).
This parameter is ignored if use_http=False.
port: The port number to append when returning an HTTP URI
"""
if use_http:
protocol = 'http'
if use_ssl:
protocol = 'https'
return "%s://127.0.0.1:%d/%s" % (protocol, port, path)
return "file://" + path
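  # Example (hypothetical arguments): FilenameToUri("LayoutTests/fast",
  # use_http=True, port=8000) returns "http://127.0.0.1:8000/LayoutTests/fast",
  # while FilenameToUri("/tmp/foo.html") returns "file:///tmp/foo.html".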
def GetStartHttpdCommand(self, output_dir,
httpd_conf_path, mime_types_path,
document_root=None, apache2=False):
"""Prepares the config file and output directory to start an httpd server.
Returns a list of strings containing the server's command line+args.
Args:
output_dir: the path to the server's output directory, for log files.
It will be created if necessary.
httpd_conf_path: full path to the httpd.conf file to be used.
mime_types_path: full path to the mime.types file to be used.
document_root: full path to the DocumentRoot. If None, the DocumentRoot
from the httpd.conf file will be used. Note that the httpd.conf
file alongside this script does not specify any DocumentRoot, so if
you're using that one, be sure to specify a document_root here.
apache2: boolean if true will cause this function to return start
command for Apache 2.x as opposed to Apache 1.3.x. This flag
is ignored on Mac (but preserved here for compatibility in
function signature with win), where httpd2 is used always
"""
exe_name = "httpd"
cert_file = google.path_utils.FindUpward(self._base_dir, 'tools',
'python', 'google',
'httpd_config', 'httpd2.pem')
ssl_enabled = os.path.exists('/etc/apache2/mods-enabled/ssl.conf')
httpd_vars = {
"httpd_executable_path":
os.path.join(self._UnixRoot(), "usr", "sbin", exe_name),
"httpd_conf_path": httpd_conf_path,
"ssl_certificate_file": cert_file,
"document_root" : document_root,
"server_root": os.path.join(self._UnixRoot(), "usr"),
"mime_types_path": mime_types_path,
"output_dir": output_dir,
"ssl_mutex": "file:"+os.path.join(output_dir, "ssl_mutex"),
"user": os.environ.get("USER", "#%d" % os.geteuid()),
"lock_file": os.path.join(output_dir, "accept.lock"),
}
google.path_utils.MaybeMakeDirectory(output_dir)
# We have to wrap the command in bash
# -C: process directive before reading config files
# -c: process directive after reading config files
# Apache wouldn't run CGIs with permissions==700 unless we add
# -c User "<username>"
httpd_cmd_string = (
'%(httpd_executable_path)s'
' -f %(httpd_conf_path)s'
' -c \'TypesConfig "%(mime_types_path)s"\''
' -c \'CustomLog "%(output_dir)s/access_log.txt" common\''
' -c \'ErrorLog "%(output_dir)s/error_log.txt"\''
' -c \'PidFile "%(output_dir)s/httpd.pid"\''
' -C \'User "%(user)s"\''
' -C \'ServerRoot "%(server_root)s"\''
' -c \'LockFile "%(lock_file)s"\''
)
if document_root:
httpd_cmd_string += ' -C \'DocumentRoot "%(document_root)s"\''
if ssl_enabled:
httpd_cmd_string += (
' -c \'SSLCertificateFile "%(ssl_certificate_file)s"\''
' -c \'SSLMutex "%(ssl_mutex)s"\''
)
# Save a copy of httpd_cmd_string to use for stopping httpd
self._httpd_cmd_string = httpd_cmd_string % httpd_vars
httpd_cmd = [self._bash, "-c", self._httpd_cmd_string]
return httpd_cmd
def GetStopHttpdCommand(self):
"""Returns a list of strings that contains the command line+args needed to
stop the http server used in the http tests.
This tries to fetch the pid of httpd (if available) and returns the
command to kill it. If pid is not available, kill all httpd processes
"""
if not self._httpd_cmd_string:
return ["true"] # Haven't been asked for the start cmd yet. Just pass.
# Add a sleep after the shutdown because sometimes it takes some time for
# the port to be available again.
return [self._bash, "-c", self._httpd_cmd_string + ' -k stop && sleep 5']
| bsd-3-clause |
martinhoaragao/hour-of-code | node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py | 1284 | 100329 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
from gyp.common import OrderedSet
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! and $| (which begin with a $ so gyp knows it
# should be treated specially, but is otherwise an invalid
# ninja/shell variable) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
    # cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
def AddArch(output, arch):
"""Adds an arch string to an output path."""
output, extension = os.path.splitext(output)
return '%s.%s%s' % (output, arch, extension)
class Target(object):
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here. In this case, we also need to save the compile_deps for the target,
    # so that the target that directly depends on the .objs can also depend
# on those.
self.component_objs = None
self.compile_deps = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
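#
# A concrete illustration (hypothetical layout): with base_dir 'foo' and
# build_dir 'out/Debug', GypPathToNinja('bar.cc') yields '../../foo/bar.cc',
# while GypPathToUniqueOutput('bar.o') for a target 'targ' yields
# 'obj/foo/targ.bar.o' (see the scheme comment inside GypPathToUniqueOutput).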
class NinjaWriter(object):
def __init__(self, hash_for_rules, target_outputs, base_dir, build_dir,
output_file, toplevel_build, output_file_name, flavor,
toplevel_dir=None):
"""
base_dir: path from source root to directory containing this gyp file,
by gyp semantics, all input paths are relative to this
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.hash_for_rules = hash_for_rules
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.toplevel_build = toplevel_build
self.output_file_name = output_file_name
self.flavor = flavor
self.abs_build_dir = None
if toplevel_dir is not None:
self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
build_dir))
self.obj_ext = '.obj' if flavor == 'win' else '.o'
if flavor == 'win':
# See docstring of msvs_emulation.GenerateEnvironmentFiles().
self.win_env = {}
for arch in ('x86', 'x64'):
self.win_env[arch] = 'environment.' + arch
# Relative path from build output dir to base dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
self.build_to_base = os.path.join(build_to_top, base_dir)
# Relative path from base dir to build dir.
base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
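  # For instance (hypothetical values): ExpandSpecial('$!PRODUCT_DIR/gen/a.h',
  # product_dir='../out') gives '../out/gen/a.h', while with product_dir=None
  # the prefix is stripped, giving 'gen/a.h'.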
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
    assert not os.path.isabs(path_dir), (
        "'%s' cannot be an absolute path (see crbug.com/462153)." % path_dir)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets, order_only=None):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
assert not order_only
return None
if len(targets) > 1 or order_only:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets, order_only=order_only)
self.ninja.newline()
return targets[0]
def _SubninjaNameForArch(self, arch):
output_file_base = os.path.splitext(self.output_file_name)[0]
return '%s.%s.ninja' % (output_file_base, arch)
def WriteSpec(self, spec, config_name, generator_flags):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
# Track if this target contains any C++ files, to decide if gcc or g++
# should be used for linking.
self.uses_cpp = False
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
self.xcode_settings = self.msvs_settings = None
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
if self.flavor == 'win':
self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
generator_flags)
arch = self.msvs_settings.GetArch(config_name)
self.ninja.variable('arch', self.win_env[arch])
self.ninja.variable('cc', '$cl_' + arch)
self.ninja.variable('cxx', '$cl_' + arch)
self.ninja.variable('cc_host', '$cl_' + arch)
self.ninja.variable('cxx_host', '$cl_' + arch)
self.ninja.variable('asm', '$ml_' + arch)
if self.flavor == 'mac':
self.archs = self.xcode_settings.GetActiveArchs(config_name)
if len(self.archs) > 1:
self.arch_subninjas = dict(
(arch, ninja_syntax.Writer(
OpenOutput(os.path.join(self.toplevel_build,
self._SubninjaNameForArch(arch)),
'w')))
for arch in self.archs)
# Compute predepends for all rules.
# actions_depends is the dependencies this target depends on before running
# any of its action/rule/copy steps.
# compile_depends is the dependencies this target depends on before running
# any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput(self.flavor))
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
# If we have actions/rules/copies, we depend directly on those, but
# otherwise we depend on dependent target's actions/rules/copies etc.
# We never need to explicitly depend on previous target's link steps,
# because no compile ever depends on them.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = extra_sources + spec.get('sources', [])
if sources:
if self.flavor == 'mac' and len(self.archs) > 1:
# Write subninja file containing compile and link commands scoped to
# a single arch if a fat binary is being built.
for arch in self.archs:
self.ninja.subninja(self._SubninjaNameForArch(arch))
pch = None
if self.flavor == 'win':
gyp.msvs_emulation.VerifyMissingSources(
sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
pch = gyp.msvs_emulation.PrecompiledHeader(
self.msvs_settings, config_name, self.GypPathToNinja,
self.GypPathToUniqueOutput, self.obj_ext)
else:
pch = gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
link_deps = self.WriteSources(
self.ninja, config_name, config, sources, compile_depends_stamp, pch,
spec)
# Some actions/rules output 'sources' that are already object files.
obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
if obj_outputs:
if self.flavor != 'mac' or len(self.archs) == 1:
link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
else:
print "Warning: Actions/rules writing object files don't work with " \
"multiarch targets, dropping. (target %s)" % spec['target_name']
elif self.flavor == 'mac' and len(self.archs) > 1:
link_deps = collections.defaultdict(list)
compile_deps = self.target.actions_stamp or actions_depends
if self.flavor == 'win' and self.target.type == 'static_library':
self.target.component_objs = link_deps
self.target.compile_deps = compile_deps
# Write out a link step, if needed.
output = None
is_empty_bundle = not link_deps and not mac_bundle_depends
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
compile_deps)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)
if not output:
return None
assert self.target.FinalOutput(), output
return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRulesOrActions(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
else:
mac_bundle_resources = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
mac_bundle_resources,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
if 'sources' in spec and self.flavor == 'win':
outputs += self.WriteWinIdlFiles(spec, prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
xcassets = self.WriteMacBundleResources(
extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
partial_info_plist = self.WriteMacXCassets(xcassets, mac_bundle_depends)
self.WriteMacInfoPlist(partial_info_plist, mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
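# Illustrative outputs (hypothetical values): for toolset 'host' on a target
# named 'base', GenerateDescription('ACTION', None, 'foo_action') yields
# 'ACTION(host) base: foo_action', while a hand-written message
# 'Generating bar' yields 'ACTION(host) Generating bar'.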
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
# Actions cd into the base directory.
env = self.GetToolchainEnv()
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = '%s_%s' % (action['action_name'], self.hash_for_rules)
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
if self.flavor == 'win' else False)
args = action['action']
depfile = action.get('depfile', None)
if depfile:
depfile = self.ExpandSpecial(depfile, self.base_to_build)
pool = 'console' if int(action.get('ninja_use_console', 0)) else None
rule_name, _ = self.WriteNewNinjaRule(name, args, description,
is_cygwin, env, pool,
depfile=depfile)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
mac_bundle_resources, extra_mac_bundle_resources):
env = self.GetToolchainEnv()
all_outputs = []
for rule in rules:
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
# First write out a rule for the rule action.
name = '%s_%s' % (rule['rule_name'], self.hash_for_rules)
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
if self.flavor == 'win' else False)
pool = 'console' if int(rule.get('ninja_use_console', 0)) else None
rule_name, args = self.WriteNewNinjaRule(
name, args, description, is_cygwin, env, pool)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if '${%s}' % var in argument:
needed_variables.add(var)
def cygwin_munge(path):
# pylint: disable=cell-var-from-loop
if is_cygwin:
return path.replace('\\', '/')
return path
inputs = [self.GypPathToNinja(i, env) for i in rule.get('inputs', [])]
# If there are n source files matching the rule, and m additional rule
# inputs, then adding 'inputs' to each build edge written below will
# write m * n inputs. Collapsing reduces this to m + n.
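# e.g. with n=20 matching sources and m=4 extra rule inputs, inlining would
# emit 20*4 = 80 input references; with the collapsed stamp each edge lists
# only the stamp, for 20+4 = 24.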
sources = rule.get('rule_sources', [])
num_inputs = len(inputs)
if prebuild:
num_inputs += 1
if num_inputs > 2 and len(sources) > 2:
inputs = [self.WriteCollapsedDependencies(
rule['rule_name'], inputs, order_only=prebuild)]
prebuild = []
# For each source file, write an edge that generates all the outputs.
for source in sources:
source = os.path.normpath(source)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of inputs and outputs, expanding $vars if possible.
outputs = [self.ExpandRuleVariables(o, root, dirname,
source, ext, basename)
for o in rule['outputs']]
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
was_mac_bundle_resource = source in mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Note: This is n_resources * n_outputs_in_rule. Put to-be-removed
# items in a set and remove them all in a single pass if this becomes
# a performance issue.
if was_mac_bundle_resource:
mac_bundle_resources.remove(source)
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', cygwin_munge(root)))
elif var == 'dirname':
# '$dirname' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', cygwin_munge(source_expanded)))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', cygwin_munge(basename)))
else:
assert var is None, repr(var)
outputs = [self.GypPathToNinja(o, env) for o in outputs]
if self.flavor == 'win':
# WriteNewNinjaRule uses unique_name for creating an rsp file on win.
extra_bindings.append(('unique_name',
hashlib.md5(outputs[0]).hexdigest()))
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetToolchainEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
xcassets = []
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, map(self.GypPathToNinja, resources)):
output = self.ExpandSpecial(output)
if os.path.splitext(output)[-1] != '.xcassets':
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource'), \
('binary', isBinary)])
bundle_depends.append(output)
else:
xcassets.append(res)
return xcassets
def WriteMacXCassets(self, xcassets, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources' .xcassets files.
This add an invocation of 'actool' via the 'mac_tool.py' helper script.
It assumes that the assets catalogs define at least one imageset and
thus an Assets.car file will be generated in the application resources
directory. If this is not the case, then the build will probably be done
at each invocation of ninja."""
if not xcassets:
return
extra_arguments = {}
settings_to_arg = {
'XCASSETS_APP_ICON': 'app-icon',
'XCASSETS_LAUNCH_IMAGE': 'launch-image',
}
settings = self.xcode_settings.xcode_settings[self.config_name]
for settings_key, arg_name in settings_to_arg.iteritems():
value = settings.get(settings_key)
if value:
extra_arguments[arg_name] = value
partial_info_plist = None
if extra_arguments:
partial_info_plist = self.GypPathToUniqueOutput(
'assetcatalog_generated_info.plist')
extra_arguments['output-partial-info-plist'] = partial_info_plist
outputs = []
outputs.append(
os.path.join(
self.xcode_settings.GetBundleResourceFolder(),
'Assets.car'))
if partial_info_plist:
outputs.append(partial_info_plist)
keys = QuoteShellArgument(json.dumps(extra_arguments), self.flavor)
extra_env = self.xcode_settings.GetPerTargetSettings()
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
bundle_depends.extend(self.ninja.build(
outputs, 'compile_xcassets', xcassets,
variables=[('env', env), ('keys', keys)]))
return partial_info_plist
def WriteMacInfoPlist(self, partial_info_plist, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
out = self.ExpandSpecial(out)
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join([Define(d, self.flavor) for d in defines])
info_plist = self.ninja.build(
intermediate_plist, 'preprocess_infoplist', info_plist,
variables=[('defines',defines)])
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
if partial_info_plist:
intermediate_plist = self.GypPathToUniqueOutput('merged_info.plist')
info_plist = self.ninja.build(
intermediate_plist, 'merge_infoplist',
[partial_info_plist, info_plist])
keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
keys = QuoteShellArgument(json.dumps(keys), self.flavor)
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(out, 'copy_infoplist', info_plist,
variables=[('env', env), ('keys', keys),
('binary', isBinary)])
bundle_depends.append(out)
def WriteSources(self, ninja_file, config_name, config, sources, predepends,
precompiled_header, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
self.ninja.variable('ldxx', '$ldxx_host')
self.ninja.variable('nm', '$nm_host')
self.ninja.variable('readelf', '$readelf_host')
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteSourcesForArch(
self.ninja, config_name, config, sources, predepends,
precompiled_header, spec)
else:
return dict((arch, self.WriteSourcesForArch(
self.arch_subninjas[arch], config_name, config, sources, predepends,
precompiled_header, spec, arch=arch))
for arch in self.archs)
def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
predepends, precompiled_header, spec, arch=None):
"""Write build rules to compile all of |sources|."""
extra_defines = []
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
elif self.flavor == 'win':
asmflags = self.msvs_settings.GetAsmflags(config_name)
cflags = self.msvs_settings.GetCflags(config_name)
cflags_c = self.msvs_settings.GetCflagsC(config_name)
cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
extra_defines = self.msvs_settings.GetComputedDefines(config_name)
# See comment at cc_command for why there's two .pdb files.
pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
config_name, self.ExpandSpecial)
if not pdbpath_c:
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
pdbpath_c = pdbpath + '.c.pdb'
pdbpath_cc = pdbpath + '.cc.pdb'
self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
# Respect environment variables related to build, but target-specific
# flags can still override them.
if self.toolset == 'target':
cflags_c = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CFLAGS', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CXXFLAGS', '').split() + cflags_cc)
elif self.toolset == 'host':
cflags_c = (os.environ.get('CPPFLAGS_host', '').split() +
os.environ.get('CFLAGS_host', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS_host', '').split() +
os.environ.get('CXXFLAGS_host', '').split() + cflags_cc)
defines = config.get('defines', []) + extra_defines
self.WriteVariableList(ninja_file, 'defines',
[Define(d, self.flavor) for d in defines])
if self.flavor == 'win':
self.WriteVariableList(ninja_file, 'asmflags',
map(self.ExpandSpecial, asmflags))
self.WriteVariableList(ninja_file, 'rcflags',
[QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
for f in self.msvs_settings.GetRcflags(config_name,
self.GypPathToNinja)])
include_dirs = config.get('include_dirs', [])
env = self.GetToolchainEnv()
if self.flavor == 'win':
include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
config_name)
self.WriteVariableList(ninja_file, 'includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in include_dirs])
if self.flavor == 'win':
midl_include_dirs = config.get('midl_include_dirs', [])
midl_include_dirs = self.msvs_settings.AdjustMidlIncludeDirs(
midl_include_dirs, config_name)
self.WriteVariableList(ninja_file, 'midl_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in midl_include_dirs])
pch_commands = precompiled_header.GetPchBuildCommands(arch)
if self.flavor == 'mac':
# Most targets use no precompiled headers, so only write these if needed.
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
include = precompiled_header.GetInclude(ext, arch)
if include: ninja_file.variable(var, include)
arflags = config.get('arflags', [])
self.WriteVariableList(ninja_file, 'cflags',
map(self.ExpandSpecial, cflags))
self.WriteVariableList(ninja_file, 'cflags_c',
map(self.ExpandSpecial, cflags_c))
self.WriteVariableList(ninja_file, 'cflags_cc',
map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList(ninja_file, 'cflags_objc',
map(self.ExpandSpecial, cflags_objc))
self.WriteVariableList(ninja_file, 'cflags_objcc',
map(self.ExpandSpecial, cflags_objcc))
self.WriteVariableList(ninja_file, 'arflags',
map(self.ExpandSpecial, arflags))
ninja_file.newline()
outputs = []
has_rc_source = False
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
obj_ext = self.obj_ext
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
self.uses_cpp = True
elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
command = 'cc'
elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
command = 'cc_s'
elif (self.flavor == 'win' and ext == 'asm' and
not self.msvs_settings.HasExplicitAsmRules(spec)):
command = 'asm'
# Add the _asm suffix as msvs is capable of handling .cc and
# .asm files of the same name without collision.
obj_ext = '_asm.obj'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
self.uses_cpp = True
elif self.flavor == 'win' and ext == 'rc':
command = 'rc'
obj_ext = '.res'
has_rc_source = True
else:
# Ignore unhandled extensions.
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + obj_ext)
if arch is not None:
output = AddArch(output, arch)
implicit = precompiled_header.GetObjDependencies([input], [output], arch)
variables = []
if self.flavor == 'win':
variables, output, implicit = precompiled_header.GetFlagsModifications(
input, output, implicit, command, cflags_c, cflags_cc,
self.ExpandSpecial)
ninja_file.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends, variables=variables)
outputs.append(output)
if has_rc_source:
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
self.WriteVariableList(ninja_file, 'resource_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in resource_include_dirs])
self.WritePchTargets(ninja_file, pch_commands)
ninja_file.newline()
return outputs
def WritePchTargets(self, ninja_file, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
cmd = map.get(lang)
ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteLinkForArch(
self.ninja, spec, config_name, config, link_deps)
else:
output = self.ComputeOutput(spec)
inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
config_name, config, link_deps[arch],
arch=arch)
for arch in self.archs]
extra_bindings = []
build_output = output
if not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
# TODO(yyanagisawa): more work needed to fix:
# https://code.google.com/p/gyp/issues/detail?id=411
if (spec['type'] in ('shared_library', 'loadable_module') and
not self.is_mac_bundle):
extra_bindings.append(('lib', output))
self.ninja.build([output, output + '.TOC'], 'solipo', inputs,
variables=extra_bindings)
else:
self.ninja.build(build_output, 'lipo', inputs, variables=extra_bindings)
return output
def WriteLinkForArch(self, ninja_file, spec, config_name, config,
link_deps, arch=None):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
command_suffix = ''
implicit_deps = set()
solibs = set()
order_deps = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
new_deps = []
if (self.flavor == 'win' and
target.component_objs and
self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
new_deps = target.component_objs
if target.compile_deps:
order_deps.add(target.compile_deps)
elif self.flavor == 'win' and target.import_lib:
new_deps = [target.import_lib]
elif target.UsesToc(self.flavor):
solibs.add(target.binary)
implicit_deps.add(target.binary + '.TOC')
else:
new_deps = [target.binary]
for new_dep in new_deps:
if new_dep not in extra_link_deps:
extra_link_deps.add(new_dep)
link_deps.append(new_dep)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
extra_bindings = []
if self.uses_cpp and self.flavor != 'win':
extra_bindings.append(('ld', '$ldxx'))
output = self.ComputeOutput(spec, arch)
if arch is None and not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
is_executable = spec['type'] == 'executable'
# The ldflags config key is not used on mac or win. On those platforms
# linker flags are set via xcode_settings and msvs_settings, respectively.
env_ldflags = os.environ.get('LDFLAGS', '').split()
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja, arch)
ldflags = env_ldflags + ldflags
elif self.flavor == 'win':
manifest_base_name = self.GypPathToUniqueOutput(
self.ComputeOutputFileName(spec))
ldflags, intermediate_manifest, manifest_files = \
self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
self.ExpandSpecial, manifest_base_name,
output, is_executable,
self.toplevel_build)
ldflags = env_ldflags + ldflags
self.WriteVariableList(ninja_file, 'manifests', manifest_files)
implicit_deps = implicit_deps.union(manifest_files)
if intermediate_manifest:
self.WriteVariableList(
ninja_file, 'intermediatemanifest', [intermediate_manifest])
command_suffix = _GetWinLinkRuleNameSuffix(
self.msvs_settings.IsEmbedManifest(config_name))
def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
if def_file:
implicit_deps.add(def_file)
else:
# Respect environment variables related to build, but target-specific
# flags can still override them.
ldflags = env_ldflags + config.get('ldflags', [])
if is_executable and len(solibs):
rpath = 'lib/'
if self.toolset != 'target':
rpath += self.toolset
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/%s' % rpath)
ldflags.append('-Wl,-rpath-link=%s' % rpath)
self.WriteVariableList(ninja_file, 'ldflags',
map(self.ExpandSpecial, ldflags))
library_dirs = config.get('library_dirs', [])
if self.flavor == 'win':
library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
for l in library_dirs]
library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
else:
library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
elif self.flavor == 'win':
libraries = self.msvs_settings.AdjustLibraries(libraries)
self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)
linked_binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
extra_bindings.append(('lib',
gyp.common.EncodePOSIXShellArgument(output)))
if self.flavor != 'win':
link_file_list = output
if self.is_mac_bundle:
# 'Dependency Framework.framework/Versions/A/Dependency Framework' ->
# 'Dependency Framework.framework.rsp'
link_file_list = self.xcode_settings.GetWrapperName()
if arch:
link_file_list += '.' + arch
link_file_list += '.rsp'
# If an rspfile name contains spaces, ninja surrounds the filename with
# quotes and then passes it to open(), creating a file with quotes in its
# name (and when looking for the rsp file, the name makes it through bash,
# which strips the quotes) :-/
link_file_list = link_file_list.replace(' ', '_')
extra_bindings.append(
('link_file_list',
gyp.common.EncodePOSIXShellArgument(link_file_list)))
if self.flavor == 'win':
extra_bindings.append(('binary', output))
if ('/NOENTRY' not in ldflags and
not self.msvs_settings.GetNoImportLibrary(config_name)):
self.target.import_lib = output + '.lib'
extra_bindings.append(('implibflag',
'/IMPLIB:%s' % self.target.import_lib))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
output = [output, self.target.import_lib]
if pdbname:
output.append(pdbname)
elif not self.is_mac_bundle:
output = [output, output + '.TOC']
else:
command = command + '_notoc'
elif self.flavor == 'win':
extra_bindings.append(('binary', output))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
if pdbname:
output = [output, pdbname]
if len(solibs):
extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
ninja_file.build(output, command + command_suffix, link_deps,
implicit=list(implicit_deps),
order_only=list(order_deps),
variables=extra_bindings)
return linked_binary
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
extra_link_deps = any(self.target_outputs.get(dep).Linkable()
for dep in spec.get('dependencies', [])
if dep in self.target_outputs)
if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
self.target.type = 'none'
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.ninja.build(self.target.binary, 'alink_thin', link_deps,
order_only=compile_deps)
else:
variables = []
if self.xcode_settings:
libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
if libtool_flags:
variables.append(('libtool_flags', libtool_flags))
if self.msvs_settings:
libflags = self.msvs_settings.GetLibFlags(config_name,
self.GypPathToNinja)
variables.append(('libflags', libflags))
if self.flavor != 'mac' or len(self.archs) == 1:
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps, variables=variables)
else:
inputs = []
for arch in self.archs:
output = self.ComputeOutput(spec, arch)
self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
order_only=compile_deps,
variables=variables)
inputs.append(output)
# TODO: It's not clear if libtool_flags should be passed to the alink
# call that combines single-arch .a files into a fat .a file.
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', inputs,
# FIXME: test proving order_only=compile_deps isn't
# needed.
variables=variables)
else:
self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
if is_empty:
output += '.stamp'
variables = []
self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
is_command_start=not package_framework)
if package_framework and not is_empty:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetToolchainEnv(self, additional_settings=None):
"""Returns the variables toolchain would set for build steps."""
env = self.GetSortedXcodeEnv(additional_settings=additional_settings)
if self.flavor == 'win':
env = self.GetMsvsToolchainEnv(
additional_settings=additional_settings)
return env
def GetMsvsToolchainEnv(self, additional_settings=None):
"""Returns the variables Visual Studio would set for build steps."""
return self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
config=self.config_name)
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def AppendPostbuildVariable(self, variables, spec, output, binary,
is_command_start=False):
"""Adds a 'postbuild' variable if there is a postbuild for |output|."""
postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start)
if postbuild:
variables.append(('postbuilds', postbuild))
def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
if output_binary is not None:
postbuilds = self.xcode_settings.AddImplicitPostbuilds(
self.config_name,
os.path.normpath(os.path.join(self.base_to_build, output)),
QuoteShellArgument(
os.path.normpath(os.path.join(self.base_to_build, output_binary)),
self.flavor),
postbuilds, quiet=True)
if not postbuilds:
return ''
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
['cd', self.build_to_base]))
env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
# G will be non-zero if any postbuild fails. Run all postbuilds in a
# subshell.
commands = env + ' (' + \
' && '.join([ninja_syntax.escape(command) for command in postbuilds])
command_string = (commands + '); G=$$?; '
# Remove the final output if any postbuild failed.
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
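# Illustrative result (hypothetical values): for a single postbuild
# 'strip foo' on output 'foo' with is_command_start=True, the returned
# string is roughly
#   '(<env exports> (strip foo); G=$?; ((exit $G) || rm -rf foo) && exit $G) && '
# (the '$$' escapes in the code above are ninja's escaping for a literal '$').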
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
'export FOO=foo; export BAR="${FOO} bar;'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
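# Illustrative results on linux (hypothetical values, no product_* overrides):
#   shared_library 'foo' -> 'libfoo.so'
#   static_library 'foo' -> 'libfoo.a'
#   executable     'foo' -> 'foo'
#   'none' targets get a stamp, e.g. 'foo.stamp'.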
def ComputeOutput(self, spec, arch=None):
"""Compute the path for the final output of the spec."""
type = spec['type']
if self.flavor == 'win':
override = self.msvs_settings.GetOutputName(self.config_name,
self.ExpandSpecial)
if override:
return override
if arch is None and self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if arch is None and 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if arch is not None:
# Make sure partial executables don't end up in a bundle or the regular
# output directory.
archdir = 'arch'
if self.toolset != 'target':
archdir = os.path.join('arch', '%s' % self.toolset)
return os.path.join(archdir, AddArch(filename, arch))
elif type in type_in_output_root or self.is_standalone_static_library:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, ninja_file, var, values):
assert not isinstance(values, str)
if values is None:
values = []
ninja_file.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env, pool,
depfile=None):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
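# e.g. a description 'RULE ${source} $(SDKROOT)' becomes
# 'RULE ${source} _(SDKROOT)': the magic per-source variables survive,
# while any other '$' is neutralized.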
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, depfile=depfile,
restat=True, pool=pool,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
global generator_additional_non_configuration_keys
global generator_additional_path_sections
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
exts = gyp.MSVSUtil.TARGET_TYPE_EXT
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.' + exts['executable']
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.' + exts['static_library']
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.' + exts['shared_library']
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'obj'))
def ComputeOutputDir(params):
"""Returns the path from the toplevel_dir to the build output directory."""
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to ninja easier; ninja doesn't put anything here.
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
# Relative path from source root to our output files. e.g. "out"
return os.path.normpath(os.path.join(generator_dir, output_dir))
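# Illustrative result (hypothetical values): with no generator_output and the
# default output_dir, ComputeOutputDir(params) is simply 'out'; with
# generator_output='gypfiles' it becomes 'gypfiles/out'.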
def CalculateGeneratorInputInfo(params):
"""Called by __init__ to initialize generator values based on params."""
# E.g. "out/gypfiles"
toplevel = params['options'].toplevel_dir
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, ComputeOutputDir(params), 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
gyp.common.EnsureDirExists(path)
return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
wrapper = wrappers.get(cmd, '')
if wrapper:
return wrapper + ' ' + prog
return prog
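# Hedged usage sketch (not part of gyp; the wrapper mapping below is
# hypothetical). Wrappers usually come from '*_wrapper' make_global_settings
# entries or *_wrapper environment variables, collected in
# GenerateOutputForConfig below.
def _ExampleCommandWithWrapper():
  wrappers = {'CC': 'ccache'}
  # A configured wrapper is prefixed onto the program...
  assert CommandWithWrapper('CC', wrappers, 'gcc') == 'ccache gcc'
  # ...while commands without one pass through unchanged.
  assert CommandWithWrapper('CXX', wrappers, 'g++') == 'g++'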
def GetDefaultConcurrentLinks():
"""Returns a best-guess for a number of concurrent links."""
pool_size = int(os.environ.get('GYP_LINK_CONCURRENCY', 0))
if pool_size:
return pool_size
if sys.platform in ('win32', 'cygwin'):
import ctypes
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
stat = MEMORYSTATUSEX()
stat.dwLength = ctypes.sizeof(stat)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
# VS 2015 uses 20% more working set than VS 2013 and can consume all RAM
# on a 64 GB machine.
mem_limit = max(1, stat.ullTotalPhys / (5 * (2 ** 30))) # total / 5GB
hard_cap = max(1, int(os.environ.get('GYP_LINK_CONCURRENCY_MAX', 2**32)))
return min(mem_limit, hard_cap)
elif sys.platform.startswith('linux'):
if os.path.exists("/proc/meminfo"):
with open("/proc/meminfo") as meminfo:
memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
for line in meminfo:
match = memtotal_re.match(line)
if not match:
continue
# Allow 8 GiB per link on Linux because Gold is quite memory hungry.
return max(1, int(match.group(1)) / (8 * (2 ** 20)))
return 1
elif sys.platform == 'darwin':
try:
avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
# A static library debug build of Chromium's unit_tests takes ~2.7GB, so
# 4GB per ld process allows for some more bloat.
return max(1, avail_bytes / (4 * (2 ** 30))) # total / 4GB
except:
return 1
else:
# TODO(scottmg): Implement this for other platforms.
return 1
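# Rough numbers this heuristic yields for a hypothetical 32 GB machine
# (assuming no GYP_LINK_CONCURRENCY* overrides):
#   win32:  32 / 5 -> 6 concurrent links
#   linux:  32 / 8 -> 4 (Gold is given extra headroom)
#   darwin: 32 / 4 -> 8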
def _GetWinLinkRuleNameSuffix(embed_manifest):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding is enabled."""
return '_embed' if embed_manifest else ''
def _AddWinLinkRules(master_ninja, embed_manifest):
"""Adds link rules for Windows platform to |master_ninja|."""
def FullLinkCommand(ldcmd, out, binary_type):
resource_name = {
'exe': '1',
'dll': '2',
}[binary_type]
return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
'%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
'$manifests' % {
'python': sys.executable,
'out': out,
'ldcmd': ldcmd,
'resname': resource_name,
'embed': embed_manifest }
rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
use_separate_mspdbsrv = (
int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo $implibflag /DLL /OUT:$binary '
'@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll')
master_ninja.rule('solink' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
master_ninja.rule('solink_module' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
# Note that ldflags goes at the end so that it has the option of
# overriding default settings earlier in the command line.
exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo /OUT:$binary @$binary.rsp' %
(sys.executable, use_separate_mspdbsrv))
exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe')
master_ninja.rule('link' + rule_name_suffix,
description='LINK%s $binary' % rule_name_suffix.upper(),
command=exe_cmd,
rspfile='$binary.rsp',
rspfile_content='$in_newline $libs $ldflags',
pool='link_pool')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(
os.path.join(ComputeOutputDir(params), config_name))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, toplevel_build)
# Grab make settings for CC/CXX.
# The rules are:
# - The priority, from lowest to highest, is: the gcc/g++ defaults, the
#   'make_global_settings' in gyp, then the environment variables.
# - If there is no 'make_global_settings' for CC.host/CXX.host and no
#   'CC_host'/'CXX_host' environment variable, cc_host/cxx_host should be
#   set to cc/cxx.
if flavor == 'win':
ar = 'lib.exe'
# cc and cxx must be set to the correct architecture by overriding with one
# of cl_x86 or cl_x64 below.
cc = 'UNSET'
cxx = 'UNSET'
ld = 'link.exe'
ld_host = '$ld'
else:
ar = 'ar'
cc = 'cc'
cxx = 'c++'
ld = '$cc'
ldxx = '$cxx'
ld_host = '$cc_host'
ldxx_host = '$cxx_host'
ar_host = 'ar'
cc_host = None
cxx_host = None
cc_host_global_setting = None
cxx_host_global_setting = None
clang_cl = None
nm = 'nm'
nm_host = 'nm'
readelf = 'readelf'
readelf_host = 'readelf'
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
wrappers = {}
for key, value in make_global_settings:
if key == 'AR':
ar = os.path.join(build_to_root, value)
if key == 'AR.host':
ar_host = os.path.join(build_to_root, value)
if key == 'CC':
cc = os.path.join(build_to_root, value)
if cc.endswith('clang-cl'):
clang_cl = cc
if key == 'CXX':
cxx = os.path.join(build_to_root, value)
if key == 'CC.host':
cc_host = os.path.join(build_to_root, value)
cc_host_global_setting = value
if key == 'CXX.host':
cxx_host = os.path.join(build_to_root, value)
cxx_host_global_setting = value
if key == 'LD':
ld = os.path.join(build_to_root, value)
if key == 'LD.host':
ld_host = os.path.join(build_to_root, value)
if key == 'NM':
nm = os.path.join(build_to_root, value)
if key == 'NM.host':
nm_host = os.path.join(build_to_root, value)
if key == 'READELF':
readelf = os.path.join(build_to_root, value)
if key == 'READELF.host':
readelf_host = os.path.join(build_to_root, value)
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
# Support wrappers from environment variables too.
for key, value in os.environ.iteritems():
if key.lower().endswith('_wrapper'):
key_prefix = key[:-len('_wrapper')]
key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
wrappers[key_prefix] = os.path.join(build_to_root, value)
if flavor == 'win':
configs = [target_dicts[qualified_target]['configurations'][config_name]
for qualified_target in target_list]
shared_system_includes = None
if not generator_flags.get('ninja_use_custom_environment_files', 0):
shared_system_includes = \
gyp.msvs_emulation.ExtractSharedMSVSSystemIncludes(
configs, generator_flags)
cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
toplevel_build, generator_flags, shared_system_includes, OpenOutput)
for arch, path in cl_paths.iteritems():
if clang_cl:
# If we have selected clang-cl, use that instead.
path = clang_cl
command = CommandWithWrapper('CC', wrappers,
QuoteShellArgument(path, 'win'))
if clang_cl:
# Use clang-cl to cross-compile for x86 or x86_64.
command += (' -m32' if arch == 'x86' else ' -m64')
master_ninja.variable('cl_' + arch, command)
cc = GetEnvironFallback(['CC_target', 'CC'], cc)
master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
if flavor == 'win':
master_ninja.variable('ld', ld)
master_ninja.variable('idl', 'midl.exe')
master_ninja.variable('ar', ar)
master_ninja.variable('rc', 'rc.exe')
master_ninja.variable('ml_x86', 'ml.exe')
master_ninja.variable('ml_x64', 'ml64.exe')
master_ninja.variable('mt', 'mt.exe')
else:
master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], ar))
if flavor != 'mac':
# Mac does not use readelf/nm for .TOC generation, so avoid polluting
# the master ninja with extra unused variables.
master_ninja.variable(
'nm', GetEnvironFallback(['NM_target', 'NM'], nm))
master_ninja.variable(
'readelf', GetEnvironFallback(['READELF_target', 'READELF'], readelf))
if generator_supports_multiple_toolsets:
if not cc_host:
cc_host = cc
if not cxx_host:
cxx_host = cxx
master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], ar_host))
master_ninja.variable('nm_host', GetEnvironFallback(['NM_host'], nm_host))
master_ninja.variable('readelf_host',
GetEnvironFallback(['READELF_host'], readelf_host))
cc_host = GetEnvironFallback(['CC_host'], cc_host)
cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
# The environment variable could be used in 'make_global_settings', like
# ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here.
if '$(CC)' in cc_host and cc_host_global_setting:
cc_host = cc_host_global_setting.replace('$(CC)', cc)
if '$(CXX)' in cxx_host and cxx_host_global_setting:
cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
master_ninja.variable('cc_host',
CommandWithWrapper('CC.host', wrappers, cc_host))
master_ninja.variable('cxx_host',
CommandWithWrapper('CXX.host', wrappers, cxx_host))
if flavor == 'win':
master_ninja.variable('ld_host', ld_host)
else:
master_ninja.variable('ld_host', CommandWithWrapper(
'LINK', wrappers, ld_host))
master_ninja.variable('ldxx_host', CommandWithWrapper(
'LINK', wrappers, ldxx_host))
master_ninja.newline()
master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
master_ninja.newline()
deps = 'msvc' if flavor == 'win' else 'gcc'
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_s',
description='CC $out',
command=('$cc $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'))
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d',
deps=deps)
else:
# TODO(scottmg) Using separate pdb names is a test to see if it works around
# http://crbug.com/142362. It seems there's a race between the creation of
# the .pdb by the precompiled header step for .cc and the compilation of
# .c files. This should be handled by mspdbsrv, but it occasionally errors
# out with:
# c1xx : fatal error C1033: cannot open program database
# Making the rules target separate pdb files might avoid this.
cc_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cc /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
cxx_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cxx /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
master_ninja.rule(
'cc',
description='CC $out',
command=cc_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_c',
deps=deps)
master_ninja.rule(
'cxx',
description='CXX $out',
command=cxx_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_cc',
deps=deps)
master_ninja.rule(
'idl',
description='IDL $in',
command=('%s gyp-win-tool midl-wrapper $arch $outdir '
'$tlb $h $dlldata $iid $proxy $in '
'$midl_includes $idlflags' % sys.executable))
master_ninja.rule(
'rc',
description='RC $in',
# Note: $in must be last otherwise rc.exe complains.
command=('%s gyp-win-tool rc-wrapper '
'$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
sys.executable))
master_ninja.rule(
'asm',
description='ASM $out',
command=('%s gyp-win-tool asm-wrapper '
'$arch $asm $defines $includes $asmflags /c /Fo $out $in' %
sys.executable))
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcs $arflags $out $in')
master_ninja.rule(
'alink_thin',
description='AR $out',
command='rm -f $out && $ar rcsT $arflags $out $in')
# This allows targets that only need to depend on $lib's API to declare an
# order-only dependency on $lib.TOC and avoid relinking such downstream
# dependencies when $lib changes only in non-public ways.
# The resulting string leaves an uninterpolated %(suffix)s which
# is used in the final substitution below.
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ]; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; '
'fi; fi'
% { 'solink':
'$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
'extract_toc':
('{ $readelf -d $lib | grep SONAME ; '
'$nm -gD -f p $lib | cut -f1-2 -d\' \'; }')})
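# The two-stage '%' substitution works because the first pass re-inserts a
# literal '%(suffix)s' token, leaving it for the final pass. A minimal sketch
# of the same idiom with toy values (not the strings used above):
#   ('a %(x)s c' % {'x': 'b %(y)s'}) % {'y': 'd'}  # -> 'a b d c'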
master_ninja.rule(
'solink',
description='SOLINK $lib',
restat=True,
command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
rspfile='$link_file_list',
rspfile_content=
'-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs',
pool='link_pool')
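# restat=True makes ninja re-stat the outputs after this rule runs; when the
# TOC comparison above leaves $lib.TOC untouched, edges that depend only on
# $lib.TOC are pruned from the build, avoiding the downstream relinks.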
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib',
restat=True,
command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
rspfile='$link_file_list',
rspfile_content='-Wl,--start-group $in -Wl,--end-group $solibs $libs',
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out '
'-Wl,--start-group $in -Wl,--end-group $solibs $libs'),
pool='link_pool')
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command=('%s gyp-win-tool link-wrapper $arch False '
'$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
sys.executable),
rspfile='$out.rsp',
rspfile_content='$in_newline $libflags')
_AddWinLinkRules(master_ninja, embed_manifest=True)
_AddWinLinkRules(master_ninja, embed_manifest=False)
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool $libtool_flags '
'-static -o $out $in'
'$postbuilds')
master_ninja.rule(
'lipo',
description='LIPO $out, POSTBUILDS',
command='rm -f $out && lipo -create $in -output $out$postbuilds')
master_ninja.rule(
'solipo',
description='SOLIPO $out, POSTBUILDS',
command=(
'rm -f $lib $lib.TOC && lipo -create $in -output $lib$postbuilds &&'
'%(extract_toc)s > $lib.TOC'
% { 'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'}))
# Record the public interface of $lib in $lib.TOC. See the corresponding
# comment in the posix section above for details.
solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ] || '
# Always force dependent targets to relink if this library
# reexports something. Handling this correctly would require
# recursive TOC dumping but this is rare in practice, so punt.
'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; '
'else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then '
'mv $lib.tmp $lib.TOC ; '
'fi; '
'fi'
% { 'solink': solink_base,
'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
solink_suffix = '@$link_file_list$postbuilds'
master_ninja.rule(
'solink',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-shared'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_notoc',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-bundle'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_module_notoc',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix': solink_suffix, 'type': '-bundle'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $solibs $libs$postbuilds'),
pool='link_pool')
master_ninja.rule(
'preprocess_infoplist',
description='PREPROCESS INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'copy_infoplist',
description='COPY INFOPLIST $in',
command='$env ./gyp-mac-tool copy-info-plist $in $out $binary $keys')
master_ninja.rule(
'merge_infoplist',
description='MERGE INFOPLISTS $in',
command='$env ./gyp-mac-tool merge-info-plist $out $in')
master_ninja.rule(
'compile_xcassets',
description='COMPILE XCASSETS $in',
command='$env ./gyp-mac-tool compile-xcassets $keys $in')
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env ./gyp-mac-tool $mactool_cmd $in $out $binary')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='./gyp-mac-tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='%s gyp-win-tool stamp $out' % sys.executable)
master_ninja.rule(
'copy',
description='COPY $in $out',
command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
master_ninja.rule(
'copy',
description='COPY $in $out',
command='rm -rf $out && cp -af $in $out')
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
# target_short_names is a map from target short name to a list of Target
# objects.
target_short_names = {}
# Short names of targets that were skipped because they didn't contain anything
# interesting.
# NOTE: there may be overlap between this and non_empty_target_names.
empty_target_names = set()
# Set of non-empty short target names.
# NOTE: there may be overlap between this and empty_target_names.
non_empty_target_names = set()
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
# If build_file is a symlink, we must not follow it because there's a chance
# it could point to a path above toplevel_dir, and we cannot correctly deal
# with that case at the moment.
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir,
False)
qualified_target_for_hash = gyp.common.QualifiedTarget(build_file, name,
toolset)
hash_for_rules = hashlib.md5(qualified_target_for_hash).hexdigest()
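# The md5 of the qualified target name serves as a stable, filename-safe
# prefix that keeps per-target rule names unique across subninja files.
# (This generator is Python 2; under Python 3 the argument would need
# .encode() before hashing.)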
base_path = os.path.dirname(build_file)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
ninja_output = StringIO()
writer = NinjaWriter(hash_for_rules, target_outputs, base_path, build_dir,
ninja_output,
toplevel_build, output_file,
flavor, toplevel_dir=options.toplevel_dir)
target = writer.WriteSpec(spec, config_name, generator_flags)
if ninja_output.tell() > 0:
# Only create files for ninja files that actually have contents.
with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
ninja_file.write(ninja_output.getvalue())
ninja_output.close()
master_ninja.subninja(output_file)
if target:
if name != target.FinalOutput() and spec['toolset'] == 'target':
target_short_names.setdefault(name, []).append(target)
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
non_empty_target_names.add(name)
else:
empty_target_names.add(name)
if target_short_names:
# Write a short name to build this target. This benefits both the
# "build chrome" case as well as the gyp tests, which expect to be
# able to run actions and build libraries by their short name.
master_ninja.newline()
master_ninja.comment('Short names for targets.')
for short_name in target_short_names:
master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
target_short_names[short_name]])
# Write phony targets for any empty targets that weren't written yet. As
# short names are not necessarily unique only do this for short names that
# haven't already been output for another target.
empty_target_names = empty_target_names - non_empty_target_names
if empty_target_names:
master_ninja.newline()
master_ninja.comment('Empty targets (output for completeness).')
for name in sorted(empty_target_names):
master_ninja.build(name, 'phony')
if all_outputs:
master_ninja.newline()
master_ninja.build('all', 'phony', list(all_outputs))
master_ninja.default(generator_flags.get('default_target', 'all'))
master_ninja_file.close()
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
(target_list, target_dicts, data, params, config_name) = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
# Update target_dicts for iOS device builds.
target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
target_dicts)
user_config = params.get('generator_flags', {}).get('config', None)
if gyp.common.GetFlavor(params) == 'win':
target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append(
(target_list, target_dicts, data, params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
| mit |
carolinux/QGIS | python/plugins/GdalTools/tools/inOutSelector.py | 12 | 8284 | # -*- coding: utf-8 -*-
"""
***************************************************************************
inOutSelector.py
---------------------
Date : April 2011
Copyright : (C) 2011 by Giuseppe Sucameli
Email : brush dot tyler at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Giuseppe Sucameli'
__date__ = 'April 2011'
__copyright__ = '(C) 2011, Giuseppe Sucameli'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import SIGNAL, Qt, pyqtProperty
from PyQt4.QtGui import QWidget, QComboBox
from qgis.core import QgsMapLayerRegistry, QgsMapLayer
from ui_inOutSelector import Ui_GdalToolsInOutSelector
class GdalToolsInOutSelector(QWidget, Ui_GdalToolsInOutSelector):
FILE = 0x1
LAYER = 0x2
MULTIFILE = 0x4 # NOT IMPLEMENTED YET
FILE_LAYER = 0x1 | 0x2
FILES = 0x1 | 0x4 # NOT IMPLEMENTED YET
FILES_LAYER = 0x3 | 0x4 # NOT IMPLEMENTED YET
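# These are bit flags, so modes combine with '|' and are tested with '&':
# e.g. FILE_LAYER & LAYER -> 0x2 (truthy) while FILE & LAYER -> 0 (falsy),
# which is the idiom the getType() checks below rely on.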
__pyqtSignals__ = ("selectClicked()", "filenameChanged()", "layerChanged()")
def __init__(self, parent=None, type=None):
QWidget.__init__(self, parent)
self.setupUi(self)
self.setFocusPolicy(Qt.StrongFocus)
self.combo.setInsertPolicy(QComboBox.NoInsert)
self.clear()
self.typ = None
if type is None:
self.resetType()
else:
self.setType(type)
self.connect(self.selectBtn, SIGNAL("clicked()"), self.selectButtonClicked)
self.connect(self.fileEdit, SIGNAL("textChanged(const QString &)"), self.textChanged)
self.connect(self.combo, SIGNAL("editTextChanged(const QString &)"), self.textChanged)
self.connect(self.combo, SIGNAL("currentIndexChanged(int)"), self.indexChanged)
def clear(self):
self.filenames = []
self.fileEdit.clear()
self.clearComboState()
self.combo.clear()
def textChanged(self):
if self.getType() & self.MULTIFILE:
self.filenames = self.fileEdit.text().split(",")
if self.getType() & self.LAYER:
index = self.combo.currentIndex()
if index >= 0:
text = self.combo.currentText()
if text != self.combo.itemText(index):
return self.setFilename(text)
self.filenameChanged()
def indexChanged(self):
self.layerChanged()
self.filenameChanged()
def selectButtonClicked(self):
self.emit(SIGNAL("selectClicked()"))
def filenameChanged(self):
self.emit(SIGNAL("filenameChanged()"))
def layerChanged(self):
self.emit(SIGNAL("layerChanged()"))
def setType(self, type):
if type == self.typ:
return
if type & self.MULTIFILE: # MULTIFILE IS NOT IMPLEMENTED YET
type = type & ~self.MULTIFILE
self.typ = type
self.selectBtn.setVisible(self.getType() & self.FILE)
self.combo.setVisible(self.getType() & self.LAYER)
self.fileEdit.setVisible(not (self.getType() & self.LAYER))
self.combo.setEditable(self.getType() & self.FILE)
if self.getType() & self.FILE:
self.setFocusProxy(self.selectBtn)
else:
self.setFocusProxy(self.combo)
# send signals to refresh connected widgets
self.filenameChanged()
self.layerChanged()
def getType(self):
return self.typ
def resetType(self):
self.setType(self.FILE_LAYER)
selectorType = pyqtProperty("int", getType, setType, resetType)
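# Declaring selectorType as a pyqtProperty exposes it through the Qt
# meta-object system, so generic property APIs reach setType(); e.g.
# (illustrative) widget.setProperty('selectorType', self.FILE_LAYER).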
def setFilename(self, fn=None):
self.blockSignals(True)
prevFn, prevLayer = self.filename(), self.layer()
if isinstance(fn, QgsMapLayer):
fn = fn.source()
elif isinstance(fn, str) or isinstance(fn, unicode):
fn = unicode(fn)
# TODO test
elif isinstance(fn, list):
if len(fn) > 0:
if self.getType() & self.MULTIFILE:
self.filenames = fn
#fn = "".join( fn, "," )
fn = ",".join(fn)
else:
fn = ''
else:
fn = ''
if not (self.getType() & self.LAYER):
self.fileEdit.setText(fn)
else:
self.combo.setCurrentIndex(-1)
self.combo.setEditText(fn)
self.blockSignals(False)
if self.filename() != prevFn:
self.filenameChanged()
if self.layer() != prevLayer:
self.layerChanged()
def setLayer(self, layer=None):
if not (self.getType() & self.LAYER):
return self.setFilename(layer)
self.blockSignals(True)
prevFn, prevLayer = self.filename(), self.layer()
if isinstance(layer, QgsMapLayer):
if self.combo.findData(layer.id()) >= 0:
index = self.combo.findData(layer.id())
self.combo.setCurrentIndex(index)
else:
self.combo.setCurrentIndex(-1)
self.combo.setEditText(layer.source())
elif isinstance(layer, int) and layer >= 0 and layer < self.combo.count():
self.combo.setCurrentIndex(layer)
else:
self.combo.clearEditText()
self.combo.setCurrentIndex(-1)
self.blockSignals(False)
if self.filename() != prevFn:
self.filenameChanged()
if self.layer() != prevLayer:
self.layerChanged()
def setLayers(self, layers=None):
if layers is None or not hasattr(layers, '__iter__') or len(layers) <= 0:
self.combo.clear()
return
self.blockSignals(True)
prevFn, prevLayer = self.filename(), self.layer()
self.saveComboState()
self.combo.clear()
for l in layers:
self.combo.addItem(l.name(), l.id())
self.restoreComboState()
self.blockSignals(False)
if self.filename() != prevFn:
self.filenameChanged()
if self.layer() != prevLayer:
self.layerChanged()
def clearComboState(self):
self.prevState = None
def saveComboState(self):
index = self.combo.currentIndex()
text = self.combo.currentText()
layerID = self.combo.itemData(index) if index >= 0 else ""
self.prevState = (index, text, layerID)
def restoreComboState(self):
if self.prevState is None:
return
index, text, layerID = self.prevState
if index < 0:
if text == '' and self.combo.count() > 0:
index = 0
elif self.combo.findData(layerID) < 0:
index = -1
text = ""
else:
index = self.combo.findData(layerID)
self.combo.setCurrentIndex(index)
if index >= 0:
text = self.combo.itemText(index)
self.combo.setEditText(text)
def layer(self):
if self.getType() != self.FILE and self.combo.currentIndex() >= 0:
layerID = self.combo.itemData(self.combo.currentIndex())
return QgsMapLayerRegistry.instance().mapLayer(layerID)
return None
def filename(self):
if not (self.getType() & self.LAYER):
if self.getType() & self.MULTIFILE:
return self.filenames
return self.fileEdit.text()
if self.combo.currentIndex() < 0:
if self.getType() & self.MULTIFILE:
return self.filenames
return self.combo.currentText()
layer = self.layer()
if layer is not None:
return layer.source()
return ''
| gpl-2.0 |
andela-angene/coursebuilder-core | coursebuilder/tests/functional/student_answers.py | 4 | 16815 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analytics for extracting facts based on StudentAnswerEntity entries."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import collections
from common import crypto
from common import utils as common_utils
from models import courses
from models import models
from models import transforms
from models.data_sources import utils as data_sources_utils
from tests.functional import actions
from google.appengine.ext import db
COURSE_NAME = 'test_course'
COURSE_TITLE = 'Test Course'
ADMIN_EMAIL = 'test@example.com'
AssessmentDef = collections.namedtuple('AssessmentDef',
['unit_id', 'title', 'html_content'])
EntityDef = collections.namedtuple('EntityDef',
['entity_class', 'entity_id',
'entity_key_name', 'data'])
ASSESSMENTS = [
AssessmentDef(
1, 'One Question',
'<question quid="4785074604081152" weight="1" '
'instanceid="8TvGgbrrbZ49"></question><br>'),
AssessmentDef(
2, 'Groups And Questions',
'<question quid="5066549580791808" weight="1" '
'instanceid="zsgZ8dUMvJjz"></question><br>'
'<question quid="5629499534213120" weight="1" '
'instanceid="YlGaKQ2mnOPG"></question><br>'
'<question-group qgid="5348024557502464" '
'instanceid="YpoECeTunEpj"></question-group><br>'
'<question-group qgid="5910974510923776" '
'instanceid="FcIh3jyWOTbP"></question-group><br>'),
AssessmentDef(
3, 'All Questions',
'<question quid="6192449487634432" weight="1" '
'instanceid="E5P0a0bFB0EH"></question><br>'
'<question quid="5629499534213120" weight="1" '
'instanceid="DlfLRsko2QHb"></question><br>'
'<question quid="5066549580791808" weight="1" '
'instanceid="hGrEjnP13pMA"></question><br>'
'<question quid="4785074604081152" weight="1" '
'instanceid="knWukHJApaQh"></question><br>'),
]
ENTITIES = [
# Questions -----------------------------------------------------------
EntityDef(
models.QuestionEntity, 4785074604081152, None,
'{"question": "To produce maximum generosity, what should be the '
'overall shape of the final protrusion?", "rows": 1, "columns": 1'
'00, "defaultFeedback": "", "graders": [{"matcher": "case_insensi'
'tive", "feedback": "", "score": "0.7", "response": "oblong"}, {"'
'matcher": "case_insensitive", "feedback": "", "score": "0.3", "r'
'esponse": "extended"}], "type": 1, "description": "Maximum gener'
'osity protrusion shape", "version": "1.5", "hint": ""}'),
EntityDef(
models.QuestionEntity, 5066549580791808, None,
'{"question": "Describe the shape of a standard trepanning hammer'
'", "multiple_selections": false, "choices": [{"feedback": "", "s'
'core": 0.0, "text": "Round"}, {"feedback": "", "score": 0.0, "te'
'xt": "Square"}, {"feedback": "", "score": 1.0, "text": "Diamond"'
'}, {"feedback": "", "score": 0.0, "text": "Pyramid"}], "type": 0'
', "description": "Trepanning hammer shape", "version": "1.5"}'),
EntityDef(
models.QuestionEntity, 5629499534213120, None,
'{"question": "Describe an appropriate bedside manner for post-tr'
'eatment patient interaction", "rows": 1, "columns": 100, "defaul'
'tFeedback": "", "graders": [{"matcher": "case_insensitive", "fee'
'dback": "", "score": "1.0", "response": "gentle"}, {"matcher": "'
'case_insensitive", "feedback": "", "score": "0.8", "response": "'
'caring"}], "type": 1, "description": "Post-treatement interactio'
'n", "version": "1.5", "hint": ""}'),
EntityDef(
models.QuestionEntity, 6192449487634432, None,
'{"question": "When making a personality shift, how hard should t'
'he strike be?", "multiple_selections": true, "choices": [{"feedb'
'ack": "", "score": -1.0, "text": "Light"}, {"feedback": "", "sco'
're": 0.7, "text": "Medium"}, {"feedback": "", "score": 0.3, "tex'
't": "Heavy"}, {"feedback": "", "score": -1.0, "text": "Crushing"'
'}], "type": 0, "description": "Personality shift strike strength'
'", "version": "1.5"}'),
# Question Groups -----------------------------------------------------
EntityDef(
models.QuestionGroupEntity, 5348024557502464, None,
'{"description": "One MC, one SA", "introduction": "", "version":'
'"1.5", "items": [{"question": 5066549580791808, "weight": "1"}, '
'{"question": 6192449487634432, "weight": "1"}]}'),
EntityDef(
models.QuestionGroupEntity, 5910974510923776, None,
'{"description": "All Questions", "introduction": "All questions"'
', "version": "1.5", "items": [{"question": 4785074604081152, "we'
'ight": "0.25"}, {"question": 5066549580791808, "weight": "0.25"}'
', {"question": 5629499534213120, "weight": "0.25"}, {"question":'
'6192449487634432, "weight": "0.25"}]}'),
# Student Answers -----------------------------------------------------
EntityDef(
models.StudentAnswersEntity, None, '115715231223232197316',
'{"3": {"version": "1.5", "containedTypes": {"DlfLRsko2QHb": "SaQ'
'uestion", "E5P0a0bFB0EH": "McQuestion", "hGrEjnP13pMA": "McQuest'
'ion", "knWukHJApaQh": "SaQuestion"}, "hGrEjnP13pMA": [true, fals'
'e, false, false], "knWukHJApaQh": {"response": "fronk"}, "DlfLRs'
'ko2QHb": {"response": "phleem"}, "answers": {"DlfLRsko2QHb": "ph'
'leem", "E5P0a0bFB0EH": [1], "hGrEjnP13pMA": [0], "knWukHJApaQh":'
'"fronk"}, "E5P0a0bFB0EH": [false, true, false, false], "individu'
'alScores": {"DlfLRsko2QHb": 0, "E5P0a0bFB0EH": 0.7, "hGrEjnP13pM'
'A": 0, "knWukHJApaQh": 0}}, "2": {"version": "1.5", "containedTy'
'pes": {"zsgZ8dUMvJjz": "McQuestion", "FcIh3jyWOTbP": ["SaQuestio'
'n", "McQuestion", "SaQuestion", "McQuestion"], "YlGaKQ2mnOPG": "'
'SaQuestion", "YpoECeTunEpj": ["McQuestion", "McQuestion"]}, "ans'
'wers": {"zsgZ8dUMvJjz": [1], "FcIh3jyWOTbP": ["round", [1], "col'
'd", [3]], "YlGaKQ2mnOPG": "gentle", "YpoECeTunEpj": [[2], [1]]},'
'"FcIh3jyWOTbP": {"FcIh3jyWOTbP.2.5629499534213120": {"response":'
'"cold"}, "FcIh3jyWOTbP.1.5066549580791808": [false, true, false,'
'false], "FcIh3jyWOTbP.3.6192449487634432": [false, false, false,'
'true], "FcIh3jyWOTbP.0.4785074604081152": {"response": "round"}}'
', "YlGaKQ2mnOPG": {"response": "gentle"}, "zsgZ8dUMvJjz": [false'
',true, false, false], "individualScores": {"zsgZ8dUMvJjz": 0, "F'
'cIh3jyWOTbP": [0, 0, 0, 0], "YlGaKQ2mnOPG": 1, "YpoECeTunEpj": ['
'1, 0.7]}, "YpoECeTunEpj": {"YpoECeTunEpj.0.5066549580791808": [f'
'alse, false, true, false], "YpoECeTunEpj.1.6192449487634432": [f'
'alse, true, false, false]}}, "1": {"containedTypes": {"8TvGgbrrb'
'Z49": "SaQuestion"}, "version": "1.5", "answers": {"8TvGgbrrbZ49'
'": "oblong"}, "individualScores": {"8TvGgbrrbZ49": 0.7}, "8TvGgb'
'rrbZ49": {"response": "oblong"}}}'),
EntityDef(
models.StudentAnswersEntity, None, '187186200184131193542',
'{"3": {"version": "1.5", "containedTypes": {"DlfLRsko2QHb": "SaQ'
'uestion", "E5P0a0bFB0EH": "McQuestion", "hGrEjnP13pMA": "McQuest'
'ion", "knWukHJApaQh": "SaQuestion"}, "hGrEjnP13pMA": [false, tru'
'e, false, false], "knWukHJApaQh": {"response": "square"}, "DlfLR'
'sko2QHb": {"response": "caring"}, "answers": {"DlfLRsko2QHb": "c'
'aring", "E5P0a0bFB0EH": [1], "hGrEjnP13pMA": [1], "knWukHJApaQh"'
': "square"}, "E5P0a0bFB0EH": [false, true, false, false], "indiv'
'idualScores": {"DlfLRsko2QHb": 0.8, "E5P0a0bFB0EH": 0.7, "hGrEjn'
'P13pMA": 0, "knWukHJApaQh": 0}}, "2": {"version": "1.5", "contai'
'nedTypes": {"zsgZ8dUMvJjz": "McQuestion", "FcIh3jyWOTbP": ["SaQu'
'estion", "McQuestion", "SaQuestion", "McQuestion"], "YlGaKQ2mnOP'
'G": "SaQuestion", "YpoECeTunEpj": ["McQuestion", "McQuestion"]},'
' "answers": {"zsgZ8dUMvJjz": [3], "FcIh3jyWOTbP": ["spazzle", [3'
'], "gloonk", [3]], "YlGaKQ2mnOPG": "frink", "YpoECeTunEpj": [[0]'
', [0]]}, "FcIh3jyWOTbP": {"FcIh3jyWOTbP.2.5629499534213120": {"r'
'esponse": "gloonk"}, "FcIh3jyWOTbP.1.5066549580791808": [false, '
'false, false, true], "FcIh3jyWOTbP.3.6192449487634432": [false, '
'false, false, true], "FcIh3jyWOTbP.0.4785074604081152": {"respon'
'se": "spazzle"}}, "YlGaKQ2mnOPG": {"response": "frink"}, "zsgZ8d'
'UMvJjz": [false, false, false, true], "individualScores": {"zsgZ'
'8dUMvJjz": 0, "FcIh3jyWOTbP": [0, 0, 0, 0], "YlGaKQ2mnOPG": 0, "'
'YpoECeTunEpj": [0, 0]}, "YpoECeTunEpj": {"YpoECeTunEpj.0.5066549'
'580791808": [true, false, false, false], "YpoECeTunEpj.1.6192449'
'487634432": [true, false, false, false]}}, "1": {"containedTypes'
'": {"8TvGgbrrbZ49": "SaQuestion"}, "version": "1.5", "answers": '
'{"8TvGgbrrbZ49": "spalpeen"}, "individualScores": {"8TvGgbrrbZ49'
'": 0}, "8TvGgbrrbZ49": {"response": "spalpeen"}}}'),
]
EXPECTED_COURSE_UNITS = [
{
'title': 'One Question',
'unit_id': '1',
'now_available': True,
'type': 'A',
},
{
'title': 'Groups And Questions',
'unit_id': '2',
'now_available': True,
'type': 'A',
},
{
'title': 'All Questions',
'unit_id': '3',
'now_available': True,
'type': 'A',
}
]
EXPECTED_QUESTIONS = [
{
'question_id': '4785074604081152',
'description': 'Maximum generosity protrusion shape',
'choices': []
},
{
'question_id': '5066549580791808',
'description': 'Trepanning hammer shape',
'choices': ['Round', 'Square', 'Diamond', 'Pyramid']
},
{
'question_id': '5629499534213120',
'description': 'Post-treatement interaction',
'choices': []
},
{
'question_id': '6192449487634432',
'description': 'Personality shift strike strength',
'choices': ['Light', 'Medium', 'Heavy', 'Crushing']
}
]
EXPECTED_ANSWERS = [
{'unit_id': '1', 'sequence': 0, 'count': 1, 'is_valid': True,
'answer': 'oblong', 'question_id': '4785074604081152'},
{'unit_id': '1', 'sequence': 0, 'count': 1, 'is_valid': False,
'answer': 'spalpeen', 'question_id': '4785074604081152'},
{'unit_id': '2', 'sequence': 0, 'count': 1, 'is_valid': True,
'answer': '1', 'question_id': '5066549580791808'},
{'unit_id': '2', 'sequence': 0, 'count': 1, 'is_valid': True,
'answer': '3', 'question_id': '5066549580791808'},
{'unit_id': '2', 'sequence': 1, 'count': 1, 'is_valid': True,
'answer': 'gentle', 'question_id': '5629499534213120'},
{'unit_id': '2', 'sequence': 1, 'count': 1, 'is_valid': False,
'answer': 'frink', 'question_id': '5629499534213120'},
{'unit_id': '2', 'sequence': 2, 'count': 1, 'is_valid': True,
'answer': '0', 'question_id': '5066549580791808'},
{'unit_id': '2', 'sequence': 2, 'count': 1, 'is_valid': True,
'answer': '2', 'question_id': '5066549580791808'},
{'unit_id': '2', 'sequence': 3, 'count': 1, 'is_valid': True,
'answer': '0', 'question_id': '6192449487634432'},
{'unit_id': '2', 'sequence': 3, 'count': 1, 'is_valid': True,
'answer': '1', 'question_id': '6192449487634432'},
{'unit_id': '2', 'sequence': 4, 'count': 1, 'is_valid': False,
'answer': 'round', 'question_id': '4785074604081152'},
{'unit_id': '2', 'sequence': 4, 'count': 1, 'is_valid': False,
'answer': 'spazzle', 'question_id': '4785074604081152'},
{'unit_id': '2', 'sequence': 5, 'count': 1, 'is_valid': True,
'answer': '1', 'question_id': '5066549580791808'},
{'unit_id': '2', 'sequence': 5, 'count': 1, 'is_valid': True,
'answer': '3', 'question_id': '5066549580791808'},
{'unit_id': '2', 'sequence': 6, 'count': 1, 'is_valid': False,
'answer': 'cold', 'question_id': '5629499534213120'},
{'unit_id': '2', 'sequence': 6, 'count': 1, 'is_valid': False,
'answer': 'gloonk', 'question_id': '5629499534213120'},
{'unit_id': '2', 'sequence': 7, 'count': 2, 'is_valid': True,
'answer': '3', 'question_id': '6192449487634432'},
{'unit_id': '3', 'sequence': 0, 'count': 2, 'is_valid': True,
'answer': '1', 'question_id': '6192449487634432'},
{'unit_id': '3', 'sequence': 1, 'count': 1, 'is_valid': True,
'answer': 'caring', 'question_id': '5629499534213120'},
{'unit_id': '3', 'sequence': 1, 'count': 1, 'is_valid': False,
'answer': 'phleem', 'question_id': '5629499534213120'},
{'unit_id': '3', 'sequence': 2, 'count': 1, 'is_valid': True,
'answer': '0', 'question_id': '5066549580791808'},
{'unit_id': '3', 'sequence': 2, 'count': 1, 'is_valid': True,
'answer': '1', 'question_id': '5066549580791808'},
{'unit_id': '3', 'sequence': 3, 'count': 1, 'is_valid': False,
'answer': 'fronk', 'question_id': '4785074604081152'},
{'unit_id': '3', 'sequence': 3, 'count': 1, 'is_valid': False,
'answer': 'square', 'question_id': '4785074604081152'},
]
class StudentAnswersAnalyticsTest(actions.TestBase):
def setUp(self):
super(StudentAnswersAnalyticsTest, self).setUp()
self.context = actions.simple_add_course(COURSE_NAME, ADMIN_EMAIL,
COURSE_TITLE)
self.course = courses.Course(None, self.context)
for assessment in ASSESSMENTS:
self._add_assessment(self.course, assessment)
self.course.save()
for entity in ENTITIES:
self._add_entity(self.context, entity)
def _add_assessment(self, course, assessment_def):
assessment = course.add_assessment()
assessment.unit_id = assessment_def.unit_id
assessment.title = assessment_def.title
assessment.availability = courses.AVAILABILITY_AVAILABLE
assessment.html_content = assessment_def.html_content
def _add_entity(self, context, entity):
with common_utils.Namespace(context.get_namespace_name()):
if entity.entity_id:
key = db.Key.from_path(entity.entity_class.__name__,
entity.entity_id)
to_store = entity.entity_class(data=entity.data, key=key)
else:
to_store = entity.entity_class(key_name=entity.entity_key_name,
data=entity.data)
to_store.put()
def _get_data_source(self, source_name):
xsrf_token = crypto.XsrfTokenManager.create_xsrf_token(
data_sources_utils.DATA_SOURCE_ACCESS_XSRF_ACTION)
url = ('/test_course/rest/data/%s/items?' % source_name +
'data_source_token=%s&page_number=0' % xsrf_token)
response = self.get(url)
return transforms.loads(response.body)['data']
def _verify_content(self, expected, actual):
for expected_item, actual_item in zip(expected, actual):
self.assertDictContainsSubset(expected_item, actual_item)
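# Caveat: zip() stops at the shorter of the two sequences, so an extra or
# missing trailing row in 'actual' would not fail this check; only the keys
# listed in each expected dict are validated.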
def test_end_to_end(self):
actions.login(ADMIN_EMAIL, is_admin=True)
# Start map/reduce analysis job.
response = self.get(
'/test_course/dashboard?action=analytics_questions')
form = response.forms['gcb-run-visualization-question_answers']
self.submit(form, response)
# Wait for map/reduce to run to completion.
self.execute_all_deferred_tasks()
# Verify output.
course_units = self._get_data_source('course_units')
self._verify_content(EXPECTED_COURSE_UNITS, course_units)
course_questions = self._get_data_source('course_questions')
self._verify_content(EXPECTED_QUESTIONS, course_questions)
question_answers = self._get_data_source('question_answers')
self._verify_content(EXPECTED_ANSWERS, question_answers)
| apache-2.0 |
vincentbernat/feedhq | feedhq/feeds/management/commands/add_missing.py | 2 | 1240 | from django.conf import settings
from ...models import Feed, UniqueFeed, enqueue_favicon
from . import SentryCommand
class Command(SentryCommand):
"""Updates the users' feeds"""
def handle_sentry(self, *args, **kwargs):
missing = Feed.objects.raw(
"""
select f.id, f.url
from
feeds_feed f
left join auth_user u on f.user_id = u.id
where
not exists (
select 1 from feeds_uniquefeed u where f.url = u.url
) and
u.is_suspended = false
""")
urls = set([f.url for f in missing])
UniqueFeed.objects.bulk_create([
UniqueFeed(url=url) for url in urls
])
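# The raw SQL above is a NOT EXISTS anti-join: feeds of non-suspended users
# with no matching UniqueFeed row. Collapsing through set() deduplicates the
# URLs so bulk_create() inserts each missing UniqueFeed in one batch.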
if not settings.TESTS:
missing_favicons = UniqueFeed.objects.raw(
"""
select id, url from feeds_uniquefeed u
where
u.url != '' and
not exists (
select 1 from feeds_favicon f
where f.url = u.url
)
""")
for feed in missing_favicons:
enqueue_favicon(feed.url)
| bsd-3-clause |
rvalyi/OpenUpgrade | addons/hw_posbox_homepage/__init__.py | 1894 | 1075 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
HyperBaton/ansible | lib/ansible/modules/cloud/amazon/aws_config_aggregation_authorization.py | 9 | 5218 | #!/usr/bin/python
# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_config_aggregation_authorization
short_description: Manage cross-account AWS Config authorizations
description:
- Module manages AWS Config resources.
version_added: "2.6"
requirements: [ 'botocore', 'boto3' ]
author:
- "Aaron Smith (@slapula)"
options:
state:
description:
- Whether the Config rule should be present or absent.
default: present
choices: ['present', 'absent']
type: str
authorized_account_id:
description:
- The 12-digit account ID of the account authorized to aggregate data.
type: str
required: true
authorized_aws_region:
description:
- The region authorized to collect aggregated data.
type: str
required: true
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Get current account ID
aws_caller_info:
register: whoami
- aws_config_aggregation_authorization:
state: present
authorized_account_id: '{{ whoami.account }}'
    authorized_aws_region: us-east-1
'''
RETURN = '''#'''
try:
import botocore
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import AWSRetry
def resource_exists(client, module, params):
try:
current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations']
authorization_exists = next(
(item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']),
None
)
if authorization_exists:
return True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
return False
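# Idempotency pattern: existence is probed up front and main() branches on
# the result. Note the broad except maps API failures to "absent", so a
# transient describe error would fall through to the create path.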
def create_resource(client, module, params, result):
try:
response = client.put_aggregation_authorization(
AuthorizedAccountId=params['AuthorizedAccountId'],
AuthorizedAwsRegion=params['AuthorizedAwsRegion']
)
result['changed'] = True
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization")
def update_resource(client, module, params, result):
current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations']
current_params = next(
(item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']),
None
)
del current_params['AggregationAuthorizationArn']
del current_params['CreationTime']
if params != current_params:
try:
response = client.put_aggregation_authorization(
AuthorizedAccountId=params['AuthorizedAccountId'],
AuthorizedAwsRegion=params['AuthorizedAwsRegion']
)
result['changed'] = True
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization")
def delete_resource(client, module, params, result):
try:
response = client.delete_aggregation_authorization(
AuthorizedAccountId=params['AuthorizedAccountId'],
AuthorizedAwsRegion=params['AuthorizedAwsRegion']
)
result['changed'] = True
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't delete AWS Aggregation authorization")
def main():
module = AnsibleAWSModule(
argument_spec={
'state': dict(type='str', choices=['present', 'absent'], default='present'),
'authorized_account_id': dict(type='str', required=True),
'authorized_aws_region': dict(type='str', required=True),
},
supports_check_mode=False,
)
result = {'changed': False}
params = {
'AuthorizedAccountId': module.params.get('authorized_account_id'),
'AuthorizedAwsRegion': module.params.get('authorized_aws_region'),
}
client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
resource_status = resource_exists(client, module, params)
if module.params.get('state') == 'present':
if not resource_status:
create_resource(client, module, params, result)
else:
update_resource(client, module, params, result)
if module.params.get('state') == 'absent':
if resource_status:
delete_resource(client, module, params, result)
module.exit_json(changed=result['changed'])
if __name__ == '__main__':
main()
| gpl-3.0 |
jythontools/pip | pip/commands/completion.py | 435 | 1991 | from __future__ import absolute_import
import sys
from pip.basecommand import Command
BASE_COMPLETION = """
# pip %(shell)s completion start%(script)s# pip %(shell)s completion end
"""
COMPLETION_SCRIPTS = {
'bash': """
_pip_completion()
{
COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\
COMP_CWORD=$COMP_CWORD \\
PIP_AUTO_COMPLETE=1 $1 ) )
}
complete -o default -F _pip_completion pip
""", 'zsh': """
function _pip_completion {
local words cword
read -Ac words
read -cn cword
reply=( $( COMP_WORDS="$words[*]" \\
COMP_CWORD=$(( cword-1 )) \\
PIP_AUTO_COMPLETE=1 $words[1] ) )
}
compctl -K _pip_completion pip
"""}
class CompletionCommand(Command):
"""A helper command to be used for command completion."""
name = 'completion'
summary = 'A helper command to be used for command completion'
hidden = True
def __init__(self, *args, **kw):
super(CompletionCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'--bash', '-b',
action='store_const',
const='bash',
dest='shell',
help='Emit completion code for bash')
cmd_opts.add_option(
'--zsh', '-z',
action='store_const',
const='zsh',
dest='shell',
help='Emit completion code for zsh')
self.parser.insert_option_group(0, cmd_opts)
def run(self, options, args):
"""Prints the completion code of the given shell"""
shells = COMPLETION_SCRIPTS.keys()
shell_options = ['--' + shell for shell in sorted(shells)]
if options.shell in shells:
script = COMPLETION_SCRIPTS.get(options.shell, '')
print(BASE_COMPLETION % {'script': script, 'shell': options.shell})
else:
sys.stderr.write(
'ERROR: You must pass %s\n' % ' or '.join(shell_options)
)
| mit |
CenterForOpenScience/osf.io | api_tests/tokens/views/test_token_detail.py | 10 | 21922 | import mock
import pytest
from api.scopes.serializers import SCOPES_RELATIONSHIP_VERSION
from osf_tests.factories import (
ApiOAuth2PersonalTokenFactory,
ApiOAuth2ScopeFactory,
AuthUserFactory,
)
from tests.base import assert_dict_contains_subset
from website.util import api_v2_url
@pytest.mark.django_db
def post_payload(
type_payload='tokens',
scopes=None,
name='A shiny updated token'):
if not scopes:
scopes = ApiOAuth2ScopeFactory().name
return {
'data': {
'type': type_payload,
'attributes': {
'name': name,
},
'relationships': {
'scopes': {
'data': [
{
'type': 'scopes',
'id': scopes
}
]
}
}
}
}
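# post_payload builds a JSON:API request body: the scope travels under
# relationships.scopes.data as a list of resource-identifier objects
# ({'type', 'id'}), the shape the scopes-as-relationships API version expects.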
@pytest.mark.django_db
class TestTokenDetailScopesAsRelationships:
@pytest.fixture()
def user_one(self):
return AuthUserFactory()
@pytest.fixture()
def user_two(self):
return AuthUserFactory()
@pytest.fixture()
def token_user_one(self, user_one):
return ApiOAuth2PersonalTokenFactory(owner=user_one)
@pytest.fixture()
def url_token_detail(self, user_one, token_user_one):
path = 'tokens/{}/?version={}'.format(token_user_one._id, SCOPES_RELATIONSHIP_VERSION)
return api_v2_url(path, base_route='/')
@pytest.fixture()
def url_token_list(self):
return api_v2_url('tokens/?version={}'.format(SCOPES_RELATIONSHIP_VERSION), base_route='/')
@pytest.fixture()
def read_scope(self):
return ApiOAuth2ScopeFactory()
def test_token_detail_who_can_view(
self, app, url_token_detail, user_one,
user_two, token_user_one):
# test_owner_can_view
res = app.get(url_token_detail, auth=user_one.auth)
assert res.status_code == 200
assert res.json['data']['id'] == token_user_one._id
# test_non_owner_cant_view
res = app.get(url_token_detail, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
# test_returns_401_when_not_logged_in
res = app.get(url_token_detail, expect_errors=True)
assert res.status_code == 401
@mock.patch('framework.auth.cas.CasClient.revoke_tokens')
def test_owner_can_delete(
self, mock_method, app, user_one, url_token_detail):
mock_method.return_value(True)
res = app.delete(url_token_detail, auth=user_one.auth)
assert res.status_code == 204
@mock.patch('framework.auth.cas.CasClient.revoke_tokens')
def test_deleting_tokens_makes_api_view_inaccessible(
self, mock_method, app, url_token_detail, user_one):
mock_method.return_value(True)
app.delete(url_token_detail, auth=user_one.auth)
res = app.get(url_token_detail, auth=user_one.auth, expect_errors=True)
assert res.status_code == 404
@mock.patch('framework.auth.cas.CasClient.revoke_tokens')
def test_updating_one_field_should_not_blank_others_on_patch_update(
self, mock_revoke, app, token_user_one, url_token_detail, user_one, read_scope):
mock_revoke.return_value = True
user_one_token = token_user_one
new_name = 'The token formerly known as Prince'
res = app.patch_json_api(
url_token_detail,
{
'data': {
'attributes': {
'name': new_name,
},
'id': token_user_one._id,
'type': 'tokens',
'relationships': {
'scopes': {
'data': [
{
'type': 'scopes',
'id': read_scope.name
}
]
}
}
}
},
auth=user_one.auth)
user_one_token.reload()
assert res.status_code == 200
assert_dict_contains_subset(
{
'name': new_name,
},
res.json['data']['attributes'])
assert res.json['data']['id'] == user_one_token._id
assert res.json['data']['relationships']['owner']['data']['id'] == user_one_token.owner._id
assert len(res.json['data']['embeds']['scopes']['data']) == 1
assert res.json['data']['embeds']['scopes']['data'][0]['id'] == read_scope.name
@mock.patch('framework.auth.cas.CasClient.revoke_tokens')
def test_updating_an_instance_does_not_change_the_number_of_instances(
self, mock_revoke, app, url_token_detail, url_token_list, token_user_one, user_one, read_scope):
mock_revoke.return_value = True
new_name = 'The token formerly known as Prince'
res = app.patch_json_api(
url_token_detail,
{'data': {
'attributes': {
'name': new_name,
},
'relationships': {
'scopes': {
'data': [
{
'type': 'scopes',
'id': read_scope.name
}
]
}
},
'id': token_user_one._id,
'type': 'tokens'}},
auth=user_one.auth)
assert res.status_code == 200
res = app.get(url_token_list, auth=user_one.auth)
assert res.status_code == 200
assert (len(res.json['data']) == 1)
@mock.patch('framework.auth.cas.CasClient.revoke_tokens')
def test_deleting_token_flags_instance_inactive(
self, mock_method, app, url_token_detail, user_one, token_user_one):
mock_method.return_value(True)
app.delete(url_token_detail, auth=user_one.auth)
token_user_one.reload()
assert not token_user_one.is_active
def test_read_does_not_return_token_id(
self, app, url_token_detail, user_one):
res = app.get(url_token_detail, auth=user_one.auth)
assert res.status_code == 200
assert 'token_id' not in res.json['data']['attributes']
@mock.patch('framework.auth.cas.CasClient.revoke_tokens')
def test_update_token_does_not_return_token_id(
self, mock_revoke, app, url_token_detail, user_one):
mock_revoke.return_value = True
correct = post_payload()
res = app.put_json_api(
url_token_detail,
correct,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 200
assert 'token_id' not in res.json['data']['attributes']
@mock.patch('framework.auth.cas.CasClient.revoke_tokens')
def test_update_token(self, mock_revoke, app, user_one, url_token_detail):
mock_revoke.return_value = True
correct = post_payload()
res = app.put_json_api(
url_token_detail,
correct,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 200
@mock.patch('framework.auth.cas.CasClient.revoke_tokens')
def test_update_token_add_scope(self, mock_revoke, app, user_one, token_user_one, url_token_detail):
mock_revoke.return_value = True
original_scope = token_user_one.scopes.first()
scope = ApiOAuth2ScopeFactory()
correct = post_payload(scopes=scope.name)
res = app.put_json_api(
url_token_detail,
correct,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 200
scopes_data = res.json['data']['embeds']['scopes']['data']
assert len(scopes_data) == 1
assert scopes_data[0]['id'] == scope.name
assert scope.name != original_scope.name
@pytest.mark.enable_implicit_clean
@mock.patch('framework.auth.cas.CasClient.revoke_tokens')
def test_token_detail_crud_with_wrong_payload(
self, mock_revoke, app, url_token_list, url_token_detail,
token_user_one, user_one, user_two):
mock_revoke.return_value = True
# test_non_owner_cant_delete
res = app.delete(
url_token_detail,
auth=user_two.auth,
expect_errors=True)
assert res.status_code == 403
# test_create_with_nonexistant_scope_fails
injected_scope = post_payload(
name='A shiny invalid token',
scopes='osf.admin')
res = app.post_json_api(
url_token_list,
injected_scope,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 404
# test_create_with_private_scope_fails
fake_scope = ApiOAuth2ScopeFactory()
fake_scope.is_public = False
fake_scope.save()
nonsense_scope = post_payload(
name='A shiny invalid token',
scopes=fake_scope.name)
res = app.post_json_api(
url_token_list,
nonsense_scope,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 400
# test_update_with_nonexistant_scope_fails
injected_scope = post_payload(
name='A shiny invalid token',
scopes='osf.admin')
res = app.put_json_api(
url_token_detail,
injected_scope,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 404
# test_update_with_private_scope_fails
nonsense_scope = post_payload(
name='A shiny invalid token',
scopes=fake_scope.name)
res = app.put_json_api(
url_token_detail,
nonsense_scope,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 400
# test_update_token_incorrect_type
incorrect_type = post_payload(type_payload='Wrong type.')
res = app.put_json_api(
url_token_detail,
incorrect_type,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 409
# test_update_token_no_type
missing_type = post_payload(type_payload='')
res = app.put_json_api(
url_token_detail,
missing_type,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 400
# test_update_token_no_attributes
payload = {
'id': token_user_one._id,
'type': 'tokens',
'name': 'The token formerly known as Prince'
}
res = app.put_json_api(
url_token_detail,
payload,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 400
# test_partial_update_token_incorrect_type
incorrect_type = post_payload(type_payload='Wrong type.')
res = app.patch_json_api(
url_token_detail,
incorrect_type,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 409
# test_partial_update_token_no_type
missing_type = post_payload(type_payload='')
res = app.patch_json_api(
url_token_detail,
missing_type,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 400
def post_attributes_payload(
type_payload='tokens',
scopes='osf.full_write',
name='A shiny updated token'):
return {
'data': {
'type': type_payload,
'attributes': {
'name': name,
'scopes': scopes,
}
}
}
@pytest.mark.django_db
class TestTokenDetailScopesAsAttributes:
@pytest.fixture()
def write_scope(self):
return ApiOAuth2ScopeFactory(name='osf.full_write')
@pytest.fixture()
def user_one(self):
return AuthUserFactory()
@pytest.fixture()
def user_two(self):
return AuthUserFactory()
@pytest.fixture()
def token_user_one(self, user_one):
return ApiOAuth2PersonalTokenFactory(owner=user_one)
@pytest.fixture()
def url_token_detail(self, user_one, token_user_one):
path = 'tokens/{}/'.format(token_user_one._id)
return api_v2_url(path, base_route='/')
@pytest.fixture()
def url_token_list(self):
return api_v2_url('tokens/', base_route='/')
def test_token_detail_who_can_view(
self, app, url_token_detail, user_one,
user_two, token_user_one):
# test_owner_can_view
res = app.get(url_token_detail, auth=user_one.auth)
assert res.status_code == 200
assert res.json['data']['id'] == token_user_one._id
assert res.json['data']['attributes']['scopes'] == token_user_one.scopes.first().name
# test_non_owner_cant_view
res = app.get(url_token_detail, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
# test_returns_401_when_not_logged_in
res = app.get(url_token_detail, expect_errors=True)
assert res.status_code == 401
@mock.patch('framework.auth.cas.CasClient.revoke_tokens')
def test_owner_can_delete(
self, mock_method, app, user_one, url_token_detail):
mock_method.return_value(True)
res = app.delete(url_token_detail, auth=user_one.auth)
assert res.status_code == 204
@mock.patch('framework.auth.cas.CasClient.revoke_tokens')
def test_deleting_tokens_makes_api_view_inaccessible(
self, mock_method, app, url_token_detail, user_one):
mock_method.return_value(True)
app.delete(url_token_detail, auth=user_one.auth)
res = app.get(url_token_detail, auth=user_one.auth, expect_errors=True)
assert res.status_code == 404
@mock.patch('framework.auth.cas.CasClient.revoke_tokens')
def test_updating_one_field_should_not_blank_others_on_patch_update(
self, mock_revoke, app, token_user_one, url_token_detail, user_one, write_scope):
mock_revoke.return_value = True
user_one_token = token_user_one
new_name = 'The token formerly known as Prince'
res = app.patch_json_api(
url_token_detail,
{
'data': {
'attributes': {
'name': new_name,
'scopes': 'osf.full_write'
},
'id': token_user_one._id,
'type': 'tokens'
}
},
auth=user_one.auth)
user_one_token.reload()
assert res.status_code == 200
assert_dict_contains_subset(
{
'owner': user_one_token.owner._id,
'name': new_name,
'scopes': '{}'.format(write_scope.name),
},
res.json['data']['attributes'])
assert res.json['data']['id'] == user_one_token._id
@mock.patch('framework.auth.cas.CasClient.revoke_tokens')
def test_updating_an_instance_does_not_change_the_number_of_instances(
self, mock_revoke, app, url_token_detail, url_token_list, token_user_one, user_one, write_scope):
mock_revoke.return_value = True
new_name = 'The token formerly known as Prince'
res = app.patch_json_api(
url_token_detail,
{'data': {
'attributes': {
'name': new_name,
'scopes': 'osf.full_write'
},
'id': token_user_one._id,
'type': 'tokens'}},
auth=user_one.auth)
assert res.status_code == 200
res = app.get(url_token_list, auth=user_one.auth)
assert res.status_code == 200
assert (len(res.json['data']) == 1)
assert res.json['data'][0]['attributes']['scopes'] == write_scope.name
@mock.patch('framework.auth.cas.CasClient.revoke_tokens')
def test_deleting_token_flags_instance_inactive(
self, mock_method, app, url_token_detail, user_one, token_user_one):
mock_method.return_value(True)
app.delete(url_token_detail, auth=user_one.auth)
token_user_one.reload()
assert not token_user_one.is_active
def test_read_does_not_return_token_id(
self, app, url_token_detail, user_one):
res = app.get(url_token_detail, auth=user_one.auth)
assert res.status_code == 200
assert 'token_id' not in res.json['data']['attributes']
assert 'scopes' in res.json['data']['attributes']
@mock.patch('framework.auth.cas.CasClient.revoke_tokens')
def test_update_token_does_not_return_token_id(
self, mock_revoke, app, url_token_detail, user_one, write_scope):
mock_revoke.return_value = True
correct = post_attributes_payload(scopes='osf.full_write')
res = app.put_json_api(
url_token_detail,
correct,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 200
assert 'token_id' not in res.json['data']['attributes']
assert res.json['data']['attributes']['scopes'] == write_scope.name
@mock.patch('framework.auth.cas.CasClient.revoke_tokens')
def test_update_token(self, mock_revoke, app, user_one, url_token_detail, write_scope):
mock_revoke.return_value = True
correct = post_attributes_payload(scopes='osf.full_write')
res = app.put_json_api(
url_token_detail,
correct,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 200
assert res.json['data']['attributes']['scopes'] == write_scope.name
def test_token_detail_crud_with_wrong_payload(
self, app, url_token_list, url_token_detail,
token_user_one, user_one, user_two):
# test_non_owner_cant_delete
res = app.delete(
url_token_detail,
auth=user_two.auth,
expect_errors=True)
assert res.status_code == 403
# test_create_with_admin_scope_fails
admin_token = ApiOAuth2ScopeFactory(name='osf.admin')
admin_token.is_public = False
admin_token.save()
injected_scope = post_attributes_payload(
name='A shiny invalid token',
scopes='osf.admin')
res = app.post_json_api(
url_token_list,
injected_scope,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 400
# test_create_with_fake_scope_fails
nonsense_scope = post_attributes_payload(
name='A shiny invalid token',
scopes='osf.nonsense')
res = app.post_json_api(
url_token_list,
nonsense_scope,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 404
# test_update_with_admin_scope_fails
injected_scope = post_attributes_payload(
name='A shiny invalid token',
scopes='osf.admin')
res = app.put_json_api(
url_token_detail,
injected_scope,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 400
# test_update_with_fake_scope_fails
nonsense_scope = post_attributes_payload(
name='A shiny invalid token',
scopes='osf.nonsense')
res = app.put_json_api(
url_token_detail,
nonsense_scope,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 404
# test_update_token_incorrect_type
incorrect_type = post_attributes_payload(type_payload='Wrong type.')
res = app.put_json_api(
url_token_detail,
incorrect_type,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 409
# test_update_token_no_type
missing_type = post_attributes_payload(type_payload='')
res = app.put_json_api(
url_token_detail,
missing_type,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 400
# test_update_token_no_attributes
payload = {
'id': token_user_one._id,
'type': 'tokens',
'name': 'The token formerly known as Prince'
}
res = app.put_json_api(
url_token_detail,
payload,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 400
# test_partial_update_token_incorrect_type
incorrect_type = post_attributes_payload(type_payload='Wrong type.')
res = app.patch_json_api(
url_token_detail,
incorrect_type,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 409
# test_partial_update_token_no_type
missing_type = post_attributes_payload(type_payload='')
res = app.patch_json_api(
url_token_detail,
missing_type,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 400
# test_update_token_name_too_long
payload = post_payload(name='A' * 101)
res = app.put_json_api(
url_token_detail,
payload,
auth=user_one.auth,
expect_errors=True)
assert res.status_code == 400
| apache-2.0 |
warp1337/opencv_facerecognizer | src/ocvfacerec/facerec/classifier.py | 1 | 9086 | # Copyright (c) 2015.
# Philipp Wagner <bytefish[at]gmx[dot]de> and
# Florian Lier <flier[at]techfak.uni-bielefeld.de> and
# Norman Koester <nkoester[at]techfak.uni-bielefeld.de>
#
#
# Released to public domain under terms of the BSD Simplified license.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the organization nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# See <http://www.opensource.org/licenses/bsd-license>
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from ocvfacerec.facerec.distance import EuclideanDistance
from ocvfacerec.facerec.util import as_row_matrix
import logging
import numpy as np
import operator as op
class AbstractClassifier(object):
def compute(self, X, y):
raise NotImplementedError("Every AbstractClassifier must implement the compute method.")
def predict(self, X):
raise NotImplementedError("Every AbstractClassifier must implement the predict method.")
def update(self, X, y):
raise NotImplementedError("This Classifier is cannot be updated.")
class NearestNeighbor(AbstractClassifier):
"""
Implements a k-Nearest Neighbor Model with a generic distance metric.
"""
def __init__(self, dist_metric=EuclideanDistance(), k=1):
AbstractClassifier.__init__(self)
self.k = k
self.dist_metric = dist_metric
self.X = []
self.y = np.array([], dtype=np.int32)
def update(self, X, y):
"""
Updates the classifier.
"""
self.X.append(X)
self.y = np.append(self.y, y)
def compute(self, X, y):
self.X = X
self.y = np.asarray(y)
def predict(self, q):
"""
Predicts the k-nearest neighbor for a given query in q.
Args:
q: The given query sample, which is an array.
Returns:
A list with the classifier output. In this framework it is
assumed, that the predicted class is always returned as first
element. Moreover, this class returns the distances for the
first k-Nearest Neighbors.
Example:
[ 0,
{ 'labels' : [ 0, 0, 1 ],
'distances' : [ 10.132, 10.341, 13.314 ]
}
]
So if you want to perform a thresholding operation, you could
pick the distances in the second array of the generic classifier
output.
"""
distances = []
for xi in self.X:
xi = xi.reshape(-1, 1)
d = self.dist_metric(xi, q)
distances.append(d)
if len(distances) > len(self.y):
raise Exception("More distances than classes. Is your distance metric correct?")
distances = np.asarray(distances)
# Get the indices in an ascending sort order:
idx = np.argsort(distances)
# Sort the labels and distances accordingly:
sorted_y = self.y[idx]
sorted_distances = distances[idx]
# Take only the k first items:
sorted_y = sorted_y[0:self.k]
sorted_distances = sorted_distances[0:self.k]
# Make a histogram of them:
hist = dict((key, val) for key, val in enumerate(np.bincount(sorted_y)) if val)
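# Example (k=3): sorted_y = [0, 0, 1] makes np.bincount(sorted_y) == [2, 1],
# so hist == {0: 2, 1: 1} and the majority vote below picks label 0.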
# And get the bin with the maximum frequency:
predicted_label = max(hist.iteritems(), key=op.itemgetter(1))[0]
# A classifier should output a list with the label as first item and
# generic data behind. The k-nearest neighbor classifier outputs the
# distance of the k first items. So imagine you have a 1-NN and you
# want to perform a threshold against it, you should take the first
# item
return [predicted_label, {'labels': sorted_y, 'distances': sorted_distances}]
def __repr__(self):
return "NearestNeighbor (k=%s, dist_metric=%s)" % (self.k, repr(self.dist_metric))
# libsvm
try:
from svmutil import *
except ImportError:
logger = logging.getLogger("facerec.classifier.SVM")
logger.debug("Import Error: libsvm bindings not available.")
except:
logger = logging.getLogger("facerec.classifier.SVM")
logger.debug("Import Error: libsvm bindings not available.")
import sys
from StringIO import StringIO
bkp_stdout = sys.stdout
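# libsvm's svm_predict prints an accuracy summary to stdout; predict() below
# temporarily swaps sys.stdout for a StringIO buffer and then restores the
# original stream saved here, keeping the console output clean.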
class SVM(AbstractClassifier):
"""
This class is just a simple wrapper to use libsvm in the
CrossValidation module. If you don't use this framework
use the validation methods coming with LibSVM, they are
much easier to access (simply pass the correct class
labels in svm_predict and you are done...).
The grid search method in this class is somewhat similar
to libsvm grid.py, as it performs a parameter search over
a logarithmic scale. Again if you don't use this framework,
use the libsvm tools as they are much easier to access.
Please keep in mind to normalize your input data, as expected
for the model. There's no way to assume a generic normalization
step.
"""
def __init__(self, param=None):
AbstractClassifier.__init__(self)
self.logger = logging.getLogger("facerec.classifier.SVM")
self.param = param
self.svm = svm_model()
if self.param is None:
self.param = svm_parameter("-q")
def compute(self, X, y):
self.logger.debug("SVM TRAINING (C=%.2f,gamma=%.2f,p=%.2f,nu=%.2f,coef=%.2f,degree=%.2f)" % (
self.param.C, self.param.gamma, self.param.p, self.param.nu, self.param.coef0, self.param.degree))
# turn data into a row vector (needed for libsvm)
X = as_row_matrix(X)
y = np.asarray(y)
problem = svm_problem(y, X.tolist())
self.svm = svm_train(problem, self.param)
self.y = y
def predict(self, X):
"""
Args:
X: The query image, which is an array.
Returns:
A list with the classifier output. In this framework it is
assumed, that the predicted class is always returned as first
element. Moreover, this class returns the libsvm output for
p_labels, p_acc and p_vals. The libsvm help states:
p_labels: a list of predicted labels
p_acc: a tuple including accuracy (for classification), mean-squared
error, and squared correlation coefficient (for regression).
p_vals: a list of decision values or probability estimates (if '-b 1'
is specified). If k is the number of classes, for decision values,
each element includes results of predicting k(k-1)/2 binary-class
SVMs. For probabilities, each element contains k values indicating
the probability that the testing instance is in each class.
Note that the order of classes here is the same as 'model.label'
field in the model structure.
"""
X = np.asarray(X).reshape(1, -1)
sys.stdout = StringIO()
p_lbl, p_acc, p_val = svm_predict([0], X.tolist(), self.svm)
sys.stdout = bkp_stdout
predicted_label = int(p_lbl[0])
return [predicted_label, {'p_lbl': p_lbl, 'p_acc': p_acc, 'p_val': p_val}]
def __repr__(self):
return "Support Vector Machine (kernel_type=%s, C=%.2f,gamma=%.2f,p=%.2f,nu=%.2f,coef=%.2f,degree=%.2f)" % (
KERNEL_TYPE[self.param.kernel_type], self.param.C, self.param.gamma, self.param.p, self.param.nu,
self.param.coef0, self.param.degree)
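# Illustrative usage sketch (assumes the libsvm python bindings are importable
# as svmutil; the parameter string and data are placeholders):
#
#   param = svm_parameter("-q -t 2 -c 1.0 -g 0.5")  # quiet, RBF kernel
#   clf = SVM(param)
#   clf.compute(X, y)        # X: iterable of feature vectors, y: labels
#   label, details = clf.predict(x_query)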
| bsd-3-clause |
debbiedub/bcdef | features/steps/application.py | 1 | 1080 | import logging
from multiprocessing import Process
from bc import BCMain
from fcp.CommunicationQueues import comm
def run_create_first_block(queues, *args):
global comm
comm.set(queues=queues)
try:
logging.getLogger().setLevel(logging.DEBUG)
# logging.getLogger().addHandler(comm.get_handler())
logging.info("Started logging")
bc = BCMain(*args)
bc.participants.round_timeout = 1
bc.create_first_block()
finally:
comm.empty_queues()
@when(u'the application is started to create the first block')
def step_impl(context):
global comm
context.bc_process = Process(target=run_create_first_block,
args=(comm, "Me",))
context.bc_process.start()
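# Drive the simulated node: answer what looks like the FCP hello handshake
# first, then the Web-of-Trust Ping and own-identity enumeration the
# application performs while creating the first block.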
context.node_simulator.expect("hello")
context.node_simulator.respond(("olleh",))
context.node_simulator.expect_wot("Ping")
context.node_simulator.respond_wot({"Message":"Pong"})
context.node_simulator.expect_wot("GetOwnIdentities")
context.node_simulator.respond_wot({"Replies.Amount": "0"})
| gpl-3.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Source/Python/Lib/python27/unittest/test/test_loader.py | 40 | 49870 | import sys
import types
import unittest
class Test_TestLoader(unittest.TestCase):
### Tests for TestLoader.loadTestsFromTestCase
################################################################
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
def test_loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure it does the right thing even if no tests were found
def test_loadTestsFromTestCase__no_matches(self):
class Foo(unittest.TestCase):
def foo_bar(self): pass
empty_suite = unittest.TestSuite()
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), empty_suite)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# What happens if loadTestsFromTestCase() is given an object
# that isn't a subclass of TestCase? Specifically, what happens
# if testCaseClass is a subclass of TestSuite?
#
# This is checked for specifically in the code, so we better add a
# test for it.
def test_loadTestsFromTestCase__TestSuite_subclass(self):
class NotATestCase(unittest.TestSuite):
pass
loader = unittest.TestLoader()
try:
loader.loadTestsFromTestCase(NotATestCase)
except TypeError:
pass
else:
self.fail('Should raise TypeError')
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure loadTestsFromTestCase() picks up the default test method
# name (as specified by TestCase), even though the method name does
# not match the default TestLoader.testMethodPrefix string
def test_loadTestsFromTestCase__default_method_name(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
loader = unittest.TestLoader()
# This has to be false for the test to succeed
self.assertFalse('runTest'.startswith(loader.testMethodPrefix))
suite = loader.loadTestsFromTestCase(Foo)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [Foo('runTest')])
################################################################
### /Tests for TestLoader.loadTestsFromTestCase
### Tests for TestLoader.loadTestsFromModule
################################################################
# "This method searches `module` for classes derived from TestCase"
def test_loadTestsFromModule__TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
expected = [loader.suiteClass([MyTestCase('test')])]
self.assertEqual(list(suite), expected)
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (no TestCase instances)?
def test_loadTestsFromModule__no_TestCase_instances(self):
m = types.ModuleType('m')
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (TestCases instances, but no tests)?
def test_loadTestsFromModule__no_TestCase_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [loader.suiteClass()])
# "This method searches `module` for classes derived from TestCase"s
#
# What happens if loadTestsFromModule() is given something other
# than a module?
#
# XXX Currently, it succeeds anyway. This flexibility
# should either be documented or loadTestsFromModule() should
# raise a TypeError
#
# XXX Certain people are using this behaviour. We'll add a test for it
def test_loadTestsFromModule__not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# Check that loadTestsFromModule honors (or not) a module
# with a load_tests function.
def test_loadTestsFromModule__load_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
load_tests_args = []
def load_tests(loader, tests, pattern):
self.assertIsInstance(tests, unittest.TestSuite)
load_tests_args.extend((loader, tests, pattern))
return tests
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(load_tests_args, [loader, suite, None])
load_tests_args = []
suite = loader.loadTestsFromModule(m, use_load_tests=False)
self.assertEqual(load_tests_args, [])
def test_loadTestsFromModule__faulty_load_tests(self):
m = types.ModuleType('m')
def load_tests(loader, tests, pattern):
raise TypeError('some failure')
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(suite.countTestCases(), 1)
test = list(suite)[0]
self.assertRaisesRegexp(TypeError, "some failure", test.m)
################################################################
### /Tests for TestLoader.loadTestsFromModule()
### Tests for TestLoader.loadTestsFromName()
################################################################
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromName__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('')
except ValueError, e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the name contains invalid characters?
def test_loadTestsFromName__malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromName('abc () //')
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve ... to a
# module"
#
# What happens when a module by that name can't be found?
def test_loadTestsFromName__unknown_module_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf')
except ImportError, e:
self.assertEqual(str(e), "No module named sdasfasfasdf")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module is found, but the attribute can't?
def test_loadTestsFromName__unknown_attr_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('unittest.sdasfasfasdf')
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when we provide the module, but the attribute can't be
# found?
def test_loadTestsFromName__relative_unknown_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf', unittest)
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise ValueError when passed an empty
# name relative to a provided module?
#
# XXX Should probably raise a ValueError instead of an AttributeError
def test_loadTestsFromName__relative_empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('', unittest)
except AttributeError:
pass
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when an impossible name is given, relative to the provided
# `module`?
def test_loadTestsFromName__relative_malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromName('abc () //', unittest)
except ValueError:
pass
except AttributeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise TypeError when the `module` argument
# isn't a module object?
#
# XXX Accepts the not-a-module object, ignoring the object's type
# This should raise an exception or the method name should be changed
#
# XXX Some people are relying on this, so keep it for now
def test_loadTestsFromName__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('test_2', NotAModule)
reference = [MyTestCase('test')]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromName__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1', m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may
# resolve either to ... a test case class"
def test_loadTestsFromName__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
def test_loadTestsFromName__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testsuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
def test_loadTestsFromName__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does loadTestsFromName() raise the proper exception when trying to
# resolve "a test method within a test case class" that doesn't exist
# for the given name (relative to a provided module)?
def test_loadTestsFromName__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1.testfoo', m)
except AttributeError, e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromName__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestSuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1, testcase_2])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromName__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__callable__TestCase_instance_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
loader.suiteClass = SubTestSuite
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__relative_testmethod_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
loader.suiteClass=SubTestSuite
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens if the callable returns something else?
def test_loadTestsFromName__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('return_wrong', m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromName__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromName(module_name)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# module should now be loaded, thanks to loadTestsFromName()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromName()
### Tests for TestLoader.loadTestsFromNames()
################################################################
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
#
# What happens if that sequence of names is empty?
def test_loadTestsFromNames__empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens if that sequence of names is empty?
#
# XXX Should this raise a ValueError or just return an empty TestSuite?
def test_loadTestsFromNames__relative_empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([], unittest)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromNames__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames([''])
except ValueError, e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when presented with an impossible module name?
def test_loadTestsFromNames__malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromNames(['abc () //'])
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when no module can be found for the given name?
def test_loadTestsFromNames__unknown_module_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'])
except ImportError, e:
self.assertEqual(str(e), "No module named sdasfasfasdf")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module can be found, but not the attribute?
def test_loadTestsFromNames__unknown_attr_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['unittest.sdasfasfasdf', 'unittest'])
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when given an unknown attribute on a specified `module`
# argument?
def test_loadTestsFromNames__unknown_name_relative_1(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'], unittest)
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Do unknown attributes (relative to a provided module) still raise an
# exception even in the presence of valid attribute names?
def test_loadTestsFromNames__unknown_name_relative_2(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['TestCase', 'sdasfasfasdf'], unittest)
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when faced with the empty string?
#
# XXX This currently raises AttributeError, though ValueError is probably
# more appropriate
def test_loadTestsFromNames__relative_empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames([''], unittest)
except AttributeError:
pass
else:
self.fail("Failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when presented with an impossible attribute name?
def test_loadTestsFromNames__relative_malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromNames(['abc () //'], unittest)
except AttributeError:
pass
except ValueError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromNames() make sure the provided `module` is in fact
# a module?
#
# XXX This validation is currently not done. This flexibility should
# either be documented or a TypeError should be raised.
def test_loadTestsFromNames__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['test_2'], NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromNames__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1'], m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test case class"
def test_loadTestsFromNames__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = loader.suiteClass([MyTestCase('test')])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a TestSuite instance"
def test_loadTestsFromNames__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testsuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [m.testsuite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
def test_loadTestsFromNames__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1.test'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([MyTestCase('test')])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
#
# Does the method gracefully handle names that initially look like they
# resolve to "a test method within a test case class" but don't?
def test_loadTestsFromNames__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1.testfoo'], m)
except AttributeError, e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromNames__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['return_TestSuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = unittest.TestSuite([testcase_1, testcase_2])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromNames__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['return_TestCase'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# Are staticmethods handled correctly?
def test_loadTestsFromNames__callable__call_staticmethod(self):
m = types.ModuleType('m')
class Test1(unittest.TestCase):
def test(self):
pass
testcase_1 = Test1('test')
class Foo(unittest.TestCase):
@staticmethod
def foo():
return testcase_1
m.Foo = Foo
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['Foo.foo'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens when the callable returns something else?
def test_loadTestsFromNames__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['return_wrong'], m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromNames__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromNames([module_name])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [unittest.TestSuite()])
# module should now be loaded, thanks to loadTestsFromName()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromNames()
### Tests for TestLoader.getTestCaseNames()
################################################################
# "Return a sorted sequence of method names found within testCaseClass"
#
# Test.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames(self):
class Test(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), ['test_1', 'test_2'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Does getTestCaseNames() behave appropriately if no tests are found?
def test_getTestCaseNames__no_tests(self):
class Test(unittest.TestCase):
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), [])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Are not-TestCases handled gracefully?
#
# XXX This should raise a TypeError, not return a list
#
# XXX It's too late in the 2.5 release cycle to fix this, but it should
# probably be revisited for 2.6
def test_getTestCaseNames__not_a_TestCase(self):
class BadCase(int):
def test_foo(self):
pass
loader = unittest.TestLoader()
names = loader.getTestCaseNames(BadCase)
self.assertEqual(names, ['test_foo'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Make sure inherited names are handled.
#
# TestP.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames__inheritance(self):
class TestP(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
class TestC(TestP):
def test_1(self): pass
def test_3(self): pass
loader = unittest.TestLoader()
names = ['test_1', 'test_2', 'test_3']
self.assertEqual(loader.getTestCaseNames(TestC), names)
################################################################
### /Tests for TestLoader.getTestCaseNames()
### Tests for TestLoader.testMethodPrefix
################################################################
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests_1 = unittest.TestSuite([Foo('foo_bar')])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = [unittest.TestSuite([Foo('foo_bar')])]
tests_2 = [unittest.TestSuite([Foo('test_1'), Foo('test_2')])]
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest.TestSuite([Foo('foo_bar')])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest.TestSuite([unittest.TestSuite([Foo('foo_bar')])])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
tests_2 = unittest.TestSuite([tests_2])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_2)
# "The default value is 'test'"
def test_testMethodPrefix__default_value(self):
loader = unittest.TestLoader()
self.assertTrue(loader.testMethodPrefix == 'test')
################################################################
### /Tests for TestLoader.testMethodPrefix
### Tests for TestLoader.sortTestMethodsUsing
################################################################
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromTestCase(self):
def reversed_cmp(x, y):
return -cmp(x, y)
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromModule(self):
def reversed_cmp(x, y):
return -cmp(x, y)
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromModule(m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromName(self):
def reversed_cmp(x, y):
return -cmp(x, y)
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromNames(self):
def reversed_cmp(x, y):
return -cmp(x, y)
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromNames(['Foo'], m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames()"
#
# Does it actually affect getTestCaseNames()?
def test_sortTestMethodsUsing__getTestCaseNames(self):
def reversed_cmp(x, y):
return -cmp(x, y)
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
test_names = ['test_2', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), test_names)
# "The default value is the built-in cmp() function"
def test_sortTestMethodsUsing__default_value(self):
loader = unittest.TestLoader()
self.assertTrue(loader.sortTestMethodsUsing is cmp)
# "it can be set to None to disable the sort."
#
# XXX How is this different from reassigning cmp? Are the tests returned
# in a random order or something? This behaviour should die
def test_sortTestMethodsUsing__None(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = None
test_names = ['test_2', 'test_1']
self.assertEqual(set(loader.getTestCaseNames(Foo)), set(test_names))
################################################################
### /Tests for TestLoader.sortTestMethodsUsing
### Tests for TestLoader.suiteClass
################################################################
# "Callable object that constructs a test suite from a list of tests."
def test_suiteClass__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromModule(m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests)
# "The default value is the TestSuite class"
def test_suiteClass__default_value(self):
loader = unittest.TestLoader()
self.assertIs(loader.suiteClass, unittest.TestSuite)
# Make sure the dotted name resolution works even if the actual
# function doesn't have the same name as is used to find it.
def test_loadTestsFromName__function_with_different_name_than_method(self):
# lambdas have the name '<lambda>'.
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
test = lambda: 1
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1.test'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([MyTestCase('test')])
self.assertEqual(list(suite), [ref_suite])
if __name__ == '__main__':
unittest.main()
| mit |
microcom/odoo | addons/project_issue/report/project_issue_report.py | 38 | 3441 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
from openerp import tools
class project_issue_report(osv.osv):
_name = "project.issue.report"
_auto = False
_columns = {
'team_id':fields.many2one('crm.team', 'Sale Team', oldname='section_id', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'opening_date': fields.datetime('Date of Opening', readonly=True),
'create_date': fields.datetime('Create Date', readonly=True),
'date_closed': fields.datetime('Date of Closing', readonly=True),
'date_last_stage_update': fields.datetime('Last Stage Update', readonly=True),
'stage_id': fields.many2one('project.task.type', 'Stage'),
'nbr': fields.integer('# of Issues', readonly=True), # TDE FIXME master: rename into nbr_issues
'working_hours_open': fields.float('Avg. Working Hours to Open', readonly=True, group_operator="avg"),
'working_hours_close': fields.float('Avg. Working Hours to Close', readonly=True, group_operator="avg"),
'delay_open': fields.float('Avg. Delay to Open', digits=(16,2), readonly=True, group_operator="avg",
help="Number of Days to open the project issue."),
'delay_close': fields.float('Avg. Delay to Close', digits=(16,2), readonly=True, group_operator="avg",
help="Number of Days to close the project issue"),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
'project_id':fields.many2one('project.project', 'Project',readonly=True),
'user_id' : fields.many2one('res.users', 'Assigned to',readonly=True),
'partner_id': fields.many2one('res.partner','Contact'),
'channel': fields.char('Channel', readonly=True, help="Communication Channel."),
'task_id': fields.many2one('project.task', 'Task'),
'email': fields.integer('# Emails', size=128, readonly=True),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'project_issue_report')
cr.execute("""
CREATE OR REPLACE VIEW project_issue_report AS (
SELECT
c.id as id,
c.date_open as opening_date,
c.create_date as create_date,
c.date_last_stage_update as date_last_stage_update,
c.user_id,
c.working_hours_open,
c.working_hours_close,
c.team_id,
c.stage_id,
date(c.date_closed) as date_closed,
c.company_id as company_id,
c.priority as priority,
c.project_id as project_id,
1 as nbr,
c.partner_id,
c.channel,
c.task_id,
c.day_open as delay_open,
c.day_close as delay_close,
(SELECT count(id) FROM mail_message WHERE model='project.issue' AND res_id=c.id) AS email
FROM
project_issue c
LEFT JOIN project_task t on c.task_id = t.id
WHERE c.active= 'true'
)""")
| agpl-3.0 |
elena/django | tests/utils_tests/test_functional.py | 7 | 8133 | from unittest import mock
from django.test import SimpleTestCase
from django.utils.functional import cached_property, classproperty, lazy
class FunctionalTests(SimpleTestCase):
def test_lazy(self):
t = lazy(lambda: tuple(range(3)), list, tuple)
for a, b in zip(t(), range(3)):
self.assertEqual(a, b)
def test_lazy_base_class(self):
"""lazy also finds base class methods in the proxy object"""
class Base:
def base_method(self):
pass
class Klazz(Base):
pass
t = lazy(lambda: Klazz(), Klazz)()
self.assertIn('base_method', dir(t))
def test_lazy_base_class_override(self):
"""lazy finds the correct (overridden) method implementation"""
class Base:
def method(self):
return 'Base'
class Klazz(Base):
def method(self):
return 'Klazz'
t = lazy(lambda: Klazz(), Base)()
self.assertEqual(t.method(), 'Klazz')
def test_lazy_object_to_string(self):
class Klazz:
def __str__(self):
return "Î am ā Ǩlâzz."
def __bytes__(self):
return b"\xc3\x8e am \xc4\x81 binary \xc7\xa8l\xc3\xa2zz."
t = lazy(lambda: Klazz(), Klazz)()
self.assertEqual(str(t), "Î am ā Ǩlâzz.")
self.assertEqual(bytes(t), b"\xc3\x8e am \xc4\x81 binary \xc7\xa8l\xc3\xa2zz.")
def assertCachedPropertyWorks(self, attr, Class):
with self.subTest(attr=attr):
def get(source):
return getattr(source, attr)
obj = Class()
class SubClass(Class):
pass
subobj = SubClass()
# Docstring is preserved.
self.assertEqual(get(Class).__doc__, 'Here is the docstring...')
self.assertEqual(get(SubClass).__doc__, 'Here is the docstring...')
# It's cached.
self.assertEqual(get(obj), get(obj))
self.assertEqual(get(subobj), get(subobj))
# The correct value is returned.
self.assertEqual(get(obj)[0], 1)
self.assertEqual(get(subobj)[0], 1)
# State isn't shared between instances.
obj2 = Class()
subobj2 = SubClass()
self.assertNotEqual(get(obj), get(obj2))
self.assertNotEqual(get(subobj), get(subobj2))
# It behaves like a property when there's no instance.
self.assertIsInstance(get(Class), cached_property)
self.assertIsInstance(get(SubClass), cached_property)
# 'other_value' doesn't become a property.
self.assertTrue(callable(obj.other_value))
self.assertTrue(callable(subobj.other_value))
def test_cached_property(self):
"""cached_property caches its value and behaves like a property."""
class Class:
@cached_property
def value(self):
"""Here is the docstring..."""
return 1, object()
@cached_property
def __foo__(self):
"""Here is the docstring..."""
return 1, object()
def other_value(self):
"""Here is the docstring..."""
return 1, object()
other = cached_property(other_value, name='other')
attrs = ['value', 'other', '__foo__']
for attr in attrs:
self.assertCachedPropertyWorks(attr, Class)
def test_cached_property_auto_name(self):
"""
cached_property caches its value and behaves like a property
on mangled methods or when the name kwarg isn't set.
"""
class Class:
@cached_property
def __value(self):
"""Here is the docstring..."""
return 1, object()
def other_value(self):
"""Here is the docstring..."""
return 1, object()
other = cached_property(other_value)
other2 = cached_property(other_value, name='different_name')
attrs = ['_Class__value', 'other']
for attr in attrs:
self.assertCachedPropertyWorks(attr, Class)
# An explicit name is ignored.
obj = Class()
obj.other2
self.assertFalse(hasattr(obj, 'different_name'))
def test_cached_property_reuse_different_names(self):
"""Disallow this case because the decorated function wouldn't be cached."""
with self.assertRaises(RuntimeError) as ctx:
class ReusedCachedProperty:
@cached_property
def a(self):
pass
b = a
self.assertEqual(
str(ctx.exception.__context__),
str(TypeError(
"Cannot assign the same cached_property to two different "
"names ('a' and 'b')."
))
)
def test_cached_property_reuse_same_name(self):
"""
Reusing a cached_property on different classes under the same name is
allowed.
"""
counter = 0
@cached_property
def _cp(_self):
nonlocal counter
counter += 1
return counter
class A:
cp = _cp
class B:
cp = _cp
a = A()
b = B()
self.assertEqual(a.cp, 1)
self.assertEqual(b.cp, 2)
self.assertEqual(a.cp, 1)
def test_cached_property_set_name_not_called(self):
cp = cached_property(lambda s: None)
class Foo:
pass
Foo.cp = cp
msg = 'Cannot use cached_property instance without calling __set_name__() on it.'
with self.assertRaisesMessage(TypeError, msg):
Foo().cp
def test_lazy_add(self):
lazy_4 = lazy(lambda: 4, int)
lazy_5 = lazy(lambda: 5, int)
self.assertEqual(lazy_4() + lazy_5(), 9)
def test_lazy_equality(self):
"""
== and != work correctly for Promises.
"""
lazy_a = lazy(lambda: 4, int)
lazy_b = lazy(lambda: 4, int)
lazy_c = lazy(lambda: 5, int)
self.assertEqual(lazy_a(), lazy_b())
self.assertNotEqual(lazy_b(), lazy_c())
def test_lazy_repr_text(self):
original_object = 'Lazy translation text'
lazy_obj = lazy(lambda: original_object, str)
self.assertEqual(repr(original_object), repr(lazy_obj()))
def test_lazy_repr_int(self):
original_object = 15
lazy_obj = lazy(lambda: original_object, int)
self.assertEqual(repr(original_object), repr(lazy_obj()))
def test_lazy_repr_bytes(self):
original_object = b'J\xc3\xbcst a str\xc3\xadng'
lazy_obj = lazy(lambda: original_object, bytes)
self.assertEqual(repr(original_object), repr(lazy_obj()))
def test_lazy_class_preparation_caching(self):
# lazy() should prepare the proxy class only once i.e. the first time
# it's used.
lazified = lazy(lambda: 0, int)
__proxy__ = lazified().__class__
with mock.patch.object(__proxy__, '__prepare_class__') as mocked:
lazified()
mocked.assert_not_called()
def test_classproperty_getter(self):
class Foo:
foo_attr = 123
def __init__(self):
self.foo_attr = 456
@classproperty
def foo(cls):
return cls.foo_attr
class Bar:
bar = classproperty()
@bar.getter
def bar(cls):
return 123
self.assertEqual(Foo.foo, 123)
self.assertEqual(Foo().foo, 123)
self.assertEqual(Bar.bar, 123)
self.assertEqual(Bar().bar, 123)
def test_classproperty_override_getter(self):
class Foo:
@classproperty
def foo(cls):
return 123
@foo.getter
def foo(cls):
return 456
self.assertEqual(Foo.foo, 456)
self.assertEqual(Foo().foo, 456)
| bsd-3-clause |
cytsao/X-Informatics-1.3.0 | common/safe_dom.py | 13 | 4961 | """Classes to build sanitized HTML."""
__author__ = 'John Orr (jorr@google.com)'
import cgi
import re
def escape(strg):
    return cgi.escape(strg, quote=1).replace("'", '&#39;').replace('`', '&#96;')
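# Illustrative example (not part of the original module): with quote=1,
# cgi.escape handles <, >, & and double quotes, and the extra replaces
# cover the remaining attribute-delimiter characters, e.g.
#     escape('<a "b\'c">')  ->  '&lt;a &quot;b&#39;c&quot;&gt;'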
class Node(object):
"""Base class for the sanitizing module."""
@property
def sanitized(self):
raise NotImplementedError()
def __str__(self):
return self.sanitized
class NodeList(object):
"""Holds a list of Nodes and can bulk sanitize them."""
def __init__(self):
self.list = []
def __len__(self):
return len(self.list)
def append(self, node):
assert node is not None, 'Cannot add an empty value to the node list'
self.list.append(node)
return self
@property
def sanitized(self):
sanitized_list = []
for node in self.list:
sanitized_list.append(node.sanitized)
return ''.join(sanitized_list)
def __str__(self):
return self.sanitized
class Text(Node):
"""Holds untrusted text which will be sanitized when accessed."""
def __init__(self, unsafe_string):
self._value = unsafe_string
@property
def sanitized(self):
return escape(self._value)
class Element(Node):
"""Embodies an HTML element which will be sanitized when accessed."""
_ALLOWED_NAME_PATTERN = re.compile('^[a-zA-Z][a-zA-Z0-9]*$')
_VOID_ELEMENTS = frozenset([
'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen',
'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr'])
def __init__(self, tag_name, **attr):
"""Initializes an element with given tag name and attributes.
Tag name will be restricted to alpha chars, attribute names
will be quote-escaped.
Args:
tag_name: the name of the element, which must match
_ALLOWED_NAME_PATTERN.
**attr: the names and value of the attributes. Names must match
_ALLOWED_NAME_PATTERN and values will be quote-escaped.
"""
assert Element._ALLOWED_NAME_PATTERN.match(tag_name), (
'tag name %s is not allowed' % tag_name)
for attr_name in attr:
assert Element._ALLOWED_NAME_PATTERN.match(attr_name), (
'attribute name %s is not allowed' % attr_name)
self._tag_name = tag_name
self._attr = attr
self._children = []
def add_attribute(self, **attr):
for attr_name, value in attr.items():
assert Element._ALLOWED_NAME_PATTERN.match(attr_name), (
'attribute name %s is not allowed' % attr_name)
self._attr[attr_name] = value
return self
def add_child(self, node):
self._children.append(node)
return self
def add_children(self, node_list):
self._children += node_list.list
return self
def add_text(self, text):
return self.add_child(Text(text))
@property
def sanitized(self):
"""Santize the element and its descendants."""
assert Element._ALLOWED_NAME_PATTERN.match(self._tag_name), (
'tag name %s is not allowed' % self._tag_name)
buff = '<' + self._tag_name
for attr_name, value in sorted(self._attr.items()):
if attr_name == 'className':
attr_name = 'class'
if value is None:
value = ''
buff += ' %s="%s"' % (
attr_name, escape(value))
if self._children:
buff += '>'
for child in self._children:
buff += child.sanitized
buff += '</%s>' % self._tag_name
elif self._tag_name.lower() in Element._VOID_ELEMENTS:
buff += '/>'
else:
buff += '></%s>' % self._tag_name
return buff
class ScriptElement(Element):
"""Represents an HTML <script> element."""
def __init__(self, **attr):
super(ScriptElement, self).__init__('script', **attr)
def add_child(self, unused_node):
raise ValueError()
def add_children(self, unused_nodes):
raise ValueError()
def add_text(self, text):
"""Add the script body."""
class Script(Node):
def __init__(self, script):
self._script = script
@property
def sanitized(self):
if '</script>' in self._script:
raise ValueError('End script tag forbidden')
return self._script
self._children.append(Script(text))
class Entity(Node):
"""Holds an XML entity."""
ENTITY_PATTERN = re.compile('^&([a-zA-Z]+|#[0-9]+|#x[0-9a-fA-F]+);$')
def __init__(self, entity):
assert Entity.ENTITY_PATTERN.match(entity)
self._entity = entity
@property
def sanitized(self):
assert Entity.ENTITY_PATTERN.match(self._entity)
return self._entity
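# Illustrative usage sketch (added for demonstration; not part of the
# original module). Element sorts attributes, maps className to class,
# and escapes attribute values and text children.
def _demo_safe_dom():
    el = Element('a', href='/page?x=1&y=2', className='btn')
    el.add_text('link & text')
    # -> '<a class="btn" href="/page?x=1&amp;y=2">link &amp; text</a>'
    return el.sanitized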
| apache-2.0 |
thomasalrin/Ghost | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/styles/__init__.py | 364 | 2117 | # -*- coding: utf-8 -*-
"""
pygments.styles
~~~~~~~~~~~~~~~
Contains built-in styles.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.plugin import find_plugin_styles
from pygments.util import ClassNotFound
#: Maps style names to 'submodule::classname'.
STYLE_MAP = {
'default': 'default::DefaultStyle',
'emacs': 'emacs::EmacsStyle',
'friendly': 'friendly::FriendlyStyle',
'colorful': 'colorful::ColorfulStyle',
'autumn': 'autumn::AutumnStyle',
'murphy': 'murphy::MurphyStyle',
'manni': 'manni::ManniStyle',
'monokai': 'monokai::MonokaiStyle',
'perldoc': 'perldoc::PerldocStyle',
'pastie': 'pastie::PastieStyle',
'borland': 'borland::BorlandStyle',
'trac': 'trac::TracStyle',
'native': 'native::NativeStyle',
'fruity': 'fruity::FruityStyle',
'bw': 'bw::BlackWhiteStyle',
'vim': 'vim::VimStyle',
'vs': 'vs::VisualStudioStyle',
'tango': 'tango::TangoStyle',
'rrt': 'rrt::RrtStyle',
}
def get_style_by_name(name):
if name in STYLE_MAP:
mod, cls = STYLE_MAP[name].split('::')
builtin = "yes"
else:
for found_name, style in find_plugin_styles():
if name == found_name:
return style
# perhaps it got dropped into our styles package
builtin = ""
mod = name
cls = name.title() + "Style"
try:
mod = __import__('pygments.styles.' + mod, None, None, [cls])
except ImportError:
raise ClassNotFound("Could not find style module %r" % mod +
(builtin and ", though it should be builtin") + ".")
try:
return getattr(mod, cls)
except AttributeError:
raise ClassNotFound("Could not find style class %r in style module." % cls)
def get_all_styles():
"""Return an generator for all styles by name,
both builtin and plugin."""
for name in STYLE_MAP:
yield name
for name, _ in find_plugin_styles():
yield name
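# Illustrative usage (not part of the original module):
#
#     from pygments.styles import get_style_by_name
#     get_style_by_name('monokai')    # -> the MonokaiStyle class
#     get_style_by_name('nope')       # raises ClassNotFound
#     sorted(get_all_styles())        # builtin names plus plugin styles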
| mit |
YannThorimbert/ThorPy-1.4.1 | thorpy/painting/painters/optionnal/human.py | 5 | 3233 | from pygame import BLEND_RGBA_MIN
from pygame import Surface
from thorpy._utils.colorscomputing import grow_color, normalize_color
from thorpy.painting.painters.roundrect import RoundRect
from thorpy.painting.painters.classicframe import ClassicFrame
from thorpy.painting.graphics import linear_v_monogradation
from thorpy.miscgui import style
class Human(ClassicFrame):
def __init__(self, size=None, color=None, clip="auto", radius_ext=None,
radius_int=None, pressed=False, dark=None, light=None, thick=1,
                 hovered=False, border_color=None):  # TODO: change border
"""If radius is in the range [0, 1], self.radius_value is the fraction
of radius * min(size), else it is interpreted as a raw pixel value.
"""
if clip == "auto":
inflation = -2 * thick
clip = (inflation, inflation)
ClassicFrame.__init__(self,
size=size,
color=color,
pressed=pressed,
hovered=hovered,
dark=dark,
light=light,
thick=thick,
clip=clip)
radius_value = style.DEF_RADIUS if radius_ext is None else radius_ext
if 0. <= radius_value <= 1.:
radius_value = min(self.size) * radius_value
self.radius_ext = radius_value
self.radius_int = radius_int
        if radius_int is None:
self.radius_int = self.radius_ext - self.thick
self.border_color = style.BORDER_FACT if border_color is None else border_color
if isinstance(self.border_color, float):
self.border_color = normalize_color(grow_color(self.border_color,
self.color))
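    # Illustrative note (added): with size=(100, 40) and radius_ext=0.5,
    # the outer radius becomes 0.5 * min(100, 40) = 20 pixels; values
    # greater than 1 are taken as raw pixel radii.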
def draw(self):
if self.hovered:
exterior = RoundRect(self.size, style.COLOR_BULK_HOVER, self.clip,
self.radius_ext)
else:
exterior = RoundRect(self.size, self.border_color, self.clip,
self.radius_ext)
w, h = (self.size[0] - 2*self.thick, self.size[1] - 2*self.thick)
w = 0 if w < 0 else w
h = 0 if h < 0 else h
int_size = (w, h)
if self.pressed:
interior = RoundRect(int_size, self.color, self.clip, self.radius_int)
else:
interior = RoundRect(int_size, self.light, self.clip, self.radius_int)
sext = exterior.draw()
sint = interior.draw()
degrad = Surface(int_size)
if self.pressed:
linear_v_monogradation(degrad, 0, int(h), self.color, self.dark)
sint.blit(degrad, (0, 0), special_flags=BLEND_RGBA_MIN)
else:
linear_v_monogradation(degrad, 0, int(h), self.dark, self.color)
sint.blit(degrad, (0, 0), special_flags=BLEND_RGBA_MIN)
sext.blit(sint, (self.thick, self.thick))
return sext
def set_color(self, color):
ClassicFrame.set_color(self, color)
if len(color) == 4:
self.border_color = tuple(list(self.border_color) + [color[3]]) | mit |
bancek/egradebook | src/lib/django/utils/unittest/result.py | 570 | 6105 | """Test result object"""
import sys
import traceback
import unittest
from StringIO import StringIO
from django.utils.unittest import util
from django.utils.unittest.compatibility import wraps
__unittest = True
def failfast(method):
@wraps(method)
def inner(self, *args, **kw):
if getattr(self, 'failfast', False):
self.stop()
return method(self, *args, **kw)
return inner
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(unittest.TestResult):
"""Holder for test result information.
Test results are automatically managed by the TestCase and TestSuite
classes, and do not need to be explicitly manipulated by writers of tests.
Each instance holds the total number of tests run, and collections of
failures and errors that occurred among those test runs. The collections
contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
formatted traceback of the error that occurred.
"""
_previousTestClass = None
_moduleSetUpFailed = False
def __init__(self):
self.failfast = False
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
self.buffer = False
self._stdout_buffer = None
self._stderr_buffer = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
self._mirrorOutput = False
def startTest(self, test):
"Called when the given test is about to be run"
self.testsRun += 1
self._mirrorOutput = False
if self.buffer:
if self._stderr_buffer is None:
self._stderr_buffer = StringIO()
self._stdout_buffer = StringIO()
sys.stdout = self._stdout_buffer
sys.stderr = self._stderr_buffer
def startTestRun(self):
"""Called once before any tests are executed.
See startTest for a method called before each test.
"""
def stopTest(self, test):
"""Called when the given test has been run"""
if self.buffer:
if self._mirrorOutput:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
self._original_stdout.write(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
self._original_stderr.write(STDERR_LINE % error)
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._stdout_buffer.seek(0)
self._stdout_buffer.truncate()
self._stderr_buffer.seek(0)
self._stderr_buffer.truncate()
self._mirrorOutput = False
def stopTestRun(self):
"""Called once after all tests are executed.
See stopTest for a method called after each test.
"""
@failfast
def addError(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.errors.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
@failfast
def addFailure(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info()."""
self.failures.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
def addSuccess(self, test):
"Called when a test has completed successfully"
pass
def addSkip(self, test, reason):
"""Called when a test is skipped."""
self.skipped.append((test, reason))
def addExpectedFailure(self, test, err):
"""Called when an expected failure/error occured."""
self.expectedFailures.append(
(test, self._exc_info_to_string(err, test)))
@failfast
def addUnexpectedSuccess(self, test):
"""Called when a test was expected to fail, but succeed."""
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
"Tells whether or not this result was a success"
return (len(self.failures) + len(self.errors) == 0)
def stop(self):
"Indicates that the tests should be aborted"
self.shouldStop = True
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
msgLines.append(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
msgLines.append(STDERR_LINE % error)
return ''.join(msgLines)
def _is_relevant_tb_level(self, tb):
return '__unittest' in tb.tb_frame.f_globals
def _count_relevant_tb_levels(self, tb):
length = 0
while tb and not self._is_relevant_tb_level(tb):
length += 1
tb = tb.tb_next
return length
def __repr__(self):
return "<%s run=%i errors=%i failures=%i>" % \
(util.strclass(self.__class__), self.testsRun, len(self.errors),
len(self.failures))
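# Illustrative sketch (not part of the original module): driving a
# TestResult by hand; a real run passes actual TestCase instances.
def _demo_result_bookkeeping():
    result = TestResult()
    result.startTest(None)   # the test object is only bookkept here
    result.addSuccess(None)
    result.stopTest(None)
    return result.testsRun, result.wasSuccessful()   # -> (1, True)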
| gpl-3.0 |
ttsda/beets | beets/dbcore/__init__.py | 25 | 1079 | # This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""DBCore is an abstract database package that forms the basis for beets'
Library.
"""
from __future__ import absolute_import
from .db import Model, Database
from .query import Query, FieldQuery, MatchQuery, AndQuery, OrQuery
from .types import Type
from .queryparse import query_from_strings
from .queryparse import sort_from_strings
from .queryparse import parse_sorted_query
from .query import InvalidQueryError
# flake8: noqa
| mit |
40223220/w16b_test | static/Brython3.1.1-20150328-091302/Lib/shutil.py | 720 | 39101 | """Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
import tarfile
try:
import bz2
del bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format",
"get_unpack_formats", "register_unpack_format",
"unregister_unpack_format", "unpack_archive",
"ignore_patterns", "chown", "which"]
# disk_usage is added later, if available on the platform
class Error(EnvironmentError):
pass
class SpecialFileError(EnvironmentError):
"""Raised when trying to do a kind of operation (e.g. copying) which is
not supported on a special file (e.g. a named pipe)"""
class ExecError(EnvironmentError):
"""Raised when a command could not be executed"""
class ReadError(EnvironmentError):
"""Raised when an archive cannot be read"""
class RegistryError(Exception):
"""Raised when a registry operation with the archiving
    and unpacking registries fails"""
try:
WindowsError
except NameError:
WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
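# Illustrative usage (not part of the original module): copyfileobj only
# needs read()/write(), so in-memory streams work as well as real files.
def _demo_copyfileobj():
    import io
    src, dst = io.BytesIO(b'payload'), io.BytesIO()
    copyfileobj(src, dst)
    return dst.getvalue()   # -> b'payload'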
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst, *, follow_symlinks=True):
"""Copy data from src to dst.
If follow_symlinks is not set and src is a symbolic link, a new
symlink will be created instead of copying the file it points to.
"""
if _samefile(src, dst):
raise Error("`%s` and `%s` are the same file" % (src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except OSError:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if stat.S_ISFIFO(st.st_mode):
raise SpecialFileError("`%s` is a named pipe" % fn)
if not follow_symlinks and os.path.islink(src):
os.symlink(os.readlink(src), dst)
else:
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
copyfileobj(fsrc, fdst)
return dst
def copymode(src, dst, *, follow_symlinks=True):
"""Copy mode bits from src to dst.
If follow_symlinks is not set, symlinks aren't followed if and only
if both `src` and `dst` are symlinks. If `lchmod` isn't available
(e.g. Linux) this method does nothing.
"""
if not follow_symlinks and os.path.islink(src) and os.path.islink(dst):
if hasattr(os, 'lchmod'):
stat_func, chmod_func = os.lstat, os.lchmod
else:
return
elif hasattr(os, 'chmod'):
stat_func, chmod_func = os.stat, os.chmod
else:
return
st = stat_func(src)
chmod_func(dst, stat.S_IMODE(st.st_mode))
if hasattr(os, 'listxattr'):
def _copyxattr(src, dst, *, follow_symlinks=True):
"""Copy extended filesystem attributes from `src` to `dst`.
Overwrite existing attributes.
If `follow_symlinks` is false, symlinks won't be followed.
"""
try:
names = os.listxattr(src, follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno not in (errno.ENOTSUP, errno.ENODATA):
raise
return
for name in names:
try:
value = os.getxattr(src, name, follow_symlinks=follow_symlinks)
os.setxattr(dst, name, value, follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA):
raise
else:
def _copyxattr(*args, **kwargs):
pass
def copystat(src, dst, *, follow_symlinks=True):
"""Copy all stat info (mode bits, atime, mtime, flags) from src to dst.
If the optional flag `follow_symlinks` is not set, symlinks aren't followed if and
only if both `src` and `dst` are symlinks.
"""
def _nop(*args, ns=None, follow_symlinks=None):
pass
# follow symlinks (aka don't not follow symlinks)
follow = follow_symlinks or not (os.path.islink(src) and os.path.islink(dst))
if follow:
# use the real function if it exists
def lookup(name):
return getattr(os, name, _nop)
else:
# use the real function only if it exists
# *and* it supports follow_symlinks
def lookup(name):
fn = getattr(os, name, _nop)
if fn in os.supports_follow_symlinks:
return fn
return _nop
st = lookup("stat")(src, follow_symlinks=follow)
mode = stat.S_IMODE(st.st_mode)
lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns),
follow_symlinks=follow)
try:
lookup("chmod")(dst, mode, follow_symlinks=follow)
except NotImplementedError:
# if we got a NotImplementedError, it's because
# * follow_symlinks=False,
# * lchown() is unavailable, and
# * either
# * fchownat() is unavailable or
# * fchownat() doesn't implement AT_SYMLINK_NOFOLLOW.
# (it returned ENOSUP.)
# therefore we're out of options--we simply cannot chown the
# symlink. give up, suppress the error.
# (which is what shutil always did in this circumstance.)
pass
if hasattr(st, 'st_flags'):
try:
lookup("chflags")(dst, st.st_flags, follow_symlinks=follow)
except OSError as why:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and why.errno == getattr(errno, err):
break
else:
raise
_copyxattr(src, dst, follow_symlinks=follow)
def copy(src, dst, *, follow_symlinks=True):
"""Copy data and mode bits ("cp src dst"). Return the file's destination.
The destination may be a directory.
If follow_symlinks is false, symlinks won't be followed. This
resembles GNU's "cp -P src dst".
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst, follow_symlinks=follow_symlinks)
copymode(src, dst, follow_symlinks=follow_symlinks)
return dst
def copy2(src, dst, *, follow_symlinks=True):
"""Copy data and all stat info ("cp -p src dst"). Return the file's
destination."
The destination may be a directory.
If follow_symlinks is false, symlinks won't be followed. This
resembles GNU's "cp -P src dst".
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst, follow_symlinks=follow_symlinks)
copystat(src, dst, follow_symlinks=follow_symlinks)
return dst
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
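# Illustrative usage (not part of the original module):
def _demo_ignore_patterns():
    ignore = ignore_patterns('*.pyc', '*~')
    # For each visited directory, the callable returns the names to skip.
    return ignore('/src', ['a.py', 'a.pyc', 'notes~'])   # -> {'a.pyc', 'notes~'}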
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False):
"""Recursively copy a directory tree.
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed by the symlink doesn't
exist, an exception will be added in the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
# We can't just leave it to `copy_function` because legacy
# code with a custom `copy_function` may rely on copytree
# doing the right thing.
os.symlink(linkto, dstname)
copystat(srcname, dstname, follow_symlinks=not symlinks)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
# otherwise let the copy occurs. copy2 will raise an error
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
return dst
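# Illustrative sketch (not part of the original module): a hand-written
# ignore callable, as described in the copytree docstring above.
def _demo_copytree_ignore(src, dst):
    def ignore(directory, names):
        # Skip VCS metadata and log files in every visited directory.
        return {n for n in names if n == '.git' or n.endswith('.log')}
    return copytree(src, dst, ignore=ignore)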
# version vulnerable to race conditions
def _rmtree_unsafe(path, onerror):
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
_rmtree_unsafe(fullname, onerror)
else:
try:
os.unlink(fullname)
except os.error:
onerror(os.unlink, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
# Version using fd-based APIs to protect against races
def _rmtree_safe_fd(topfd, path, onerror):
names = []
try:
names = os.listdir(topfd)
except OSError as err:
err.filename = path
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
orig_st = os.stat(name, dir_fd=topfd, follow_symlinks=False)
mode = orig_st.st_mode
except OSError:
mode = 0
if stat.S_ISDIR(mode):
try:
dirfd = os.open(name, os.O_RDONLY, dir_fd=topfd)
except OSError:
onerror(os.open, fullname, sys.exc_info())
else:
try:
if os.path.samestat(orig_st, os.fstat(dirfd)):
_rmtree_safe_fd(dirfd, fullname, onerror)
try:
os.rmdir(name, dir_fd=topfd)
except OSError:
onerror(os.rmdir, fullname, sys.exc_info())
else:
try:
# This can only happen if someone replaces
# a directory with a symlink after the call to
# stat.S_ISDIR above.
raise OSError("Cannot call rmtree on a symbolic "
"link")
except OSError:
onerror(os.path.islink, fullname, sys.exc_info())
finally:
os.close(dirfd)
else:
try:
os.unlink(name, dir_fd=topfd)
except OSError:
onerror(os.unlink, fullname, sys.exc_info())
_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <=
os.supports_dir_fd and
os.listdir in os.supports_fd and
os.stat in os.supports_follow_symlinks)
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is platform and implementation dependent;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
if _use_fd_functions:
# While the unsafe rmtree works fine on bytes, the fd based does not.
if isinstance(path, bytes):
path = os.fsdecode(path)
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
try:
orig_st = os.lstat(path)
except Exception:
onerror(os.lstat, path, sys.exc_info())
return
try:
fd = os.open(path, os.O_RDONLY)
except Exception:
onerror(os.lstat, path, sys.exc_info())
return
try:
if os.path.samestat(orig_st, os.fstat(fd)):
_rmtree_safe_fd(fd, path, onerror)
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
else:
try:
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
finally:
os.close(fd)
else:
return _rmtree_unsafe(path, onerror)
# Allow introspection of whether or not the hardening against symlink
# attacks is supported on the current platform
rmtree.avoids_symlink_attacks = _use_fd_functions
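# Illustrative sketch (not part of the original module): an onerror
# handler with the (func, path, exc_info) signature described above,
# which reports failures instead of aborting the removal.
def _demo_rmtree_keep_going(path):
    def onerror(func, failed_path, exc_info):
        print('%s failed on %r: %s' % (func.__name__, failed_path,
                                       exc_info[1]))
    rmtree(path, onerror=onerror)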
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
return os.path.basename(path.rstrip(os.path.sep))
def move(src, dst):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command. Return the file or directory's
destination.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed. Symlinks are
recreated under the new name if os.rename() fails because of cross
filesystem renames.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
os.rename(src, real_dst)
except OSError:
if os.path.islink(src):
linkto = os.readlink(src)
os.symlink(linkto, real_dst)
os.unlink(src)
elif os.path.isdir(src):
if _destinsrc(src, dst):
raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
copytree(src, real_dst, symlinks=True)
rmtree(src)
else:
copy2(src, real_dst)
os.unlink(src)
return real_dst
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
owner=None, group=None, logger=None):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'.
'compress' must be "gzip" (the default), "bzip2", or None.
'owner' and 'group' can be used to define an owner and a group for the
archive that is being built. If not provided, the current owner and group
will be used.
The output tar file will be named 'base_name' + ".tar", possibly plus
the appropriate compression extension (".gz", or ".bz2").
Returns the output filename.
"""
tar_compression = {'gzip': 'gz', None: ''}
compress_ext = {'gzip': '.gz'}
if _BZ2_SUPPORTED:
tar_compression['bzip2'] = 'bz2'
compress_ext['bzip2'] = '.bz2'
# flags for compression program, each element of list will be an argument
if compress is not None and compress not in compress_ext:
raise ValueError("bad value for 'compress', or compression format not "
"supported : {0}".format(compress))
archive_name = base_name + '.tar' + compress_ext.get(compress, '')
archive_dir = os.path.dirname(archive_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# creating the tarball
if logger is not None:
logger.info('Creating tar archive')
uid = _get_uid(owner)
gid = _get_gid(group)
def _set_uid_gid(tarinfo):
if gid is not None:
tarinfo.gid = gid
tarinfo.gname = group
if uid is not None:
tarinfo.uid = uid
tarinfo.uname = owner
return tarinfo
if not dry_run:
tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
try:
tar.add(base_dir, filter=_set_uid_gid)
finally:
tar.close()
return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
# XXX see if we want to keep an external call here
if verbose:
zipoptions = "-r"
else:
zipoptions = "-rq"
from distutils.errors import DistutilsExecError
from distutils.spawn import spawn
try:
spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
except DistutilsExecError:
# XXX really should distinguish between "couldn't find
# external 'zip' command" and "zip failed".
        raise ExecError("unable to create zip file '%s': "
                        "could neither import the 'zipfile' module nor "
                        "find a standalone zip utility" % zip_filename)
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# If zipfile module is not available, try spawning an external 'zip'
# command.
try:
import zipfile
except ImportError:
zipfile = None
if zipfile is None:
_call_external_zip(base_dir, zip_filename, verbose, dry_run)
else:
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zip.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
zip.close()
return zip_filename
_ARCHIVE_FORMATS = {
'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (_make_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
"bzip2'ed tar-file")
def get_archive_formats():
"""Returns a list of supported formats for archiving and unarchiving.
Each element of the returned sequence is a tuple (name, description)
"""
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
def register_archive_format(name, function, extra_args=None, description=''):
"""Registers an archive format.
name is the name of the format. function is the callable that will be
used to create archives. If provided, extra_args is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_archive_formats() function.
"""
if extra_args is None:
extra_args = []
if not callable(function):
raise TypeError('The %s object is not callable' % function)
if not isinstance(extra_args, (tuple, list)):
raise TypeError('extra_args needs to be a sequence')
for element in extra_args:
if not isinstance(element, (tuple, list)) or len(element) !=2:
raise TypeError('extra_args elements are : (arg_name, value)')
_ARCHIVE_FORMATS[name] = (function, extra_args, description)
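# Illustrative registration (hypothetical: assumes a make_xz_tarball
# callable exists with the same signature as _make_tarball):
#
#     register_archive_format('xztar', make_xz_tarball,
#                             extra_args=[('compress', 'xz')],
#                             description="xz'ed tar-file")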
def unregister_archive_format(name):
del _ARCHIVE_FORMATS[name]
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "bztar"
or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError("unknown archive format '%s'" % format)
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
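# Illustrative usage (not part of the original module):
def _demo_make_archive():
    # Archive ./build into project.tar.gz; paths inside the archive are
    # rooted at 'build'.
    return make_archive('project', 'gztar', root_dir='.', base_dir='build')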
def get_unpack_formats():
"""Returns a list of supported formats for unpacking.
Each element of the returned sequence is a tuple
(name, extensions, description)
"""
formats = [(name, info[0], info[3]) for name, info in
_UNPACK_FORMATS.items()]
formats.sort()
return formats
def _check_unpack_options(extensions, function, extra_args):
"""Checks what gets registered as an unpacker."""
# first make sure no other unpacker is registered for this extension
existing_extensions = {}
for name, info in _UNPACK_FORMATS.items():
for ext in info[0]:
existing_extensions[ext] = name
for extension in extensions:
if extension in existing_extensions:
msg = '%s is already registered for "%s"'
raise RegistryError(msg % (extension,
existing_extensions[extension]))
if not callable(function):
raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
description=''):
"""Registers an unpack format.
`name` is the name of the format. `extensions` is a list of extensions
corresponding to the format.
`function` is the callable that will be
used to unpack archives. The callable will receive archives to unpack.
If it's unable to handle an archive, it needs to raise a ReadError
exception.
If provided, `extra_args` is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_unpack_formats() function.
"""
if extra_args is None:
extra_args = []
_check_unpack_options(extensions, function, extra_args)
_UNPACK_FORMATS[name] = extensions, function, extra_args, description
def unregister_unpack_format(name):
"""Removes the pack format from the registery."""
del _UNPACK_FORMATS[name]
def _ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _unpack_zipfile(filename, extract_dir):
"""Unpack zip `filename` to `extract_dir`
"""
try:
import zipfile
except ImportError:
raise ReadError('zlib not supported, cannot unpack this archive.')
if not zipfile.is_zipfile(filename):
raise ReadError("%s is not a zip file" % filename)
zip = zipfile.ZipFile(filename)
try:
for info in zip.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name:
continue
target = os.path.join(extract_dir, *name.split('/'))
if not target:
continue
_ensure_directory(target)
if not name.endswith('/'):
# file
data = zip.read(info.filename)
f = open(target, 'wb')
try:
f.write(data)
finally:
f.close()
del data
finally:
zip.close()
def _unpack_tarfile(filename, extract_dir):
"""Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise ReadError(
"%s is not a compressed or uncompressed tar file" % filename)
try:
tarobj.extractall(extract_dir)
finally:
tarobj.close()
_UNPACK_FORMATS = {
'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
"bzip2'ed tar-file")
def _find_unpack_format(filename):
for name, info in _UNPACK_FORMATS.items():
for extension in info[0]:
if filename.endswith(extension):
return name
return None
def unpack_archive(filename, extract_dir=None, format=None):
"""Unpack an archive.
`filename` is the name of the archive.
`extract_dir` is the name of the target directory, where the archive
is unpacked. If not provided, the current working directory is used.
`format` is the archive format: one of "zip", "tar", or "gztar". Or any
other registered format. If not provided, unpack_archive will use the
filename extension and see if an unpacker was registered for that
extension.
In case none is found, a ValueError is raised.
"""
if extract_dir is None:
extract_dir = os.getcwd()
if format is not None:
try:
format_info = _UNPACK_FORMATS[format]
except KeyError:
raise ValueError("Unknown unpack format '{0}'".format(format))
func = format_info[1]
func(filename, extract_dir, **dict(format_info[2]))
else:
# we need to look at the registered unpackers supported extensions
format = _find_unpack_format(filename)
if format is None:
raise ReadError("Unknown archive format '{0}'".format(filename))
func = _UNPACK_FORMATS[format][1]
kwargs = dict(_UNPACK_FORMATS[format][2])
func(filename, extract_dir, **kwargs)
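# Illustrative usage (not part of the original module):
def _demo_unpack_archive():
    # The format is inferred from the filename: '.tar.gz' maps to the
    # registered 'gztar' unpacker.
    unpack_archive('project.tar.gz', extract_dir='out')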
if hasattr(os, 'statvfs'):
__all__.append('disk_usage')
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
def disk_usage(path):
"""Return disk usage statistics about the given path.
Returned value is a named tuple with attributes 'total', 'used' and
'free', which are the amount of total, used and free space, in bytes.
"""
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return _ntuple_diskusage(total, used, free)
elif os.name == 'nt':
import nt
__all__.append('disk_usage')
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
def disk_usage(path):
"""Return disk usage statistics about the given path.
Returned values is a named tuple with attributes 'total', 'used' and
'free', which are the amount of total, used and free space, in bytes.
"""
total, free = nt._getdiskusage(path)
used = total - free
return _ntuple_diskusage(total, used, free)
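# Illustrative usage, on platforms where disk_usage is defined above:
#
#     total, used, free = disk_usage('/')
#     print('free: %.1f GiB' % (free / 2 ** 30))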
def chown(path, user=None, group=None):
"""Change owner user and group of the given path.
user and group can be the uid/gid or the user/group names, and in that case,
they are converted to their respective uid/gid.
"""
if user is None and group is None:
raise ValueError("user and/or group must be set")
_user = user
_group = group
# -1 means don't change it
if user is None:
_user = -1
# user can either be an int (the uid) or a string (the system username)
elif isinstance(user, str):
_user = _get_uid(user)
if _user is None:
raise LookupError("no such user: {!r}".format(user))
if group is None:
_group = -1
elif not isinstance(group, int):
_group = _get_gid(group)
if _group is None:
raise LookupError("no such group: {!r}".format(group))
os.chown(path, _user, _group)
def get_terminal_size(fallback=(80, 24)):
"""Get the size of the terminal window.
For each of the two dimensions, the environment variable, COLUMNS
and LINES respectively, is checked. If the variable is defined and
the value is a positive integer, it is used.
When COLUMNS or LINES is not defined, which is the common case,
the terminal connected to sys.__stdout__ is queried
by invoking os.get_terminal_size.
If the terminal size cannot be successfully queried, either because
the system doesn't support querying, or because we are not
connected to a terminal, the value given in fallback parameter
is used. Fallback defaults to (80, 24) which is the default
size used by many terminal emulators.
The value returned is a named tuple of type os.terminal_size.
"""
# columns, lines are the working values
try:
columns = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
columns = 0
try:
lines = int(os.environ['LINES'])
except (KeyError, ValueError):
lines = 0
# only query if necessary
if columns <= 0 or lines <= 0:
try:
size = os.get_terminal_size(sys.__stdout__.fileno())
except (NameError, OSError):
size = os.terminal_size(fallback)
if columns <= 0:
columns = size.columns
if lines <= 0:
lines = size.lines
return os.terminal_size((columns, lines))
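# Illustrative usage (not part of the original module):
#
#     columns, lines = get_terminal_size()
#     print('-' * columns)   # a rule spanning the terminal width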
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if not normdir in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
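# Illustrative usage (not part of the original module):
#
#     which('python')                    # e.g. '/usr/bin/python', or None
#     which('tool', mode=os.X_OK, path='/opt/tools:/usr/local/bin')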
| agpl-3.0 |
terbolous/CouchPotatoServer | libs/pyutil/scripts/try_decoding.py | 106 | 3163 | #!/usr/bin/env python
import binascii, codecs, encodings, locale, os, sys, zlib
import argparse
def listcodecs(dir):
names = []
for filename in os.listdir(dir):
if filename[-3:] != '.py':
continue
name = filename[:-3]
# Check whether we've found a true codec
try:
codecs.lookup(name)
except LookupError:
# Codec not found
continue
except Exception:
# Probably an error from importing the codec; still it's
# a valid code name
pass
names.append(name)
return names
def listem():
return listcodecs(encodings.__path__[0])
def _canonical_encoding(encoding):
if encoding is None:
encoding = 'utf-8'
encoding = encoding.lower()
if encoding == "cp65001":
encoding = 'utf-8'
elif encoding == "us-ascii" or encoding == "646":
encoding = 'ascii'
# sometimes Python returns an encoding name that it doesn't support for conversion
# fail early if this happens
try:
u"test".encode(encoding)
except (LookupError, AttributeError):
raise AssertionError("The character encoding '%s' is not supported for conversion." % (encoding,))
return encoding
def get_output_encoding():
return _canonical_encoding(sys.stdout.encoding or locale.getpreferredencoding())
def get_argv_encoding():
if sys.platform == 'win32':
# Unicode arguments are not supported on Windows yet; see Tahoe-LAFS tickets #565 and #1074.
return 'ascii'
else:
return get_output_encoding()
output_encoding = get_output_encoding()
argv_encoding = get_argv_encoding()
def type_unicode(argstr):
return argstr.decode(argv_encoding)
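# Illustrative invocation (from a shell; 'some.bin' is a hypothetical
# input file): print every codec that decodes the bytes in some.bin,
# including codecs that merely return bytes:
#
#   try_decoding some.bin -a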
def main():
parser = argparse.ArgumentParser(prog="try_decoding", description="Try decoding some bytes with all sorts of different codecs and print out any that decode.")
parser.add_argument('inputfile', help='file to decode or "-" for stdin', type=argparse.FileType('rb'), metavar='INF')
parser.add_argument('-t', '--target', help='unicode string to match against (if any)', type=type_unicode, metavar='T')
parser.add_argument('-a', '--accept-bytes', help='include codecs which return bytes instead of returning unicode (they will be marked with "!!!" in the output)', action='store_true')
args = parser.parse_args()
inb = args.inputfile.read()
for codec in listem():
try:
u = inb.decode(codec)
except (UnicodeDecodeError, IOError, TypeError, IndexError, UnicodeError, ValueError, zlib.error, binascii.Error):
pass
else:
if isinstance(u, unicode):
if args.target:
if args.target != u:
continue
print "%19s" % codec,
print ':',
print u.encode(output_encoding)
else:
if not args.accept_bytes:
continue
print "%19s" % codec,
print "!!! ",
print ':',
print u
if __name__ == "__main__":
main()
| gpl-3.0 |
google/capirca | capirca/lib/windows.py | 1 | 12745 | # Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generic Windows security policy generator; requires subclassing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import string
from absl import logging
from capirca.lib import aclgenerator
from capirca.lib import nacaddr
CMD_PREFIX = 'netsh ipsec static add '
class Term(aclgenerator.Term):
"""Generate generic windows policy terms."""
_PLATFORM = 'windows'
_COMMENT_FORMAT = string.Template(': $comment')
# filter rules
_ACTION_TABLE = {}
def __init__(self, term, filter_name, filter_action, af='inet'):
"""Setup a new term.
Args:
term: A policy.Term object to represent in windows_ipsec.
filter_name: The name of the filter chan to attach the term to.
filter_action: The default action of the filter.
af: Which address family ('inet' or 'inet6') to apply the term to.
Raises:
UnsupportedFilterError: Filter is not supported.
"""
super(Term, self).__init__(term)
self.term = term # term object
self.filter = filter_name # actual name of filter
self.default_action = filter_action
self.options = []
self.af = af
if af == 'inet6':
self._all_ips = nacaddr.IPv6('::/0')
else:
self._all_ips = nacaddr.IPv4('0.0.0.0/0')
self.term_name = '%s_%s' % (self.filter[:1], self.term.name)
def __str__(self):
# Verify platform specific terms. Skip whole term if platform does not
# match.
if self.term.platform:
if self._PLATFORM not in self.term.platform:
return ''
if self.term.platform_exclude:
if self._PLATFORM in self.term.platform_exclude:
return ''
ret_str = []
# Don't render icmpv6 protocol terms under inet, or icmp under inet6
if ((self.af == 'inet6' and 'icmp' in self.term.protocol) or
(self.af == 'inet' and 'icmpv6' in self.term.protocol)):
logging.debug(self.NO_AF_LOG_PROTO.substitute(term=self.term.name,
proto=self.term.protocol,
af=self.af))
return ''
# append comments to output
ret_str.append(self._COMMENT_FORMAT.substitute(filter=self.filter,
term=self.term_name,
comment=self.term.comment))
# if terms does not specify action, use filter default action
if not self.term.action:
self.term.action[0].value = self.default_action
if self.term.action[0] == 'next':
return ''
if len(self.term.action) > 1:
raise aclgenerator.UnsupportedFilterError('\n%s %s %s %s' % (
'Multiple actions unsupported by', self._PLATFORM,
'\nError in term:', self.term.name))
# protocol
if self.term.protocol:
protocols = self.term.protocol
else:
protocols = ['any']
# addresses
src_addr = self.term.source_address
if not src_addr:
src_addr = [self._all_ips]
dst_addr = self.term.destination_address
if not dst_addr:
dst_addr = [self._all_ips]
if (self.term.source_address_exclude or
self.term.destination_address_exclude):
raise aclgenerator.UnsupportedFilterError('\n%s %s %s %s' % (
'address exclusions unsupported by', self._PLATFORM,
'\nError in term:', self.term.name))
# ports: map the ports into a flat list since multiports aren't supported
(src_ports, dst_ports) = self._HandlePorts(self.term.source_port,
self.term.destination_port)
# The windows ipsec driver requires either 'tcp' or 'udp' to be specified
# if a srcport or dstport is specified. Fail if src or dst ports are
# specified and the protocols are not exactly one or both of 'tcp'
# or 'udp'.
if ((not set(protocols).issubset(set(['tcp', 'udp']))) and
(len(src_ports) > 1 or len(dst_ports) > 1)):
raise aclgenerator.UnsupportedFilterError('%s %s %s' % (
'\n', self.term.name,
'src or dst ports may only be specified with "tcp" and/or "udp".'))
# icmp-types
(icmp_types, protocols) = self._HandleIcmpTypes(self.term.icmp_type,
protocols)
ret_str = []
self._HandlePreRule(ret_str)
self._CartesianProduct(src_addr, dst_addr, protocols, icmp_types, src_ports,
dst_ports, ret_str)
self._HandlePostRule(ret_str)
return '\n'.join(str(v) for v in ret_str if v)
def _HandleIcmpTypes(self, icmp_types, protocols):
"""Perform implementation-specific icmp_type and protocol transforms.
Note that icmp_types or protocols are passed as parameters in case they
are to be munged prior to this function call, and may not be identical
to self.term.* parameters.
Args:
icmp_types: a list of icmp types, e.g., self.term.icmp_types
protocols: a list of protocols, e.g., self.term.protocols
Returns:
A pair of lists of (icmp_types, protocols)
"""
return None, None
def _HandlePorts(self, src_ports, dst_ports):
"""Perform implementation-specific port transforms.
Note that src_ports or dst_ports are passed as parameters in case they
are to be munged prior to this function call, and may not be identical
to self.term.* parameters.
Args:
src_ports: list of source port range tuples, e.g., self.term.source_port
dst_ports: list of destination port range tuples
Returns:
A pair of lists of (src_ports, dst_ports)
"""
return None, None
def _HandlePreRule(self, ret_str):
"""Perform any pre-cartesian product transforms on the ret_str array.
Args:
ret_str: an array of strings that will eventually be joined to form
the string output for the term.
"""
pass
def _CartesianProduct(self, src_addr, dst_addr, protocol, icmp_types,
src_ports, dst_ports, ret_str):
"""Perform any the appropriate cartesian product of the input parameters.
Args:
src_addr: a type(IP) list of the source addresses
dst_addr: a type(IP) list of the destination addresses
protocol: a string list of the protocols
icmp_types: a numeric list of the icmp_types
src_ports: a (start, end) list of the source ports
dst_ports: a (start,end) list of the destination ports
ret_str: an array of strings that will eventually be joined to form
the string output for the term.
"""
pass
def _HandlePostRule(self, ret_str):
"""Perform any port-cartesian product transforms on the ret_str array.
Args:
ret_str: an array of strings that will eventually be joined to form
the string output for the term.
"""
pass
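# Illustrative sketch (not from capirca itself): a platform subclass is
# expected to fill in the hooks above. Assuming the netsh syntax implied by
# CMD_PREFIX, a minimal _CartesianProduct override might look roughly like the
# commented example below; the exact rule format is hypothetical.
#
# class ExampleTerm(Term):
#   def _CartesianProduct(self, src_addr, dst_addr, protocol, icmp_types,
#                         src_ports, dst_ports, ret_str):
#     for src in src_addr:
#       for dst in dst_addr:
#         for proto in protocol:
#           ret_str.append('%sfilter filterlist=%s srcaddr=%s dstaddr=%s '
#                          'protocol=%s' % (CMD_PREFIX, self.filter,
#                                           src, dst, proto))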
class WindowsGenerator(aclgenerator.ACLGenerator):
"""Generates filters and terms from provided policy object."""
_PLATFORM = 'windows'
_DEFAULT_PROTOCOL = 'all'
SUFFIX = '.bat'
_RENDER_PREFIX = None
_DEFAULT_ACTION = 'block'
_TERM = Term
_GOOD_AFS = ['inet', 'inet6']
def _BuildTokens(self):
"""Build supported tokens for platform.
Returns:
tuple containing both supported tokens and sub tokens
"""
supported_tokens, supported_sub_tokens = super(
WindowsGenerator, self)._BuildTokens()
supported_tokens |= {'option'}
supported_tokens -= {'verbatim'}
supported_sub_tokens.update({'action': {'accept', 'deny'}})
del supported_sub_tokens['option']
return supported_tokens, supported_sub_tokens
def _TranslatePolicy(self, pol, exp_info):
"""Translate a policy from objects into strings."""
self.windows_policies = []
current_date = datetime.datetime.utcnow().date()
exp_info_date = current_date + datetime.timedelta(weeks=exp_info)
default_action = None
good_default_actions = ['permit', 'block']
good_options = []
for header, terms in pol.filters:
filter_type = None
if self._PLATFORM not in header.platforms:
continue
filter_options = header.FilterOptions(self._PLATFORM)[1:]
filter_name = header.FilterName(self._PLATFORM)
# ensure all options after the filter name are expected
for opt in filter_options:
if opt not in good_default_actions + self._GOOD_AFS + good_options:
raise aclgenerator.UnsupportedTargetOptionError('%s %s %s %s' % (
'\nUnsupported option found in', self._PLATFORM,
'target definition:', opt))
# Check for matching af
for address_family in self._GOOD_AFS:
if address_family in filter_options:
# should not specify more than one AF in options
if filter_type is not None:
raise aclgenerator.UnsupportedFilterError('%s %s %s %s' % (
'\nMay only specify one of', self._GOOD_AFS,
'in filter options:', filter_options))
filter_type = address_family
if filter_type is None:
filter_type = 'inet'
# does this policy override the default filter actions?
for next_target in header.target:
if next_target.platform == self._PLATFORM:
if len(next_target.options) > 1:
for arg in next_target.options:
if arg in good_default_actions:
default_action = arg
if default_action and default_action not in good_default_actions:
raise aclgenerator.UnsupportedTargetOptionError('%s %s %s %s %s' % (
'\nOnly', ', '.join(good_default_actions),
'default filter action allowed;', default_action, 'used.'))
# add the terms
new_terms = []
term_names = set()
for term in terms:
if term.name in term_names:
raise aclgenerator.DuplicateTermError(
'You have a duplicate term: %s' % term.name)
term_names.add(term.name)
if term.expiration:
if term.expiration <= exp_info_date:
logging.info('INFO: Term %s in policy %s expires '
'in less than two weeks.', term.name, filter_name)
if term.expiration <= current_date:
logging.warning('WARNING: Term %s in policy %s is expired and '
'will not be rendered.', term.name, filter_name)
continue
if 'established' in term.option or 'tcp-established' in term.option:
continue
new_terms.append(self._TERM(term, filter_name, default_action,
filter_type))
self.windows_policies.append((header, filter_name, filter_type,
default_action, new_terms))
def __str__(self):
target = []
pretty_platform = '%s%s' % (self._PLATFORM[0].upper(), self._PLATFORM[1:])
if self._RENDER_PREFIX:
target.append(self._RENDER_PREFIX)
for header, _, filter_type, default_action, terms in self.windows_policies:
# Add comments for this filter
target.append(': %s %s Policy' % (pretty_platform,
header.FilterName(self._PLATFORM)))
self._HandlePolicyHeader(header, target)
# reformat long text comments, if needed
comments = aclgenerator.WrapWords(header.comment, 70)
if comments and comments[0]:
for line in comments:
target.append(': %s' % line)
target.append(':')
# add the p4 tags
target.extend(aclgenerator.AddRepositoryTags(': '))
target.append(': ' + filter_type)
if default_action:
raise aclgenerator.UnsupportedTargetOptionError(
'Windows generator does not support default actions')
# add the terms
for term in terms:
term_str = str(term)
if term_str:
target.append(term_str)
self._HandleTermFooter(header, term, target)
target.append('')
return '\n'.join(target)
def _HandlePolicyHeader(self, header, target):
pass
def _HandleTermFooter(self, header, term, target):
pass
| apache-2.0 |
sclabs/sccms-nonrel | django/templatetags/i18n.py | 154 | 13327 | import re
from django.template import Node, Variable, VariableNode
from django.template import TemplateSyntaxError, TokenParser, Library
from django.template import TOKEN_TEXT, TOKEN_VAR
from django.template.base import _render_value_in_context
from django.utils import translation
from django.utils.encoding import force_unicode
from django.template.defaulttags import token_kwargs
register = Library()
class GetAvailableLanguagesNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
from django.conf import settings
context[self.variable] = [(k, translation.ugettext(v)) for k, v in settings.LANGUAGES]
return ''
class GetLanguageInfoNode(Node):
def __init__(self, lang_code, variable):
self.lang_code = Variable(lang_code)
self.variable = variable
def render(self, context):
lang_code = self.lang_code.resolve(context)
context[self.variable] = translation.get_language_info(lang_code)
return ''
class GetLanguageInfoListNode(Node):
def __init__(self, languages, variable):
self.languages = Variable(languages)
self.variable = variable
def get_language_info(self, language):
# ``language`` is either a language code string or a sequence
# with the language code as its first item
if len(language[0]) > 1:
return translation.get_language_info(language[0])
else:
return translation.get_language_info(str(language))
def render(self, context):
langs = self.languages.resolve(context)
context[self.variable] = [self.get_language_info(lang) for lang in langs]
return ''
class GetCurrentLanguageNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language()
return ''
class GetCurrentLanguageBidiNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language_bidi()
return ''
class TranslateNode(Node):
def __init__(self, filter_expression, noop):
self.noop = noop
self.filter_expression = filter_expression
if isinstance(self.filter_expression.var, basestring):
self.filter_expression.var = Variable(u"'%s'" % self.filter_expression.var)
def render(self, context):
self.filter_expression.var.translate = not self.noop
output = self.filter_expression.resolve(context)
return _render_value_in_context(output, context)
class BlockTranslateNode(Node):
def __init__(self, extra_context, singular, plural=None, countervar=None,
counter=None):
self.extra_context = extra_context
self.singular = singular
self.plural = plural
self.countervar = countervar
self.counter = counter
def render_token_list(self, tokens):
result = []
vars = []
for token in tokens:
if token.token_type == TOKEN_TEXT:
result.append(token.contents)
elif token.token_type == TOKEN_VAR:
result.append(u'%%(%s)s' % token.contents)
vars.append(token.contents)
return ''.join(result), vars
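# For example, the token stream for "This is {{ bar }}." renders to
# (u'This is %(bar)s.', [u'bar']), ready for a (u)gettext lookup and the
# later '%' substitution performed in render() below.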
def render(self, context):
tmp_context = {}
for var, val in self.extra_context.items():
tmp_context[var] = val.resolve(context)
# Update() works like a push(), so the corresponding context.pop() is at
# the end of this function.
context.update(tmp_context)
singular, vars = self.render_token_list(self.singular)
if self.plural and self.countervar and self.counter:
count = self.counter.resolve(context)
context[self.countervar] = count
plural, plural_vars = self.render_token_list(self.plural)
result = translation.ungettext(singular, plural, count)
vars.extend(plural_vars)
else:
result = translation.ugettext(singular)
# Escape all isolated '%' before substituting in the context.
result = re.sub(u'%(?!\()', u'%%', result)
data = dict([(v, _render_value_in_context(context.get(v, ''), context)) for v in vars])
context.pop()
return result % data
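# For example, with a singular block "There is {{ count }} object.", a plural
# block "There are {{ count }} objects." and count resolving to 2, render()
# returns ungettext(u'There is %(count)s object.',
# u'There are %(count)s objects.', 2) with %(count)s filled in from the
# current context.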
def do_get_available_languages(parser, token):
"""
This will store a list of available languages
in the context.
Usage::
{% get_available_languages as languages %}
{% for language in languages %}
...
{% endfor %}
This will just pull the LANGUAGES setting from
your settings file (or the default settings) and
put it into the named variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % args)
return GetAvailableLanguagesNode(args[2])
def do_get_language_info(parser, token):
"""
This will store the language information dictionary for the given language
code in a context variable.
Usage::
{% get_language_info for LANGUAGE_CODE as l %}
{{ l.code }}
{{ l.name }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
"""
args = token.contents.split()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for string as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoNode(args[2], args[4])
def do_get_language_info_list(parser, token):
"""
This will store a list of language information dictionaries for the given
language codes in a context variable. The language codes can be specified
either as a list of strings or a settings.LANGUAGES style tuple (or any
sequence of sequences whose first items are language codes).
Usage::
{% get_language_info_list for LANGUAGES as langs %}
{% for l in langs %}
{{ l.code }}
{{ l.name }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
{% endfor %}
"""
args = token.contents.split()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for sequence as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoListNode(args[2], args[4])
def language_name(lang_code):
return translation.get_language_info(lang_code)['name']
def language_name_local(lang_code):
return translation.get_language_info(lang_code)['name_local']
def language_bidi(lang_code):
return translation.get_language_info(lang_code)['bidi']
def do_get_current_language(parser, token):
"""
This will store the current language in the context.
Usage::
{% get_current_language as language %}
This will fetch the currently active language and
put its value into the ``language`` context
variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageNode(args[2])
def do_get_current_language_bidi(parser, token):
"""
This will store the current language layout in the context.
Usage::
{% get_current_language_bidi as bidi %}
This will fetch the currently active language's layout and
put its value into the ``bidi`` context variable.
True indicates right-to-left layout, otherwise left-to-right
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageBidiNode(args[2])
def do_translate(parser, token):
"""
This will mark a string for translation and will
translate the string for the current language.
Usage::
{% trans "this is a test" %}
This will mark the string for translation so it will
be pulled out by make-messages.py into the .po files
and will run the string through the translation engine.
There is a second form::
{% trans "this is a test" noop %}
This will only mark for translation, but will return
the string unchanged. Use it when you need to store
values into forms that should be translated later on.
You can use variables instead of constant strings
to translate stuff you marked somewhere else::
{% trans variable %}
This will just try to translate the contents of
the variable ``variable``. Make sure that the string
in there is something that is in the .po file.
"""
class TranslateParser(TokenParser):
def top(self):
value = self.value()
# Backwards compatibility fix:
# FilterExpression does not support single-quoted strings,
# so we make a cheap localized fix in order to maintain
# backwards compatibility with existing uses of ``trans``
# where single quote use is supported.
if value[0] == "'":
pos = None
m = re.match("^'([^']+)'(\|.*$)",value)
if m:
value = '"%s"%s' % (m.group(1).replace('"','\\"'),m.group(2))
elif value[-1] == "'":
value = '"%s"' % value[1:-1].replace('"','\\"')
if self.more():
if self.tag() == 'noop':
noop = True
else:
raise TemplateSyntaxError("only option for 'trans' is 'noop'")
else:
noop = False
return (value, noop)
value, noop = TranslateParser(token.contents).top()
return TranslateNode(parser.compile_filter(value), noop)
def do_block_translate(parser, token):
"""
This will translate a block of text with parameters.
Usage::
{% blocktrans with bar=foo|filter boo=baz|filter %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
Additionally, this supports pluralization::
{% blocktrans count count=var|length %}
There is {{ count }} object.
{% plural %}
There are {{ count }} objects.
{% endblocktrans %}
This is much like ngettext, only in template syntax.
The "var as value" legacy format is still supported::
{% blocktrans with foo|filter as bar and baz|filter as boo %}
{% blocktrans count var|length as count %}
"""
bits = token.split_contents()
options = {}
remaining_bits = bits[1:]
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'count':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if len(value) != 1:
raise TemplateSyntaxError('"count" in %r tag expected exactly '
'one keyword argument.' % bits[0])
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
if 'count' in options:
countervar, counter = options['count'].items()[0]
else:
countervar, counter = None, None
extra_context = options.get('with', {})
singular = []
plural = []
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
singular.append(token)
else:
break
if countervar and counter:
if token.contents.strip() != 'plural':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags inside it")
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
plural.append(token)
else:
break
if token.contents.strip() != 'endblocktrans':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents)
return BlockTranslateNode(extra_context, singular, plural, countervar,
counter)
register.tag('get_available_languages', do_get_available_languages)
register.tag('get_language_info', do_get_language_info)
register.tag('get_language_info_list', do_get_language_info_list)
register.tag('get_current_language', do_get_current_language)
register.tag('get_current_language_bidi', do_get_current_language_bidi)
register.tag('trans', do_translate)
register.tag('blocktrans', do_block_translate)
register.filter(language_name)
register.filter(language_name_local)
register.filter(language_bidi)
| bsd-3-clause |
jessrosenfield/pants | src/python/pants/backend/jvm/tasks/ivy_resolve.py | 2 | 7835 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
import time
from textwrap import dedent
from pants.backend.jvm.ivy_utils import IvyUtils
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.tasks.classpath_products import ClasspathProducts
from pants.backend.jvm.tasks.ivy_task_mixin import IvyTaskMixin
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.binaries import binary_util
from pants.invalidation.cache_manager import VersionedTargetSet
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_property
from pants.util.strutil import safe_shlex_split
class IvyResolve(IvyTaskMixin, NailgunTask):
@classmethod
def register_options(cls, register):
super(IvyResolve, cls).register_options(register)
register('--override', action='append',
fingerprint=True,
help='Specifies a jar dependency override in the form: '
'[org]#[name]=(revision|url) '
'Multiple overrides can be specified using repeated invocations of this flag. '
'For example, to specify 2 overrides: '
'--override=com.foo#bar=0.1.2 '
'--override=com.baz#spam=file:///tmp/spam.jar ')
register('--report', action='store_true', default=False,
help='Generate an ivy resolve html report')
register('--open', action='store_true', default=False,
help='Attempt to open the generated ivy resolve report '
'in a browser (implies --report)')
register('--outdir', help='Emit ivy report outputs in to this directory.')
register('--args', action='append',
fingerprint=True,
help='Pass these extra args to ivy.')
register('--confs', action='append', default=['default'],
help='Pass a configuration to ivy in addition to the default ones.')
register('--mutable-pattern',
fingerprint=True,
help='If specified, all artifact revisions matching this pattern will be treated as '
'mutable unless a matching artifact explicitly marks mutable as False.')
cls.register_jvm_tool(register,
'xalan',
classpath=[
JarDependency(org='xalan', name='xalan', rev='2.7.1'),
])
@classmethod
def product_types(cls):
return ['compile_classpath']
@classmethod
def prepare(cls, options, round_manager):
super(IvyResolve, cls).prepare(options, round_manager)
round_manager.require_data('java')
round_manager.require_data('scala')
def __init__(self, *args, **kwargs):
super(IvyResolve, self).__init__(*args, **kwargs)
self._outdir = self.get_options().outdir or os.path.join(self.workdir, 'reports')
self._open = self.get_options().open
self._report = self._open or self.get_options().report
self._args = []
for arg in self.get_options().args:
self._args.extend(safe_shlex_split(arg))
@memoized_property
def confs(self):
# TODO(John Sirois): This supports `IdeGen` and `Resolve` signalling their resolve confs needs.
# Fix those tasks to do their own resolves.
# See: https://github.com/pantsbuild/pants/issues/2177
confs = set(self.get_options().confs)
for conf in ('default', 'sources', 'javadoc'):
if self.context.products.isrequired('jar_map_{conf}'.format(conf=conf)):
confs.add(conf)
return confs
def execute(self):
"""Resolves the specified confs for the configured targets and returns an iterator over
tuples of (conf, jar path).
"""
executor = self.create_java_executor()
targets = self.context.targets()
compile_classpath = self.context.products.get_data('compile_classpath',
init_func=ClasspathProducts.init_func(self.get_options().pants_workdir))
resolve_hash_name = self.resolve(executor=executor,
targets=targets,
classpath_products=compile_classpath,
confs=self.confs,
extra_args=self._args)
if self._report:
self._generate_ivy_report(resolve_hash_name)
def check_artifact_cache_for(self, invalidation_check):
# Ivy resolution is an output dependent on the entire target set, and is not divisible
# by target. So we can only cache it keyed by the entire target set.
global_vts = VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
return [global_vts]
def _generate_ivy_report(self, resolve_hash_name):
def make_empty_report(report, organisation, module, conf):
no_deps_xml_template = dedent("""<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="ivy-report.xsl"?>
<ivy-report version="1.0">
<info
organisation="{organisation}"
module="{module}"
revision="latest.integration"
conf="{conf}"
confs="{conf}"
date="{timestamp}"/>
</ivy-report>
""").format(
organisation=organisation,
module=module,
conf=conf,
timestamp=time.strftime('%Y%m%d%H%M%S'),
)
with open(report, 'w') as report_handle:
print(no_deps_xml_template, file=report_handle)
tool_classpath = self.tool_classpath('xalan')
report = None
org = IvyUtils.INTERNAL_ORG_NAME
name = resolve_hash_name
xsl = os.path.join(self.ivy_cache_dir, 'ivy-report.xsl')
# Xalan needs this dir to exist - ensure that, but do no more - we have no clue where this
# points.
safe_mkdir(self._outdir, clean=False)
for conf in self.confs:
xml_path = self._get_report_path(conf, resolve_hash_name)
if not os.path.exists(xml_path):
# Make it clear that this is not the original report from Ivy by changing its name.
xml_path = xml_path[:-4] + "-empty.xml"
make_empty_report(xml_path, org, name, conf)
out = os.path.join(self._outdir,
'{org}-{name}-{conf}.html'.format(org=org, name=name, conf=conf))
args = ['-IN', xml_path, '-XSL', xsl, '-OUT', out]
# The ivy-report.xsl generates tab links to files with extension 'xml' by
# default; we override that to point to the html files we generate.
args.extend(['-param', 'extension', 'html'])
if 0 != self.runjava(classpath=tool_classpath, main='org.apache.xalan.xslt.Process',
args=args, workunit_name='report'):
raise self.Error('Failed to create html report from xml ivy report.')
# The ivy-report.xsl is already smart enough to generate an html page with tab links to all
# confs for a given report coordinate (org, name). We need only display 1 of the generated
# htmls and the user can then navigate to the others via the tab links.
if report is None:
report = out
css = os.path.join(self._outdir, 'ivy-report.css')
if os.path.exists(css):
os.unlink(css)
shutil.copy(os.path.join(self.ivy_cache_dir, 'ivy-report.css'), self._outdir)
if self._open and report:
binary_util.ui_open(report)
def _get_report_path(self, conf, resolve_hash_name):
try:
return IvyUtils.xml_report_path(self.ivy_cache_dir, resolve_hash_name, conf)
except IvyUtils.IvyResolveReportError as e:
raise self.Error('Failed to generate ivy report: {}'.format(e))
| apache-2.0 |
mhils/mitmproxy | test/mitmproxy/contentviews/test_auto.py | 6 | 1233 | from mitmproxy.contentviews import auto
from mitmproxy.net import http
from mitmproxy.coretypes import multidict
from . import full_eval
def test_view_auto():
v = full_eval(auto.ViewAuto())
f = v(
b"foo",
headers=http.Headers()
)
assert f[0] == "Raw"
f = v(
b"<html></html>",
headers=http.Headers(content_type="text/html")
)
assert f[0] == "HTML"
f = v(
b"foo",
headers=http.Headers(content_type="text/flibble")
)
assert f[0] == "Raw"
f = v(
b"<xml></xml>",
headers=http.Headers(content_type="text/flibble")
)
assert f[0].startswith("XML")
f = v(
b"<svg></svg>",
headers=http.Headers(content_type="image/svg+xml")
)
assert f[0].startswith("XML")
f = v(
b"verybinary",
headers=http.Headers(content_type="image/new-magic-image-format")
)
assert f[0] == "Unknown Image"
f = v(b"\xFF" * 30)
assert f[0] == "Hex"
f = v(
b"",
headers=http.Headers()
)
assert f[0] == "No content"
f = v(
b"",
headers=http.Headers(),
query=multidict.MultiDict([("foo", "bar")]),
)
assert f[0] == "Query"
| mit |
CardinalTesting/kernel_lge_msm8974 | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes the time of the NET_RX softirq entry
# and a list which stacks receive events
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
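# e.g. diff_msec(1000000, 4500000) == 3.5, i.e. 3.5msec between two nsec stamps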
# Display the process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display the process of received packets and the interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
Batterfii/django | django/contrib/auth/decorators.py | 356 | 3049 | from functools import wraps
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import PermissionDenied
from django.shortcuts import resolve_url
from django.utils import six
from django.utils.decorators import available_attrs
from django.utils.six.moves.urllib.parse import urlparse
def user_passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Decorator for views that checks that the user passes the given test,
redirecting to the log-in page if necessary. The test should be a callable
that takes the user object and returns True if the user passes.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if test_func(request.user):
return view_func(request, *args, **kwargs)
path = request.build_absolute_uri()
resolved_login_url = resolve_url(login_url or settings.LOGIN_URL)
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
path, resolved_login_url, redirect_field_name)
return _wrapped_view
return decorator
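# Example usage (illustrative; the predicate and view name are made up):
#
# @user_passes_test(lambda u: u.email.endswith('@example.com'))
# def internal_dashboard(request):
# ...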
def login_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
"""
Decorator for views that checks that the user is logged in, redirecting
to the log-in page if necessary.
"""
actual_decorator = user_passes_test(
lambda u: u.is_authenticated(),
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
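# Example usage (illustrative):
#
# @login_required(login_url='/accounts/login/')
# def profile(request):
# ...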
def permission_required(perm, login_url=None, raise_exception=False):
"""
Decorator for views that checks whether a user has a particular permission
enabled, redirecting to the log-in page if necessary.
If the raise_exception parameter is given, the PermissionDenied exception
is raised.
"""
def check_perms(user):
if isinstance(perm, six.string_types):
perms = (perm, )
else:
perms = perm
# First check if the user has the permission (even anon users)
if user.has_perms(perms):
return True
# In case the 403 handler should be called raise the exception
if raise_exception:
raise PermissionDenied
# As the last resort, show the login form
return False
return user_passes_test(check_perms, login_url=login_url)
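# Example usage (illustrative; the permission string is made up):
#
# @permission_required('polls.can_vote', raise_exception=True)
# def vote(request):
# ...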
| bsd-3-clause |
silveregg/moto | tests/test_swf/responses/test_workflow_executions.py | 2 | 9087 | import boto
from boto.swf.exceptions import SWFResponseError
from datetime import datetime, timedelta
import sure # noqa
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises # noqa
from moto import mock_swf
from moto.core.utils import unix_time
# Utils
@mock_swf
def setup_swf_environment():
conn = boto.connect_swf("the_key", "the_secret")
conn.register_domain("test-domain", "60", description="A test domain")
conn.register_workflow_type(
"test-domain", "test-workflow", "v1.0",
task_list="queue", default_child_policy="TERMINATE",
default_execution_start_to_close_timeout="300",
default_task_start_to_close_timeout="300",
)
conn.register_activity_type("test-domain", "test-activity", "v1.1")
return conn
# StartWorkflowExecution endpoint
@mock_swf
def test_start_workflow_execution():
conn = setup_swf_environment()
wf = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0")
wf.should.contain("runId")
@mock_swf
def test_start_already_started_workflow_execution():
conn = setup_swf_environment()
conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0")
conn.start_workflow_execution.when.called_with(
"test-domain", "uid-abcd1234", "test-workflow", "v1.0"
).should.throw(SWFResponseError)
@mock_swf
def test_start_workflow_execution_on_deprecated_type():
conn = setup_swf_environment()
conn.deprecate_workflow_type("test-domain", "test-workflow", "v1.0")
conn.start_workflow_execution.when.called_with(
"test-domain", "uid-abcd1234", "test-workflow", "v1.0"
).should.throw(SWFResponseError)
# DescribeWorkflowExecution endpoint
@mock_swf
def test_describe_workflow_execution():
conn = setup_swf_environment()
hsh = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0")
run_id = hsh["runId"]
wfe = conn.describe_workflow_execution("test-domain", run_id, "uid-abcd1234")
wfe["executionInfo"]["execution"]["workflowId"].should.equal("uid-abcd1234")
wfe["executionInfo"]["executionStatus"].should.equal("OPEN")
@mock_swf
def test_describe_non_existent_workflow_execution():
conn = setup_swf_environment()
conn.describe_workflow_execution.when.called_with(
"test-domain", "wrong-run-id", "wrong-workflow-id"
).should.throw(SWFResponseError)
# GetWorkflowExecutionHistory endpoint
@mock_swf
def test_get_workflow_execution_history():
conn = setup_swf_environment()
hsh = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0")
run_id = hsh["runId"]
resp = conn.get_workflow_execution_history("test-domain", run_id, "uid-abcd1234")
types = [evt["eventType"] for evt in resp["events"]]
types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled"])
@mock_swf
def test_get_workflow_execution_history_with_reverse_order():
conn = setup_swf_environment()
hsh = conn.start_workflow_execution("test-domain", "uid-abcd1234", "test-workflow", "v1.0")
run_id = hsh["runId"]
resp = conn.get_workflow_execution_history("test-domain", run_id, "uid-abcd1234",
reverse_order=True)
types = [evt["eventType"] for evt in resp["events"]]
types.should.equal(["DecisionTaskScheduled", "WorkflowExecutionStarted"])
@mock_swf
def test_get_workflow_execution_history_on_non_existent_workflow_execution():
conn = setup_swf_environment()
conn.get_workflow_execution_history.when.called_with(
"test-domain", "wrong-run-id", "wrong-workflow-id"
).should.throw(SWFResponseError)
# ListOpenWorkflowExecutions endpoint
@mock_swf
def test_list_open_workflow_executions():
conn = setup_swf_environment()
# One open workflow execution
conn.start_workflow_execution(
'test-domain', 'uid-abcd1234', 'test-workflow', 'v1.0'
)
# One closed workflow execution to make sure it isn't displayed
run_id = conn.start_workflow_execution(
'test-domain', 'uid-abcd12345', 'test-workflow', 'v1.0'
)['runId']
conn.terminate_workflow_execution('test-domain', 'uid-abcd12345',
details='some details',
reason='a more complete reason',
run_id=run_id)
yesterday = datetime.utcnow() - timedelta(days=1)
oldest_date = unix_time(yesterday)
response = conn.list_open_workflow_executions('test-domain',
oldest_date,
workflow_id='test-workflow')
execution_infos = response['executionInfos']
len(execution_infos).should.equal(1)
open_workflow = execution_infos[0]
open_workflow['workflowType'].should.equal({'version': 'v1.0',
'name': 'test-workflow'})
open_workflow.should.contain('startTimestamp')
open_workflow['execution']['workflowId'].should.equal('uid-abcd1234')
open_workflow['execution'].should.contain('runId')
open_workflow['cancelRequested'].should.be(False)
open_workflow['executionStatus'].should.equal('OPEN')
# ListClosedWorkflowExecutions endpoint
@mock_swf
def test_list_closed_workflow_executions():
conn = setup_swf_environment()
# Leave one workflow execution open to make sure it isn't displayed
conn.start_workflow_execution(
'test-domain', 'uid-abcd1234', 'test-workflow', 'v1.0'
)
# One closed workflow execution
run_id = conn.start_workflow_execution(
'test-domain', 'uid-abcd12345', 'test-workflow', 'v1.0'
)['runId']
conn.terminate_workflow_execution('test-domain', 'uid-abcd12345',
details='some details',
reason='a more complete reason',
run_id=run_id)
yesterday = datetime.utcnow() - timedelta(days=1)
oldest_date = unix_time(yesterday)
response = conn.list_closed_workflow_executions(
'test-domain',
start_oldest_date=oldest_date,
workflow_id='test-workflow')
execution_infos = response['executionInfos']
len(execution_infos).should.equal(1)
open_workflow = execution_infos[0]
open_workflow['workflowType'].should.equal({'version': 'v1.0',
'name': 'test-workflow'})
open_workflow.should.contain('startTimestamp')
open_workflow['execution']['workflowId'].should.equal('uid-abcd12345')
open_workflow['execution'].should.contain('runId')
open_workflow['cancelRequested'].should.be(False)
open_workflow['executionStatus'].should.equal('CLOSED')
# TerminateWorkflowExecution endpoint
@mock_swf
def test_terminate_workflow_execution():
conn = setup_swf_environment()
run_id = conn.start_workflow_execution(
"test-domain", "uid-abcd1234", "test-workflow", "v1.0"
)["runId"]
resp = conn.terminate_workflow_execution("test-domain", "uid-abcd1234",
details="some details",
reason="a more complete reason",
run_id=run_id)
resp.should.be.none
resp = conn.get_workflow_execution_history("test-domain", run_id, "uid-abcd1234")
evt = resp["events"][-1]
evt["eventType"].should.equal("WorkflowExecutionTerminated")
attrs = evt["workflowExecutionTerminatedEventAttributes"]
attrs["details"].should.equal("some details")
attrs["reason"].should.equal("a more complete reason")
attrs["cause"].should.equal("OPERATOR_INITIATED")
@mock_swf
def test_terminate_workflow_execution_with_wrong_workflow_or_run_id():
conn = setup_swf_environment()
run_id = conn.start_workflow_execution(
"test-domain", "uid-abcd1234", "test-workflow", "v1.0"
)["runId"]
# terminate workflow execution
conn.terminate_workflow_execution("test-domain", "uid-abcd1234")
# already closed, with run_id
conn.terminate_workflow_execution.when.called_with(
"test-domain", "uid-abcd1234", run_id=run_id
).should.throw(
SWFResponseError, "WorkflowExecution=[workflowId=uid-abcd1234, runId="
)
# already closed, without run_id
conn.terminate_workflow_execution.when.called_with(
"test-domain", "uid-abcd1234"
).should.throw(
SWFResponseError, "Unknown execution, workflowId = uid-abcd1234"
)
# wrong workflow id
conn.terminate_workflow_execution.when.called_with(
"test-domain", "uid-non-existent"
).should.throw(
SWFResponseError, "Unknown execution, workflowId = uid-non-existent"
)
# wrong run_id
conn.terminate_workflow_execution.when.called_with(
"test-domain", "uid-abcd1234", run_id="foo"
).should.throw(
SWFResponseError, "WorkflowExecution=[workflowId=uid-abcd1234, runId="
)
| apache-2.0 |
Balachan27/django | django/core/management/commands/loaddata.py | 294 | 12977 | from __future__ import unicode_literals
import glob
import gzip
import os
import warnings
import zipfile
from itertools import product
from django.apps import apps
from django.conf import settings
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connections, router,
transaction,
)
from django.utils import lru_cache
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.glob import glob_escape
try:
import bz2
has_bz2 = True
except ImportError:
has_bz2 = False
class Command(BaseCommand):
help = 'Installs the named fixture(s) in the database.'
missing_args_message = ("No database fixture specified. Please provide the "
"path of at least one fixture in the command line.")
def add_arguments(self, parser):
parser.add_argument('args', metavar='fixture', nargs='+',
help='Fixture labels.')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a specific database to load '
'fixtures into. Defaults to the "default" database.')
parser.add_argument('--app', action='store', dest='app_label',
default=None, help='Only look for fixtures in the specified app.')
parser.add_argument('--ignorenonexistent', '-i', action='store_true',
dest='ignore', default=False,
help='Ignores entries in the serialized data for fields that do not '
'currently exist on the model.')
def handle(self, *fixture_labels, **options):
self.ignore = options.get('ignore')
self.using = options.get('database')
self.app_label = options.get('app_label')
self.hide_empty = options.get('hide_empty', False)
self.verbosity = options.get('verbosity')
with transaction.atomic(using=self.using):
self.loaddata(fixture_labels)
# Close the DB connection -- unless we're still in a transaction. This
# is required as a workaround for an edge case in MySQL: if the same
# connection is used to create tables, load data, and query, the query
# can return incorrect results. See Django #7572, MySQL #37735.
if transaction.get_autocommit(self.using):
connections[self.using].close()
def loaddata(self, fixture_labels):
connection = connections[self.using]
# Keep a count of the installed objects and fixtures
self.fixture_count = 0
self.loaded_object_count = 0
self.fixture_object_count = 0
self.models = set()
self.serialization_formats = serializers.get_public_serializer_formats()
# Forcing binary mode may be revisited after dropping Python 2 support (see #22399)
self.compression_formats = {
None: (open, 'rb'),
'gz': (gzip.GzipFile, 'rb'),
'zip': (SingleZipReader, 'r'),
}
if has_bz2:
self.compression_formats['bz2'] = (bz2.BZ2File, 'r')
with connection.constraint_checks_disabled():
for fixture_label in fixture_labels:
self.load_label(fixture_label)
# Since we disabled constraint checks, we must manually check for
# any invalid keys that might have been added
table_names = [model._meta.db_table for model in self.models]
try:
connection.check_constraints(table_names=table_names)
except Exception as e:
e.args = ("Problem installing fixtures: %s" % e,)
raise
# If we found even one object in a fixture, we need to reset the
# database sequences.
if self.loaded_object_count > 0:
sequence_sql = connection.ops.sequence_reset_sql(no_style(), self.models)
if sequence_sql:
if self.verbosity >= 2:
self.stdout.write("Resetting sequences\n")
with connection.cursor() as cursor:
for line in sequence_sql:
cursor.execute(line)
if self.verbosity >= 1:
if self.fixture_count == 0 and self.hide_empty:
pass
elif self.fixture_object_count == self.loaded_object_count:
self.stdout.write("Installed %d object(s) from %d fixture(s)" %
(self.loaded_object_count, self.fixture_count))
else:
self.stdout.write("Installed %d object(s) (of %d) from %d fixture(s)" %
(self.loaded_object_count, self.fixture_object_count, self.fixture_count))
def load_label(self, fixture_label):
"""
Loads fixture files for a given label.
"""
show_progress = self.verbosity >= 3
for fixture_file, fixture_dir, fixture_name in self.find_fixtures(fixture_label):
_, ser_fmt, cmp_fmt = self.parse_name(os.path.basename(fixture_file))
open_method, mode = self.compression_formats[cmp_fmt]
fixture = open_method(fixture_file, mode)
try:
self.fixture_count += 1
objects_in_fixture = 0
loaded_objects_in_fixture = 0
if self.verbosity >= 2:
self.stdout.write("Installing %s fixture '%s' from %s." %
(ser_fmt, fixture_name, humanize(fixture_dir)))
objects = serializers.deserialize(ser_fmt, fixture,
using=self.using, ignorenonexistent=self.ignore)
for obj in objects:
objects_in_fixture += 1
if router.allow_migrate_model(self.using, obj.object.__class__):
loaded_objects_in_fixture += 1
self.models.add(obj.object.__class__)
try:
obj.save(using=self.using)
if show_progress:
self.stdout.write(
'\rProcessed %i object(s).' % loaded_objects_in_fixture,
ending=''
)
except (DatabaseError, IntegrityError) as e:
e.args = ("Could not load %(app_label)s.%(object_name)s(pk=%(pk)s): %(error_msg)s" % {
'app_label': obj.object._meta.app_label,
'object_name': obj.object._meta.object_name,
'pk': obj.object.pk,
'error_msg': force_text(e)
},)
raise
if objects and show_progress:
self.stdout.write('') # add a newline after progress indicator
self.loaded_object_count += loaded_objects_in_fixture
self.fixture_object_count += objects_in_fixture
except Exception as e:
if not isinstance(e, CommandError):
e.args = ("Problem installing fixture '%s': %s" % (fixture_file, e),)
raise
finally:
fixture.close()
# Warn if the fixture we loaded contains 0 objects.
if objects_in_fixture == 0:
warnings.warn(
"No fixture data found for '%s'. (File format may be "
"invalid.)" % fixture_name,
RuntimeWarning
)
@lru_cache.lru_cache(maxsize=None)
def find_fixtures(self, fixture_label):
"""
Finds fixture files for a given label.
"""
fixture_name, ser_fmt, cmp_fmt = self.parse_name(fixture_label)
databases = [self.using, None]
cmp_fmts = list(self.compression_formats.keys()) if cmp_fmt is None else [cmp_fmt]
ser_fmts = serializers.get_public_serializer_formats() if ser_fmt is None else [ser_fmt]
if self.verbosity >= 2:
self.stdout.write("Loading '%s' fixtures..." % fixture_name)
if os.path.isabs(fixture_name):
fixture_dirs = [os.path.dirname(fixture_name)]
fixture_name = os.path.basename(fixture_name)
else:
fixture_dirs = self.fixture_dirs
if os.path.sep in os.path.normpath(fixture_name):
fixture_dirs = [os.path.join(dir_, os.path.dirname(fixture_name))
for dir_ in fixture_dirs]
fixture_name = os.path.basename(fixture_name)
suffixes = ('.'.join(ext for ext in combo if ext)
for combo in product(databases, ser_fmts, cmp_fmts))
targets = set('.'.join((fixture_name, suffix)) for suffix in suffixes)
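        # For example (illustrative): with fixture_name='initial', using='default',
        # ser_fmts=['json'] and cmp_fmts=[None, 'gz'], targets would contain
        # 'initial.json', 'initial.json.gz', 'initial.default.json' and
        # 'initial.default.json.gz'.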
fixture_files = []
for fixture_dir in fixture_dirs:
if self.verbosity >= 2:
self.stdout.write("Checking %s for fixtures..." % humanize(fixture_dir))
fixture_files_in_dir = []
path = os.path.join(fixture_dir, fixture_name)
for candidate in glob.iglob(glob_escape(path) + '*'):
if os.path.basename(candidate) in targets:
# Save the fixture_dir and fixture_name for future error messages.
fixture_files_in_dir.append((candidate, fixture_dir, fixture_name))
if self.verbosity >= 2 and not fixture_files_in_dir:
self.stdout.write("No fixture '%s' in %s." %
(fixture_name, humanize(fixture_dir)))
# Check kept for backwards-compatibility; it isn't clear why
# duplicates are only allowed in different directories.
if len(fixture_files_in_dir) > 1:
raise CommandError(
"Multiple fixtures named '%s' in %s. Aborting." %
(fixture_name, humanize(fixture_dir)))
fixture_files.extend(fixture_files_in_dir)
if not fixture_files:
# Warning kept for backwards-compatibility; why not an exception?
warnings.warn("No fixture named '%s' found." % fixture_name)
return fixture_files
@cached_property
def fixture_dirs(self):
"""
Return a list of fixture directories.
The list contains the 'fixtures' subdirectory of each installed
application, if it exists, the directories in FIXTURE_DIRS, and the
current directory.
"""
dirs = []
fixture_dirs = settings.FIXTURE_DIRS
if len(fixture_dirs) != len(set(fixture_dirs)):
raise ImproperlyConfigured("settings.FIXTURE_DIRS contains duplicates.")
for app_config in apps.get_app_configs():
app_label = app_config.label
app_dir = os.path.join(app_config.path, 'fixtures')
if app_dir in fixture_dirs:
raise ImproperlyConfigured(
"'%s' is a default fixture directory for the '%s' app "
"and cannot be listed in settings.FIXTURE_DIRS." % (app_dir, app_label)
)
if self.app_label and app_label != self.app_label:
continue
if os.path.isdir(app_dir):
dirs.append(app_dir)
dirs.extend(list(fixture_dirs))
dirs.append('')
dirs = [upath(os.path.abspath(os.path.realpath(d))) for d in dirs]
return dirs
def parse_name(self, fixture_name):
"""
        Splits the fixture name into name, serialization format, and compression format.
"""
parts = fixture_name.rsplit('.', 2)
if len(parts) > 1 and parts[-1] in self.compression_formats:
cmp_fmt = parts[-1]
parts = parts[:-1]
else:
cmp_fmt = None
if len(parts) > 1:
if parts[-1] in self.serialization_formats:
ser_fmt = parts[-1]
parts = parts[:-1]
else:
raise CommandError(
"Problem installing fixture '%s': %s is not a known "
"serialization format." % (''.join(parts[:-1]), parts[-1]))
else:
ser_fmt = None
name = '.'.join(parts)
return name, ser_fmt, cmp_fmt
class SingleZipReader(zipfile.ZipFile):
def __init__(self, *args, **kwargs):
zipfile.ZipFile.__init__(self, *args, **kwargs)
if len(self.namelist()) != 1:
raise ValueError("Zip-compressed fixtures must contain one file.")
def read(self):
return zipfile.ZipFile.read(self, self.namelist()[0])
def humanize(dirname):
return "'%s'" % dirname if dirname else 'absolute path'
| bsd-3-clause |
AlfredNeverKog/BrainCarya | src/my/kadenze/lesson3/mnist_autoencoder.py | 1 | 2610 | from mnist import MNIST
import numpy as np
import tensorflow as tf
from src.my.lib.utils import montage
import matplotlib.pyplot as plt
from PIL import Image
src = '../../../../data/mnist/'
output='./content/1/%s.jpg'
mndata = MNIST(src)
data = np.array(mndata.load_testing())
X = data[0]
Y = data[1]
items = 100
imgs = np.array(X[:items]).reshape(items, 28, 28)
n_features = 784
n_input = n_features
Y = imgs.reshape(items,n_features).astype(float)
current_input = imgs.reshape(items,n_features).astype(float)
Ws = []
Bs = []
dimensions = [512,256,128,64]
for layer_i, n_outputs in enumerate(dimensions):
with tf.variable_scope("encoder/variable/%s" % layer_i):
W = tf.get_variable(name="weight%s" % layer_i, dtype=tf.float64,
initializer=tf.contrib.layers.xavier_initializer(),
                            shape=[n_input, n_outputs])
#B = tf.get_variable(name='bias%s' % layer_i, dtype=tf.float64,
# initializer=tf.random_normal_initializer(mean=0.0, stddev=1.1),
# shape=[n_ouputs])
#h = tf.nn.bias_add(value=tf.matmul(current_input, W),
# bias=B)
h = tf.matmul(current_input, W)
current_input = h
current_input = tf.nn.relu(current_input)
        n_input = n_outputs
Ws.append(W)
#Bs.append()
Ws = Ws[::-1]  # reverse
Bs = Bs[::-1]  # reverse
dimensions = dimensions[::-1][1:] + [n_features]
#Build DECODER
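# The decoder mirrors the encoder with tied weights: each layer multiplies by
# the transpose of the corresponding encoder weight matrix, walking the saved
# weight list in reverse order.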
for layer_i, n_outputs in enumerate(dimensions):
with tf.variable_scope("encoder/variable/%s" % layer_i):
##128x64 -> 64x128
        h = tf.matmul(current_input, tf.transpose(Ws[layer_i]))
if layer_i + 1 < len(Bs):
h = tf.nn.bias_add(h,bias=Bs[layer_i + 1])
current_input = h
current_input = tf.nn.relu(current_input)
        n_input = n_outputs
loss_func = tf.reduce_mean(tf.squared_difference(current_input, Y), 1)
optimizer = tf.train.AdamOptimizer(learning_rate=0.00001)
train = optimizer.minimize(loss_func)
counter = 0
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for i in range(50000):
sess.run(train)
if i % 15 == 0:
Image.fromarray(montage(sess.run(current_input).reshape(items,28,28)).astype(np.uint8)) \
.save(output % ("0"*(5 - len(str(counter))) + str(counter)))
print(sess.run(tf.reduce_mean(loss_func)))
counter += 1
| mit |
icydoge/AdventOfCodeSolutions2 | day3.py | 1 | 1161 | ###################################
# Many lines #
# Such O(n) #
# Very Doge #
###################################
# By icydoge <icydoge@gmail.com> #
###################################
with open("inputs/day3-1.txt") as f:
content = f.readlines()
# Part 1
lines = list(map(str.strip, content))
triangles = []
for line in lines:
split = line.split(' ')
triangles.append([int(i) for i in split if i != ''])
def is_valid_triangle(t):
    return (t[0] + t[1]) > t[2] and (t[1] + t[2]) > t[0] and (t[0] + t[2]) > t[1]
valid = 0
for triangle in triangles:
    if is_valid_triangle(triangle):
        valid += 1
print("Final answer for Part 1: %d" % (valid))
# Part 2
triangles2 = []
for i in range(0, len(triangles) - 2, 3):
for j in range(0, 3):
triangles2.append([triangles[i][j], triangles[i + 1][j], triangles[i + 2][j]])
valid = 0
for triangle in triangles2:
    if is_valid_triangle(triangle):
        valid += 1
print("Final answer for Part 2: %d" % (valid))
| mit |
shapiromatron/bmds-server | bmds_server/analysis/transforms.py | 1 | 3196 | from enum import Enum
from typing import Dict, List, Union
import bmds
from bmds.bmds3.sessions import get_model
from bmds.bmds3.types.continuous import ContinuousModelSettings
from bmds.bmds3.types.dichotomous import DichotomousModelSettings
from bmds.bmds3.types.priors import PriorClass, get_continuous_prior, get_dichotomous_prior
from bmds.constants import Dtype
from .validators.datasets import AdverseDirection
class PriorEnum(str, Enum):
frequentist_restricted = "frequentist_restricted"
frequentist_unrestricted = "frequentist_unrestricted"
bayesian = "bayesian"
# TODO - remove these maps; use constants from bmds
bmd3_prior_map = {
PriorEnum.frequentist_restricted: PriorClass.frequentist_restricted,
PriorEnum.frequentist_unrestricted: PriorClass.frequentist_unrestricted,
PriorEnum.bayesian: PriorClass.bayesian,
}
is_increasing_map = {
AdverseDirection.AUTOMATIC: None,
AdverseDirection.UP: True,
AdverseDirection.DOWN: False,
}
def build_model_settings(
bmds_version: str,
dataset_type: str,
model_name: str,
prior_class: str,
options: Dict,
dataset_options: Dict,
) -> Union[DichotomousModelSettings, ContinuousModelSettings]:
model = get_model(bmds_version, dataset_type, model_name)
prior_class = bmd3_prior_map[prior_class]
if dataset_type in bmds.constants.DICHOTOMOUS_DTYPES:
return DichotomousModelSettings(
bmr=options["bmr_value"],
alpha=1.0 - options["confidence_level"],
bmr_type=options["bmr_type"],
degree=dataset_options["degree"],
priors=get_dichotomous_prior(model.bmd_model_class, prior_class),
)
elif dataset_type in bmds.constants.CONTINUOUS_DTYPES:
return ContinuousModelSettings(
bmr=options["bmr_value"],
alpha=1.0 - options["confidence_level"],
tailProb=options["tail_probability"],
bmr_type=options["bmr_type"],
disttype=options["dist_type"],
degree=dataset_options["degree"],
is_increasing=is_increasing_map[dataset_options["adverse_direction"]],
priors=get_continuous_prior(model.bmd_model_class, prior_class),
)
else:
raise ValueError(f"Unknown dataset_type: {dataset_type}")
def build_dataset(dataset_type: str, dataset: Dict[str, List[float]]) -> bmds.datasets.DatasetType:
if dataset_type == Dtype.CONTINUOUS:
schema = bmds.datasets.ContinuousDatasetSchema
elif dataset_type == Dtype.CONTINUOUS_INDIVIDUAL:
schema = bmds.datasets.ContinuousIndividualDatasetSchema
elif dataset_type == Dtype.DICHOTOMOUS:
schema = bmds.datasets.DichotomousDatasetSchema
else:
raise ValueError(f"Unknown dataset type: {dataset_type}")
return schema.parse_obj(dataset).deserialize()
def remap_exponential(models: List[str]) -> List[str]:
    # expand a user-specified "exponential" model into M3 and M5
if bmds.constants.M_Exponential in models:
pos = models.index(bmds.constants.M_Exponential)
models[pos : pos + 1] = (bmds.constants.M_ExponentialM3, bmds.constants.M_ExponentialM5)
return models
| mit |
Lujeni/ansible | lib/ansible/modules/network/f5/bigip_selfip.py | 24 | 26437 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2016, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_selfip
short_description: Manage Self-IPs on a BIG-IP system
description:
- Manage Self-IPs on a BIG-IP system.
version_added: 2.2
options:
address:
description:
- The IP addresses for the new self IP. This value is ignored upon update
as addresses themselves cannot be changed after they are created.
- This value is required when creating new self IPs.
type: str
allow_service:
description:
- Configure port lockdown for the Self IP. By default, the Self IP has a
"default deny" policy. This can be changed to allow TCP and UDP ports
as well as specific protocols. This list should contain C(protocol):C(port)
values.
type: list
name:
description:
- The self IP to create.
- If this parameter is not specified, then it will default to the value supplied
in the C(address) parameter.
type: str
required: True
description:
description:
- Description of the traffic selector.
type: str
version_added: 2.8
netmask:
description:
- The netmask for the self IP. When creating a new Self IP, this value
is required.
type: str
state:
description:
- When C(present), guarantees that the Self-IP exists with the provided
attributes.
- When C(absent), removes the Self-IP from the system.
type: str
choices:
- absent
- present
default: present
traffic_group:
description:
- The traffic group for the Self IP addresses in an active-active,
redundant load balancer configuration. When creating a new Self IP, if
this value is not specified, the default of C(/Common/traffic-group-local-only)
will be used.
type: str
vlan:
description:
- The VLAN that the new self IPs will be on. When creating a new Self
IP, this value is required.
type: str
route_domain:
description:
- The route domain id of the system. When creating a new Self IP, if
this value is not specified, a default value of C(0) will be used.
- This value cannot be changed after it is set.
type: int
version_added: 2.3
partition:
description:
- Device partition to manage resources on. You can set different partitions
for Self IPs, but the address used may not match any other address used
by a Self IP. In that sense, Self IPs are not isolated by partitions as
other resources on a BIG-IP are.
type: str
default: Common
version_added: 2.5
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create Self IP
bigip_selfip:
address: 10.10.10.10
name: self1
netmask: 255.255.255.0
vlan: vlan1
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create Self IP with a Route Domain
bigip_selfip:
name: self1
address: 10.10.10.10
netmask: 255.255.255.0
vlan: vlan1
route_domain: 10
allow_service: default
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Delete Self IP
bigip_selfip:
name: self1
state: absent
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Allow management web UI to be accessed on this Self IP
bigip_selfip:
name: self1
state: absent
allow_service:
- tcp:443
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Allow HTTPS and SSH access to this Self IP
bigip_selfip:
name: self1
state: absent
allow_service:
- tcp:443
- tcp:22
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Allow all services access to this Self IP
bigip_selfip:
name: self1
state: absent
allow_service:
- all
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Allow only GRE and IGMP protocols access to this Self IP
bigip_selfip:
name: self1
state: absent
allow_service:
- gre:0
- igmp:0
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Allow all TCP, but no other protocols access to this Self IP
bigip_selfip:
name: self1
state: absent
allow_service:
- tcp:0
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
allow_service:
  description: Services that are allowed via this Self IP
returned: changed
type: list
sample: ['igmp:0','tcp:22','udp:53']
address:
description: The address for the Self IP
returned: changed
type: str
sample: 192.0.2.10
name:
description: The name of the Self IP
returned: created
type: str
sample: self1
netmask:
description: The netmask of the Self IP
returned: changed
type: str
sample: 255.255.255.0
traffic_group:
description: The traffic group that the Self IP is a member of
returned: changed
type: str
sample: traffic-group-local-only
vlan:
description: The VLAN set on the Self IP
returned: changed
type: str
sample: vlan1
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.ipaddress import is_valid_ip
from library.module_utils.network.f5.ipaddress import ipv6_netmask_to_cidr
from library.module_utils.compat.ipaddress import ip_address
from library.module_utils.compat.ipaddress import ip_network
from library.module_utils.compat.ipaddress import ip_interface
from library.module_utils.network.f5.compare import cmp_str_with_none
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
from ansible.module_utils.network.f5.ipaddress import ipv6_netmask_to_cidr
from ansible.module_utils.compat.ipaddress import ip_address
from ansible.module_utils.compat.ipaddress import ip_network
from ansible.module_utils.compat.ipaddress import ip_interface
from ansible.module_utils.network.f5.compare import cmp_str_with_none
class Parameters(AnsibleF5Parameters):
api_map = {
'trafficGroup': 'traffic_group',
'allowService': 'allow_service',
}
updatables = [
'traffic_group',
'allow_service',
'vlan',
'netmask',
'address',
'description',
]
returnables = [
'traffic_group',
'allow_service',
'vlan',
'route_domain',
'netmask',
'address',
'description',
]
api_attributes = [
'trafficGroup',
'allowService',
'vlan',
'address',
'description',
]
@property
def vlan(self):
if self._values['vlan'] is None:
return None
return fq_name(self.partition, self._values['vlan'])
class ModuleParameters(Parameters):
@property
def address(self):
address = "{0}%{1}/{2}".format(
self.ip, self.route_domain, self.netmask
)
return address
@property
def ip(self):
if self._values['address'] is None:
return None
if is_valid_ip(self._values['address']):
return self._values['address']
else:
raise F5ModuleError(
'The provided address is not a valid IP address'
)
@property
def traffic_group(self):
if self._values['traffic_group'] is None:
return None
return fq_name(self.partition, self._values['traffic_group'])
@property
def route_domain(self):
if self._values['route_domain'] is None:
return None
result = int(self._values['route_domain'])
return result
@property
def netmask(self):
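        # Accepts either a prefix length or a dotted/colon netmask and returns
        # the prefix length, e.g. (illustrative): '24' -> 24,
        # '255.255.255.0' -> 24, 'ffff:ffff:ffff:ffff::' -> 64.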
if self._values['netmask'] is None:
return None
result = -1
try:
result = int(self._values['netmask'])
if 0 < result < 256:
pass
except ValueError:
if is_valid_ip(self._values['netmask']):
addr = ip_address(u'{0}'.format(str(self._values['netmask'])))
if addr.version == 4:
ip = ip_network(u'0.0.0.0/%s' % str(self._values['netmask']))
result = ip.prefixlen
else:
result = ipv6_netmask_to_cidr(self._values['netmask'])
if result < 0:
raise F5ModuleError(
                'The provided netmask {0} is neither in IP nor CIDR format'.format(self._values['netmask'])
)
return result
@property
def allow_service(self):
"""Verifies that a supplied service string has correct format
The string format for port lockdown is PROTOCOL:PORT. This method
will verify that the provided input matches the allowed protocols
and the port ranges before submitting to BIG-IP.
The only allowed exceptions to this rule are the following values
* all
* default
* none
These are special cases that are handled differently in the API.
"all" is set as a string, "default" is set as a one item list, and
"none" removes the key entirely from the REST API.
:raises F5ModuleError:
"""
if self._values['allow_service'] is None:
return None
result = []
allowed_protocols = [
'eigrp', 'egp', 'gre', 'icmp', 'igmp', 'igp', 'ipip',
'l2tp', 'ospf', 'pim', 'tcp', 'udp'
]
special_protocols = [
'all', 'none', 'default'
]
for svc in self._values['allow_service']:
if svc in special_protocols:
result = [svc]
break
elif svc in allowed_protocols:
full_service = '{0}:0'.format(svc)
result.append(full_service)
else:
tmp = svc.split(':')
if tmp[0] not in allowed_protocols:
raise F5ModuleError(
"The provided protocol '%s' is invalid" % (tmp[0])
)
try:
port = int(tmp[1])
except Exception:
raise F5ModuleError(
"The provided port '%s' is not a number" % (tmp[1])
)
if port < 0 or port > 65535:
raise F5ModuleError(
"The provided port '{0}' must be between 0 and 65535".format(port)
)
else:
result.append(svc)
result = sorted(list(set(result)))
return result
@property
def description(self):
if self._values['description'] is None:
return None
elif self._values['description'] in ['none', '']:
return ''
return self._values['description']
class ApiParameters(Parameters):
@property
def allow_service(self):
if self._values['allow_service'] is None:
return None
if self._values['allow_service'] == 'all':
self._values['allow_service'] = ['all']
return sorted(self._values['allow_service'])
@property
def destination_ip(self):
if self._values['address'] is None:
return None
try:
pattern = r'(?P<rd>%[0-9]+)'
addr = re.sub(pattern, '', self._values['address'])
ip = ip_interface(u'{0}'.format(addr))
return ip.with_prefixlen
except ValueError:
raise F5ModuleError(
"The provided destination is not an IP address"
)
@property
def netmask(self):
ip = ip_interface(self.destination_ip)
return int(ip.network.prefixlen)
@property
def ip(self):
result = ip_interface(self.destination_ip)
return str(result.ip)
@property
def description(self):
if self._values['description'] in [None, 'none']:
return None
return self._values['description']
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def allow_service(self):
if self._values['allow_service'] is None:
return None
if self._values['allow_service'] == ['all']:
return 'all'
return sorted(self._values['allow_service'])
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def address(self):
return None
@property
def allow_service(self):
"""Returns services formatted for consumption by f5-sdk update
The BIG-IP endpoint for services takes different values depending on
what you want the "allowed services" to be. It can be any of the
following
- a list containing "protocol:port" values
- the string "all"
- a null value, or None
This is a convenience function to massage the values the user has
supplied so that they are formatted in such a way that BIG-IP will
accept them and apply the specified policy.
"""
if self.want.allow_service is None:
return None
result = self.want.allow_service
if result[0] == 'none' and self.have.allow_service is None:
return None
elif self.have.allow_service is None:
return result
elif result[0] == 'all' and self.have.allow_service[0] != 'all':
return ['all']
elif result[0] == 'none':
return []
elif set(self.want.allow_service) != set(self.have.allow_service):
return result
@property
def netmask(self):
if self.want.netmask is None:
return None
ip = self.have.ip
if is_valid_ip(ip):
if self.want.route_domain is not None:
want = "{0}%{1}/{2}".format(ip, self.want.route_domain, self.want.netmask)
have = "{0}%{1}/{2}".format(ip, self.want.route_domain, self.have.netmask)
elif self.have.route_domain is not None:
want = "{0}%{1}/{2}".format(ip, self.have.route_domain, self.want.netmask)
have = "{0}%{1}/{2}".format(ip, self.have.route_domain, self.have.netmask)
else:
want = "{0}/{1}".format(ip, self.want.netmask)
have = "{0}/{1}".format(ip, self.have.netmask)
if want != have:
return want
else:
raise F5ModuleError(
'The provided address/netmask value "{0}" was invalid'.format(self.have.ip)
)
@property
def traffic_group(self):
if self.want.traffic_group != self.have.traffic_group:
return self.want.traffic_group
@property
def description(self):
return cmp_str_with_none(self.want.description, self.have.description)
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.have = None
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = ApiParameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if k in ['netmask']:
changed['address'] = change
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def present(self):
if self.exists():
changed = self.update()
else:
changed = self.create()
return changed
def absent(self):
changed = False
if self.exists():
changed = self.remove()
return changed
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the Self IP")
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def create(self):
if self.want.address is None or self.want.netmask is None:
raise F5ModuleError(
'An address and a netmask must be specified'
)
if self.want.vlan is None:
raise F5ModuleError(
'A VLAN name must be specified'
)
if self.want.route_domain is None:
rd = self.read_partition_default_route_domain_from_device()
self.want.update({'route_domain': rd})
if self.want.traffic_group is None:
self.want.update({'traffic_group': '/Common/traffic-group-local-only'})
if self.want.route_domain is None:
self.want.update({'route_domain': 0})
if self.want.allow_service:
if 'all' in self.want.allow_service:
self.want.update(dict(allow_service=['all']))
elif 'none' in self.want.allow_service:
self.want.update(dict(allow_service=[]))
elif 'default' in self.want.allow_service:
self.want.update(dict(allow_service=['default']))
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
if self.exists():
return True
else:
raise F5ModuleError("Failed to create the Self IP")
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/net/self/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/net/self/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/net/self/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/net/self/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/net/self/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
def read_partition_default_route_domain_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/auth/partition/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.partition
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return int(response['defaultRouteDomain'])
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
address=dict(),
allow_service=dict(type='list'),
name=dict(required=True),
netmask=dict(),
traffic_group=dict(),
vlan=dict(),
route_domain=dict(type='int'),
description=dict(),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
f3r/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 159 | 2951 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a closed-form formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired by Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
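# Both estimators shrink the empirical covariance toward a scaled identity
# matrix (a sketch of the common form; only the shrinkage coefficient differs):
#   mu = np.trace(emp_cov) / n_features
#   shrunk_cov = (1. - shrinkage) * emp_cov + shrinkage * mu * np.identity(n_features)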
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='navy', lw=2)
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='darkorange', lw=2)
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='navy', lw=2)
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='darkorange', lw=2)
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
promptworks/horizon | horizon/utils/filters.py | 86 | 2017 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
from django.template.defaultfilters import register # noqa
from django.template.defaultfilters import timesince # noqa
from django.utils.safestring import mark_safe
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
@register.filter
def replace_underscores(string):
return string.replace("_", " ")
@register.filter
def parse_isotime(timestr, default=None):
"""This duplicates oslo timeutils parse_isotime but with a
@register.filter annotation and a silent fallback on error.
"""
try:
return iso8601.parse_date(timestr)
except (iso8601.ParseError, TypeError):
return default or ''
@register.filter
def timesince_or_never(dt, default=None):
"""Call the Django ``timesince`` filter, but return the string
*default* if *dt* is not a valid ``date`` or ``datetime`` object.
When *default* is None, "Never" is returned.
"""
if default is None:
default = _("Never")
if isinstance(dt, datetime.date):
return timesince(dt)
else:
return default
@register.filter
def timesince_sortable(dt):
delta = timezone.now() - dt
# timedelta.total_seconds() not supported on python < 2.7
seconds = delta.seconds + (delta.days * 24 * 3600)
return mark_safe("<span data-seconds=\"%d\">%s</span>" %
(seconds, timesince(dt)))
| apache-2.0 |
bgris/ODL_bgris | lib/python3.5/site-packages/IPython/core/magics/display.py | 8 | 2127 | """Simple magics for display formats"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Our own packages
from IPython.core.display import display, Javascript, Latex, SVG, HTML
from IPython.core.magic import (
Magics, magics_class, cell_magic
)
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
@magics_class
class DisplayMagics(Magics):
"""Magics for displaying various output types with literals
Defines javascript/latex/svg/html cell magics for writing
blocks in those languages, to be rendered in the frontend.
"""
@cell_magic
def js(self, line, cell):
"""Run the cell block of Javascript code
Alias of `%%javascript`
"""
self.javascript(line, cell)
@cell_magic
def javascript(self, line, cell):
"""Run the cell block of Javascript code"""
display(Javascript(cell))
@cell_magic
def latex(self, line, cell):
"""Render the cell as a block of latex
        The subset of latex which is supported depends on the implementation in
the client. In the Jupyter Notebook, this magic only renders the subset
of latex defined by MathJax
[here](https://docs.mathjax.org/en/v2.5-latest/tex.html)."""
display(Latex(cell))
@cell_magic
def svg(self, line, cell):
"""Render the cell as an SVG literal"""
display(SVG(cell))
@cell_magic
def html(self, line, cell):
"""Render the cell as a block of HTML"""
display(HTML(cell))
| gpl-3.0 |
f123h456/linux | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 4653 | 3596 | # EventClass.py
#
# This is a library defining some event type classes, which can
# be used by other scripts for analyzing perf samples.
#
# Currently there are just a few classes defined as examples:
# PerfEvent is the base class for all perf event samples, PebsEvent
# is a HW base Intel x86 PEBS event, and user could add more SW/HW
# event classes based on requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have a good way to tell the event type other than by
# the size of the raw buffer: a raw PEBS event with load latency data is
# 176 bytes, while a pure PEBS event is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
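# A sketch of typical use from a perf trace script (the arguments come from
# the perf scripting engine's sample handlers; illustrative only):
#   event = create_event(name, comm, dso, symbol, raw_buf)
#   event.show()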
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie
# in the four 64 bit words write after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
| gpl-2.0 |
Azure/azure-sdk-for-python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2018_05_01/models/_resource_management_client_enums.py | 5 | 2298 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
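# With this metaclass both item and attribute access are case-insensitive,
# e.g. (illustrative): DeploymentMode['incremental'] is DeploymentMode.INCREMENTAL.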
class DeploymentMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The mode that is used to deploy resources. This value can be either Incremental or Complete. In
Incremental mode, resources are deployed without deleting existing resources that are not
included in the template. In Complete mode, resources are deployed and existing resources in
the resource group that are not included in the template are deleted. Be careful when using
Complete mode as you may unintentionally delete resources.
"""
INCREMENTAL = "Incremental"
COMPLETE = "Complete"
class OnErrorDeploymentType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The deployment on error behavior type. Possible values are LastSuccessful and
SpecificDeployment.
"""
LAST_SUCCESSFUL = "LastSuccessful"
SPECIFIC_DEPLOYMENT = "SpecificDeployment"
class ResourceIdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The identity type.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
USER_ASSIGNED = "UserAssigned"
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
NONE = "None"
| mit |
ThiefMaster/indico | indico/modules/events/notes/models/notes.py | 3 | 8477 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from functools import partial
from flask import g
from sqlalchemy.event import listen, listens_for
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import joinedload
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum, UTCDateTime
from indico.core.db.sqlalchemy.descriptions import RenderMode
from indico.core.db.sqlalchemy.links import LinkMixin, LinkType
from indico.core.db.sqlalchemy.util.models import auto_table_args
from indico.util.date_time import now_utc
from indico.util.locators import locator_property
from indico.util.string import render_markdown, text_to_repr
class EventNote(LinkMixin, db.Model):
__tablename__ = 'notes'
allowed_link_types = LinkMixin.allowed_link_types - {LinkType.category, LinkType.session_block}
unique_links = True
events_backref_name = 'all_notes'
link_backref_name = 'note'
@declared_attr
def __table_args__(cls):
return auto_table_args(cls, schema='events')
#: The ID of the note
id = db.Column(
db.Integer,
primary_key=True
)
#: If the note has been deleted
is_deleted = db.Column(
db.Boolean,
nullable=False,
default=False
)
#: The rendered HTML of the note
html = db.Column(
db.Text,
nullable=False
)
#: The ID of the current revision
current_revision_id = db.Column(
db.Integer,
db.ForeignKey('events.note_revisions.id', use_alter=True),
nullable=True # needed for post_update :(
)
#: The list of all revisions for the note
revisions = db.relationship(
'EventNoteRevision',
primaryjoin=lambda: EventNote.id == EventNoteRevision.note_id,
foreign_keys=lambda: EventNoteRevision.note_id,
lazy=True,
cascade='all, delete-orphan',
order_by=lambda: EventNoteRevision.created_dt.desc(),
backref=db.backref(
'note',
lazy=False
)
)
#: The currently active revision of the note
current_revision = db.relationship(
'EventNoteRevision',
primaryjoin=lambda: EventNote.current_revision_id == EventNoteRevision.id,
foreign_keys=current_revision_id,
lazy=True,
post_update=True
)
@locator_property
def locator(self):
return self.object.locator
@classmethod
def get_for_linked_object(cls, linked_object, preload_event=True):
"""Get the note for the given object.
This only returns a note that hasn't been deleted.
:param linked_object: An event, session, contribution or
subcontribution.
:param preload_event: If all notes for the same event should
be pre-loaded and cached in the app
context.
"""
event = linked_object.event
try:
return g.event_notes[event].get(linked_object)
except (AttributeError, KeyError):
if not preload_event:
return linked_object.note if linked_object.note and not linked_object.note.is_deleted else None
if 'event_notes' not in g:
g.event_notes = {}
query = (event.all_notes
.filter_by(is_deleted=False)
.options(joinedload(EventNote.linked_event),
joinedload(EventNote.session),
joinedload(EventNote.contribution),
joinedload(EventNote.subcontribution)))
g.event_notes[event] = {n.object: n for n in query}
return g.event_notes[event].get(linked_object)
@classmethod
def get_or_create(cls, linked_object):
"""Get the note for the given object or creates a new one.
        If there is an existing note for the object, it will be returned
        even if it is marked as deleted. Otherwise a new note is created.
"""
note = cls.query.filter_by(object=linked_object).first()
if note is None:
note = cls(object=linked_object)
return note
def delete(self, user):
"""Mark the note as deleted and adds a new empty revision."""
self.create_revision(self.current_revision.render_mode, '', user)
self.is_deleted = True
def create_revision(self, render_mode, source, user):
"""Create a new revision if needed and marks it as undeleted if it was.
Any change to the render mode or the source causes a new
revision to be created. The user is not taken into account
since a user "modifying" a note without changing things is
not really a change.
"""
self.is_deleted = False
with db.session.no_autoflush:
current = self.current_revision
if current is not None and current.render_mode == render_mode and current.source == source:
return current
self.current_revision = EventNoteRevision(render_mode=render_mode, source=source, user=user)
return self.current_revision
def __repr__(self):
return '<EventNote({}, current_revision={}{}, {})>'.format(
self.id,
self.current_revision_id,
', is_deleted=True' if self.is_deleted else '',
self.link_repr
)
class EventNoteRevision(db.Model):
__tablename__ = 'note_revisions'
__table_args__ = {'schema': 'events'}
#: The ID of the revision
id = db.Column(
db.Integer,
primary_key=True
)
#: The ID of the associated note
note_id = db.Column(
db.Integer,
db.ForeignKey('events.notes.id'),
nullable=False,
index=True
)
#: The user who created the revision
user_id = db.Column(
db.Integer,
db.ForeignKey('users.users.id'),
nullable=False,
index=True
)
#: The date/time when the revision was created
created_dt = db.Column(
UTCDateTime,
nullable=False,
default=now_utc
)
#: How the note is rendered
render_mode = db.Column(
PyIntEnum(RenderMode),
nullable=False
)
#: The raw source of the note as provided by the user
source = db.Column(
db.Text,
nullable=False
)
#: The rendered HTML of the note
html = db.Column(
db.Text,
nullable=False
)
#: The user who created the revision
user = db.relationship(
'User',
lazy=True,
backref=db.backref(
'event_notes_revisions',
lazy='dynamic'
)
)
# relationship backrefs:
# - note (EventNote.revisions)
def __repr__(self):
render_mode = self.render_mode.name if self.render_mode is not None else None
source = text_to_repr(self.source, html=True)
return '<EventNoteRevision({}, {}, {}, {}): "{}">'.format(self.id, self.note_id, render_mode, self.created_dt,
source)
@listens_for(EventNote.current_revision, 'set')
def _add_current_revision(target, value, *unused):
if value is None:
raise ValueError('current_revision cannot be set to None')
with db.session.no_autoflush:
target.revisions.append(value)
target.html = value.html
def _render_revision(attr, target, value, *unused):
source = value if attr == 'source' else target.source
render_mode = value if attr == 'render_mode' else target.render_mode
if source is None or render_mode is None:
return
if render_mode == RenderMode.html:
target.html = source
elif render_mode == RenderMode.markdown:
target.html = render_markdown(source, extensions=('nl2br',))
else: # pragma: no cover
raise ValueError(f'Invalid render mode: {render_mode}')
listen(EventNoteRevision.render_mode, 'set', partial(_render_revision, 'render_mode'))
listen(EventNoteRevision.source, 'set', partial(_render_revision, 'source'))
@listens_for(EventNoteRevision.html, 'set')
def _update_note_html(target, value, *unused):
if target.note_id is not None and target == target.note.current_revision:
target.note.html = value
EventNote.register_link_events()
| mit |
ccastell/Transfer-System | Website/env/lib/python3.5/site-packages/django/db/backends/oracle/schema.py | 58 | 5290 | import binascii
import copy
import datetime
import re
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.text import force_text
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_create_column = "ALTER TABLE %(table)s ADD %(column)s %(definition)s"
sql_alter_column_type = "MODIFY %(column)s %(type)s"
sql_alter_column_null = "MODIFY %(column)s NULL"
sql_alter_column_not_null = "MODIFY %(column)s NOT NULL"
sql_alter_column_default = "MODIFY %(column)s DEFAULT %(default)s"
sql_alter_column_no_default = "MODIFY %(column)s DEFAULT NULL"
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s"
sql_delete_table = "DROP TABLE %(table)s CASCADE CONSTRAINTS"
def quote_value(self, value):
if isinstance(value, (datetime.date, datetime.time, datetime.datetime)):
return "'%s'" % value
elif isinstance(value, six.string_types):
return "'%s'" % six.text_type(value).replace("\'", "\'\'")
elif isinstance(value, six.buffer_types):
return "'%s'" % force_text(binascii.hexlify(value))
elif isinstance(value, bool):
return "1" if value else "0"
else:
return str(value)
def delete_model(self, model):
# Run superclass action
super(DatabaseSchemaEditor, self).delete_model(model)
# Clean up any autoincrement trigger
self.execute("""
DECLARE
i INTEGER;
BEGIN
SELECT COUNT(1) INTO i FROM USER_SEQUENCES
WHERE SEQUENCE_NAME = '%(sq_name)s';
IF i = 1 THEN
EXECUTE IMMEDIATE 'DROP SEQUENCE "%(sq_name)s"';
END IF;
END;
/""" % {'sq_name': self.connection.ops._get_sequence_name(model._meta.db_table)})
def alter_field(self, model, old_field, new_field, strict=False):
try:
super(DatabaseSchemaEditor, self).alter_field(model, old_field, new_field, strict)
except DatabaseError as e:
description = str(e)
# If we're changing type to an unsupported type we need a
# SQLite-ish workaround
if 'ORA-22858' in description or 'ORA-22859' in description:
self._alter_field_type_workaround(model, old_field, new_field)
else:
raise
def _alter_field_type_workaround(self, model, old_field, new_field):
"""
        Oracle refuses to change a column from certain types to certain other types.
What we need to do instead is:
- Add a nullable version of the desired field with a temporary name
- Update the table to transfer values from old to new
- Drop old column
- Rename the new column and possibly drop the nullable property
"""
# Make a new field that's like the new one but with a temporary
# column name.
new_temp_field = copy.deepcopy(new_field)
new_temp_field.null = True
new_temp_field.column = self._generate_temp_name(new_field.column)
# Add it
self.add_field(model, new_temp_field)
# Explicit data type conversion
# https://docs.oracle.com/cd/B19306_01/server.102/b14200/sql_elements002.htm#sthref340
new_value = self.quote_name(old_field.column)
old_type = old_field.db_type(self.connection)
if re.match('^N?CLOB', old_type):
new_value = "TO_CHAR(%s)" % new_value
old_type = 'VARCHAR2'
if re.match('^N?VARCHAR2', old_type):
new_internal_type = new_field.get_internal_type()
if new_internal_type == 'DateField':
new_value = "TO_DATE(%s, 'YYYY-MM-DD')" % new_value
elif new_internal_type == 'DateTimeField':
new_value = "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value
elif new_internal_type == 'TimeField':
# TimeField are stored as TIMESTAMP with a 1900-01-01 date part.
new_value = "TO_TIMESTAMP(CONCAT('1900-01-01 ', %s), 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value
# Transfer values across
self.execute("UPDATE %s set %s=%s" % (
self.quote_name(model._meta.db_table),
self.quote_name(new_temp_field.column),
new_value,
))
# Drop the old field
self.remove_field(model, old_field)
# Rename and possibly make the new field NOT NULL
super(DatabaseSchemaEditor, self).alter_field(model, new_temp_field, new_field)
def normalize_name(self, name):
"""
Get the properly shortened and uppercased identifier as returned by
quote_name(), but without the actual quotes.
"""
nn = self.quote_name(name)
if nn[0] == '"' and nn[-1] == '"':
nn = nn[1:-1]
return nn
def _generate_temp_name(self, for_name):
"""
Generates temporary names for workarounds that need temp columns
"""
suffix = hex(hash(for_name)).upper()[1:]
return self.normalize_name(for_name + "_" + suffix)
def prepare_default(self, value):
return self.quote_value(value)
| apache-2.0 |
himleyb85/django | tests/servers/test_basehttp.py | 7 | 4388 | import logging
from io import BytesIO
from django.core.handlers.wsgi import WSGIRequest
from django.core.servers.basehttp import WSGIRequestHandler
from django.test import SimpleTestCase
from django.test.client import RequestFactory
from django.test.utils import captured_stderr, patch_logger
class Stub(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class WSGIRequestHandlerTestCase(SimpleTestCase):
def test_log_message(self):
# Silence the django.server logger by replacing its StreamHandler with
# NullHandler.
logger = logging.getLogger('django.server')
original_handlers = logger.handlers
logger.handlers = [logging.NullHandler()]
try:
request = WSGIRequest(RequestFactory().get('/').environ)
request.makefile = lambda *args, **kwargs: BytesIO()
handler = WSGIRequestHandler(request, '192.168.0.2', None)
level_status_codes = {
'info': [200, 301, 304],
'warning': [400, 403, 404],
'error': [500, 503],
}
def _log_level_code(level, status_code):
with patch_logger('django.server', level) as messages:
handler.log_message('GET %s %s', 'A', str(status_code))
return messages
for level, status_codes in level_status_codes.items():
for status_code in status_codes:
# The correct level gets the message.
messages = _log_level_code(level, status_code)
self.assertIn('GET A %d' % status_code, messages[0])
# Incorrect levels shouldn't have any messages.
for wrong_level in level_status_codes.keys():
if wrong_level != level:
messages = _log_level_code(wrong_level, status_code)
self.assertEqual(len(messages), 0)
finally:
logger.handlers = original_handlers
def test_https(self):
request = WSGIRequest(RequestFactory().get('/').environ)
request.makefile = lambda *args, **kwargs: BytesIO()
handler = WSGIRequestHandler(request, '192.168.0.2', None)
with patch_logger('django.server', 'error') as messages:
handler.log_message("GET %s %s", str('\x16\x03'), "4")
self.assertIn(
"You're accessing the development server over HTTPS, "
"but it only supports HTTP.",
messages[0]
)

    def test_strips_underscore_headers(self):
        """WSGIRequestHandler ignores headers containing underscores.

        This follows the lead of nginx and Apache 2.4, and is to avoid
        ambiguity between dashes and underscores in mapping to WSGI environ,
        which can have security implications.
        """
        def test_app(environ, start_response):
            """A WSGI app that just reflects its HTTP environ."""
            start_response('200 OK', [])
            http_environ_items = sorted(
                '%s:%s' % (k, v) for k, v in environ.items()
                if k.startswith('HTTP_')
            )
            yield (','.join(http_environ_items)).encode('utf-8')

        rfile = BytesIO()
        rfile.write(b"GET / HTTP/1.0\r\n")
        rfile.write(b"Some-Header: good\r\n")
        rfile.write(b"Some_Header: bad\r\n")
        rfile.write(b"Other_Header: bad\r\n")
        rfile.seek(0)

        # WSGIRequestHandler closes the output file; we need to make this a
        # no-op so we can still read its contents.
        class UnclosableBytesIO(BytesIO):
            def close(self):
                pass

        wfile = UnclosableBytesIO()

        def makefile(mode, *a, **kw):
            if mode == 'rb':
                return rfile
            elif mode == 'wb':
                return wfile

        request = Stub(makefile=makefile)
        server = Stub(base_environ={}, get_app=lambda: test_app)

        # We don't need to check stderr, but we don't want it in test output.
        with captured_stderr():
            # Instantiating a handler runs the request as a side effect.
            WSGIRequestHandler(request, '192.168.0.2', server)

        wfile.seek(0)
        body = list(wfile.readlines())[-1]
        self.assertEqual(body, b'HTTP_SOME_HEADER:good')
| bsd-3-clause |
Danielhiversen/home-assistant | homeassistant/components/alarm_control_panel/verisure.py | 4 | 3042 | """
Interfaces with Verisure alarm control panel.

For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/alarm_control_panel.verisure/
"""
import logging
from time import sleep

import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.verisure import CONF_ALARM, CONF_CODE_DIGITS
from homeassistant.components.verisure import HUB as hub
from homeassistant.const import (
    STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED,
    STATE_UNKNOWN)

_LOGGER = logging.getLogger(__name__)


def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Verisure platform."""
    alarms = []
    if int(hub.config.get(CONF_ALARM, 1)):
        hub.update_overview()
        alarms.append(VerisureAlarm())
    add_entities(alarms)


def set_arm_state(state, code=None):
    """Send set arm state command."""
    transaction_id = hub.session.set_arm_state(code, state)[
        'armStateChangeTransactionId']
    _LOGGER.info('verisure set arm state %s', state)
    transaction = {}
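    # Arming/disarming is asynchronous on the Verisure side: poll the
    # transaction every 0.5 seconds until it reports a result, then force a
    # fresh overview so the new state is picked up immediately.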
    while 'result' not in transaction:
        sleep(0.5)
        transaction = hub.session.get_arm_state_transaction(transaction_id)
    # pylint: disable=unexpected-keyword-arg
    hub.update_overview(no_throttle=True)


class VerisureAlarm(alarm.AlarmControlPanel):
    """Representation of a Verisure alarm status."""

    def __init__(self):
        """Initialize the Verisure alarm panel."""
        self._state = STATE_UNKNOWN
        self._digits = hub.config.get(CONF_CODE_DIGITS)
        self._changed_by = None

    @property
    def name(self):
        """Return the name of the device."""
        return '{} alarm'.format(hub.session.installations[0]['alias'])

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def code_format(self):
        """Return one or more digits/characters."""
        return 'Number'

    @property
    def changed_by(self):
        """Return the last change triggered by."""
        return self._changed_by

    def update(self):
        """Update alarm status."""
        hub.update_overview()
        status = hub.get_first("$.armState.statusType")
        if status == 'DISARMED':
            self._state = STATE_ALARM_DISARMED
        elif status == 'ARMED_HOME':
            self._state = STATE_ALARM_ARMED_HOME
        elif status == 'ARMED_AWAY':
            self._state = STATE_ALARM_ARMED_AWAY
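        # 'PENDING' is reported while an arm/disarm transaction is still in
        # flight; keep the previous state rather than logging an error.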
        elif status != 'PENDING':
            _LOGGER.error('Unknown alarm state %s', status)
        self._changed_by = hub.get_first("$.armState.name")

    def alarm_disarm(self, code=None):
        """Send disarm command."""
        set_arm_state('DISARMED', code)

    def alarm_arm_home(self, code=None):
        """Send arm home command."""
        set_arm_state('ARMED_HOME', code)

    def alarm_arm_away(self, code=None):
        """Send arm away command."""
        set_arm_state('ARMED_AWAY', code)
| mit |