import inspect
import utils
from activations import Identity
from baselayers import AbsLayer
from convolution import ConvLayer, DeConvLayer
from scanlayers import ScanConvLSTM, ScanLSTM
from simple import FullyConnectedLayer, ZeroLayer
class RecurrentLayer(AbsLayer):
"""
A reccurent layer consists of two applications of somewhat independant
layers: one that does an application like a feedforward, and the other
that does the application through time.
A rnn can also be used in mostly two fashion. It is either fed its own
output for the next time step or it computes a whole sequence. In case
1), we only need one theano scan which is outside what is actually just
a normal FeedForwardNetwork. In case 2), every single instance of an
rnn needs to have its own theano scan.
This class, with ScanLayer class, is intended to handle all these cases.
NOTE: It should be possible to use a non scanlayer for the time application,
in this case if no step is implemented, this class will call the fprop
of that layer.
NEW: Can now use without an upwardlayer defined.
"""
def __init__(self, scanlayer, upwardlayer=None, mode='auto', time_collapse=True):
assert mode in ['scan', 'out2in', 'auto']
self.mode = mode
self.scanlayer = scanlayer
self.upwardlayer = ZeroLayer(scanlayer.spatial_input_dims) \
if upwardlayer is None else upwardlayer
self.time_collapse = False \
if isinstance(upwardlayer, ZeroLayer) else time_collapse
@property
def prefix(self):
return self._prefix
@prefix.setter
def prefix(self, value):
self.upwardlayer.prefix = value
self.scanlayer.prefix = value
self._prefix = value
@property
def params(self):
return self.upwardlayer.params + self.scanlayer.params
@property
def input_dims(self):
return self.upwardlayer.input_dims
@property
def output_dims(self):
return self.scanlayer.output_dims
@property
def accepted_kwargs_fprop(self):
kwargs = super(RecurrentLayer, self).accepted_kwargs_fprop
kwargs.update(self.scanlayer.accepted_kwargs_fprop)
kwargs.update(self.upwardlayer.accepted_kwargs_fprop)
return kwargs
@property
def outputs_info(self):
return self.scanlayer.outputs_info
def get_outputs_info(self, *args):
return self.scanlayer.get_outputs_info(*args)
def set_attributes(self, attributes):
self.upwardlayer.set_attributes(attributes)
self.scanlayer.set_attributes(attributes)
def initialize(self, dims):
self.upwardlayer.initialize(dims)
self.scanlayer.initialize(self.upwardlayer.output_dims)
def set_io_dims(self, tup):
print "----------------> hallo? <-----------------"
self.upwardlayer.set_io_dims(tup)
self.scanlayer.set_io_dims(self.upwardlayer.output_dims)
def fprop(self, x=None, outputs_info=None, **kwargs):
"""
This fprop should deal with various setups. if x.ndim == 5 it is
pretty easy, every individual fprop of the rnn should handle this
case easily since the fprop through time has its own time
implementation.
if x.ndim == 4 now it gets funky. Are we inside a for loop or a
inside a theano scan?
Since a for loop is easier, lets consider the scan case and the for
loop user shall adapt. In this case kwargs should contain outputs_info
which IN THE SAME ORDER should correspond
to the reccurent state that the scanlayer.step is using.
"""
if x is None or isinstance(self.upwardlayer, ZeroLayer):
in_up = None
mode = 'out2in'
else:
# logic here is that if x.ndim is 2 or 4, x is in bc or bc01
# for 3 or 5 x is in tbc or tbc01. When t is here, you want to
# scan on the whole thing.
if self.mode == 'auto':
mode = 'scan' if x.ndim in [3, 5] else 'out2in'
else:
mode = self.mode
if x.ndim in [2, 4]:
assert mode == 'out2in'
if self.time_collapse and mode == 'scan':
# collapse batch and time together
in_up, xshp = utils.collapse_time_on_batch(x)
else:
in_up = x
# forward pass
h = self.upwardlayer.fprop(in_up, **kwargs)
        # Sketchy, but there is no obvious workaround: a scan step
        # function does not accept keyword arguments.
        self.scanlayer.deterministic = kwargs.pop('deterministic', False)
        if mode == 'out2in':
            if not hasattr(self.scanlayer, 'step'):
                # non-scan layers have no step; fall back to their fprop
                return self.scanlayer.fprop(h)
            # the outputs_info of the outside scan should contain the recurrent state
if outputs_info is None:
raise RuntimeError(
"There should be an outputs_info in fprop of "+self.prefix)
# parse the format correctly
            outputs_info = list(outputs_info) \
                if isinstance(outputs_info, (list, tuple)) else [outputs_info]
            # this call modifies outputs_info in the dict, but that should be fine
self.scanlayer.before_scan(h, axis=0, outputs_info=outputs_info)
args = tuple(self.scanlayer.scan_namespace['sequences'] + \
self.scanlayer.scan_namespace['outputs_info'] + \
self.scanlayer.scan_namespace['non_sequences'])
scanout = self.scanlayer.step(*args)
y = self.scanlayer.after_scan(scanout[0], scanout[1])
elif mode == 'scan':
kwargs.update({'outputs_info': outputs_info})
if self.time_collapse:
# reshape to org tensor ndim
h = utils.expand_time_from_batch(h, xshp)
y = self.scanlayer.apply(h, **kwargs)
return y
#TODO: Intercept Initialization keyword properly
# All the classes below are intended to add sugar and define default behaviors.
class TypicalReccurentLayer(RecurrentLayer):
"""
A typical rnn doesn't have two biases applications and applies batch norm
only in the time computation phase. Add more as it goes!
Don't worry about kwargs having things like batch_norm=True, it won't conflict
with RecurrentLayer as this class dosen't pass kwargs to RecurrentLayer.
"""
def __init__(self, *args, **kwargs):
mode = kwargs.pop('mode', 'auto')
time_collapse = kwargs.pop('time_collapse', True)
super(TypicalReccurentLayer, self).__init__(*args, mode=mode, time_collapse=time_collapse)
self.upwardlayer.use_bias = False
self.upwardlayer.batch_norm = False
self.upwardlayer.activation = Identity()
if isinstance(self.upwardlayer, ZeroLayer):
self.scanlayer.batch_norm_on_x = False
self.scanlayer.batch_norm = False
def popkwargs(self, upwardlayer, kwargs):
"""
A typical rnn do not want the kwargs SPECIFIC to the upwardlayer
to go into the constructor of the scanlayer. Everything else
for the whole rnn / both layers, will go through.
"""
if not hasattr(upwardlayer, '__init__'):
return kwargs
kwargs_upwardlayer = inspect.getargspec(upwardlayer.__init__)
for arg in kwargs_upwardlayer.args:
if kwargs.has_key(arg):
kwargs.pop(arg)
return kwargs
# generic LSTMs don't change dimensions in the recurrence
class LSTM(TypicalReccurentLayer):
"""
Generic LSTM class
REMINDER: Take care with those * 4
"""
def __init__(self, output_dims, input_dims=None, upward='default', time='default', **kwargs):
output_dims = utils.parse_tuple(output_dims)
scan_spatial_input_dims = (output_dims[0]*4,)+output_dims[1:]
if upward == 'default':
upward = FullyConnectedLayer(output_dims=scan_spatial_input_dims,
input_dims=input_dims, **kwargs)
        # a FullyConnectedLayer has no kwargs of its own to strip
        #kwargs = self.popkwargs(upward, kwargs)
        if time == 'default':
time = ScanLSTM(output_dims=output_dims, input_dims=output_dims,
spatial_input_dims=scan_spatial_input_dims, **kwargs)
super(LSTM, self).__init__(time, upward, **kwargs)
class ConvLSTM(TypicalReccurentLayer):
"""
Generic ConvLSTM class
REMINDER: Take care with those * 4
"""
def __init__(self, filter_size, num_filters,
time_filter_size=None, time_num_filters=None,
convupward='conv', convtime='conv', **kwargs):
if time_filter_size is None:
time_filter_size = utils.parse_tuple(filter_size, 2)
else:
time_filter_size = utils.parse_tuple(time_filter_size, 2)
        # the time application doesn't change the dimensions; this is
        # achieved with odd filter sizes and half padding
assert time_filter_size[0] % 2 == 1
if time_num_filters is None:
time_num_filters = num_filters
scan_spatial_input_dims = num_filters * 4
if convupward == 'conv':
convupward = ConvLayer(filter_size, scan_spatial_input_dims, **kwargs)
elif convupward == 'deconv':
convupward = DeConvLayer(filter_size, scan_spatial_input_dims, **kwargs)
kwargs = self.popkwargs(convupward, kwargs)
if convtime == 'conv':
convtime = ScanConvLSTM(time_filter_size, time_num_filters,
num_channels=num_filters,
spatial_input_dims=scan_spatial_input_dims, **kwargs)
super(ConvLSTM, self).__init__(convtime, convupward, **kwargs)
if __name__ == '__main__':
import theano
import theano.tensor as T
import numpy as np
from network import Feedforward
from activations import LeakyRectifier
from initializations import Initialization, Gaussian
config = {
'batch_norm' : True,
'use_bias' : True,
'gamma_scale' : 1.,
'activation' : LeakyRectifier(leak=0.4),
'initialization' : Initialization({'W' : Gaussian(std=0.05)}),
}
theano.config.compute_test_value = 'warn'
xnp = np.random.random((10,50,3,1,1)).astype(np.float32)
ftensor5 = T.TensorType('float32', (False,)*5)
x = ftensor5('x')
x.tag.test_value = xnp
layers = [
LSTM(output_dims=100, input_dims=(3,1,1))
]
ff = Feedforward(layers, 'lstm', **config)
ff.initialize()
y = ff.fprop(x)
#cost = y[-1].mean()
f = theano.function(inputs=[x], outputs=[y])
out = f(xnp)
print out[0].shape
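    # A hypothetical ConvLSTM variant of the same smoke test (not executed
    # here; assumes Feedforward accepts these layers the same way):
    #   conv_layers = [ConvLSTM(filter_size=3, num_filters=16)]
    #   conv_ff = Feedforward(conv_layers, 'convlstm', **config)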
# ----------------------------------------------------------------------
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
ElasticNetCV)
def test_sparse_coef():
""" Check that the sparse_coef propery works """
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.todense().tolist()[0], clf.coef_)
def test_normalize_option():
""" Check that the normalize option in enet works """
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
"""Check that the sparse lasso can handle zero data without crashing"""
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
"""Test ElasticNet for various values of alpha and l1_ratio with list X"""
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
with warnings.catch_warnings(record=True):
# catch warning about alpha=0.
# this is discouraged but should work.
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
"""Test ElasticNet for various values of alpha and l1_ratio with sparse
X"""
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
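# A quick illustration of the helper above (hypothetical call; shapes only):
#   X, y = make_sparse_data(n_samples=20, n_features=30, n_targets=2)
#   X.shape == (20, 30), sp.issparse(X) is True, y.shape == (20, 2)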
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples / 2:], X[:n_samples / 2]
y_train, y_test = y[n_samples / 2:], y[:n_samples / 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.todense(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
# check that warm restart leads to the same result with
# sparse and dense versions
rng = np.random.RandomState(seed=0)
coef_init = rng.randn(n_features)
d_clf.fit(X_train.todense(), y_train, coef_init=coef_init)
s_clf.fit(X_train, y_train, coef_init=coef_init)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples / 2:], X[:n_samples / 2]
y_train, y_test = y[n_samples / 2:], y[:n_samples / 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.todense(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap, eps = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_, estimator.eps_)
for k in xrange(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
assert_array_almost_equal(eps[k], estimator.eps_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
    clf.fit(X, y)  # check that the constructor parameters took effect
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
# ----------------------------------------------------------------------
import numpy as np
import scipy as sp
from scipy import stats
import chippr
from chippr import defaults as d
from chippr import utils as u
def mean(population):
"""
Calculates the mean of a population
Parameters
----------
population: np.array, float
population over which to calculate the mean
Returns
-------
mean: np.array, float
mean value over population
"""
shape = np.shape(population)
flat = population.reshape(np.prod(shape[:-1]), shape[-1])
mean = np.mean(flat, axis=0)
return mean
def norm_fit(population):
"""
Calculates the mean and standard deviation of a population
Parameters
----------
population: np.array, float
population over which to calculate the mean
Returns
-------
norm_stats: tuple, list, float
mean and standard deviation over population
"""
shape = np.shape(population)
flat = population.reshape(np.prod(shape[:-1]), shape[-1]).T
locs, scales = [], []
for k in range(shape[-1]):
(loc, scale) = sp.stats.norm.fit_loc_scale(flat[k])
locs.append(loc)
scales.append(scale)
locs = np.array(locs)
scales = np.array(scales)
norm_stats = (locs, scales)
return norm_stats
def calculate_kld(pe, qe, vb=True):
"""
Calculates the Kullback-Leibler Divergence between two PDFs.
Parameters
----------
pe: numpy.ndarray, float
probability distribution evaluated on a grid whose distance from `q`
will be calculated.
qe: numpy.ndarray, float
probability distribution evaluated on a grid whose distance to `p` will
be calculated.
vb: boolean
report on progress to stdout?
Returns
-------
Dpq: float
the value of the Kullback-Leibler Divergence from `q` to `p`
"""
# Normalize the evaluations, so that the integrals can be done
# (very approximately!) by simple summation:
pn = pe / np.sum(pe)
qn = qe / np.sum(qe)
# Compute the log of the normalized PDFs
logp = u.safe_log(pn)
logq = u.safe_log(qn)
# Calculate the KLD from q to p
Dpq = np.sum(pn * (logp - logq))
return Dpq
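# A minimal sanity check for calculate_kld, assuming two unit-variance
# Gaussians evaluated on a common grid (hypothetical values):
#   grid = np.linspace(-5., 5., 200)
#   p = sp.stats.norm.pdf(grid, 0., 1.)
#   q = sp.stats.norm.pdf(grid, 0.5, 1.)
#   calculate_kld(p, q)  # positive, and 0 only when p == q everywhere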
def calculate_rms(pe, qe, vb=True):
"""
Calculates the Root Mean Square Error between two PDFs.
Parameters
----------
pe: numpy.ndarray, float
probability distribution evaluated on a grid whose distance _from_ `q`
will be calculated.
qe: numpy.ndarray, float
probability distribution evaluated on a grid whose distance _to_ `p`
will be calculated.
vb: boolean
report on progress to stdout?
Returns
-------
rms: float
the value of the RMS error between `q` and `p`
"""
npoints = len(pe)
assert len(pe) == len(qe)
# Calculate the RMS between p and q
rms = np.sqrt(np.sum((pe - qe) ** 2) / npoints)
return rms
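# The analogous check for calculate_rms (same hypothetical p and q):
#   calculate_rms(p, q)  # unlike the KLD, symmetric under swapping p and q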
def single_parameter_gr_stat(chain):
"""
Calculates the Gelman-Rubin test statistic of convergence of an MCMC chain
over one parameter
Parameters
----------
chain: numpy.ndarray, float
single-parameter chain
Returns
-------
R_hat: float
potential scale reduction factor
"""
ssq = np.var(chain, axis=1, ddof=1)
W = np.mean(ssq, axis=0)
xb = np.mean(chain, axis=1)
xbb = np.mean(xb, axis=0)
m = chain.shape[0]
n = chain.shape[1]
B = n / (m - 1.) * np.sum((xbb - xb)**2., axis=0)
var_x = (n - 1.) / n * W + 1. / n * B
R_hat = np.sqrt(var_x / W)
return R_hat
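# Shape convention for the helper above: chain is (n_chains, n_iterations).
# A hypothetical check: well-mixed stationary chains give R_hat near 1:
#   chain = np.random.randn(4, 1000)
#   single_parameter_gr_stat(chain)  # ~ 1.0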
def multi_parameter_gr_stat(sample):
"""
Calculates the Gelman-Rubin test statistic of convergence of an MCMC chain
over multiple parameters
Parameters
----------
sample: numpy.ndarray, float
multi-parameter chain output
Returns
-------
Rs: numpy.ndarray, float
vector of the potential scale reduction factors
"""
dims = np.shape(sample)
(n_walkers, n_iterations, n_params) = dims
    n_burn_ins = n_iterations // 2
chain_ensemble = np.swapaxes(sample, 0, 1)
chain_ensemble = chain_ensemble[n_burn_ins:, :]
Rs = np.zeros((n_params))
for i in range(n_params):
chains = chain_ensemble[:, :, i].T
Rs[i] = single_parameter_gr_stat(chains)
return Rs
def gr_test(sample, threshold=d.gr_threshold):
"""
Performs the Gelman-Rubin test of convergence of an MCMC chain
Parameters
----------
sample: numpy.ndarray, float
chain output
threshold: float, optional
Gelman-Rubin test statistic criterion (usually around 1)
Returns
-------
test_result: boolean
        True if the chain is still burning in, False if post-burn-in
"""
gr = multi_parameter_gr_stat(sample)
print('Gelman-Rubin test statistic = '+str(gr))
test_result = np.max(gr) > threshold
return test_result
def cft(xtimes, lag):  # xtimes has ntimes elements
"""
Helper function to calculate autocorrelation time for chain of MCMC samples
Parameters
----------
xtimes: numpy.ndarray, float
single parameter values for a single walker over all iterations
lag: int
maximum lag time in number of iterations
Returns
-------
ans: numpy.ndarray, float
autocorrelation time for one time lag for one parameter of one walker
"""
lent = len(xtimes) - lag
    allt = range(lent)
ans = np.array([xtimes[t+lag] * xtimes[t] for t in allt])
return ans
def cf(xtimes):  # xtimes has ntimes elements
"""
Helper function to calculate autocorrelation time for chain of MCMC samples
Parameters
----------
xtimes: numpy.ndarray, float
single parameter values for a single walker over all iterations
Returns
-------
cf: numpy.ndarray, float
autocorrelation time over all time lags for one parameter of one walker
"""
cf0 = np.dot(xtimes, xtimes)
    allt = range(len(xtimes) // 2)
    cf = np.array([sum(cft(xtimes, lag)[len(xtimes) // 2:]) for lag in allt]) / cf0
return cf
def cfs(x, mode):  # x has nbins by ntimes elements
"""
Helper function for calculating autocorrelation time for MCMC chains
Parameters
----------
x: numpy.ndarray, float
input parameter values of length number of iterations by number of
walkers if mode='walkers' or dimension of parameters if mode='bins'
mode: string
'bins' for one autocorrelation time per parameter, 'walkers' for one
autocorrelation time per walker
Returns
-------
cfs: numpy.ndarray, float
autocorrelation times for all walkers if mode='walkers' or all
parameters if mode='bins'
"""
if mode == 'walkers':
xbinstimes = x
cfs = np.array([sum(cf(xtimes)) for xtimes in xbinstimes]) / len(xbinstimes)
if mode == 'bins':
xwalkerstimes = x
cfs = np.array([sum(cf(xtimes)) for xtimes in xwalkerstimes]) / len(xwalkerstimes)
return cfs
def acors(xtimeswalkersbins, mode='bins'):
"""
Calculates autocorrelation time for MCMC chains
Parameters
----------
xtimeswalkersbins: numpy.ndarray, float
emcee chain values of dimensions (n_iterations, n_walkers, n_parameters)
mode: string, optional
'bins' for one autocorrelation time per parameter, 'walkers' for one
autocorrelation time per walker
Returns
-------
taus: numpy.ndarray, float
autocorrelation times by bin or by walker depending on mode
"""
    if mode == 'walkers':
        # nwalkers by nbins by nsteps
        xwalkersbinstimes = np.swapaxes(xtimeswalkersbins, 1, 2)
        taus = np.array([1. + 2. * sum(cfs(xbinstimes, mode))
                         for xbinstimes in xwalkersbinstimes])
    if mode == 'bins':
        # nbins by nwalkers by nsteps
        xbinswalkerstimes = xtimeswalkersbins.T
        taus = np.array([1. + 2. * sum(cfs(xwalkerstimes, mode))
                         for xwalkerstimes in xbinswalkerstimes])
return taus
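# A hypothetical end-to-end usage; note that gr_test and acors expect
# different axis orders (see their docstrings):
#   sample = np.random.randn(10, 1000, 3)    # (n_walkers, n_iterations, n_params)
#   gr_test(sample)                          # False once the chains have converged
#   acors(np.swapaxes(sample, 0, 1))         # (n_iterations, n_walkers, n_params)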
# ----------------------------------------------------------------------
#Copyright (c) 2014, Ben Goodrich
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.#
#
#2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
#THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import select_funcs as sf
class layer(object):
def __init__(self,node_count,activation='squash',step_size=None,dropout=None,
momentum=None,maxnorm=None,use_float32=False,
select_func=None,initialization_scheme=None,nodes_per_group=None,
initialization_constant=None,sparse_penalty=None,sparse_target=None,
rms_prop_rate=None):
self.node_count = node_count
self.activation = activation
self.step_size = step_size
#tells the percentage of neurons to keep active
self.dropout = dropout
self.maxnorm = maxnorm
self.momentum = momentum
self.rms_prop_rate = rms_prop_rate
#function used to select neurons
#used for local winner take all, maxout, or your own selection function (k sparse autoencoders?)
if(activation == 'lwta'):
self.select_func = sf.lwta_select_func
elif(activation == 'maxout'):
self.select_func = sf.maxout_select_func
else:
self.select_func = select_func
self.selected_neurons = None;
self.nodes_per_group = nodes_per_group
#parameters related to the weight initilization scheme
self.initialization_scheme = initialization_scheme;
self.initialization_constant = initialization_constant;
#parameters related to sparse auto-encoder based on KL-divergence
self.sparse_penalty = sparse_penalty
self.sparse_target = sparse_target
self.mean_estimate_count = None
self.use_float32 = use_float32
class net(object):
def __init__(self,layer,step_size=None,dropout=None):
#set up input and output node counts
#NOTE: a maxout layer can have more outputs than there are inputs on the layer after
#This is due to the grouping of nodes before they are passed to the next layer
for i in range(1,len(layer)):
#If previous layer was maxout, then nodes must be grouped to get inputs for this layer.
if(layer[i-1].activation == 'maxout'):
layer[i].node_count_input = layer[i-1].node_count/layer[i-1].nodes_per_group
else:
layer[i].node_count_input = layer[i-1].node_count
layer[i].node_count_output = layer[i].node_count
#Store layers, but don't store first layer since it is simply the input layer
self.layer = layer[1:len(layer)]
#we may want to be able to quickly loop over the layer
#and know the index
for i in range(len(self.layer)):
self.layer[i].index = i
for l in self.layer:
if(step_size is not None and l.step_size is None):
l.step_size = step_size;
if(dropout is not None):
l.dropout = dropout
self.layer[len(self.layer)-1].dropout = None
self.initialize_weights()
self.zero_gradients()
#init momentum, and rmsprop
for l in self.layer:
if(l.momentum is not None):
l.vel = np.zeros(l.weights.shape,dtype=l.weights.dtype)
if(l.rms_prop_rate is not None):
l.mean_square_avg = np.ones(l.weights.shape,dtype=l.weights.dtype)
self.epoch_size = 0
self.train = True
def initialize_weights(self):
for index,l in enumerate(self.layer):
if(l.initialization_scheme == 'krizhevsky'):
#taken from
#'ImageNet Classification with Deep Convolutional Neural Networks'
                #Krizhevsky, Sutskever & Hinton
l.weights = np.random.normal(0.0,.01,[l.node_count_output+1, l.node_count_input+1])
l.weights[:,-1] = 1.0
elif(l.initialization_scheme == 'glorot'):
#taken from
#'Understanding the difficulty of training deep feedforward neural networks'
#Xavier Glorot, Yoshua Bengio
C = np.sqrt(6)/np.sqrt(l.node_count_output + l.node_count_input + 1)
if(l.initialization_constant is not None):
C = C*l.initialization_constant
l.weights = C*2*(np.random.random([l.node_count_output+1, l.node_count_input+1]) - 0.5)
#a large bias weight for LWTA and Maxout can make a unit win the max too often
                #as described in "An Empirical Investigation of Catastrophic Forgetting in Neural Networks"
#We set bias weights to 0 for these types of nets
if(l.activation == 'lwta' or l.activation == 'maxout'):
l.weights[:,-1] = 0.0
elif(l.initialization_scheme == 'scawi'):
#taken from
#Statistically Controlled Weight Initialization (SCAWI)
#Gian Paolo Drago and Sandro Ridella
#there is a slight modification to the formula used for the
#first layer
if index == 0:
C = 1.3/np.sqrt(1 + (l.node_count_input+1)*0.5 )
else:
C = 1.3/np.sqrt(1 + (l.node_count_input+1)*0.3 )
#the bottom row is the weights for the bias neuron
# -- this neuron output is always set to 1.0 and these weights are essentially ignored
l.weights = C*2*(np.random.random([l.node_count_output+1, l.node_count_input+1]) - 0.5)
if(l.activation == 'lwta' or l.activation == 'maxout'):
l.weights[:,-1] = 0.0
elif(l.initialization_scheme == 'prelu'):
#taken from "Delving Deep into Rectifiers: Surpassing Human-Level Performance on
#ImageNet Classification"
if(index == 0):
std = np.sqrt(1./l.node_count_input)
else:
std = np.sqrt(2./l.node_count_input)
l.weights = np.random.normal(0.0,std,[l.node_count_output+1, l.node_count_input+1])
l.weights[:,-1] = 0.0
else:
                #by default, use the method promoted in LeCun's Efficient BackProp paper
C = 1/np.sqrt(l.node_count_input+1)
l.weights = C*2*(np.random.random([l.node_count_output+1, l.node_count_input+1]) - 0.5)
#a large bias weight for LWTA and Maxout can make a unit win the max too often
                #as described in "An Empirical Investigation of Catastrophic Forgetting in Neural Networks"
#We set bias weights to 0 for these types of nets
if(l.activation == 'lwta' or l.activation == 'maxout'):
l.weights[:,-1] = 0.0
if(l.use_float32):
l.weights = np.asarray(l.weights,np.float32)
def zero_gradients(self):
for l in self.layer:
l.gradient = np.zeros(l.weights.shape,dtype=l.weights.dtype)
@property
def input(self):
return self._input
@input.setter
def input(self,value):
self._input = value
self._input = np.append(self._input,np.ones((1,self._input.shape[1]),dtype=value.dtype),axis=0)
@input.deleter
def input(self):
del self._input
def feed_forward(self,input=None):
#optionally allow passing input as an argument
if input is not None:
self.input = input
for index,l in enumerate(self.layer):
if(index == 0):
input = self._input
else:
input = self.layer[index-1].output
l.input = input
l.weighted_sums = np.dot(l.weights,l.input)
#apply activation function
if(l.activation == 'squash'):
l.output = l.weighted_sums / (1+np.abs(l.weighted_sums))
elif(l.activation == 'sigmoid'):
l.output = 1/(1 + np.exp(-1*l.weighted_sums))
elif(l.activation == 'tanh'):
l.output = 1.7159*np.tanh((2.0/3.0)*l.weighted_sums)
elif(l.activation == 'linear_rectifier'):
l.output = np.maximum(0,l.weighted_sums)
elif(l.activation == 'softmax'):
l.output = np.exp(l.weighted_sums)
#ignore bottom row in the summation since it does not represent any class at all
l.output = l.output/np.sum(l.output[0:-1,:],axis=0)
else: #base case is linear
l.output = l.weighted_sums
if(l.sparse_penalty is not None):
#first pass - compute the mean
#every other pass - maintain moving average
if(l.mean_estimate_count is None):
l.mean_estimate = np.mean(l.output,axis=1)
l.mean_estimate_count = 0
else:
l.mean_estimate = 0.99*l.mean_estimate + .01*np.mean(l.output,axis=1);
l.mean_estimate_count = l.mean_estimate_count + 1;
if(l.select_func is not None):
l.select_func(l);
if(l.dropout is not None and self.train == True):
#Multiple code paths to optimize for speed. The common case for dropout is to use
#with rectified linear activations. In that case we do not need to save
#l.d_selected. l.d_selected is saved to allow gradients to be ignored for weights
#that were dropped out. linear rectified gradients are ignored anyway (if the
#output is 0).
if(l.activation == 'linear_rectifier'):
if(l.dropout == 0.5): #randint is slightly faster, and 0.5 is a common case
l.output = l.output*(np.random.randint(0,2,l.output.shape).astype(np.float32));
else:
l.output = l.output*(np.random.binomial(1,(1 - l.dropout),l.output.shape).astype(np.float32));
else:
if(l.dropout == 0.5):
l.d_selected = np.random.randint(0,2,l.output.shape).astype(np.float32);
l.output = l.output*l.d_selected
else:
l.d_selected = np.random.binomial(1,(1 - l.dropout),l.output.shape).astype(np.float32);
l.output = l.output*l.d_selected
elif(l.dropout is not None and self.train == False):
l.output = l.output*(1.0 - l.dropout);
#one row in output is bias, set it to 1
#note that bias 'input' is enabled even if dropout disabled it.
l.output[-1,:] = 1.0
#ignore last row for network output
self.output = self.layer[len(self.layer)-1].output[0:-1,:]
def back_propagate(self,error=None):
if(error is not None):
self.error = error
for l in reversed(self.layer):
#if this is the last layer
if(l.index == len(self.layer)-1):
#must do this to account for the bias
delta_temp = np.append(self.error,np.zeros((1,self.error.shape[1]),dtype=self.error.dtype),axis=0)
else:
delta_temp = np.dot(self.layer[l.index+1].weights.transpose(),self.layer[l.index+1].delta);
            if(l.activation == 'squash'):
                #d/dx [x/(1+|x|)] = 1/(1+|x|)**2
                l.activation_derivative = 1.0/((1+np.abs(l.weighted_sums))**2)
elif(l.activation == 'sigmoid'):
l.activation_derivative = l.output*(1 - l.output);
elif(l.activation == 'tanh'):
#l.activation_derivative = ((2.0/3.0)/1.7159)*(1.7159**2 - l.output**2)
l.activation_derivative = 0.3885230297025856*(2.94431281 - l.output*l.output)
elif(l.activation == 'linear_rectifier'):
#1 if greater than 0, 0 otherwise.
#This stores as bools
l.activation_derivative = np.greater(l.output,0);
else: #base case is linear or softmax (also applies to lwta and maxout)
l.activation_derivative = np.ones(l.output.shape,dtype=l.output.dtype);
#bottom row of activation derivative is the bias 'neuron'
            #its derivative is always 0
l.activation_derivative[-1,:] = 0.0
#add sparsity error to delta
#from http://ufldl.stanford.edu/wiki/index.php/Autoencoders_and_Sparsity
if(l.sparse_penalty is not None):
sparse_error = l.sparse_penalty*(-l.sparse_target/l.mean_estimate + (1.0 - l.sparse_target)/(1.0 - l.mean_estimate))
delta_temp = delta_temp + sparse_error[:,np.newaxis]
l.delta = l.activation_derivative*delta_temp;
#ignore deltas for weights that were dropped out.
if(l.dropout is not None and self.train == True):
#This is simply an optimization for speed. If we have rectified linear activations
#then dropout makes the activation be 0. gradients should be ignored anyway for
#0 activations. That gets rid of the need to do the below multiply.
if(l.activation != 'linear_rectifier'):
l.delta = l.delta*l.d_selected
#For maxout networks, we have a smaller weight matrix which means delta will be smaller than it should be
#It must be enlarged here (via np.repeat). The path that the gradient takes is accounted for in l.selected_neurons
#Bias neuron is removed then reinserted via np.append
if(l.activation == 'maxout'):
l.delta = np.repeat(l.delta[0:-1],l.nodes_per_group,axis=0)
l.delta = np.append(l.delta,np.ones((1,l.delta.shape[1]),l.weights.dtype),axis=0)
#zero out any deltas for neurons that were selected
#note: "selected" means the neuron was selected for being deactivated.
if(l.selected_neurons is not None):
l.delta[l.selected_neurons] = 0;
l.selected_neurons = None;
#calculate weight gradient
l.gradient = l.gradient + np.dot(l.delta,l.input.transpose());
self.epoch_size = self.epoch_size + self._input.shape[1];
def update_weights(self):
#Prevent calling update_weights() without calling back_propagate first
#(with a non-empty vector) from crashing.
if(self.epoch_size == 0):
return;
for l in reversed(self.layer):
if(l.rms_prop_rate is not None):
l.mean_square_avg = l.rms_prop_rate*l.mean_square_avg + (1.0 - l.rms_prop_rate)*(l.gradient**2)
l.gradient = l.gradient/(np.sqrt(l.mean_square_avg))
l.weight_change = -l.step_size*l.gradient/self.epoch_size;
if(l.momentum is not None):
l.vel = l.momentum*l.vel + l.weight_change
l.weight_change = l.vel
l.weights = l.weights + l.weight_change;
if(l.maxnorm is not None):
weight_norm = np.sum(l.weights**2,axis=0)**0.5
condition = weight_norm > l.maxnorm
l.weights = l.maxnorm*(l.weights/weight_norm)*condition + l.weights*(1 - condition)
l.gradient = np.zeros(l.weights.shape,dtype=l.weights.dtype);
self.epoch_size = 0;
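if __name__ == '__main__':
    # A minimal smoke test of the classes above; a sketch only, with
    # arbitrary sizes (the first layer is the input layer and carries no
    # weights). The output delta assumes a softmax output layer trained
    # with cross-entropy, so it is simply (output - target).
    layers = [layer(4),
              layer(8, activation='tanh', step_size=0.01),
              layer(3, activation='softmax', step_size=0.01)]
    nn = net(layers)
    x = np.random.random((4, 5))            # 4 features, 5 samples
    t = np.zeros((3, 5))
    t[0, :] = 1.0                           # one-hot targets
    nn.feed_forward(x)
    nn.back_propagate(nn.output - t)
    nn.update_weights()
    print(nn.output.shape)                  # (3, 5)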
# ----------------------------------------------------------------------
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.cred}, now with 30% more starch.
"""
import hmac
from zope.interface import implements, Interface
from twisted.trial import unittest
from twisted.cred import portal, checkers, credentials, error
from twisted.python import components
from twisted.internet import defer
from twisted.internet.defer import deferredGenerator as dG, waitForDeferred as wFD
try:
from crypt import crypt
except ImportError:
crypt = None
try:
from twisted.cred.pamauth import callIntoPAM
except ImportError:
pamauth = None
else:
from twisted.cred import pamauth
class ITestable(Interface):
pass
class TestAvatar:
def __init__(self, name):
self.name = name
self.loggedIn = False
self.loggedOut = False
def login(self):
assert not self.loggedIn
self.loggedIn = True
def logout(self):
self.loggedOut = True
class Testable(components.Adapter):
implements(ITestable)
# components.Interface(TestAvatar).adaptWith(Testable, ITestable)
components.registerAdapter(Testable, TestAvatar, ITestable)
class IDerivedCredentials(credentials.IUsernamePassword):
pass
class DerivedCredentials(object):
implements(IDerivedCredentials, ITestable)
def __init__(self, username, password):
self.username = username
self.password = password
def checkPassword(self, password):
return password == self.password
class TestRealm:
implements(portal.IRealm)
def __init__(self):
self.avatars = {}
def requestAvatar(self, avatarId, mind, *interfaces):
if self.avatars.has_key(avatarId):
avatar = self.avatars[avatarId]
else:
avatar = TestAvatar(avatarId)
self.avatars[avatarId] = avatar
avatar.login()
return (interfaces[0], interfaces[0](avatar),
avatar.logout)
class NewCredTest(unittest.TestCase):
def setUp(self):
r = self.realm = TestRealm()
p = self.portal = portal.Portal(r)
up = self.checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
up.addUser("bob", "hello")
p.registerChecker(up)
def testListCheckers(self):
expected = [credentials.IUsernamePassword, credentials.IUsernameHashedPassword]
got = self.portal.listCredentialsInterfaces()
expected.sort()
got.sort()
self.assertEqual(got, expected)
def testBasicLogin(self):
l = []; f = []
self.portal.login(credentials.UsernamePassword("bob", "hello"),
self, ITestable).addCallback(
l.append).addErrback(f.append)
if f:
raise f[0]
# print l[0].getBriefTraceback()
iface, impl, logout = l[0]
# whitebox
self.assertEqual(iface, ITestable)
self.failUnless(iface.providedBy(impl),
"%s does not implement %s" % (impl, iface))
# greybox
self.failUnless(impl.original.loggedIn)
self.failUnless(not impl.original.loggedOut)
logout()
self.failUnless(impl.original.loggedOut)
def test_derivedInterface(self):
"""
Login with credentials implementing an interface inheriting from an
interface registered with a checker (but not itself registered).
"""
l = []
f = []
self.portal.login(DerivedCredentials("bob", "hello"), self, ITestable
).addCallback(l.append
).addErrback(f.append)
if f:
raise f[0]
iface, impl, logout = l[0]
# whitebox
self.assertEqual(iface, ITestable)
self.failUnless(iface.providedBy(impl),
"%s does not implement %s" % (impl, iface))
# greybox
self.failUnless(impl.original.loggedIn)
self.failUnless(not impl.original.loggedOut)
logout()
self.failUnless(impl.original.loggedOut)
def testFailedLogin(self):
l = []
self.portal.login(credentials.UsernamePassword("bob", "h3llo"),
self, ITestable).addErrback(
lambda x: x.trap(error.UnauthorizedLogin)).addCallback(l.append)
self.failUnless(l)
self.assertEqual(error.UnauthorizedLogin, l[0])
def testFailedLoginName(self):
l = []
self.portal.login(credentials.UsernamePassword("jay", "hello"),
self, ITestable).addErrback(
lambda x: x.trap(error.UnauthorizedLogin)).addCallback(l.append)
self.failUnless(l)
self.assertEqual(error.UnauthorizedLogin, l[0])
class CramMD5CredentialsTestCase(unittest.TestCase):
def testIdempotentChallenge(self):
c = credentials.CramMD5Credentials()
chal = c.getChallenge()
self.assertEqual(chal, c.getChallenge())
def testCheckPassword(self):
c = credentials.CramMD5Credentials()
chal = c.getChallenge()
c.response = hmac.HMAC('secret', chal).hexdigest()
self.failUnless(c.checkPassword('secret'))
def testWrongPassword(self):
c = credentials.CramMD5Credentials()
self.failIf(c.checkPassword('secret'))
class OnDiskDatabaseTestCase(unittest.TestCase):
users = [
('user1', 'pass1'),
('user2', 'pass2'),
('user3', 'pass3'),
]
def testUserLookup(self):
dbfile = self.mktemp()
db = checkers.FilePasswordDB(dbfile)
f = file(dbfile, 'w')
for (u, p) in self.users:
f.write('%s:%s\n' % (u, p))
f.close()
for (u, p) in self.users:
self.failUnlessRaises(KeyError, db.getUser, u.upper())
self.assertEqual(db.getUser(u), (u, p))
def testCaseInSensitivity(self):
dbfile = self.mktemp()
db = checkers.FilePasswordDB(dbfile, caseSensitive=0)
f = file(dbfile, 'w')
for (u, p) in self.users:
f.write('%s:%s\n' % (u, p))
f.close()
for (u, p) in self.users:
self.assertEqual(db.getUser(u.upper()), (u, p))
def testRequestAvatarId(self):
dbfile = self.mktemp()
db = checkers.FilePasswordDB(dbfile, caseSensitive=0)
f = file(dbfile, 'w')
for (u, p) in self.users:
f.write('%s:%s\n' % (u, p))
f.close()
creds = [credentials.UsernamePassword(u, p) for u, p in self.users]
d = defer.gatherResults(
[defer.maybeDeferred(db.requestAvatarId, c) for c in creds])
d.addCallback(self.assertEqual, [u for u, p in self.users])
return d
def testRequestAvatarId_hashed(self):
dbfile = self.mktemp()
db = checkers.FilePasswordDB(dbfile, caseSensitive=0)
f = file(dbfile, 'w')
for (u, p) in self.users:
f.write('%s:%s\n' % (u, p))
f.close()
creds = [credentials.UsernameHashedPassword(u, p) for u, p in self.users]
d = defer.gatherResults(
[defer.maybeDeferred(db.requestAvatarId, c) for c in creds])
d.addCallback(self.assertEqual, [u for u, p in self.users])
return d
class HashedPasswordOnDiskDatabaseTestCase(unittest.TestCase):
users = [
('user1', 'pass1'),
('user2', 'pass2'),
('user3', 'pass3'),
]
def hash(self, u, p, s):
return crypt(p, s)
def setUp(self):
dbfile = self.mktemp()
self.db = checkers.FilePasswordDB(dbfile, hash=self.hash)
f = file(dbfile, 'w')
for (u, p) in self.users:
f.write('%s:%s\n' % (u, crypt(p, u[:2])))
f.close()
r = TestRealm()
self.port = portal.Portal(r)
self.port.registerChecker(self.db)
def testGoodCredentials(self):
goodCreds = [credentials.UsernamePassword(u, p) for u, p in self.users]
d = defer.gatherResults([self.db.requestAvatarId(c) for c in goodCreds])
d.addCallback(self.assertEqual, [u for u, p in self.users])
return d
def testGoodCredentials_login(self):
goodCreds = [credentials.UsernamePassword(u, p) for u, p in self.users]
d = defer.gatherResults([self.port.login(c, None, ITestable)
for c in goodCreds])
d.addCallback(lambda x: [a.original.name for i, a, l in x])
d.addCallback(self.assertEqual, [u for u, p in self.users])
return d
def testBadCredentials(self):
badCreds = [credentials.UsernamePassword(u, 'wrong password')
for u, p in self.users]
d = defer.DeferredList([self.port.login(c, None, ITestable)
for c in badCreds], consumeErrors=True)
d.addCallback(self._assertFailures, error.UnauthorizedLogin)
return d
def testHashedCredentials(self):
hashedCreds = [credentials.UsernameHashedPassword(u, crypt(p, u[:2]))
for u, p in self.users]
d = defer.DeferredList([self.port.login(c, None, ITestable)
for c in hashedCreds], consumeErrors=True)
d.addCallback(self._assertFailures, error.UnhandledCredentials)
return d
def _assertFailures(self, failures, *expectedFailures):
for flag, failure in failures:
self.assertEqual(flag, defer.FAILURE)
failure.trap(*expectedFailures)
return None
if crypt is None:
skip = "crypt module not available"
class PluggableAuthenticationModulesTest(unittest.TestCase):
def setUp(self):
"""
Replace L{pamauth.callIntoPAM} with a dummy implementation with
easily-controlled behavior.
"""
self._oldCallIntoPAM = pamauth.callIntoPAM
pamauth.callIntoPAM = self.callIntoPAM
def tearDown(self):
"""
Restore the original value of L{pamauth.callIntoPAM}.
"""
pamauth.callIntoPAM = self._oldCallIntoPAM
def callIntoPAM(self, service, user, conv):
if service != 'Twisted':
raise error.UnauthorizedLogin('bad service: %s' % service)
if user != 'testuser':
raise error.UnauthorizedLogin('bad username: %s' % user)
questions = [
(1, "Password"),
(2, "Message w/ Input"),
(3, "Message w/o Input"),
]
replies = conv(questions)
if replies != [
("password", 0),
("entry", 0),
("", 0)
]:
raise error.UnauthorizedLogin('bad conversion: %s' % repr(replies))
return 1
def _makeConv(self, d):
def conv(questions):
return defer.succeed([(d[t], 0) for t, q in questions])
return conv
def testRequestAvatarId(self):
db = checkers.PluggableAuthenticationModulesChecker()
conv = self._makeConv({1:'password', 2:'entry', 3:''})
creds = credentials.PluggableAuthenticationModules('testuser',
conv)
d = db.requestAvatarId(creds)
d.addCallback(self.assertEqual, 'testuser')
return d
def testBadCredentials(self):
db = checkers.PluggableAuthenticationModulesChecker()
conv = self._makeConv({1:'', 2:'', 3:''})
creds = credentials.PluggableAuthenticationModules('testuser',
conv)
d = db.requestAvatarId(creds)
self.assertFailure(d, error.UnauthorizedLogin)
return d
def testBadUsername(self):
db = checkers.PluggableAuthenticationModulesChecker()
conv = self._makeConv({1:'password', 2:'entry', 3:''})
creds = credentials.PluggableAuthenticationModules('baduser',
conv)
d = db.requestAvatarId(creds)
self.assertFailure(d, error.UnauthorizedLogin)
return d
if not pamauth:
skip = "Can't run without PyPAM"
class CheckersMixin:
def testPositive(self):
for chk in self.getCheckers():
for (cred, avatarId) in self.getGoodCredentials():
r = wFD(chk.requestAvatarId(cred))
yield r
self.assertEqual(r.getResult(), avatarId)
testPositive = dG(testPositive)
def testNegative(self):
for chk in self.getCheckers():
for cred in self.getBadCredentials():
r = wFD(chk.requestAvatarId(cred))
yield r
self.assertRaises(error.UnauthorizedLogin, r.getResult)
testNegative = dG(testNegative)
class HashlessFilePasswordDBMixin:
credClass = credentials.UsernamePassword
diskHash = None
networkHash = staticmethod(lambda x: x)
_validCredentials = [
('user1', 'password1'),
('user2', 'password2'),
('user3', 'password3')]
def getGoodCredentials(self):
for u, p in self._validCredentials:
yield self.credClass(u, self.networkHash(p)), u
def getBadCredentials(self):
for u, p in [('user1', 'password3'),
('user2', 'password1'),
('bloof', 'blarf')]:
yield self.credClass(u, self.networkHash(p))
def getCheckers(self):
diskHash = self.diskHash or (lambda x: x)
hashCheck = self.diskHash and (lambda username, password, stored: self.diskHash(password))
for cache in True, False:
fn = self.mktemp()
fObj = file(fn, 'w')
for u, p in self._validCredentials:
fObj.write('%s:%s\n' % (u, diskHash(p)))
fObj.close()
yield checkers.FilePasswordDB(fn, cache=cache, hash=hashCheck)
fn = self.mktemp()
fObj = file(fn, 'w')
for u, p in self._validCredentials:
fObj.write('%s dingle dongle %s\n' % (diskHash(p), u))
fObj.close()
yield checkers.FilePasswordDB(fn, ' ', 3, 0, cache=cache, hash=hashCheck)
fn = self.mktemp()
fObj = file(fn, 'w')
for u, p in self._validCredentials:
fObj.write('zip,zap,%s,zup,%s\n' % (u.title(), diskHash(p)))
fObj.close()
yield checkers.FilePasswordDB(fn, ',', 2, 4, False, cache=cache, hash=hashCheck)
class LocallyHashedFilePasswordDBMixin(HashlessFilePasswordDBMixin):
diskHash = staticmethod(lambda x: x.encode('hex'))
class NetworkHashedFilePasswordDBMixin(HashlessFilePasswordDBMixin):
networkHash = staticmethod(lambda x: x.encode('hex'))
class credClass(credentials.UsernameHashedPassword):
def checkPassword(self, password):
return self.hashed.decode('hex') == password
class HashlessFilePasswordDBCheckerTestCase(HashlessFilePasswordDBMixin, CheckersMixin, unittest.TestCase):
pass
class LocallyHashedFilePasswordDBCheckerTestCase(LocallyHashedFilePasswordDBMixin, CheckersMixin, unittest.TestCase):
pass
class NetworkHashedFilePasswordDBCheckerTestCase(NetworkHashedFilePasswordDBMixin, CheckersMixin, unittest.TestCase):
pass
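# These cases are collected by Twisted's trial runner; hypothetically, with
# this module importable as test_newcred:
#   $ trial test_newcred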
# ----------------------------------------------------------------------
# Copyright 2011 Justin Santa Barbara
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a fake image service."""
import copy
import datetime
import mock
import uuid
from cinder import exception
import cinder.image.glance
from cinder.tests.unit import fake_constants
class _FakeImageService(object):
"""Mock (fake) image service for unit testing."""
def __init__(self):
self.images = {}
# NOTE(justinsb): The OpenStack API can't upload an image?
# So, make sure we've got one..
timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
image1 = {'id': fake_constants.IMAGE_ID,
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'private',
'protected': False,
'container_format': 'raw',
'disk_format': 'raw',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64'},
'size': 12345678}
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'},
'size': 1}
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': None,
'disk_format': None,
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'},
'size': 1000000000000}
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'},
'size': 20000000}
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {
'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None},
'size': 50000}
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
'name': 'fakeimage6',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64',
'auto_disk_config': 'False'},
'size': 7777777}
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
'name': 'fakeimage7',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64',
'auto_disk_config': 'True'},
'size': 1234000000}
image8 = {'id': 'b0a599e0-41d7-3582-b260-769f443c862a',
'name': 'fakeimage8',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'bare',
'disk_format': 'raw',
'properties':
{'block_device_mapping': [
{'boot_index': 0, 'source_type': 'snapshot',
'snapshot_id': fake_constants.SNAPSHOT_ID}],
'ramdisk_id': 'nokernel',
'architecture': 'x86_64',
'auto_disk_config': 'True'}}
self.create(None, image1)
self.create(None, image2)
self.create(None, image3)
self.create(None, image4)
self.create(None, image5)
self.create(None, image6)
self.create(None, image7)
self.create(None, image8)
self._imagedata = {}
self.temp_images = mock.MagicMock()
super(_FakeImageService, self).__init__()
# TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
def detail(self, context, **kwargs):
"""Return list of detailed image information."""
return copy.deepcopy(self.images.values())
def download(self, context, image_id, data):
self.show(context, image_id)
data.write(self._imagedata.get(image_id, ''))
def show(self, context, image_id):
"""Get data about specified image.
Returns a dict containing image data for the given opaque image id.
"""
image = self.images.get(str(image_id))
if image:
return copy.deepcopy(image)
raise exception.ImageNotFound(image_id=image_id)
def create(self, context, metadata, data=None):
"""Store the image data and return the new image id.
        :raises Duplicate: if the image already exists.
"""
image_id = str(metadata.get('id', uuid.uuid4()))
metadata['id'] = image_id
if image_id in self.images:
raise exception.Duplicate()
self.images[image_id] = copy.deepcopy(metadata)
if data:
self._imagedata[image_id] = data.read()
return self.images[image_id]
def update(self, context, image_id, metadata, data=None,
purge_props=False):
"""Replace the contents of the given image with the new data.
:raises ImageNotFound: if the image does not exist.
"""
if not self.images.get(image_id):
raise exception.ImageNotFound(image_id=image_id)
if purge_props:
self.images[image_id] = copy.deepcopy(metadata)
else:
image = self.images[image_id]
try:
image['properties'].update(metadata.pop('properties'))
except Exception:
pass
image.update(metadata)
return self.images[image_id]
def delete(self, context, image_id):
"""Delete the given image.
:raises ImageNotFound: if the image does not exist.
"""
removed = self.images.pop(image_id, None)
if not removed:
raise exception.ImageNotFound(image_id=image_id)
def get_location(self, context, image_id):
if image_id in self.images:
return 'fake_location'
return None
def add_location(self, context, image_id, url, metadata):
self.update(context, image_id, {'locations': [{'url': url,
'metadata': metadata}]})
return True
_fakeImageService = _FakeImageService()
def FakeImageService():
return _fakeImageService
def FakeImageService_reset():
global _fakeImageService
_fakeImageService = _FakeImageService()
def mock_image_service(testcase):
testcase.mock_object(cinder.image.glance, 'get_remote_image_service',
lambda x, y: (FakeImageService(), y))
testcase.mock_object(cinder.image.glance, 'get_default_image_service',
mock.Mock(side_effect=FakeImageService))
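# A hypothetical usage sketch inside a cinder unit test (the test-case and
# method names here are assumptions):
#   def setUp(self):
#       super(MyVolumeTest, self).setUp()
#       mock_image_service(self)
#       self.image_service = FakeImageService()
#       self.addCleanup(FakeImageService_reset)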
# ----------------------------------------------------------------------
# Copyright (C) 2018 Zhixian MA <zx@mazhixian.me>
import os
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from scipy.misc import imsave
class printException(Exception):
"""
Ref: http://blog.csdn.net/kwsy2008/article/details/48468345
"""
pass
class LevelSet():
"""
Piecewise constant levelset
Inputs
======
imgshape: tuple
shape of the image to be segmented
mu: float
        coefficient of the boundary
nu: float
coefficient of the segmented region
lambda1: float
coefficient of the internal region
lambda2: float
coefficient of the external region
dt: float
time interval
Reference
=========
[1] Getreuer. P., "Chan-Vese Segmentation"
http://dx.doi.org/10.5201/ipol.2012.g-cv
"""
def __init__(self, imgshape, mu=1.0, nu=1.0,
lambda1=1.0, lambda2=1.0, dt=0.1,
init_mode=None, radius=None):
"""
The initializer
"""
self.imgshape = imgshape
self.mu = mu
self.nu = nu
self.lambda1 = lambda1
self.lambda2 = lambda2
self.dt = dt
        self.yita = 1e-8  # A small constant to avoid division by zero
self.init_mode = init_mode
self.radius = radius
# Init phi
if self.init_mode is None:
self.initPhi()
elif self.init_mode == "cir":
self.initPhi_cir(radius=self.radius)
else:
raise printException("InitModeError")
def initPhi_cir(self, radius=None):
"""
Init the phi function, i.e., the level set, circle case
"""
rows,cols = self.imgshape
if radius is None:
radius = min(rows, cols) // 4
# Init
self.phi = np.ones((rows, cols))
y = np.arange(-rows//2, rows//2)
x = np.arange(-cols//2, cols//2)
X,Y = np.meshgrid(x,y)
z = np.sqrt(X**2+Y**2)
id_row,id_col = np.where(z > radius)
self.phi[id_row, id_col] = -1
def initPhi(self):
"""
Init the phi function, i.e., the level set
Reference
=========
        [1] Getreuer, P., "Chan-Vese Segmentation"
http://dx.doi.org/10.5201/ipol.2012.g-cv
"""
rows,cols = self.imgshape
# Init
x = np.arange(0, cols, 1)
y = np.arange(0, rows, 1)
X, Y = np.meshgrid(x,y)
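        # The product of sines below yields a checkerboard pattern of zero
        # crossings, so the initial contour is spread across the whole image;
        # Getreuer [1] recommends this initialization because it tends to
        # converge faster than a single seed region.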
self.phi = np.sin(X*np.pi/5.0) * np.sin(Y*np.pi/5.0)
def getNormalization(self, img, logflag=False):
"""Normalize the image into [0.,1.0]"""
if logflag:
img = np.log10(img - img.min() + 1e-5)
img_max = img.max()
img_min = img.min()
return (img - img_min) / (img_max - img_min)
def calcCentroids(self, img):
"""Calculate centroids of the internal and external regions
segmented by the levelset function.
"""
idx_c1r, idx_c1c = np.where(self.phi > 0)
idx_c2r, idx_c2c = np.where(self.phi < 0)
        c1 = np.sum(img[idx_c1r, idx_c1c]) / (len(idx_c1r)+self.yita)
        c2 = np.sum(img[idx_c2r, idx_c2c]) / (len(idx_c2r)+self.yita)
return c1,c2
def calcSegmentation(self, img, niter=100, phi_total=1.0,
normflag=True, logflag=False, snappath=None):
"""Do segmentation"""
if normflag:
img = self.getNormalization(img, logflag=logflag)
        # calculate the region centroids as constants
self.c1, self.c2 = self.calcCentroids(img)
        # snappath (may be None, in which case no snapshots are written)
        if snappath is not None and not os.path.exists(snappath):
            os.mkdir(snappath)
# Iterate to optimize phi
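        # Each pass below performs one semi-implicit gradient-descent step of
        # the Chan-Vese energy: Delta = dt * delta_eps(phi), where the
        # smoothed Dirac delta is delta_eps(phi) = 1 / (pi * (1 + phi^2)).
        # The IDiv* terms approximate 1/|grad(phi)| on the four neighbour
        # edges, and phi is updated in closed form by solving the linearized
        # equation
        #   phi_new = (phi + Delta*(mu*sum(phi_nb*IDiv) - nu
        #              - lambda1*dist1 + lambda2*dist2))
        #             / (1 + Delta*mu*sum(IDiv))
        # as in Getreuer [1].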
for it in range(niter):
phidiffnorm = 0.0
for j in range(self.imgshape[0]):
# top margin
if j == 0:
idu = 0
else:
idu = -1
# bottom margin
if j == self.imgshape[0] - 1:
idd = 0
else:
idd = 1
for i in range(self.imgshape[1]):
# left margin
if i == 0:
idl = 0
else:
idl = -1
# right margin
if i == self.imgshape[1]-1:
idr = 0
else:
idr = 1
# main body
Delta = self.dt/(np.pi*(1+self.phi[j,i]*self.phi[j,i]))
phi_x = self.phi[j,i+idr]-self.phi[j,i]
phi_y = (self.phi[j+idd,i]-self.phi[j+idu,i])/2.0
IDivR = 1.0/np.sqrt(self.yita+phi_x**2+phi_y**2)
phi_x = self.phi[j,i]-self.phi[j,i+idl]
IDivL = 1.0/np.sqrt(self.yita+phi_x**2 + phi_y**2)
phi_x = (self.phi[j,i+idr] - self.phi[j,i+idl])/2.0
phi_y = self.phi[j+idd,i] - self.phi[j,i]
IDivD = 1.0/np.sqrt(self.yita + phi_x**2 + phi_y**2)
phi_y = self.phi[j,i] - self.phi[j+idu,i]
IDivU = 1.0/np.sqrt(self.yita + phi_x**2 + phi_y**2)
# Distances
dist1 = (img[j,i] - self.c1)**2
dist2 = (img[j,i] - self.c2)**2
# Update phi at current point j,i
phi_last = self.phi[j,i]
self.phi[j,i] = ((self.phi[j,i] +
Delta*(self.mu*
(self.phi[j,i+idr]*IDivR +
self.phi[j,i+idl]*IDivL +
self.phi[j+idd,i]*IDivD +
self.phi[j+idu,i]*IDivU
)-
self.nu - self.lambda1 * dist1 +
self.lambda2 * dist2)
) /
(1.0 + Delta*self.mu*(IDivR+IDivL+IDivD+IDivU)))
phidiff = self.phi[j,i] - phi_last
phidiffnorm += phidiff ** 2
if phidiffnorm <= phi_total and it >= 2:
break
# update c1 and c2
self.c1,self.c2 = self.calcCentroids(img)
if np.mod(it, 5) == 0:
t = time.strftime('%Y-%m-%d: %H:%M:%S', time.localtime(time.time()))
if snappath is not None:
savepath = os.path.join(snappath,"phi1_{0}.png".format(it))
imsave(savepath, self.phi)
print("[%s] Iter: %d PhiDiffNorm: %.8f" % (t, it, phidiffnorm))
def drawResult(self,img,normflag=True,logflag=False):
"""draw the segmentation curve"""
if normflag:
img = self.getNormalization(img, logflag=logflag)
plt.rcParams["figure.figsize"] = [10.0, 4.0]
gs = gridspec.GridSpec(1, 2, width_ratios=[1,1])
ax0 = plt.subplot(gs[0])
ax0 = plt.imshow(img)
        ax0 = plt.contour(self.phi, levels=[0.0])
plt.xlabel("horizontal")
plt.ylabel("vertical")
img_seg = np.zeros(img.shape)
img_seg[self.phi>0.0] = 1
ax0 = plt.subplot(gs[1])
# ax1 = plt.contour(self.phi)
ax1 = plt.imshow(img_seg)
plt.xlabel("horizontal")
plt.ylabel("vertical")
|
|
import numpy as np
import pandas as pd
from .pycompat import basestring, iteritems, suppress, dask_array_type, bytes_type
from . import formatting
from .utils import SortedKeysDict, not_implemented
class ImplementsArrayReduce(object):
@classmethod
def _reduce_method(cls, func, include_skipna, numeric_only):
if include_skipna:
def wrapped_func(self, dim=None, axis=None, skipna=None,
keep_attrs=False, **kwargs):
return self.reduce(func, dim, axis, keep_attrs=keep_attrs,
skipna=skipna, allow_lazy=True, **kwargs)
else:
def wrapped_func(self, dim=None, axis=None, keep_attrs=False,
**kwargs):
return self.reduce(func, dim, axis, keep_attrs=keep_attrs,
allow_lazy=True, **kwargs)
return wrapped_func
_reduce_extra_args_docstring = \
"""dim : str or sequence of str, optional
Dimension(s) over which to apply `{name}`.
axis : int or sequence of int, optional
Axis(es) over which to apply `{name}`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither are supplied, then
        `{name}` is calculated over all axes."""
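# A minimal sketch (assumptions flagged below) of how these wrappers are
# typically attached to a class; the actual injection code lives elsewhere in
# the codebase (e.g. an ops module), and the function list here is
# illustrative only.
#
#     def inject_reduce_methods(cls):
#         for name, f in [('mean', np.mean), ('sum', np.sum)]:
#             func = cls._reduce_method(f, include_skipna=True,
#                                       numeric_only=True)
#             func.__name__ = name
#             func.__doc__ = cls._reduce_extra_args_docstring.format(name=name)
#             setattr(cls, name, func)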
class ImplementsDatasetReduce(object):
@classmethod
def _reduce_method(cls, func, include_skipna, numeric_only):
if include_skipna:
def wrapped_func(self, dim=None, keep_attrs=False, skipna=None,
**kwargs):
return self.reduce(func, dim, keep_attrs, skipna=skipna,
numeric_only=numeric_only, allow_lazy=True,
**kwargs)
else:
def wrapped_func(self, dim=None, keep_attrs=False, **kwargs):
return self.reduce(func, dim, keep_attrs,
numeric_only=numeric_only, allow_lazy=True,
**kwargs)
return wrapped_func
_reduce_extra_args_docstring = \
"""dim : str or sequence of str, optional
Dimension(s) over which to apply `func`. By default `func` is
applied over all dimensions."""
class ImplementsRollingArrayReduce(object):
@classmethod
def _reduce_method(cls, func):
def wrapped_func(self, **kwargs):
return self.reduce(func, **kwargs)
return wrapped_func
@classmethod
def _bottleneck_reduce(cls, func):
def wrapped_func(self, **kwargs):
from .dataarray import DataArray
if isinstance(self.obj.data, dask_array_type):
raise NotImplementedError(
'Rolling window operation does not work with dask arrays')
# bottleneck doesn't allow min_count to be 0, although it should
# work the same as if min_count = 1
if self.min_periods is not None and self.min_periods == 0:
min_count = self.min_periods + 1
else:
min_count = self.min_periods
values = func(self.obj.data, window=self.window,
min_count=min_count, axis=self._axis_num)
result = DataArray(values, self.obj.coords)
if self.center:
result = self._center_result(result)
return result
return wrapped_func
@classmethod
def _bottleneck_reduce_without_min_count(cls, func):
def wrapped_func(self, **kwargs):
from .dataarray import DataArray
if self.min_periods is not None:
raise ValueError('Rolling.median does not accept min_periods')
if isinstance(self.obj.data, dask_array_type):
raise NotImplementedError(
'Rolling window operation does not work with dask arrays')
values = func(self.obj.data, window=self.window, axis=self._axis_num)
result = DataArray(values, self.obj.coords)
if self.center:
result = self._center_result(result)
return result
return wrapped_func
class AbstractArray(ImplementsArrayReduce, formatting.ReprMixin):
def __bool__(self):
return bool(self.values)
# Python 3 uses __bool__, Python 2 uses __nonzero__
__nonzero__ = __bool__
def __float__(self):
return float(self.values)
def __int__(self):
return int(self.values)
def __complex__(self):
return complex(self.values)
def __long__(self):
return long(self.values)
def __array__(self, dtype=None):
return np.asarray(self.values, dtype=dtype)
def __repr__(self):
return formatting.array_repr(self)
def _iter(self):
for n in range(len(self)):
yield self[n]
def __iter__(self):
if self.ndim == 0:
raise TypeError('iteration over a 0-d array')
return self._iter()
@property
def T(self):
return self.transpose()
def get_axis_num(self, dim):
"""Return axis number(s) corresponding to dimension(s) in this array.
Parameters
----------
dim : str or iterable of str
Dimension name(s) for which to lookup axes.
Returns
-------
int or tuple of int
Axis number or numbers corresponding to the given dimensions.
"""
if isinstance(dim, basestring):
return self._get_axis_num(dim)
else:
return tuple(self._get_axis_num(d) for d in dim)
def _get_axis_num(self, dim):
try:
return self.dims.index(dim)
except ValueError:
raise ValueError("%r not found in array dimensions %r" %
(dim, self.dims))
class AttrAccessMixin(object):
"""Mixin class that allows getting keys with attribute access
"""
_initialized = False
@property
def _attr_sources(self):
"""List of places to look-up items for attribute-style access"""
return [self, self.attrs]
def __getattr__(self, name):
if name != '__setstate__':
# this avoids an infinite loop when pickle looks for the
# __setstate__ attribute before the xarray object is initialized
for source in self._attr_sources:
with suppress(KeyError):
return source[name]
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, name))
def __setattr__(self, name, value):
if self._initialized:
try:
# Allow setting instance variables if they already exist
# (e.g., _attrs). We use __getattribute__ instead of hasattr
# to avoid key lookups with attribute-style access.
self.__getattribute__(name)
except AttributeError:
raise AttributeError(
"cannot set attribute %r on a %r object. Use __setitem__ "
"style assignment (e.g., `ds['name'] = ...`) instead to "
"assign variables." % (name, type(self).__name__))
object.__setattr__(self, name, value)
def __dir__(self):
"""Provide method name lookup and completion. Only provide 'public'
methods.
"""
extra_attrs = [
item for sublist in self._attr_sources for item in sublist
if isinstance(item, basestring)]
return sorted(set(dir(type(self)) + extra_attrs))
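# For example (illustrative): any class mixing this in exposes its items and
# attrs through attribute access, so after ``obj.attrs['units'] = 'm'`` the
# lookup ``obj.units`` returns ``'m'`` via the ``attrs`` entry in
# ``_attr_sources``.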
class BaseDataObject(AttrAccessMixin):
def _calc_assign_results(self, kwargs):
results = SortedKeysDict()
for k, v in kwargs.items():
if callable(v):
results[k] = v(self)
else:
results[k] = v
return results
def assign_coords(self, **kwargs):
"""Assign new coordinates to this object, returning a new object
with all the original data in addition to the new coordinates.
Parameters
----------
kwargs : keyword, value pairs
keywords are the variables names. If the values are callable, they
are computed on this object and assigned to new coordinate
variables. If the values are not callable, (e.g. a DataArray,
scalar, or array), they are simply assigned.
Returns
-------
assigned : same type as caller
A new object with the new coordinates in addition to the existing
data.
Notes
-----
Since ``kwargs`` is a dictionary, the order of your arguments may not
be preserved, and so the order of the new variables is not well
defined. Assigning multiple variables within the same ``assign_coords``
is possible, but you cannot reference other variables created within
the same ``assign_coords`` call.
See also
--------
Dataset.assign
"""
data = self.copy(deep=False)
results = self._calc_assign_results(kwargs)
data.coords.update(results)
return data
def pipe(self, func, *args, **kwargs):
"""
Apply func(self, *args, **kwargs)
This method replicates the pandas method of the same name.
Parameters
----------
func : function
function to apply to this xarray object (Dataset/DataArray).
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the xarray object.
args : positional arguments passed into ``func``.
kwargs : a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
xarray or pandas objects, e.g., instead of writing
>>> f(g(h(ds), arg1=a), arg2=b, arg3=c)
You can write
>>> (ds.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (ds.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
See Also
--------
pandas.DataFrame.pipe
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
msg = '%s is both the pipe target and a keyword argument' % target
raise ValueError(msg)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def groupby(self, group, squeeze=True):
"""Returns a GroupBy object for performing grouped operations.
Parameters
----------
group : str, DataArray or Coordinate
Array whose unique values should be used to group this array. If a
string, must be the name of a variable contained in this dataset.
squeeze : boolean, optional
If "group" is a dimension of any arrays in this dataset, `squeeze`
controls whether the subarrays have a dimension of length 1 along
that dimension or if the dimension is squeezed out.
Returns
-------
grouped : GroupBy
A `GroupBy` object patterned after `pandas.GroupBy` that can be
iterated over in the form of `(unique_value, grouped_array)` pairs.
"""
if isinstance(group, basestring):
group = self[group]
return self.groupby_cls(self, group, squeeze=squeeze)
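    # For example (illustrative): ``ds.groupby('letters')`` yields
    # ``(unique_value, grouped_array)`` pairs and supports aggregations such
    # as ``ds.groupby('letters').mean()``.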
def groupby_bins(self, group, bins, right=True, labels=None, precision=3,
include_lowest=False, squeeze=True):
"""Returns a GroupBy object for performing grouped operations.
Rather than using all unique values of `group`, the values are discretized
first by applying `pandas.cut` [1]_ to `group`.
Parameters
----------
group : str, DataArray or Coordinate
Array whose binned values should be used to group this array. If a
string, must be the name of a variable contained in this dataset.
bins : int or array of scalars
If bins is an int, it defines the number of equal-width bins in the
range of x. However, in this case, the range of x is extended by .1%
on each side to include the min or max values of x. If bins is a
sequence it defines the bin edges allowing for non-uniform bin
width. No extension of the range of x is done in this case.
right : boolean, optional
Indicates whether the bins include the rightmost edge or not. If
right == True (the default), then the bins [1,2,3,4] indicate
(1,2], (2,3], (3,4].
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, string bin labels are assigned by
`pandas.cut`.
precision : int
The precision at which to store and display the bins labels.
include_lowest : bool
Whether the first interval should be left-inclusive or not.
squeeze : boolean, optional
If "group" is a dimension of any arrays in this dataset, `squeeze`
controls whether the subarrays have a dimension of length 1 along
that dimension or if the dimension is squeezed out.
Returns
-------
grouped : GroupBy
A `GroupBy` object patterned after `pandas.GroupBy` that can be
iterated over in the form of `(unique_value, grouped_array)` pairs.
The name of the group has the added suffix `_bins` in order to
distinguish it from the original variable.
References
----------
.. [1] http://pandas.pydata.org/pandas-docs/stable/generated/pandas.cut.html
"""
if isinstance(group, basestring):
group = self[group]
return self.groupby_cls(self, group, squeeze=squeeze, bins=bins,
cut_kwargs={'right': right, 'labels': labels,
'precision': precision,
'include_lowest': include_lowest})
def rolling(self, min_periods=None, center=False, **windows):
"""
Rolling window object.
Rolling window aggregations are much faster when bottleneck is
installed.
Parameters
----------
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). The default, None, is equivalent to
setting min_periods equal to the size of the window.
center : boolean, default False
Set the labels at the center of the window.
**windows : dim=window
dim : str
Name of the dimension to create the rolling iterator
along (e.g., `time`).
window : int
Size of the moving window.
Returns
-------
rolling : type of input argument
"""
return self.rolling_cls(self, min_periods=min_periods,
center=center, **windows)
def resample(self, freq, dim, how='mean', skipna=None, closed=None,
label=None, base=0, keep_attrs=False):
"""Resample this object to a new temporal resolution.
Handles both downsampling and upsampling. Upsampling with filling is
not yet supported; if any intervals contain no values in the original
object, they will be given the value ``NaN``.
Parameters
----------
freq : str
            String in the '#offset' format to specify the step-size along the
            resampled dimension, where '#' is an (optional) integer multiplier
            (default 1) and 'offset' is any pandas date offset alias. Examples
of valid offsets include:
* 'AS': year start
* 'QS-DEC': quarterly, starting on December 1
* 'MS': month start
* 'D': day
* 'H': hour
* 'Min': minute
The full list of these offset aliases is documented in pandas [1]_.
dim : str
Name of the dimension to resample along (e.g., 'time').
how : str or func, optional
Used for downsampling. If a string, ``how`` must be a valid
aggregation operation supported by xarray. Otherwise, ``how`` must be
a function that can be called like ``how(values, axis)`` to reduce
ndarray values along the given axis. Valid choices that can be
provided as a string include all the usual Dataset/DataArray
aggregations (``all``, ``any``, ``argmax``, ``argmin``, ``max``,
``mean``, ``median``, ``min``, ``prod``, ``sum``, ``std`` and
``var``), as well as ``first`` and ``last``.
skipna : bool, optional
Whether to skip missing values when aggregating in downsampling.
closed : 'left' or 'right', optional
Side of each interval to treat as closed.
        label : 'left' or 'right', optional
Side of each interval to use for labeling.
        base : int, optional
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '24H' frequency, base could
range from 0 through 23.
keep_attrs : bool, optional
If True, the object's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
Returns
-------
resampled : same type as caller
This object resampled.
References
----------
.. [1] http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
"""
from .dataarray import DataArray
RESAMPLE_DIM = '__resample_dim__'
if isinstance(dim, basestring):
dim = self[dim]
group = DataArray(dim, [(RESAMPLE_DIM, dim)], name=RESAMPLE_DIM)
time_grouper = pd.TimeGrouper(freq=freq, how=how, closed=closed,
label=label, base=base)
gb = self.groupby_cls(self, group, grouper=time_grouper)
if isinstance(how, basestring):
f = getattr(gb, how)
if how in ['first', 'last']:
result = f(skipna=skipna, keep_attrs=keep_attrs)
else:
result = f(dim=dim.name, skipna=skipna, keep_attrs=keep_attrs)
else:
result = gb.reduce(how, dim=dim.name, keep_attrs=keep_attrs)
result = result.rename({RESAMPLE_DIM: dim.name})
return result
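    # For example (illustrative): ``ds.resample('1D', dim='time', how='mean')``
    # downsamples along 'time' to daily means using the pandas 'D' offset
    # alias.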
def where(self, cond, other=None, drop=False):
"""Return an object of the same shape with all entries where cond is
True and all other entries masked.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic.
Parameters
----------
cond : boolean DataArray or Dataset
other : unimplemented, optional
Unimplemented placeholder for compatibility with future numpy / pandas versions
drop : boolean, optional
Coordinate labels that only correspond to NA values should be dropped
Returns
-------
        same type as caller, or if drop=True, the same type as caller with
        dimensions reduced so that only elements where the mask is True remain
Examples
--------
>>> import numpy as np
>>> a = xr.DataArray(np.arange(25).reshape(5, 5), dims=('x', 'y'))
>>> a.where((a > 6) & (a < 18))
<xarray.DataArray (x: 5, y: 5)>
array([[ nan, nan, nan, nan, nan],
[ nan, nan, 7., 8., 9.],
[ 10., 11., 12., 13., 14.],
[ 15., 16., 17., nan, nan],
[ nan, nan, nan, nan, nan]])
Coordinates:
* y (y) int64 0 1 2 3 4
* x (x) int64 0 1 2 3 4
>>> a.where((a > 6) & (a < 18), drop=True)
        <xarray.DataArray (x: 3, y: 5)>
        array([[ nan,  nan,   7.,   8.,   9.],
               [ 10.,  11.,  12.,  13.,  14.],
               [ 15.,  16.,  17.,  nan,  nan]])
Coordinates:
* x (x) int64 1 2 3
* y (y) int64 0 1 2 3 4
"""
if other is not None:
raise NotImplementedError("The optional argument 'other' has not yet been implemented")
if drop:
from .dataarray import DataArray
from .dataset import Dataset
# get cond with the minimal size needed for the Dataset
if isinstance(cond, Dataset):
clipcond = cond.to_array().any('variable')
elif isinstance(cond, DataArray):
clipcond = cond
else:
raise TypeError("Cond argument is %r but must be a %r or %r" %
(cond, Dataset, DataArray))
# clip the data corresponding to coordinate dims that are not used
clip = dict(zip(clipcond.dims, [np.unique(adim)
for adim in np.nonzero(clipcond.values)]))
outcond = cond.isel(**clip)
outobj = self.sel(**outcond.indexes)
else:
outobj = self
outcond = cond
return outobj._where(outcond)
# this has no runtime function - these are listed so IDEs know these methods
# are defined and don't warn on these operations
    __lt__ = __le__ = __ge__ = __gt__ = __add__ = __sub__ = __mul__ = \
__truediv__ = __floordiv__ = __mod__ = __pow__ = __and__ = __xor__ = \
__or__ = __div__ = __eq__ = __ne__ = not_implemented
def squeeze(xarray_obj, dims, dim=None):
"""Squeeze the dims of an xarray object."""
if dim is None:
dim = [d for d, s in iteritems(dims) if s == 1]
else:
if isinstance(dim, basestring):
dim = [dim]
if any(dims[k] > 1 for k in dim):
raise ValueError('cannot select a dimension to squeeze out '
'which has length greater than one')
return xarray_obj.isel(**dict((d, 0) for d in dim))
def _maybe_promote(dtype):
"""Simpler equivalent of pandas.core.common._maybe_promote"""
# N.B. these casting rules should match pandas
if np.issubdtype(dtype, float):
fill_value = np.nan
elif np.issubdtype(dtype, int):
# convert to floating point so NaN is valid
dtype = float
fill_value = np.nan
elif np.issubdtype(dtype, complex):
fill_value = np.nan + np.nan * 1j
elif np.issubdtype(dtype, np.datetime64):
fill_value = np.datetime64('NaT')
elif np.issubdtype(dtype, np.timedelta64):
fill_value = np.timedelta64('NaT')
else:
dtype = object
fill_value = np.nan
return np.dtype(dtype), fill_value
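# For example (illustrative):
#   _maybe_promote(np.dtype(int))               -> (dtype('float64'), nan)
#   _maybe_promote(np.dtype(bool))              -> (dtype('O'), nan)
#   _maybe_promote(np.dtype('datetime64[ns]'))  -> (dtype('<M8[ns]'), NaT)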
def _possibly_convert_objects(values):
"""Convert arrays of datetime.datetime and datetime.timedelta objects into
datetime64 and timedelta64, according to the pandas convention.
"""
return np.asarray(pd.Series(values.ravel())).reshape(values.shape)
def _get_fill_value(dtype):
"""Return a fill value that appropriately promotes types when used with
np.concatenate
"""
_, fill_value = _maybe_promote(dtype)
return fill_value
def _full_like_dataarray(arr, keep_attrs=False, fill_value=None):
"""empty DataArray"""
from .dataarray import DataArray
attrs = arr.attrs if keep_attrs else {}
if fill_value is None:
values = np.empty_like(arr)
elif fill_value is True:
dtype, fill_value = _maybe_promote(arr.dtype)
values = np.full_like(arr, fill_value=fill_value, dtype=dtype)
else:
dtype, _ = _maybe_promote(np.array(fill_value).dtype)
values = np.full_like(arr, fill_value=fill_value, dtype=dtype)
return DataArray(values, dims=arr.dims, coords=arr.coords, attrs=attrs)
def _full_like(xray_obj, keep_attrs=False, fill_value=None):
"""Return a new object with the same shape and type as a given object.
Parameters
----------
xray_obj : DataArray or Dataset
        Object to use as a template; the result has the same
        shape/dims/coords/attrs.
keep_attrs : bool, optional
        If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
fill_value : scalar, optional
Value to fill DataArray(s) with before returning.
Returns
-------
out : same as xray_obj
New object with the same shape and type as a given object.
"""
from .dataarray import DataArray
from .dataset import Dataset
if isinstance(xray_obj, Dataset):
attrs = xray_obj.attrs if keep_attrs else {}
return Dataset(dict((k, _full_like_dataarray(v, keep_attrs=keep_attrs,
fill_value=fill_value))
for k, v in iteritems(xray_obj.data_vars)),
name=xray_obj.name, attrs=attrs)
elif isinstance(xray_obj, DataArray):
return _full_like_dataarray(xray_obj, keep_attrs=keep_attrs,
fill_value=fill_value)
|
|
#!/usr/bin/env python
"""
This sample application presents itself as a BBMD sitting on an IP network
that is also a router to a VLAN. The VLAN has a device on it with an analog
value object that returns a random value for the present value.
Note that the device instance number of the virtual device will be 100 times
the network number plus the address (net2 * 100 + addr2).
"""
import random
import argparse
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ArgumentParser
from bacpypes.core import run, deferred
from bacpypes.comm import bind
from bacpypes.pdu import Address, LocalBroadcast
from bacpypes.netservice import NetworkServiceAccessPoint, NetworkServiceElement
from bacpypes.bvllservice import BIPBBMD, AnnexJCodec, UDPMultiplexer
from bacpypes.app import Application
from bacpypes.appservice import StateMachineAccessPoint, ApplicationServiceAccessPoint
from bacpypes.local.device import LocalDeviceObject
from bacpypes.service.device import WhoIsIAmServices
from bacpypes.service.object import ReadWritePropertyServices
from bacpypes.primitivedata import Real
from bacpypes.object import AnalogValueObject, Property
from bacpypes.vlan import Network, Node
from bacpypes.errors import ExecutionError
# some debugging
_debug = 0
_log = ModuleLogger(globals())
#
# RandomValueProperty
#
@bacpypes_debugging
class RandomValueProperty(Property):
def __init__(self, identifier):
if _debug: RandomValueProperty._debug("__init__ %r", identifier)
Property.__init__(self, identifier, Real, default=None, optional=True, mutable=False)
def ReadProperty(self, obj, arrayIndex=None):
if _debug: RandomValueProperty._debug("ReadProperty %r arrayIndex=%r", obj, arrayIndex)
# access an array
if arrayIndex is not None:
raise ExecutionError(errorClass='property', errorCode='propertyIsNotAnArray')
# return a random value
value = random.random() * 100.0
if _debug: RandomValueProperty._debug(" - value: %r", value)
return value
def WriteProperty(self, obj, value, arrayIndex=None, priority=None, direct=False):
if _debug: RandomValueProperty._debug("WriteProperty %r %r arrayIndex=%r priority=%r direct=%r", obj, value, arrayIndex, priority, direct)
raise ExecutionError(errorClass='property', errorCode='writeAccessDenied')
#
# Random Value Object Type
#
@bacpypes_debugging
class RandomAnalogValueObject(AnalogValueObject):
properties = [
RandomValueProperty('presentValue'),
]
def __init__(self, **kwargs):
if _debug: RandomAnalogValueObject._debug("__init__ %r", kwargs)
AnalogValueObject.__init__(self, **kwargs)
#
# VLANApplication
#
@bacpypes_debugging
class VLANApplication(Application, WhoIsIAmServices, ReadWritePropertyServices):
def __init__(self, vlan_device, vlan_address, aseID=None):
if _debug: VLANApplication._debug("__init__ %r %r aseID=%r", vlan_device, vlan_address, aseID)
Application.__init__(self, vlan_device, aseID=aseID)
        # include an application decoder
self.asap = ApplicationServiceAccessPoint()
# pass the device object to the state machine access point so it
# can know if it should support segmentation
self.smap = StateMachineAccessPoint(vlan_device)
# the segmentation state machines need access to the same device
# information cache as the application
self.smap.deviceInfoCache = self.deviceInfoCache
# a network service access point will be needed
self.nsap = NetworkServiceAccessPoint()
# give the NSAP a generic network layer service element
self.nse = NetworkServiceElement()
bind(self.nse, self.nsap)
# bind the top layers
bind(self, self.asap, self.smap, self.nsap)
# create a vlan node at the assigned address
self.vlan_node = Node(vlan_address)
# bind the stack to the node, no network number, no address
self.nsap.bind(self.vlan_node)
def request(self, apdu):
if _debug: VLANApplication._debug("[%s]request %r", self.vlan_node.address, apdu)
Application.request(self, apdu)
def indication(self, apdu):
if _debug: VLANApplication._debug("[%s]indication %r", self.vlan_node.address, apdu)
Application.indication(self, apdu)
def response(self, apdu):
if _debug: VLANApplication._debug("[%s]response %r", self.vlan_node.address, apdu)
Application.response(self, apdu)
def confirmation(self, apdu):
if _debug: VLANApplication._debug("[%s]confirmation %r", self.vlan_node.address, apdu)
Application.confirmation(self, apdu)
#
# VLANRouter
#
@bacpypes_debugging
class VLANRouter:
def __init__(self, local_address, local_network):
if _debug: VLANRouter._debug("__init__ %r %r", local_address, local_network)
# a network service access point will be needed
self.nsap = NetworkServiceAccessPoint()
# give the NSAP a generic network layer service element
self.nse = NetworkServiceElement()
bind(self.nse, self.nsap)
# create a BBMD, bound to the Annex J server
# on the UDP multiplexer
self.bip = BIPBBMD(local_address)
self.annexj = AnnexJCodec()
self.mux = UDPMultiplexer(local_address)
# bind the bottom layers
bind(self.bip, self.annexj, self.mux.annexJ)
# bind the BIP stack to the local network
self.nsap.bind(self.bip, local_network, local_address)
#
# __main__
#
def main():
# parse the command line arguments
parser = ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
    # add an argument for the first network's address
parser.add_argument('addr1', type=str,
help='address of first network',
)
    # add an argument for the first network's number
parser.add_argument('net1', type=int,
help='network number of first network',
)
    # add an argument for the second network's address
parser.add_argument('addr2', type=str,
help='address of second network',
)
    # add an argument for the second network's number
parser.add_argument('net2', type=int,
help='network number of second network',
)
# now parse the arguments
args = parser.parse_args()
if _debug: _log.debug("initialization")
if _debug: _log.debug(" - args: %r", args)
local_address = Address(args.addr1)
local_network = args.net1
vlan_address = Address(args.addr2)
vlan_network = args.net2
# create the VLAN router, bind it to the local network
router = VLANRouter(local_address, local_network)
# create a VLAN
vlan = Network(broadcast_address=LocalBroadcast())
# create a node for the router, address 1 on the VLAN
router_addr = Address(1)
router_node = Node(router_addr)
vlan.add_node(router_node)
# bind the router stack to the vlan network through this node
router.nsap.bind(router_node, vlan_network, router_addr)
# send network topology
deferred(router.nse.i_am_router_to_network)
# device identifier is assigned from the address
device_instance = vlan_network * 100 + int(args.addr2)
_log.debug(" - device_instance: %r", device_instance)
# make a vlan device object
vlan_device = \
LocalDeviceObject(
objectName="VLAN Node %d" % (device_instance,),
objectIdentifier=('device', device_instance),
maxApduLengthAccepted=1024,
segmentationSupported='noSegmentation',
vendorIdentifier=15,
)
_log.debug(" - vlan_device: %r", vlan_device)
# make the application, add it to the network
vlan_app = VLANApplication(vlan_device, vlan_address)
vlan.add_node(vlan_app.vlan_node)
_log.debug(" - vlan_app: %r", vlan_app)
# make a random value object
ravo = RandomAnalogValueObject(
objectIdentifier=('analogValue', 1),
objectName='Device%d/Random1' % (device_instance,),
)
_log.debug(" - ravo1: %r", ravo)
# add it to the device
vlan_app.add_object(ravo)
_log.debug("running")
run()
_log.debug("fini")
if __name__ == "__main__":
main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid as stdlib_uuid
import feedparser
from lxml import etree
import webob
from nova.api.openstack.compute import versions
from nova.api.openstack.compute import views
from nova.api.openstack import xmlutil
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import common
from nova.tests.api.openstack import fakes
from nova.tests import matchers
NS = {
'atom': 'http://www.w3.org/2005/Atom',
'ns': 'http://docs.openstack.org/common/api/v1.0'
}
LINKS = {
'v2.0': {
'pdf': 'http://docs.openstack.org/'
'api/openstack-compute/1.1/os-compute-devguide-1.1.pdf',
'wadl': 'http://docs.openstack.org/'
'api/openstack-compute/1.1/wadl/os-compute-1.1.wadl',
},
}
VERSIONS = {
"v2.0": {
"id": "v2.0",
"status": "CURRENT",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "describedby",
"type": "application/pdf",
"href": LINKS['v2.0']['pdf'],
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
"href": LINKS['v2.0']['wadl'],
},
],
"media-types": [
{
"base": "application/xml",
"type": "application/vnd.openstack.compute+xml;version=2",
},
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=2",
},
],
},
}
class VersionsTest(test.TestCase):
def setUp(self):
super(VersionsTest, self).setUp()
self.stubs.Set(versions, 'VERSIONS', VERSIONS)
def test_get_version_list(self):
req = webob.Request.blank('/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
versions = jsonutils.loads(res.body)["versions"]
expected = [
{
"id": "v2.0",
"status": "CURRENT",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/",
}],
},
]
self.assertEqual(versions, expected)
def test_get_version_list_302(self):
req = webob.Request.blank('/v2')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 302)
redirect_req = webob.Request.blank('/v2/')
self.assertEqual(res.location, redirect_req.url)
def test_get_version_2_detail(self):
req = webob.Request.blank('/v2/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {
"version": {
"id": "v2.0",
"status": "CURRENT",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/",
},
{
"rel": "describedby",
"type": "application/pdf",
"href": LINKS['v2.0']['pdf'],
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
"href": LINKS['v2.0']['wadl'],
},
],
"media-types": [
{
"base": "application/xml",
"type": "application/"
"vnd.openstack.compute+xml;version=2",
},
{
"base": "application/json",
"type": "application/"
"vnd.openstack.compute+json;version=2",
},
],
},
}
self.assertEqual(expected, version)
def test_get_version_2_detail_content_type(self):
req = webob.Request.blank('/')
req.accept = "application/json;version=2"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {
"version": {
"id": "v2.0",
"status": "CURRENT",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/",
},
{
"rel": "describedby",
"type": "application/pdf",
"href": LINKS['v2.0']['pdf'],
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
"href": LINKS['v2.0']['wadl'],
},
],
"media-types": [
{
"base": "application/xml",
"type": "application/"
"vnd.openstack.compute+xml;version=2",
},
{
"base": "application/json",
"type": "application/"
"vnd.openstack.compute+json;version=2",
},
],
},
}
self.assertEqual(expected, version)
def test_get_version_2_detail_xml(self):
req = webob.Request.blank('/v2/')
req.accept = "application/xml"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/xml")
version = etree.XML(res.body)
xmlutil.validate_schema(version, 'version')
expected = VERSIONS['v2.0']
self.assertTrue(version.xpath('/ns:version', namespaces=NS))
media_types = version.xpath('ns:media-types/ns:media-type',
namespaces=NS)
self.assertTrue(common.compare_media_types(media_types,
expected['media-types']))
for key in ['id', 'status', 'updated']:
self.assertEqual(version.get(key), expected[key])
links = version.xpath('atom:link', namespaces=NS)
self.assertTrue(common.compare_links(links,
[{'rel': 'self', 'href': 'http://localhost/v2/'}]
+ expected['links']))
def test_get_version_list_xml(self):
req = webob.Request.blank('/')
req.accept = "application/xml"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/xml")
root = etree.XML(res.body)
print res.body
xmlutil.validate_schema(root, 'versions')
self.assertTrue(root.xpath('/ns:versions', namespaces=NS))
versions = root.xpath('ns:version', namespaces=NS)
self.assertEqual(len(versions), 1)
for i, v in enumerate(['v2.0']):
version = versions[i]
expected = VERSIONS[v]
for key in ['id', 'status', 'updated']:
self.assertEqual(version.get(key), expected[key])
(link,) = version.xpath('atom:link', namespaces=NS)
self.assertTrue(common.compare_links(link,
[{'rel': 'self', 'href': 'http://localhost/%s/' % v}]))
def test_get_version_2_detail_atom(self):
req = webob.Request.blank('/v2/')
req.accept = "application/atom+xml"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual("application/atom+xml", res.content_type)
xmlutil.validate_schema(etree.XML(res.body), 'atom')
f = feedparser.parse(res.body)
self.assertEqual(f.feed.title, 'About This Version')
self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z')
self.assertEqual(f.feed.id, 'http://localhost/v2/')
self.assertEqual(f.feed.author, 'Rackspace')
self.assertEqual(f.feed.author_detail.href,
'http://www.rackspace.com/')
self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v2/')
self.assertEqual(f.feed.links[0]['rel'], 'self')
self.assertEqual(len(f.entries), 1)
entry = f.entries[0]
self.assertEqual(entry.id, 'http://localhost/v2/')
self.assertEqual(entry.title, 'Version v2.0')
self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
self.assertEqual(len(entry.content), 1)
self.assertEqual(entry.content[0].value,
'Version v2.0 CURRENT (2011-01-21T11:33:21Z)')
self.assertEqual(len(entry.links), 3)
self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
self.assertEqual(entry.links[0]['rel'], 'self')
self.assertEqual(entry.links[1], {
'href': LINKS['v2.0']['pdf'],
'type': 'application/pdf',
'rel': 'describedby'})
self.assertEqual(entry.links[2], {
'href': LINKS['v2.0']['wadl'],
'type': 'application/vnd.sun.wadl+xml',
'rel': 'describedby'})
def test_get_version_list_atom(self):
req = webob.Request.blank('/')
req.accept = "application/atom+xml"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/atom+xml")
f = feedparser.parse(res.body)
self.assertEqual(f.feed.title, 'Available API Versions')
self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z')
self.assertEqual(f.feed.id, 'http://localhost/')
self.assertEqual(f.feed.author, 'Rackspace')
self.assertEqual(f.feed.author_detail.href,
'http://www.rackspace.com/')
self.assertEqual(f.feed.links[0]['href'], 'http://localhost/')
self.assertEqual(f.feed.links[0]['rel'], 'self')
self.assertEqual(len(f.entries), 1)
entry = f.entries[0]
self.assertEqual(entry.id, 'http://localhost/v2/')
self.assertEqual(entry.title, 'Version v2.0')
self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
self.assertEqual(len(entry.content), 1)
self.assertEqual(entry.content[0].value,
'Version v2.0 CURRENT (2011-01-21T11:33:21Z)')
self.assertEqual(len(entry.links), 1)
self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
self.assertEqual(entry.links[0]['rel'], 'self')
def test_multi_choice_image(self):
req = webob.Request.blank('/images/1')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/json")
expected = {
"choices": [
{
"id": "v2.0",
"status": "CURRENT",
"links": [
{
"href": "http://localhost/v2/images/1",
"rel": "self",
},
],
"media-types": [
{
"base": "application/xml",
"type": "application/vnd.openstack.compute+xml"
";version=2"
},
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json"
";version=2"
},
],
},
], }
self.assertThat(jsonutils.loads(res.body),
matchers.DictMatches(expected))
def test_multi_choice_image_xml(self):
req = webob.Request.blank('/images/1')
req.accept = "application/xml"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/xml")
root = etree.XML(res.body)
self.assertTrue(root.xpath('/ns:choices', namespaces=NS))
versions = root.xpath('ns:version', namespaces=NS)
self.assertEqual(len(versions), 1)
version = versions[0]
self.assertEqual(version.get('id'), 'v2.0')
self.assertEqual(version.get('status'), 'CURRENT')
media_types = version.xpath('ns:media-types/ns:media-type',
namespaces=NS)
self.assertTrue(common.compare_media_types(media_types,
VERSIONS['v2.0']['media-types']))
links = version.xpath('atom:link', namespaces=NS)
self.assertTrue(common.compare_links(links,
[{'rel': 'self', 'href': 'http://localhost/v2/images/1'}]))
def test_multi_choice_server_atom(self):
"""
Make sure multi choice responses do not have content-type
application/atom+xml (should use default of json)
"""
req = webob.Request.blank('/servers')
req.accept = "application/atom+xml"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/json")
def test_multi_choice_server(self):
uuid = str(stdlib_uuid.uuid4())
req = webob.Request.blank('/servers/' + uuid)
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/json")
expected = {
"choices": [
{
"id": "v2.0",
"status": "CURRENT",
"links": [
{
"href": "http://localhost/v2/servers/" + uuid,
"rel": "self",
},
],
"media-types": [
{
"base": "application/xml",
"type": "application/vnd.openstack.compute+xml"
";version=2"
},
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json"
";version=2"
},
],
},
], }
self.assertThat(jsonutils.loads(res.body),
matchers.DictMatches(expected))
class VersionsViewBuilderTests(test.TestCase):
def test_view_builder(self):
base_url = "http://example.org/"
version_data = {
"v3.2.1": {
"id": "3.2.1",
"status": "CURRENT",
"updated": "2011-07-18T11:30:00Z",
}
}
expected = {
"versions": [
{
"id": "3.2.1",
"status": "CURRENT",
"updated": "2011-07-18T11:30:00Z",
"links": [
{
"rel": "self",
"href": "http://example.org/v2/",
},
],
}
]
}
builder = views.versions.ViewBuilder(base_url)
output = builder.build_versions(version_data)
self.assertEqual(output, expected)
def test_generate_href(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href()
self.assertEqual(actual, expected)
class VersionsSerializerTests(test.TestCase):
def test_versions_list_xml_serializer(self):
versions_data = {
'versions': [
{
"id": "2.7",
"updated": "2011-07-18T11:30:00Z",
"status": "DEPRECATED",
"links": [
{
"rel": "self",
"href": "http://test/v2",
},
],
},
]
}
serializer = versions.VersionsTemplate()
response = serializer.serialize(versions_data)
root = etree.XML(response)
xmlutil.validate_schema(root, 'versions')
self.assertTrue(root.xpath('/ns:versions', namespaces=NS))
version_elems = root.xpath('ns:version', namespaces=NS)
self.assertEqual(len(version_elems), 1)
version = version_elems[0]
self.assertEqual(version.get('id'), versions_data['versions'][0]['id'])
self.assertEqual(version.get('status'),
versions_data['versions'][0]['status'])
(link,) = version.xpath('atom:link', namespaces=NS)
self.assertTrue(common.compare_links(link, [{
'rel': 'self',
'href': 'http://test/v2',
'type': 'application/atom+xml'}]))
def test_versions_multi_xml_serializer(self):
versions_data = {
'choices': [
{
"id": "2.7",
"updated": "2011-07-18T11:30:00Z",
"status": "DEPRECATED",
"media-types": VERSIONS['v2.0']['media-types'],
"links": [
{
"rel": "self",
"href": "http://test/v2/images",
},
],
},
]
}
serializer = versions.ChoicesTemplate()
response = serializer.serialize(versions_data)
root = etree.XML(response)
self.assertTrue(root.xpath('/ns:choices', namespaces=NS))
(version,) = root.xpath('ns:version', namespaces=NS)
self.assertEqual(version.get('id'), versions_data['choices'][0]['id'])
self.assertEqual(version.get('status'),
versions_data['choices'][0]['status'])
media_types = list(version)[0]
self.assertEqual(media_types.tag.split('}')[1], "media-types")
media_types = version.xpath('ns:media-types/ns:media-type',
namespaces=NS)
self.assertTrue(common.compare_media_types(media_types,
versions_data['choices'][0]['media-types']))
(link,) = version.xpath('atom:link', namespaces=NS)
self.assertTrue(common.compare_links(link,
versions_data['choices'][0]['links']))
def test_versions_list_atom_serializer(self):
versions_data = {
'versions': [
{
"id": "2.9.8",
"updated": "2011-07-20T11:40:00Z",
"status": "CURRENT",
"links": [
{
"rel": "self",
"href": "http://test/2.9.8",
},
],
},
]
}
serializer = versions.VersionsAtomSerializer()
response = serializer.serialize(versions_data)
f = feedparser.parse(response)
self.assertEqual(f.feed.title, 'Available API Versions')
self.assertEqual(f.feed.updated, '2011-07-20T11:40:00Z')
self.assertEqual(f.feed.id, 'http://test/')
self.assertEqual(f.feed.author, 'Rackspace')
self.assertEqual(f.feed.author_detail.href,
'http://www.rackspace.com/')
self.assertEqual(f.feed.links[0]['href'], 'http://test/')
self.assertEqual(f.feed.links[0]['rel'], 'self')
self.assertEqual(len(f.entries), 1)
entry = f.entries[0]
self.assertEqual(entry.id, 'http://test/2.9.8')
self.assertEqual(entry.title, 'Version 2.9.8')
self.assertEqual(entry.updated, '2011-07-20T11:40:00Z')
self.assertEqual(len(entry.content), 1)
self.assertEqual(entry.content[0].value,
'Version 2.9.8 CURRENT (2011-07-20T11:40:00Z)')
self.assertEqual(len(entry.links), 1)
self.assertEqual(entry.links[0]['href'], 'http://test/2.9.8')
self.assertEqual(entry.links[0]['rel'], 'self')
def test_version_detail_atom_serializer(self):
versions_data = {
"version": {
"id": "v2.0",
"status": "CURRENT",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/",
},
{
"rel": "describedby",
"type": "application/pdf",
"href": LINKS['v2.0']['pdf'],
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
"href": LINKS['v2.0']['wadl'],
},
],
"media-types": [
{
"base": "application/xml",
"type": "application/vnd.openstack.compute+xml"
";version=2",
},
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json"
";version=2",
}
],
},
}
serializer = versions.VersionAtomSerializer()
response = serializer.serialize(versions_data)
f = feedparser.parse(response)
self.assertEqual(f.feed.title, 'About This Version')
self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z')
self.assertEqual(f.feed.id, 'http://localhost/v2/')
self.assertEqual(f.feed.author, 'Rackspace')
self.assertEqual(f.feed.author_detail.href,
'http://www.rackspace.com/')
self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v2/')
self.assertEqual(f.feed.links[0]['rel'], 'self')
self.assertEqual(len(f.entries), 1)
entry = f.entries[0]
self.assertEqual(entry.id, 'http://localhost/v2/')
self.assertEqual(entry.title, 'Version v2.0')
self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
self.assertEqual(len(entry.content), 1)
self.assertEqual(entry.content[0].value,
'Version v2.0 CURRENT (2011-01-21T11:33:21Z)')
self.assertEqual(len(entry.links), 3)
self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
self.assertEqual(entry.links[0]['rel'], 'self')
self.assertEqual(entry.links[1], {
'rel': 'describedby',
'type': 'application/pdf',
'href': LINKS['v2.0']['pdf']})
self.assertEqual(entry.links[2], {
'rel': 'describedby',
'type': 'application/vnd.sun.wadl+xml',
'href': LINKS['v2.0']['wadl'],
})
|
|
# Copyright 2013 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import binascii
import pytest
from utils import assert_equal, assert_not_equal, read_crypto_test_vectors
from nacl.bindings import crypto_sign_PUBLICKEYBYTES, crypto_sign_SEEDBYTES
from nacl.encoding import Base64Encoder, HexEncoder
from nacl.exceptions import BadSignatureError
from nacl.signing import SignedMessage, SigningKey, VerifyKey
def tohex(b):
return binascii.hexlify(b).decode("ascii")
def ed25519_known_answers():
# Known answers taken from: http://ed25519.cr.yp.to/python/sign.input
# hex-encoded fields on each input line: sk||pk, pk, msg, signature||msg
# known answer fields: sk, pk, msg, signature, signed
DATA = "ed25519"
lines = read_crypto_test_vectors(DATA, delimiter=b":")
return [
(
x[0][:64], # secret key
x[1], # public key
x[2], # message
x[3][:128], # signature
x[3], # signed message
)
for x in lines
]
class TestSigningKey:
def test_initialize_with_generate(self):
SigningKey.generate()
def test_wrong_length(self):
with pytest.raises(ValueError):
SigningKey(b"")
def test_bytes(self):
k = SigningKey(b"\x00" * crypto_sign_SEEDBYTES)
assert bytes(k) == b"\x00" * crypto_sign_SEEDBYTES
def test_equal_keys_are_equal(self):
k1 = SigningKey(b"\x00" * crypto_sign_SEEDBYTES)
k2 = SigningKey(b"\x00" * crypto_sign_SEEDBYTES)
assert_equal(k1, k1)
assert_equal(k1, k2)
def test_equal_keys_have_equal_hashes(self):
k1 = SigningKey(b"\x00" * crypto_sign_SEEDBYTES)
k2 = SigningKey(b"\x00" * crypto_sign_SEEDBYTES)
assert hash(k1) == hash(k2)
assert id(k1) != id(k2)
@pytest.mark.parametrize(
"k2",
[
b"\x00" * crypto_sign_SEEDBYTES,
SigningKey(b"\x01" * crypto_sign_SEEDBYTES),
SigningKey(b"\x00" * (crypto_sign_SEEDBYTES - 1) + b"\x01"),
],
)
def test_different_keys_are_not_equal(self, k2):
k1 = SigningKey(b"\x00" * crypto_sign_SEEDBYTES)
assert_not_equal(k1, k2)
@pytest.mark.parametrize(
"seed",
[b"77076d0a7318a57d3c16c17251b26645df4c2f87ebc0992ab177fba51db92c2a"],
)
def test_initialization_with_seed(self, seed):
SigningKey(seed, encoder=HexEncoder)
@pytest.mark.parametrize(
("seed", "_public_key", "message", "signature", "expected"),
ed25519_known_answers(),
)
def test_message_signing(
self, seed, _public_key, message, signature, expected
):
signing_key = SigningKey(
seed,
encoder=HexEncoder,
)
signed = signing_key.sign(
binascii.unhexlify(message),
encoder=HexEncoder,
)
assert signed == expected
assert signed.message == message
assert signed.signature == signature
class TestVerifyKey:
def test_wrong_length(self):
with pytest.raises(ValueError):
VerifyKey(b"")
def test_bytes(self):
k = VerifyKey(b"\x00" * crypto_sign_PUBLICKEYBYTES)
assert bytes(k) == b"\x00" * crypto_sign_PUBLICKEYBYTES
def test_equal_keys_are_equal(self):
k1 = VerifyKey(b"\x00" * crypto_sign_PUBLICKEYBYTES)
k2 = VerifyKey(b"\x00" * crypto_sign_PUBLICKEYBYTES)
assert_equal(k1, k1)
assert_equal(k1, k2)
def test_equal_keys_have_equal_hashes(self):
k1 = VerifyKey(b"\x00" * crypto_sign_PUBLICKEYBYTES)
k2 = VerifyKey(b"\x00" * crypto_sign_PUBLICKEYBYTES)
assert hash(k1) == hash(k2)
assert id(k1) != id(k2)
@pytest.mark.parametrize(
"k2",
[
b"\x00" * crypto_sign_PUBLICKEYBYTES,
VerifyKey(b"\x01" * crypto_sign_PUBLICKEYBYTES),
VerifyKey(b"\x00" * (crypto_sign_PUBLICKEYBYTES - 1) + b"\x01"),
],
)
def test_different_keys_are_not_equal(self, k2):
k1 = VerifyKey(b"\x00" * crypto_sign_PUBLICKEYBYTES)
assert_not_equal(k1, k2)
@pytest.mark.parametrize(
("_seed", "public_key", "message", "signature", "signed"),
ed25519_known_answers(),
)
def test_valid_signed_message(
self, _seed, public_key, message, signature, signed
):
key = VerifyKey(
public_key,
encoder=HexEncoder,
)
assert (
binascii.hexlify(
key.verify(signed, encoder=HexEncoder),
)
== message
)
assert (
binascii.hexlify(
key.verify(
message, HexEncoder.decode(signature), encoder=HexEncoder
),
)
== message
)
def test_invalid_signed_message(self):
skey = SigningKey.generate()
smessage = skey.sign(b"A Test Message!")
signature, message = smessage.signature, b"A Forged Test Message!"
# Small sanity check
assert skey.verify_key.verify(smessage)
with pytest.raises(BadSignatureError):
skey.verify_key.verify(message, signature)
with pytest.raises(BadSignatureError):
forged = SignedMessage(signature + message)
skey.verify_key.verify(forged)
def test_invalid_signature_length(self):
skey = SigningKey.generate()
message = b"hello"
signature = skey.sign(message).signature
# Sanity checks
assert skey.verify_key.verify(message, signature)
assert skey.verify_key.verify(signature + message)
with pytest.raises(ValueError):
skey.verify_key.verify(message, b"")
with pytest.raises(ValueError):
skey.verify_key.verify(message, signature * 2)
with pytest.raises(ValueError):
skey.verify_key.verify(signature + message, b"")
def test_base64_smessage_with_detached_sig_matches_with_attached_sig(self):
sk = SigningKey.generate()
vk = sk.verify_key
smsg = sk.sign(b"Hello World in base64", encoder=Base64Encoder)
msg = smsg.message
b64sig = smsg.signature
sig = Base64Encoder.decode(b64sig)
assert vk.verify(msg, sig, encoder=Base64Encoder) == vk.verify(
smsg, encoder=Base64Encoder
)
assert Base64Encoder.decode(msg) == b"Hello World in base64"
def test_hex_smessage_with_detached_sig_matches_with_attached_sig(self):
sk = SigningKey.generate()
vk = sk.verify_key
smsg = sk.sign(b"Hello World in hex", encoder=HexEncoder)
msg = smsg.message
hexsig = smsg.signature
sig = HexEncoder.decode(hexsig)
assert vk.verify(msg, sig, encoder=HexEncoder) == vk.verify(
smsg, encoder=HexEncoder
)
assert HexEncoder.decode(msg) == b"Hello World in hex"
def test_key_conversion(self):
keypair_seed = (
b"421151a459faeade3d247115f94aedae"
b"42318124095afabe4d1451a559faedee"
)
signing_key = SigningKey(binascii.unhexlify(keypair_seed))
verify_key = signing_key.verify_key
private_key = bytes(signing_key.to_curve25519_private_key())
public_key = bytes(verify_key.to_curve25519_public_key())
assert tohex(private_key) == (
"8052030376d47112be7f73ed7a019293"
"dd12ad910b654455798b4667d73de166"
)
assert tohex(public_key) == (
"f1814f0e8ff1043d8a44d25babff3ced"
"cae6c22c3edaa48f857ae70de2baae50"
)
def check_type_error(expected, f, *args):
with pytest.raises(TypeError) as e:
f(*args)
assert expected in str(e.value)
def test_wrong_types():
sk = SigningKey.generate()
check_type_error(
"SigningKey must be created from a 32 byte seed", SigningKey, 12
)
check_type_error(
"SigningKey must be created from a 32 byte seed", SigningKey, sk
)
check_type_error(
"SigningKey must be created from a 32 byte seed",
SigningKey,
sk.verify_key,
)
check_type_error("VerifyKey must be created from 32 bytes", VerifyKey, 13)
check_type_error("VerifyKey must be created from 32 bytes", VerifyKey, sk)
check_type_error(
"VerifyKey must be created from 32 bytes", VerifyKey, sk.verify_key
)
def verify_detached_signature(x):
sk.verify_key.verify(b"", x)
check_type_error(
"Verification signature must be created from 64 bytes",
verify_detached_signature,
13,
)
check_type_error(
"Verification signature must be created from 64 bytes",
verify_detached_signature,
sk,
)
check_type_error(
"Verification signature must be created from 64 bytes",
verify_detached_signature,
sk.verify_key,
)
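# Illustrative usage sketch (not part of the test suite): the round trip the
# tests above exercise, shown end to end. The message and encoder choices
# here are arbitrary.
def _sign_verify_sketch():
    sk = SigningKey.generate()
    signed = sk.sign(b"payload", encoder=HexEncoder)
    # Attached form: the signature travels with the message.
    assert sk.verify_key.verify(signed, encoder=HexEncoder) == b"payload"
    # Detached form: pass the message and the raw 64-byte signature.
    raw_sig = HexEncoder.decode(signed.signature)
    assert sk.verify_key.verify(b"payload", raw_sig) == b"payload"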
|
|
# Gay Porn Collector
import cookielib, cgi, re, os, platform, json, urllib
PLUGIN_LOG_TITLE='Gay Porn Collector' # Log Title
VERSION_NO = '2017.07.26.0'
# Delay (in seconds) used when requesting HTML; a small delay can help
# prevent being banned by the site.
REQUEST_DELAY = 0
# URLS
BASE_URL='http://www.gayporncollector.com'
BASE_VIDEO_DETAILS_URL = BASE_URL + '%s'
# Example Search URL
# http://www.gayporncollector.com/wp-json/milkshake/v2/pornmovies/?movie_title=%23helix:%20Twink%20Confessions%202
BASE_SEARCH_URL_MOVIES = 'http://www.gayporncollector.com/wp-json/milkshake/v2/pornmovies/?movie_title='
# http://www.gayporncollector.com/wp-json/milkshake/v2/pornscenes/?scene_title=Wet%20&%20Wild%20With%20Blake%20Mitchell
BASE_SEARCH_URL_SCENES = 'http://www.gayporncollector.com/wp-json/milkshake/v2/pornscenes/'
# http://www.gayporncollector.com/wp-json/milkshake/v2/pornscenes/3620
BASE_SEARCH_URL_STARS = 'http://www.gayporncollector.com/wp-json/milkshake/v2/pornstars/'
# File names to match for this agent
file_name_pattern = re.compile(Prefs['regex'])
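# Example (hypothetical) regex pref: the pattern must define a named group
# 'clip_name', which search() reads below, e.g. r'(?P<clip_name>.+)'.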
# When building search URLs, replace '#' with %23 and apostrophes (') with %27.
def Start():
HTTP.CacheTime = CACHE_1WEEK
HTTP.Headers['User-agent'] = 'Mozilla/4.0 (compatible; MSIE 8.0; ' \
'Windows NT 6.2; Trident/4.0; SLCC2; .NET CLR 2.0.50727; ' \
'.NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0)'
class GayPornCollector(Agent.Movies):
name = 'Gay Porn Collector'
languages = [Locale.Language.NoLanguage, Locale.Language.English]
fallback_agent = False
primary_provider = False
contributes_to = ['com.plexapp.agents.cockporn']
def Log(self, message, *args):
if Prefs['debug']:
Log(PLUGIN_LOG_TITLE + ' - ' + message, *args)
def intTest(self, s):
try:
int(s)
return int(s)
except ValueError:
return False
def search(self, results, media, lang, manual):
self.Log('-----------------------------------------------------------------------')
self.Log('SEARCH CALLED v.%s', VERSION_NO)
self.Log('SEARCH - Platform: %s %s', platform.system(), platform.release())
self.Log('SEARCH - media.title - %s', media.title)
self.Log('SEARCH - media.items[0].parts[0].file - %s', media.items[0].parts[0].file)
self.Log('SEARCH - media.primary_metadata.title - %s', media.primary_metadata.title)
self.Log('SEARCH - media.items - %s', media.items)
self.Log('SEARCH - media.filename - %s', media.filename)
self.Log('SEARCH - lang - %s', lang)
self.Log('SEARCH - manual - %s', manual)
self.Log('SEARCH - Prefs->cover - %s', Prefs['cover'])
self.Log('SEARCH - Prefs->folders - %s', Prefs['folders'])
self.Log('SEARCH - Prefs->regex - %s', Prefs['regex'])
if not media.items[0].parts[0].file:
return
path_and_file = media.items[0].parts[0].file.lower()
self.Log('SEARCH - File Path: %s', path_and_file)
(file_dir, basename) = os.path.split(os.path.splitext(path_and_file)[0])
final_dir = os.path.split(file_dir)[1]
self.Log('SEARCH - Enclosing Folder: %s', final_dir)
if Prefs['folders'] != "*":
folder_list = re.split(',\s*', Prefs['folders'].lower())
if final_dir not in folder_list:
self.Log('SEARCH - Skipping %s because the folder %s is not in the acceptable folders list: %s', basename, final_dir, ','.join(folder_list))
return
m = file_name_pattern.search(basename)
if not m:
self.Log('SEARCH - Skipping %s because the file name is not in the expected format.', basename)
return
self.Log('SEARCH - File Name: %s', basename)
groups = m.groupdict()
scene_url_name = groups['clip_name']
scene_url = BASE_SEARCH_URL_SCENES + '?scene_title=' + urllib.quote(scene_url_name)
self.Log('SEARCH - Scene URL: %s', scene_url)
        file_studio = final_dir  # used in the studio-name comparison below
self.Log('SEARCH - final_dir: %s', final_dir)
self.Log('SEARCH - This is a scene: True')
        file_name = basename.lower()  # Lower-case the file name.
        file_name = re.sub('\(([^\)]+)\)', '', file_name)  # Remove parenthesized text, parentheses included.
        file_name = file_name.lstrip(' ')  # Remove leading spaces.
        file_name = file_name.lstrip('- ')  # Remove leading hyphens and spaces.
        file_name = file_name.rstrip(' ')  # Remove trailing spaces.
response = urllib.urlopen(scene_url)
search_results = json.loads(response.read())
score=10
if 'message' in search_results:
self.Log('SEARCH - Skipping %s because the results are empty.', basename)
return
self.Log('SEARCH - results size exact match: %s', len(search_results))
for result in search_results:
try:
studio = result['related_porn_studio'][0]['porn_studio_name']
self.Log('SEARCH - studio: %s', studio)
            except (KeyError, IndexError, TypeError):
                studio = 'empty'
                self.Log('SEARCH - studio: empty')
video_title = result['title']
video_title = video_title.lstrip(' ') #Removes white spaces on the left end.
video_title = video_title.rstrip(' ') #Removes white spaces on the right end.
video_title = video_title.replace(':', '')
if studio.lower() == file_studio.lower() and video_title.lower() == file_name.lower():
self.Log('SEARCH - video title: %s', video_title)
self.Log('SEARCH - video url: %s', result['link'])
self.Log('SEARCH - Exact Match "' + file_name.lower() + '" == "%s"', video_title.lower())
self.Log('SEARCH - Studio Match "' + studio.lower() + '" == "%s"', file_studio.lower())
results.Append(MetadataSearchResult(id = str(result['ID']), name = video_title, score = 100, lang = lang))
return
def update(self, metadata, media, lang, force=False):
self.Log('UPDATE CALLED')
if not media.items[0].parts[0].file:
return
file_path = media.items[0].parts[0].file
self.Log('UPDATE - File Path: %s', file_path)
self.Log('UPDATE - Video URL: %s', metadata.id)
url = BASE_SEARCH_URL_SCENES + metadata.id
# Fetch HTML.
response = urllib.urlopen(url)
results = json.loads(response.read())
# Set tagline to URL.
metadata.tagline = results['link']
# Set video title.
video_title = results['title']
self.Log('UPDATE - video_title: "%s"', video_title)
metadata.title = video_title
metadata.content_rating = 'X'
        # Try to get and process the poster.
valid_image_poster_names = list()
try:
self.Log('UPDATE - video_image_list: "%s"', results['poster'])
poster_url = results['poster']['guid']
valid_image_poster_names.append(poster_url)
if poster_url not in metadata.posters:
metadata.posters[poster_url]=Proxy.Preview(HTTP.Request(poster_url))
except Exception as e:
self.Log('UPDATE - Error getting posters: %s', e)
pass
# Try to get and process the background art.
valid_image_background_names = list()
try:
i = 0
video_image_list = results['gallery']
self.Log('UPDATE - video_image_list: "%s"', video_image_list)
coverPrefs = Prefs['cover']
for image in video_image_list:
if i <= (self.intTest(coverPrefs)-1) or coverPrefs == "all available":
i = i + 1
art_url = image['guid']
valid_image_background_names.append(art_url)
if art_url not in metadata.art:
try:
metadata.art[art_url]=Proxy.Preview(HTTP.Request(art_url), sort_order = i)
except: pass
except Exception as e:
            self.Log('UPDATE - Error getting background art: %s', e)
pass
# Try to get description text.
try:
about_text=results['scene_description']
self.Log('UPDATE - About Text %s', about_text)
metadata.summary=about_text
except Exception as e:
self.Log('UPDATE - Error getting description text: %s', e)
pass
# Try to get and process the release date.
try:
rd=results['release_date']
self.Log('UPDATE - Release Date: %s', rd)
metadata.originally_available_at = Datetime.ParseDate(rd).date()
metadata.year = metadata.originally_available_at.year
except Exception as e:
self.Log('UPDATE - Error getting release date: %s', e)
pass
# Try to get and process the video genres.
try:
metadata.genres.clear()
genres = results['porn_scene_genres']
self.Log('UPDATE - video_genres count from scene: "%s"', len(genres))
self.Log('UPDATE - video_genres from scene: "%s"', genres)
for genre in genres:
metadata.genres.add(genre['name'])
except Exception as e:
self.Log('UPDATE - Error getting video genres: %s', e)
pass
# Crew.
# Try to get and process the director.
try:
metadata.directors.clear()
director = metadata.directors.new()
director.name = results['scene_director']
self.Log('UPDATE - director: "%s"', director)
except Exception as e:
self.Log('UPDATE - Error getting director: %s', e)
pass
# Try to get and process the video cast.
try:
metadata.roles.clear()
casts = results['related_porn_stars']
self.Log('UPDATE - cast scene count: "%s"', len(casts))
if len(casts) > 0:
roleCount = {}
for cast in casts:
cname = cast['porn_star_name']
self.Log('UPDATE - cast: "%s"', cname)
role = metadata.roles.new()
role.name = cname
try:
c_id = cast['porn_star_id']
self.Log('UPDATE - cast id: "%s"', c_id)
                        url = BASE_SEARCH_URL_STARS + str(c_id)
# Fetch HTML.
response = urllib.urlopen(url)
star = json.loads(response.read())
role.photo = star['poster']['guid']
roleCount[star['role']] = 1 + roleCount.get(star['role'],0)
if roleCount[star['role']] != 1:
role.role = star['role'] + " " + str(roleCount[star['role']])
else:
role.role = star['role']
except Exception as e:
self.Log('UPDATE - Error getting cast: %s', e)
pass
except Exception as e:
self.Log('UPDATE - Error getting cast: %s', e)
pass
# Try to get and process the studio name.
try:
studio = results['related_porn_studio'][0]['porn_studio_name']
self.Log('UPDATE - studio: "%s"', studio)
metadata.studio=studio
except Exception as e:
self.Log('UPDATE - Error getting studio name: %s', e)
pass
# Try to get and process the country.
try:
metadata.countries.clear()
country_name = results['related_porn_studio'][0]['porn_studio_country']
metadata.countries.add(country_name)
self.Log('UPDATE - country: "%s"', country_name)
except Exception as e:
self.Log('UPDATE - Error getting country name: %s', e)
pass
        # Try to get and process the collections.
try:
metadata.collections.clear()
movie_names = results['related_porn_movie']
if movie_names is not None:
self.Log('UPDATE - collections count: "%s"', len(movie_names))
if len(movie_names) > 0:
for movie in movie_names:
movie_name = movie['porn_movie_title']
self.Log('UPDATE - collection: "%s"', movie_name)
collection = metadata.collections.add(movie_name)
except Exception as e:
self.Log('UPDATE - Error getting collections name: %s', e)
pass
metadata.art.validate_keys(valid_image_background_names)
metadata.posters.validate_keys(valid_image_poster_names)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Csiszar f-Divergence and helpers.
@@amari_alpha
@@arithmetic_geometric
@@chi_square
@@csiszar_vimco
@@dual_csiszar_function
@@jeffreys
@@jensen_shannon
@@kl_forward
@@kl_reverse
@@log1p_abs
@@modified_gan
@@monte_carlo_csiszar_f_divergence
@@pearson
@@squared_hellinger
@@symmetrized_csiszar_function
@@total_variation
@@triangular
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.bayesflow.python.ops import monte_carlo_impl as monte_carlo
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
def amari_alpha(logu, alpha=1., self_normalized=False, name=None):
"""The Amari-alpha Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the Amari-alpha Csiszar-function is:
```none
f(u) = { -log(u) + (u - 1), alpha = 0
{ u log(u) - (u - 1), alpha = 1
{ [(u**alpha - 1) - alpha (u - 1)] / (alpha (alpha - 1)), otherwise
```
When `self_normalized = False` the `(u - 1)` terms are omitted.
Warning: when `alpha != 0` and/or `self_normalized = True` this function makes
non-log-space calculations and may therefore be numerically unstable for
`|logu| >> 0`.
For more information, see:
    A. Cichocki and S. Amari. "Families of Alpha- Beta- and Gamma- Divergences:
Flexible and Robust Measures of Similarities." Entropy, vol. 12, no. 6, pp.
1532-1568, 2010.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
alpha: `float`-like Python scalar. (See Mathematical Details for meaning.)
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
amari_alpha_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
Raises:
TypeError: if `alpha` is `None` or a `Tensor`.
TypeError: if `self_normalized` is `None` or a `Tensor`.
"""
with ops.name_scope(name, "amari_alpha", [logu]):
if alpha is None or contrib_framework.is_tensor(alpha):
raise TypeError("`alpha` cannot be `None` or `Tensor` type.")
if self_normalized is None or contrib_framework.is_tensor(self_normalized):
raise TypeError("`self_normalized` cannot be `None` or `Tensor` type.")
logu = ops.convert_to_tensor(logu, name="logu")
if alpha == 0.:
f = -logu
elif alpha == 1.:
f = math_ops.exp(logu) * logu
else:
f = math_ops.expm1(alpha * logu) / (alpha * (alpha - 1.))
if not self_normalized:
return f
if alpha == 0.:
return f + math_ops.expm1(logu)
elif alpha == 1.:
return f - math_ops.expm1(logu)
else:
return f - math_ops.expm1(logu) / (alpha - 1.)
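# Illustrative numpy sketch (not part of the original module): numerically
# checks that the general-alpha branch above approaches the documented
# special cases. The sample point and tolerances are arbitrary.
def _amari_alpha_numpy_sketch(logu=0.5):
  u = np.exp(logu)
  eps = 1e-6
  # alpha -> 0: expm1(alpha * logu) / (alpha * (alpha - 1)) tends to -log(u).
  a = eps
  f0 = np.expm1(a * logu) / (a * (a - 1.))
  assert abs(f0 - (-logu)) < 1e-4
  # alpha -> 1, self-normalized: subtracting expm1(logu) / (alpha - 1)
  # removes the pole, leaving u log(u) - (u - 1) in the limit.
  a = 1. + eps
  f1 = np.expm1(a * logu) / (a * (a - 1.)) - np.expm1(logu) / (a - 1.)
  assert abs(f1 - (u * logu - (u - 1.))) < 1e-3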
def kl_reverse(logu, self_normalized=False, name=None):
"""The reverse Kullback-Leibler Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the KL-reverse Csiszar-function is:
```none
f(u) = -log(u) + (u - 1)
```
When `self_normalized = False` the `(u - 1)` term is omitted.
Observe that as an f-Divergence, this Csiszar-function implies:
```none
D_f[p, q] = KL[q, p]
```
The KL is "reverse" because in maximum likelihood we think of minimizing `q`
as in `KL[p, q]`.
  Warning: when `self_normalized = True` this function makes non-log-space
calculations and may therefore be numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
kl_reverse_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at
`u = exp(logu)`.
Raises:
TypeError: if `self_normalized` is `None` or a `Tensor`.
"""
with ops.name_scope(name, "kl_reverse", [logu]):
return amari_alpha(logu, alpha=0., self_normalized=self_normalized)
def kl_forward(logu, self_normalized=False, name=None):
"""The forward Kullback-Leibler Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the KL-forward Csiszar-function is:
```none
f(u) = u log(u) - (u - 1)
```
When `self_normalized = False` the `(u - 1)` term is omitted.
Observe that as an f-Divergence, this Csiszar-function implies:
```none
D_f[p, q] = KL[p, q]
```
The KL is "forward" because in maximum likelihood we think of minimizing `q`
as in `KL[p, q]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
kl_forward_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at
`u = exp(logu)`.
Raises:
TypeError: if `self_normalized` is `None` or a `Tensor`.
"""
with ops.name_scope(name, "kl_forward", [logu]):
return amari_alpha(logu, alpha=1., self_normalized=self_normalized)
def jensen_shannon(logu, self_normalized=False, name=None):
"""The Jensen-Shannon Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True`, the Jensen-Shannon Csiszar-function is:
```none
f(u) = u log(u) - (1 + u) log(1 + u) + (u + 1) log(2)
```
When `self_normalized = False` the `(u + 1) log(2)` term is omitted.
Observe that as an f-Divergence, this Csiszar-function implies:
```none
D_f[p, q] = KL[p, m] + KL[q, m]
m(x) = 0.5 p(x) + 0.5 q(x)
```
In a sense, this divergence is the "reverse" of the Arithmetic-Geometric
f-Divergence.
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
For more information, see:
Lin, J. "Divergence measures based on the Shannon entropy." IEEE Trans.
Inf. Th., 37, 145-151, 1991.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
jensen_shannon_of_u: `float`-like `Tensor` of the Csiszar-function
evaluated at `u = exp(logu)`.
"""
with ops.name_scope(name, "jensen_shannon", [logu]):
logu = ops.convert_to_tensor(logu, name="logu")
npdt = logu.dtype.as_numpy_dtype
y = nn_ops.softplus(logu)
if self_normalized:
y -= np.log(2).astype(npdt)
return math_ops.exp(logu) * logu - (1. + math_ops.exp(logu)) * y
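# Illustrative numpy sketch (not part of the original module): checks that
# the softplus form used above matches the docstring formula at one point.
def _jensen_shannon_numpy_sketch(logu=0.4):
  u = np.exp(logu)
  y = np.log1p(u) - np.log(2.)  # softplus(logu) - log(2), self-normalized.
  via_code = u * logu - (1. + u) * y
  via_doc = u * np.log(u) - (1. + u) * np.log1p(u) + (u + 1.) * np.log(2.)
  assert abs(via_code - via_doc) < 1e-12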
def arithmetic_geometric(logu, self_normalized=False, name=None):
"""The Arithmetic-Geometric Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True` the Arithmetic-Geometric Csiszar-function is:
```none
f(u) = (1 + u) log( (1 + u) / sqrt(u) ) - (1 + u) log(2)
```
When `self_normalized = False` the `(1 + u) log(2)` term is omitted.
Observe that as an f-Divergence, this Csiszar-function implies:
```none
D_f[p, q] = KL[m, p] + KL[m, q]
m(x) = 0.5 p(x) + 0.5 q(x)
```
In a sense, this divergence is the "reverse" of the Jensen-Shannon
f-Divergence.
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
arithmetic_geometric_of_u: `float`-like `Tensor` of the
Csiszar-function evaluated at `u = exp(logu)`.
"""
with ops.name_scope(name, "arithmetic_geometric", [logu]):
logu = ops.convert_to_tensor(logu, name="logu")
y = nn_ops.softplus(logu) - 0.5 * logu
if self_normalized:
y -= np.log(2.).astype(logu.dtype.as_numpy_dtype)
return (1. + math_ops.exp(logu)) * y
def total_variation(logu, name=None):
"""The Total Variation Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Total-Variation Csiszar-function is:
```none
f(u) = 0.5 |u - 1|
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
total_variation_of_u: `float`-like `Tensor` of the Csiszar-function
evaluated at `u = exp(logu)`.
"""
with ops.name_scope(name, "total_variation", [logu]):
logu = ops.convert_to_tensor(logu, name="logu")
return 0.5 * math_ops.abs(math_ops.expm1(logu))
def pearson(logu, name=None):
"""The Pearson Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Pearson Csiszar-function is:
```none
f(u) = (u - 1)**2
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
pearson_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at
`u = exp(logu)`.
"""
with ops.name_scope(name, "pearson", [logu]):
logu = ops.convert_to_tensor(logu, name="logu")
return math_ops.square(math_ops.expm1(logu))
def squared_hellinger(logu, name=None):
"""The Squared-Hellinger Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Squared-Hellinger Csiszar-function is:
```none
f(u) = (sqrt(u) - 1)**2
```
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
squared_hellinger_of_u: `float`-like `Tensor` of the Csiszar-function
evaluated at `u = exp(logu)`.
"""
with ops.name_scope(name, "squared_hellinger", [logu]):
logu = ops.convert_to_tensor(logu, name="logu")
return pearson(0.5 * logu)
def triangular(logu, name=None):
"""The Triangular Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Triangular Csiszar-function is:
```none
f(u) = (u - 1)**2 / (1 + u)
```
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
triangular_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
"""
with ops.name_scope(name, "triangular", [logu]):
logu = ops.convert_to_tensor(logu, name="logu")
return pearson(logu) / (1. + math_ops.exp(logu))
def t_power(logu, t, self_normalized=False, name=None):
"""The T-Power Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True` the T-Power Csiszar-function is:
```none
f(u) = s [ u**t - 1 - t(u - 1) ]
s = { -1 0 < t < 1
{ +1 otherwise
```
When `self_normalized = False` the `- t(u - 1)` term is omitted.
This is similar to the `amari_alpha` Csiszar-function, with the associated
divergence being the same up to factors depending only on `t`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
t: `Tensor` of same `dtype` as `logu` and broadcastable shape.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`.
name: Python `str` name prefixed to Ops created by this function.
Returns:
t_power_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
"""
with ops.name_scope(name, "t_power", [logu, t]):
logu = ops.convert_to_tensor(logu, name="logu")
t = ops.convert_to_tensor(t, dtype=logu.dtype.base_dtype, name="t")
fu = math_ops.expm1(t * logu)
if self_normalized:
fu -= t * math_ops.expm1(logu)
fu *= array_ops.where(math_ops.logical_and(0. < t, t < 1.),
-array_ops.ones_like(t),
array_ops.ones_like(t))
return fu
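# Illustrative numpy sketch (not part of the original module): for 0 < t < 1
# the raw u**t - 1 term is concave, so the sign flip above is what keeps the
# Csiszar-function convex; a quick midpoint check at t = 0.5.
def _t_power_numpy_sketch():
  t = 0.5
  f = lambda u: -(u ** t - 1.)  # s = -1 since 0 < t < 1; non-self-normalized.
  u1, u2 = 0.5, 4.0
  # Midpoint convexity: f((u1 + u2) / 2) <= (f(u1) + f(u2)) / 2.
  assert f(0.5 * (u1 + u2)) <= 0.5 * (f(u1) + f(u2))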
def log1p_abs(logu, name=None):
"""The log1p-abs Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Log1p-Abs Csiszar-function is:
```none
f(u) = u**(sign(u-1)) - 1
```
This function is so-named because it was invented from the following recipe.
Choose a convex function g such that g(0)=0 and solve for f:
```none
log(1 + f(u)) = g(log(u)).
<=>
f(u) = exp(g(log(u))) - 1
```
That is, the graph is identically `g` when y-axis is `log1p`-domain and x-axis
is `log`-domain.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
log1p_abs_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
"""
with ops.name_scope(name, "log1p_abs", [logu]):
logu = ops.convert_to_tensor(logu, name="logu")
return math_ops.expm1(math_ops.abs(logu))
def jeffreys(logu, name=None):
"""The Jeffreys Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Jeffreys Csiszar-function is:
```none
f(u) = 0.5 ( u log(u) - log(u) )
= 0.5 kl_forward + 0.5 kl_reverse
= symmetrized_csiszar_function(kl_reverse)
= symmetrized_csiszar_function(kl_forward)
```
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
jeffreys_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
"""
with ops.name_scope(name, "jeffreys", [logu]):
logu = ops.convert_to_tensor(logu, name="logu")
return 0.5 * math_ops.expm1(logu) * logu
def chi_square(logu, name=None):
"""The chi-Square Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Chi-square Csiszar-function is:
```none
f(u) = u**2 - 1
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
chi_square_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
"""
with ops.name_scope(name, "chi_square", [logu]):
logu = ops.convert_to_tensor(logu, name="logu")
return math_ops.expm1(2. * logu)
def modified_gan(logu, self_normalized=False, name=None):
"""The Modified-GAN Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
When `self_normalized = True` the modified-GAN (Generative/Adversarial
Network) Csiszar-function is:
```none
f(u) = log(1 + u) - log(u) + 0.5 (u - 1)
```
  When `self_normalized = False` the `0.5 (u - 1)` term is omitted.
The unmodified GAN Csiszar-function is identical to Jensen-Shannon (with
`self_normalized = False`).
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
`f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
when `p, q` are unnormalized measures.
name: Python `str` name prefixed to Ops created by this function.
Returns:
    modified_gan_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
"""
with ops.name_scope(name, "chi_square", [logu]):
logu = ops.convert_to_tensor(logu, name="logu")
y = nn_ops.softplus(logu) - logu
if self_normalized:
y += 0.5 * math_ops.expm1(logu)
return y
def dual_csiszar_function(logu, csiszar_function, name=None):
"""Calculates the dual Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Csiszar-dual is defined as:
```none
f^*(u) = u f(1 / u)
```
where `f` is some other Csiszar-function.
For example, the dual of `kl_reverse` is `kl_forward`, i.e.,
```none
f(u) = -log(u)
f^*(u) = u f(1 / u) = -u log(1 / u) = u log(u)
```
The dual of the dual is the original function:
```none
f^**(u) = {u f(1/u)}^*(u) = u (1/u) f(1/(1/u)) = f(u)
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
csiszar_function: Python `callable` representing a Csiszar-function over
log-domain.
name: Python `str` name prefixed to Ops created by this function.
Returns:
dual_f_of_u: `float`-like `Tensor` of the result of calculating the dual of
`f` at `u = exp(logu)`.
"""
with ops.name_scope(name, "dual_csiszar_function", [logu]):
return math_ops.exp(logu) * csiszar_function(-logu)
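# Illustrative numpy sketch (not part of the original module): verifies the
# kl_reverse <-> kl_forward duality quoted in the docstring at one point,
# using the non-self-normalized forms.
def _dual_csiszar_numpy_sketch(logu=0.7):
  u = np.exp(logu)
  kl_reverse_f = lambda lu: -lu                # f(u) = -log(u)
  dual = np.exp(logu) * kl_reverse_f(-logu)    # u * f(1/u), as computed above
  assert abs(dual - u * logu) < 1e-12          # equals kl_forward: u * log(u)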
def symmetrized_csiszar_function(logu, csiszar_function, name=None):
"""Symmetrizes a Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The symmetrized Csiszar-function is defined as:
```none
f_g(u) = 0.5 g(u) + 0.5 u g (1 / u)
```
where `g` is some other Csiszar-function.
We say the function is "symmetrized" because:
```none
D_{f_g}[p, q] = D_{f_g}[q, p]
```
for all `p << >> q` (i.e., `support(p) = support(q)`).
There exists alternatives for symmetrizing a Csiszar-function. For example,
```none
f_g(u) = max(f(u), f^*(u)),
```
where `f^*` is the dual Csiszar-function, also implies a symmetric
f-Divergence.
Example:
When either of the following functions are symmetrized, we obtain the
Jensen-Shannon Csiszar-function, i.e.,
```none
g(u) = -log(u) - (1 + u) log((1 + u) / 2) + u - 1
h(u) = log(4) + 2 u log(u / (1 + u))
```
implies,
```none
f_g(u) = f_h(u) = u log(u) - (1 + u) log((1 + u) / 2)
= jensen_shannon(log(u)).
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
csiszar_function: Python `callable` representing a Csiszar-function over
log-domain.
name: Python `str` name prefixed to Ops created by this function.
Returns:
symmetrized_g_of_u: `float`-like `Tensor` of the result of applying the
symmetrization of `g` evaluated at `u = exp(logu)`.
"""
with ops.name_scope(name, "symmetrized_csiszar_function", [logu]):
logu = ops.convert_to_tensor(logu, name="logu")
return 0.5 * (csiszar_function(logu)
+ dual_csiszar_function(logu, csiszar_function))
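# Illustrative numpy sketch (not part of the original module): symmetrizing
# kl_reverse yields the Jeffreys Csiszar-function defined above, i.e.
# 0.5 * (-log(u)) + 0.5 * u * log(u) = 0.5 * (u log(u) - log(u)).
def _symmetrized_numpy_sketch(logu=0.3):
  u = np.exp(logu)
  g = lambda lu: -lu                       # kl_reverse, non-self-normalized
  sym = 0.5 * g(logu) + 0.5 * u * g(-logu)
  assert abs(sym - 0.5 * (u * logu - logu)) < 1e-12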
def monte_carlo_csiszar_f_divergence(
f,
p_log_prob,
q,
num_draws,
use_reparametrization=None,
seed=None,
name=None):
"""Monte-Carlo approximation of the Csiszar f-Divergence.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Csiszar f-Divergence for Csiszar-function f is given by:
```none
D_f[p(X), q(X)] := E_{q(X)}[ f( p(X) / q(X) ) ]
~= m**-1 sum_j^m f( p(x_j) / q(x_j) ),
where x_j ~iid q(X)
```
Tricks: Reparameterization and Score-Gradient
When q is "reparameterized", i.e., a diffeomorphic transformation of a
parameterless distribution (e.g.,
`Normal(Y; m, s) <=> Y = sX + m, X ~ Normal(0,1)`), we can swap gradient and
expectation, i.e.,
`grad[Avg{ s_i : i=1...n }] = Avg{ grad[s_i] : i=1...n }` where `S_n=Avg{s_i}`
and `s_i = f(x_i), x_i ~iid q(X)`.
However, if q is not reparameterized, TensorFlow's gradient will be incorrect
since the chain-rule stops at samples of unreparameterized distributions. In
this circumstance using the Score-Gradient trick results in an unbiased
gradient, i.e.,
```none
grad[ E_q[f(X)] ]
= grad[ int dx q(x) f(x) ]
= int dx grad[ q(x) f(x) ]
= int dx [ q'(x) f(x) + q(x) f'(x) ]
= int dx q(x) [q'(x) / q(x) f(x) + f'(x) ]
= int dx q(x) grad[ f(x) q(x) / stop_grad[q(x)] ]
= E_q[ grad[ f(x) q(x) / stop_grad[q(x)] ] ]
```
Unless `q.reparameterization_type != distribution.FULLY_REPARAMETERIZED` it is
usually preferable to set `use_reparametrization = True`.
Example Application:
The Csiszar f-Divergence is a useful framework for variational inference.
I.e., observe that,
```none
f(p(x)) = f( E_{q(Z | x)}[ p(x, Z) / q(Z | x) ] )
<= E_{q(Z | x)}[ f( p(x, Z) / q(Z | x) ) ]
:= D_f[p(x, Z), q(Z | x)]
```
The inequality follows from the fact that the "perspective" of `f`, i.e.,
  `(s, t) |-> t f(s / t)`, is convex in `(s, t)` when `s/t in domain(f)` and
`t` is a real. Since the above framework includes the popular Evidence Lower
BOund (ELBO) as a special case, i.e., `f(u) = -log(u)`, we call this framework
"Evidence Divergence Bound Optimization" (EDBO).
Args:
f: Python `callable` representing a Csiszar-function in log-space, i.e.,
takes `p_log_prob(q_samples) - q.log_prob(q_samples)`.
p_log_prob: Python `callable` taking (a batch of) samples from `q` and
      returning the natural-log of the probability under distribution `p`.
(In variational inference `p` is the joint distribution.)
q: `tf.Distribution`-like instance; must implement:
`reparameterization_type`, `sample(n, seed)`, and `log_prob(x)`.
(In variational inference `q` is the approximate posterior distribution.)
num_draws: Integer scalar number of draws used to approximate the
f-Divergence expectation.
use_reparametrization: Python `bool`. When `None` (the default),
automatically set to:
`q.reparameterization_type == distribution.FULLY_REPARAMETERIZED`.
When `True` uses the standard Monte-Carlo average. When `False` uses the
score-gradient trick. (See above for details.) When `False`, consider
using `csiszar_vimco`.
seed: Python `int` seed for `q.sample`.
name: Python `str` name prefixed to Ops created by this function.
Returns:
monte_carlo_csiszar_f_divergence: `float`-like `Tensor` Monte Carlo
approximation of the Csiszar f-Divergence.
Raises:
ValueError: if `q` is not a reparameterized distribution and
`use_reparametrization = True`. A distribution `q` is said to be
"reparameterized" when its samples are generated by transforming the
samples of another distribution which does not depend on the
parameterization of `q`. This property ensures the gradient (with respect
to parameters) is valid.
TypeError: if `p_log_prob` is not a Python `callable`.
"""
with ops.name_scope(name, "monte_carlo_csiszar_f_divergence", [num_draws]):
if use_reparametrization is None:
use_reparametrization = (q.reparameterization_type
== distribution.FULLY_REPARAMETERIZED)
elif (use_reparametrization and
q.reparameterization_type != distribution.FULLY_REPARAMETERIZED):
# TODO(jvdillon): Consider only raising an exception if the gradient is
# requested.
raise ValueError(
"Distribution `q` must be reparameterized, i.e., a diffeomorphic "
"transformation of a parameterless distribution. (Otherwise this "
"function has a biased gradient.)")
if not callable(p_log_prob):
raise TypeError("`p_log_prob` must be a Python `callable` function.")
return monte_carlo.expectation(
f=lambda q_samples: f(p_log_prob(q_samples) - q.log_prob(q_samples)),
samples=q.sample(num_draws, seed=seed),
log_prob=q.log_prob, # Only used if use_reparametrization=False.
use_reparametrization=use_reparametrization)
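# Illustrative numpy sketch (not part of the original module): the estimator
# D_f[p, q] ~= mean(f(log p(x) - log q(x))), x ~iid q, with f = kl_reverse,
# recovers the closed-form KL[q, p] for two Gaussians. The parameters,
# sample size, and seed are arbitrary.
def _monte_carlo_numpy_sketch(num_draws=200000, seed=0):
  rng = np.random.RandomState(seed)
  mq, sq, mp, sp = 0., 1., 1., 2.  # q = Normal(0, 1), p = Normal(1, 2).
  x = rng.normal(mq, sq, size=num_draws)
  def log_prob(x, m, s):
    return -0.5 * ((x - m) / s)**2 - np.log(s) - 0.5 * np.log(2. * np.pi)
  logu = log_prob(x, mp, sp) - log_prob(x, mq, sq)
  estimate = np.mean(-logu)  # f = kl_reverse: f(u) = -log(u).
  exact = np.log(sp / sq) + (sq**2 + (mq - mp)**2) / (2. * sp**2) - 0.5
  assert abs(estimate - exact) < 1e-2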
def csiszar_vimco(f,
p_log_prob,
q,
num_draws,
num_batch_draws=1,
seed=None,
name=None):
"""Use VIMCO to lower the variance of gradient[csiszar_function(Avg(logu))].
This function generalizes "Variational Inference for Monte Carlo Objectives"
(VIMCO), i.e., https://arxiv.org/abs/1602.06725, to Csiszar f-Divergences.
Note: if `q.reparameterization_type = distribution.FULLY_REPARAMETERIZED`,
consider using `monte_carlo_csiszar_f_divergence`.
The VIMCO loss is:
```none
vimco = f(Avg{logu[i] : i=0,...,m-1})
where,
logu[i] = log( p(x, h[i]) / q(h[i] | x) )
h[i] iid~ q(H | x)
```
Interestingly, the VIMCO gradient is not the naive gradient of `vimco`.
Rather, it is characterized by:
```none
grad[vimco] - variance_reducing_term
where,
variance_reducing_term = Sum{ grad[log q(h[i] | x)] *
(vimco - f(log Avg{h[j;i] : j=0,...,m-1}))
: i=0, ..., m-1 }
h[j;i] = { u[j] j!=i
{ GeometricAverage{ u[k] : k!=i} j==i
```
(We omitted `stop_gradient` for brevity. See implementation for more details.)
The `Avg{h[j;i] : j}` term is a kind of "swap-out average" where the `i`-th
element has been replaced by the leave-`i`-out Geometric-average.
Args:
f: Python `callable` representing a Csiszar-function in log-space.
p_log_prob: Python `callable` representing the natural-log of the
probability under distribution `p`. (In variational inference `p` is the
joint distribution.)
q: `tf.Distribution`-like instance; must implement: `sample(n, seed)`, and
`log_prob(x)`. (In variational inference `q` is the approximate posterior
distribution.)
num_draws: Integer scalar number of draws used to approximate the
f-Divergence expectation.
    num_batch_draws: Integer scalar number of independent batches of draws,
      averaged together to approximate the f-Divergence expectation.
seed: Python `int` seed for `q.sample`.
name: Python `str` name prefixed to Ops created by this function.
Returns:
vimco: The Csiszar f-Divergence generalized VIMCO objective.
Raises:
ValueError: if `num_draws < 2`.
"""
with ops.name_scope(name, "csiszar_vimco", [num_draws, num_batch_draws]):
if num_draws < 2:
raise ValueError("Must specify num_draws > 1.")
stop = array_ops.stop_gradient # For readability.
x = stop(q.sample(sample_shape=[num_draws, num_batch_draws],
seed=seed))
logqx = q.log_prob(x)
logu = p_log_prob(x) - logqx
f_log_avg_u, f_log_sooavg_u = [f(r) for r in csiszar_vimco_helper(logu)]
dotprod = math_ops.reduce_sum(
logqx * stop(f_log_avg_u - f_log_sooavg_u),
axis=0) # Sum over iid samples.
# We now rewrite f_log_avg_u so that:
# `grad[f_log_avg_u] := grad[f_log_avg_u + dotprod]`.
# To achieve this, we use a trick that
# `f(x) - stop(f(x)) == zeros_like(f(x))`
# but its gradient is grad[f(x)].
# Note that IEEE754 specifies that `x - x == 0.` and `x + 0. == x`, hence
# this trick loses no precision. For more discussion regarding the relevant
# portions of the IEEE754 standard, see the StackOverflow question,
# "Is there a floating point value of x, for which x-x == 0 is false?"
# http://stackoverflow.com/q/2686644
    f_log_avg_u += dotprod - stop(dotprod)  # Add zeros_like(dotprod).
return math_ops.reduce_mean(f_log_avg_u, axis=0) # Avg over batches.
def csiszar_vimco_helper(logu, name=None):
"""Helper to `csiszar_vimco`; computes `log_avg_u`, `log_sooavg_u`.
`axis = 0` of `logu` is presumed to correspond to iid samples from `q`, i.e.,
```none
logu[j] = log(u[j])
u[j] = p(x, h[j]) / q(h[j] | x)
h[j] iid~ q(H | x)
```
Args:
logu: Floating-type `Tensor` representing `log(p(x, h) / q(h | x))`.
name: Python `str` name prefixed to Ops created by this function.
Returns:
log_avg_u: `logu.dtype` `Tensor` corresponding to the natural-log of the
average of `u`.
log_sooavg_u: `logu.dtype` `Tensor` characterized by the natural-log of the
      average of `u` except that the average swaps-out `u[i]` for the
leave-`i`-out Geometric-average, i.e.,
```none
log_sooavg_u[i] = log(Avg{h[j ; i] : j=0, ..., m-1})
h[j ; i] = { u[j] j!=i
{ GeometricAverage{u[k] : k != i} j==i
```
"""
with ops.name_scope(name, "csiszar_vimco_helper", [logu]):
logu = ops.convert_to_tensor(logu, name="logu")
n = logu.shape.with_rank_at_least(1)[0].value
if n is None:
n = array_ops.shape(logu)[0]
log_n = math_ops.log(math_ops.cast(n, dtype=logu.dtype))
nm1 = math_ops.cast(n - 1, dtype=logu.dtype)
else:
log_n = np.log(n).astype(logu.dtype.as_numpy_dtype)
nm1 = np.asarray(n - 1, dtype=logu.dtype.as_numpy_dtype)
# Throughout we reduce across axis=0 since this is presumed to be iid
# samples.
log_sum_u = math_ops.reduce_logsumexp(logu, axis=0)
# log_loosum_u[i] =
# = logsumexp(logu[j] : j != i)
# = log( exp(logsumexp(logu)) - exp(logu[i]) )
# = log( exp(logsumexp(logu - logu[i])) exp(logu[i]) - exp(logu[i]))
# = logu[i] + log(exp(logsumexp(logu - logu[i])) - 1)
# = logu[i] + softplus_inverse(logsumexp(logu - logu[i]))
log_loosum_u = logu + distribution_util.softplus_inverse(log_sum_u - logu)
# The swap-one-out-sum ("soosum") is n different sums, each of which
# replaces the i-th item with the i-th-left-out average, i.e.,
# soo_sum_u[i] = [exp(logu) - exp(logu[i])] + exp(mean(logu[!=i]))
# = exp(log_loosum_u[i]) + exp(looavg_logu[i])
looavg_logu = (math_ops.reduce_sum(logu, axis=0) - logu) / nm1
log_soosum_u = math_ops.reduce_logsumexp(
array_ops.stack([log_loosum_u, looavg_logu]),
axis=0)
return log_sum_u - log_n, log_soosum_u - log_n
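# Illustrative numpy sketch (not part of the original module): brute-force
# check of the swap-one-out average against the stable log-space identities
# used above, for a tiny hand-picked vector.
def _vimco_helper_numpy_sketch():
  logu = np.array([0.1, -0.3, 0.7])
  u = np.exp(logu)
  n = len(u)
  brute = np.empty(n)
  for i in range(n):
    h = u.copy()
    # Swap u[i] for the leave-i-out geometric average.
    h[i] = np.exp(np.mean(np.delete(logu, i)))
    brute[i] = np.log(h.mean())
  log_sum_u = np.log(u.sum())
  log_loosum_u = logu + np.log(np.expm1(log_sum_u - logu))  # softplus_inverse
  looavg_logu = (logu.sum() - logu) / (n - 1)
  stable = np.logaddexp(log_loosum_u, looavg_logu) - np.log(n)
  assert np.allclose(brute, stable)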
|
|
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 Nebula, Inc.
# Copyright 2013 Alessio Ababilov
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception definitions.
"""
import inspect
import sys
import six
from congressclient.i18n import _
class ClientException(Exception):
"""The base exception class for all exceptions this library raises."""
pass
class MissingArgs(ClientException):
"""Supplied arguments are not sufficient for calling a function."""
def __init__(self, missing):
self.missing = missing
msg = _("Missing arguments: %s") % ", ".join(missing)
super(MissingArgs, self).__init__(msg)
class ValidationError(ClientException):
"""Error in validation on API client side."""
pass
class UnsupportedVersion(ClientException):
"""User is trying to use an unsupported version of the API."""
pass
class CommandError(ClientException):
"""Error in CLI tool."""
pass
class AuthorizationFailure(ClientException):
"""Cannot authorize API client."""
pass
class ConnectionRefused(ClientException):
"""Cannot connect to API service."""
pass
class AuthPluginOptionsMissing(AuthorizationFailure):
"""Auth plugin misses some options."""
def __init__(self, opt_names):
super(AuthPluginOptionsMissing, self).__init__(
_("Authentication failed. Missing options: %s") %
", ".join(opt_names))
self.opt_names = opt_names
class AuthSystemNotFound(AuthorizationFailure):
"""User has specified an AuthSystem that is not installed."""
def __init__(self, auth_system):
super(AuthSystemNotFound, self).__init__(
_("AuthSystemNotFound: %s") % repr(auth_system))
self.auth_system = auth_system
class NoUniqueMatch(ClientException):
"""Multiple entities found instead of one."""
pass
class EndpointException(ClientException):
"""Something is rotten in Service Catalog."""
pass
class EndpointNotFound(EndpointException):
"""Could not find requested endpoint in Service Catalog."""
pass
class AmbiguousEndpoints(EndpointException):
"""Found more than one matching endpoint in Service Catalog."""
def __init__(self, endpoints=None):
super(AmbiguousEndpoints, self).__init__(
_("AmbiguousEndpoints: %s") % repr(endpoints))
self.endpoints = endpoints
class HttpError(ClientException):
"""The base exception class for all HTTP exceptions."""
http_status = 0
message = _("HTTP Error")
def __init__(self, message=None, details=None,
response=None, request_id=None,
url=None, method=None, http_status=None):
self.http_status = http_status or self.http_status
self.message = message or self.message
self.details = details
self.request_id = request_id
self.response = response
self.url = url
self.method = method
formatted_string = "%s (HTTP %s)" % (self.message, self.http_status)
if request_id:
formatted_string += " (Request-ID: %s)" % request_id
super(HttpError, self).__init__(formatted_string)
class HTTPRedirection(HttpError):
"""HTTP Redirection."""
message = _("HTTP Redirection")
class HTTPClientError(HttpError):
"""Client-side HTTP error.
Exception for cases in which the client seems to have erred.
"""
message = _("HTTP Client Error")
class HttpServerError(HttpError):
"""Server-side HTTP error.
Exception for cases in which the server is aware that it has
erred or is incapable of performing the request.
"""
message = _("HTTP Server Error")
class MultipleChoices(HTTPRedirection):
"""HTTP 300 - Multiple Choices.
Indicates multiple options for the resource that the client may follow.
"""
http_status = 300
message = _("Multiple Choices")
class BadRequest(HTTPClientError):
"""HTTP 400 - Bad Request.
The request cannot be fulfilled due to bad syntax.
"""
http_status = 400
message = _("Bad Request")
class Unauthorized(HTTPClientError):
"""HTTP 401 - Unauthorized.
Similar to 403 Forbidden, but specifically for use when authentication
is required and has failed or has not yet been provided.
"""
http_status = 401
message = _("Unauthorized")
class PaymentRequired(HTTPClientError):
"""HTTP 402 - Payment Required.
Reserved for future use.
"""
http_status = 402
message = _("Payment Required")
class Forbidden(HTTPClientError):
"""HTTP 403 - Forbidden.
The request was a valid request, but the server is refusing to respond
to it.
"""
http_status = 403
message = _("Forbidden")
class NotFound(HTTPClientError):
"""HTTP 404 - Not Found.
The requested resource could not be found but may be available again
in the future.
"""
http_status = 404
message = _("Not Found")
class MethodNotAllowed(HTTPClientError):
"""HTTP 405 - Method Not Allowed.
A request was made of a resource using a request method not supported
by that resource.
"""
http_status = 405
message = _("Method Not Allowed")
class NotAcceptable(HTTPClientError):
"""HTTP 406 - Not Acceptable.
The requested resource is only capable of generating content not
acceptable according to the Accept headers sent in the request.
"""
http_status = 406
message = _("Not Acceptable")
class ProxyAuthenticationRequired(HTTPClientError):
"""HTTP 407 - Proxy Authentication Required.
The client must first authenticate itself with the proxy.
"""
http_status = 407
message = _("Proxy Authentication Required")
class RequestTimeout(HTTPClientError):
"""HTTP 408 - Request Timeout.
The server timed out waiting for the request.
"""
http_status = 408
message = _("Request Timeout")
class Conflict(HTTPClientError):
"""HTTP 409 - Conflict.
Indicates that the request could not be processed because of conflict
in the request, such as an edit conflict.
"""
http_status = 409
message = _("Conflict")
class Gone(HTTPClientError):
"""HTTP 410 - Gone.
Indicates that the resource requested is no longer available and will
not be available again.
"""
http_status = 410
message = _("Gone")
class LengthRequired(HTTPClientError):
"""HTTP 411 - Length Required.
The request did not specify the length of its content, which is
required by the requested resource.
"""
http_status = 411
message = _("Length Required")
class PreconditionFailed(HTTPClientError):
"""HTTP 412 - Precondition Failed.
The server does not meet one of the preconditions that the requester
put on the request.
"""
http_status = 412
message = _("Precondition Failed")
class RequestEntityTooLarge(HTTPClientError):
"""HTTP 413 - Request Entity Too Large.
The request is larger than the server is willing or able to process.
"""
http_status = 413
message = _("Request Entity Too Large")
def __init__(self, *args, **kwargs):
try:
self.retry_after = int(kwargs.pop('retry_after'))
except (KeyError, ValueError):
self.retry_after = 0
super(RequestEntityTooLarge, self).__init__(*args, **kwargs)
class RequestUriTooLong(HTTPClientError):
"""HTTP 414 - Request-URI Too Long.
The URI provided was too long for the server to process.
"""
http_status = 414
message = _("Request-URI Too Long")
class UnsupportedMediaType(HTTPClientError):
"""HTTP 415 - Unsupported Media Type.
The request entity has a media type which the server or resource does
not support.
"""
http_status = 415
message = _("Unsupported Media Type")
class RequestedRangeNotSatisfiable(HTTPClientError):
"""HTTP 416 - Requested Range Not Satisfiable.
The client has asked for a portion of the file, but the server cannot
supply that portion.
"""
http_status = 416
message = _("Requested Range Not Satisfiable")
class ExpectationFailed(HTTPClientError):
"""HTTP 417 - Expectation Failed.
The server cannot meet the requirements of the Expect request-header field.
"""
http_status = 417
message = _("Expectation Failed")
class UnprocessableEntity(HTTPClientError):
"""HTTP 422 - Unprocessable Entity.
The request was well-formed but was unable to be followed due to semantic
errors.
"""
http_status = 422
message = _("Unprocessable Entity")
class InternalServerError(HttpServerError):
"""HTTP 500 - Internal Server Error.
A generic error message, given when no more specific message is suitable.
"""
http_status = 500
message = _("Internal Server Error")
# NotImplemented is a python keyword.
class HttpNotImplemented(HttpServerError):
"""HTTP 501 - Not Implemented.
The server either does not recognize the request method, or it lacks
the ability to fulfill the request.
"""
http_status = 501
message = _("Not Implemented")
class BadGateway(HttpServerError):
"""HTTP 502 - Bad Gateway.
The server was acting as a gateway or proxy and received an invalid
response from the upstream server.
"""
http_status = 502
message = _("Bad Gateway")
class ServiceUnavailable(HttpServerError):
"""HTTP 503 - Service Unavailable.
The server is currently unavailable.
"""
http_status = 503
message = _("Service Unavailable")
class GatewayTimeout(HttpServerError):
"""HTTP 504 - Gateway Timeout.
The server was acting as a gateway or proxy and did not receive a timely
response from the upstream server.
"""
http_status = 504
message = _("Gateway Timeout")
class HttpVersionNotSupported(HttpServerError):
"""HTTP 505 - HttpVersion Not Supported.
The server does not support the HTTP protocol version used in the request.
"""
http_status = 505
message = _("HTTP Version Not Supported")
# _code_map contains all the classes that have an http_status attribute.
_code_map = dict(
(getattr(obj, 'http_status', None), obj)
for name, obj in six.iteritems(vars(sys.modules[__name__]))
if inspect.isclass(obj) and getattr(obj, 'http_status', False)
)
def from_response(response, method, url):
"""Returns an instance of :class:`HttpError` or subclass based on response.
:param response: instance of `requests.Response` class
:param method: HTTP method used for request
:param url: URL used for request
"""
req_id = response.headers.get("x-openstack-request-id")
# NOTE(hdd) true for older versions of nova and cinder
if not req_id:
req_id = response.headers.get("x-compute-request-id")
kwargs = {
"http_status": response.status_code,
"response": response,
"method": method,
"url": url,
"request_id": req_id,
}
if "retry-after" in response.headers:
kwargs["retry_after"] = response.headers["retry-after"]
content_type = response.headers.get("Content-Type", "")
if content_type.startswith("application/json"):
try:
body = response.json()
except ValueError:
pass
else:
if isinstance(body, dict) and isinstance(body.get("error"), dict):
error = body["error"]
kwargs["message"] = error.get("message")
kwargs["details"] = error.get("details")
elif content_type.startswith("text/"):
kwargs["details"] = response.text
try:
cls = _code_map[response.status_code]
except KeyError:
if 500 <= response.status_code < 600:
cls = HttpServerError
elif 400 <= response.status_code < 500:
cls = HTTPClientError
else:
cls = HttpError
return cls(**kwargs)
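# Illustrative sketch (not part of the module): how from_response picks a
# concrete exception class, using a minimal stand-in for requests.Response.
def _from_response_sketch():
    class _FakeResponse(object):
        status_code = 404
        headers = {"Content-Type": "text/plain"}
        text = "no such resource"
    exc = from_response(_FakeResponse(), "GET", "http://example/v1/things")
    assert isinstance(exc, NotFound)
    assert exc.http_status == 404
    assert exc.details == "no such resource"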
|
|
"""
NPC Corporations, agents, and other fun.
"""
from django.db import models
class CrpActivity(models.Model):
"""
Activity types of corporations.
CCP Table: crpActivities
CCP Primary key: "activityID" tinyint(3)
"""
id = models.IntegerField(unique=True, primary_key=True)
name = models.CharField(max_length=100)
description = models.TextField(blank=True)
class Meta:
app_label = 'eve_db'
ordering = ['id']
verbose_name = 'Corporate Activity'
verbose_name_plural = 'Corporate Activities'
def __unicode__(self):
return self.name
def __str__(self):
return self.__unicode__()
class CrpNPCCorporation(models.Model):
"""
CCP Table: crpNPCCorporations
CCP Primary key: "corporationID" int(11)
"""
id = models.IntegerField(unique=True, primary_key=True)
name = models.CharField(max_length=255, blank=True)
description = models.TextField(blank=True)
CORP_SIZE_CHOICES = (('H', 'Huge'),
('L', 'Large'),
('M', 'Medium'),
('S', 'Small'),
('T', 'Tiny'))
size = models.CharField(choices=CORP_SIZE_CHOICES, max_length=1,
blank=True)
EXTENT_CHOICES = (('G', 'Global'),
('N', 'National'),
('R', 'Regional'),
('C', 'Constellational'),
('L', 'Local'))
extent = models.CharField(choices=EXTENT_CHOICES, max_length=1,
blank=True)
solar_system = models.ForeignKey('MapSolarSystem', blank=True, null=True)
investor1 = models.ForeignKey('self', blank=True, null=True,
related_name='invested1_set')
investor1_shares = models.IntegerField(blank=True, null=True)
investor2 = models.ForeignKey('self', blank=True, null=True,
related_name='invested2_set')
investor2_shares = models.IntegerField(blank=True, null=True)
investor3 = models.ForeignKey('self', blank=True, null=True,
related_name='invested3_set')
investor3_shares = models.IntegerField(blank=True, null=True)
investor4 = models.ForeignKey('self', blank=True, null=True,
related_name='invested4_set')
investor4_shares = models.IntegerField(blank=True, null=True)
friendly_corp = models.ForeignKey('self', blank=True, null=True,
related_name='friendly_with_set')
enemy_corp = models.ForeignKey('self', blank=True, null=True,
related_name='enemy_of_set')
public_share_percent = models.FloatField(blank=True, null=True)
initial_share_price = models.IntegerField(blank=True, null=True)
min_security = models.FloatField(blank=True, null=True)
stations_are_scattered = models.BooleanField(default=False)
fringe_systems = models.IntegerField(default=0)
corridor_systems = models.IntegerField(default=0)
hub_systems = models.IntegerField(default=0)
border_systems = models.IntegerField(default=0)
faction = models.ForeignKey('ChrFaction', blank=True, null=True)
size_factor = models.FloatField(blank=True, null=True)
station_count = models.IntegerField(default=0, blank=True, null=True)
station_system_count = models.IntegerField(default=0, blank=True, null=True)
icon_id = models.IntegerField(blank=True, null=True)
class Meta:
app_label = 'eve_db'
ordering = ['id']
verbose_name = 'NPC Corporation'
verbose_name_plural = 'NPC Corporations'
def __unicode__(self):
if self.name:
return self.name
else:
return "Corporation #%d" % self.id
def __str__(self):
return self.__unicode__()
class CrpNPCDivision(models.Model):
"""
Agent division types.
CCP Table: crpNPCDivisions
CCP Primary key: "divisionID" tinyint(3)
"""
id = models.IntegerField(unique=True, primary_key=True)
name = models.CharField(max_length=100, blank=True)
description = models.TextField(blank=True)
leader_type = models.CharField(max_length=100, blank=True)
class Meta:
app_label = 'eve_db'
ordering = ['id']
verbose_name = 'NPC Division'
verbose_name_plural = 'NPC Divisions'
def __unicode__(self):
return self.name
def __str__(self):
return self.__unicode__()
class CrpNPCCorporationDivision(models.Model):
"""
Agent divisions available in corporations.
CCP Table: crpNPCCorporationDivisions
CCP Primary key: ("corporationID" int(11), "divisionID" tinyint(3))
"""
corporation = models.ForeignKey(CrpNPCCorporation)
division = models.ForeignKey(CrpNPCDivision)
size = models.IntegerField(blank=True, null=True)
class Meta:
app_label = 'eve_db'
ordering = ['id']
verbose_name = 'NPC Corporation Division'
verbose_name_plural = 'NPC Corporation Divisions'
unique_together = ('corporation', 'division')
def __unicode__(self):
return "%s: %s" % (self.corporation, self.division)
def __str__(self):
return self.__unicode__()
class CrpNPCCorporationTrade(models.Model):
"""
Market items the corporation buys or sells. Supply/demand has been removed
from dumps, see:
http://www.eveonline.com/ingameboard.asp?a=topic&threadID=835467&page=2#32.
CCP Table: crpNPCCorporationTrades
CCP Primary key: ("corporationID" int(11), "typeID" smallint(6))
"""
corporation = models.ForeignKey(CrpNPCCorporation)
type = models.ForeignKey('InvType', blank=True, null=True)
class Meta:
app_label = 'eve_db'
ordering = ['id']
verbose_name = 'NPC Corporation Trade'
verbose_name_plural = 'NPC Corporation Trades'
unique_together = ('corporation', 'type')
def __unicode__(self):
return "%s: %s" % (self.corporation, self.type)
def __str__(self):
return self.__unicode__()
class CrpNPCCorporationResearchField(models.Model):
"""
Research fields for R&D agents in corporations.
CCP Table: crpNPCCorporationResearchFields
CCP Primary key: ("skillID" smallint(6), "corporationID" int(11))
"""
corporation = models.ForeignKey(CrpNPCCorporation)
skill = models.ForeignKey('InvType', blank=True, null=True)
class Meta:
app_label = 'eve_db'
ordering = ['id']
verbose_name = 'NPC Corporation Research Field'
verbose_name_plural = 'NPC Corporation Research Fields'
unique_together = ('skill', 'corporation')
def __unicode__(self):
return "%s: %s" % (self.corporation, self.skill)
def __str__(self):
return self.__unicode__()
class AgtAgentType(models.Model):
"""
CCP Table: agtAgentTypes
CCP Primary key: "agentTypeID" tinyint(3)
"""
id = models.IntegerField(unique=True, primary_key=True)
name = models.CharField(max_length=255)
class Meta:
app_label = 'eve_db'
ordering = ['id']
verbose_name = 'Agent Type'
verbose_name_plural = 'Agent Types'
def __unicode__(self):
return self.name
def __str__(self):
return self.__unicode__()
class AgtAgent(models.Model):
"""
CCP Table: agtAgents
CCP Primary key: "agentID" int(11)
"""
id = models.IntegerField(unique=True, primary_key=True)
name = models.CharField(max_length=255, blank=True)
division = models.ForeignKey(CrpNPCDivision, blank=True,
null=True)
corporation = models.ForeignKey(CrpNPCCorporation, blank=True, null=True)
location = models.ForeignKey('MapDenormalize', blank=True, null=True)
level = models.IntegerField(blank=True, null=True)
quality = models.IntegerField(blank=True, null=True)
type = models.ForeignKey(AgtAgentType, blank=True, null=True)
locator = models.BooleanField(default=False)
class Meta:
app_label = 'eve_db'
ordering = ['id']
verbose_name = 'Agent'
verbose_name_plural = 'Agents'
def __unicode__(self):
return self.name
def __str__(self):
return self.__unicode__()
class AgtResearchAgent(models.Model):
"""
CCP Table: agtResearchAgents
CCP Primary key: "agentID" int(11)
"""
agent = models.ForeignKey(AgtAgent)
type = models.ForeignKey('InvType')
class Meta:
app_label = 'eve_db'
ordering = ['agent']
verbose_name = 'Research Agent'
verbose_name_plural = 'Research Agents'
unique_together = ('agent', 'type')
def __unicode__(self):
# AgtResearchAgent has no `name` field; format like the other join models
return "%s: %s" % (self.agent, self.type)
def __str__(self):
return self.__unicode__()
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals, print_function
import frappe
import imaplib
import re
import json
import socket
from frappe import _
from frappe.model.document import Document
from frappe.utils import validate_email_add, cint, get_datetime, DATE_FORMAT, strip, comma_or, sanitize_html
from frappe.utils.user import is_system_user
from frappe.utils.jinja import render_template
from frappe.email.smtp import SMTPServer
from frappe.email.receive import EmailServer, Email
from poplib import error_proto
from dateutil.relativedelta import relativedelta
from datetime import datetime, timedelta
from frappe.desk.form import assign_to
from frappe.utils.user import get_system_managers
from frappe.utils.background_jobs import enqueue, get_jobs
from frappe.core.doctype.communication.email import set_incoming_outgoing_accounts
from frappe.utils.scheduler import log
class SentEmailInInbox(Exception): pass
class EmailAccount(Document):
def autoname(self):
"""Set name as `email_account_name` or make title from Email Address."""
if not self.email_account_name:
self.email_account_name = self.email_id.split("@", 1)[0]\
.replace("_", " ").replace(".", " ").replace("-", " ").title()
self.name = self.email_account_name
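# Illustrative (value assumed): for email_id "john.doe-sales@example.com"
# and no explicit email_account_name, the account is named "John Doe Sales".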
def validate(self):
"""Validate Email Address and check POP3/IMAP and SMTP connections is enabled."""
if self.email_id:
validate_email_add(self.email_id, True)
if self.login_id_is_different:
if not self.login_id:
frappe.throw(_("Login Id is required"))
else:
self.login_id = None
duplicate_email_account = frappe.get_all("Email Account", filters={
"email_id": self.email_id,
"name": ("!=", self.name)
})
if duplicate_email_account:
frappe.throw(_("Email id must be unique, Email Account is already exist \
for {0}".format(frappe.bold(self.email_id))))
if frappe.local.flags.in_patch or frappe.local.flags.in_test:
return
#if self.enable_incoming and not self.append_to:
# frappe.throw(_("Append To is mandatory for incoming mails"))
if (not self.awaiting_password and not frappe.local.flags.in_install
and not frappe.local.flags.in_patch):
if self.password or self.smtp_server in ('127.0.0.1', 'localhost'):
if self.enable_incoming:
self.get_incoming_server()
self.no_failed = 0
if self.enable_outgoing:
self.check_smtp()
else:
if self.enable_incoming or self.enable_outgoing:
frappe.throw(_("Password is required or select Awaiting Password"))
if self.notify_if_unreplied:
if not self.send_notification_to:
frappe.throw(_("{0} is mandatory").format(self.meta.get_label("send_notification_to")))
for e in self.get_unreplied_notification_emails():
validate_email_add(e, True)
if self.enable_incoming and self.append_to:
valid_doctypes = [d[0] for d in get_append_to()]
if self.append_to not in valid_doctypes:
frappe.throw(_("Append To can be one of {0}").format(comma_or(valid_doctypes)))
def on_update(self):
"""Check there is only one default of each type."""
from frappe.core.doctype.user.user import setup_user_email_inbox
self.there_must_be_only_one_default()
setup_user_email_inbox(email_account=self.name, awaiting_password=self.awaiting_password,
email_id=self.email_id, enable_outgoing=self.enable_outgoing)
def there_must_be_only_one_default(self):
"""If current Email Account is default, un-default all other accounts."""
for field in ("default_incoming", "default_outgoing"):
if not self.get(field):
continue
for email_account in frappe.get_all("Email Account", filters={ field: 1 }):
if email_account.name==self.name:
continue
email_account = frappe.get_doc("Email Account", email_account.name)
email_account.set(field, 0)
email_account.save()
@frappe.whitelist()
def get_domain(self, email_id):
"""look-up the domain and then full"""
try:
domain = email_id.split("@")
fields = [
"name as domain", "use_imap", "email_server",
"use_ssl", "smtp_server", "use_tls",
"smtp_port"
]
return frappe.db.get_value("Email Domain", domain[1], fields, as_dict=True)
except Exception:
pass
def check_smtp(self):
"""Checks SMTP settings."""
if self.enable_outgoing:
if not self.smtp_server:
frappe.throw(_("{0} is required").format("SMTP Server"))
server = SMTPServer(login = getattr(self, "login_id", None) \
or self.email_id,
server = self.smtp_server,
port = cint(self.smtp_port),
use_tls = cint(self.use_tls)
)
if self.password:
server.password = self.get_password()
server.sess # accessing the `sess` property opens the SMTP session, validating the settings
def get_incoming_server(self, in_receive=False, email_sync_rule="UNSEEN"):
"""Returns logged in POP3/IMAP connection object."""
if frappe.cache().get_value("workers:no-internet") == True:
return None
args = frappe._dict({
"email_account":self.name,
"host": self.email_server,
"use_ssl": self.use_ssl,
"username": getattr(self, "login_id", None) or self.email_id,
"use_imap": self.use_imap,
"email_sync_rule": email_sync_rule,
"uid_validity": self.uidvalidity,
"initial_sync_count": self.initial_sync_count or 100
})
if self.password:
args.password = self.get_password()
if not args.get("host"):
frappe.throw(_("{0} is required").format("Email Server"))
email_server = EmailServer(frappe._dict(args))
try:
email_server.connect()
except (error_proto, imaplib.IMAP4.error) as e:
message = e.message.lower().replace(" ","")
if in_receive and any(map(lambda t: t in message, ['authenticationfail', 'loginviayourwebbrowser', #abbreviated to work with both failure and failed
'loginfailed', 'err[auth]', 'errtemporaryerror'])): #temporary error to deal with godaddy
# if called via self.receive and it leads to authentication error, disable incoming
# and send email to system manager
self.handle_incoming_connect_error(
description=_('Authentication failed while receiving emails from Email Account {0}. Message from server: {1}').format(
self.name, e.message)
)
return None
else:
frappe.throw(e.message)
except socket.error:
if in_receive:
# timeout while connecting, see receive.py connect method
description = frappe.message_log.pop() if frappe.message_log else "Socket Error"
if test_internet():
self.db_set("no_failed", self.no_failed + 1)
if self.no_failed > 2:
self.handle_incoming_connect_error(description=description)
else:
frappe.cache().set_value("workers:no-internet", True)
return None
else:
raise
if not in_receive:
if self.use_imap:
email_server.imap.logout()
# reset failed attempts count
self.set_failed_attempts_count(0)
return email_server
def handle_incoming_connect_error(self, description):
if test_internet():
if self.get_failed_attempts_count() > 2:
self.db_set("enable_incoming", 0)
for user in get_system_managers(only_name=True):
try:
assign_to.add({
'assign_to': user,
'doctype': self.doctype,
'name': self.name,
'description': description,
'priority': 'High',
'notify': 1
})
except assign_to.DuplicateToDoError:
frappe.message_log.pop()
pass
else:
self.set_failed_attempts_count(self.get_failed_attempts_count() + 1)
else:
frappe.cache().set_value("workers:no-internet", True)
def set_failed_attempts_count(self, value):
frappe.cache().set('{0}:email-account-failed-attempts'.format(self.name), value)
def get_failed_attempts_count(self):
return cint(frappe.cache().get('{0}:email-account-failed-attempts'.format(self.name)))
def receive(self, test_mails=None):
"""Called by scheduler to receive emails from this EMail account using POP3/IMAP."""
def get_seen(status):
if not status:
return None
seen = 1 if status == "SEEN" else 0
return seen
if self.enable_incoming:
uid_list = []
exceptions = []
seen_status = []
uid_reindexed = False
if frappe.local.flags.in_test:
incoming_mails = test_mails
else:
email_sync_rule = self.build_email_sync_rule()
email_server = None
try:
email_server = self.get_incoming_server(in_receive=True, email_sync_rule=email_sync_rule)
except Exception:
frappe.log_error(title=_("Error while connecting to email account {0}").format(self.name))
if not email_server:
return
emails = email_server.get_messages()
if not emails:
return
incoming_mails = emails.get("latest_messages", [])
uid_list = emails.get("uid_list", [])
seen_status = emails.get("seen_status", [])
uid_reindexed = emails.get("uid_reindexed", False)
for idx, msg in enumerate(incoming_mails):
uid = None if not uid_list else uid_list[idx]
try:
args = {
"uid": uid,
"seen": None if not seen_status else get_seen(seen_status.get(uid, None)),
"uid_reindexed": uid_reindexed
}
communication = self.insert_communication(msg, args=args)
except SentEmailInInbox:
frappe.db.rollback()
except Exception:
frappe.db.rollback()
log('email_account.receive')
if self.use_imap:
self.handle_bad_emails(email_server, uid, msg, frappe.get_traceback())
exceptions.append(frappe.get_traceback())
else:
frappe.db.commit()
if communication:
attachments = [d.file_name for d in communication._attachments]
communication.notify(attachments=attachments, fetched_from_email_account=True)
#notify if user is linked to account
if len(incoming_mails)>0 and not frappe.local.flags.in_test:
frappe.publish_realtime('new_email', {"account":self.email_account_name, "number":len(incoming_mails)})
if exceptions:
raise Exception(frappe.as_json(exceptions))
def handle_bad_emails(self, email_server, uid, raw, reason):
if cint(email_server.settings.use_imap):
import email
try:
mail = email.message_from_string(raw)
message_id = mail.get('Message-ID')
except Exception:
message_id = "can't be parsed"
unhandled_email = frappe.get_doc({
"raw": raw,
"uid": uid,
"reason":reason,
"message_id": message_id,
"doctype": "Unhandled Email",
"email_account": email_server.settings.email_account
})
unhandled_email.insert(ignore_permissions=True)
frappe.db.commit()
def insert_communication(self, msg, args=None):
args = args or {} # avoid a shared mutable default argument
if isinstance(msg, list):
raw, uid, seen = msg
else:
raw = msg
uid = -1
seen = 0
if args.get("uid", -1): uid = args.get("uid", -1)
if args.get("seen", 0): seen = args.get("seen", 0)
email = Email(raw)
if email.from_email == self.email_id and not email.mail.get("Reply-To"):
# gmail shows sent emails in inbox
# and we don't want emails sent by us to be pulled back into the system again
# skip emails sent by the system itself
if frappe.flags.in_test:
print('WARN: Cannot pull email. Sender same as recipient inbox')
raise SentEmailInInbox
if email.message_id:
names = frappe.db.sql("""select distinct name from tabCommunication
where message_id='{message_id}'
order by creation desc limit 1""".format(
message_id=email.message_id
), as_dict=True)
if names:
name = names[0].get("name")
# email already exists; just update the communication uid
frappe.db.set_value("Communication", name, "uid", uid, update_modified=False)
return
communication = frappe.get_doc({
"doctype": "Communication",
"subject": email.subject,
"content": email.content,
'text_content': email.text_content,
"sent_or_received": "Received",
"sender_full_name": email.from_real_name,
"sender": email.from_email,
"recipients": email.mail.get("To"),
"cc": email.mail.get("CC"),
"email_account": self.name,
"communication_medium": "Email",
"uid": int(uid or -1),
"message_id": email.message_id,
"communication_date": email.date,
"has_attachment": 1 if email.attachments else 0,
"seen": seen or 0
})
self.set_thread(communication, email)
if communication.seen:
# get email account user and set communication as seen
users = frappe.get_all("User Email", filters={ "email_account": self.name },
fields=["parent"])
users = list(set([ user.get("parent") for user in users ]))
communication._seen = json.dumps(users)
communication.flags.in_receive = True
communication.insert(ignore_permissions = 1)
# save attachments
communication._attachments = email.save_attachments_in_doc(communication)
# replace inline images
dirty = False
for file in communication._attachments:
if file.name in email.cid_map and email.cid_map[file.name]:
dirty = True
email.content = email.content.replace("cid:{0}".format(email.cid_map[file.name]),
file.file_url)
if dirty:
# not sure if using save() will trigger anything
communication.db_set("content", sanitize_html(email.content))
# notify all participants of this thread
if self.enable_auto_reply and getattr(communication, "is_first", False):
self.send_auto_reply(communication, email)
return communication
def set_thread(self, communication, email):
"""Appends communication to parent based on thread ID. Will extract
parent communication and will link the communication to the reference of that
communication. Also set the status of parent transaction to Open or Replied.
If no thread id is found and `append_to` is set for the email account,
it will create a new parent transaction (e.g. Issue)"""
parent = None
parent = self.find_parent_from_in_reply_to(communication, email)
if not parent and self.append_to:
self.set_sender_field_and_subject_field()
if not parent and self.append_to:
parent = self.find_parent_based_on_subject_and_sender(communication, email)
if not parent and self.append_to and self.append_to!="Communication":
parent = self.create_new_parent(communication, email)
if parent:
communication.reference_doctype = parent.doctype
communication.reference_name = parent.name
# check if message is notification and disable notifications for this message
isnotification = email.mail.get("isnotification")
if isnotification:
if "notification" in isnotification:
communication.unread_notification_sent = 1
def set_sender_field_and_subject_field(self):
'''Identify the sender and subject fields from the `append_to` DocType'''
# set subject_field and sender_field
meta_module = frappe.get_meta_module(self.append_to)
meta = frappe.get_meta(self.append_to)
self.subject_field = getattr(meta_module, "subject_field", "subject")
if not meta.get_field(self.subject_field):
self.subject_field = None
self.sender_field = getattr(meta_module, "sender_field", "sender")
if not meta.get_field(self.sender_field):
self.sender_field = None
def find_parent_based_on_subject_and_sender(self, communication, email):
'''Find parent document based on subject and sender match'''
parent = None
if self.append_to and self.sender_field:
if self.subject_field:
# try and match by subject and sender
# if sent by same sender with same subject,
# append it to the old conversation
subject = frappe.as_unicode(strip(re.sub("(^\s*(Fw|FW|fwd)[^:]*:|\s*(Re|RE)[^:]*:\s*)*",
"", email.subject)))
parent = frappe.db.get_all(self.append_to, filters={
self.sender_field: email.from_email,
self.subject_field: ("like", "%{0}%".format(subject)),
"creation": (">", (get_datetime() - relativedelta(days=10)).strftime(DATE_FORMAT))
}, fields="name")
# match only subject field
# when the from_email is of a user in the system
# and subject is at least 10 chars long
if not parent and len(subject) > 10 and is_system_user(email.from_email):
parent = frappe.db.get_all(self.append_to, filters={
self.subject_field: ("like", "%{0}%".format(subject)),
"creation": (">", (get_datetime() - relativedelta(days=10)).strftime(DATE_FORMAT))
}, fields="name")
if parent:
parent = frappe._dict(doctype=self.append_to, name=parent[0].name)
return parent
def create_new_parent(self, communication, email):
'''If no parent found, create a new reference document'''
# no parent found, but must be tagged
# insert parent type doc
parent = frappe.new_doc(self.append_to)
if self.subject_field:
parent.set(self.subject_field, frappe.as_unicode(email.subject)[:140])
if self.sender_field:
parent.set(self.sender_field, frappe.as_unicode(email.from_email))
parent.flags.ignore_mandatory = True
try:
parent.insert(ignore_permissions=True)
except frappe.DuplicateEntryError:
# try and find matching parent
parent_name = frappe.db.get_value(self.append_to, {self.sender_field: email.from_email})
if parent_name:
parent.name = parent_name
else:
parent = None
# NOTE if parent isn't found and there's no subject match, it is likely that it is a new conversation thread and hence is_first = True
communication.is_first = True
return parent
def find_parent_from_in_reply_to(self, communication, email):
'''Returns parent reference if embedded in In-Reply-To header
Message-ID is formatted as `{message_id}@{site}`'''
parent = None
in_reply_to = (email.mail.get("In-Reply-To") or "").strip(" <>")
if in_reply_to and "@{0}".format(frappe.local.site) in in_reply_to:
# reply to a communication sent from the system
email_queue = frappe.db.get_value('Email Queue', dict(message_id=in_reply_to), ['communication','reference_doctype', 'reference_name'])
if email_queue:
parent_communication, parent_doctype, parent_name = email_queue
if parent_communication:
communication.in_reply_to = parent_communication
else:
reference, domain = in_reply_to.split("@", 1)
parent_doctype, parent_name = 'Communication', reference
if frappe.db.exists(parent_doctype, parent_name):
parent = frappe._dict(doctype=parent_doctype, name=parent_name)
# set in_reply_to of current communication
if parent_doctype=='Communication':
# communication.in_reply_to = email_queue.communication
if parent.reference_name:
# the true parent is the communication parent
parent = frappe.get_doc(parent.reference_doctype,
parent.reference_name)
return parent
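# A worked example (values illustrative): on site "mysite.example.com", an
# incoming header In-Reply-To: <abc123@mysite.example.com> strips to
# "abc123@mysite.example.com"; if no Email Queue row matches that
# message_id, the fallback split("@", 1) yields reference "abc123", which
# is then looked up as Communication "abc123".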
def send_auto_reply(self, communication, email):
"""Send auto reply if set."""
if self.enable_auto_reply:
set_incoming_outgoing_accounts(communication)
if self.send_unsubscribe_message:
unsubscribe_message = _("Leave this conversation")
else:
unsubscribe_message = ""
frappe.sendmail(recipients = [email.from_email],
sender = self.email_id,
reply_to = communication.incoming_email_account,
subject = _("Re: ") + communication.subject,
content = render_template(self.auto_reply_message or "", communication.as_dict()) or \
frappe.get_template("templates/emails/auto_reply.html").render(communication.as_dict()),
reference_doctype = communication.reference_doctype,
reference_name = communication.reference_name,
in_reply_to = email.mail.get("Message-Id"), # send back the Message-Id as In-Reply-To
unsubscribe_message = unsubscribe_message)
def get_unreplied_notification_emails(self):
"""Return list of emails listed"""
self.send_notification_to = self.send_notification_to.replace(",", "\n")
out = [e.strip() for e in self.send_notification_to.split("\n") if e.strip()]
return out
def on_trash(self):
"""Clear communications where email account is linked"""
from frappe.core.doctype.user.user import remove_user_email_inbox
frappe.db.sql("update `tabCommunication` set email_account='' where email_account=%s", self.name)
remove_user_email_inbox(email_account=self.name)
def after_rename(self, old, new, merge=False):
frappe.db.set_value("Email Account", new, "email_account_name", new)
def build_email_sync_rule(self):
if not self.use_imap:
return "UNSEEN"
if self.email_sync_option == "ALL":
max_uid = get_max_email_uid(self.name)
last_uid = max_uid + int(self.initial_sync_count or 100) if max_uid == 1 else "*"
return "UID {}:{}".format(max_uid, last_uid)
else:
return self.email_sync_option or "UNSEEN"
def mark_emails_as_read_unread(self):
""" mark Email Flag Queue of self.email_account mails as read"""
if not self.use_imap:
return
flags = frappe.db.sql("""select name, communication, uid, action from
`tabEmail Flag Queue` where is_completed=0 and email_account='{email_account}'
""".format(email_account=self.name), as_dict=True)
uid_list = { flag.get("uid", None): flag.get("action", "Read") for flag in flags }
if flags and uid_list:
email_server = self.get_incoming_server()
if not email_server:
return
email_server.update_flag(uid_list=uid_list)
# mark communication as read
docnames = ",".join([ "'%s'"%flag.get("communication") for flag in flags \
if flag.get("action") == "Read" ])
self.set_communication_seen_status(docnames, seen=1)
# mark communication as unread
docnames = ",".join([ "'%s'"%flag.get("communication") for flag in flags \
if flag.get("action") == "Unread" ])
self.set_communication_seen_status(docnames, seen=0)
docnames = ",".join([ "'%s'"%flag.get("name") for flag in flags ])
frappe.db.sql(""" update `tabEmail Flag Queue` set is_completed=1
where name in ({docnames})""".format(docnames=docnames))
def set_communication_seen_status(self, docnames, seen=0):
""" mark Email Flag Queue of self.email_account mails as read"""
if not docnames:
return
frappe.db.sql(""" update `tabCommunication` set seen={seen}
where name in ({docnames})""".format(docnames=docnames, seen=seen))
@frappe.whitelist()
def get_append_to(doctype=None, txt=None, searchfield=None, start=None, page_len=None, filters=None):
if not txt: txt = ""
return [[d] for d in frappe.get_hooks("email_append_to") if txt in d]
def test_internet(host="8.8.8.8", port=53, timeout=3):
"""Returns True if internet is connected
Host: 8.8.8.8 (google-public-dns-a.google.com)
OpenPort: 53/tcp
Service: domain (DNS/TCP)
"""
try:
socket.setdefaulttimeout(timeout)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
return True
except Exception as ex:
print(ex.message)
return False
def notify_unreplied():
"""Sends email notifications if there are unreplied Communications
and `notify_if_unreplied` is set as true."""
for email_account in frappe.get_all("Email Account", "name", filters={"enable_incoming": 1, "notify_if_unreplied": 1}):
email_account = frappe.get_doc("Email Account", email_account.name)
if email_account.append_to:
# get open communications younger than x mins, for given doctype
for comm in frappe.get_all("Communication", "name", filters={
"sent_or_received": "Received",
"reference_doctype": email_account.append_to,
"unread_notification_sent": 0,
"email_account":email_account.name,
"creation": ("<", datetime.now() - timedelta(seconds = (email_account.unreplied_for_mins or 30) * 60)),
"creation": (">", datetime.now() - timedelta(seconds = (email_account.unreplied_for_mins or 30) * 60 * 3))
}):
comm = frappe.get_doc("Communication", comm.name)
if frappe.db.get_value(comm.reference_doctype, comm.reference_name, "status")=="Open":
# if status is still open
frappe.sendmail(recipients=email_account.get_unreplied_notification_emails(),
content=comm.content, subject=comm.subject, doctype= comm.reference_doctype,
name=comm.reference_name)
# update flag
comm.db_set("unread_notification_sent", 1)
def pull(now=False):
"""Will be called via scheduler, pull emails from all enabled Email accounts."""
if frappe.cache().get_value("workers:no-internet") == True:
if test_internet():
frappe.cache().set_value("workers:no-internet", False)
else:
return
queued_jobs = get_jobs(site=frappe.local.site, key='job_name')[frappe.local.site]
for email_account in frappe.get_list("Email Account",
filters={"enable_incoming": 1, "awaiting_password": 0}):
if now:
pull_from_email_account(email_account.name)
else:
# job_name is used to prevent duplicates in queue
job_name = 'pull_from_email_account|{0}'.format(email_account.name)
if job_name not in queued_jobs:
enqueue(pull_from_email_account, 'short', event='all', job_name=job_name,
email_account=email_account.name)
def pull_from_email_account(email_account):
'''Runs within a worker process'''
email_account = frappe.get_doc("Email Account", email_account)
email_account.receive()
# mark Email Flag Queue mail as read
email_account.mark_emails_as_read_unread()
def get_max_email_uid(email_account):
# get maximum uid of emails
max_uid = 1
result = frappe.db.get_all("Communication", filters={
"communication_medium": "Email",
"sent_or_received": "Received",
"email_account": email_account
}, fields=["ifnull(max(uid), 0) as uid"])
if not result:
return 1
else:
max_uid = int(result[0].get("uid", 0)) + 1
return max_uid
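# Illustrative: if the highest received uid recorded for the account is 42,
# this returns 43; with no received mail yet (ifnull -> 0) it returns 1.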
|
|
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import six
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import environment
from alembic import script as alembic_script
from alembic import util as alembic_util
from oslo_config import cfg
from oslo_utils import importutils
import pkg_resources
from neutron.common import utils
from neutron.i18n import _ # _() is used for translations throughout this module
# TODO(ihrachyshka): maintain separate HEAD files per branch
HEAD_FILENAME = 'HEAD'
HEADS_FILENAME = 'HEADS'
CURRENT_RELEASE = "liberty"
EXPAND_BRANCH = 'expand'
CONTRACT_BRANCH = 'contract'
MIGRATION_BRANCHES = (EXPAND_BRANCH, CONTRACT_BRANCH)
MIGRATION_ENTRYPOINTS = 'neutron.db.alembic_migrations'
migration_entrypoints = {
entrypoint.name: entrypoint
for entrypoint in pkg_resources.iter_entry_points(MIGRATION_ENTRYPOINTS)
}
neutron_alembic_ini = os.path.join(os.path.dirname(__file__), 'alembic.ini')
VALID_SERVICES = ['fwaas', 'lbaas', 'vpnaas']
INSTALLED_SERVICES = [service_ for service_ in VALID_SERVICES
if 'neutron-%s' % service_ in migration_entrypoints]
INSTALLED_SERVICE_PROJECTS = ['neutron-%s' % service_
for service_ in INSTALLED_SERVICES]
INSTALLED_SUBPROJECTS = [project_ for project_ in migration_entrypoints
if project_ not in INSTALLED_SERVICE_PROJECTS]
service_help = (
_("Can be one of '%s'.") % "', '".join(INSTALLED_SERVICES)
if INSTALLED_SERVICES else
_("(No services are currently installed).")
)
_core_opts = [
cfg.StrOpt('core_plugin',
default='',
help=_('Neutron plugin provider module')),
cfg.StrOpt('service',
choices=INSTALLED_SERVICES,
help=(_("The advanced service to execute the command against. ")
+ service_help)),
cfg.StrOpt('subproject',
choices=INSTALLED_SUBPROJECTS,
help=(_("The subproject to execute the command against. "
"Can be one of %s.") % INSTALLED_SUBPROJECTS)),
cfg.BoolOpt('split_branches',
default=False,
help=_("Enforce using split branches file structure."))
]
_quota_opts = [
cfg.StrOpt('quota_driver',
default='',
help=_('Neutron quota driver class')),
]
_db_opts = [
cfg.StrOpt('connection',
deprecated_name='sql_connection',
default='',
secret=True,
help=_('URL to database')),
cfg.StrOpt('engine',
default='',
help=_('Database engine')),
]
CONF = cfg.ConfigOpts()
CONF.register_cli_opts(_core_opts)
CONF.register_cli_opts(_db_opts, 'database')
CONF.register_opts(_quota_opts, 'QUOTAS')
def do_alembic_command(config, cmd, *args, **kwargs):
project = config.get_main_option('neutron_project')
alembic_util.msg(_('Running %(cmd)s for %(project)s ...') %
{'cmd': cmd, 'project': project})
try:
getattr(alembic_command, cmd)(config, *args, **kwargs)
except alembic_util.CommandError as e:
alembic_util.err(six.text_type(e))
alembic_util.msg(_('OK'))
def _get_alembic_entrypoint(project):
if project not in migration_entrypoints:
alembic_util.err(_('Sub-project %s not installed.') % project)
return migration_entrypoints[project]
def do_check_migration(config, cmd):
do_alembic_command(config, 'branches')
validate_labels(config)
validate_heads_file(config)
def add_alembic_subparser(sub, cmd):
return sub.add_parser(cmd, help=getattr(alembic_command, cmd).__doc__)
def do_upgrade(config, cmd):
if not CONF.command.revision and not CONF.command.delta:
raise SystemExit(_('You must provide a revision or relative delta'))
revision = CONF.command.revision or ''
if '-' in revision:
raise SystemExit(_('Negative relative revision (downgrade) not '
'supported'))
delta = CONF.command.delta
if delta:
if '+' in revision:
raise SystemExit(_('Use either --delta or relative revision, '
'not both'))
if delta < 0:
raise SystemExit(_('Negative delta (downgrade) not supported'))
revision = '%s+%d' % (revision, delta)
# leave branchless 'head' revision request backward compatible by applying
# all heads in all available branches.
if revision == 'head':
revision = 'heads'
if not CONF.command.sql:
run_sanity_checks(config, revision)
do_alembic_command(config, cmd, revision, sql=CONF.command.sql)
def no_downgrade(config, cmd):
raise SystemExit(_("Downgrade no longer supported"))
def do_stamp(config, cmd):
do_alembic_command(config, cmd,
CONF.command.revision,
sql=CONF.command.sql)
def _get_branch_head(branch):
'''Get the latest @head specification for a branch.'''
return '%s@head' % branch
def do_revision(config, cmd):
'''Generate new revision files, one per branch.'''
addn_kwargs = {
'message': CONF.command.message,
'autogenerate': CONF.command.autogenerate,
'sql': CONF.command.sql,
}
if _use_separate_migration_branches(config):
for branch in MIGRATION_BRANCHES:
version_path = _get_version_branch_path(config, branch)
addn_kwargs['version_path'] = version_path
addn_kwargs['head'] = _get_branch_head(branch)
if not os.path.exists(version_path):
# Bootstrap initial directory structure
utils.ensure_dir(version_path)
# Mark the very first revision in the new branch with its label
addn_kwargs['branch_label'] = branch
do_alembic_command(config, cmd, **addn_kwargs)
else:
do_alembic_command(config, cmd, **addn_kwargs)
update_heads_file(config)
def _get_release_labels(labels):
result = set()
for label in labels:
result.add('%s_%s' % (CURRENT_RELEASE, label))
return result
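# Illustrative: with CURRENT_RELEASE = "liberty", labels {'expand'} map to
# {'liberty_expand'}; such release-prefixed labels are only tolerated for
# backward compatibility (see _compare_labels below).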
def _compare_labels(revision, expected_labels):
# validate that the script has expected labels only
bad_labels = revision.branch_labels - expected_labels
if bad_labels:
# NOTE(ihrachyshka): this hack is temporary to accommodate those
# projects that already initialized their branches with liberty_*
# labels. Let's notify them about the deprecation for now and drop it
# later.
bad_labels_with_release = (revision.branch_labels -
_get_release_labels(expected_labels))
if not bad_labels_with_release:
alembic_util.warn(
_('Release aware branch labels (%s) are deprecated. '
'Please switch to expand@ and contract@ '
'labels.') % bad_labels)
return
script_name = os.path.basename(revision.path)
alembic_util.err(
_('Unexpected label for script %(script_name)s: %(labels)s') %
{'script_name': script_name,
'labels': bad_labels}
)
def _validate_single_revision_labels(script_dir, revision, label=None):
expected_labels = set()
if label is not None:
expected_labels.add(label)
_compare_labels(revision, expected_labels)
# if it's not the root element of the branch, expect the parent of the
# script to have the same label
if revision.down_revision is not None:
down_revision = script_dir.get_revision(revision.down_revision)
_compare_labels(down_revision, expected_labels)
def _validate_revision(script_dir, revision):
for branch in MIGRATION_BRANCHES:
if branch in revision.path:
_validate_single_revision_labels(
script_dir, revision, label=branch)
return
# validate script from branchless part of migration rules
_validate_single_revision_labels(script_dir, revision)
def validate_labels(config):
script_dir = alembic_script.ScriptDirectory.from_config(config)
revisions = [v for v in script_dir.walk_revisions(base='base',
head='heads')]
for revision in revisions:
_validate_revision(script_dir, revision)
def _get_sorted_heads(script):
'''Get the list of heads for all branches, sorted.'''
return sorted(script.get_heads())
def validate_heads_file(config):
'''Check that HEADS file contains the latest heads for each branch.'''
script = alembic_script.ScriptDirectory.from_config(config)
expected_heads = _get_sorted_heads(script)
heads_path = _get_active_head_file_path(config)
try:
with open(heads_path) as file_:
observed_heads = file_.read().split()
if observed_heads == expected_heads:
return
except IOError:
pass
alembic_util.err(
_('HEADS file does not match migration timeline heads, expected: %s')
% ', '.join(expected_heads))
def update_heads_file(config):
'''Update HEADS file with the latest branch heads.'''
script = alembic_script.ScriptDirectory.from_config(config)
heads = _get_sorted_heads(script)
heads_path = _get_active_head_file_path(config)
with open(heads_path, 'w+') as f:
f.write('\n'.join(heads))
if _use_separate_migration_branches(config):
old_head_file = _get_head_file_path(config)
if os.path.exists(old_head_file):
os.remove(old_head_file)
def add_command_parsers(subparsers):
for name in ['current', 'history', 'branches']:
parser = add_alembic_subparser(subparsers, name)
parser.set_defaults(func=do_alembic_command)
help_text = (getattr(alembic_command, 'branches').__doc__ +
' and validate head file')
parser = subparsers.add_parser('check_migration', help=help_text)
parser.set_defaults(func=do_check_migration)
parser = add_alembic_subparser(subparsers, 'upgrade')
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision', nargs='?')
parser.add_argument('--mysql-engine',
default='',
help='Change MySQL storage engine of current '
'existing tables')
parser.set_defaults(func=do_upgrade)
parser = subparsers.add_parser('downgrade', help="(No longer supported)")
parser.add_argument('None', nargs='?', help="Downgrade not supported")
parser.set_defaults(func=no_downgrade)
parser = add_alembic_subparser(subparsers, 'stamp')
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision')
parser.set_defaults(func=do_stamp)
parser = add_alembic_subparser(subparsers, 'revision')
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
parser.add_argument('--sql', action='store_true')
parser.set_defaults(func=do_revision)
command_opt = cfg.SubCommandOpt('command',
title='Command',
help=_('Available commands'),
handler=add_command_parsers)
CONF.register_cli_opt(command_opt)
def _get_project_base(config):
'''Return the base python namespace name for a project.'''
script_location = config.get_main_option('script_location')
return script_location.split(':')[0].split('.')[0]
def _get_package_root_dir(config):
root_module = importutils.try_import(_get_project_base(config))
if not root_module:
project = config.get_main_option('neutron_project')
alembic_util.err(_("Failed to locate source for %s.") % project)
# The root_module.__file__ property is a path like
# '/opt/stack/networking-foo/networking_foo/__init__.py'
# We return just
# '/opt/stack/networking-foo'
return os.path.dirname(os.path.dirname(root_module.__file__))
def _get_root_versions_dir(config):
'''Return root directory that contains all migration rules.'''
root_dir = _get_package_root_dir(config)
script_location = config.get_main_option('script_location')
# Script location is something like:
# 'project_base.db.migration:alembic_migrations'
# Convert it to:
# 'project_base/db/migration/alembic_migrations/versions'
part1, part2 = script_location.split(':')
parts = part1.split('.') + part2.split('.') + ['versions']
# Return the absolute path to the versions dir
return os.path.join(root_dir, *parts)
def _get_head_file_path(config):
'''Return the path of the file that contains single head.'''
return os.path.join(
_get_root_versions_dir(config),
HEAD_FILENAME)
def _get_heads_file_path(config):
'''Return the path of the file that contains all latest heads, sorted.'''
return os.path.join(
_get_root_versions_dir(config),
HEADS_FILENAME)
def _get_active_head_file_path(config):
'''Return the path of the file that contains latest head(s), depending on
whether multiple branches are used.
'''
if _use_separate_migration_branches(config):
return _get_heads_file_path(config)
return _get_head_file_path(config)
def _get_version_branch_path(config, branch=None):
version_path = _get_root_versions_dir(config)
if branch:
return os.path.join(version_path, CURRENT_RELEASE, branch)
return version_path
def _use_separate_migration_branches(config):
'''Detect whether split migration branches should be used.'''
return (CONF.split_branches or
# Use HEADS file to indicate the new, split migration world
os.path.exists(_get_heads_file_path(config)))
def _set_version_locations(config):
'''Make alembic see all revisions in all migration branches.'''
version_paths = [_get_version_branch_path(config)]
if _use_separate_migration_branches(config):
for branch in MIGRATION_BRANCHES:
version_paths.append(_get_version_branch_path(config, branch))
config.set_main_option('version_locations', ' '.join(version_paths))
def _get_installed_entrypoint(subproject):
'''Get the entrypoint for the subproject, which must be installed.'''
if subproject not in migration_entrypoints:
alembic_util.err(_('Package %s not installed') % subproject)
return migration_entrypoints[subproject]
def _get_subproject_script_location(subproject):
'''Get the script location for the installed subproject.'''
entrypoint = _get_installed_entrypoint(subproject)
return ':'.join([entrypoint.module_name, entrypoint.attrs[0]])
def _get_service_script_location(service):
'''Get the script location for the service, which must be installed.'''
return _get_subproject_script_location('neutron-%s' % service)
def _get_subproject_base(subproject):
'''Get the import base name for the installed subproject.'''
entrypoint = _get_installed_entrypoint(subproject)
return entrypoint.module_name.split('.')[0]
def get_alembic_configs():
'''Return a list of alembic configs, one per project.
'''
# Get the script locations for the specified or installed projects.
# Which projects to get script locations for is determined by the CLI
# options as follows:
# --service X # only subproject neutron-X
# --subproject Y # only subproject Y
# (none specified) # neutron and all installed subprojects
script_locations = {}
if CONF.service:
script_location = _get_service_script_location(CONF.service)
script_locations['neutron-%s' % CONF.service] = script_location
elif CONF.subproject:
script_location = _get_subproject_script_location(CONF.subproject)
script_locations[CONF.subproject] = script_location
else:
for subproject, ep in migration_entrypoints.items():
script_locations[subproject] = _get_subproject_script_location(
subproject)
# Return a list of alembic configs from the projects in the
# script_locations dict. If neutron is in the list it is first.
configs = []
project_seq = sorted(script_locations.keys())
# Core neutron must be the first project if there is more than one
if len(project_seq) > 1 and 'neutron' in project_seq:
project_seq.insert(0, project_seq.pop(project_seq.index('neutron')))
for project in project_seq:
config = alembic_config.Config(neutron_alembic_ini)
config.set_main_option('neutron_project', project)
script_location = script_locations[project]
config.set_main_option('script_location', script_location)
_set_version_locations(config)
config.neutron_config = CONF
configs.append(config)
return configs
def get_neutron_config():
# Neutron's alembic config is always the first one
return get_alembic_configs()[0]
def run_sanity_checks(config, revision):
script_dir = alembic_script.ScriptDirectory.from_config(config)
def check_sanity(rev, context):
# TODO(ihrachyshka): here we use internal API for alembic; we may need
# alembic to expose implicit_base= argument into public
# iterate_revisions() call
for script in script_dir.revision_map.iterate_revisions(
revision, rev, implicit_base=True):
if hasattr(script.module, 'check_sanity'):
script.module.check_sanity(context.connection)
return []
with environment.EnvironmentContext(config, script_dir,
fn=check_sanity,
starting_rev=None,
destination_rev=revision):
script_dir.run_env()
def validate_cli_options():
if CONF.subproject and CONF.service:
alembic_util.err(_("Cannot specify both --service and --subproject."))
def main():
CONF(project='neutron')
validate_cli_options()
for config in get_alembic_configs():
#TODO(gongysh) enable logging
CONF.command.func(config, CONF.command.name)
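# A hedged usage sketch (this module backs neutron's "neutron-db-manage"
# console script; invocations illustrative):
#   neutron-db-manage upgrade heads # apply the latest head of every branch
#   neutron-db-manage revision -m "add foo table" --autogenerate
#   neutron-db-manage --subproject networking-foo upgrade heads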
|
|
#!/usr/bin/env python
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import errno
import getopt
import glob
import json
import os
import re
import sys
from collections import deque
from collections import namedtuple
from pip.req import parse_requirements as p_reqs
default_setup_cfg = deque()
default_stdeb_cfg = deque()
default_setup_cfg.append('[bdist_rpm]\n')
default_stdeb_cfg.append('[DEFAULT]\n')
def construct_cfgs(**kargs):
"""construct_cfgs
Performs actions to construct either the setup.cfg (rpm) or stdeb.cfg (deb)
files as per the operating system specified. This construction is done as
per the setup_requirements.txt file from within the working directory
specified.
This is a very temperamental function by design. It intentionally exits
with a non-zero status when an error occurs. Therefore, do not use this
function if you need control returned to the caller on failure.
"""
docker_dir = _check_args(**kargs)
if not docker_dir or not os.path.isdir(docker_dir):
print("Unable to determine the %s/%s combo under supported versioning"
% (kargs['operating_system'], kargs['version']))
exit_cleanly(error_number=errno.ENOSYS)
if kargs['operating_system'] == 'redhat':
_build_setup_cfg(kargs['working_directory'])
elif kargs['operating_system'] == 'ubuntu':
_build_stdeb_cfg(kargs['working_directory'])
else:
print("Unsure of what to do... operating_system(%s) is not recognized!"
% kargs['operating_system'])
def _construct_cfgs_from_json(args):
Args = namedtuple('Args', 'setup, reqs, fmt, start')
rpm = Args(args['setup_cfg'], args['setup_requirements'], '%s %s',
'requires = ')
deb = Args(args['stdeb_cfg'], args['setup_requirements'], '%s (%s), ',
'Depends:\n ')
_construct_file(rpm.setup, rpm.reqs, rpm.fmt, rpm.start)
_construct_file(deb.setup, deb.reqs, deb.fmt, deb.start)
print("Successfully constructed %s and\n%s" %
(args['setup_cfg'], args['stdeb_cfg']))
def _read_in_cfgs(cfg):
cfgs = deque()
break_re = re.compile('^[^#\W]')
req_re = re.compile('^Depends:|^requires\s*=\s*')
try:
with open(cfg, 'r') as fh:
read = True
line = fh.readline()
while line:
read = True if break_re.search(line) and not read else read
if req_re.search(line):
read = False
if read:
cfgs.append(line)
line = fh.readline()
except IOError:
pass # we then have empty contents... all well...
if not cfgs:
if cfg.endswith('setup.cfg'):
cfgs = default_setup_cfg
elif cfg.endswith('stdeb.cfg'):
cfgs = default_stdeb_cfg
else:
cfgs = deque()
return cfgs
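# Illustrative trace: for an existing stdeb.cfg containing
#   [DEFAULT]
#   Depends: python-foo (>= 1.0),
#    python-bar
#   XS-Python-Version: >= 2.7
# the Depends entry and its indented continuation line are skipped (they
# get regenerated), while "[DEFAULT]" and "XS-Python-Version" are kept.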
def _build_setup_cfg(wkg):
setup_cfg = wkg + "/setup.cfg"
setup = wkg + "/setup_requirements.txt"
fmt = "%s%s"
_construct_file(setup_cfg, setup, fmt, 'requires = ')
def _build_stdeb_cfg(wkg):
setup_cfg = glob.glob(wkg + "/*-dist/deb_dist/stdeb.cfg")
setup = wkg + "/setup_requirements.txt"
fmt = "%s (%s),"
if not setup_cfg:
dist = glob.glob(wkg + "/*-dist")
if not dist:
print(str(EnvironmentError("Unable to find a *-dist directory")))
print("No dist directory under: " + wkg)
exit_cleanly(error_number=errno.ENOSYS)
deb_dist = dist[0] + "/deb_dist"
try:
os.mkdir(deb_dist)
except OSError as Error: # os.mkdir() raises OSError, not IOError
if not os.path.isdir(deb_dist):
print(str(Error))
print("Unable to determine the existence of: " + deb_dist)
exit_cleanly(error_number=errno.ENOSYS)
setup_cfg = deb_dist + "/stdeb.cfg"
else:
setup_cfg = setup_cfg[0]
_construct_file(setup_cfg, setup, fmt, 'Depends:\n ')
def handle_equals(fh, fmt, name, specifier, pkg_type='rpm'):
version_breakout = re.compile('(\d+)\.(\d+)\.(\d+)')
specifier_fmt = "{} {}.{}.{}"
if '-' in specifier:
specifier = re.sub('-\d+', '', specifier)
match = version_breakout.search(specifier)
assert match, "{} could not be matched with {}".format(
specifier, version_breakout.pattern)
major, minor, patch = match.groups()
lt_specifier = '<<' if pkg_type == 'deb' else '<'
fh.write(fmt % (name,
specifier_fmt.format(
lt_specifier, major, minor, int(patch) + 1)))
fh.write('\n ')
fh.write(fmt % (name, specifier_fmt.format(">=", major, minor, patch)))
def _construct_file(setup_cfg, setup, fmt, start):
if not os.path.isfile(setup) or not os.access(setup, os.R_OK):
print(setup + " does not exist or is not readable")
exit_cleanly(error_number=errno.ENOSYS)
contents = _read_in_cfgs(setup_cfg)
parsed_reqs = map(lambda x: (x.req), p_reqs(setup, session="pkg"))
if not parsed_reqs:
print("Nothing to do!\n%s\nDoes not contain any reqs parsable!" %
setup)
exit_cleanly(error_number=0)
try:
base_dir = os.path.dirname(setup_cfg)
if not os.path.isdir(base_dir):
try:
os.mkdir(base_dir)
except OSError as Error:
if Error.errno != errno.EEXIST:
raise
with open(setup_cfg, 'w') as fh:
pkg_type = ''
if contents:
while contents:
fh.write(contents.popleft())
fh.write(start)
for count in range(len(parsed_reqs)):
req = parsed_reqs[count]
if 'Depends' in start:
# special case for debian...
name = str(req.name) if 'python-' in str(req.name) else \
'python-' + str(req.name)
pkg_type = 'deb'
else:
name = str(req.name)
pkg_type = 'rpm'
specifier = str(req.specifier)
if '==' in specifier:
handle_equals(fh, fmt, name, specifier, pkg_type=pkg_type)
continue
else:
fh.write(fmt % (name, str(specifier)))
if count != len(parsed_reqs) - 1:
fh.write('\n ')
fh.write("\n") # some packages require a new line
except AssertionError as Error:
raise ValueError(str(Error))
except IOError as Error:
print(Error)
exit_cleanly(error_number=errno.EIO)
def _check_args(operating_system=None, version=None, working_directory=None):
accepted_docker_dir = None
try:
docker_dir = str(working_directory) + "/*-dist/Docker/%s/%s"
except TypeError as Error:
print("working_directory is of invalid type! (%s)" %
str(type(working_directory)))
exit_cleanly(error_number=errno.EINVAL)
except Exception as Error:
print(str(Error))
print("'%s' is not a valid working_directory!" %
str(working_directory))
exit_cleanly(error_number=errno.EINVAL)
if operating_system not in ['ubuntu', 'redhat']:
print("'%s' is not a valid or recognized operating_system option" %
operating_system)
exit_cleanly(error_number=errno.EINVAL)
else:
possible = glob.glob(docker_dir % (operating_system, version))
if possible:
accepted_docker_dir = possible[0]
if not accepted_docker_dir:
print("(operating_system=%s, version=%s, working_directory=%s)" %
(operating_system, version, working_directory))
print("Are not acceptable arguments resulting in a Docker " +
"file location")
exit_cleanly()
return accepted_docker_dir
def exit_cleanly(error_number=0):
"""exit_cleanly
Performs standard error notification and exiting statements as necessary. This
assures more consistent error handling within the script.
"""
default = "An Unknown error has occurred!"
descriptions = \
{22: 'An improper input error has occurred. Please see above stmt(s)',
29: 'An operation failed. Please see above stmt(s)',
5: 'An IO Error has occurred. Please see above stmt(s)'}
try:
error_number = int(error_number)
except (TypeError, ValueError):
# allow symbolic names such as exit_cleanly(error_number='EINVAL');
# int('EINVAL') raises ValueError, which previously crashed here
if isinstance(error_number, str) and hasattr(errno, error_number):
error_number = getattr(errno, error_number)
else:
error_number = -1
if error_number == 0:
stmt = "No error has been detected!"
elif error_number in descriptions:
stmt = descriptions[error_number]
else:
stmt = default
if error_number:
print("""
%s [--opt [option]]
With opts:
working_directory - the full path to the working directory
operating_system - the full name of the operating system lower case
version - the version of the operating system
All of these options must be supplied, and if one is missing or if there is
no corresponding:
<working_directory>/*-dist/Docker/<operating_system>/<version>
Directory, then this script will exit cleanly reporting it as an error.
""" % sys.argv[0])
print("(%d) %s" % (error_number, stmt), file=sys.stderr)
sys.exit(error_number)
def _load_json(json_fl):
try:
with open(json_fl, 'r') as fh:
data = json.loads(fh.read())
except Exception as Error:
print(Error)
print("Unable to read in " + json_fl)
# without this exit, `data` would be unbound and raise a NameError below
exit_cleanly(error_number=errno.EIO)
return data
def get_args():
"""get_args
This function extracts the script arguments within the arguments variable and
interprets their meaning before returning such content.
"""
expected = ['working_directory', 'operating_system', 'version']
possible = ['json'] + expected # list.extend() returns None; concatenate instead
try:
opts, args = getopt.getopt(sys.argv[1:], '', map(lambda x: ('%s=' % x),
possible))
except getopt.GetoptError as err:
print(str(err))
exit_cleanly(error_number=errno.EINVAL)
arguments = dict()
for o, a in opts:
option = re.sub('^-*', '', o)
if 'json' in option:
arguments = _load_json(a)
break
if option in expected:
arguments[option] = a
error = 0
for item in expected:
if item not in arguments:
print("Missing: %s from arguments" % item)
error = errno.EINVAL
if error:
exit_cleanly(error_number=error)
return arguments
def main():
"""main
The entrypoint function. This function should also handle any runtime
errors and exceptions in a clean fashion.
"""
try:
args = get_args()
if 'setup_cfg' in args and 'stdeb_cfg' in args:
_construct_cfgs_from_json(args)
else:
construct_cfgs(**args)
except Exception as Error:
print(str(Error))
print("Exiting cleanly...")
exit_cleanly()
exit_cleanly(error_number=0)
if __name__ == '__main__':
main()
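# A hedged usage sketch (paths and script name illustrative):
#   python construct_cfgs.py --working_directory /path/to/project \
#       --operating_system redhat --version 7
#   python construct_cfgs.py --json args.json
# where args.json supplies the same keys (plus optional setup_cfg/stdeb_cfg
# entries to drive _construct_cfgs_from_json directly).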
|
|
"""Miscellaneous utilities."""
import numpy as np
class Normalizer():
"""An abstract class for normalizing inputs.
"""
def normalize(self, X):
"""Return corresponding points in normalized domain.
Parameters
----------
X : ndarray
contains all input points one wishes to normalize
Returns
-------
X_norm : ndarray
contains the normalized inputs corresponding to `X`
Notes
-----
Points in `X` should be oriented as an m-by-n ndarray, where each row
corresponds to an m-dimensional point in the problem domain.
"""
raise NotImplementedError()
def unnormalize(self, X):
"""Return corresponding points shifted and scaled to [-1,1]^m.
Parameters
----------
X : ndarray
contains all input points one wishes to unnormalize
Returns
-------
X_unnorm : ndarray
contains the unnormalized inputs corresponding to `X`
Notes
-----
Points in `X` should be oriented as an m-by-n ndarray, where each row
corresponds to an m-dimensional point in the normalized domain.
"""
raise NotImplementedError()
class BoundedNormalizer(Normalizer):
"""A class for normalizing bounded inputs.
Attributes
----------
lb : ndarray
a matrix of size m-by-1 that contains lower bounds on the simulation
inputs
ub : ndarray
a matrix of size m-by-1 that contains upper bounds on the simulation
inputs
See Also
--------
utils.misc.UnboundedNormalizer
"""
lb, ub = None, None
def __init__(self, lb, ub):
"""Initialize a BoundedNormalizer.
Parameters
----------
lb : ndarray
a matrix of size m-by-1 that contains lower bounds on the simulation
inputs
ub : ndarray
a matrix of size m-by-1 that contains upper bounds on the simulation
inputs
"""
m = lb.size
self.lb = lb.reshape((1, m))
self.ub = ub.reshape((1, m))
def normalize(self, X):
"""Return corresponding points shifted and scaled to [-1,1]^m.
Parameters
----------
X : ndarray
contains all input points one wishes to normalize. The shape of `X`
is M-by-m. The components of each row of `X` should be between `lb`
and `ub`.
Returns
-------
X_norm : ndarray
contains the normalized inputs corresponding to `X`. The components
of each row of `X_norm` should be between -1 and 1.
"""
X, M, m = process_inputs(X)
X_norm = 2.0 * (X - self.lb) / (self.ub - self.lb) - 1.0
return X_norm
def unnormalize(self, X):
"""Return corresponding points shifted and scaled to `[lb, ub]`.
Parameters
----------
X : ndarray
contains all input points one wishes to unnormalize. The shape of
`X` is M-by-m. The components of each row of `X` should be between
-1 and 1.
Returns
-------
X_unnorm : ndarray
contains the unnormalized inputs corresponding to `X`. The
components of each row of `X_unnorm` should be between `lb` and
`ub`.
"""
X, M, m = process_inputs(X)
X_unnorm = (self.ub - self.lb) * (X + 1.0) / 2.0 + self.lb
return X_unnorm
class UnboundedNormalizer(Normalizer):
"""A class for normalizing unbounded, Gaussian inputs to standard normals.
Attributes
----------
mu : ndarray
a matrix of size m-by-1 that contains the mean of the Gaussian
simulation inputs
L : ndarray
a matrix size m-by-m that contains the Cholesky factor of the covariance
matrix of the Gaussian simulation inputs.
See Also
--------
utils.misc.BoundedNormalizer
Notes
-----
A simulation with unbounded inputs is assumed to have a Gaussian weight
function associated with the inputs. The covariance of the Gaussian weight
function should be full rank.
"""
mu, L = None, None
def __init__(self, mu, C):
"""Initialize an UnboundedNormalizer.
Parameters
----------
mu : ndarray
a matrix of size m-by-1 that contains the mean of the Gaussian
simulation inputs
C : ndarray
a matrix of size m-by-m that contains the covariance matrix of the
Gaussian simulation inputs
"""
self.mu = mu.reshape((1, mu.size))
self.L = np.linalg.cholesky(C)
def normalize(self, X):
"""Return points transformed to a standard normal distribution.
Parameters
----------
X : ndarray
contains all input points one wishes to normalize. The shape of `X`
is M-by-m. The components of each row of `X` should be a draw from a
Gaussian with mean `mu` and covariance `C`.
Returns
-------
X_norm : ndarray
contains the normalized inputs corresponding to `X`. The components
of each row of `X_norm` should be draws from a standard multivariate
normal distribution.
"""
X, M, m = process_inputs(X)
X0 = X - self.mu
X_norm = np.linalg.solve(self.L,X0.T).T
return X_norm
def unnormalize(self, X):
"""Transform points to original Gaussian.
Return corresponding points transformed to draws from a Gaussian
distribution with mean `mu` and covariance `C`.
Parameters
----------
X : ndarray
contains all input points one wishes to unnormalize. The shape of
`X` is M-by-m. The components of each row of `X` should be draws
from a standard multivariate normal.
Returns
-------
X_unnorm : ndarray
contains the unnormalized inputs corresponding to `X`. The
components of each row of `X_unnorm` should represent draws from a
multivariate normal with mean `mu` and covariance `C`.
"""
X, M, m = process_inputs(X)
X0 = np.dot(X,self.L.T)
X_unnorm = X0 + self.mu
return X_unnorm
def process_inputs(X):
"""Check a matrix of input values for the right shape.
Parameters
----------
X : ndarray
contains input points. The shape of `X` should be M-by-m.
Returns
-------
X : ndarray
the same as the input
M : int
number of rows in `X`
m : int
number of columns in `X`
"""
if len(X.shape) == 2:
M, m = X.shape
else:
raise ValueError('The inputs X should be a two-d numpy array.')
X = X.reshape((M, m))
return X, M, m
def process_inputs_outputs(X, f):
"""Check matrix of input values and a vector of outputs for correct shapes.
Parameters
----------
X : ndarray
contains input points. The shape of `X` should be M-by-m.
f : ndarray
M-by-1 matrix
Returns
-------
X : ndarray
the same as the input
f : ndarray
the same as the output
M : int
number of rows in `X`
m : int
number of columns in `X`
"""
X, M, m = process_inputs(X)
if len(f.shape) == 2:
Mf, mf = f.shape
else:
raise ValueError('The outputs f should be a two-d numpy array.')
if Mf != M:
raise Exception('Different number of inputs and outputs.')
if mf != 1:
raise Exception('Only scalar-valued functions.')
f = f.reshape((M, 1))
return X, f, M, m
def conditional_expectations(f, ind):
"""Compute conditional expectations and variances for given function values.
Parameters
----------
f : ndarray
an ndarry of function evaluations
ind : ndarray[int]
index array that tells which values of `f` correspond to the same value
for the active variable.
Returns
-------
Ef : ndarray
an ndarray containing the conditional expectations
Vf : ndarray
an ndarray containing the conditional variances
Notes
-----
This function computes the mean and variance for all values in the ndarray
`f` that have the same index in `ind`. The indices in `ind` correspond to
values of the active variables.
"""
n = int(np.amax(ind)) + 1
Ef, Vf = np.zeros((n, 1)), np.zeros((n, 1))
for i in range(n):
fi = f[ind == i]
Ef[i] = np.mean(fi)
Vf[i] = np.var(fi)
return Ef, Vf
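# Worked example (sketch): f = np.array([[1.], [2.], [3.], [4.]]) with
# ind = np.array([0, 0, 1, 1]) groups the first two and last two values,
# giving Ef = [[1.5], [3.5]] and Vf = [[0.25], [0.25]].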
# thanks to Trent for these functions!!!
def atleast_2d_col(A):
"""Wrapper for `atleast_2d(A, 'col')`
Notes
-----
Thanks to Trent Lukaczyk for these functions!
"""
return atleast_2d(A,'col')
def atleast_2d_row(A):
"""Wrapper for `atleast_2d(A, 'row')`
Notes
-----
Thanks to Trent Lukaczyk for these functions!
"""
return atleast_2d(A,'row')
def atleast_2d(A, oned_as='row'):
"""Ensures the array `A` is at least two dimensions.
Parameters
----------
A : ndarray
matrix
oned_as : str, optional
should be either 'row' or 'col'. It determines whether the array `A`
should be expanded as a 2d row or 2d column (default 'row')
"""
# not an array yet
if not isinstance(A,(np.ndarray,np.matrixlib.defmatrix.matrix)):
if not isinstance(A,(list,tuple)):
A = [A]
A = np.array(A)
# check rank
if np.ndim(A) < 2:
# expand row or col
if oned_as == 'row':
A = A[None,:]
elif oned_as == 'col':
A = A[:,None]
else:
            raise ValueError("oned_as must be 'row' or 'col'")
return A
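# Usage sketch for the helpers above:
#
#   atleast_2d_row([1, 2, 3]).shape  # (1, 3)
#   atleast_2d_col([1, 2, 3]).shape  # (3, 1)
#   atleast_2d(np.ones((2, 2))).shape  # already 2d, unchanged: (2, 2)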
|
|
#! /usr/bin/python
# Joe Deller 2014
# A second day at the Minecraft Races
# Level : Intermediate
# Uses : Libraries, variables, loops, lists, methods and logic
# Second version of the horse race program
# This time the horses will not race until we have
# clicked on a special stone block next to the start line
# TODO How about adding some steps and blocks at the side of the track
# to look like audience stands?
# How about adding some fence gates at the start of the track, then opening
# them with setBlock(block.FENCE_GATE.id, 4)? A commented sketch follows below.
# Can you think what might go wrong with this?
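# A rough sketch of the gate idea (untested; the data value controls the
# gate's orientation and open/closed state, so it may need adjusting):
#
# def DrawGates(x, y, z):
#     # one gate per lane across the 8 block wide track
#     for lane in range(1, 9):
#         mc.setBlock(x + lane, y, z - 1, block.FENCE_GATE.id, 4)
#
# Remember the racing code would also need to wait for the gates to open,
# otherwise the horses are simply drawn straight through them.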
# A more advanced version of this program would use threads to move the
# horses at the same time, rather than one after the other
import mcpi.minecraft as minecraft
import mcpi.block as block
import random
import time
def DrawCourse(x, y, z, track_length):
# Our track is going to be made with stripes
# to make it look a little neater
# Draw 8 tracks altogether, in gravel and sand
# At the end of the track draw some chequered wool
# x and z are where we want to start drawing the track from
# Don't make the track length too long or it will disappear into the distance.
# This is a nice simple straight course, it makes the racing code fairly simple
# A more advanced program could draw a looped track, but lets keep it
# simple for now
# Draw a couple of pillars at the end to make a finish archway
arch_height = 6
left_pillar_x = x
# track is 8 blocks wide so the right pillar is 9 from the left one
    right_pillar_x = x + 9
mc.setBlocks(
left_pillar_x,
y,
z + track_length + 1,
left_pillar_x,
y + arch_height,
z + track_length + 1,
block.WOOD_PLANKS.id)
mc.setBlocks(
        right_pillar_x,
y,
z + track_length + 1,
        right_pillar_x,
y + arch_height,
z + track_length + 1,
block.WOOD_PLANKS.id)
# To make the track look pretty, we will alternate between gravel and sand
# This means our loop will count up in twos as we are drawing two tracks
# each time we go around the loop
for track in range(1, 9, 2):
# In this loop we need to work out what the x position of the tracks are
# each time around the loop. We use a variable to do this only once per loop
# as several lines of code need this value
# Can you see any other calculations that are done several times
# that we could do once and use a variable to store the result?
tx = x + track
mc.setBlocks(tx, y - 1, z, tx, y - 1, z + track_length, block.GRAVEL.id)
mc.setBlocks(tx + 1, y - 1, z, tx + 1, y - 1, z + track_length, block.SAND.id)
# At the finish line we have a chequered pattern
# 0 is white, 15 is black
mc.setBlock(tx, y - 1, z + track_length + 1, block.WOOL.id, 0)
mc.setBlock(tx, y - 1, z + track_length + 2, block.WOOL.id, 15)
mc.setBlock(tx + 1, y - 1, z + track_length + 1, block.WOOL.id, 15)
mc.setBlock(tx + 1, y - 1, z + track_length + 2, block.WOOL.id, 0)
        # Draw a chequered pattern between the two end pillars, above the
# ground at 6 and 7 blocks high
mc.setBlock(tx, y + 7, z + track_length + 1, block.WOOL.id, 0)
mc.setBlock(tx, y + 6, z + track_length + 1, block.WOOL.id, 15)
mc.setBlock(tx + 1, y + 7, z + track_length + 1, block.WOOL.id, 15)
mc.setBlock(tx + 1, y + 6, z + track_length + 1, block.WOOL.id, 0)
def readyHorses(x, y, start_line):
# Draw all the horses at the start line
# We cheat a little by using the horse number in the list as its color
# This is a trick that must be used carefully as it can get you into
# trouble!
# A better way would be to have a list of colours
for horse in range(0, 6):
mc.setBlock(x + horse, y, start_line, block.WOOL.id, horse)
def Race(x, y, start_line, finish_line):
# This is where we race the horses
# All the horses start at the starting line and race in a straight line
# towards the finish
# We will use a list to keep track of where our horses are in the race
# We will have 6 horses in the race
# A quick way of putting the information into our list
horses = [start_line] * 6
# At the start of the race there isn't a winner yet, so
# we can use the Python "None" keyword for this
winner = None
# Loop around until one of the horses is at the finish line
# This isn't totally fair as we work out how far each horse has travelled
# in strict order, so the first white horse
# has more chance of winning than the last green
# but for now works well enough as often there is a clear winner
# The horses move along the Z axis, away from where the player is standing
# Like a real race, several horses will cross the finish line close together
# but only the first one counts
# We will use a variable called race_won to keep track if we have a winner yet
# Variables used in this way are sometimes called "Flags"
# When they change, our code needs to take notice
race_won = False
while (max(horses) <= finish_line):
for horse in range(0, 6):
# each horse will randomly move forward between 0 and three squares
# Instead of a dice, we use something from the random library
# unlike a for loop, randint is inclusive of the end point
moves = random.randint(0, 3)
# For every move we have to animate the horse moving towards the finish, in the z direction
# so replace where it is now with a block of air, then draw it forward one square
# repeat this until we have reached the number of moves
# print "horse "+str(horse) +" moves = "+str(moves)
for move in range(0, moves):
# Where is our horse now ?
horse_z = horses[horse]
# Rub out the horse
mc.setBlock(x + horse, y, horse_z, block.AIR)
# now draw it one move towards the finish line
# again we use the trick of the horse number also being it's
# color
mc.setBlock(x + horse, y, horse_z + 1, block.WOOL.id, horse)
# Update our list with where our horse is now, this time we use the short hand way
# rather than writing out: horses[horse] = horses[horse] + 1
horses[horse] += 1
            # Now that all moves are done, is this horse at or past the finish?
            # Check horses[horse] (its current position) rather than horse_z,
            # which is only set when the horse actually moved this turn
            if (horses[horse] >= finish_line) and (race_won is False):
                # make a note of the winner, not entirely fair as the first
                # horse always moves first
                race_won = True
                winner = horse
print "Winner is :" + str(winner)
# Wait a little bit, otherwise we won't see the race!
time.sleep(0.1)
    # Unfortunately Minecraft does not have a way for us to convert the color number
# of a wool block back into a word. We will make a list that matches
# the known list of wool colors and use this to find the winning color
# This is not without some possible problems. If Minecraft were to ever change
# its color numbers, then our code would report the wrong color
# They shouldn't ever do this, but things like that do happen
horse_colors = ["White", "Orange", "Magenta", "Blue", "Yellow", "Green"]
winning_horse = horse_colors[winner]
mc.postToChat(winning_horse + " is the winner!")
def CheckBlockIsHit(block_x, block_y, block_z):
# Has our special starter stone been right clicked with the sword
block_hits = mc.events.pollBlockHits()
if(block_hits):
for block_hit in block_hits:
print "some thing hit at:" + str(block_hit.pos.x) + " " + str(block_hit.pos.y) + " " + str(block_hit.pos.z)
# Was it our starter stone ?
if(block_hit.pos.x == block_x and block_hit.pos.y == block_y and block_hit.pos.z == block_z):
print "StartRace!"
return True
else:
# The player hit something, but not the right thing so return false
print "not the right block"
return False
def main():
# global x, y, z, track_length, start_line, finish_line, starting_block_x, start_race
x, y, z = mc.player.getTilePos()
track_length = 22
# Clean up the world so we have a nice flat space
mc.setBlocks(x - 10, y, z - 10, x + 20, y + 20, z + track_length + 10, block.AIR.id)
# Setup a grass floor
mc.setBlocks(x - 12, y - 2, z - 12, x + 20, y - 1, z + track_length + 10, block.GRASS.id)
# The start line will be one block back from where we are standing
start_line = z + 1
finish_line = start_line + track_length
# Draw a stone block right in front of where we are standing
    # When we right click it with our sword it will start the race
starting_block_x = x
mc.setBlock(starting_block_x, y, start_line, block.STONE.id)
DrawCourse(x + 1, y, start_line, track_length)
readyHorses(x + 4, y, start_line)
start_race = False
mc.postToChat("Right click the stone block with sword to start the race.")
while (start_race is False):
time.sleep(0.1)
if (CheckBlockIsHit(starting_block_x, y, start_line)):
start_race = True
Race(x + 4, y, start_line, finish_line)
# Start here
mc = minecraft.Minecraft.create()
main()
|
|
import base64
from rancher import ApiError
import pytest
from .common import * # NOQA
CLUSTER_NAME = os.environ.get("CLUSTER_NAME", "")
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None}
def test_secret_create_all_ns():
"""
Verify creation of secrets is functional
"""
p_client = namespace["p_client"]
ns = namespace["ns"]
# Value is base64 encoded
value = base64.b64encode(b"valueall")
keyvaluepair = {"testall": value.decode('utf-8')}
cluster = namespace["cluster"]
project = namespace["project"]
c_client = namespace["c_client"]
new_ns = create_ns(c_client, cluster, project)
namespacelist = [ns, new_ns]
secret = create_secret(keyvaluepair)
# Create workloads with secret in existing namespaces
for ns in namespacelist:
create_and_validate_workload_with_secret_as_volume(p_client, secret,
ns,
keyvaluepair)
create_and_validate_workload_with_secret_as_env_variable(p_client,
secret,
ns,
keyvaluepair)
# Create a new namespace and workload in the new namespace using the secret
new_ns1 = create_ns(c_client, cluster, project)
create_and_validate_workload_with_secret_as_volume(p_client,
secret,
new_ns1,
keyvaluepair)
create_and_validate_workload_with_secret_as_env_variable(p_client,
secret,
new_ns1,
keyvaluepair)
c_client.delete(new_ns)
def test_secret_create_single_ns():
"""
    Verify creation of secrets in a single namespace is functional
"""
p_client = namespace["p_client"]
ns = namespace["ns"]
# Value is base64 encoded
value = base64.b64encode(b"valueall")
keyvaluepair = {"testall": value.decode('utf-8')}
secret = create_secret(keyvaluepair, singlenamespace=True)
# Create workloads with secret in existing namespace
create_and_validate_workload_with_secret_as_volume(p_client, secret, ns,
keyvaluepair)
create_and_validate_workload_with_secret_as_env_variable(p_client, secret,
ns, keyvaluepair)
def test_secret_delete_all_ns():
"""
Verify Deletion of secrets is functional
"""
p_client = namespace["p_client"]
ns = namespace["ns"]
# Value is base64 encoded
value = base64.b64encode(b"valuealldelete")
keyvaluepair = {"testalldelete": value.decode('utf-8')}
secret = create_secret(keyvaluepair)
delete_secret(p_client, secret, ns, keyvaluepair)
def test_secret_delete_single_ns():
p_client = namespace["p_client"]
ns = namespace["ns"]
# Value is base64 encoded
value = base64.b64encode(b"valuealldelete")
keyvaluepair = {"testalldelete": value.decode('utf-8')}
secret = create_secret(keyvaluepair, singlenamespace=True)
delete_secret(p_client, secret, ns, keyvaluepair)
def test_secret_edit_all_ns():
p_client = namespace["p_client"]
name = random_test_name("default")
# Value is base64 encoded
value = base64.b64encode(b"valueall")
keyvaluepair = {"testall": value.decode('utf-8')}
cluster = namespace["cluster"]
project = namespace["project"]
c_client = namespace["c_client"]
# Create a namespace
new_ns = create_ns(c_client, cluster, project)
secret = create_secret(keyvaluepair)
# Value is base64 encoded
value1 = base64.b64encode(b"valueall")
value2 = base64.b64encode(b"valueallnew")
updated_dict = {"testall": value1.decode(
'utf-8'), "testallnew": value2.decode('utf-8')}
updated_secret = p_client.update(secret, name=name, namespaceId='NULL',
data=updated_dict)
assert updated_secret['baseType'] == "secret"
updatedsecretdata = updated_secret['data']
print("UPDATED SECRET DATA")
print(updatedsecretdata)
assert updatedsecretdata.data_dict() == updated_dict
# Create workloads using updated secret in the existing namespace
create_and_validate_workload_with_secret_as_volume(p_client, secret,
new_ns,
updatedsecretdata)
create_and_validate_workload_with_secret_as_env_variable(
p_client, secret, new_ns, updatedsecretdata)
# Create a new namespace and workloads in the new namespace using secret
new_ns1 = create_ns(c_client, cluster, project)
create_and_validate_workload_with_secret_as_volume(p_client, secret,
new_ns1,
updatedsecretdata)
create_and_validate_workload_with_secret_as_env_variable(
p_client, secret, new_ns1, updatedsecretdata)
c_client.delete(new_ns)
def test_secret_edit_single_ns():
p_client = namespace["p_client"]
ns = namespace["ns"]
name = random_test_name("default")
# Value is base64 encoded
value = base64.b64encode(b"valueall")
keyvaluepair = {"testall": value.decode('utf-8')}
secret = create_secret(keyvaluepair, singlenamespace=True)
value1 = base64.b64encode(b"valueall")
value2 = base64.b64encode(b"valueallnew")
updated_dict = {"testall": value1.decode(
'utf-8'), "testallnew": value2.decode('utf-8')}
updated_secret = p_client.update(secret, name=name,
namespaceId=ns['name'],
data=updated_dict)
assert updated_secret['baseType'] == "namespacedSecret"
updatedsecretdata = updated_secret['data']
print("UPDATED SECRET DATA")
print(updatedsecretdata)
assert updatedsecretdata.data_dict() == updated_dict
# Create a workload with the updated secret in the existing namespace
create_and_validate_workload_with_secret_as_volume(p_client, secret,
ns,
updatedsecretdata)
create_and_validate_workload_with_secret_as_env_variable(
p_client, secret, ns, updatedsecretdata)
rbac_role_list = [
(CLUSTER_OWNER),
(PROJECT_OWNER),
(PROJECT_MEMBER),
]
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_secret_create(role):
"""
Verify creation of secrets for Cluster owner, project owner and project
member
"""
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, token)
rbac_secret_create(p_client, ns)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_secret_edit(role):
"""
Verify editing of secrets for Cluster owner, project owner and project
member
"""
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, token)
rbac_secret_edit(p_client, ns, project=project)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_secret_delete(role):
"""
Verify deletion of secrets for Cluster owner, project owner and project
member
"""
user_token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, user_token)
rbac_secret_delete(p_client, ns)
@if_test_rbac
def test_rbac_secret_create_cluster_member(remove_resource):
"""
Verify cluster member can create secret and deploy workload using secret
in the project he created
"""
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
project, ns = \
create_project_and_ns(user_token, namespace["cluster"],
random_test_name("rbac-cluster-mem"),
ns_name=random_test_name("ns-cluster-mem"))
p_client = get_project_client_for_token(project, user_token)
rbac_secret_create(p_client, ns)
# Create a project as cluster owner and verify the cluster member cannot
# create secret in this project
keyvaluepair = {"testall": "valueall"}
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
ownerproject, ns = \
create_project_and_ns(cluster_owner_token,
namespace["cluster"],
random_test_name("rbac-cluster-owner"))
cluster_member_client = get_project_client_for_token(ownerproject,
user_token)
remove_resource(project)
remove_resource(ownerproject)
with pytest.raises(ApiError) as e:
create_secret(keyvaluepair, singlenamespace=False,
p_client=cluster_member_client)
assert e.value.error.status == 403
assert e.value.error.code == 'PermissionDenied'
@if_test_rbac
def test_rbac_secret_edit_cluster_member(remove_resource):
"""
Verify cluster member can create secret and edit secret in the project he
created
"""
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
project, ns = \
create_project_and_ns(user_token, namespace["cluster"],
random_test_name("rbac-cluster-mem"),
ns_name=random_test_name("ns-cluster-mem"))
p_client = get_project_client_for_token(project, user_token)
rbac_secret_edit(p_client, ns, project=project)
# Create a project as cluster owner and verify the cluster member cannot
# edit secret in this project
keyvaluepair = {"testall": "valueall"}
value1 = ("valueall")
value2 = ("valueallnew")
updated_dict = {"testall": value1, "testallnew": value2}
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
ownerproject, ns = create_project_and_ns(
cluster_owner_token,
namespace["cluster"],
random_test_name("rbac-cluster-owner"))
cluster_owner_client = get_project_client_for_token(ownerproject,
cluster_owner_token)
cluster_member_client = get_project_client_for_token(ownerproject,
user_token)
ownersecret = create_secret(keyvaluepair, singlenamespace=False,
p_client=cluster_owner_client)
remove_resource(project)
remove_resource(ownerproject)
with pytest.raises(ApiError) as e:
cluster_member_client.update(ownersecret, namespaceId='NULL',
data=updated_dict)
assert e.value.error.status == 404
assert e.value.error.code == 'NotFound'
@if_test_rbac
def test_rbac_secret_delete_cluster_member(remove_resource):
"""
Verify cluster member can create secret and delete secret in the project he
created
"""
keyvaluepair = {"testall": "valueall"}
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
project, ns = \
create_project_and_ns(user_token, namespace["cluster"],
random_test_name("rbac-cluster-mem"),
ns_name=random_test_name("ns-cluster-mem"))
p_client = get_project_client_for_token(project, user_token)
rbac_secret_delete(p_client, ns)
# Create a project as cluster owner and verify the cluster member cannot
# delete secret in this project
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
ownerproject, ns = create_project_and_ns(
cluster_owner_token,
namespace["cluster"],
random_test_name("rbac-cluster-owner"))
cluster_owner_client = get_project_client_for_token(ownerproject,
cluster_owner_token)
cluster_member_client = get_project_client_for_token(ownerproject,
user_token)
ownersecret = create_secret(keyvaluepair, singlenamespace=False,
p_client=cluster_owner_client)
remove_resource(project)
remove_resource(ownerproject)
with pytest.raises(ApiError) as e:
delete_secret(cluster_member_client, ownersecret, ns, keyvaluepair)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_secret_create_project_readonly():
"""
Verify read-only user cannot create secret
"""
project = rbac_get_project()
user_token1 = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
readonly_user_client = get_project_client_for_token(project, user_token1)
keyvaluepair = {"testall": "valueall"}
# Read Only member cannot create secrets
with pytest.raises(ApiError) as e:
create_secret(keyvaluepair, singlenamespace=False,
p_client=readonly_user_client)
assert e.value.error.status == 403
assert e.value.error.code == 'PermissionDenied'
@if_test_rbac
def test_rbac_secret_edit_project_readonly_member(remove_resource):
"""
Verify read-only user cannot edit secret
"""
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
project = rbac_get_project()
ns = rbac_get_namespace()
user_token1 = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
readonly_user_client = get_project_client_for_token(project, user_token1)
keyvaluepair = {"testall": "valueall"}
cluster_owner_p_client = get_project_client_for_token(project,
cluster_owner_token)
# As a cluster owner, create a secret
secret = create_secret(keyvaluepair, p_client=cluster_owner_p_client,
ns=ns)
# Readonly member cannot edit secret
value1 = ("valueall")
value2 = ("valueallnew")
updated_dict = {"testall": value1, "testallnew": value2}
remove_resource(secret)
with pytest.raises(ApiError) as e:
readonly_user_client.update(secret,
namespaceId=ns['name'],
data=updated_dict)
assert e.value.error.status == 404
assert e.value.error.code == 'NotFound'
@if_test_rbac
def test_rbac_secret_delete_project_readonly(remove_resource):
"""
Verify read-only user cannot delete secret
"""
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
project = rbac_get_project()
ns = rbac_get_namespace()
user_token1 = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
readonly_user_client = get_project_client_for_token(project, user_token1)
keyvaluepair = {"testall": "valueall"}
cluster_owner_p_client = get_project_client_for_token(project,
cluster_owner_token)
# As a cluster owner, create a secret
secret = create_secret(keyvaluepair, p_client=cluster_owner_p_client,
ns=ns)
remove_resource(secret)
# Assert read-only user cannot delete the secret
with pytest.raises(ApiError) as e:
delete_secret(readonly_user_client, secret, ns, keyvaluepair)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_secret_list(remove_resource, role):
user_token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
p_client = get_project_client_for_token(project, user_token)
rbac_secret_list(p_client)
@if_test_rbac
def test_rbac_secret_list_cluster_member(remove_resource):
"""
Verify cluster member can list secret in the project he created
"""
keyvaluepair = {"testall": "valueall"}
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
project, ns = \
create_project_and_ns(user_token, namespace["cluster"],
random_test_name("rbac-cluster-mem"),
ns_name=random_test_name("ns-cluster-mem"))
p_client = get_project_client_for_token(project, user_token)
rbac_secret_list(p_client)
# Create a project as cluster owner and verify the cluster member cannot
# list secret in this project
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
ownerproject, ns = create_project_and_ns(
cluster_owner_token,
namespace["cluster"],
random_test_name("rbac-cluster-owner"))
cluster_owner_client = get_project_client_for_token(ownerproject,
cluster_owner_token)
cluster_member_client = get_project_client_for_token(ownerproject,
user_token)
ownersecret = create_secret(keyvaluepair, singlenamespace=False,
p_client=cluster_owner_client)
secretdict = cluster_member_client.list_secret(name=ownersecret.name)
secretdata = secretdict.get('data')
assert len(secretdata) == 0
cluster_owner_client.delete(ownersecret)
remove_resource(project)
remove_resource(ownerproject)
@if_test_rbac
def test_rbac_secret_list_project_readonly():
"""
Verify read-only user cannot list secret
"""
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
project = rbac_get_project()
readonly_user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
readonly_user_client = get_project_client_for_token(project,
readonly_user_token)
keyvaluepair = {"testall": "valueall"}
cluster_owner_p_client = get_project_client_for_token(project,
cluster_owner_token)
# As a cluster owner, create a secret
secret = create_secret(keyvaluepair, p_client=cluster_owner_p_client)
# Verify Read-Only user cannot list the secret
secretdict = readonly_user_client.list_secret(name=secret.name)
secretdata = secretdict.get('data')
assert len(secretdata) == 0
cluster_owner_p_client.delete(secret)
def rbac_secret_create(p_client, ns):
"""
Verify creating secret is functional.
The p_client passed as the parameter would be as per the role assigned
"""
keyvaluepair = {"testall": "valueall"}
secret = create_secret(keyvaluepair, singlenamespace=False,
p_client=p_client)
# Create workloads with secret in existing namespace
create_and_validate_workload_with_secret_as_volume(p_client, secret,
ns, keyvaluepair)
def rbac_secret_edit(p_client, ns, project=None):
"""
Verify creating, editing secret is functional.
The p_client passed as the parameter would be as per the role assigned
"""
value = base64.b64encode(b"valueall")
keyvaluepair = {"testall": value.decode('utf-8')}
cluster = namespace["cluster"]
c_client = namespace["c_client"]
# Create a namespace
secret = create_secret(keyvaluepair, singlenamespace=False,
p_client=p_client)
# Value is base64 encoded
value1 = base64.b64encode(b"valueall")
value2 = base64.b64encode(b"valueallnew")
updated_dict = {"testall": value1.decode(
'utf-8'), "testallnew": value2.decode('utf-8')}
updated_secret = p_client.update(secret, namespaceId='NULL',
data=updated_dict)
assert updated_secret['baseType'] == "secret"
updatedsecretdata = updated_secret['data']
print("UPDATED SECRET DATA")
print(updatedsecretdata)
assert updatedsecretdata.data_dict() == updated_dict
# Create workloads using updated secret in the existing namespace
create_and_validate_workload_with_secret_as_volume(p_client, secret,
ns,
updatedsecretdata)
create_and_validate_workload_with_secret_as_env_variable(
p_client, secret, ns, updatedsecretdata)
# Create a new namespace and workloads in the new namespace using secret
new_ns1 = create_ns(c_client, cluster, project)
create_and_validate_workload_with_secret_as_volume(p_client, secret,
new_ns1,
updatedsecretdata)
create_and_validate_workload_with_secret_as_env_variable(
p_client, secret, new_ns1, updatedsecretdata)
def rbac_secret_delete(p_client, ns):
"""
Verify creating, deleting secret is functional.
The p_client passed as the parameter would be as per the role assigned
"""
keyvaluepair = {"testall": "valueall"}
secret = create_secret(keyvaluepair, singlenamespace=False,
p_client=p_client)
# Verify deletion of secret
delete_secret(p_client, secret, ns, keyvaluepair)
def rbac_secret_list(p_client):
'''
Create a secret and list the secret
'''
keyvaluepair = {"testall": "valueall"}
secret = create_secret(keyvaluepair, singlenamespace=False,
p_client=p_client)
secretname = secret.name
secretdict = p_client.list_secret(name=secretname)
secretlist = secretdict.get('data')
testsecret = secretlist[0]
testsecret_data = testsecret['data']
assert len(secretlist) == 1
assert testsecret.type == "secret"
assert testsecret.name == secretname
assert testsecret_data.data_dict() == keyvaluepair
p_client.delete(testsecret)
@pytest.fixture(scope='module', autouse=True)
def create_project_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
p, ns = create_project_and_ns(USER_TOKEN, cluster, "testsecret")
p_client = get_project_client_for_token(p, USER_TOKEN)
c_client = get_cluster_client_for_token(cluster, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
namespace["c_client"] = c_client
def fin():
client = get_user_client()
client.delete(namespace["project"])
request.addfinalizer(fin)
def validate_workload_with_secret(p_client, workload,
type, ns_name, keyvaluepair,
workloadwithsecretasVolume=False,
workloadwithsecretasenvvar=False,
podcount=1):
validate_workload(p_client, workload, type, ns_name, pod_count=podcount)
pod_list = p_client.list_pod(workloadId=workload.id).data
mountpath = "/test"
for i in range(0, len(keyvaluepair)):
key = list(keyvaluepair.keys())[i]
if workloadwithsecretasVolume:
key_file_in_pod = mountpath + "/" + key
command = "cat " + key_file_in_pod + ''
if is_windows():
command = 'powershell -NoLogo -NonInteractive -Command "& {{ cat {0} }}"'.format(key_file_in_pod)
result = kubectl_pod_exec(pod_list[0], command)
assert result.rstrip() == base64.b64decode(list(keyvaluepair.values())[i])
elif workloadwithsecretasenvvar:
command = 'env'
if is_windows():
command = 'powershell -NoLogo -NonInteractive -Command \'& {{ (Get-Item -Path Env:).Name | ' \
'% { "$_=$((Get-Item -Path Env:\\$_).Value)" }}\''
result = kubectl_pod_exec(pod_list[0], command)
            assert base64.b64decode(list(keyvaluepair.values())[i]) in result
def delete_secret(client, secret, ns, keyvaluepair):
key = list(keyvaluepair.keys())[0]
secretname = secret.name
print("Delete Secret")
client.delete(secret)
# Sleep to allow for the secret to be deleted
time.sleep(5)
timeout = 30
print("Secret list after deleting secret")
secretdict = client.list_secret(name=secretname)
print(secretdict)
print(secretdict.get('data'))
start = time.time()
    if len(secretdict.get('data')) > 0:
        testdata = secretdict.get('data')
        print("TESTDATA")
        print(testdata[0]['data'])
        # Poll until the secret disappears from the listing or we time out
        while len(testdata) > 0 and key in testdata[0]['data']:
            if time.time() - start > timeout:
                raise AssertionError("Timed out waiting for deletion")
            time.sleep(.5)
            secretdict = client.list_secret(name=secretname)
            testdata = secretdict.get('data')
# Verify secret is deleted by "kubectl get secret" command
command = " get secret " + secret['name'] + " --namespace=" + ns.name
print("Command to obtain the secret")
print(command)
result = execute_kubectl_cmd(command, json_out=False, stderr=True)
print(result)
print("Verify that the secret does not exist "
"and the error code returned is non zero ")
if result != 0:
assert True
def create_and_validate_workload_with_secret_as_volume(p_client, secret, ns,
keyvaluepair,
name=None):
if name is None:
name = random_test_name("test")
# Create Workload with secret as volume
mountpath = "/test"
volumeMounts = [{"readOnly": False, "type": "volumeMount",
"mountPath": mountpath, "name": "vol1"}]
con = [{"name": "test1",
"image": TEST_IMAGE,
"volumeMounts": volumeMounts}]
secretName = secret['name']
volumes = [{"type": "volume", "name": "vol1",
"secret": {"type": "secretVolumeSource", "defaultMode": 256,
"secretName": secretName,
"optional": False, "items": "NULL"}}]
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id, volumes=volumes)
validate_workload_with_secret(p_client, workload, "deployment",
ns.name, keyvaluepair,
workloadwithsecretasVolume=True)
def create_and_validate_workload_with_secret_as_env_variable(p_client, secret,
ns, keyvaluepair,
name=None):
if name is None:
name = random_test_name("test")
# Create Workload with secret as env variable
secretName = secret['name']
environmentdata = [{
"source": "secret",
"sourceKey": None,
"sourceName": secretName
}]
con = [{"name": "test",
"image": TEST_IMAGE,
"environmentFrom": environmentdata}]
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload_with_secret(p_client, workload, "deployment",
ns.name, keyvaluepair,
workloadwithsecretasenvvar=True)
def create_secret(keyvaluepair, singlenamespace=False,
p_client=None, ns=None, name=None):
if p_client is None:
p_client = namespace["p_client"]
if name is None:
name = random_test_name("default")
if ns is None:
ns = namespace["ns"]
if not singlenamespace:
secret = p_client.create_secret(name=name, data=keyvaluepair)
assert secret['baseType'] == "secret"
else:
secret = p_client.create_namespaced_secret(name=name,
namespaceId=ns['name'],
data=keyvaluepair)
assert secret['baseType'] == "namespacedSecret"
print(secret)
secretdata = secret['data']
print("SECRET DATA")
print(secretdata)
assert secretdata.data_dict() == keyvaluepair
return secret
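# Note on the data payloads used throughout these tests: Rancher expects
# secret values to be base64 encoded, so a key/value pair is built as, e.g.
#   value = base64.b64encode(b"valueall")  # b'dmFsdWVhbGw='
#   keyvaluepair = {"testall": value.decode('utf-8')}
# and validate_workload_with_secret() base64-decodes the values again before
# comparing them with what the pod actually sees.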
|
|
"""
Author: Jay Engineer
Home Page: https://github.com/jay754/Facebook-Graph-Info
Script: fbwrapper.py
Python/PHP Facebook-Graph API
PHP/Python Wrapper for Facebook-open-graph (no OAuth)
license: The BSD 3-Clause License
"""
import urllib2
import urllib
import json
import requests
import sys
import os
#sys.setdefaultencoding("utf-8")
class DownloadError(Exception): pass #download error
class fbwrapper:
def __init__(self, url, token = None):
self.url = url
self.token = token
def __getURL(self):
"""returns Url"""
return self.url
def __getToken(self):
"""returns token"""
return self.token
def _HTTPStatus(self, url):
"""get the http status code of a site"""
r = requests.get(url)
http_status = r.status_code
return http_status
def fbInfo(self, username):
"""Gets the basic info for the person
Returns a Dictionary of basic info of username"""
url = self.url
results = urllib2.urlopen(url+username)
json_decoded = json.load(results)
return json_decoded
def fbID(self, username):
"""Gets the fb id of the person"""
results = self.fbInfo(username)
Id = str(results["id"])
return Id
def getName(self, username):
"""Gets the First and Last Name of the person
        Returns a tuple of last name first and then first name"""
results = self.fbInfo(username)
Data = (str(results["last_name"]), str(results["first_name"]))
return Data
def getUsername(self, username):
"""Gets the fb username of the person"""
results = self.fbInfo(username)
Data = str(results["username"])
return Data
def getLink(self, username):
"""Gets the fb link of the person"""
results = self.fbInfo(username)
Data = str(results["link"])
return Data
def getGender(self, username):
"""Gets the fb gender of the person"""
results = self.fbInfo(username)
Data = str(results["gender"])
return Data
def getPic(self, username):
"""Gets the picture of the person and saved on your computer
-Saves the your facebook picture in the current directory and saved as jpg
-Just put the username of the person's picture you want
-Does not tell you if there's a duplicate saved"""
url = self.url
#results = urllib2.urlopen(url+username+"/picture").read()
cwd = os.getcwd()
        try:
            urllib.urlretrieve(url+username+"/picture", username+".jpg")
            return "success, your file is saved at " + cwd
        except IOError:
            raise DownloadError("could not download picture for " + username)
def getPageInfo(self, id):
"""gets the basic info of a facebook page
Returns a dictionary of info"""
url = self.url
results = urllib2.urlopen(url+id)
json_decoded = json.load(results)
        data = {"Name" : str(json_decoded["name"]),
"Id" : str(json_decoded["id"]),
"Likes" : str(json_decoded["likes"]),
"Website" : str(json_decoded["website"]),
"People Talking about" : str(json_decoded["talking_about_count"]),
"About" : str(json_decoded["about"])}
return data
def appInfo(self, id):
"""gets the basic info of a facebook app"""
url = self.url
results = urllib2.urlopen(url+id)
json_decoded = json.load(results)
        data = {"Name" : str(json_decoded["name"]),
"weekly active users" : str(json_decoded["weekly_active_users"]),
"monthly active users" : str(json_decoded["monthly_active_users"]),
"daily active users rank" : str(json_decoded["daily_active_users_rank"])}
return data
def getLikes(self, username):
"""gets like of the original person"""
url = self.url
token = self.token
http_status = self._HTTPStatus(url+username+"/likes?access_token="+token)
if http_status == 200:
results = urllib2.urlopen(url+username+"/likes?access_token="+token)
json_decoded = json.load(results)
data = json.dumps([i for i in json_decoded["data"]])
categories = json.dumps([i["category"] for i in json_decoded["data"]])
names = json.dumps([i["name"] for i in json_decoded["data"]])
ids = json.dumps([i["id"] for i in json_decoded["data"]])
data = {"Ids" : ids,
"categories" : categories,
"Names" : names}
return data
else:
return "bad request"
def getFriends(self, username):
"""gets the friends of the original person"""
url = self.url
token = self.token
http_status = self._HTTPStatus(url+username+"/friends?access_token="+token)
if http_status == 200:
results = urllib2.urlopen(url+username+"/friends?access_token="+token).read()
json_decoded = json.loads(results)
names = json.dumps([i["name"] for i in json_decoded["data"]])
ids = json.dumps([i["id"] for i in json_decoded["data"]])
data = {"Ids" : ids,
"Names" : names}
return data
else:
return "bad request"
def getGroups(self, username):
"""gets the groups that the person is in"""
url = self.url
token = self.token
http_status = self._HTTPStatus(url+username+"/groups?access_token="+token)
if http_status == 200:
results = urllib2.urlopen(url+username+"/groups?access_token="+token).read()
json_decoded = json.loads(results)
ids = json.dumps([i["id"] for i in json_decoded["data"]])
names = json.dumps([i["name"] for i in json_decoded["data"]])
data = {"Ids" : ids,
"Names" : names }
return data
else:
return "bad request"
def getMusic(self, username):
"""gets the music that the person listens to"""
url = self.url
token = self.token
http_status = self._HTTPStatus(url+username+"/music?access_token="+token)
if http_status == 200:
results = urllib2.urlopen(url+username+"/music?access_token="+token).read()
json_decoded = json.loads(results)
ids = json.dumps([i["id"] for i in json_decoded["data"]])
categories = json.dumps([i["category"] for i in json_decoded["data"]])
            names = json.dumps([i["name"] for i in json_decoded["data"]])
data = {"Ids" : ids,
"Names" : names,
"category" : categories}
return data
else:
return "bad request"
# Example usage; Token must hold a valid Graph API access token
Token = "YOUR_ACCESS_TOKEN"  # placeholder, replace with a real token
fbObject = fbwrapper("https://graph.facebook.com/", Token)
print fbObject.getMusic("jay.enginer")
|
|
""" Module "Request Manager" : Module for interacting via Habitica API
All fetching, scoring, editing, deleting activities are defined here.
"""
# Standard Library Imports
import requests
import time
import imp
import importlib
import datetime
# Custom Module Imports
import config as C
from screen import Screen
import global_objects as G
import helper as H
import menu as M
import task as T
import debug as DEBUG
import user as U
#Set up logging
import logging
logger = logging.getLogger(__name__)
logger.debug("Debug logging started for %s..." % __name__)
# URL Definitions
API_URL = "https://habitica.com:443/api/v3"
#Request Methods
request_methods = dict()
request_methods['get'] = requests.get
request_methods['put'] = requests.put
request_methods['post'] = requests.post
request_methods['delete'] = requests.delete
class RequestManager(object):
""" The main class for sending/receiving data to the habitica server """
def __init__(self):
self.headers = {'x-api-key': C.getConfig("key"), 'x-api-user': C.getConfig("uuid")}
self.ClearQueues()
# General Wrapper to fetch JSON data from server
def APIV3_call(self,path,params={},failure='hard',method='get',obj=None):
if method not in request_methods:
raise ValueError("Unknown Method type ",method)
url = API_URL+"/"+path
        if method == 'get' and params:
            # join multiple query parameters with '&'
            url += "?" + "&".join(param + "=" + value
                                  for param, value in params.iteritems())
logger.warn("Calling V3 API: %s" % url)
resp = request_methods[method](url, headers=self.headers,json=obj)
# Need some error handling here
if resp.status_code == 200:
logger.debug("HTTP Response: 200 Okay!")
rval = resp.json()['data']
elif resp.status_code == 201:
logger.debug("HTTP Response: 201 Object Created")
rval = resp.json()['data']
else:
if(failure=='hard'):
raise ValueError("HTTP Response not recognized: %d" % resp.status_code)
else:
logger.warn("HTTP Response not recognized: %d" % resp.status_code)
rval = -1
return rval
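    # Example: FetchUserTasks('dailys') below reduces to
    #   self.APIV3_call("tasks/user", {'type': 'dailys'})
    # which issues GET https://habitica.com:443/api/v3/tasks/user?type=dailys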
################################
## V3 API Calls #
################################
#Fetches the 'content', which is basically all the strings and values that are constant in the game
# https://habitica.com/apidoc/#api-Content-ContentGet
def FetchGameContent(self):
return self.APIV3_call("content")
#Fetches the User Object from the API
# https://habitica.com/apidoc/#api-User-UserGet
def FetchUserData(self):
return self.APIV3_call("user")
#Fetches User Tasks from the API.
# https://habitica.com/apidoc/#api-Task-GetUserTasks
# task_type can be "habits", "dailys", "todos", "rewards", "completedTodos"
def FetchUserTasks(self,task_type=None):
tasks = None
if(task_type is None):
tasks = self.APIV3_call("tasks/user")
else:
if(task_type not in ["habits", "dailys", "todos", "rewards", "completedTodos"]):
raise ValueError("Unknown task type %s" % task_type)
tasks = self.APIV3_call("tasks/user",{'type':task_type})
return tasks
# Score a task up/down
# https://habitica.com/apidoc/#api-Task-ScoreTask
def ScoreTask(self,task_id,direction):
if(direction not in ['up','down']):
raise ValueError("Unknown task direction %s" % direction)
return self.APIV3_call("tasks/"+task_id+"/score/"+direction,method='post')
# Add a new task
# https://habitica.com/apidoc/#api-Task-CreateUserTasks
def CreateTask(self, task_obj):
return self.APIV3_call("tasks/user",method='post',obj=task_obj)
# Delete a task
# https://habitica.com/apidoc/#api-Task-DeleteTask
def DeleteTask(self, task_id):
return self.APIV3_call("tasks/"+task_id,method='delete')
# Update a task
# https://habitica.com/apidoc/#api-Task-UpdateTask
def UpdateTask(self, task_id, task_obj):
return self.APIV3_call("tasks/"+task_id,method='put',obj=task_obj)
#Fetches the User Object from the API
# https://habitica.com/apidoc/#api-Group-GetGroup
def FetchParty(self):
return self.APIV3_call("groups/party")
################################
## Deprecated Functions #
################################
# These are functions that don't really belong in the
# request manager (they're interface/model based, not
# request based
def CreateTask_orig(self,title,task_type):
task = {}
task['text'] = title.decode("utf-8")
task['type'] = task_type
task['priority'] = 1
if task_type == 'todo' or task_type == 'daily':
task['checklist'] = []
if task_type == "daily":
task['everyX'] = 1
task['frequency'] = 'weekly'
task['repeat'] = {'m': True, 't': True, 'w': True, 'th': True, 'f': True, 's': True, 'su': True}
if task_type == "habit":
task['up'] = True
task['down'] = True
DEBUG.Display("Creating Task...");
ret_task = self.CreateTask(task)
DEBUG.Display(" ")
logger.debug(ret_task)
if task_type == "habit":
item = T.Habit(ret_task)
menu_item = M.MenuItem(item, "habit", item.text)
G.HabitMenu.Insert(menu_item)
elif task_type == "daily":
item = T.Daily(ret_task)
menu_item = M.MenuItem(item, "daily", item.text)
G.DailyMenu.Insert(menu_item)
elif task_type == "todo":
item = T.TODO(ret_task)
menu_item = M.MenuItem(item, "todo", item.text)
G.TODOMenu.Insert(menu_item)
    # Flush Queues (this doesn't belong as part of the request manager!)
def ClearQueues(self):
self.MarkUpQueue = []
self.MarkDownQueue = []
self.MarkQueue = []
self.DeleteQueue = []
self.EditQueue = []
#Fetches basic user data for the interface
def FetchData(self):
G.LastUpdate = datetime.datetime.now()
#Get the user data from the API
DEBUG.Display("Connecting...")
user_json = self.FetchUserData()
task_json = self.FetchUserTasks()
DEBUG.Display("Connected")
time.sleep(1)
DEBUG.Display(" ")
# Initialize User Stats
G.user = U.User( user_json )
# These will contain the menu items passed to create the Habit, Daily
# and Todo menus
habit_items = []
dailies_items = []
todos_items = []
logger.debug("Found %d tasks" % len(task_json))
for i in task_json:
logger.debug("Processing a TODO: %s" % i['text'].encode("utf-8").strip())
if( i['type'] == "habit" ):
item = T.Habit(i)
habit_items += [M.MenuItem(item, "habit", item.text)]
elif( i['type'] == "daily" ):
item = T.Daily(i)
dailies_items += [M.MenuItem(item, "daily", item.text)]
elif( i['type'] == "todo" ):
if i['completed']:
continue
item = T.TODO(i)
todos_items += [M.MenuItem(item, "todo", item.text)]
elif( i['type'] == "reward" ):
logger.warn("Custom Rewards aren't implemented yet, but the user has one: %s" % i['text'])
else:
logger.debug("Weird task %s with type: %s" %(i['text'].encode("utf-8"), i['type'].encode("utf-8")))
raise ValueError("Unknown task type %s" % i['type'].encode("utf-8"))
# Generate the menus for the display
G.HabitMenu = M.Menu(habit_items, "Habits")
G.DailyMenu = M.Menu(dailies_items, "Dailies")
G.TODOMenu = M.Menu(todos_items, "TODOs")
# Write back changes to the server and update the interface
def Flush(self,flush_for_quit=False):
#TODO: most of this should not happen in the request manager
import content as CT
DEBUG.Display("Please Wait...")
Drops = list()
# Difference obtained in user stats due to these operations
origDict = {'hp': G.user.hp, 'gp': G.user.gp, 'mp': G.user.mp,
'exp': G.user.exp, 'lvl': G.user.lvl}
diffDict = origDict.copy()
#
#
#
# Habits marked as +
for i in self.MarkUpQueue:
logger.debug("Marking '%s' up" % str(i.taskname))
json = self.ScoreTask(i.task.taskID,'up')
            for stat in diffDict:
                diffDict[stat] = json[stat]
# Check for drops
tmpdrp = CT.CheckDrops( json['_tmp'] )
if( tmpdrp is not None):
Drops.append(tmpdrp)
#
#
#
# Habits marked as -
for i in self.MarkDownQueue:
logger.debug("Marking '%s' down" % str(i))
json = self.ScoreTask(i.task.taskID,'down')
            for stat in diffDict:
                diffDict[stat] = json[stat]
#
#
#
# Dailies and TODOS marked as completed
for i in self.MarkQueue:
direction = None
if i.task.task_type != "daily" or (not i.task.completed):
direction = "up"
else:
direction = "down"
if (direction is None):
continue
json = self.ScoreTask(i.task.taskID,direction)
if i.task.task_type == "todo":
G.TODOMenu.Remove(i.task.taskID)
elif i.task.task_type == "daily":
i.task.completed ^= True
            for stat in diffDict:
                diffDict[stat] = json[stat]
# Check for drops
tmpdrp = CT.CheckDrops( json['_tmp'] )
if( tmpdrp is not None):
Drops.append(tmpdrp)
#
#
#
for i in self.DeleteQueue:
self.DeleteTask(i.task.taskID)
if i.task.task_type == "habit":
G.HabitMenu.Remove(i.task.taskID)
elif i.task.task_type == "daily":
G.DailyMenu.Remove(i.task.taskID)
elif i.task.task_type == "todo":
G.TODOMenu.Remove(i.task.taskID)
#
#
#
for i in self.EditQueue:
self.UpdateTask(i.task.taskID, i.task.data)
if(flush_for_quit):
return
#
#
# Update the Interface
G.screen.Erase()
G.user.PrintDiff(diffDict)
G.intf.Init()
G.user.PrintUserStats()
#
#
#
# Display Drop Messages
if Drops:
G.screen.SaveInRegister(1)
drop_items = []
for i in Drops:
DEBUG.Display("Processing Drop %s..." % i);
drop_items += [M.SimpleTextItem(i)]
dropMenu = M.SimpleTextMenu(drop_items, C.SCR_TEXT_AREA_LENGTH)
dropMenu.SetXY(C.SCR_FIRST_HALF_LENGTH, 5)
dropMenu.Display()
dropMenu.Input()
G.screen.RestoreRegister(1)
self.ClearQueues()
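# A minimal usage sketch (assumes config provides "key"/"uuid" and that the
# interface objects in global_objects have been initialized):
#
#   manager = RequestManager()
#   manager.FetchData()   # pull user + tasks and build the menus
#   ...                   # the UI queues up marks/edits/deletes
#   manager.Flush()       # write the queued changes back to the server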
|
|
# Copyright 2017 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64, boto3, os, urllib2, hashlib
from tasks import task, TaskError, get, sh, Secret
class DockerImageBuilderError(TaskError):
report_traceback = False
pass
class DockerImageBuilder(object):
DOCKER = 'docker'
IMAGEBUILDER = 'imagebuilder'
@classmethod
    def get_cmd_from_name(cls, name):
        if name == cls.DOCKER:
            def docker_build(directory, dockerfile, img, buildargs):
                return ["docker", "build", directory, "-f", dockerfile, "-t", img] + buildargs
            return docker_build
        elif name == cls.IMAGEBUILDER:
            def imagebuilder_build(directory, dockerfile, img, buildargs):
                return ["imagebuilder", "-f", dockerfile, "-t", img] + buildargs + [directory]
            return imagebuilder_build
        raise DockerImageBuilderError("No image builder named %s exists. Available builders are: %s" % (name, ", ".join([cls.DOCKER, cls.IMAGEBUILDER])))
def image(registry, namespace, name, version):
parts = (registry, namespace, "%s:%s" % (name, version))
return "/".join(p for p in parts if p)
class DockerBase(object):
def __init__(self):
self.image_cache = {}
self.logged_in = False
def _login(self):
if not self.logged_in:
self._do_login()
self.logged_in = True
@task()
def local_exists(self, name, version):
return bool(sh("docker", "images", "-q", self.image(name, version)).output)
@task()
def exists(self, name, version):
return self.remote_exists(name, version) or self.local_exists(name, version)
@task()
def needs_push(self, name, version):
return self.local_exists(name, version) and not self.remote_exists(name, version)
@task()
def pull(self, image):
self._login()
sh("docker", "pull", image)
@task()
def tag(self, source, name, version):
img = self.image(name, version)
sh("docker", "tag", source, img)
def _create_repo(self, name):
pass
@task()
def push(self, name, version):
self._login()
self._create_repo(name)
img = self.image(name, version)
self.image_cache.pop(img, None)
sh("docker", "push", img)
return img
@task()
def build(self, directory, dockerfile, name, version, args, builder=None):
args = args or {}
builder = builder or DockerImageBuilder.DOCKER
buildargs = []
for k, v in args.items():
buildargs.append("--build-arg")
buildargs.append("%s=%s" % (k, v))
img = self.image(name, version)
cmd = DockerImageBuilder.get_cmd_from_name(builder)
sh(*cmd(directory, dockerfile, img, buildargs))
return img
def get_changes(self, dockerfile):
entrypoint = None
cmd = None
with open(dockerfile) as f:
for line in f:
parts = line.split()
if parts and parts[0].lower() == "cmd":
cmd = line
elif parts and parts[0].lower() == "entrypoint":
entrypoint = line
return (entrypoint or 'ENTRYPOINT []', cmd or 'CMD []')
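    # Example: for a Dockerfile containing 'ENTRYPOINT ["/bin/app"]' and no
    # CMD line, get_changes returns the entrypoint line plus the default
    # 'CMD []'; commit() below applies these with `docker commit -c`.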
def builder_hash(self, dockerfile, args):
result = hashlib.sha1()
with open(dockerfile) as fd:
result.update(fd.read())
result.update("--")
for a in sorted(args.keys()):
result.update(a)
result.update("--")
result.update(args[a])
result.update("--")
return result.hexdigest()
def builder_prefix(self, name):
return "forge_%s" % name
def find_builders(self, name):
builder_prefix = self.builder_prefix(name)
containers = sh("docker", "ps", "-qaf", "name=%s" % builder_prefix, "--format", "{{.ID}} {{.Names}}")
for line in containers.output.splitlines():
id, builder_name = line.split()
yield id, builder_name
@task()
def builder(self, directory, dockerfile, name, version, args, builder=None):
# We hash the buildargs and Dockerfile so that we reconstruct
# the builder container if anything changes. This might want
# to be extended to cover other files the Dockerfile
# references somehow at some point. (Maybe we could use the
# spec stuff we use in .forgeignore?)
builder_name = "%s_%s" % (self.builder_prefix(name), self.builder_hash(dockerfile, args))
cid = None
for id, bname in self.find_builders(name):
if bname == builder_name:
cid = id
else:
Builder(self, id).kill()
if not cid:
            image = self.build(directory, dockerfile, name, version, args, builder=builder)
cid = sh("docker", "run", "--rm", "--name", builder_name, "-dit", "--entrypoint", "/bin/sh",
image).output.strip()
return Builder(self, cid, self.get_changes(dockerfile))
@task()
def clean(self, name):
for id, bname in self.find_builders(name):
Builder(self, id).kill()
@task()
def validate(self, name="forge_test"):
test_image = os.environ.get("FORGE_SETUP_IMAGE", "registry.hub.docker.com/datawire/forge-setup-test:1")
self.pull(test_image)
version = "dummy"
self.tag(test_image, name, version)
self.push(name, version)
assert self.remote_exists(name, version)
@task()
def run(self, name, version, cmd, *args):
return sh("docker", "run", "--rm", "-it", "--entrypoint", cmd, self.image(name, version), *args)
class Builder(object):
def __init__(self, docker, cid, changes=()):
self.docker = docker
self.cid = cid
self.changes = changes
def run(self, *args):
# XXX: for some reason when we put a -t here it messes up the
# terminal output
return sh("docker", "exec", "-i", self.cid, *args)
def cp(self, source, target):
return sh("docker", "cp", source, "{0}:{1}".format(self.cid, target))
def commit(self, name, version):
args = []
for change in self.changes:
args.append("-c")
args.append(change)
args.extend((self.cid, self.docker.image(name, version)))
return sh("docker", "commit", *args)
def kill(self):
sh("docker", "kill", self.cid, expected=(0, 1))
import json
class Docker(DockerBase):
def __init__(self, registry, namespace, user, password, verify=True):
DockerBase.__init__(self)
self.registry = registry
self.namespace = namespace
self.user = user
self.password = password
self.verify = verify
self._run_login = bool(self.user)
if not self.user:
docker_config = os.path.join(os.environ.get("HOME"), ".docker/config.json")
if os.path.exists(docker_config):
with open(docker_config) as fd:
cfg = json.load(fd)
auths = cfg.get("auths", {})
auth = auths.get(self.registry, {}).get("auth")
if auth:
self.user, self.password = base64.decodestring(auth).split(":")
if not self._run_login and not self.user:
raise TaskError("unable to locate docker credentials, please run `docker login %s`" % self.registry)
@task()
def image(self, name, version):
return image(self.registry, self.namespace, name, version)
def _do_login(self):
if self._run_login:
sh("docker", "login", "-u", self.user, "-p", Secret(self.password), self.registry)
@task()
def registry_get(self, api):
url = "https://%s/v2/%s" % (self.registry, api)
response = get(url, auth=(self.user, self.password),
headers={"Accept": 'application/vnd.docker.distribution.manifest.v2+json'},
verify=self.verify)
if response.status_code == 401:
challenge = response.headers['Www-Authenticate']
if challenge.startswith("Bearer "):
challenge = challenge[7:]
opts = urllib2.parse_keqv_list(urllib2.parse_http_list(challenge))
authresp = get("{realm}?service={service}&scope={scope}".format(**opts), auth=(self.user, self.password),
verify=self.verify)
if authresp.ok:
token = authresp.json()['token']
response = get(url, headers={'Authorization': 'Bearer %s' % token},
verify=self.verify)
else:
raise TaskError("problem authenticating with docker registry: [%s] %s" % (authresp.status_code,
authresp.content))
return response
@task()
def repo_get(self, name, api):
return self.registry_get("%s/%s/%s" % (self.namespace, name, api))
@task()
def remote_exists(self, name, version):
self._login()
img = self.image(name, version)
if img in self.image_cache:
return self.image_cache[img]
response = self.repo_get(name, "manifests/%s" % version)
result = response.json()
# v1 and v2 manifest schemas look a bit different
if 'fsLayers' in result or 'layers' in result:
self.image_cache[img] = True
return True
elif 'errors' in result and result['errors']:
if result['errors'][0]['code'] in ('MANIFEST_UNKNOWN', 'NAME_UNKNOWN'):
self.image_cache[img] = False
return False
raise TaskError(response.content)
class GCRDocker(Docker):
def __init__(self, url, project, key):
Docker.__init__(self, url, project, "_json_key" if key else "_token", key)
def _do_login(self):
if self.user == "_token":
self.password = sh("gcloud", "auth", "print-access-token",
output_transform = lambda x: "<OUTPUT_ELIDED>").output.strip()
Docker._do_login(self)
def _get_account():
sts = boto3.client('sts')
return sts.get_caller_identity()["Account"]
def _get_region():
return boto3.Session().region_name
class ECRDocker(DockerBase):
def __init__(self, account=None, region=None, aws_access_key_id=None, aws_secret_access_key=None):
DockerBase.__init__(self)
self.account = account or _get_account()
self.region = region or _get_region()
kwargs = {}
if aws_access_key_id: kwargs['aws_access_key_id'] = aws_access_key_id
if aws_secret_access_key: kwargs['aws_secret_access_key'] = aws_secret_access_key
self.ecr = boto3.client('ecr', self.region, **kwargs)
self.url = "{}.dkr.ecr.{}.amazonaws.com".format(self.account, self.region)
@property
def registry(self):
return self.url
@property
def namespace(self):
return None
def _do_login(self):
response = self.ecr.get_authorization_token(registryIds=[self.account])
data = response['authorizationData'][0]
token = data['authorizationToken']
user, password = base64.decodestring(token).split(":")
proxy = data['proxyEndpoint']
sh("docker", "login", "-u", user, "-p", Secret(password), proxy)
@task()
def image(self, name, version):
return "{}/{}:{}".format(self.url, name, version)
#return image(self.registry, self.namespace, name, version)
def _create_repo(self, name):
try:
self.ecr.create_repository(repositoryName=name)
task.info('repository {} created'.format(name))
except self.ecr.exceptions.RepositoryAlreadyExistsException, e:
task.info('repository {} already exists'.format(name))
@task()
def remote_exists(self, name, version):
try:
task.info('checking for remote version: %r' % version)
response = self.ecr.describe_images(registryId=self.account,
repositoryName=name,
imageIds=[{'imageTag': version}])
            tags = set(t for detail in response['imageDetails']
                       for t in detail['imageTags'])
return version in tags
except self.ecr.exceptions.ImageNotFoundException, e:
return False
except self.ecr.exceptions.RepositoryNotFoundException, e:
return False
class LocalDocker(DockerBase):
def image(self, name, version):
return "{}:{}".format(name, version)
def remote_exists(self, name, version):
return False
def needs_push(self, name, version):
return False
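# Example (illustrative) use of the registry helpers above. Only methods
# defined in this file are used; the account/region values are made up:
#
#     docker = ECRDocker(region="us-east-1")
#     img = docker.image("web", "1.0")
#     # e.g. "123456789012.dkr.ecr.us-east-1.amazonaws.com/web:1.0"
#     if not docker.remote_exists("web", "1.0"):
#         pass  # build and push `img` here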
|
|
import os
import sys
sys.path.append(os.path.join(
os.path.dirname(os.path.realpath(__file__)), ".."))
from db.db_manager import db_sync_manager
from mapper.utils.filter import PathFinderTNtoSDNFilterUtils as FilterUtils
from mapper.utils.format import PathFinderTNtoSDNFormatUtils as FormatUtils
from mapper.utils.combination import PathFinderTNtoSDNCombinationUtils \
as CombinationUtils
from mapper.utils.org import PathFinderTNtoSDNOrganisationUtils as OrgUtils
from pprint import pprint
# import itertools
class PathFinderTNtoSDN(object):
def __init__(self, source_tn, destination_tn, *args, **kwargs):
# CIDs of source and destination TN endpoints
self.src_dom = source_tn
self.dst_dom = destination_tn
# Link type can be "nsi" or "gre". Empty means "all"
self.link_type = kwargs.get("link_type", "")
# Filters to match against required switches
self.src_of_cids = kwargs.get("src_of_switch_cids", [])
self.dst_of_cids = kwargs.get("dst_of_switch_cids", [])
self.of_cids_check_by_auth = kwargs.get(
"of_switch_cids_check_by_auth", False)
# Dummy list to reduce lines of code
self.src_dst_values = ["src", "dst"]
# Nodes and links from database
self.tn_nodes = [x for x in db_sync_manager.get_tn_nodes()]
self.se_links = [x for x in db_sync_manager.get_se_links()]
# Mapping structure to be returned is a list of possible src-dst paths
self.mapping_tn_se_of = []
# Update with parameters passed
self.__dict__.update(kwargs)
def format_verify_tn_interface(self, tn_interface):
# Ensure that the TN interfaces match with their original names
# under resource.tn.node. This is performed to restore the
# component_id values, previously changed
tn_interfaces_cids = self.get_tn_interfaces_cids(clean=False)
return FormatUtils.format_verify_tn_interface(
tn_interfaces_cids, tn_interface)
def get_tn_interfaces_cids(self, clean=False):
# Return a list with the component_id values for the TN interfaces
tn_interfaces = set()
for tn_node in self.tn_nodes:
tn_interfaces.update(
FormatUtils.get_tn_interfaces_cid_from_node(tn_node, clean))
return tn_interfaces
def get_se_interfaces_cids(self, clean=False):
# Return a list with the component_id values for the SE interfaces
se_interfaces = set()
for se_link in self.se_links:
se_interfaces.add(
FilterUtils.get_se_interfaces_cid_from_link(se_link, clean))
return se_interfaces
def find_tn_interfaces_for_domain(self, domain_name):
# Given a domain name (e.g. "kddi", "aist"), find possible TN ifaces
tn_interfaces_cids = self.get_tn_interfaces_cids(clean=True)
domain_names_alt = OrgUtils.get_organisation_mappings(domain_name)
return FilterUtils.find_tn_interfaces_for_domain(
tn_interfaces_cids, domain_names_alt, domain_name)
def filter_tn_interfaces_by_type(self, tn_interfaces_cids, link_type=""):
return FilterUtils.filter_tn_interfaces_by_type(
tn_interfaces_cids, link_type)
def find_se_interfaces_for_tn_interface(self, tn_interface):
return FilterUtils.find_se_interfaces_for_tn_interface(
self.se_links, tn_interface)
def find_se_interfaces_for_domain_names(self, src_domain, dst_domain):
return FilterUtils.find_se_interfaces_for_domain_names(
self.se_links, OrgUtils.organisation_name_mappings,
src_domain, dst_domain)
def find_sdn_interfaces_for_se_interface(self, se_interface,
negative_filter=[],
possitive_filter=[""]):
return FilterUtils.find_sdn_interfaces_for_se_interface(
self.se_links, se_interface, negative_filter, possitive_filter)
def find_se_sdn_links_for_se_node(self, se_node, negative_filter=[],
possitive_filter=[""]):
return FilterUtils.find_se_sdn_links_for_se_node(
self.se_links, se_node, negative_filter, possitive_filter)
def find_path_tn(self):
# Retrieve list of CIDs for TNRM interfaces
tn_interfaces_cids = self.get_tn_interfaces_cids(clean=True)
# Get proper TN interfaces for both SRC and DST TN interfaces
self.mapping_tn_se_of_src_partial = {}
self.mapping_tn_se_of_dst_partial = {}
# Get proper TN interfaces for (SRC, DST) TN interface
for src_dst_value in self.src_dst_values:
# Do a first clean of SRC and DST interface
src_dst_cid = FormatUtils.clean_tn_stp_cid(
getattr(self, "%s_dom" % src_dst_value))
dst_src_tn_interface_found = False
            # Attribute names are generated from "src"/"dst" so that both
            # endpoints can be processed in a single place
for tn_interface_cid in tn_interfaces_cids:
if src_dst_cid in tn_interface_cid \
and src_dst_cid.startswith("urn"):
dst_src_tn_interface_found = True
break
if dst_src_tn_interface_found is True:
setattr(self, "tn_candidates_%s" %
src_dst_value, [src_dst_cid])
else:
                # Set is converted to a list for convenience
                list_interfaces = map(
                    list, self.find_tn_interfaces_for_domain(src_dst_cid))[0]
# NOTE: only the first TN interface is retrieved...
# Filter by link type, if requested by user
setattr(self, "tn_candidates_%s" % src_dst_value, list(
self.filter_tn_interfaces_by_type(
list_interfaces, self.link_type)))
# Initialize structure with dictionary and append SRC and DST
# interfaces to the set
setattr(self, "mapping_tn_se_of_%s_partial" %
src_dst_value, {"tn": set()})
for tn_candidate in getattr(self, "tn_candidates_%s" %
src_dst_value):
mapping_partial = getattr(self, "mapping_tn_se_of_%s_partial"
% src_dst_value)
mapping_partial["tn"].add(tn_candidate)
# Place every path into the final structure
# combinations_src_dst_stps = zip(self.\
# mapping_tn_se_of_src_partial["tn"],
# self.mapping_tn_se_of_dst_partial["tn"])
# Find all possible combinations (order-independent)
src_stps = self.mapping_tn_se_of_src_partial["tn"]
dst_stps = self.mapping_tn_se_of_dst_partial["tn"]
combinations_src_dst_stps = CombinationUtils.\
yield_combinations_stp_pairs(src_stps, dst_stps)
# Filter out combinations whose STP have different types (i.e. NSI-GRE)
combinations_src_dst_stps_filter = []
for src_dst_stp in combinations_src_dst_stps:
stp_link_tmp = FilterUtils.ensure_same_type_tn_interfaces(
[src_dst_stp[0], src_dst_stp[1]])
if len(stp_link_tmp) == 2:
combinations_src_dst_stps_filter.append(stp_link_tmp)
combinations_src_dst_stps = combinations_src_dst_stps_filter
for tn_src_dst_pair in combinations_src_dst_stps:
# Tuple: 1st element (src), 2nd element (dst)
self.mapping_tn_se_of.append({
"src": {"tn": tn_src_dst_pair[0]},
"dst": {"tn": tn_src_dst_pair[1]}})
def find_path_se(self):
# Get SE interfaces for both SRC and DST TN interfaces
for path_source in self.mapping_tn_se_of:
for src_dst_value in self.src_dst_values:
# Preparing list of links for SE-SDN
path_source[src_dst_value]["links"] = []
se_candidates = self.\
find_se_interfaces_for_tn_interface(
path_source[src_dst_value]["tn"])
# Fill mapping structure
path_source[src_dst_value]["se"] = ""
if len(se_candidates) > 0:
path_source[src_dst_value]["se"] = se_candidates[0]
# Get SE interfaces without previous TN info
# (case of static links between islands)
# Assumption: name of 2 different islands/domains is provided
if len(self.mapping_tn_se_of) == 0:
partial_mapping = self.\
find_se_interfaces_for_domain_names(self.src_dom, self.dst_dom)
mapping_tn_se_of_path = {}
for src_dst_value in self.src_dst_values:
src_dst_value_struct = {}
for part in partial_mapping:
                src_dst_domain = getattr(self, "%s_dom" % src_dst_value)
index_serm = [src_dst_domain in l and "serm"
in l for l in list(part)].index(True)
index_sdnrm = len(part) - index_serm - 1
src_dst_value_struct = {}
src_dst_value_struct["se"] = part[index_serm]
part_mapping_sdn = part[index_sdnrm] if "ofam" \
in part[index_sdnrm] else None
# Only add proper links structure when both
# endpoints (SDN, SE) are correct
src_dst_value_struct["links"] = []
if part_mapping_sdn is not None:
src_dst_value_struct["links"] = \
[{"se": part[index_serm], "sdn": part_mapping_sdn}]
# Add SE-SE paths for SRC and DST
mapping_tn_se_of_path[src_dst_value] = src_dst_value_struct
# Append to final structure
self.mapping_tn_se_of.append(mapping_tn_se_of_path)
def find_path_sdn(self):
# Get SDN interfaces for (SRC, DST) SE interface
negative_filter = ["tnrm"]
for path_source in self.mapping_tn_se_of:
for src_dst_value in self.src_dst_values:
# Domains connected through VPN may not have SE links (skip)
if "se" not in path_source[src_dst_value]:
return
# possitive_filter_of_switches = [ FormatUtils.\
# remove_port_cid(f) for f in getattr(self, \
# "%s_of_cids" % src_dst_value) ]
se_interface = path_source[src_dst_value]["se"]
# Possible SE-SDN links
sdn_candidates = []
if se_interface is not None and len(se_interface) > 0:
# Search for *every* connection between SE and SDN devices
se_node = FormatUtils.remove_port_cid(se_interface)
sdn_candidates = self.find_se_sdn_links_for_se_node(
se_node, negative_filter)
for se_sdn_link in sdn_candidates:
se_sdn_link = \
FormatUtils.format_verify_se_sdn_links(se_sdn_link)
path_source[src_dst_value]["links"].append(se_sdn_link)
def format_structure(self):
# Restore the full CID of the source and destination TN interfaces
for mapping in self.mapping_tn_se_of:
for src_dst_value in self.src_dst_values:
# Domains connected through static links may not have "tn" data
if "tn" in mapping[src_dst_value]:
mapping[src_dst_value]["tn"] = self.\
format_verify_tn_interface(
mapping[src_dst_value]["tn"])
# Remove paths where either source or destination are invalid
self.mapping_tn_se_of = FilterUtils.\
prune_invalid_paths(self.mapping_tn_se_of)
self.mapping_tn_se_of = FilterUtils.prune_unlinked_dpids(
self.mapping_tn_se_of, self.src_of_cids,
self.dst_of_cids, self.of_cids_check_by_auth)
return self.mapping_tn_se_of
def find_paths(self):
# Find path from given TN to SDN, passing through SE
self.find_path_tn()
self.find_path_se()
self.find_path_sdn()
# Prepare structure (clean up, correct, etc)
self.mapping_tn_se_of = self.format_structure()
return self.mapping_tn_se_of
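    # The structure returned by find_paths() is a list of candidate paths;
    # each entry looks roughly like (illustrative):
    #   {"src": {"tn": <tn cid>, "se": <se cid>,
    #            "links": [{"se": <se cid>, "sdn": <sdn cid>}]},
    #    "dst": {...}}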
if __name__ == "__main__":
error_help = "Error using mapper. Usage: %s <src> <dst> [nsi|gre]" \
% (__file__)
# SRC and DST are required
if len(sys.argv) >= 3:
src_name = sys.argv[1]
dst_name = sys.argv[2]
else:
# src_name = "urn:publicid:IDN+fms:aist:tnrm+stp+urn:ogf:" +
# "network:pionier.net.pl:2013:topology:felix-ge-1-0-3"
# dst_name = "urn:publicid:IDN+fms:aist:tnrm+stp+urn:ogf:" +
# "network:jgn-x.jp:2013:topology:bi-felix-kddi-stp1"
src_name = "psnc"
dst_name = "aist"
# sys.exit(error_help)
# Link type is optional
if len(sys.argv) >= 4:
link_type = sys.argv[3]
else:
# link_type = "nsi"
link_type = ""
# src_of_switch_cids = ["i2cat"]
# dst_of_switch_cids = ["aist"]
src_of_switch_cids = [
'urn:publicid:IDN+openflow:ocf:psnc:ofam+datapath' +
'+00:00:08:81:f4:88:f5:b0_13',
'urn:publicid:IDN+openflow:ocf:psnc:' +
'ofam+datapath+00:00:08:81:f4:88:f5:b0_17']
dst_of_switch_cids = [
'urn:publicid:IDN+openflow:ocf:aist:ofam+datapath' +
'00:00:00:00:00:00:00:01_3',
'urn:publicid:IDN+openflow:ocf:aist:ofam+datapath' +
'00:00:00:00:00:00:00:01_5']
# Note: restrictions (src_of_switch_cids, dst_of_switch_cids)
# only to be explicitly passed (otherwise it will probably fail)
optional = {
# "src_of_switch_cids": src_of_switch_cids,
# "dst_of_switch_cids": dst_of_switch_cids,
"of_switch_cids_check_by_auth": True,
"link_type": link_type,
}
path_finder_tn_sdn = PathFinderTNtoSDN(src_name, dst_name, **optional)
pprint(path_finder_tn_sdn.find_paths())
|
|
# Copyright (c) 2015 Scality
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
from manila.common import constants as const
from manila import exception
from manila.i18n import _
from manila.i18n import _LE
from manila.i18n import _LI
from manila.i18n import _LW
from manila.share import driver
from manila import utils
LOG = log.getLogger(__name__)
share_opts = [
cfg.StrOpt('nfs_export_ip', help='IP reachable from the tenant networks '
'on which the NFS shares are exposed'),
cfg.StrOpt('nfs_management_host',
help='IP/hostname of the machine hosting the scality-sfused '
'NFS connector (on which scality-manila-utils must be '
'installed)'),
cfg.IntOpt('nfs_management_port',
default=22,
help='Port that sshd is listening on, on the '
'nfs_management_host machine'),
cfg.StrOpt('smb_export_ip', help='IP reachable from the tenant networks '
'on which the SMB shares are exposed'),
cfg.StrOpt('smb_management_host',
help='IP/hostname of the machine hosting the scality-sfused '
'SMB connector (on which scality-manila-utils must be '
'installed)'),
cfg.IntOpt('smb_management_port',
default=22,
help='Port that sshd is listening on, on the '
'smb_management_host machine'),
cfg.StrOpt('smb_export_root', help='Full path on the smb_management_host '
'machine of a SOFS directory where the SMB share directories '
'will be created'),
cfg.StrOpt('management_user',
help='User for management tasks'),
cfg.StrOpt('ssh_key_path',
help='Path to the SSH key of the management user'),
]
CONF = cfg.CONF
CONF.register_opts(share_opts)
class ScalityShareDriver(driver.ShareDriver):
"""Scality Ring driver for Manila.
Supports NFS through the Sfused NFS connector.
Supports SMB through the Sfused SMB connector.
"""
VERSION = '1.0'
def __init__(self, *args, **kwargs):
super(ScalityShareDriver, self).__init__(False, *args, **kwargs)
self.configuration.append_config_values(share_opts)
self._helpers = {}
def do_setup(self, context):
super(ScalityShareDriver, self).do_setup(context)
LOG.debug('Validating Scality Driver configuration')
msg = _("Configuration error: missing value for %(item)s. "
"Check the manila.conf file")
# Check generic configuration
for config_item in ('management_user', 'ssh_key_path'):
# Accept neither None nor ""
if getattr(self.configuration, config_item) in (None, ''):
raise exception.InvalidParameterValue(
err=msg % {'item': config_item})
# Check protocol specific configuration
group_of_config_items = (
('NFS', ('nfs_export_ip', 'nfs_management_host')),
('SMB', ('smb_export_ip', 'smb_management_host',
'smb_export_root'))
)
for protocol, config_keys in group_of_config_items:
config_values = [getattr(self.configuration, k)
for k in config_keys]
if not any(config_values):
# All the values are empty -> we don't want to configure
# this protocol
continue
if not all(config_values):
# Some values are missing
items = ' or '.join(config_keys)
raise exception.InvalidParameterValue(
err=msg % {'item': items})
proto = protocol.lower()
ip = getattr(self.configuration, proto + '_management_host')
port = getattr(self.configuration, proto + '_management_port')
ssh_pool = utils.SSHPool(
ip=ip, port=port, conn_timeout=None,
login=self.configuration.management_user,
privatekey=self.configuration.ssh_key_path,
max_size=1)
export_ip = getattr(self.configuration, proto + '_export_ip')
if protocol == 'NFS':
self._helpers['NFS'] = NFSHelper(ssh_pool, export_ip)
elif protocol == 'SMB':
export_root = self.configuration.smb_export_root
# Manila refers to this protocol as CIFS.
# It's better known as SMB at Scality
self._helpers['CIFS'] = CIFSHelper(ssh_pool, export_ip,
export_root)
else:
err = ("Unknown protocol %(proto)s while initializing "
"%(driver)s") % {"proto": protocol,
"driver": self.__class__.__name__}
raise exception.ManilaException(err)
def _get_helper(self, share):
"""Get the correct helper instance based on the share protocol."""
helper = self._helpers.get(share['share_proto'])
if helper:
return helper
else:
reason = _("Protocol '%s' is wrong, unsupported or "
"disabled") % share['share_proto']
raise exception.InvalidShare(reason=reason)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met.
This is called by `manila.share.manager.ShareManager` right after the
call to `do_setup`
"""
for proto, helper in list(self._helpers.items()):
if not helper.setup_is_valid():
del self._helpers[proto]
if not self._helpers:
msg = _("ScalityShareDriver is not properly initialized")
raise exception.ManilaException(msg)
def allow_access(self, context, share, access, share_server=None):
# NOTE(vponomaryov): use direct verification for case some additional
# level is added.
access_level = access['access_level']
if access_level not in (const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO):
raise exception.InvalidShareAccessLevel(level=access_level)
self._get_helper(share).allow_access(share, access)
def deny_access(self, context, share, access, share_server=None):
self._get_helper(share).deny_access(share, access)
def delete_share(self, context, share, share_server=None):
try:
self._get_helper(share).delete_share(share)
except exception.InvalidShare as exc:
LOG.warning(_LW("Delete share failed with : %s"), exc)
def create_share(self, context, share, share_server=None):
return self._get_helper(share).create_share(share)
def ensure_share(self, context, share, share_server=None):
return self._get_helper(share).ensure_share(share)
def _update_share_stats(self):
backend_name = self.configuration.safe_get(
'share_backend_name') or 'Scality Ring Driver'
# If both protocols are enabled, protocol must be NFS_CIFS not CIFS_NFS
protocol = '_'.join(sorted(self._helpers.keys(), reverse=True))
if not protocol:
msg = _("ScalityShareDriver is not properly initialized")
raise exception.ManilaException(msg)
stats = {
'share_backend_name': backend_name,
'vendor_name': 'Scality',
'storage_protocol': protocol,
'driver_version': self.VERSION,
}
super(ScalityShareDriver, self)._update_share_stats(stats)
class NASHelperBase(object):
"""Base class for protocol specific share management tasks."""
# Cli exit codes
EXPORT_NOT_FOUND = 10
ACCESS_EXISTS = 11
ACCESS_NOT_FOUND = 12
HAS_GRANTS = 13
EXPORT_EXISTS = 14
def __init__(self, ssh_pool, export_ip):
self.ssh_pool = ssh_pool
self.export_ip = export_ip
self.optional_args = ""
def _management_call(self, command):
"""Send a command over ssh to the ring management host.
:param command: command to execute
:param command: string
:returns: tuple of (stdout, stderr) with command output
"""
cmd = 'sudo scality-manila-utils %s %s %s' % (
self.PROTOCOL.lower(), self.optional_args, command)
LOG.debug("Management execute: %s", cmd)
with self.ssh_pool.item() as connection:
result = processutils.ssh_execute(connection, cmd)
return result
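    # For example (illustrative), an NFS grant rendered by _management_call
    # looks like:
    #   sudo scality-manila-utils nfs  grant <share_id> 192.168.0.0/24 rw
    # (the doubled space comes from the empty optional_args for NFS).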
@staticmethod
def _enforce_ip_acl(access):
"""Check that the access is IP based."""
if access['access_type'] != 'ip':
reason = 'Only IP access type allowed'
raise exception.InvalidShareAccess(reason)
def setup_is_valid(self):
"""Check that the management host is up and ready."""
LOG.info(_LI('Checking management server prerequisites'))
try:
self._management_call('check')
except processutils.ProcessExecutionError as e:
err = _LE("Requirements are not met on the management server."
"Check the manila.conf file and the "
"configuration of the management server. Protocol "
"%(proto)s is now disabled.")
LOG.error(err, {'proto': self.PROTOCOL})
LOG.error(e)
return False
LOG.info(_LI("Scality driver is properly configured for protocol "
"%(proto)s"), {'proto': self.PROTOCOL})
return True
def _get_allow_access_cmd(self, share, access):
raise NotImplementedError()
def allow_access(self, share, access):
self._enforce_ip_acl(access)
command = self._get_allow_access_cmd(share, access)
try:
self._management_call(command)
except processutils.ProcessExecutionError as e:
if e.exit_code == self.ACCESS_EXISTS:
raise exception.ShareAccessExists(
access_type=access['access_type'],
access=access['access_to']
)
elif e.exit_code == self.EXPORT_NOT_FOUND:
msg = _("'%(name)s' (%(id)s) not found") % {
'name': share['name'], 'id': share['id']}
raise exception.InvalidShare(reason=msg)
else:
raise
def deny_access(self, share, access):
self._enforce_ip_acl(access)
command = 'revoke %s %s' % (share['id'], access['access_to'])
try:
self._management_call(command)
except processutils.ProcessExecutionError as e:
if e.exit_code == self.ACCESS_NOT_FOUND:
# Access rule can be in error state so not properly set
# in the backend. So don't raise.
msg = _LI("Fail to revoke access of %(access_to)s on share"
"%(name)s (%(id)s): the grant didn't exist in the "
"backend") % {'id': share['id'],
'access_to': access['access_to'],
'name': share['name']}
LOG.info(msg)
elif e.exit_code == self.EXPORT_NOT_FOUND:
msg = _("'%(name)s' (%(id)s) not found") % {
'name': share['name'], 'id': share['id']}
raise exception.InvalidShare(reason=msg)
else:
raise
def delete_share(self, share):
command = 'wipe %s' % share['id']
try:
self._management_call(command)
except processutils.ProcessExecutionError as e:
if e.exit_code == self.HAS_GRANTS:
msg = _("Unable to remove share with granted access")
raise exception.ShareBackendException(msg=msg)
elif e.exit_code == self.EXPORT_NOT_FOUND:
msg = _("'%(name)s' (%(id)s) not found") % {
'name': share['name'], 'id': share['id']}
raise exception.InvalidShare(reason=msg)
else:
raise
def create_share(self, share):
command = 'create %s' % share['id']
try:
self._management_call(command)
except processutils.ProcessExecutionError as e:
if e.exit_code == self.EXPORT_EXISTS:
msg = _("Share '%(name)s' (%(id)s) already defined.") % {
'name': share['name'], 'id': share['id']}
raise exception.ShareBackendException(msg=msg)
else:
raise
return self._location_from_id(share['id'])
def ensure_share(self, share):
# Export locations are derived from the `export_ip` configuration
# parameter, and may thus change between service restarts. It is
        # therefore always returned here if the share exists.
try:
self._management_call('get %s' % share['id'])
except processutils.ProcessExecutionError as e:
if e.exit_code == self.EXPORT_NOT_FOUND:
msg = _("'%(name)s' (%(id)s) not found") % {
'name': share['name'], 'id': share['id']}
raise exception.InvalidShare(reason=msg)
else:
raise
return self._location_from_id(share['id'])
def _location_from_id(self, share_id):
"""Format an export location from a share_id.
:param share_id: share id to format
:type share_id: string
:returns: string
"""
raise NotImplementedError()
class CIFSHelper(NASHelperBase):
PROTOCOL = "SMB"
def __init__(self, ssh_pool, export_ip, export_root):
super(CIFSHelper, self).__init__(ssh_pool, export_ip)
self.optional_args = "--root-export %s" % export_root
@staticmethod
def _get_allow_access_cmd(share, access):
if access['access_level'] != const.ACCESS_LEVEL_RW:
raise exception.InvalidShareAccessLevel(
level=access['access_level'])
        # scality-manila-utils implicitly sets the access level to RW
# for SMB. It doesn't expect the `access['access_level']` argument
return 'grant %s %s' % (share['id'], access['access_to'])
def deny_access(self, share, access):
if access['access_level'] != const.ACCESS_LEVEL_RW:
return
super(CIFSHelper, self).deny_access(share, access)
def _location_from_id(self, share_id):
return '\\\\%s\\%s' % (self.export_ip, share_id)
class NFSHelper(NASHelperBase):
PROTOCOL = "NFS"
@staticmethod
def _get_allow_access_cmd(share, access):
return 'grant %s %s %s' % (share['id'], access['access_to'],
access['access_level'])
def _location_from_id(self, share_id):
return "%s:/%s" % (self.export_ip, share_id)
|
|
from unittest import mock
from unittest.mock import patch
from ddt import data, ddt
from rest_framework import status, test
from waldur_core.structure import signals as structure_signals
from waldur_core.structure.models import ServiceSettings
from waldur_mastermind.marketplace import models as marketplace_models
from waldur_mastermind.marketplace.tests import factories as marketplace_factories
from waldur_mastermind.marketplace.tests.factories import OfferingFactory
from waldur_mastermind.marketplace_openstack import (
INSTANCE_TYPE,
TENANT_TYPE,
VOLUME_TYPE,
)
from waldur_mastermind.marketplace_openstack.tests.mocks import (
MOCK_FLAVOR,
MOCK_INSTANCE,
MOCK_TENANT,
MOCK_VOLUME,
)
from waldur_openstack.openstack import models
from waldur_openstack.openstack.tests.factories import TenantFactory
from waldur_openstack.openstack.tests.fixtures import OpenStackFixture
from waldur_openstack.openstack.tests.test_tenant import BaseTenantActionsTest
from waldur_openstack.openstack.tests.unittests.test_backend import BaseBackendTestCase
from waldur_openstack.openstack_tenant.tests.factories import (
InstanceFactory,
VolumeFactory,
)
from waldur_openstack.openstack_tenant.tests.fixtures import OpenStackTenantFixture
from .mocks import MockTenant
from .utils import BaseOpenStackTest
class ImportAsMarketplaceResourceTest(BaseOpenStackTest):
def setUp(self):
super(ImportAsMarketplaceResourceTest, self).setUp()
self.fixture = OpenStackTenantFixture()
def test_import_volume_as_marketplace_resource(self):
volume = self.fixture.volume
marketplace_factories.OfferingFactory(
scope=self.fixture.openstack_tenant_service_settings, type=VOLUME_TYPE
)
structure_signals.resource_imported.send(
sender=volume.__class__, instance=volume,
)
self.assertTrue(
marketplace_models.Resource.objects.filter(scope=volume).exists()
)
def test_import_instance_as_marketplace_resource(self):
instance = self.fixture.instance
marketplace_factories.OfferingFactory(
scope=self.fixture.openstack_tenant_service_settings, type=INSTANCE_TYPE
)
structure_signals.resource_imported.send(
sender=instance.__class__, instance=instance,
)
self.assertTrue(
marketplace_models.Resource.objects.filter(scope=instance).exists()
)
def test_import_tenant_as_marketplace_resource(self):
tenant = self.fixture.tenant
self.import_tenant(tenant)
self.assertTrue(
marketplace_models.Resource.objects.filter(scope=tenant).exists()
)
def test_when_tenant_is_imported_volume_and_instance_offerings_are_created(self):
tenant = self.fixture.tenant
self.import_tenant(tenant)
self.assertTrue(
marketplace_models.Offering.objects.filter(type=INSTANCE_TYPE).exists()
)
self.assertTrue(
marketplace_models.Offering.objects.filter(type=VOLUME_TYPE).exists()
)
def import_tenant(self, tenant):
marketplace_factories.OfferingFactory(
scope=tenant.service_settings, type=TENANT_TYPE
)
structure_signals.resource_imported.send(
sender=tenant.__class__, instance=tenant,
)
class BaseInstanceImportTest(BaseBackendTestCase, BaseOpenStackTest):
def setUp(self):
super(BaseInstanceImportTest, self).setUp()
self.fixture = OpenStackTenantFixture()
self.offering = marketplace_factories.OfferingFactory(
scope=self.fixture.openstack_tenant_service_settings,
type=INSTANCE_TYPE,
shared=False,
customer=self.fixture.customer,
)
self.mocked_nova().servers.list.return_value = [MOCK_INSTANCE]
self.mocked_nova().servers.get.return_value = MOCK_INSTANCE
self.mocked_nova().flavors.get.return_value = MOCK_FLAVOR
self.mocked_nova().volumes.get_server_volumes.return_value = []
class InstanceImportableResourcesTest(BaseInstanceImportTest):
def setUp(self):
super(InstanceImportableResourcesTest, self).setUp()
self.url = OfferingFactory.get_url(self.offering, 'importable_resources')
self.client.force_authenticate(self.fixture.owner)
def test_importable_instances_are_returned(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
response.data,
[
{
'type': 'OpenStackTenant.Instance',
'name': 'VM-1',
'backend_id': '1',
'description': '',
'extra': [
{'name': 'Runtime state', 'value': 'active'},
{'name': 'Flavor', 'value': 'Standard'},
{'name': 'RAM (MBs)', 'value': 4096},
{'name': 'Cores', 'value': 4},
],
}
],
)
self.mocked_nova().servers.list.assert_called()
self.mocked_nova().flavors.get.assert_called()
class InstanceImportTest(BaseInstanceImportTest):
def setUp(self):
super(InstanceImportTest, self).setUp()
self.url = OfferingFactory.get_url(self.offering, 'import_resource')
self.client.force_authenticate(self.fixture.owner)
def _get_payload(self, backend_id='backend_id'):
return {
'backend_id': backend_id,
'project': self.fixture.project.uuid.hex,
}
@mock.patch(
'waldur_openstack.openstack_tenant.executors.InstancePullExecutor.execute'
)
def test_instance_can_be_imported(self, resource_import_execute_mock):
response = self.client.post(self.url, self._get_payload())
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
resource_import_execute_mock.assert_called()
instance = marketplace_models.Resource.objects.get()
self.assertEqual(instance.backend_id, '1')
def test_existing_instance_cannot_be_imported(self):
InstanceFactory(
service_settings=self.fixture.openstack_tenant_service_settings,
backend_id=MOCK_INSTANCE.id,
)
payload = self._get_payload(MOCK_INSTANCE.id)
response = self.client.post(self.url, payload)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST, response.data
)
class BaseVolumeImportTest(BaseBackendTestCase, test.APITransactionTestCase):
def setUp(self):
super(BaseVolumeImportTest, self).setUp()
self.fixture = OpenStackTenantFixture()
self.offering = marketplace_factories.OfferingFactory(
scope=self.fixture.openstack_tenant_service_settings,
type=VOLUME_TYPE,
shared=False,
customer=self.fixture.customer,
)
self.mocked_cinder().volumes.list.return_value = [MOCK_VOLUME]
self.mocked_cinder().volumes.get.return_value = MOCK_VOLUME
class VolumeImportableResourcesTest(BaseVolumeImportTest):
def setUp(self):
super(VolumeImportableResourcesTest, self).setUp()
self.url = OfferingFactory.get_url(self.offering, 'importable_resources')
self.client.force_authenticate(self.fixture.owner)
def test_importable_volumes_are_returned(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data,
[
{
'type': 'OpenStackTenant.Volume',
'name': 'ssd-volume',
'backend_id': '1',
'description': '',
'extra': [
{'name': 'Is bootable', 'value': False},
{'name': 'Size', 'value': 102400},
{'name': 'Device', 'value': ''},
{'name': 'Runtime state', 'value': 'available'},
],
}
],
)
class VolumeImportTest(BaseVolumeImportTest):
def setUp(self):
super(VolumeImportTest, self).setUp()
self.url = OfferingFactory.get_url(self.offering, 'import_resource')
self.client.force_authenticate(self.fixture.owner)
def test_backend_volume_is_imported(self):
response = self.client.post(self.url, self._get_payload())
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
instance = marketplace_models.Resource.objects.get()
self.assertEqual(instance.backend_id, '1')
def test_backend_volume_cannot_be_imported_if_it_is_registered_in_waldur(self):
volume = VolumeFactory(
service_settings=self.fixture.openstack_tenant_service_settings,
project=self.fixture.project,
)
response = self.client.post(self.url, self._get_payload(volume.backend_id))
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST, response.data
)
def _get_payload(self, backend_id='backend_id'):
return {
'backend_id': backend_id,
'project': self.fixture.project.uuid.hex,
}
@ddt
class TenantImportableResourcesTest(BaseBackendTestCase, BaseTenantActionsTest):
def setUp(self):
super(TenantImportableResourcesTest, self).setUp()
self.offering = marketplace_factories.OfferingFactory(
scope=self.fixture.openstack_service_settings, type=TENANT_TYPE
)
self.url = OfferingFactory.get_url(self.offering, 'importable_resources')
def test_user_can_list_importable_resources(self):
self.client.force_authenticate(self.fixture.staff)
self.mocked_keystone().projects.list.return_value = [
MockTenant(name='First Tenant', id='1'),
MockTenant(name='Second Tenant', id='2'),
]
response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
        self.assertEqual(
response.data,
[
{
'type': 'OpenStack.Tenant',
'name': 'First Tenant',
'description': '',
'backend_id': '1',
},
{
'type': 'OpenStack.Tenant',
'name': 'Second Tenant',
'description': '',
'backend_id': '2',
},
],
)
@data('admin', 'manager', 'owner')
def test_user_does_not_have_permissions_to_list_resources(self, user):
self.client.force_authenticate(getattr(self.fixture, user))
response = self.client.get(self.url)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@ddt
class TenantImportTest(BaseBackendTestCase):
def setUp(self):
super(TenantImportTest, self).setUp()
self.fixture = OpenStackFixture()
self.backend_tenant = TenantFactory.build(
service_settings=self.fixture.openstack_service_settings,
project=self.fixture.project,
)
self.offering = marketplace_factories.OfferingFactory(
scope=self.fixture.openstack_service_settings, type=TENANT_TYPE
)
def test_tenant_is_imported(self):
response = self.import_tenant()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
        self.assertEqual(response.data['backend_id'], self.backend_tenant.backend_id)
self.assertTrue(
models.Tenant.objects.filter(
backend_id=self.backend_tenant.backend_id
).exists()
)
@patch('waldur_core.structure.handlers.event_logger')
def test_event_is_emitted(self, logger_mock):
self.import_tenant()
actual = logger_mock.resource.info.call_args[0][0]
expected = 'Resource {resource_full_name} has been imported.'
self.assertEqual(expected, actual)
@data('admin', 'manager', 'owner')
def test_user_cannot_import_tenant(self, user):
response = self.import_tenant(user)
        self.assertEqual(
response.status_code, status.HTTP_403_FORBIDDEN, response.data
)
def test_tenant_cannot_be_imported_if_backend_id_exists_already(self):
self.backend_tenant.save()
response = self.import_tenant()
        self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST, response.data
)
def test_imported_tenant_has_user_password_and_username(self):
response = self.import_tenant()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
        self.assertEqual(response.data['backend_id'], self.backend_tenant.backend_id)
tenant = models.Tenant.objects.get(backend_id=self.backend_tenant.backend_id)
self.assertIsNotNone(tenant.user_username)
self.assertIsNotNone(tenant.user_password)
def test_imported_tenant_settings_have_username_and_password_set(self):
response = self.import_tenant()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
tenant = models.Tenant.objects.get(backend_id=self.backend_tenant.backend_id)
service_settings = ServiceSettings.objects.get(scope=tenant)
        self.assertEqual(tenant.user_username, service_settings.username)
        self.assertEqual(tenant.user_password, service_settings.password)
@mock.patch('waldur_mastermind.marketplace_openstack.handlers.tasks')
def test_import_instances_and_volumes_if_tenant_has_been_imported(self, mock_tasks):
marketplace_factories.CategoryFactory(default_vm_category=True)
marketplace_factories.CategoryFactory(default_volume_category=True)
response = self.import_tenant()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
mock_tasks.import_instances_and_volumes_of_tenant.delay.assert_called_once()
def import_tenant(self, user='staff'):
self.client.force_authenticate(getattr(self.fixture, user))
payload = {
'backend_id': self.backend_tenant.backend_id,
'project': self.fixture.project.uuid.hex,
}
url = OfferingFactory.get_url(self.offering, 'import_resource')
self.mocked_keystone.return_value.projects.get.return_value = MOCK_TENANT
return self.client.post(url, payload)
|
|
#!/usr/bin/env python3
import csv
import json
import pathlib
import records
from util import (query_file, name_and_status, is_number, finish_point,
TYPE_2_INT, FINISH_2_NAME)
CSV_DATA = pathlib.Path('data/csv')
META_DATA = pathlib.Path('data/meta.json')
TABLES = [
'Ninja', 'Course', 'Obstacle', 'ObstacleResult', 'CourseResult',
'CareerSummary'
]
def insert_ninja(db, row):
"""Add a row to the Ninja table.
    `row` is a CSV entry like
Ian Waggoner,42,M,...,
where the first 3 columns represent a competitor's name, age, and sex.
Returns:
(str, int): (shown status, ninja_id).
"""
name, shown = name_and_status(row[0])
age = row[1].strip() or None
sex = row[2].strip()
if not name or name == 'Name':
return '', -1
first, last = name.split(' ', 1)
out = db.query(
'SELECT ninja_id FROM Ninja WHERE first_name=:f AND last_name=:l',
f=first,
l=last).all()
# TODO: What if two competitors have the same first + last name?
if not out:
with META_DATA.open() as meta:
data = json.load(meta)
info = data.get('{0} {1}'.format(first, last), {})
ninja_id = db.query_file(
'data/sql/insert_ninja.sql',
f=first,
l=last,
s=sex,
a=age,
o=info.get('occupation'),
i=info.get('instagram'),
t=info.get('twitter')).all()[0].ninja_id
else:
ninja_id = out[0].ninja_id
return shown, ninja_id
def insert_course(db, headings, info):
"""Add a row to the Course table.
Args:
headings (List[str]): headings for the current CSV file.
info (List[str]): [city, category, season].
Returns:
int: The ID of the current course.
"""
city = info[0] if info[0] != 'Stage' else 'Las Vegas'
cat = info[1] if not is_number(info[1]) else 'Stage ' + info[1]
course_id = db.query_file(
'data/sql/insert_course.sql', city=city, cat=cat, s=info[2]).all()
return course_id[0].course_id
def insert_obstacles(db, row, info, cid):
"""Add a row to the Obstacle table.
Args:
info (list): [city, category, season]
Returns:
int: The ID of the current course.
"""
size = 0
    for i in range(3, len(row) - 2):  # Skip the first 3 and last 2 columns.
name = row[i]
if name.startswith('Transition'): # It's a transition column.
continue
db.query_file('data/sql/insert_obstacle.sql', title=name, id=cid)
size += 1
db.query(
'UPDATE Course SET size = :s WHERE course_id = :id;', s=size, id=cid)
def insert_obstacle_results(db, row, nid, cid, shown, headings):
"""Add rows to the ObstacleResult table.
Args:
nid (int): An ID of a column in the Ninja table.
        cid (int): An ID of a column in the Course table.
        shown (str): "S", "PS" or "NS".
        headings (list): The CSV headings.
"""
if shown == 'NS': # There are no results.
return
elif shown == 'PS': # There are partial results.
print('Skipping PS ...')
# TODO: Handle PS (alter FAILED_IDS?)
return
i = 0
while i < len(headings) and nid not in FAILED_IDS:
header = headings[i]
if header == 'Gender' or header.startswith('Transition'):
# If the current column is either 'Gender' or 'Transition', we know
# that the next column's header (i + 1) will be the obstacle label.
name = headings[i + 1]
out = db.query(
"""
SELECT obstacle_id FROM Obstacle
WHERE (title=:title AND course_id=:id)
""",
title=name,
id=cid).all()
# Given that the current header is 'Gender' or 'Transition', we
# know that the value at the next column will be the time.
time = row[i + 1]
completed = is_number(time)
if not completed:
FAILED_IDS.append(nid)
time = 0
if header == 'Gender':
# This is the first obstacle and therefore is the only one
# without a transition.
transition = 0
i += 1
else:
transition = row[i]
i += 2
db.query_file(
'data/sql/insert_obstacle_result.sql',
nid=nid,
dur=time,
trans=transition,
comp=completed,
obsid=out[0].obstacle_id)
else:
i += 1
def insert_course_result(db, row, cid, nid, shown, obstacles):
"""Add columns to the CourseResult table.
Args:
nid (int): An ID of a column in the Ninja table.
cid (int) An ID of a column in the Course table.
shown (str): "S", "PS" or "NS".
"""
# time is the second-to-last column.
time = row[-2] or None
# completed is the last column.
completed = row[-1] == 'Completed'
results = db.query_file(
'data/sql/obstacles_by_ninja.sql', nid=nid, comp=completed,
crid=cid).all()[0].count
# Calculate the finish point.
finish = finish_point(row, shown, results, obstacles, completed)
db.query_file(
'data/sql/insert_course_result.sql',
crid=cid,
nid=nid,
dur=time,
fp=finish,
comp=completed).all()
def insert_summary(db):
"""Insert a row into the CareerSummary table.
"""
# Get all course results for ninja_id.
n = len(TYPE_2_INT)
for row in db.query('SELECT ninja_id FROM Ninja').all():
ninja_id = row.ninja_id
completes = [0] * n
finishes = [0] * n
finish_scores = [0] * n
seasons = set()
trend = []
places = []
# Walk through their career history and record course finishes.
for ret in query_file(db, 'results_by_ninja.sql', nid=ninja_id):
int_type = TYPE_2_INT[ret.category]
type_idx = int(int_type / 2) - 1
seasons.add(ret.season)
if ret.completed:
# The course was completed, so there's no fail point.
completes[type_idx] += 1
point = ret.size
else:
# The course wasn't completed, so there's a fail point.
point = ret.finish_point - 1
finishes[type_idx] += int_type + 0.1 * point
finish_scores[type_idx] += int_type + point
trend.append(point)
# Record the fastest competitors on each obstacle for this course.
obs = query_file(db, 'obstacles_by_course.sql', id=ret.course_id)
for ob in obs:
leaders = [
l.ninja_id
for l in query_file(
db, 'leaders.sql', obs_id=ob.obstacle_id)
]
if ninja_id in leaders:
places.append(leaders.index(ninja_id) + 1)
else:
places.append(0)
total = sum(trend) if trend else 0
n_seasons = len(seasons)
        avg_place = sum(places) / len(places) if places else 0
        speed = round(3 * sum(x > 0 for x in places) - avg_place, 3)
        success = 4 * max(finish_scores)
        consistency = total * n_seasons
summary_id = query_file(
db,
'insert_summary.sql',
nid=ninja_id,
best=FINISH_2_NAME.get(max(finishes)),
speed=speed,
success=success,
consistency=consistency,
rating=round(speed + success + consistency, 3),
seasons=n_seasons,
q=completes[0],
f=completes[1],
s=completes[2])
return summary_id
if __name__ == '__main__':
# Reset the database and its tables.
db = records.Database() # Defaults to $DATABASE_URL.
tx = db.transaction()
for table in TABLES:
db.query('DROP TABLE IF EXISTS {0} CASCADE;'.format(table))
db.query_file('data/sql/create_tables.sql')
for f in CSV_DATA.glob('**/*.csv'):
base = f.parts[-1]
        course_info = f.stem.split('-')  # str.strip('.csv') would eat stray chars
print('Reading {} ...'.format(base))
with f.open('rU') as csv_file:
FAILED_IDS = []
reader = csv.reader(csv_file)
headings = next(reader) # Skip the headings
rows = list(reader)
# Insert data
            obstacles = (len(headings) - 4) // 2
course_id = insert_course(db, headings, course_info)
insert_obstacles(db, headings, course_info, course_id)
for i, row in enumerate(rows):
shown, ninja_id = insert_ninja(db, row)
insert_obstacle_results(db, row, ninja_id, course_id, shown,
headings)
insert_course_result(db, row, course_id, ninja_id, shown,
obstacles)
print('Inserting summaries ...')
insert_summary(db)
tx.commit()
|
|
import csv
import datetime
from decimal import Decimal
import json
import operator
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import sys
from peewee import *
from playhouse.db_url import connect
from playhouse.migrate import migrate
from playhouse.migrate import SchemaMigrator
from playhouse.reflection import Introspector
if sys.version_info[0] == 3:
basestring = str
from functools import reduce
class DataSet(object):
def __init__(self, url):
self._url = url
parse_result = urlparse(url)
self._database_path = parse_result.path[1:]
# Connect to the database.
self._database = connect(url)
self._database.connect()
# Introspect the database and generate models.
self._introspector = Introspector.from_database(self._database)
self._models = self._introspector.generate_models(skip_invalid=True)
self._migrator = SchemaMigrator.from_database(self._database)
class BaseModel(Model):
class Meta:
database = self._database
self._base_model = BaseModel
self._export_formats = self.get_export_formats()
self._import_formats = self.get_import_formats()
def __repr__(self):
return '<DataSet: %s>' % self._database_path
def get_export_formats(self):
return {
'csv': CSVExporter,
'json': JSONExporter}
def get_import_formats(self):
return {
'csv': CSVImporter,
'json': JSONImporter}
def __getitem__(self, table):
return Table(self, table, self._models.get(table))
@property
def tables(self):
return self._database.get_tables()
def __contains__(self, table):
return table in self.tables
def connect(self):
self._database.connect()
def close(self):
self._database.close()
def update_cache(self, table=None):
        if table:
            model_class = self._models[table]
            dependencies = model_class._meta.related_models(backrefs=True)
            table_names = [related._meta.db_table
                           for related in dependencies]
        else:
            table_names = None  # Update all tables.
        updated = self._introspector.generate_models(
            skip_invalid=True,
            table_names=table_names)
self._models.update(updated)
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not self._database.is_closed():
self.close()
def query(self, sql, params=None, commit=True):
return self._database.execute_sql(sql, params, commit)
def transaction(self):
if self._database.transaction_depth() == 0:
return self._database.transaction()
else:
return self._database.savepoint()
def _check_arguments(self, filename, file_obj, format, format_dict):
if filename and file_obj:
raise ValueError('file is over-specified. Please use either '
'filename or file_obj, but not both.')
if not filename and not file_obj:
raise ValueError('A filename or file-like object must be '
'specified.')
if format not in format_dict:
valid_formats = ', '.join(sorted(format_dict.keys()))
raise ValueError('Unsupported format "%s". Use one of %s.' % (
format, valid_formats))
def freeze(self, query, format='csv', filename=None, file_obj=None,
**kwargs):
self._check_arguments(filename, file_obj, format, self._export_formats)
if filename:
file_obj = open(filename, 'w')
exporter = self._export_formats[format](query)
exporter.export(file_obj, **kwargs)
if filename:
file_obj.close()
def thaw(self, table, format='csv', filename=None, file_obj=None,
strict=False, **kwargs):
        self._check_arguments(filename, file_obj, format, self._import_formats)
if filename:
file_obj = open(filename, 'r')
importer = self._import_formats[format](self[table], strict)
count = importer.load(file_obj, **kwargs)
if filename:
file_obj.close()
return count
class Table(object):
def __init__(self, dataset, name, model_class):
self.dataset = dataset
self.name = name
if model_class is None:
model_class = self._create_model()
model_class.create_table()
self.dataset._models[name] = model_class
@property
def model_class(self):
return self.dataset._models[self.name]
def __repr__(self):
return '<Table: %s>' % self.name
def __len__(self):
return self.find().count()
def __iter__(self):
return iter(self.find().iterator())
def _create_model(self):
class Meta:
db_table = self.name
return type(
str(self.name),
(self.dataset._base_model,),
{'Meta': Meta})
def create_index(self, columns, unique=False):
self.dataset._database.create_index(
self.model_class,
columns,
unique=unique)
def _guess_field_type(self, value):
if isinstance(value, basestring):
return TextField
if isinstance(value, (datetime.date, datetime.datetime)):
return DateTimeField
elif value is True or value is False:
return BooleanField
elif isinstance(value, int):
return IntegerField
elif isinstance(value, float):
return FloatField
elif isinstance(value, Decimal):
return DecimalField
return TextField
@property
def columns(self):
return self.model_class._meta.get_field_names()
def _migrate_new_columns(self, data):
new_keys = set(data) - set(self.model_class._meta.fields)
if new_keys:
operations = []
for key in new_keys:
field_class = self._guess_field_type(data[key])
field = field_class(null=True)
operations.append(
self.dataset._migrator.add_column(self.name, key, field))
field.add_to_class(self.model_class, key)
migrate(*operations)
self.dataset.update_cache(self.name)
def insert(self, **data):
self._migrate_new_columns(data)
return self.model_class.insert(**data).execute()
def _apply_where(self, query, filters, conjunction=None):
conjunction = conjunction or operator.and_
if filters:
expressions = [
(self.model_class._meta.fields[column] == value)
for column, value in filters.items()]
query = query.where(reduce(conjunction, expressions))
return query
def update(self, columns=None, conjunction=None, **data):
self._migrate_new_columns(data)
filters = {}
if columns:
for column in columns:
filters[column] = data.pop(column)
return self._apply_where(
self.model_class.update(**data),
filters,
conjunction).execute()
def _query(self, **query):
return self._apply_where(self.model_class.select(), query)
def find(self, **query):
return self._query(**query).dicts()
def find_one(self, **query):
try:
return self.find(**query).get()
except self.model_class.DoesNotExist:
return None
def all(self):
return self.find()
def delete(self, **query):
return self._apply_where(self.model_class.delete(), query).execute()
def freeze(self, *args, **kwargs):
return self.dataset.freeze(self.all(), *args, **kwargs)
def thaw(self, *args, **kwargs):
return self.dataset.thaw(self.name, *args, **kwargs)
class Exporter(object):
def __init__(self, query):
self.query = query
def export(self, file_obj):
raise NotImplementedError
class JSONExporter(Exporter):
@staticmethod
def default(o):
if isinstance(o, (datetime.datetime, datetime.date, datetime.time)):
return o.isoformat()
elif isinstance(o, Decimal):
return str(o)
raise TypeError('Unable to serialize %r as JSON.' % o)
def export(self, file_obj, **kwargs):
json.dump(
list(self.query),
file_obj,
default=JSONExporter.default,
**kwargs)
class CSVExporter(Exporter):
def export(self, file_obj, header=True, **kwargs):
writer = csv.writer(file_obj, **kwargs)
if header and hasattr(self.query, '_select'):
writer.writerow([field.name for field in self.query._select])
for row in self.query.tuples():
writer.writerow(row)
class Importer(object):
def __init__(self, table, strict=False):
self.table = table
self.strict = strict
model = self.table.model_class
self.columns = model._meta.columns
self.columns.update(model._meta.fields)
def load(self, file_obj):
raise NotImplementedError
class JSONImporter(Importer):
def load(self, file_obj, **kwargs):
data = json.load(file_obj, **kwargs)
count = 0
for row in data:
if self.strict:
obj = {}
for key in row:
field = self.columns.get(key)
if field is not None:
obj[field.name] = field.python_value(row[key])
else:
obj = row
if obj:
self.table.insert(**obj)
count += 1
return count
class CSVImporter(Importer):
def load(self, file_obj, header=True, **kwargs):
count = 0
reader = csv.reader(file_obj, **kwargs)
if header:
try:
header_keys = next(reader)
except StopIteration:
return count
if self.strict:
header_fields = []
for idx, key in enumerate(header_keys):
if key in self.columns:
header_fields.append((idx, self.columns[key]))
else:
header_fields = list(enumerate(header_keys))
else:
            header_fields = list(enumerate(self.table.model_class._meta.get_fields()))
if not header_fields:
return count
for row in reader:
obj = {}
for idx, field in header_fields:
if self.strict:
obj[field.name] = field.python_value(row[idx])
else:
obj[field] = row[idx]
self.table.insert(**obj)
count += 1
return count
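# Example (illustrative) usage of the DataSet API defined above, against an
# in-memory SQLite database:
#
#     db = DataSet('sqlite:///:memory:')
#     users = db['user']                   # table is created on demand
#     users.insert(name='huey', age=3)     # new columns are migrated in
#     huey = users.find_one(name='huey')
#     db.freeze(users.all(), format='json', filename='users.json')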
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy
import numpy.matlib
linalg = numpy.linalg
# import scikits.learn.machine.em.densities as densities
# For treatment of missing data, see:
#
# Shumway, R.H. & Stoffer, D.S. (1982). An approach to time series
# smoothing and forecasting using the EM algorithm. Journal of Time
# Series Analysis, 3, 253-264. http://www.stat.pitt.edu/stoffer/em.pdf
def rand_mvn(mu, sigma, N):
"""generate multivariate normal samples
input:
mu - a length M vector, the mean of the distribution
sigma - an M by M array, the covariance of the distribution
N - a scalar N, the number of samples to generate
output:
Y - an N by M array, N samples with mean mu and covariance sigma
"""
M = sigma.shape[0]
X = numpy.random.standard_normal((M, N))
Y = numpy.dot(linalg.cholesky(sigma), X).T + mu
return Y
def covar(x):
"""determine the sample covariance matrix of x
input:
x - an N by M array, N samples of an M component vector
output:
sigma - and M by M array, the covariance matrix
"""
mu = numpy.mean(x, axis=0)
N = x.shape[0]
y = x - mu
sigma = numpy.dot(y.T, y) / (N - 1)
# Note, the maximum likelihood estimator is /N [not /(N-1)] as
# above, but that works only for a multivariate normal.
return sigma
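# Quick sanity check (illustrative): samples drawn with rand_mvn should have
# a sample covariance close to the requested sigma.
#
#     mu = numpy.array([0.0, 1.0])
#     sigma = numpy.array([[2.0, 0.3], [0.3, 0.5]])
#     Y = rand_mvn(mu, sigma, 100000)
#     print(covar(Y))  # approaches sigma as N grows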
def gaussian_prob(x, m, C, use_log=False):
    if 0:
        # Disabled path using scikits.learn densities (see the commented-out
        # import at the top of this module).
        return numpy.asscalar(densities.gauss_den(x, m, C, log=use_log))
# Kevin Murphy's implementation
m = numpy.atleast_1d(m)
assert x.ndim == 1
N = 1
d = x.shape[0]
M = numpy.dot(m.T, numpy.ones((1, N))) # replicate mean across columns
denom = (2 * numpy.pi)**(d / 2) * numpy.sqrt(abs(numpy.linalg.det(C)))
x = x[:, numpy.newaxis] # make column vector
xMT = (x - M).T
tmpXX = (numpy.dot(xMT, numpy.linalg.inv(C))) * xMT
mahal = numpy.sum(tmpXX.flat)
if numpy.any(mahal < 0):
raise ValueError("mahal < 0 => C is not psd")
if use_log:
p = -0.5 * mahal - numpy.log(denom)
else:
eps = numpy.finfo(numpy.float64).eps
# eps=2**-52
p = numpy.exp(-0.5 * mahal) / (denom + eps)
return p
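# Sanity check (illustrative): a standard normal evaluated at its mean gives
# the familiar density value.
#
#     p = gaussian_prob(numpy.array([0.0]), numpy.array([0.0]),
#                       numpy.array([[1.0]]))
#     # p ~= 1 / sqrt(2 * pi) ~= 0.3989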
class VariableObservationNoiseKalmanFilter:
def __init__(self, A, C, Q, initial_x, initial_P):
ss = len(A) # ndim in state space
os = len(C) # ndim in observation space
assert A.shape == (ss, ss)
assert C.shape == (os, ss)
assert Q.shape == (ss, ss)
assert initial_x.shape == (ss,)
assert initial_P.shape == (ss, ss)
self.A = A # process update model
self.C = C # observation model
self.Q = Q # process covariance matrix
# These 2 attributes are the only state that changes during
# filtering:
self.xhat_k1 = initial_x # a posteriori state estimate from step (k-1)
self.P_k1 = initial_P # a posteriori error estimate from step (k-1)
self.ss = ss
self.os = os
self.AT = self.A.T
self.CT = self.C.T
if len(initial_x) != self.ss:
raise ValueError('initial_x must be a vector with ss components')
def step(self, y=None, isinitial=False, full_output=False, **kw):
xhatminus, Pminus = self.step1__calculate_a_priori(isinitial=isinitial)
return self.step2__calculate_a_posteri(xhatminus, Pminus, y=y,
full_output=full_output, **kw)
def step1__calculate_a_priori(self, isinitial=False):
dot = numpy.dot # shorthand
############################################
# update state-space
# compute a priori estimate of statespace
if not isinitial:
xhatminus = dot(self.A, self.xhat_k1)
# compute a priori estimate of errors
Pminus = dot(dot(self.A, self.P_k1), self.AT) + self.Q
else:
xhatminus = self.xhat_k1
Pminus = self.P_k1
return xhatminus, Pminus
def _compute_prediction(self, xhatminus):
return numpy.dot(self.C, xhatminus)
def step2__calculate_a_posteri(self, xhatminus, Pminus, y=None,
full_output=False, R=None):
"""
y represents the observation for this time-step
"""
if R is None:
raise ValueError('R cannot be None')
dot = numpy.dot # shorthand
inv = numpy.linalg.inv
missing_data = False
if y is None or numpy.any(numpy.isnan(y)):
missing_data = True
if not missing_data:
############################################
# incorporate observation
# calculate a posteriori state estimate
# calculate Kalman gain
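            # Standard update equations, in the notation used here:
            #   K    = P- C^T (C P- C^T + R)^-1
            #   xhat = xhat- + K (y - C xhat-)
            #   P    = (I - K C) P-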
Knumerator = dot(Pminus, self.CT)
Kdenominator = dot(dot(self.C, Pminus), self.CT) + R
K = dot(Knumerator, inv(Kdenominator)) # Kalman gain
residuals = y - self._compute_prediction(xhatminus) # error/innovation
xhat = xhatminus + dot(K, residuals)
one_minus_KC = numpy.eye(self.ss) - dot(K, self.C)
# compute a posteriori estimate of errors
P = dot(one_minus_KC, Pminus)
else:
# no observation
xhat = xhatminus
P = Pminus
if full_output:
if missing_data:
# XXX missing data, check literature!
raise NotImplementedError(
"don't know how to compute VVnew with missing data")
# VVnew = dot(self.A,self.P_k1)
# loglik = 0
else:
# calculate loglik and Pfuture
VVnew = dot(one_minus_KC, dot(self.A, self.P_k1))
loglik = gaussian_prob(residuals,
numpy.zeros((1, len(residuals))),
Kdenominator, use_log=True)
# this step (k) becomes next step's prior (k-1)
self.xhat_k1 = xhat
self.P_k1 = P
if full_output:
return xhat, P, loglik, VVnew
else:
return xhat, P
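# Usage sketch (illustrative values, not part of the original code): filter a
# scalar random walk whose observation noise R differs at every step; a y of
# None marks a missing observation.
def _example_variable_noise_filter():
    A = numpy.eye(1)
    C = numpy.eye(1)
    Q = 0.01 * numpy.eye(1)
    kf = VariableObservationNoiseKalmanFilter(
        A, C, Q, initial_x=numpy.zeros(1), initial_P=numpy.eye(1))
    observations = [numpy.array([0.1]), None, numpy.array([0.3])]
    noises = [0.1 * numpy.eye(1), 0.1 * numpy.eye(1), 0.5 * numpy.eye(1)]
    for i, (y, R) in enumerate(zip(observations, noises)):
        xhat, P = kf.step(y=y, isinitial=(i == 0), R=R)
    return xhat, P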
class KalmanFilter(VariableObservationNoiseKalmanFilter):
def __init__(self, A, C, Q, R, initial_x, initial_P):
self.R = R # measurement covariance matrix
VariableObservationNoiseKalmanFilter.__init__(
self, A=A, C=C, Q=Q, initial_x=initial_x, initial_P=initial_P)
assert R.shape == (self.os, self.os)
def step2__calculate_a_posteri(self, xhatminus, Pminus, y=None,
full_output=False):
return VariableObservationNoiseKalmanFilter.step2__calculate_a_posteri(
self, xhatminus=xhatminus, Pminus=Pminus, y=y, full_output=full_output,
R=self.R)
class KalmanFilter_NonlinearObservation(KalmanFilter):
def __init__(self, A, observation_function, Q, R, initial_x, initial_P, delta = 1e-5):
self.delta = delta
self.observation_function = observation_function
C0 = linearize_at(observation_function,initial_x)
KalmanFilter.__init__(
self, A=A, C=C0, Q=Q, R=R, initial_x=initial_x, initial_P=initial_P)
assert R.shape == (self.os, self.os)
def _compute_prediction(self, xhatminus):
result = self.observation_function(xhatminus)
assert(result.ndim == 2)
assert(result.shape[1] == 1)
return result[:,0] # drop dimension
def step2__calculate_a_posteri(self, xhatminus, Pminus, y=None,
full_output=False):
self.C = linearize_at(self.observation_function, xhatminus, delta=self.delta)
self.CT = self.C.T
return KalmanFilter.step2__calculate_a_posteri(
self, xhatminus=xhatminus, Pminus=Pminus, y=y, full_output=full_output)
def linearize_at(f, x, delta = 1e-5):
f0 = f(x)
mat = []
for i in range(len(x)):
dxi = numpy.zeros((len(x),))
dxi[i] = delta
fi = f(x+dxi)
dF_dxi = (fi-f0)/delta
mat.append(dF_dxi[:,0])
mat = numpy.array(mat).T
return mat
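# Quick check (sketch): numerical Jacobian of a simple nonlinear observation
# function. Note the convention linearize_at relies on: f must return a
# column vector of shape (os, 1).
def _example_linearize_at():
    def f(x):
        return numpy.array([[x[0] ** 2], [x[0] * x[1]]])
    J = linearize_at(f, numpy.array([1.0, 2.0]))
    # The analytic Jacobian at (1, 2) is [[2, 0], [2, 1]].
    assert numpy.allclose(J, [[2.0, 0.0], [2.0, 1.0]], atol=1e-3)
    return J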
def kalman_filter(y, A, C, Q, R, init_x, init_V, full_output=False):
T = len(y)
ss = len(A)
R = numpy.array(R)
for arr in (A, C, Q):
if numpy.any(numpy.isnan(arr)):
raise ValueError(
"cannot do Kalman filtering with nan values in parameters")
if R.ndim not in (2, 3):
raise ValueError("R not 2 or 3 dimensions but %d" % R.ndim)
if R.ndim == 2:
kfilt = KalmanFilter(A, C, Q, R, init_x, init_V)
else:
assert R.ndim == 3
if R.shape[0] != T:
raise ValueError(
'Per-observation noise must have same length as observations')
kfilt = VariableObservationNoiseKalmanFilter(A, C, Q, init_x, init_V)
# Forward pass
xfilt = numpy.zeros((T, ss))
Vfilt = numpy.zeros((T, ss, ss))
if full_output:
VVfilt = numpy.zeros((T, ss, ss))
loglik = 0
for i in range(T):
isinitial = i == 0
y_i = y[i]
if R.ndim == 3:
kw = dict(R=R[i])
else:
kw = {}
if full_output:
xfilt_i, Vfilt_i, LL, VVfilt_i = kfilt.step(y=y_i,
isinitial=isinitial,
full_output=True, **kw)
VVfilt[i] = VVfilt_i
loglik += LL
else:
xfilt_i, Vfilt_i = kfilt.step(y=y_i,
isinitial=isinitial,
full_output=False, **kw)
xfilt[i] = xfilt_i
Vfilt[i] = Vfilt_i
if full_output:
return xfilt, Vfilt, VVfilt, loglik
else:
return xfilt, Vfilt
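# Usage sketch (illustrative values): filter synthetic position observations
# of a constant-velocity state. Passing a (T, os, os) array as R would select
# the per-observation-noise variant instead of the constant-R KalmanFilter.
def _example_kalman_filter():
    A = numpy.array([[1.0, 1.0], [0.0, 1.0]])  # constant-velocity process
    C = numpy.array([[1.0, 0.0]])              # observe position only
    Q = 0.01 * numpy.eye(2)
    R = numpy.array([[0.1]])
    y = numpy.arange(5, dtype=numpy.float64).reshape(5, 1)
    xfilt, Vfilt = kalman_filter(y, A, C, Q, R, init_x=numpy.zeros(2),
                                 init_V=numpy.eye(2))
    return xfilt, Vfilt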
def kalman_smoother(y, A, C, Q, R, init_x, init_V, valid_data_idx=None,
full_output=False):
"""Rauch-Tung-Striebel (RTS) smoother
arguments
---------
y - observations
A - process update matrix
C - state-to-observation matrix
Q - process covariance matrix
R - observation covariance matrix
init_x - initial state
init_V - initial error estimate
valid_data_idx - (optional) Indices to rows of y that are valid or
boolean array of len(y). (None if all data valid.) Note that
this is not necessary if y is nan where data are invalid.
returns
-------
xsmooth - smoothed state estimates
Vsmooth - smoothed error estimates
    VVsmooth - smoothed one-step state cross-covariances (only when full_output==True)
    loglik - log-likelihood of the observations (only when full_output==True)
Kalman smoother based on Kevin Murphy's Kalman toolbox for
MATLAB(tm).
N.B. Axes are swapped relative to Kevin Murphy's example, because
in all my data, time is the first dimension."""
    if valid_data_idx is not None:
        # Work on a copy so the caller's array is never modified.
        y = numpy.array(y, copy=True)
        if hasattr(valid_data_idx, 'dtype') and valid_data_idx.dtype == numpy.bool_:
            assert len(valid_data_idx) == len(y)
            invalid_cond = ~valid_data_idx
            y[invalid_cond] = numpy.nan  # broadcast
        else:
            valid_data_idx = set(valid_data_idx)
            all_idx = set(range(len(y)))
            bad_idx = list(all_idx - valid_data_idx)
            for i in bad_idx:
                y[i] = numpy.nan  # broadcast
def smooth_update(xsmooth_future, Vsmooth_future, xfilt, Vfilt,
Vfilt_future, VVfilt_future, A, Q, full_output=False):
dot = numpy.dot
inv = numpy.linalg.inv
xpred = dot(A, xfilt)
Vpred = dot(A, dot(Vfilt, A.T)) + Q
J = dot(Vfilt, dot(A.T, inv(Vpred))) # smoother gain matrix
xsmooth = xfilt + dot(J, xsmooth_future - xpred)
Vsmooth = Vfilt + dot(J, dot(Vsmooth_future - Vpred, J.T))
VVsmooth_future = VVfilt_future + dot(
(Vsmooth_future - Vfilt_future),
dot(inv(Vfilt_future), VVfilt_future))
return xsmooth, Vsmooth, VVsmooth_future
T = len(y)
ss = len(A)
# Forward pass
forward_results = kalman_filter(y, A, C, Q, R, init_x, init_V,
full_output=full_output)
if full_output:
xfilt, Vfilt, VVfilt, loglik = forward_results
else:
xfilt, Vfilt = forward_results
VVfilt = Vfilt # dummy value
# Backward pass
xsmooth = numpy.array(xfilt, copy=True)
Vsmooth = numpy.array(Vfilt, copy=True)
VVsmooth = numpy.empty(Vfilt.shape)
for t in range(T - 2, -1, -1):
xsmooth_t, Vsmooth_t, VVsmooth_t = smooth_update(xsmooth[t + 1, :],
Vsmooth[t + 1, :, :],
xfilt[t, :],
Vfilt[t, :, :],
Vfilt[t + 1, :, :],
VVfilt[t + 1, :, :],
A, Q,
full_output=full_output)
xsmooth[t, :] = xsmooth_t
Vsmooth[t, :, :] = Vsmooth_t
VVsmooth[t + 1, :, :] = VVsmooth_t
VVsmooth[0, :, :] = numpy.zeros((ss, ss))
if full_output:
return xsmooth, Vsmooth, VVsmooth, loglik
else:
return xsmooth, Vsmooth
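# Usage sketch (illustrative values): smooth the same constant-velocity
# system while marking one row of y as invalid via valid_data_idx.
def _example_kalman_smoother():
    A = numpy.array([[1.0, 1.0], [0.0, 1.0]])
    C = numpy.array([[1.0, 0.0]])
    Q = 0.01 * numpy.eye(2)
    R = numpy.array([[0.1]])
    y = numpy.arange(6, dtype=numpy.float64).reshape(6, 1)
    valid = numpy.array([True, True, False, True, True, True])
    xsmooth, Vsmooth = kalman_smoother(y, A, C, Q, R,
                                       init_x=numpy.zeros(2),
                                       init_V=numpy.eye(2),
                                       valid_data_idx=valid)
    return xsmooth, Vsmooth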
|
|
# Copyright (c) 2018 Red Hat, Inc.
# All Rights Reserved.
import dateutil
import logging
from django.db.models import Count
from django.db import transaction
from django.shortcuts import get_object_or_404
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from rest_framework.permissions import SAFE_METHODS
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from rest_framework import status
from awx.main.constants import ACTIVE_STATES
from awx.main.utils import (
get_object_or_400,
parse_yaml_or_json,
)
from awx.main.models.ha import (
Instance,
InstanceGroup,
)
from awx.main.models.organization import Team
from awx.main.models.projects import Project
from awx.main.models.inventory import Inventory
from awx.main.models.jobs import JobTemplate
from awx.api.exceptions import ActiveJobConflict
logger = logging.getLogger('awx.api.views.mixin')
class UnifiedJobDeletionMixin(object):
'''
Special handling when deleting a running unified job object.
'''
def destroy(self, request, *args, **kwargs):
obj = self.get_object()
if not request.user.can_access(self.model, 'delete', obj):
raise PermissionDenied()
try:
if obj.unified_job_node.workflow_job.status in ACTIVE_STATES:
raise PermissionDenied(detail=_('Cannot delete job resource when associated workflow job is running.'))
except self.model.unified_job_node.RelatedObjectDoesNotExist:
pass
# Still allow deletion of new status, because these can be manually created
if obj.status in ACTIVE_STATES and obj.status != 'new':
raise PermissionDenied(detail=_("Cannot delete running job resource."))
elif not obj.event_processing_finished:
# Prohibit deletion if job events are still coming in
if obj.finished and now() < obj.finished + dateutil.relativedelta.relativedelta(minutes=1):
# less than 1 minute has passed since job finished and events are not in
return Response({"error": _("Job has not finished processing events.")},
status=status.HTTP_400_BAD_REQUEST)
else:
# if it has been > 1 minute, events are probably lost
logger.warning('Allowing deletion of {} through the API without all events '
'processed.'.format(obj.log_format))
obj.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
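# Usage sketch (hypothetical view, for illustration only; the real job views
# live elsewhere in AWX): list the mixin before the generic view class so that
# its destroy() runs the checks above before deletion, e.g.
#
#     class JobDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
#         model = Job
#         serializer_class = JobSerializer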
class InstanceGroupMembershipMixin(object):
'''
This mixin overloads attach/detach so that it calls InstanceGroup.save(),
triggering a background recalculation of policy-based instance group
membership.
'''
def attach(self, request, *args, **kwargs):
response = super(InstanceGroupMembershipMixin, self).attach(request, *args, **kwargs)
sub_id, res = self.attach_validate(request)
if status.is_success(response.status_code):
            if self.parent_model is Instance:
                # When the parent is an Instance, sub_id refers to the
                # InstanceGroup being attached, so the hostname must come
                # from the parent (mirrors unattach() below).
                inst_name = self.get_parent_object().hostname
            else:
                inst_name = get_object_or_400(self.model, pk=sub_id).hostname
with transaction.atomic():
ig_qs = InstanceGroup.objects.select_for_update()
if self.parent_model is Instance:
ig_obj = get_object_or_400(ig_qs, pk=sub_id)
else:
# similar to get_parent_object, but selected for update
parent_filter = {
self.lookup_field: self.kwargs.get(self.lookup_field, None),
}
ig_obj = get_object_or_404(ig_qs, **parent_filter)
if inst_name not in ig_obj.policy_instance_list:
ig_obj.policy_instance_list.append(inst_name)
ig_obj.save(update_fields=['policy_instance_list'])
return response
def is_valid_relation(self, parent, sub, created=False):
if sub.is_isolated():
            return {'error': _('Isolated instances may not be added or removed from instance groups via the API.')}
if self.parent_model is InstanceGroup:
ig_obj = self.get_parent_object()
if ig_obj.controller_id is not None:
return {'error': _('Isolated instance group membership may not be managed via the API.')}
return None
def unattach_validate(self, request):
(sub_id, res) = super(InstanceGroupMembershipMixin, self).unattach_validate(request)
if res:
return (sub_id, res)
sub = get_object_or_400(self.model, pk=sub_id)
attach_errors = self.is_valid_relation(None, sub)
if attach_errors:
return (sub_id, Response(attach_errors, status=status.HTTP_400_BAD_REQUEST))
return (sub_id, res)
def unattach(self, request, *args, **kwargs):
response = super(InstanceGroupMembershipMixin, self).unattach(request, *args, **kwargs)
if status.is_success(response.status_code):
sub_id = request.data.get('id', None)
if self.parent_model is Instance:
inst_name = self.get_parent_object().hostname
else:
inst_name = get_object_or_400(self.model, pk=sub_id).hostname
with transaction.atomic():
ig_qs = InstanceGroup.objects.select_for_update()
if self.parent_model is Instance:
ig_obj = get_object_or_400(ig_qs, pk=sub_id)
else:
# similar to get_parent_object, but selected for update
parent_filter = {
self.lookup_field: self.kwargs.get(self.lookup_field, None),
}
ig_obj = get_object_or_404(ig_qs, **parent_filter)
if inst_name in ig_obj.policy_instance_list:
ig_obj.policy_instance_list.pop(ig_obj.policy_instance_list.index(inst_name))
ig_obj.save(update_fields=['policy_instance_list'])
return response
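    # Design note: the select_for_update() in attach()/unattach() serializes
    # concurrent edits of policy_instance_list, so two simultaneous
    # (un)attach requests cannot overwrite each other's saved list.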
class RelatedJobsPreventDeleteMixin(object):
def perform_destroy(self, obj):
self.check_related_active_jobs(obj)
return super(RelatedJobsPreventDeleteMixin, self).perform_destroy(obj)
def check_related_active_jobs(self, obj):
active_jobs = obj.get_active_jobs()
if len(active_jobs) > 0:
raise ActiveJobConflict(active_jobs)
time_cutoff = now() - dateutil.relativedelta.relativedelta(minutes=1)
        recent_jobs = obj._get_related_jobs().filter(finished__gte=time_cutoff)
for unified_job in recent_jobs.get_real_instances():
if not unified_job.event_processing_finished:
raise PermissionDenied(_(
'Related job {} is still processing events.'
).format(unified_job.log_format))
class OrganizationCountsMixin(object):
def get_serializer_context(self, *args, **kwargs):
full_context = super(OrganizationCountsMixin, self).get_serializer_context(*args, **kwargs)
if self.request is None:
return full_context
db_results = {}
org_qs = self.model.accessible_objects(self.request.user, 'read_role')
org_id_list = org_qs.values('id')
if len(org_id_list) == 0:
if self.request.method == 'POST':
full_context['related_field_counts'] = {}
return full_context
inv_qs = Inventory.accessible_objects(self.request.user, 'read_role')
project_qs = Project.accessible_objects(self.request.user, 'read_role')
jt_qs = JobTemplate.accessible_objects(self.request.user, 'read_role')
# Produce counts of Foreign Key relationships
db_results['inventories'] = inv_qs.values('organization').annotate(Count('organization')).order_by('organization')
db_results['teams'] = Team.accessible_objects(
self.request.user, 'read_role').values('organization').annotate(
Count('organization')).order_by('organization')
db_results['job_templates'] = jt_qs.values('organization').annotate(Count('organization')).order_by('organization')
db_results['projects'] = project_qs.values('organization').annotate(Count('organization')).order_by('organization')
# Other members and admins of organization are always viewable
db_results['users'] = org_qs.annotate(
users=Count('member_role__members', distinct=True),
admins=Count('admin_role__members', distinct=True)
).values('id', 'users', 'admins')
count_context = {}
for org in org_id_list:
org_id = org['id']
count_context[org_id] = {
'inventories': 0, 'teams': 0, 'users': 0, 'job_templates': 0,
'admins': 0, 'projects': 0}
for res, count_qs in db_results.items():
if res == 'users':
org_reference = 'id'
else:
org_reference = 'organization'
for entry in count_qs:
org_id = entry[org_reference]
if org_id in count_context:
if res == 'users':
count_context[org_id]['admins'] = entry['admins']
count_context[org_id]['users'] = entry['users']
continue
count_context[org_id][res] = entry['%s__count' % org_reference]
full_context['related_field_counts'] = count_context
return full_context
class ControlledByScmMixin(object):
'''
    Mixin that resets the SCM inventory source commit hash
    whenever anything that it manages changes.
'''
def _reset_inv_src_rev(self, obj):
if self.request.method in SAFE_METHODS or not obj:
return
project_following_sources = obj.inventory_sources.filter(
update_on_project_update=True, source='scm')
if project_following_sources:
# Allow inventory changes unrelated to variables
if self.model == Inventory and (
not self.request or not self.request.data or
parse_yaml_or_json(self.request.data.get('variables', '')) == parse_yaml_or_json(obj.variables)):
return
project_following_sources.update(scm_last_revision='')
def get_object(self):
obj = super(ControlledByScmMixin, self).get_object()
self._reset_inv_src_rev(obj)
return obj
def get_parent_object(self):
obj = super(ControlledByScmMixin, self).get_parent_object()
self._reset_inv_src_rev(obj)
return obj
class NoTruncateMixin(object):
def get_serializer_context(self):
context = super().get_serializer_context()
if self.request.query_params.get('no_truncate'):
context.update(no_truncate=True)
return context
|
|
from qtpy.QtWidgets import (
QHBoxLayout,
QVBoxLayout,
QGroupBox,
QPushButton,
QCheckBox,
QLabel,
QGridLayout,
QMessageBox,
)
from qtpy.QtCore import Qt, Slot, Signal, QThreadPool, QRunnable
from .useful_widgets import (
set_tooltip,
SecondaryWindow,
LineEditExtended,
IntValidatorStrict,
DoubleValidatorStrict,
)
import logging
logger = logging.getLogger(__name__)
class WndGeneralFittingSettings(SecondaryWindow):
# Signal that is sent (to main window) to update global state of the program
update_global_state = Signal()
computations_complete = Signal(object)
def __init__(self, *, gpc, gui_vars):
super().__init__()
# Global processing classes
self.gpc = gpc
# Global GUI variables (used for control of GUI state)
self.gui_vars = gui_vars
self._dialog_data = {}
self._validator_int = IntValidatorStrict()
self._validator_float = DoubleValidatorStrict()
# Reference to the main window. The main window will hold
# references to all non-modal windows that could be opened
# from multiple places in the program.
self.ref_main_window = self.gui_vars["ref_main_window"]
self.update_global_state.connect(self.ref_main_window.update_widget_state)
self.initialize()
self._data_changed = False
def initialize(self):
self.setWindowTitle("General Settings for Fitting Algorithm")
self.setMinimumHeight(330)
self.setMinimumWidth(500)
self.resize(650, 330)
self.pb_apply = QPushButton("Apply")
self.pb_apply.setEnabled(False)
self.pb_apply.clicked.connect(self.pb_apply_clicked)
self.pb_cancel = QPushButton("Cancel")
self.pb_cancel.setEnabled(False)
self.pb_cancel.clicked.connect(self.pb_cancel_clicked)
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addStretch(1)
hbox.addWidget(self.pb_apply)
hbox.addWidget(self.pb_cancel)
vbox.addLayout(hbox)
hbox = self._setup_table()
vbox.addLayout(hbox)
vbox.addStretch(1)
self.setLayout(vbox)
self._set_tooltips()
self.update_form_data()
def _setup_table(self):
vbox_left = QVBoxLayout()
# ===== Top-left section of the dialog box =====
self.le_max_iterations = LineEditExtended()
self.le_tolerance_stopping = LineEditExtended()
self.le_escape_ratio = LineEditExtended()
grid = QGridLayout()
grid.addWidget(QLabel("Iterations (max):"), 0, 0)
grid.addWidget(self.le_max_iterations, 0, 1)
grid.addWidget(QLabel("Tolerance (stopping):"), 1, 0)
grid.addWidget(self.le_tolerance_stopping, 1, 1)
grid.addWidget(QLabel("Escape peak ratio:"), 2, 0)
grid.addWidget(self.le_escape_ratio, 2, 1)
self.group_total_spectrum_fitting = QGroupBox("Fitting of Total Spectrum (Model)")
self.group_total_spectrum_fitting.setLayout(grid)
vbox_left.addWidget(self.group_total_spectrum_fitting)
# ===== Bottom-left section of the dialog box =====
# Incident energy and the selected range
self.le_incident_energy = LineEditExtended()
self.le_range_low = LineEditExtended()
self.le_range_high = LineEditExtended()
self.group_energy_range = QGroupBox("Incident Energy and Selected Range")
grid = QGridLayout()
grid.addWidget(QLabel("Incident energy, keV"), 0, 0)
grid.addWidget(self.le_incident_energy, 0, 1)
grid.addWidget(QLabel("Range (low), keV"), 1, 0)
grid.addWidget(self.le_range_low, 1, 1)
grid.addWidget(QLabel("Range (high), keV"), 2, 0)
grid.addWidget(self.le_range_high, 2, 1)
self.group_energy_range.setLayout(grid)
vbox_left.addWidget(self.group_energy_range)
vbox_right = QVBoxLayout()
# ===== Top-right section of the dialog box =====
self.cb_linear_baseline = QCheckBox("Subtract linear baseline")
self.cb_snip_baseline = QCheckBox("Subtract baseline using SNIP")
        # This option is not currently supported. In the future it may be
        # implemented or, if not needed, removed.
self.cb_add_const_to_data = QCheckBox("Add const. bias to data")
self.cb_add_const_to_data.setEnabled(False)
self.lb_add_const_to_data = QLabel("Constant bias:")
self.lb_add_const_to_data.setEnabled(False)
self.le_add_const_to_data = LineEditExtended()
self.le_add_const_to_data.setEnabled(False)
vbox = QVBoxLayout()
vbox.addWidget(self.cb_linear_baseline)
vbox.addWidget(self.cb_snip_baseline)
vbox.addWidget(self.cb_add_const_to_data)
hbox = QHBoxLayout()
hbox.addWidget(self.lb_add_const_to_data)
hbox.addWidget(self.le_add_const_to_data)
vbox.addLayout(hbox)
self.group_pixel_fitting = QGroupBox("Fitting of Single Spectra (XRF Maps)")
self.group_pixel_fitting.setLayout(vbox)
vbox_right.addWidget(self.group_pixel_fitting)
# ===== Bottom-right section of the dialog box =====
self.le_snip_window_size = LineEditExtended()
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel("SNIP window size(*):"))
hbox.addWidget(self.le_snip_window_size)
vbox.addLayout(hbox)
vbox.addWidget(QLabel("*Total spectrum fitting always includes \n SNIP baseline subtraction"))
self.group_all_fitting = QGroupBox("All Fitting")
self.group_all_fitting.setLayout(vbox)
vbox_right.addWidget(self.group_all_fitting)
vbox_right.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(vbox_left)
hbox.addLayout(vbox_right)
self.le_max_iterations.textChanged.connect(self.le_max_iterations_text_changed)
self.le_max_iterations.editingFinished.connect(self.le_max_iterations_editing_finished)
self.le_tolerance_stopping.textChanged.connect(self.le_tolerance_stopping_text_changed)
self.le_tolerance_stopping.editingFinished.connect(self.le_tolerance_stopping_editing_finished)
self.le_escape_ratio.textChanged.connect(self.le_escape_ratio_text_changed)
self.le_escape_ratio.editingFinished.connect(self.le_escape_ratio_editing_finished)
self.le_incident_energy.textChanged.connect(self.le_incident_energy_text_changed)
self.le_incident_energy.editingFinished.connect(self.le_incident_energy_editing_finished)
self.le_range_low.textChanged.connect(self.le_range_low_text_changed)
self.le_range_low.editingFinished.connect(self.le_range_low_editing_finished)
self.le_range_high.textChanged.connect(self.le_range_high_text_changed)
self.le_range_high.editingFinished.connect(self.le_range_high_editing_finished)
self.le_snip_window_size.textChanged.connect(self.le_snip_window_size_text_changed)
self.le_snip_window_size.editingFinished.connect(self.le_snip_window_size_editing_finished)
self.cb_linear_baseline.stateChanged.connect(self.cb_linear_baseline_state_changed)
self.cb_snip_baseline.stateChanged.connect(self.cb_snip_baseline_state_changed)
return hbox
def update_widget_state(self, condition=None):
# Update the state of the menu bar
state = not self.gui_vars["gui_state"]["running_computations"]
self.setEnabled(state)
if condition == "tooltips":
self._set_tooltips()
def _set_tooltips(self):
set_tooltip(self.pb_apply, "Save changes and <b>update plots</b>.")
set_tooltip(self.pb_cancel, "<b>Discard</b> all changes.")
set_tooltip(self.le_max_iterations, "<b>Maximum number of iterations</b> used for total spectrum fitting.")
set_tooltip(self.le_tolerance_stopping, "<b>Tolerance</b> setting for total spectrum fitting.")
        set_tooltip(self.le_escape_ratio, "Parameter for total spectrum fitting: <b>escape ratio</b>")
set_tooltip(self.le_incident_energy, "<b>Incident energy</b> in keV")
set_tooltip(self.le_range_low, "<b>Lower boundary</b> of the selected range in keV.")
set_tooltip(self.le_range_high, "<b>Upper boundary</b> of the selected range in keV.")
set_tooltip(
self.cb_linear_baseline,
"Subtract baseline as represented as a constant. <b>XRF Map generation</b>. "
"Baseline subtraction is performed as part of NNLS fitting.",
)
set_tooltip(
self.cb_snip_baseline,
"Subtract baseline using SNIP method. <b>XRF Map generation</b>. "
"This is a separate step of processing and can be used together with "
"'linear' baseline subtraction if needed.",
)
set_tooltip(
self.le_snip_window_size,
"Window size for <b>SNIP</b> algorithm. Used both for total spectrum fitting "
"and XRF Map generation. SNIP baseline subtraction is always performed while "
"fitting total spectrum, but its effect may be reduced or eliminated by setting "
"the window size to some large value.",
)
def pb_apply_clicked(self):
"""Save dialog data and update plots"""
self.save_form_data()
def pb_cancel_clicked(self):
"""Reload data (discard all changes)"""
self.update_form_data()
def le_max_iterations_text_changed(self, text):
self._data_changed = True
self._validate_all()
def le_max_iterations_editing_finished(self):
        if self._validate_max_iterations():
self._dialog_data["max_iterations"] = int(self._read_le_value(self.le_max_iterations))
else:
self._show_max_iterations()
def le_tolerance_stopping_text_changed(self, text):
self._data_changed = True
self._validate_all()
def le_tolerance_stopping_editing_finished(self):
if self._validate_tolerance_stopping():
self._dialog_data["tolerance"] = self._read_le_value(self.le_tolerance_stopping)
else:
self._show_tolerance_stopping()
def le_escape_ratio_text_changed(self, text):
self._data_changed = True
self._validate_all()
def le_escape_ratio_editing_finished(self):
if self._validate_escape_ratio():
self._dialog_data["escape_peak_ratio"] = self._read_le_value(self.le_escape_ratio)
else:
self._show_escape_ratio()
def le_incident_energy_text_changed(self, text):
self._data_changed = True
if self._validate_incident_energy(text):
val = float(text)
val_range_high = val + 0.8
self._show_range_high(val_range_high)
self._validate_all()
def le_incident_energy_editing_finished(self):
if self._validate_incident_energy():
self._dialog_data["incident_energy"] = self._read_le_value(self.le_incident_energy)
else:
self._show_incident_energy()
if self._validate_range():
self._dialog_data["range_low"] = self._read_le_value(self.le_range_low)
self._dialog_data["range_high"] = self._read_le_value(self.le_range_high)
else:
self._show_range_low()
self._show_range_high()
self._validate_all()
def le_range_low_text_changed(self, text):
self._data_changed = True
self._validate_all()
def le_range_low_editing_finished(self):
if self._validate_range():
self._dialog_data["range_low"] = self._read_le_value(self.le_range_low)
self._dialog_data["range_high"] = self._read_le_value(self.le_range_high)
else:
self._show_range_low()
self._show_range_high()
self._validate_all()
def le_range_high_text_changed(self, text):
self._data_changed = True
self._validate_all()
def le_range_high_editing_finished(self):
if self._validate_range():
self._dialog_data["range_low"] = self._read_le_value(self.le_range_low)
self._dialog_data["range_high"] = self._read_le_value(self.le_range_high)
else:
self._show_range_low()
self._show_range_high()
def le_snip_window_size_text_changed(self, text):
self._data_changed = True
self._validate_all()
def le_snip_window_size_editing_finished(self):
if self._validate_snip_window_size():
self._dialog_data["snip_window_size"] = self._read_le_value(self.le_snip_window_size)
else:
self._show_snip_window_size()
def cb_linear_baseline_state_changed(self, state):
self._dialog_data["subtract_baseline_linear"] = state == Qt.Checked
self._data_changed = True
self._validate_all()
def cb_snip_baseline_state_changed(self, state):
self._dialog_data["subtract_baseline_snip"] = state == Qt.Checked
self._data_changed = True
self._validate_all()
def update_form_data(self):
self._dialog_data = self.gpc.get_general_fitting_params()
self._show_all()
self._data_changed = False
self._validate_all()
def save_form_data(self):
if self._data_changed:
def cb(dialog_data):
try:
self.gpc.set_general_fitting_params(dialog_data)
success, msg = True, ""
except Exception as ex:
success, msg = False, str(ex)
return {"success": success, "msg": msg}
self._compute_in_background(cb, self.slot_save_form_data, dialog_data=self._dialog_data)
@Slot(object)
def slot_save_form_data(self, result):
self._recover_after_compute(self.slot_save_form_data)
if not result["success"]:
msg = result["msg"]
msgbox = QMessageBox(
QMessageBox.Critical, "Failed to Apply Fit Parameters", msg, QMessageBox.Ok, parent=self
)
msgbox.exec()
else:
self._data_changed = False
self._validate_all()
self.gui_vars["gui_state"]["state_model_fit_exists"] = False
self.update_global_state.emit()
def _show_all(self):
self._show_max_iterations()
self._show_tolerance_stopping()
self._show_escape_ratio()
self._show_incident_energy()
self._show_range_high()
self._show_range_low()
self._show_linear_baseline()
self._show_snip_baseline()
self._show_snip_window_size()
def _show_max_iterations(self):
val = self._dialog_data["max_iterations"]
self.le_max_iterations.setText(f"{val}")
def _show_tolerance_stopping(self):
val = self._dialog_data["tolerance"]
self.le_tolerance_stopping.setText(self._format_float(val))
def _show_escape_ratio(self):
val = self._dialog_data["escape_peak_ratio"]
self.le_escape_ratio.setText(self._format_float(val))
def _show_incident_energy(self):
val = self._dialog_data["incident_energy"]
self.le_incident_energy.setText(self._format_float(val))
def _show_range_high(self, val=None):
val = self._dialog_data["range_high"] if val is None else val
self.le_range_high.setText(self._format_float(val))
def _show_range_low(self):
val = self._dialog_data["range_low"]
self.le_range_low.setText(self._format_float(val))
def _show_linear_baseline(self):
val = self._dialog_data["subtract_baseline_linear"]
        self.cb_linear_baseline.setChecked(bool(val))
def _show_snip_baseline(self):
val = self._dialog_data["subtract_baseline_snip"]
        self.cb_snip_baseline.setChecked(bool(val))
def _show_snip_window_size(self):
val = self._dialog_data["snip_window_size"]
self.le_snip_window_size.setText(self._format_float(val))
def _validate_all(self):
valid = (
            self._validate_max_iterations()
and self._validate_tolerance_stopping()
and self._validate_escape_ratio()
and self._validate_incident_energy()
and self._validate_range()
and self._validate_snip_window_size()
)
self.pb_apply.setEnabled(valid and self._data_changed)
self.pb_cancel.setEnabled(valid and self._data_changed)
    def _validate_max_iterations(self, text=None):
if text is None:
text = self.le_max_iterations.text()
valid = self._validate_int(text, v_min=1)
self.le_max_iterations.setValid(valid)
return valid
def _validate_tolerance_stopping(self, text=None):
if text is None:
text = self.le_tolerance_stopping.text()
valid = self._validate_float(text, v_min=1e-30)
self.le_tolerance_stopping.setValid(valid)
return valid
def _validate_escape_ratio(self, text=None):
if text is None:
text = self.le_escape_ratio.text()
valid = self._validate_float(text, v_min=0)
self.le_escape_ratio.setValid(valid)
return valid
def _validate_incident_energy(self, text=None):
if text is None:
text = self.le_incident_energy.text()
valid = self._validate_float(text, v_min=0)
self.le_incident_energy.setValid(valid)
return valid
def _validate_range(self, low_text=None, high_text=None):
if low_text is None:
low_text = self.le_range_low.text()
if high_text is None:
high_text = self.le_range_high.text()
valid = False
if self._validate_float(low_text, v_min=0) and self._validate_float(high_text, v_min=0):
v_low = float(low_text)
v_high = float(high_text)
if v_low < v_high:
valid = True
self.le_range_high.setValid(valid)
self.le_range_low.setValid(valid)
return valid
def _validate_snip_window_size(self, text=None):
if text is None:
text = self.le_snip_window_size.text()
valid = self._validate_float(text, v_min=1e-30)
self.le_snip_window_size.setValid(valid)
return valid
def _validate_int(self, text, *, v_min=None, v_max=None):
valid = False
if self._validator_int.validate(text, 0)[0] == IntValidatorStrict.Acceptable:
valid = True
v_int = int(text)
if (v_min is not None) and (v_int < v_min):
valid = False
if (v_max is not None) and (v_int > v_max):
valid = False
return valid
def _validate_float(self, text, *, v_min=None, v_max=None):
valid = False
if self._validator_float.validate(text, 0)[0] == DoubleValidatorStrict.Acceptable:
valid = True
v_float = float(text)
if (v_min is not None) and (v_float < v_min):
valid = False
if (v_max is not None) and (v_float > v_max):
valid = False
return valid
def _format_float(self, val):
return f"{val:.10g}"
def _read_le_value(self, line_edit):
"""It is assumed that the value is validated before the function is called"""
return float(line_edit.text())
def _compute_in_background(self, func, slot, *args, **kwargs):
"""
Run function `func` in a background thread. Send the signal
`self.computations_complete` once computation is finished.
Parameters
----------
func: function
            Reference to a function that is supposed to be executed in the background.
The function return value is passed as a signal parameter once computation is
complete.
slot: qtpy.QtCore.Slot or None
            Reference to a slot. If not None, then the signal `self.computations_complete`
is connected to this slot.
args, kwargs
arguments of the function `func`.
"""
signal_complete = self.computations_complete
def func_to_run(func, *args, **kwargs):
class RunTask(QRunnable):
def run(self):
result_dict = func(*args, **kwargs)
signal_complete.emit(result_dict)
return RunTask()
if slot is not None:
self.computations_complete.connect(slot)
self.gui_vars["gui_state"]["running_computations"] = True
self.update_global_state.emit()
QThreadPool.globalInstance().start(func_to_run(func, *args, **kwargs))
def _recover_after_compute(self, slot):
"""
The function should be called after the signal `self.computations_complete` is
received. The slot should be the same as the one used when calling
        `self._compute_in_background`.
"""
if slot is not None:
self.computations_complete.disconnect(slot)
self.gui_vars["gui_state"]["running_computations"] = False
self.update_global_state.emit()
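    # Usage sketch of the background-computation pair above (the names `work`
    # and `on_done` are illustrative, not part of the class):
    #
    #     @Slot(object)
    #     def on_done(self, result):
    #         self._recover_after_compute(self.on_done)
    #         ...  # handle `result`
    #
    #     self._compute_in_background(lambda: {"success": True}, self.on_done)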
|
|
slugs = {
'robots.txt' : {'date':'','new_slug':'robots.txt'},
'rss.xml' : {'date':'','new_slug':'rss.xml'},
'tag/appengine' : {'date':'/search/label/','new_slug':'appengine'},
'tag/observations' : {'date':'/search/label/','new_slug':'observations'},
'tag/projects' : {'date':'/search/label/','new_slug':'projects'},
'tag/software' : {'date':'/search/label/','new_slug':'software'},
'tag/programming' : {'date':'/search/label/','new_slug':'programming'},
'tag/madison' : {'date':'/search/label/','new_slug':'madison'},
'tag/family' : {'date':'/search/label/','new_slug':'family'},
'tag/posterous' : {'date':'/search/label/','new_slug':'posterous'},
'tag/exercise' : {'date':'/search/label/','new_slug':'exercise'},
'tag/kids' : {'date':'/search/label/','new_slug':'kids'},
'tag/sharendipity' : {'date':'/search/label/','new_slug':'sharendipity'},
'tag/twilio' : {'date':'/search/label/','new_slug':'twilio'},
'tag/opengov' : {'date':'/search/label/','new_slug':'opengov'},
'tag/funny' : {'date':'/search/label/','new_slug':'funny'},
'tag/google' : {'date':'/search/label/','new_slug':'google'},
'tag/jobs' : {'date':'/search/label/','new_slug':'jobs'},
'tag/Image' : {'date':'/search/label/','new_slug':'Image'},
'tag/startups' : {'date':'/search/label/','new_slug':'startups'},
'tag/design' : {'date':'/search/label/','new_slug':'design'},
'tag/gov2.0' : {'date':'/search/label/','new_slug':'gov2.0'},
'tag/creative' : {'date':'/search/label/','new_slug':'creative'},
'tag/travel' : {'date':'/search/label/','new_slug':'travel'},
'tag/quantified-self' : {'date':'/search/label/','new_slug':'quantified-self'},
'tag/quantifiedself' : {'date':'/search/label/','new_slug':'quantified-self'},
'tag/ringerous' : {'date':'/search/label/','new_slug':'ringerous'},
'tag/sports' : {'date':'/search/label/','new_slug':'sports'},
'tag/baseball' : {'date':'/search/label/','new_slug':'baseball'},
'tag/books' : {'date':'/search/label/','new_slug':'books'},
'tag/gov20' : {'date':'/search/label/','new_slug':'gov20'},
'tag/redsox' : {'date':'/search/label/','new_slug':'redsox'},
'cancer-stole-one-of-the-good-ones-today' : {'date':'/2012/07/','new_slug':'cancer-stole-one-of-good-ones-today.html'},
'56599543' : {'date':'/2011/09/','new_slug':'madison-transit-api-homebrew.html'},
'spam-blockers-and-colors' : {'date':'/2012/02/','new_slug':''},
'adding-parking-to-the-madison-api-homebrew' : {'date':'/2012/01/','new_slug':'adding-parking-to-madison-api-homebrew.html'},
'its-been-two-months-since-i-canceled-by-data' : {'date':'/2011/11/','new_slug':'it-been-two-months-since-i-canceled-by.html'},
'revisiting-google-app-engines-pricing-changes' : {'date':'/2011/11/','new_slug':'revisiting-google-app-engine-pricing.html'},
'i-just-canceled-my-data-plan' : {'date':'/2011/10/','new_slug':''},
'twilio-google-docs-meets-madcamp' : {'date':'/2011/09/','new_slug':''},
'madison-transit-api-snags-a-giant-new-user' : {'date':'/2011/09/','new_slug':'madison-transit-api-snags-giant-new-user.html'},
'app-engines-place-as-a-developer-playground' : {'date':'/2011/09/','new_slug':'app-engine-place-as-developer-playground.html'},
'who-needs-a-phone-the-magic-of-the-twilio-cli' : {'date':'/2011/08/','new_slug':'who-needs-phone-magic-of-twilio-client.html'},
'conans-commencement-speech-is-really-for-us-o' : {'date':'/2011/06/','new_slug':'conan-commencement-speech-is-really-for.html'},
'my-tedx-talk-on-civic-entrepreneurship' : {'date':'/2011/05/','new_slug':''},
'do-you-pave-roads-are-construct-walls' : {'date':'/2011/03/','new_slug':'do-you-pave-roads-or-construct-walls.html'},
'smsmybus-by-the-numbers-a-recap-of-the-first' : {'date':'/2011/02/','new_slug':'smsmybus-by-numbers-recap-of-first-year.html'},
'imagine-a-world-where-social-media-was-used-t' : {'date':'/2011/02/','new_slug':'imagine-world-where-social-media-was.html'},
'civic-entrepreneurship-the-pecha-kucha-talk-c' : {'date':'/2011/02/','new_slug':'civic-entrepreneurship-pecha-kucha-talk.html'},
'this-is-how-you-dont-give-someone-feedback' : {'date':'/2011/02/','new_slug':'this-is-how-you-don-give-someone.html'},
'launched-2010' : {'date':'/2011/01/','new_slug':''},
'quantifying-myself-a-2010-recap' : {'date':'/2010/12/','new_slug':'quantifying-myself-2010-recap.html'},
'boing-boings-startup-advice' : {'date':'/2010/12/','new_slug':'boing-boing-startup-advice.html'},
'civic-entrepreneurship-a-pecha-kucha-talk' : {'date':'/2010/11/','new_slug':'civic-entrepreneurship-pechakucha-talk.html'},
'hacking-education-with-twilio-powered-spellin' : {'date':'/2010/11/','new_slug':'hacking-education-with-twilio-powered.html'},
'pseudo-synchronous-voice-transcription-in-the' : {'date':'/2010/09/','new_slug':'pseudo-synchronous-voice-transcription.html'},
'proud-to-be-living-in-madison-a-barcampmadiso' : {'date':'/2010/09/','new_slug':'proud-to-be-living-in-madison.html'},
'rounding-the-hog-island-lighthouse-in-a-speed' : {'date':'/2010/08/','new_slug':'rounding-hog-island-lighthouse-in-speedo.html'},
'barcampmadison-is-coming-soon' : {'date':'/2010/08/','new_slug':''},
'adventures-in-performance-tuning-on-google-ap' : {'date':'/2010/08/','new_slug':'adventures-in-performance-tuning-on.html'},
'ive-seen-that-picture-before' : {'date':'/2010/08/','new_slug':'i-seen-that-picture-before.html'},
'debugging-your-app-remotely-via-twilio-sms' : {'date':'/2010/08/','new_slug':'debugging-your-app-remotely-via-twilio.html'},
'marathon-training-checkin-july-2010' : {'date':'/2010/07/','new_slug':''},
'i-was-interviewed-by-techinmadison-we-talked' : {'date':'/2010/06/','new_slug':'i-was-interviewed-by-techinmadison-we.html'},
'the-world-is-short-on-leaders' : {'date':'/2010/06/','new_slug':''},
'drive-the-surprising-truth-about-what-motivat-7' : {'date':'/2010/05/','new_slug':'drive-surprising-truth-about-what.html'},
'19-things-i-believe-with-help-from-bob-sutton' : {'date':'/2010/05/','new_slug':'19-things-i-believe-with-help-from-bob.html'},
'wonderful-advice-from-alice-waters' : {'date':'/2010/05/','new_slug':''},
'when-pet-projects-grow-up-and-become-somethin' : {'date':'/2010/05/','new_slug':'when-pet-projects-grow-up-and-become.html'},
'its-official-i-now-have-a-marathon-date-on-th' : {'date':'/2010/04/','new_slug':'it-official-i-now-have-marathon-date-on.html'},
'marathon-training-checkin-april-2010' : {'date':'/2010/03/','new_slug':''},
'vegas-baby-57' : {'date':'/2010/03/','new_slug':'vegas-baby.html'},
'my-web608-talk-on-google-app-engine' : {'date':'/2010/03/','new_slug':''},
'hiring-by-guy-kawasaki' : {'date':'/2010/03/','new_slug':''},
'a-bad-day-11' : {'date':'/2010/03/','new_slug':'a-bad-day.html'},
'using-gaes-task-queue-to-break-problems-down' : {'date':'/2010/03/','new_slug':'using-app-engine-task-queue-to-break.html'},
'software-release-humor' : {'date':'/2010/03/','new_slug':''},
'marathon-training-started-this-week' : {'date':'/2010/03/','new_slug':''},
'winter-might-actually-end-no-really-i-have-ev' : {'date':'/2010/03/','new_slug':'winter-might-actually-end-no-really-i.html'},
'the-sun-has-set-3' : {'date':'/2010/02/','new_slug':'the-sun-has-set.html'},
'the-evolution-of-the-web-and-standing-on-the' : {'date':'/2010/02/','new_slug':'the-evolution-of-web-and-standing-on.html'},
'mini-ceoship-advice-from-mark-pincus' : {'date':'/2010/01/','new_slug':''},
'vince-lombardi-was-wrong' : {'date':'/2010/01/','new_slug':''},
'facebooks-identity-crisis-privacy-does-matter' : {'date':'/2010/01/','new_slug':'facebook-identity-crisis-privacy-does.html'},
'kids-building-software-emmatracy' : {'date':'/2010/01/','new_slug':''},
'posterous-api-a-wish-list' : {'date':'/2010/01/','new_slug':'posterous-api-wish-list.html'},
'my-trees-year' : {'date':'/2009/12/','new_slug':'my-tree-year.html'},
'data-loss-2' : {'date':'/2009/12/','new_slug':'data-loss.html'},
'posterous-slideshows-inside-your-facebook-fee' : {'date':'/2009/12/','new_slug':'posterous-slideshows-inside-your.html'},
'posterous-google-translate-narrated-slideshow' : {'date':'/2009/12/','new_slug':'posterous-google-translate-narrated.html'},
'snowed-in-9' : {'date':'/2009/12/','new_slug':'snowed-in.html'},
'five-uses-for-ringerous' : {'date':'/2009/12/','new_slug':''},
'inspiration-by-paul-taylor' : {'date':'/2009/12/','new_slug':''},
'mini-youtube-tvs' : {'date':'/2009/11/','new_slug':''},
'google-app-engine-a-first-timers-experience' : {'date':'/2009/11/','new_slug':'google-app-engine-first-timer-experience.html'},
'fall-rain' : {'date':'/2009/11/','new_slug':''},
'the-ginkgo-mine-field' : {'date':'/2009/11/','new_slug':''},
'congratulations-for-the-ning-appathon-win' : {'date':'/2009/11/','new_slug':'congratulations-on-ning-appathon-win.html'},
'phoning-in-via-ringerous-1' : {'date':'/2009/11/','new_slug':'phoning-in-via-ringerous.html'},
'did-you-save-your-company' : {'date':'/2009/11/','new_slug':''},
'developer-humor' : {'date':'/2009/11/','new_slug':''},
'quantifying-your-message' : {'date':'/2009/11/','new_slug':''},
'signs-of-bird-evolution' : {'date':'/2009/10/','new_slug':''},
'google-calendar-goes-down-a-lesson-in-linguis' : {'date':'/2009/10/','new_slug':'google-calendar-goes-down-lesson-in.html'},
'moo-moo-brown-cow' : {'date':'/2009/10/','new_slug':''},
'hail-damage-in-the-garden' : {'date':'/2009/09/','new_slug':'hail-damage-in-garden.html'},
'deal-of-the-century' : {'date':'/2009/09/','new_slug':'deal-of-century.html'},
'we-can-do-better-2' : {'date':'/2009/09/','new_slug':'we-can-do-better.html'},
'take-your-posterous-blog-anywhere' : {'date':'/2009/09/','new_slug':''},
'reflecting-on-mints-170m-exit' : {'date':'/2009/09/','new_slug':'reflecting-on-mint-170m-exit.html'},
'twitter-is-bookmarking-your-saved-searches' : {'date':'/2009/09/','new_slug':'twitter-is-bookmarking-your-saved.html'},
'an-unintentional-oxymoron' : {'date':'/2009/09/','new_slug':''},
'worst-use-of-technology-ever' : {'date':'/2009/09/','new_slug':''},
'access-should-be-easy' : {'date':'/2009/09/','new_slug':''},
'venture-investor-interview' : {'date':'/2009/09/','new_slug':''},
'posterous-games' : {'date':'/2009/09/','new_slug':''},
'creativity-envy' : {'date':'/2009/08/','new_slug':''},
'kick-the-can' : {'date':'/2009/08/','new_slug':'kick-can.html'},
'google-wave-post-ii' : {'date':'/2009/08/','new_slug':''},
'i-just-got-my-google-wave-account-sweet-i-jus' : {'date':'/2009/08/','new_slug':'i-just-got-my-google-wave-account-sweet.html'},
'birding-hacks-ii-goldfinch' : {'date':'/2009/08/','new_slug':''},
'red-sox-rally-time' : {'date':'/2009/08/','new_slug':''},
'batting-stances-red-sox' : {'date':'/2009/08/','new_slug':''},
'birding-hacks' : {'date':'/2009/08/','new_slug':''},
'youre-all-awesome' : {'date':'/2009/08/','new_slug':'you-all-awesome.html'},
'thimbleberry' : {'date':'/2009/08/','new_slug':''},
'duct-tape-use-134-baby-gate' : {'date':'/2009/08/','new_slug':''},
'what-really-goes-on-when-a-dog-wags-its-tail' : {'date':'/2009/08/','new_slug':'what-really-goes-on-when-dog-wags-its.html'},
'start-reading-the-corner-office' : {'date':'/2009/08/','new_slug':'start-reading-corner-office.html'},
'which-way-do-i-go' : {'date':'/2009/08/','new_slug':''},
'its-all-about-search' : {'date':'/2009/07/','new_slug':'it-all-about-search.html'},
'when-instruction-becomes-ambiguous' : {'date':'/2009/07/','new_slug':''},
'a-plethora-of-buns' : {'date':'/2009/07/','new_slug':''},
'its-always-important-to-know-your-place' : {'date':'/2009/07/','new_slug':'it-always-important-to-know-your-place.html'},
'un-googley-help-interface-using-youtube' : {'date':'/2009/07/','new_slug':''},
'kuroshio-sea-20' : {'date':'/2009/07/','new_slug':'kuroshio-sea.html'},
'bring-smiles-to-unusual-places' : {'date':'/2009/07/','new_slug':''},
'google-ads-status-messages' : {'date':'/2009/07/','new_slug':'google-adds-status-messages.html'},
'google-looking-more-and-more-like-facebook-ev' : {'date':'/2009/07/','new_slug':'google-looking-more-and-more-like.html'},
'more-posterous-goodies' : {'date':'/2009/07/','new_slug':''},
'has-it-really-been-20-years' : {'date':'/2009/07/','new_slug':''},
'spreading-the-posterous-love' : {'date':'/2009/07/','new_slug':'spreading-posterous-love.html'},
'posterous-within-posterous' : {'date':'/2009/07/','new_slug':''},
'powerpoint-doesnt-deserve-this' : {'date':'/2009/07/','new_slug':'powerpoint-doesn-deserve-this.html'},
'dinosaur-spotted-in-the-woods-of-wisconsin' : {'date':'/2009/06/','new_slug':'dinosaur-spotted-in-woods-of-wisconsin.html'},
'eating-watermelon' : {'date':'/2009/06/','new_slug':''},
'googles-query-suggestions' : {'date':'/2009/06/','new_slug':'google-query-suggestions.html'},
'gls-slideshow' : {'date':'/2009/06/','new_slug':''},
'riding-to-work-with-the-mayor-is-a-joy' : {'date':'/2009/06/','new_slug':'riding-to-work-with-mayor-is-joy.html'},
'the-anti-climbing-tree' : {'date':'/2009/06/','new_slug':''},
'sorry-im-late-9' : {'date':'/2009/05/','new_slug':'i-late.html'},
'cookouts-made-easy' : {'date':'/2009/05/','new_slug':''},
'bratfest-logistics' : {'date':'/2009/05/','new_slug':''},
'state-stimulus-plan-in-horticulture' : {'date':'/2009/05/','new_slug':''},
'why-did-the-turkey-cross-the-road' : {'date':'/2009/05/','new_slug':'why-did-turkey-cross-road.html'},
'we-need-scratch-and-sniff-monitors' : {'date':'/2009/05/','new_slug':''},
'the-oddball' : {'date':'/2009/05/','new_slug':''},
'understanding-fuel-efficiency' : {'date':'/2009/05/','new_slug':''},
'kevin-youkilis-had-a-bad-day-and-now-we-are-c' : {'date':'/2009/04/','new_slug':'kevin-youkilis-had-bad-day-and-now-we.html'},
'glass-ceiling' : {'date':'/2009/04/','new_slug':''},
'magnetic-movie-0' : {'date':'/2009/04/','new_slug':'magnetic-movie.html'},
'philips-carousel-commercial-adam-berg-2' : {'date':'/2009/04/','new_slug':'philips-carousel-commercial-adam-berg.html'},
'mini-meatloaf-sandwich' : {'date':'/2009/04/','new_slug':''},
'destruction-is-inevitable' : {'date':'/2009/04/','new_slug':''},
'sibling-cruelty' : {'date':'/2009/04/','new_slug':''},
'little-touches' : {'date':'/2009/04/','new_slug':''},
'my-wife-has-a-great-product' : {'date':'/2009/04/','new_slug':'my-wife-has-great-product.html'},
'being-bucky-trailer' : {'date':'/2009/04/','new_slug':''},
'wireless-piles-of-books-everyone-wins' : {'date':'/2009/04/','new_slug':''},
'a-watchful-eye-over-fenway-par' : {'date':'/2009/03/','new_slug':'a-watchful-eye-over-fenway-park.html'},
'a-hockey-record-that-wont-be-b' : {'date':'/2009/03/','new_slug':'a-hockey-record-that-won-be-broken.html'},
'sleds-emerge-from-december' : {'date':'/2009/03/','new_slug':''},
'siftables-are-high-in-cool' : {'date':'/2009/03/','new_slug':''},
'when-is-summer-going-to-be-her' : {'date':'/2009/03/','new_slug':'when-is-summer-going-to-be-here.html'},
'a-view-from-japan' : {'date':'/2009/03/','new_slug':''},
'animoto-for-education' : {'date':'/2009/03/','new_slug':''},
'we-mail-cheese' : {'date':'/2009/02/','new_slug':''},
'discussion-7' : {'date':'/2009/02/','new_slug':'discussion.html'},
'indefatigable' : {'date':'/2009/02/','new_slug':''},
'disconnecting-from-the-interne' : {'date':'/2009/02/','new_slug':'disconnecting-from-internet.html'},
'ms-liberty-surfaces-on-lake-me' : {'date':'/2009/02/','new_slug':'ms-liberty-surfaces-on-lake-mendota.html'},
'daring-greatly' : {'date':'/2009/02/','new_slug':''},
'gigantic-boot-holes' : {'date':'/2009/02/','new_slug':''},
'humpty-dumpty-siting' : {'date':'/2009/01/','new_slug':''},
'lessons-in-architecture' : {'date':'/2009/01/','new_slug':''},
'running-and-reading-by-will-sm' : {'date':'/2009/01/','new_slug':'running-and-reading-by-will-smith.html'},
'the-art-of-a-presentation' : {'date':'/2009/01/','new_slug':'the-art-of-presentation.html'},
'thank-you-betty' : {'date':'/2009/01/','new_slug':''},
'my-weather-week' : {'date':'/2009/01/','new_slug':''},
'headhunter-emails' : {'date':'/2009/01/','new_slug':''},
'curiously-bad' : {'date':'/2009/01/','new_slug':''},
'birthday-cake-tastes-better' : {'date':'/2009/01/','new_slug':''},
'show-and-tell-surprises' : {'date':'/2008/12/','new_slug':''},
'new-toys-0' : {'date':'/2008/12/','new_slug':'new-toys.html'},
'tools-that-make-things-worse' : {'date':'/2008/12/','new_slug':''},
'hubris-vs-humility-by-seth-god' : {'date':'/2008/12/','new_slug':'hubris-vs-humility-by-seth-godin.html'},
'creator-vs-creation' : {'date':'/2008/12/','new_slug':''},
'theres-cold' : {'date':'/2008/12/','new_slug':'there-cold.html'},
'gingerbread-factory' : {'date':'/2008/12/','new_slug':'gingerbread-house-factory.html'},
'icing-on-a-cake' : {'date':'/2008/12/','new_slug':'icing-on-cake.html'},
'snow-piles' : {'date':'/2008/12/','new_slug':''},
'more-views' : {'date':'/2008/12/','new_slug':'capital-views.html'},
'strong-like-bull' : {'date':'/2008/12/','new_slug':''},
'capital-views' : {'date':'/2008/12/','new_slug':''},
'views-from-the-square' : {'date':'/2008/12/','new_slug':'views-from-square.html'},
'reminders-of-summer' : {'date':'/2008/11/','new_slug':''},
'exhilarating-cedary-aroma' : {'date':'/2008/11/','new_slug':''},
'lessons-from-packerland' : {'date':'/2008/11/','new_slug':''},
'the-first-snowfall' : {'date':'/2008/11/','new_slug':''},
'knuckledheads-on-board' : {'date':'/2008/11/','new_slug':''},
'starting-the-pushup-challenge' : {'date':'/2008/11/','new_slug':'starting-pushup-challenge-again.html'},
'voting-for-the-best-choice' : {'date':'/2008/11/','new_slug':'voting-for-best-choice.html'},
'life-paths-cross' : {'date':'/2008/10/','new_slug':''},
'halloween-for-kids' : {'date':'/2008/10/','new_slug':''},
'dont-park-here' : {'date':'/2008/10/','new_slug':'don-park-here.html'},
'what-am-i' : {'date':'/2008/10/','new_slug':''},
'titoism-in-the-playoffs' : {'date':'/2008/10/','new_slug':'titoism-in-playoffs.html'},
'ethical-wisdom' : {'date':'/2008/10/','new_slug':''},
    'lesson-learned-1' : {'date':'/2008/10/','new_slug':'lesson-learned.html'},
'stat-of-the-day-3' : {'date':'/2008/09/','new_slug':'stat-of-day_30.html'},
'stat-of-the-day-2' : {'date':'/2008/09/','new_slug':'stat-of-day_28.html'},
'tv-drama' : {'date':'/2008/09/','new_slug':''},
'mrs-mallard' : {'date':'/2008/09/','new_slug':''},
'cool-airport-tricks' : {'date':'/2008/09/','new_slug':''},
'new-york-in-under-24-hours' : {'date':'/2008/09/','new_slug':''},
'design-principles-at-google' : {'date':'/2008/09/','new_slug':''},
'a-mobile-opportunity' : {'date':'/2008/09/','new_slug':''},
'stat-of-the-day-1' : {'date':'/2008/09/','new_slug':'stat-of-day.html'},
'october-must-be-near' : {'date':'/2008/09/','new_slug':''},
'more-sharendipity-goodness' : {'date':'/2008/08/','new_slug':''},
'good-things-come-in-small-pack' : {'date':'/2008/08/','new_slug':'good-things-come-in-small-packages.html'},
'learned-something-today' : {'date':'/2008/08/','new_slug':''},
'uw-madison-computer-sciences-r' : {'date':'/2008/08/','new_slug':'uw-madison-computer-sciences-reunion.html'},
'urban-assault-ride-survivor' : {'date':'/2008/08/','new_slug':''},
'100-million-and-counting' : {'date':'/2008/08/','new_slug':''},
'babys-view' : {'date':'/2008/08/','new_slug':'baby-view.html'},
'moving-day-1' : {'date':'/2008/08/','new_slug':'moving-day.html'},
'just-seen-on-tv' : {'date':'/2008/08/','new_slug':''},
'fish-eye-view' : {'date':'/2008/08/','new_slug':''},
'stat-of-the-day' : {'date':'/2008/08/','new_slug':'stat-of-day.html'},
'those-pesky-segways' : {'date':'/2008/08/','new_slug':''},
'why-are-manhole-covers-round' : {'date':'/2008/08/','new_slug':''},
'quote-of-the-day-3' : {'date':'/2008/08/','new_slug':'quote-of-day.html'},
'saw-this-and-laughed' : {'date':'/2008/08/','new_slug':''},
'julio-who' : {'date':'/2008/08/','new_slug':''},
'random-stat-of-the-day' : {'date':'/2008/08/','new_slug':'random-stat-of-day.html'},
'love-that-face' : {'date':'/2008/07/','new_slug':''},
'factoid-of-the-day-1' : {'date':'/2008/07/','new_slug':'factoid-of-day_28.html'},
'old-faces' : {'date':'/2008/07/','new_slug':''},
'red-sox-yankees' : {'date':'/2008/07/','new_slug':''},
'sunken-boat' : {'date':'/2008/07/','new_slug':''},
'swimming-in-narragansett' : {'date':'/2008/07/','new_slug':''},
'random-acts-of-excercise' : {'date':'/2008/07/','new_slug':'random-acts-of-exercise.html'},
'bits-of-paper' : {'date':'/2008/07/','new_slug':''},
'barcamp-madison-finally' : {'date':'/2008/07/','new_slug':''},
'4-days-and-counting' : {'date':'/2008/07/','new_slug':''},
'6-days-and-counting' : {'date':'/2008/07/','new_slug':''},
'7-days-and-counting' : {'date':'/2008/07/','new_slug':''},
'8-days-and-counting' : {'date':'/2008/07/','new_slug':''},
'9-days-and-counting' : {'date':'/2008/07/','new_slug':''},
'manny-being-manny' : {'date':'/2008/07/','new_slug':''},
'10-days-and-counting' : {'date':'/2008/07/','new_slug':''},
'sharendipity-in-the-press' : {'date':'/2008/07/','new_slug':'sharendipity-in-press.html'},
'11-days-and-counting' : {'date':'/2008/07/','new_slug':''},
'12-where-did-you-go-days-and-c' : {'date':'/2008/07/','new_slug':'12-where-did-you-go-days-and-counting.html'},
'solipsistic-sighting' : {'date':'/2008/07/','new_slug':''},
'13-days-and-counting' : {'date':'/2008/07/','new_slug':''},
'backyard-fun' : {'date':'/2008/07/','new_slug':''},
'14-days-and-counting' : {'date':'/2008/07/','new_slug':''},
'oh-my' : {'date':'/2008/07/','new_slug':''},
'the-energy-conundrum-is-really' : {'date':'/2008/07/','new_slug':'the-energy-conundrum-is-really-all.html'},
'factoid-of-the-day' : {'date':'/2008/07/','new_slug':'factoid-of-day.html'},
'if-you-follow-everyone-you-fol' : {'date':'/2008/07/','new_slug':'if-you-follow-everyone-you-follow-no-one.html'},
'from-the-tough-luck-club' : {'date':'/2008/07/','new_slug':'from-tough-luck-club.html'},
}
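# Sketch of how an entry above could be turned into a redirect target
# (the base URL is a placeholder; an empty 'new_slug' is taken to mean
# that no redirect target exists for that post).
def _redirect_for(old_slug, base='http://example.blogspot.com'):
    entry = slugs.get(old_slug)
    if entry is None or not entry['new_slug']:
        return None
    return base + entry['date'] + entry['new_slug']
# e.g. _redirect_for('vegas-baby-57') ->
#      'http://example.blogspot.com/2010/03/vegas-baby.html'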
|
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the Oppia editor view."""
__author__ = 'sll@google.com (Sean Lip)'
import json
from apps.exploration.models import Exploration
from apps.parameter.models import Parameter
from apps.parameter.models import ParamChange
from apps.state.models import AnswerHandlerInstance
from apps.state.models import Content
from apps.state.models import Rule
from apps.state.models import State
from apps.statistics.models import Statistics
from apps.statistics.models import STATS_ENUMS
from apps.widget.models import InteractiveWidget
from controllers.base import BaseHandler
from controllers.base import require_editor
from controllers.base import require_user
import feconf
import utils
from google.appengine.api import users
EDITOR_MODE = 'editor'
def get_state_for_frontend(state, exploration):
"""Returns a representation of the given state for the frontend."""
state_repr = state.as_dict()
modified_state_dict = state.internals_as_dict(human_readable_dests=True)
# TODO(sll): The following is for backwards-compatibility and should be
# deleted later.
rules = {}
for handler in state_repr['widget']['handlers']:
rules[handler['name']] = handler['rules']
for item in rules[handler['name']]:
if item['name'] == 'Default':
item['rule'] = 'Default'
else:
item['rule'] = InteractiveWidget.get(
state.widget.widget_id).get_readable_name(
handler['name'], item['name']
)
state_repr['widget']['rules'] = rules
state_repr['widget']['id'] = state_repr['widget']['widget_id']
state_repr['yaml'] = utils.yaml_from_dict(modified_state_dict)
return state_repr
def get_exploration_stats(exploration):
"""Returns a dict with stats for the given exploration."""
num_visits = Statistics.get_exploration_stats(
STATS_ENUMS.exploration_visited, exploration.id)
num_completions = Statistics.get_exploration_stats(
STATS_ENUMS.exploration_completed, exploration.id)
answers = Statistics.get_exploration_stats(
STATS_ENUMS.rule_hit, exploration.id)
state_counts = Statistics.get_exploration_stats(
STATS_ENUMS.state_hit, exploration.id)
state_stats = {}
for state_id in answers.keys():
state_stats[state_id] = {
'name': answers[state_id]['name'],
'count': state_counts[state_id]['count'],
'rule_stats': {},
}
for rule in answers[state_id]['rules'].keys():
state_stats[state_id]['rule_stats'][rule] = answers[state_id]['rules'][rule]
state_stats[state_id]['rule_stats'][rule]['count'] = 0
for _, count in answers[state_id]['rules'][rule]['answers']:
state_stats[state_id]['rule_stats'][rule]['count'] += count
return {
'num_visits': num_visits,
'num_completions': num_completions,
'state_stats': state_stats,
}
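# Shape of the returned dict (illustrative values):
#     {'num_visits': 10,
#      'num_completions': 4,
#      'state_stats': {state_id: {'name': ..., 'count': ...,
#                                 'rule_stats': {rule: {..., 'count': ...}}}}}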
class NewExploration(BaseHandler):
"""Creates a new exploration."""
@require_user
def post(self, user):
"""Handles POST requests."""
payload = json.loads(self.request.get('payload'))
title = payload.get('title')
category = payload.get('category')
if not title:
raise self.InvalidInputException('No title supplied.')
if not category:
raise self.InvalidInputException('No category chosen.')
yaml = self.request.get('yaml')
if yaml and feconf.ALLOW_YAML_FILE_UPLOAD:
exploration = Exploration.create_from_yaml(
yaml_file=yaml, user=user, title=title, category=category)
else:
exploration = Exploration.create(
user, title=title, category=category)
self.response.write(json.dumps({
'explorationId': exploration.id,
}))
class ForkExploration(BaseHandler):
"""Forks an existing exploration."""
@require_user
def post(self, user):
"""Handles POST requests."""
payload = json.loads(self.request.get('payload'))
exploration_id = payload.get('exploration_id')
forked_exploration = Exploration.get(exploration_id)
if not forked_exploration.is_demo_exploration():
raise self.InvalidInputException('Exploration cannot be forked.')
# Get the demo exploration as a YAML file, so that new states can be
# created.
yaml = forked_exploration.as_yaml()
title = 'Copy of %s' % forked_exploration.title
category = forked_exploration.category
exploration = Exploration.create_from_yaml(
yaml_file=yaml, user=user, title=title, category=category)
self.response.write(json.dumps({
'explorationId': exploration.id,
}))
class ExplorationPage(BaseHandler):
"""Page describing a single exploration."""
@require_editor
def get(self, unused_user, unused_exploration):
"""Handles GET requests."""
self.values.update({
'js': utils.get_js_controllers(
['editorExploration', 'editorTree', 'editorGraph',
'guiEditor', 'yamlEditor', 'interactiveWidgetPreview']),
'nav_mode': EDITOR_MODE,
})
self.render_template('editor/editor_exploration.html')
class ExplorationHandler(BaseHandler):
"""Page with editor data for a single exploration."""
@require_editor
def get(self, unused_user, exploration):
"""Gets the question name and state list for a question page."""
state_list = {}
for state_key in exploration.states:
state = state_key.get()
state_list[state.id] = get_state_for_frontend(state, exploration)
parameters = []
for param in exploration.parameters:
parameters.append({'name': param.name, 'type': param.obj_type})
self.values.update({
'exploration_id': exploration.id,
'init_state_id': exploration.init_state.get().id,
'is_public': exploration.is_public,
'image_id': exploration.image_id,
'category': exploration.category,
'title': exploration.title,
'editors': [editor.nickname() for editor in exploration.editors],
'states': state_list,
'parameters': parameters,
})
statistics = get_exploration_stats(exploration)
self.values.update({
'num_visits': statistics['num_visits'],
'num_completions': statistics['num_completions'],
'state_stats': statistics['state_stats'],
})
self.response.write(json.dumps(self.values))
@require_editor
def post(self, unused_user, exploration):
"""Adds a new state to the given exploration."""
payload = json.loads(self.request.get('payload'))
state_name = payload.get('state_name')
if not state_name:
raise self.InvalidInputException('Please specify a state name.')
state = exploration.add_state(state_name)
self.response.write(json.dumps(state.as_dict()))
@require_editor
def put(self, user, exploration):
"""Updates properties of the given exploration."""
payload = json.loads(self.request.get('payload'))
is_public = payload.get('is_public')
category = payload.get('category')
title = payload.get('title')
image_id = payload.get('image_id')
editors = payload.get('editors')
parameters = payload.get('parameters')
if is_public:
exploration.is_public = True
if category:
exploration.category = category
if title:
exploration.title = title
if 'image_id' in payload:
exploration.image_id = None if image_id == 'null' else image_id
if editors:
if exploration.editors and user == exploration.editors[0]:
exploration.editors = []
for email in editors:
editor = users.User(email=email)
exploration.editors.append(editor)
else:
raise self.UnauthorizedUserException(
'Only the exploration owner can add new collaborators.')
if parameters:
exploration.parameters = [
Parameter(name=item['name'], obj_type=item['type'])
for item in parameters
]
exploration.put()
@require_editor
def delete(self, unused_user, exploration):
"""Deletes the given exploration."""
exploration.delete()
class ExplorationDownloadHandler(BaseHandler):
"""Downloads an exploration as a YAML file."""
@require_editor
def get(self, unused_user, exploration):
"""Handles GET requests."""
        title = utils.to_ascii(exploration.title)
        filename = 'oppia-%s' % title if title else feconf.DEFAULT_FILE_NAME
self.response.headers['Content-Type'] = 'text/plain'
self.response.headers['Content-Disposition'] = (
'attachment; filename=%s.txt' % filename)
# TODO(sll): Cache the YAML file.
self.response.write(exploration.as_yaml())
class StateHandler(BaseHandler):
"""Handles state transactions."""
@require_editor
def put(self, unused_user, exploration, state):
"""Saves updates to a state."""
payload = json.loads(self.request.get('payload'))
yaml_file = payload.get('yaml_file')
if yaml_file and feconf.ALLOW_YAML_FILE_UPLOAD:
# The user has uploaded a YAML file. Process only this action.
state = State.modify_using_dict(
exploration, state,
utils.dict_from_yaml(yaml_file))
self.response.write(json.dumps(
get_state_for_frontend(state, exploration)))
return
state_name = payload.get('state_name')
param_changes = payload.get('param_changes')
interactive_widget = payload.get('interactive_widget')
interactive_params = payload.get('interactive_params')
interactive_rulesets = payload.get('interactive_rulesets')
sticky_interactive_widget = payload.get('sticky_interactive_widget')
content = payload.get('content')
unresolved_answers = payload.get('unresolved_answers')
if 'state_name' in payload:
# Replace the state name with this one, after checking validity.
if state_name == feconf.END_DEST:
raise self.InvalidInputException('Invalid state name: END')
exploration.rename_state(state, state_name)
if 'param_changes' in payload:
state.param_changes = [
ParamChange(
name=param_change['name'], values=param_change['values'],
obj_type='UnicodeString'
) for param_change in param_changes
]
if interactive_widget:
state.widget.widget_id = interactive_widget
if interactive_params:
state.widget.params = interactive_params
if sticky_interactive_widget is not None:
state.widget.sticky = sticky_interactive_widget
if interactive_rulesets:
ruleset = interactive_rulesets['submit']
utils.recursively_remove_key(ruleset, u'$$hashKey')
state.widget.handlers = [AnswerHandlerInstance(
name='submit', rules=[])]
            # state_ruleset aliases the rules list inside the state's handler;
            # rules appended below become part of the state itself.
state_ruleset = state.widget.handlers[0].rules
# TODO(yanamal): Do additional calculations here to get the
# parameter changes, if necessary.
for rule_ind in range(len(ruleset)):
rule = ruleset[rule_ind]
state_rule = Rule()
state_rule.name = rule.get('name')
state_rule.inputs = rule.get('inputs')
state_rule.dest = rule.get('dest')
state_rule.feedback = rule.get('feedback')
# Generate the code to be executed.
if rule['rule'] == 'Default':
# This is the default rule.
assert rule_ind == len(ruleset) - 1
state_rule.name = 'Default'
state_ruleset.append(state_rule)
continue
# Normalize the params here, then store them.
classifier_func = state_rule.name.replace(' ', '')
first_bracket = classifier_func.find('(')
mutable_rule = rule['rule']
params = classifier_func[first_bracket + 1: -1].split(',')
for index, param in enumerate(params):
if param not in rule['inputs']:
raise self.InvalidInputException(
'Parameter %s could not be replaced.' % param)
typed_object = state.get_typed_object(mutable_rule, param)
# TODO(sll): Make the following check more robust.
if (not isinstance(rule['inputs'][param], basestring) or
'{{' not in rule['inputs'][param] or
'}}' not in rule['inputs'][param]):
normalized_param = typed_object.normalize(
rule['inputs'][param])
else:
normalized_param = rule['inputs'][param]
if normalized_param is None:
raise self.InvalidInputException(
'%s has the wrong type. Please replace it with a '
'%s.' % (rule['inputs'][param],
typed_object.__name__))
state_rule.inputs[param] = normalized_param
state_ruleset.append(state_rule)
if content:
state.content = [Content(type=item['type'], value=item['value'])
for item in content]
if 'unresolved_answers' in payload:
state.unresolved_answers = {}
for answer, count in unresolved_answers.iteritems():
if count > 0:
state.unresolved_answers[answer] = count
state.put()
self.response.write(json.dumps(
get_state_for_frontend(state, exploration)))
@require_editor
def delete(self, unused_user, exploration, state):
"""Deletes the state with id state_id."""
# Do not allow deletion of initial states.
if exploration.init_state == state.key:
raise self.InvalidInputException(
'Cannot delete initial state of an exploration.')
# Find all dests in this exploration which equal the state to be
# deleted, and change them to loop back to their containing state.
for state_key in exploration.states:
origin_state = state_key.get()
changed = False
for handler in origin_state.widget.handlers:
for rule in handler.rules:
if rule.dest == state.id:
rule.dest = origin_state.id
changed = True
if changed:
origin_state.put()
# Delete the state with id state_id.
state.key.delete()
exploration.states.remove(state.key)
exploration.put()
|
|
#
# Copyright (C) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
import requests
from networking_l2gw.services.l2gateway.common import constants as l2gw_const
from networking_sfc.extensions import flowclassifier as fc_const
from networking_sfc.extensions import sfc as sfc_const
from neutron_lib.api.definitions import bgpvpn as bgpvpn_const
from neutron_lib.callbacks import resources
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from networking_odl.bgpvpn import odl_v2 as bgpvpn_driver
from networking_odl.common import constants as odl_const
from networking_odl.common import exceptions
from networking_odl.db import db
from networking_odl.journal import base_driver
from networking_odl.journal import full_sync
from networking_odl.journal import journal
from networking_odl.l2gateway import driver_v2 as l2gw_driver
from networking_odl.l3 import l3_odl_v2
from networking_odl.ml2 import mech_driver_v2
from networking_odl.qos import qos_driver_v2 as qos_driver
from networking_odl.sfc.flowclassifier import sfc_flowclassifier_v2
from networking_odl.sfc import sfc_driver_v2 as sfc_driver
from networking_odl.tests import base
from networking_odl.tests.unit.journal import helper
from networking_odl.tests.unit import test_base_db
from networking_odl.trunk import trunk_driver_v2 as trunk_driver
class FullSyncTestCase(test_base_db.ODLBaseDbTestCase):
def setUp(self):
self.useFixture(
base.OpenDaylightRestClientGlobalFixture(full_sync._CLIENT))
super(FullSyncTestCase, self).setUp()
self._CLIENT = full_sync._CLIENT.get_client()
self.addCleanup(full_sync.FULL_SYNC_RESOURCES.clear)
        # NOTE(rajivk): workaround. Fixtures are only cleaned up after the
        # whole test run, but this cleanup is needed after each test case.
self.addCleanup(self._clean_registered_plugins)
def _clean_registered_plugins(self):
for plugin_type in self._get_all_plugins().keys():
directory.add_plugin(plugin_type, None)
def test_no_full_sync_when_canary_exists(self):
full_sync.full_sync(self.db_context)
self.assertEqual([], db.get_all_db_rows(self.db_context))
def _filter_out_canary(self, rows):
return [row for row in rows if row['object_uuid'] !=
full_sync._CANARY_NETWORK_ID]
def _mock_l2_resources(self):
expected_journal = {odl_const.ODL_NETWORK: '1',
odl_const.ODL_SUBNET: '2',
odl_const.ODL_PORT: '3'}
network_id = expected_journal[odl_const.ODL_NETWORK]
plugin = mock.Mock()
plugin.get_networks.return_value = [{'id': network_id}]
plugin.get_subnets.return_value = [
{'id': expected_journal[odl_const.ODL_SUBNET],
'network_id': network_id}]
port = {'id': expected_journal[odl_const.ODL_PORT],
odl_const.ODL_SGS: None,
'tenant_id': '123',
'fixed_ips': [],
'network_id': network_id}
plugin.get_ports.side_effect = ([port], [])
directory.add_plugin(constants.CORE, plugin)
return expected_journal
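    # NOTE: get_ports.side_effect iterates per call: the first call returns
    # [port] and any later call returns [], so the port is recorded exactly
    # once even if ports are queried more than once during the sync.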
def _test_no_full_sync_when_canary_in_journal(self, state):
self._mock_canary_missing()
self._mock_l2_resources()
db.create_pending_row(self.db_context, odl_const.ODL_NETWORK,
full_sync._CANARY_NETWORK_ID,
odl_const.ODL_CREATE, {})
row = db.get_all_db_rows(self.db_context)[0]
db.update_db_row_state(self.db_context, row, state)
full_sync.full_sync(self.db_context)
rows = db.get_all_db_rows(self.db_context)
self.assertEqual([], self._filter_out_canary(rows))
def test_no_full_sync_when_canary_pending_creation(self):
self._test_no_full_sync_when_canary_in_journal(odl_const.PENDING)
def test_no_full_sync_when_canary_is_processing(self):
self._test_no_full_sync_when_canary_in_journal(odl_const.PROCESSING)
@staticmethod
def _get_all_resources():
return (
(odl_const.ODL_SG, constants.CORE),
(odl_const.ODL_SG_RULE, constants.CORE),
(odl_const.ODL_NETWORK, constants.CORE),
(odl_const.ODL_SUBNET, constants.CORE),
(odl_const.ODL_ROUTER, constants.L3),
(odl_const.ODL_PORT, constants.CORE),
(odl_const.ODL_FLOATINGIP, constants.L3),
(odl_const.ODL_QOS_POLICY, constants.QOS),
(odl_const.ODL_TRUNK, resources.TRUNK),
(odl_const.ODL_BGPVPN, bgpvpn_const.ALIAS),
(odl_const.ODL_BGPVPN_NETWORK_ASSOCIATION, bgpvpn_const.ALIAS),
(odl_const.ODL_BGPVPN_ROUTER_ASSOCIATION, bgpvpn_const.ALIAS),
(odl_const.ODL_SFC_FLOW_CLASSIFIER, fc_const.FLOW_CLASSIFIER_EXT),
(odl_const.ODL_SFC_PORT_PAIR, sfc_const.SFC_EXT),
(odl_const.ODL_SFC_PORT_PAIR_GROUP, sfc_const.SFC_EXT),
(odl_const.ODL_SFC_PORT_CHAIN, sfc_const.SFC_EXT),
(odl_const.ODL_L2GATEWAY, l2gw_const.L2GW),
(odl_const.ODL_L2GATEWAY_CONNECTION, l2gw_const.L2GW))
@mock.patch.object(db, 'delete_pending_rows')
@mock.patch.object(full_sync, '_full_sync_needed')
@mock.patch.object(full_sync, '_sync_resources')
@mock.patch.object(journal, 'record')
def test_sync_resource_order(
self, record_mock, _sync_resources_mock, _full_sync_needed_mock,
delete_pending_rows_mock):
all_resources = self._get_all_resources()
full_sync.FULL_SYNC_RESOURCES = {resource_type: mock.Mock()
for resource_type, _ in all_resources}
        _full_sync_needed_mock.return_value = True
context = mock.MagicMock()
full_sync.full_sync(context)
_sync_resources_mock.assert_has_calls(
[mock.call(mock.ANY, object_type, mock.ANY)
for object_type, _ in all_resources])
def test_client_error_propagates(self):
class TestException(Exception):
def __init__(self):
pass
self._CLIENT.get.side_effect = TestException()
self.assertRaises(TestException, full_sync.full_sync, self.db_context)
def _mock_canary_missing(self):
get_return = mock.MagicMock()
get_return.status_code = requests.codes.not_found
self._CLIENT.get.return_value = get_return
def _assert_canary_created(self):
rows = db.get_all_db_rows(self.db_context)
self.assertTrue(any(r['object_uuid'] == full_sync._CANARY_NETWORK_ID
for r in rows))
return rows
def _test_full_sync_resources(self, expected_journal):
self._mock_canary_missing()
directory.add_plugin(constants.CORE, mock.Mock())
full_sync.full_sync(self.db_context)
rows = self._assert_canary_created()
rows = self._filter_out_canary(rows)
self.assertCountEqual(expected_journal.keys(),
[row['object_type'] for row in rows])
for row in rows:
self.assertEqual(expected_journal[row['object_type']],
row['object_uuid'])
def test_full_sync_removes_pending_rows(self):
db.create_pending_row(self.db_context, odl_const.ODL_NETWORK, "uuid",
odl_const.ODL_CREATE, {'foo': 'bar'})
self._test_full_sync_resources({})
def test_full_sync_no_resources(self):
self._test_full_sync_resources({})
@staticmethod
def _get_mocked_security_groups(context):
return [{'description': 'description',
'security_group_rules': ['security_grp_rules'],
'id': 'test_uuid', 'name': 'default'}]
@staticmethod
def _get_mocked_security_group_rules(context):
return [{'direction': 'egress', 'protocol': None,
'description': 'description', 'port_range_max': None,
'id': 'test_uuid', 'security_group_id': 'test_uuid'}]
@staticmethod
def _get_mocked_networks(context):
return [{'id': 'test_uuid', 'project_id': 'project_id',
'status': 'ACTIVE', 'subnets': [], 'description': '',
'name': 'network0'}]
@staticmethod
def _get_mocked_subnets(context):
return [{'description': '', 'cidr': 'test-cidr', 'id': 'test_uuid',
'name': 'test-subnet', 'network_id': 'test_uuid',
'gateway_ip': 'gateway_ip'}]
@staticmethod
def _get_mocked_routers(context):
return [{'status': 'ACTIVE', 'description': '', 'name': 'router1',
'id': 'test_uuid'}]
@staticmethod
def _get_mocked_ports(context):
return [{'status': 'DOWN', 'description': None, 'id': 'test_uuid',
'name': 'loadbalancer-27', 'network_id': 'test_uuid',
'mac_address': 'fa:16:3e:69:4e:33'}]
@staticmethod
def _get_mocked_loadbalancers(context):
return [{'description': '', 'tenant_id': 'tenant_id',
'vip_subnet_id': 'subnet_id', 'listeners': [],
'vip_address': '10.1.0.11', 'vip_port_id': 'port_id',
'pools': [], 'id': 'test_uuid', 'name': 'test-lb'}]
@staticmethod
def _get_mocked_listeners(context):
return [{'admin_state_up': True, 'project_id': 'test_uuid',
'id': 'test_uuid'}]
@staticmethod
def _get_mocked_trunks(context):
return [{'routers': [], 'id': 'test_uuid', 'name': '',
'tenant_id': 'project_id', 'networks': [], 'route_targets': [
'64512:1'], 'project_id': 'project_id', 'type': 'l3'},
{'routers': [], 'id': 'test_uuid', 'name': '',
'tenant_id': 'tenant_id', 'networks': [], 'route_targets': [
'64512:1'], 'project_id': 'project_id', 'type': 'l3'}]
@staticmethod
def _get_mocked_bgpvpns(context):
return [{'network_id': 'test_uuid', 'bgpvpn_id': 'test_uuid',
'project_id': 'test_uuid', 'id': 'test_uuid'}]
@staticmethod
def _get_mocked_l2_gateways(context):
return [{'tenant_id': 'test_tenant_id', 'id': 'test_uuid',
'devices': [{'interfaces': [{'name': 'eth3'}],
'id': 'test_uuid', 'device_name': 'vtep0'}],
'name': 'test-gateway'}]
@staticmethod
def _get_mocked_l2_gateway_connections(context):
return [{'network_id': 'test_uuid', 'tenant_id': 'test_uuid',
'l2_gateway_id': 'test_uuid', 'id': 'test_uuid'}]
@staticmethod
def _get_mocked_pools(context):
return [{'name': 'pool1', 'admin_state_up': True,
'project_id': 'test_uuid', 'id': 'test_uuid'}]
@staticmethod
def _get_mocked_pool_members(context, pool_id):
return [{'name': 'pool1', 'admin_state_up': True,
'project_id': 'test_uuid', 'id': 'test_uuid'}]
@staticmethod
def _get_mocked_healthmonitors(context):
return [{'type': 'HTTP', 'admin_state_up': True,
'project_id': 'test_uuid', 'id': 'test_uuid',
'name': 'monitor1'}]
@staticmethod
def _get_mocked_listener(context):
return [{'admin_state_up': True, 'project_id': 'test_uuid',
'id': 'test_uuid'}]
@staticmethod
def _get_mocked_floatingips(context):
return [{'floating_network_id': 'test_uuid', 'tenant_id': 'test_uuid',
'dns_name': '', 'dns_domain': '', 'id': 'test_uuid'}]
@staticmethod
def _get_mocked_policies(context):
return [{'id': 'test_uuid', 'project_id': 'test_uuid',
'name': 'test-policy', 'description': 'Policy description',
'shared': True, 'is_default': False}]
@staticmethod
def _get_mocked_bgpvpn_network_associations(context, bgpvpn_id):
return [{'network_id': 'test_uuid', 'tenant_id': 'test_uuid',
'id': 'test_uuid'}]
@staticmethod
def _get_mocked_bgpvpn_router_associations(context, bgpvpn_id):
return [{'router_id': 'test_uuid', 'tenant_id': 'test_uuid',
'id': 'test_uuid'}]
@staticmethod
def _get_mocked_port_chains(context):
tenant_id = 'test_uuid'
return [{'tenant_id': tenant_id, 'project_id': tenant_id,
'id': 'test_uuid'}]
@staticmethod
def _get_mocked_port_pair_groups(context):
tenant_id = 'test_uuid'
return [{'tenant_id': tenant_id, 'project_id': tenant_id,
'id': 'test_uuid'}]
@staticmethod
def _get_mocked_port_pairs(context):
tenant_id = 'test_uuid'
return [{'tenant_id': tenant_id, 'project_id': tenant_id,
'id': 'test_uuid'}]
@staticmethod
def _get_mocked_flowclassifiers(context):
tenant_id = 'test_uuid'
return [{'tenant_id': tenant_id, 'project_id': tenant_id,
'id': 'test_uuid'}]
@staticmethod
def _get_all_plugins():
return {
constants.CORE: (mock.Mock(),
mech_driver_v2.OpenDaylightMechanismDriver),
constants.L3: (mock.Mock(), l3_odl_v2.OpenDaylightL3RouterPlugin),
resources.TRUNK: (mock.Mock(),
trunk_driver.OpenDaylightTrunkHandlerV2),
constants.QOS: (mock.Mock(), qos_driver.OpenDaylightQosDriver),
sfc_const.SFC_EXT: (mock.Mock(),
sfc_driver.OpenDaylightSFCDriverV2),
bgpvpn_const.ALIAS: (mock.Mock(),
bgpvpn_driver.OpenDaylightBgpvpnDriver),
fc_const.FLOW_CLASSIFIER_EXT: (
mock.Mock(),
sfc_flowclassifier_v2.OpenDaylightSFCFlowClassifierDriverV2),
l2gw_const.L2GW: (mock.Mock(), l2gw_driver.OpenDaylightL2gwDriver)
}
@staticmethod
def _get_name(resource_type):
mapping = {
odl_const.ODL_QOS_POLICY: odl_const.ODL_QOS_POLICIES,
            odl_const.ODL_SFC_FLOW_CLASSIFIER:
                odl_const.NETWORKING_SFC_FLOW_CLASSIFIERS,
odl_const.ODL_SFC_PORT_PAIR:
odl_const.NETWORKING_SFC_PORT_PAIRS,
odl_const.ODL_SFC_PORT_PAIR_GROUP:
odl_const.NETWORKING_SFC_PORT_PAIR_GROUPS,
odl_const.ODL_SFC_PORT_CHAIN: odl_const.NETWORKING_SFC_PORT_CHAINS,
odl_const.ODL_L2GATEWAY_CONNECTION:
odl_const.ODL_L2GATEWAY_CONNECTIONS}
return ('_get_mocked_%s' % mapping.get(
resource_type, resource_type + 's'))
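    # For example (assuming the usual odl_const naming),
    # _get_name(odl_const.ODL_NETWORK) returns '_get_mocked_networks' via the
    # default "+ 's'" rule; the mapping above covers the irregular plurals.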
def _add_side_effect(self):
plugins = self._get_all_plugins()
resources = self._get_all_resources()
for resource_type, plugin_name in resources:
name = self._get_name(resource_type)
setattr(plugins[plugin_name][0], "get_%s" % name[12:],
getattr(self, name))
if directory.get_plugin(plugin_name) is None:
directory.add_plugin(plugin_name, plugins[plugin_name][0])
@mock.patch.object(journal, 'record')
def _test_sync_resources(self, object_type, plugin_type, mocked_record):
plugins = self._get_all_plugins()
driver = plugins[plugin_type][1]
args = [mock.Mock()]
if object_type in [odl_const.ODL_BGPVPN_ROUTER_ASSOCIATION,
odl_const.ODL_BGPVPN_NETWORK_ASSOCIATION]:
args.append(mock.Mock())
resources = getattr(self, self._get_name(object_type))(*args)
context = mock.Mock()
def _test_get_default_handler(context, resource_type,
plugin_type=plugin_type):
resource_type = self._get_name(resource_type)[12:]
return full_sync.get_resources(context, plugin_type=plugin_type,
resource_type=resource_type)
handler = getattr(driver, 'get_resources', _test_get_default_handler)
full_sync._sync_resources(context, object_type, handler)
mocked_record.assert_has_calls(
[mock.call(context, object_type, resource['id'],
odl_const.ODL_CREATE,
resource) for resource in resources])
def test_sync_all_resources(self):
self._add_side_effect()
resources = self._get_all_resources()
for obj_type, plugin_name in resources:
self._test_sync_resources(obj_type, plugin_name)
def test_full_sync_retries_exceptions(self):
with mock.patch.object(full_sync, '_full_sync_needed') as m:
self._test_retry_exceptions(full_sync.full_sync, m)
def test_object_not_registered(self):
self.assertRaises(exceptions.ResourceNotRegistered,
full_sync.sync_resources,
self.db_context,
'test-object-type')
self.assertEqual([], db.get_all_db_rows(self.db_context))
def _register_resources(self):
helper.TestDriver()
self.addCleanup(base_driver.ALL_RESOURCES.clear)
def add_plugin(self, plugin_type, plugin):
directory.add_plugin(plugin_type, plugin)
def test_plugin_not_registered(self):
self._register_resources()
# NOTE(rajivk): workaround, as we don't have delete method for plugin
plugin = directory.get_plugin(helper.TEST_PLUGIN)
directory.add_plugin(helper.TEST_PLUGIN, None)
self.addCleanup(self.add_plugin, helper.TEST_PLUGIN, plugin)
self.assertRaises(exceptions.PluginMethodNotFound,
full_sync.sync_resources,
self.db_context,
helper.TEST_RESOURCE1)
self.assertEqual([], db.get_all_db_rows(self.db_context))
def test_sync_resources(self):
self._register_resources()
plugin = helper.TestPlugin()
self.add_plugin(helper.TEST_PLUGIN, plugin)
resources = plugin.get_test_resource1s(self.db_context)
full_sync.sync_resources(self.db_context,
helper.TEST_RESOURCE1)
entries = [entry.data for entry in db.get_all_db_rows(self.db_context)]
for resource in resources:
self.assertIn(resource, entries)
self.assertEqual(len(resources), len(entries))
@mock.patch.object(base_driver.ResourceBaseDriver,
'get_resources_for_full_sync')
def test_get_resources_failed(self, mock_get_resources):
self._register_resources()
mock_get_resources.side_effect = exceptions.UnsupportedResourceType()
resource_name = helper.TEST_RESOURCE1
self.assertRaises(exceptions.UnsupportedResourceType,
full_sync.sync_resources, self.db_context,
resource_name)
mock_get_resources.assert_called_once_with(self.db_context,
resource_name)
self.assertEqual([], db.get_all_db_rows(self.db_context))
|
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from pathlib import Path
from typing import Any, Dict, Optional, Union
import hjson # type: ignore
from reggen.lib import check_int, check_keys, check_list, check_name, check_str
from reggen.params import BaseParam, Params
class TemplateParseError(Exception):
pass
class TemplateParameter(BaseParam):
""" A template parameter. """
VALID_PARAM_TYPES = (
'int',
'string',
'object',
)
def __init__(self, name: str, desc: Optional[str], param_type: str,
default: str):
assert param_type in self.VALID_PARAM_TYPES
super().__init__(name, desc, param_type)
self.default = default
self.value = None
def as_dict(self) -> Dict[str, object]:
rd = super().as_dict()
rd['default'] = self.default
return rd
def _parse_template_parameter(where: str, raw: object) -> TemplateParameter:
rd = check_keys(raw, where, ['name', 'desc', 'type'], ['default'])
name = check_str(rd['name'], 'name field of ' + where)
r_desc = rd.get('desc')
if r_desc is None:
desc = None
else:
desc = check_str(r_desc, 'desc field of ' + where)
r_type = rd.get('type')
param_type = check_str(r_type, 'type field of ' + where)
if param_type not in TemplateParameter.VALID_PARAM_TYPES:
raise ValueError('At {}, the {} param has an invalid type field {!r}. '
'Allowed values are: {}.'.format(
where, name, param_type,
', '.join(TemplateParameter.VALID_PARAM_TYPES)))
r_default = rd.get('default')
if param_type == 'int':
default = check_int(
r_default,
'default field of {}, (an integer parameter)'.format(name))
elif param_type == 'string':
default = check_str(r_default, 'default field of ' + where)
elif param_type == 'object':
default = IpConfig._check_object(r_default, 'default field of ' + where)
else:
assert False, f"Unknown parameter type found: {param_type!r}"
return TemplateParameter(name, desc, param_type, default)
class TemplateParams(Params):
""" A group of template parameters. """
@classmethod
def from_raw(cls, where: str, raw: object) -> 'TemplateParams':
""" Produce a TemplateParams instance from an object as it is in Hjson.
"""
ret = cls()
rl = check_list(raw, where)
for idx, r_param in enumerate(rl):
entry_where = 'entry {} in {}'.format(idx + 1, where)
param = _parse_template_parameter(entry_where, r_param)
if param.name in ret:
raise ValueError('At {}, found a duplicate parameter with '
'name {}.'.format(entry_where, param.name))
ret.add(param)
return ret
class IpTemplate:
""" An IP template.
An IP template is an IP block which needs to be parametrized before it
can be transformed into an actual IP block (which can then be instantiated
in a hardware design).
"""
name: str
params: TemplateParams
template_path: Path
def __init__(self, name: str, params: TemplateParams, template_path: Path):
self.name = name
self.params = params
self.template_path = template_path
@classmethod
def from_template_path(cls, template_path: Path) -> 'IpTemplate':
""" Create an IpTemplate from a template directory.
An IP template directory has a well-defined structure:
- The IP template name (TEMPLATE_NAME) is equal to the directory name.
- It contains a file 'data/TEMPLATE_NAME.tpldesc.hjson' containing all
configuration information related to the template.
        - It contains zero or more files ending in '.tpl'. These files are
          Mako templates and are rendered into a file in the same location
          without the '.tpl' file extension.
"""
# Check if the directory structure matches expectations.
if not template_path.is_dir():
raise TemplateParseError(
"Template path {!r} is not a directory.".format(
str(template_path)))
if not (template_path / 'data').is_dir():
raise TemplateParseError(
"Template path {!r} does not contain the required 'data' directory."
.format(str(template_path)))
# The template name equals the name of the template directory.
template_name = template_path.stem
# Find the template description file.
tpldesc_file = template_path / 'data/{}.tpldesc.hjson'.format(
template_name)
# Read the template description from file.
        try:
            # Use a context manager so the file handle is always closed.
            with open(tpldesc_file, 'r') as f:
                tpldesc_obj = hjson.load(f, use_decimal=True)
        except OSError as e:
            # FileNotFoundError is a subclass of OSError.
            raise TemplateParseError(
                "Unable to read template description file {!r}: {}".format(
                    str(tpldesc_file), str(e)))
# Parse the template description file.
where = 'template description file {!r}'.format(str(tpldesc_file))
if 'template_param_list' not in tpldesc_obj:
raise TemplateParseError(
f"Required key 'variables' not found in {where}")
try:
params = TemplateParams.from_raw(
f"list of parameters in {where}",
tpldesc_obj['template_param_list'])
except ValueError as e:
raise TemplateParseError(e) from None
return cls(template_name, params, template_path)
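# A minimal usage sketch (the template path is hypothetical):
#
#   template = IpTemplate.from_template_path(Path('hw/ip_templates/rv_timer'))
#   # reads hw/ip_templates/rv_timer/data/rv_timer.tpldesc.hjson
#   print(template.name)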
class IpConfig:
    def __init__(self,
                 template_params: TemplateParams,
                 instance_name: str,
                 param_values: Optional[Dict[str, Union[str, int]]] = None):
        self.template_params = template_params
        self.instance_name = instance_name
        # Default to None to avoid a shared mutable default argument.
        self.param_values = IpConfig._check_param_values(
            template_params, param_values if param_values is not None else {})
@staticmethod
def _check_object(obj: object, what: str) -> object:
"""Check that obj is a Hjson-serializable object.
If not, raise a ValueError; the what argument names the object.
"""
try:
# Round-trip objects through the JSON encoder to get the
# same representation no matter if we load the config from
# file, or directly pass it on to the template. Also, catch
# encoding/decoding errors when setting the object.
json = hjson.dumps(obj,
ensure_ascii=False,
use_decimal=True,
for_json=True,
encoding='UTF-8')
obj_checked = hjson.loads(json,
use_decimal=True,
encoding='UTF-8')
except TypeError as e:
raise ValueError('{} cannot be serialized as Hjson: {}'
.format(what, str(e))) from None
return obj_checked
@staticmethod
def _check_param_values(template_params: TemplateParams,
param_values: Any) -> Dict[str, Union[str, int]]:
"""Check if parameter values are valid.
Returns the parameter values in typed form if successful, and throws
a ValueError otherwise.
"""
VALID_PARAM_TYPES = ('string', 'int', 'object')
param_values_typed = {}
for key, value in param_values.items():
if not isinstance(key, str):
raise ValueError(
f"The IP configuration has a key {key!r} which is not a "
"string.")
if key not in template_params:
raise ValueError(
f"The IP configuration has a key {key!r} which is a "
"valid parameter.")
param_type = template_params[key].param_type
if param_type not in VALID_PARAM_TYPES:
raise ValueError(
f"Unknown template parameter type {param_type!r}. "
"Allowed types: " + ', '.join(VALID_PARAM_TYPES))
if param_type == 'string':
param_value_typed = check_str(
value, f"the key {key} of the IP configuration")
elif param_type == 'int':
param_value_typed = check_int(
value, f"the key {key} of the IP configuration")
elif param_type == 'object':
param_value_typed = IpConfig._check_object(
value, f"the key {key} of the IP configuration")
else:
assert False, "Unexpected parameter type found, expand check"
param_values_typed[key] = param_value_typed
return param_values_typed
@classmethod
def from_raw(cls, template_params: TemplateParams, raw: object,
where: str) -> 'IpConfig':
""" Load an IpConfig from a raw object """
        if not isinstance(raw, dict):
            raise ValueError(
                "The IP configuration is expected to be a dict, but was "
                "actually a " + type(raw).__name__)
        rd = check_keys(raw, 'configuration file ' + where, ['instance_name'],
                        ['param_values'])
        instance_name = check_name(rd.get('instance_name'),
                                   "the key 'instance_name' of " + where)
param_values = IpConfig._check_param_values(template_params,
rd['param_values'])
return cls(template_params, instance_name, param_values)
@classmethod
def from_text(cls, template_params: TemplateParams, txt: str, where: str) -> 'IpConfig':
"""Load an IpConfig from an Hjson description in txt"""
raw = hjson.loads(txt, use_decimal=True, encoding="UTF-8")
return cls.from_raw(template_params, raw, where)
def to_file(self, file_path: Path, header: Optional[str] = ""):
obj = {}
obj['instance_name'] = self.instance_name
obj['param_values'] = self.param_values
with open(file_path, 'w') as fp:
if header:
fp.write(header)
hjson.dump(obj,
fp,
ensure_ascii=False,
use_decimal=True,
for_json=True,
encoding='UTF-8',
indent=2)
fp.write("\n")
|
|
"""
Tests for the following offsets:
- BYearBegin
- BYearEnd
"""
from __future__ import annotations
from datetime import datetime
import pytest
from pandas._libs.tslibs.offsets import YearOffset
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tseries.offsets import (
BYearBegin,
BYearEnd,
)
class TestBYearBegin(Base):
_offset: type[YearOffset] = BYearBegin
def test_misspecified(self):
msg = "Month must go from 1 to 12"
with pytest.raises(ValueError, match=msg):
BYearBegin(month=13)
with pytest.raises(ValueError, match=msg):
BYearEnd(month=13)
offset_cases = []
offset_cases.append(
(
BYearBegin(),
{
datetime(2008, 1, 1): datetime(2009, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2011, 1, 1): datetime(2011, 1, 3),
datetime(2011, 1, 3): datetime(2012, 1, 2),
datetime(2005, 12, 30): datetime(2006, 1, 2),
datetime(2005, 12, 31): datetime(2006, 1, 2),
},
)
)
offset_cases.append(
(
BYearBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 2),
datetime(2005, 12, 31): datetime(2006, 1, 2),
},
)
)
offset_cases.append(
(
BYearBegin(-1),
{
datetime(2007, 1, 1): datetime(2006, 1, 2),
datetime(2009, 1, 4): datetime(2009, 1, 1),
datetime(2009, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2008, 1, 1),
datetime(2008, 12, 31): datetime(2008, 1, 1),
datetime(2006, 12, 29): datetime(2006, 1, 2),
datetime(2006, 12, 30): datetime(2006, 1, 2),
datetime(2006, 1, 1): datetime(2005, 1, 3),
},
)
)
offset_cases.append(
(
BYearBegin(-2),
{
datetime(2007, 1, 1): datetime(2005, 1, 3),
datetime(2007, 6, 30): datetime(2006, 1, 2),
datetime(2008, 12, 31): datetime(2007, 1, 1),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
class TestBYearEnd(Base):
_offset: type[YearOffset] = BYearEnd
offset_cases = []
offset_cases.append(
(
BYearEnd(),
{
datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2009, 12, 31),
datetime(2005, 12, 30): datetime(2006, 12, 29),
datetime(2005, 12, 31): datetime(2006, 12, 29),
},
)
)
offset_cases.append(
(
BYearEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2008, 12, 31),
datetime(2005, 12, 31): datetime(2006, 12, 29),
},
)
)
offset_cases.append(
(
BYearEnd(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2007, 12, 31),
datetime(2008, 12, 31): datetime(2007, 12, 31),
datetime(2006, 12, 29): datetime(2005, 12, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
},
)
)
offset_cases.append(
(
BYearEnd(-2),
{
datetime(2007, 1, 1): datetime(2005, 12, 30),
datetime(2008, 6, 30): datetime(2006, 12, 29),
datetime(2008, 12, 31): datetime(2006, 12, 29),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(BYearEnd(), datetime(2007, 12, 31), True),
(BYearEnd(), datetime(2008, 1, 1), False),
(BYearEnd(), datetime(2006, 12, 31), False),
(BYearEnd(), datetime(2006, 12, 29), True),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
class TestBYearEndLagged(Base):
_offset: type[YearOffset] = BYearEnd
def test_bad_month_fail(self):
msg = "Month must go from 1 to 12"
with pytest.raises(ValueError, match=msg):
BYearEnd(month=13)
with pytest.raises(ValueError, match=msg):
BYearEnd(month=0)
offset_cases = []
offset_cases.append(
(
BYearEnd(month=6),
{
datetime(2008, 1, 1): datetime(2008, 6, 30),
datetime(2007, 6, 30): datetime(2008, 6, 30),
},
)
)
offset_cases.append(
(
BYearEnd(n=-1, month=6),
{
datetime(2008, 1, 1): datetime(2007, 6, 29),
datetime(2007, 6, 30): datetime(2007, 6, 29),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_roll(self):
offset = BYearEnd(month=6)
date = datetime(2009, 11, 30)
assert offset.rollforward(date) == datetime(2010, 6, 30)
assert offset.rollback(date) == datetime(2009, 6, 30)
on_offset_cases = [
(BYearEnd(month=2), datetime(2007, 2, 28), True),
(BYearEnd(month=6), datetime(2007, 6, 30), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
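# Note on the "lagged" cases above: BYearEnd snaps to the last *business* day
# of the year, e.g. 2005-12-31 maps to 2006-12-29 because 2006-12-31 is a
# Sunday and 2006-12-30 a Saturday.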
|
|
# -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.GithubObject
import github.NamedUser
import github.CommitStats
import github.Gist
import github.GistFile
class GistHistoryState(github.GithubObject.CompletableGithubObject):
"""
This class represents GistHistoryStates
"""
@property
def change_status(self):
"""
:type: :class:`github.CommitStats.CommitStats`
"""
self._completeIfNotSet(self._change_status)
return self._change_status.value
@property
def comments(self):
"""
:type: integer
"""
self._completeIfNotSet(self._comments)
return self._comments.value
@property
def comments_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._comments_url)
return self._comments_url.value
@property
def commits_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._commits_url)
return self._commits_url.value
@property
def committed_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._committed_at)
return self._committed_at.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def description(self):
"""
:type: string
"""
self._completeIfNotSet(self._description)
return self._description.value
@property
def files(self):
"""
:type: dict of string to :class:`github.GistFile.GistFile`
"""
self._completeIfNotSet(self._files)
return self._files.value
@property
def forks(self):
"""
:type: list of :class:`github.Gist.Gist`
"""
self._completeIfNotSet(self._forks)
return self._forks.value
@property
def forks_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._forks_url)
return self._forks_url.value
@property
def git_pull_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_pull_url)
return self._git_pull_url.value
@property
def git_push_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_push_url)
return self._git_push_url.value
@property
def history(self):
"""
:type: list of :class:`GistHistoryState`
"""
self._completeIfNotSet(self._history)
return self._history.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: string
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def owner(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._owner)
return self._owner.value
@property
def public(self):
"""
:type: bool
"""
self._completeIfNotSet(self._public)
return self._public.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def user(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._user)
return self._user.value
@property
def version(self):
"""
:type: string
"""
self._completeIfNotSet(self._version)
return self._version.value
def _initAttributes(self):
self._change_status = github.GithubObject.NotSet
self._comments = github.GithubObject.NotSet
self._comments_url = github.GithubObject.NotSet
self._commits_url = github.GithubObject.NotSet
self._committed_at = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._description = github.GithubObject.NotSet
self._files = github.GithubObject.NotSet
self._forks = github.GithubObject.NotSet
self._forks_url = github.GithubObject.NotSet
self._git_pull_url = github.GithubObject.NotSet
self._git_push_url = github.GithubObject.NotSet
self._history = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._owner = github.GithubObject.NotSet
self._public = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
self._user = github.GithubObject.NotSet
self._version = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "change_status" in attributes: # pragma no branch
self._change_status = self._makeClassAttribute(github.CommitStats.CommitStats, attributes["change_status"])
if "comments" in attributes: # pragma no branch
self._comments = self._makeIntAttribute(attributes["comments"])
if "comments_url" in attributes: # pragma no branch
self._comments_url = self._makeStringAttribute(attributes["comments_url"])
if "commits_url" in attributes: # pragma no branch
self._commits_url = self._makeStringAttribute(attributes["commits_url"])
if "committed_at" in attributes: # pragma no branch
self._committed_at = self._makeDatetimeAttribute(attributes["committed_at"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "files" in attributes: # pragma no branch
self._files = self._makeDictOfStringsToClassesAttribute(github.GistFile.GistFile, attributes["files"])
if "forks" in attributes: # pragma no branch
self._forks = self._makeListOfClassesAttribute(github.Gist.Gist, attributes["forks"])
if "forks_url" in attributes: # pragma no branch
self._forks_url = self._makeStringAttribute(attributes["forks_url"])
if "git_pull_url" in attributes: # pragma no branch
self._git_pull_url = self._makeStringAttribute(attributes["git_pull_url"])
if "git_push_url" in attributes: # pragma no branch
self._git_push_url = self._makeStringAttribute(attributes["git_push_url"])
if "history" in attributes: # pragma no branch
self._history = self._makeListOfClassesAttribute(GistHistoryState, attributes["history"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeStringAttribute(attributes["id"])
if "owner" in attributes: # pragma no branch
self._owner = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["owner"])
if "public" in attributes: # pragma no branch
self._public = self._makeBoolAttribute(attributes["public"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "user" in attributes: # pragma no branch
self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"])
if "version" in attributes: # pragma no branch
self._version = self._makeStringAttribute(attributes["version"])
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Xcode 4 template generator for cocos2d project
# (c) 2011 Ricardo Quesada
#
# LICENSE: Dual License: MIT & GNU GPL v2 Whatever suits you best.
#
# Given a directory, it generates the "Definitions" and "Nodes" elements
#
# Format taken from: http://blog.boreal-kiss.net/2011/03/11/a-minimal-project-template-for-xcode-4/
# ----------------------------------------------------------------------------
'''
Xcode 4 template generator
'''
__docformat__ = 'restructuredtext'
_template_open_body = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<!-- FILE AUTOGENERATED BY cocos2d/tools/template_generator.py DO NOT EDIT -->
<plist version="1.0">
<dict>
<key>Description</key>
<string>This is a template description.</string>
<key>Identifier</key>
<string>com.cocos2d-v2.%s</string>
<key>Kind</key>
<string>Xcode.Xcode3.ProjectTemplateUnitKind</string>"""
_template_close_body = "</dict>\n</plist>"
_template_header_path= """<key>Targets</key>
<array>
<dict>
<key>SharedSettings</key>
<dict>
<key>HEADER_SEARCH_PATHS</key>
<string>%s</string>
</dict>
</dict>
</array>"""
_template_user_header_path= """<key>Targets</key>
<array>
<dict>
<key>SharedSettings</key>
<dict>
<key>ALWAYS_SEARCH_USER_PATHS</key>
<string>YES</string>
<key>USER_HEADER_SEARCH_PATHS</key>
<string>%s</string>
</dict>
</dict>
</array>"""
_template_ancestor = """ <key>Ancestors</key>
<array>
<string>%s</string>
</array>"""
# python
import sys
import os
import getopt
import glob
class Xcode4Template(object):
def __init__( self, directory, group=0, identifier="XXX", header_path=None, user_header_path=None, ancestor=None ):
self.directory = directory
self.files_to_include = []
self.wildcard = '*'
self.ignore_extensions = ['h','txt','html','patch','cmake']
self.group_start_index = group # eg: if 1 then libs/cocos2d/support -> ["cocos2d", "support"] ignoring "libs"
self.output = []
self.identifier = identifier
self.header_path = header_path
self.user_header_path = user_header_path
self.ancestor = ancestor
def scandirs(self, path):
for currentFile in glob.glob( os.path.join(path, self.wildcard) ):
if os.path.isdir(currentFile):
self.scandirs(currentFile)
else:
self.files_to_include.append( currentFile )
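    # scandirs() recursively collects every file under 'path' into
    # self.files_to_include; directories themselves are not emitted.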
#
# append the definitions
#
def append_definition( self, output_body, path, group, dont_index ):
output_body.append("\t\t<key>%s</key>" % path )
output_body.append("\t\t<dict>")
if group:
output_body.append("\t\t\t<key>Group</key>")
output_body.append("\t\t\t<array>")
for g in group:
output_body.append("\t\t\t\t<string>%s</string>" % g )
output_body.append("\t\t\t</array>")
output_body.append("\t\t\t<key>Path</key>\n\t\t\t<string>%s</string>" % path )
if dont_index:
output_body.append("\t\t\t<key>TargetIndices</key>\n\t\t\t<array/>")
output_body.append("\t\t</dict>")
#
# Generate the "Definitions" section
#
def generate_definitions( self ):
output_header = "\t<key>Definitions</key>"
output_dict_open = "\t<dict>"
output_dict_close = "\t</dict>"
output_body = []
for path in self.files_to_include:
# group name
group = []
# obtain group name from directory
dirs = os.path.dirname(path)
group = dirs.split('/')
group = group[self.group_start_index:]
            # get the extension (os.path.splitext also handles filenames
            # containing more than one dot)
            extension = os.path.splitext(path)[1][1:] or None
self.append_definition( output_body, path, group, extension in self.ignore_extensions )
self.output.append( output_header )
self.output.append( output_dict_open )
self.output.append( "\n".join( output_body ) )
self.output.append( output_dict_close )
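    # Example of a single emitted definition (illustrative path, with
    # group_start_index=1 so 'libs' is dropped from the group):
    #   <key>libs/cocos2d/CCNode.m</key>
    #   <dict>
    #       <key>Group</key>
    #       <array><string>cocos2d</string></array>
    #       <key>Path</key>
    #       <string>libs/cocos2d/CCNode.m</string>
    #   </dict>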
#
# Generates the "Nodes" section
#
def generate_nodes( self ):
output_header = "\t<key>Nodes</key>"
output_open = "\t<array>"
output_close = "\t</array>"
output_body = []
for path in self.files_to_include:
output_body.append("\t\t<string>%s</string>" % path )
self.output.append( output_header )
self.output.append( output_open )
self.output.append( "\n".join( output_body ) )
self.output.append( output_close )
#
# Generate ancestors
#
def generate_ancestor( self ):
if self.ancestor:
self.output.append( _template_ancestor % self.ancestor )
#
    # Generates the header search path settings
#
def generate_header_path( self ):
if self.header_path:
self.output.append( _template_header_path % self.header_path )
if self.user_header_path:
self.output.append( _template_user_header_path % self.user_header_path )
#
    # Generates the plist and sends it to stdout
#
def generate_xml( self ):
self.output.append( _template_open_body % self.identifier )
self.generate_ancestor()
self.generate_definitions()
self.generate_nodes()
self.generate_header_path()
self.output.append( _template_close_body )
print "\n".join( self.output )
def generate( self ):
self.scandirs( self.directory )
self.generate_xml()
def help():
print "%s v1.0 - An utility to generate Xcode 4 templates" % sys.argv[0]
print "Usage:"
print "-g --group\t\tdirectory_used_as_starting_group (if 1, then 'libs/cocos2d/Support/' -> ['cocos2d','Support'] ignoring 'libs')"
print "-i --identifier\t\tidentifier (Xcode4 template identifier)"
print "-a --ancestor\t\tancestor identifier. Default: none"
print "--header-path\t\theader search path"
print "--user-header-path\tuser header search path"
print "directory_to_parse"
print "\nExample:"
print "\t%s -i kazmathlib --header-path ___PACKAGENAME___/libs/kazmath/include libs" % sys.argv[0]
print "\t%s -i cocos2dlib libs" % sys.argv[0]
sys.exit(-1)
if __name__ == "__main__":
if len( sys.argv ) == 1:
help()
directory = None
group = 0
identifier = None
header_path= None
user_header_path= None
ancestor = None
argv = sys.argv[1:]
try:
opts, args = getopt.getopt(argv, "a:g:i:", ["ancestor=","group=","identifier=","header-path=", "user-header-path="])
if len(args) == 0:
help()
for opt, arg in opts:
if opt in ("-g","--group"):
group = arg
if opt in ("-i","--identifier"):
identifier = arg
if opt in ["--header-path"]:
header_path= arg
if opt in ["--user-header-path"]:
user_header_path= arg
if opt in ("-a", "--ancestor"):
ancestor = arg
    except getopt.GetoptError,e:
        print e
        # Without exiting here, 'args' would be unbound below (NameError).
        help()
directory = args[0]
    if directory is None:
help()
    gen = Xcode4Template( directory=directory, group=int(group), identifier=identifier, header_path=header_path, user_header_path=user_header_path, ancestor=ancestor)
gen.generate()
|
|
import pytest
import unittest
from calvin.runtime.north.plugins.port import queue
from calvin.runtime.north.calvin_token import Token, ExceptionToken
from calvin.runtime.north.plugins.port.queue.common import QueueFull, QueueEmpty
pytest_unittest = pytest.mark.unittest
class DummyPort(object):
pass
def create_port(routing):
port = DummyPort()
port.properties = {'routing': routing, "direction": "in"}
return queue.get(port)
def unwrap(data):
return data.value.items()[0]
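# unwrap() pulls the single (tag, value) pair out of a collected token, whose
# value is a one-entry {writer_tag: data} dict (dict.items() returns a list
# under Python 2, so indexing with [0] is valid here).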
@pytest_unittest
class TestCollectAnyFIFO(unittest.TestCase):
def setUp(self):
self.inport = create_port(routing="collect-any-tagged")
def setup_writers(self, n):
writer_list = [ "writer-%d" % i for i in range(1, n+1)]
for writer in writer_list:
self.inport.add_writer(writer, {})
def tearDown(self):
pass
def testInit(self):
assert self.inport
def testType(self):
queue_type = self.inport.queue_type
self.assertEqual(queue_type, "collect:any-tagged")
def testGetPeers(self):
self.setup_writers(3)
self.assertEqual(set(self.inport.get_peers()),
set(["writer-%d" % i for i in [1,2,3]]))
def testAddWriter(self):
self.inport.add_writer("writer", {})
self.assertTrue("writer" in self.inport.writers)
def testRemoveWriter_Normal(self):
self.setup_writers(1)
self.inport.remove_writer("writer-1")
self.assertTrue("writer-1" not in self.inport.writers)
def testRemoveWriter_Failure(self):
with self.assertRaises(Exception):
self.inport.remove_writer(self)
with self.assertRaises(Exception):
self.inport.remove_writer("no such writer")
def testAddReader_Normal(self):
self.inport.add_reader(None, None)
def testAddReader_Illegal(self):
pass
def testAddReader_Replication(self):
# Test replication etc
pass
def testRemoveReader_Normal(self):
self.inport.remove_reader(None)
def testWrite_Normal(self):
self.setup_writers(3)
for _ in range(3):
for i in [1,2,3]:
self.inport.write(i, "writer-%d" % i)
for i in [1,2,3]:
fifo = self.inport.fifo["writer-%d" % i]
self.assertEqual(fifo[:3], [i,i,i])
def testWrite_QueueFull(self):
self.setup_writers(2)
with self.assertRaises(QueueFull):
for i in range(10):
self.inport.write("fillme", "writer-1")
def testTokensAvailable_Normal(self):
self.setup_writers(5)
for i in [1,2,3,4,5]:
self.inport.write(Token("data-%d" % i), "writer-%d" % i)
self.assertTrue(self.inport.tokens_available(1, None))
for i in [1,2,3]:
self.inport.write(Token("data-%d" % i), "writer-%d" % i)
self.assertTrue(self.inport.tokens_available(2, None))
for i in [4,5]:
self.inport.write(Token("data-%d" % i), "writer-%d" % i)
self.assertTrue(self.inport.tokens_available(2, None))
        i = 0
        try:
            # Keep filling writer-1 until the queue is full; the number of
            # available tokens should grow with each accepted write.
            while True:
                self.inport.write(Token("data-%d" % i), "writer-%d" % 1)
                self.assertTrue(self.inport.tokens_available(3+i, None))
                i += 1
        except QueueFull:
            pass
def testSlotsAvailable_Normal(self):
self.setup_writers(5)
for i in [1,2,3,4,5]:
self.assertTrue(self.inport.slots_available(self.inport.N-1, "writer-%d" % i))
for i in [1,2,3]:
self.inport.write(Token("data"), "writer-%d" % i)
for w in self.inport.writers:
if w in ["writer-%d" % i for i in [1,2,3]]:
self.assertTrue(self.inport.slots_available(self.inport.N-2, w))
else:
self.assertTrue(self.inport.slots_available(self.inport.N-1, w))
def testSlotsAvailable_Failure(self):
pass
def testPeek_Failure(self):
self.setup_writers(3)
with self.assertRaises(QueueEmpty):
self.inport.peek(None)
def testSerialize(self):
self.setup_writers(3)
for i in [1,2,3]:
for j in [0,3]:
self.inport.write(Token("data-%d" % (i+j)), "writer-%d" % i)
self.inport.peek(None)
state = self.inport._state()
port = create_port(routing="collect-all-tagged")
port._set_state(state)
data = self.inport.peek(None).value
self.assertEqual(data, {"writer-%d" % i: "data-%d" % (i+3) for i in [1,2,3]})
def testPeek_Normal(self):
self.setup_writers(3)
for i in [1,2,3]:
for j in [0,3]:
self.inport.write(Token("data-%d" % (i+j)), "writer-%d" % i)
data = self.inport.peek(None).value
self.assertEqual(data, {"writer-%d" % i: "data-%d" % i for i in [1,2,3]})
data = self.inport.peek(None).value
self.assertEqual(data, {"writer-%d" % i: "data-%d" % (i+3) for i in [1,2,3]})
def testPeek_Exception(self):
self.setup_writers(3)
for i in [1,2,3]:
self.inport.write(Token("data-%d" % i), "writer-%d" % i)
self.inport.write(Token("data-%d" % (1+3)), "writer-%d" % 1)
self.inport.write(ExceptionToken(), "writer-%d" % 2)
self.inport.write(Token("data-%d" % (3+3)), "writer-%d" % 3)
for i in [1,2,3]:
self.inport.write(Token("data-%d" % (i+6)), "writer-%d" % i)
"""
w1: 1 4 7
w2: 2 e 8
w3: 3 6 9
"""
data_1 = self.inport.peek(None).value
self.assertEqual(data_1, {"writer-%d" % i: "data-%d" % i for i in [1,2,3]})
"""
w1: 4 7
w2: e 8
w3: 6 9
"""
data_2 = self.inport.peek(None).value
self.assertEqual(data_2, {"writer-2": 'Exception'})
"""
w1: 4 7
w2: 8
w3: 6 9
"""
data_3 = self.inport.peek(None).value
result = {"writer-%d" % i: "data-%d" % (i+3) for i in [1,2,3]}
result["writer-2"] = "data-8"
self.assertEqual(data_3, result)
def testCancel(self):
self.setup_writers(3)
for i in [1,2,3]:
for j in [0,3]:
self.inport.write(Token("data-%d" % (i+j)), "writer-%d" % i)
data_1 = self.inport.peek(None).value
self.inport.cancel(None)
data_2 = self.inport.peek(None).value
self.assertEqual(data_1, data_2)
data_2 = self.inport.peek(None).value
self.assertEqual(data_2, {"writer-%d" % i: "data-%d" % (i+3) for i in [1,2,3]})
def testCommit(self):
self.setup_writers(3)
for i in [1,2,3]:
for j in [0,3]:
self.inport.write(Token("data-%d" % (i+j)), "writer-%d" % i)
self.inport.peek(None)
self.inport.commit(None)
data_2 = self.inport.peek(None).value
self.assertEqual(data_2, {"writer-%d" % i: "data-%d" % (i+3) for i in [1,2,3]})
with self.assertRaises(QueueEmpty):
self.inport.peek(None)
|
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from django.db import transaction
from django.utils import timezone
from django.http import FileResponse, StreamingHttpResponse
from django.contrib.gis.db.models.functions import Distance
from django.contrib.gis.geos import GEOSException, GEOSGeometry
from django.contrib.gis.gdal import GDALException
from django.db.models import FloatField
from django.db.models.functions import Cast
from rest_framework import status, filters
from rest_framework.exceptions import PermissionDenied, NotFound, ValidationError
from rest_framework.generics import ListAPIView
from rest_framework.response import Response
from gwells.roles import WELLS_VIEWER_ROLE, WELLS_EDIT_ROLE
from gwells.pagination import apiLimitedPagination, APILimitOffsetPagination
from gwells.geojson import GeoJSONIterator
from wells.filters import (
BoundingBoxFilterBackend,
WellListFilterBackend,
WellListOrderingFilter,
GeometryFilterBackend,
RadiusFilterBackend
)
from wells.models import Well
from wells.serializers_v2 import (
WellLocationSerializerV2,
WellVerticalAquiferExtentSerializerV2,
WellListSerializerV2,
WellListAdminSerializerV2,
WellExportSerializerV2,
WellExportAdminSerializerV2,
WellSubsurfaceSerializer,
WellDetailSerializer
)
from wells.permissions import WellsEditOrReadOnly
from wells.renderers import WellListCSVRenderer, WellListExcelRenderer
from aquifers.models import (
Aquifer,
VerticalAquiferExtent,
VerticalAquiferExtentsHistory
)
from aquifers.permissions import HasAquiferEditRole
from wells.views import WellDetail as WellDetailV1
from wells.constants import MAX_EXPORT_COUNT, MAX_LOCATION_COUNT
logger = logging.getLogger(__name__)
class WellLocationListV2APIView(ListAPIView):
""" returns well locations for a given search
get: returns a list of wells with locations only
"""
swagger_schema = None
permission_classes = (WellsEditOrReadOnly,)
model = Well
pagination_class = apiLimitedPagination(MAX_LOCATION_COUNT)
# Allow searching on name fields, names of related companies, etc.
filter_backends = (WellListFilterBackend, BoundingBoxFilterBackend,
filters.SearchFilter, WellListOrderingFilter, GeometryFilterBackend)
ordering = ('well_tag_number',)
search_fields = ('well_tag_number', 'identification_plate_number',
'street_address', 'city', 'owner_full_name', 'ems')
TOO_MANY_ERROR_MESSAGE = "Too many wells to display on map. Please zoom in or change your search criteria."
def get_serializer_class(self):
return WellLocationSerializerV2
def get_queryset(self):
""" Excludes Unpublished wells for users without edit permissions """
if self.request.user.groups.filter(name=WELLS_EDIT_ROLE).exists():
qs = Well.objects.all()
else:
qs = Well.objects.all().exclude(well_publication_status='Unpublished')
# check to see if we should filter wells by which ones intersect an aquifer
intersects_aquifer_id = self.request.query_params.get('intersects_aquifer_id', None)
if intersects_aquifer_id:
aquifer = Aquifer.objects.filter(aquifer_id=int(intersects_aquifer_id)).first()
if not aquifer:
raise NotFound(f'Unknown aquifer {intersects_aquifer_id}')
if not aquifer.geom:
# if the aquifer has no/null geometry, it might be an aquifer
# that the business area has created but has not delineated an area
# for (for example, the special "holding" aquifer 1143).
qs = qs.none()
else:
# Find wells that intersect this simplified aquifer polygon (excluding wells
# with null geom)
qs = qs.exclude(geom=None)
qs = qs.filter(geom__intersects=aquifer.geom)
well_tag_numbers = self.request.query_params.get('well_tag_numbers', '')
if well_tag_numbers:
well_tag_numbers = well_tag_numbers.split(',')
qs = qs.filter(well_tag_number__in=well_tag_numbers)
return qs
def get(self, request, *args, **kwargs):
"""
Returns geojson if requested, otherwise handles request as normal.
"""
geojson_requested = self.request.query_params.get('geojson') == 'true'
# if geojson requested, create a query that returns each well's geometry as GeoJSON
# so that we can easily create a FeatureCollection.
# This might be more performant in the database using json_agg and ST_AsGeoJSON
# vs creating geojson Features here in Python.
if geojson_requested:
return self.geoJSONResponse()
return super().get(request)
def geoJSONResponse(self):
"""
Returns a streaming GeoJSON HTTP response of the searched wells
"""
qs = self.get_queryset()
qs = qs.exclude(geom=None)
fields = [
"geom",
"well_tag_number",
"identification_plate_number",
"street_address",
"city",
"artesian_conditions",
]
locations = self.filter_queryset(qs)
# If the user can edit wells then we can add the `is_published` property to the response
if self.request.user.groups.filter(name=WELLS_EDIT_ROLE).exists():
locations = locations.extra(select={'is_published': "well_publication_status_code = 'Published'"})
fields.append("is_published")
locations = locations.values(*fields)
locations = list(locations[:MAX_LOCATION_COUNT + 1])
# return a 403 response if there are too many wells to display
if len(locations) > MAX_LOCATION_COUNT:
raise PermissionDenied(self.TOO_MANY_ERROR_MESSAGE)
# turn the list of locations into a generator so the GeoJSONIterator can use it
locations_iter = (location for location in locations)
iterator = GeoJSONIterator(locations_iter)
return StreamingHttpResponse(iterator, content_type="application/json")
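# Illustrative sketch (not part of this module): the database-side GeoJSON
# aggregation mentioned in geoJSONResponse() above could look roughly like the
# following raw PostGIS SQL (table and column names assumed):
#
#   SELECT json_build_object(
#       'type', 'FeatureCollection',
#       'features', json_agg(json_build_object(
#           'type', 'Feature',
#           'geometry', ST_AsGeoJSON(geom)::json,
#           'properties', json_build_object(
#               'well_tag_number', well_tag_number,
#               'street_address', street_address))))
#   FROM well;
#
# This would move feature construction into the database at the cost of
# bypassing the serializer/filter stack used here.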
class WellAquiferListV2APIView(ListAPIView):
"""
Returns a list of aquifers with depth information for a well
"""
swagger_schema = None
permission_classes = (HasAquiferEditRole,)
ordering = ('start',)
serializer_class = WellVerticalAquiferExtentSerializerV2
pagination_class = None
def get_queryset(self):
"""
Excludes Aquifer 3D points that relate to unpublished wells for users without edit permissions
"""
well = self.get_well()
qs = VerticalAquiferExtent.objects.filter(well=well).select_related('aquifer')
if not self.request.user.groups.filter(name=WELLS_EDIT_ROLE).exists():
qs = qs.exclude(well__well_publication_status='Unpublished')
return qs
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
@transaction.atomic
def post(self, request, *args, **kwargs):
username = self.request.user.profile.username
timestamp = timezone.now()
# we expect a list
if not isinstance(request.data, list):
raise NotFound()
# get the well and 404 if it doesn't exist
well = self.get_well()
max_depth = float('-inf')
ids = []
items = []
errors = []
has_errors = False
for item in request.data: # go through each vertical aquifer extent
item['well_tag_number'] = well.well_tag_number
vertical_aquifer_extent = None
vae_id = item.get('id', None)
if vae_id: # has an id - then it must be an existing one
vertical_aquifer_extent = VerticalAquiferExtent.objects.get(pk=vae_id)
serializer = WellVerticalAquiferExtentSerializerV2(instance=vertical_aquifer_extent,
data=item)
serializer_errors = {}
if serializer.is_valid():
# add user audit information
serializer.validated_data['update_user'] = username
serializer.validated_data['update_date'] = timestamp
if not vertical_aquifer_extent:
serializer.validated_data['create_user'] = username
serializer.validated_data['create_date'] = timestamp
if self.has_changed(vertical_aquifer_extent, serializer.validated_data):
vertical_aquifer_extent = serializer.save()
                # keep track of existing IDs and any newly added IDs
ids.append(vertical_aquifer_extent.id)
items.append(serializer.data)
else:
serializer_errors = serializer.errors
has_errors = True
if vertical_aquifer_extent is not None:
self.log_history(vertical_aquifer_extent, username, timestamp)
if vertical_aquifer_extent.start < max_depth:
has_errors = True
serializer_errors.setdefault('start', []) \
.append('Start depth overlaps with another')
max_depth = vertical_aquifer_extent.end
errors.append(serializer_errors) # always add to keep the index correct for web app
# roll back on errors and undo any changes
if has_errors:
transaction.set_rollback(True)
return Response(errors, status=status.HTTP_400_BAD_REQUEST)
# delete any ids not in the POST-ed list
self.get_queryset().exclude(id__in=ids).delete()
return Response(items, status=status.HTTP_201_CREATED)
def get_well(self):
well_tag_number = int(self.kwargs['well_tag_number'])
try:
return Well.objects.get(pk=well_tag_number)
except Exception:
raise NotFound(f'Well {well_tag_number} could not be found')
def has_changed(self, existing_vertical_aquifer_extent, new_data):
if existing_vertical_aquifer_extent is None:
return True
if existing_vertical_aquifer_extent.start != new_data['start']:
return True
if existing_vertical_aquifer_extent.end != new_data['end']:
return True
if existing_vertical_aquifer_extent.aquifer_id != new_data['aquifer_id']:
return True
if existing_vertical_aquifer_extent.geom and new_data['geom']:
if existing_vertical_aquifer_extent.geom.x != new_data['geom'].x:
return True
if existing_vertical_aquifer_extent.geom.y != new_data['geom'].y:
return True
else:
return True
return False
def log_history(self, vertical_aquifer_extent, username, timestamp):
# Whenever a VerticalAquiferExtent is saved - insert a copy of the data into the
# vertical_aquifer_extents_history table
VerticalAquiferExtentsHistory.objects.create(
create_user=username,
create_date=timestamp,
update_user=username,
update_date=timestamp,
well_tag_number=vertical_aquifer_extent.well_id,
aquifer_id=vertical_aquifer_extent.aquifer_id,
geom=vertical_aquifer_extent.geom,
start=vertical_aquifer_extent.start,
end=vertical_aquifer_extent.end
)
class WellListAPIViewV2(ListAPIView):
"""List and create wells
get: returns a list of wells
"""
swagger_schema = None
permission_classes = (WellsEditOrReadOnly,)
model = Well
pagination_class = APILimitOffsetPagination
filter_backends = (WellListFilterBackend, BoundingBoxFilterBackend,
filters.SearchFilter, WellListOrderingFilter, GeometryFilterBackend)
ordering = ('well_tag_number',)
search_fields = ('well_tag_number', 'identification_plate_number',
'street_address', 'city', 'owner_full_name')
default_limit = 10
def get_serializer_class(self):
"""Returns a different serializer class for admin users."""
serializer_class = WellListSerializerV2
if (self.request.user and self.request.user.is_authenticated and
self.request.user.groups.filter(name=WELLS_VIEWER_ROLE).exists()):
serializer_class = WellListAdminSerializerV2
return serializer_class
def get_queryset(self):
""" Excludes Unpublished wells for users without edit permissions """
if self.request.user.groups.filter(name=WELLS_EDIT_ROLE).exists():
qs = Well.objects.all()
else:
qs = Well.objects.all().exclude(well_publication_status='Unpublished')
qs = qs \
.select_related(
"bcgs_id",
).prefetch_related(
"water_quality_characteristics",
"drilling_methods",
"development_methods"
)
return qs
class WellExportListAPIViewV2(ListAPIView):
"""Returns CSV or Excel data for wells.
"""
swagger_schema = None
permission_classes = (WellsEditOrReadOnly,)
model = Well
# Allow searching on name fields, names of related companies, etc.
filter_backends = (WellListFilterBackend, BoundingBoxFilterBackend,
filters.SearchFilter, filters.OrderingFilter)
ordering = ('well_tag_number',)
pagination_class = None
search_fields = ('well_tag_number', 'identification_plate_number',
'street_address', 'city', 'owner_full_name')
renderer_classes = (WellListCSVRenderer, WellListExcelRenderer)
SELECT_RELATED_OPTIONS = [
'well_class',
'well_subclass',
'well_status',
'land_district',
'company_of_person_responsible',
'ground_elevation_method',
'surface_seal_material',
'surface_seal_method',
'liner_material',
'screen_intake_method',
'screen_type',
'screen_material',
'screen_opening',
'screen_bottom',
'well_yield_unit',
'observation_well_status',
'coordinate_acquisition_code',
'bcgs_id',
'decommission_method',
'aquifer',
'aquifer_lithology',
'yield_estimation_method',
'well_disinfected_status',
]
PREFETCH_RELATED_OPTIONS = [
'development_methods',
'drilling_methods',
'water_quality_characteristics',
]
def get_fields(self):
raw_fields = self.request.query_params.get('fields')
return raw_fields.split(',') if raw_fields else None
def get_queryset(self):
"""Excludes unpublished wells for users without edit permissions.
"""
if self.request.user.groups.filter(name=WELLS_EDIT_ROLE).exists():
qs = Well.objects.all()
else:
qs = Well.objects.all().exclude(well_publication_status='Unpublished')
included_fields = self.get_fields()
if included_fields:
select_relateds = [
relation for relation in self.SELECT_RELATED_OPTIONS
if relation in included_fields
]
prefetches = [
relation for relation in self.PREFETCH_RELATED_OPTIONS
if relation in included_fields
]
if select_relateds:
qs = qs.select_related(*select_relateds)
if prefetches:
qs = qs.prefetch_related(*prefetches)
elif included_fields is None:
# If no fields are passed, then include everything
qs = qs.select_related(*self.SELECT_RELATED_OPTIONS)
qs = qs.prefetch_related(*self.PREFETCH_RELATED_OPTIONS)
return qs
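    # Worked example (illustrative): fields=well_tag_number,aquifer,drilling_methods
    # yields select_related('aquifer') and prefetch_related('drilling_methods');
    # 'well_tag_number' matches neither option list and so adds no extra joins.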
def get_serializer_class(self):
"""Returns a different serializer class for admin users."""
serializer_class = WellExportSerializerV2
if (self.request.user and self.request.user.is_authenticated and
self.request.user.groups.filter(name=WELLS_VIEWER_ROLE).exists()):
serializer_class = WellExportAdminSerializerV2
return serializer_class
def get_serializer_context(self):
context = super().get_serializer_context()
fields = self.get_fields()
if fields:
context['fields'] = fields
return context
def get_renderer_context(self):
context = super().get_renderer_context()
fields = self.get_fields()
if fields:
context['header'] = fields
return context
def batch_iterator(self, queryset, count, batch_size=200):
"""Batch a queryset into chunks of batch_size, and serialize the results
Allows iterative processing while taking advantage of prefetching many
to many relations.
"""
for offset in range(0, count, batch_size):
end = min(offset + batch_size, count)
batch = queryset[offset:end]
serializer = self.get_serializer(batch, many=True)
for item in serializer.data:
yield item
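    # Worked example (illustrative): with count=450 and batch_size=200, the
    # loop above serializes queryset[0:200], then queryset[200:400], then
    # queryset[400:450], so each slice is evaluated (and prefetched)
    # independently instead of loading all rows at once.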
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
count = queryset.count()
        # raise an error (HTTP 403) if there are too many wells to export
if count > MAX_EXPORT_COUNT:
raise PermissionDenied(
'Too many wells to export. Please change your search criteria.'
)
elif count == 0:
raise NotFound('No well records could be found.')
renderer = request.accepted_renderer
if renderer.format == 'xlsx':
response_class = FileResponse
else:
response_class = StreamingHttpResponse
context = self.get_renderer_context()
data_iterator = self.batch_iterator(queryset, count)
render_result = renderer.render(data_iterator, renderer_context=context)
response = response_class(render_result, content_type=renderer.media_type)
response['Content-Disposition'] = 'attachment; filename="search-results.{ext}"'.format(ext=renderer.format)
return response
class WellSubsurface(ListAPIView):
""" Returns well subsurface info within a gemoetry or a list of wells """
""" This replaces WellScreen with the additional aquifer and lithology info"""
model = Well
serializer_class = WellSubsurfaceSerializer
filter_backends = (GeometryFilterBackend, RadiusFilterBackend)
swagger_schema = None
def get_queryset(self):
qs = Well.objects.all() \
.select_related('intended_water_use', 'aquifer', 'aquifer__material',
'aquifer__subtype') \
.prefetch_related('screen_set')
if not self.request.user.groups.filter(name=WELLS_EDIT_ROLE).exists():
qs = qs.exclude(well_publication_status='Unpublished')
# check if a point was supplied (note: actual filtering will be by
# the filter_backends classes). If so, add distances from the point.
point = self.request.query_params.get('point', None)
srid = self.request.query_params.get('srid', 4326)
radius = self.request.query_params.get('radius', None)
if point and radius:
try:
shape = GEOSGeometry(point, srid=int(srid))
radius = float(radius)
assert shape.geom_type == 'Point'
except (ValueError, AssertionError, GDALException, GEOSException):
raise ValidationError({
'point': 'Invalid point geometry. Use geojson geometry or WKT. Example: {"type": "Point", "coordinates": [-123,49]}'
})
else:
qs = qs.annotate(
distance=Cast(Distance('geom', shape), output_field=FloatField())
).order_by('distance')
# can also supply a comma separated list of wells
wells = self.request.query_params.get('wells', None)
if wells:
wells = wells.split(',')
for w in wells:
if not w.isnumeric():
raise ValidationError(detail='Invalid well')
wells = map(int, wells)
qs = qs.filter(well_tag_number__in=wells)
return qs
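# Example request (illustrative): GET parameters such as
#   point={"type": "Point", "coordinates": [-123, 49]}&radius=1000&srid=4326
# make the queryset above annotate each well with its distance from the point
# and order by it, while RadiusFilterBackend performs the actual distance
# filtering.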
class WellDetail(WellDetailV1):
"""
Return well detail.
This view is open to all, and has no permissions.
"""
serializer_class = WellDetailSerializer
|
|
# coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GetSessionTypesRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'program_i_ds': 'list[int]',
'online_only': 'bool',
'limit': 'int',
'offset': 'int'
}
attribute_map = {
'program_i_ds': 'ProgramIDs',
'online_only': 'OnlineOnly',
'limit': 'Limit',
'offset': 'Offset'
}
def __init__(self, program_i_ds=None, online_only=None, limit=None, offset=None): # noqa: E501
"""GetSessionTypesRequest - a model defined in Swagger""" # noqa: E501
self._program_i_ds = None
self._online_only = None
self._limit = None
self._offset = None
self.discriminator = None
if program_i_ds is not None:
self.program_i_ds = program_i_ds
if online_only is not None:
self.online_only = online_only
if limit is not None:
self.limit = limit
if offset is not None:
self.offset = offset
@property
def program_i_ds(self):
"""Gets the program_i_ds of this GetSessionTypesRequest. # noqa: E501
Filters results to session types that belong to one of the given program IDs. If omitted, all program IDs return. # noqa: E501
:return: The program_i_ds of this GetSessionTypesRequest. # noqa: E501
:rtype: list[int]
"""
return self._program_i_ds
@program_i_ds.setter
def program_i_ds(self, program_i_ds):
"""Sets the program_i_ds of this GetSessionTypesRequest.
Filters results to session types that belong to one of the given program IDs. If omitted, all program IDs return. # noqa: E501
:param program_i_ds: The program_i_ds of this GetSessionTypesRequest. # noqa: E501
:type: list[int]
"""
self._program_i_ds = program_i_ds
@property
def online_only(self):
"""Gets the online_only of this GetSessionTypesRequest. # noqa: E501
When `true`, indicates that only the session types that can be booked online should be returned.<br /> Default: **false** # noqa: E501
:return: The online_only of this GetSessionTypesRequest. # noqa: E501
:rtype: bool
"""
return self._online_only
@online_only.setter
def online_only(self, online_only):
"""Sets the online_only of this GetSessionTypesRequest.
When `true`, indicates that only the session types that can be booked online should be returned.<br /> Default: **false** # noqa: E501
:param online_only: The online_only of this GetSessionTypesRequest. # noqa: E501
:type: bool
"""
self._online_only = online_only
@property
def limit(self):
"""Gets the limit of this GetSessionTypesRequest. # noqa: E501
Number of results to include, defaults to 100 # noqa: E501
:return: The limit of this GetSessionTypesRequest. # noqa: E501
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this GetSessionTypesRequest.
Number of results to include, defaults to 100 # noqa: E501
:param limit: The limit of this GetSessionTypesRequest. # noqa: E501
:type: int
"""
self._limit = limit
@property
def offset(self):
"""Gets the offset of this GetSessionTypesRequest. # noqa: E501
Page offset, defaults to 0. # noqa: E501
:return: The offset of this GetSessionTypesRequest. # noqa: E501
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this GetSessionTypesRequest.
Page offset, defaults to 0. # noqa: E501
:param offset: The offset of this GetSessionTypesRequest. # noqa: E501
:type: int
"""
self._offset = offset
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GetSessionTypesRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetSessionTypesRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
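def _example_get_session_types_request():
    # Illustrative usage sketch (not generated by swagger-codegen): build a
    # request model and inspect the dict produced by to_dict().
    req = GetSessionTypesRequest(program_i_ds=[1, 2], online_only=True, limit=25)
    # unset attributes stay None; set ones round-trip through the properties
    assert req.to_dict() == {
        'program_i_ds': [1, 2],
        'online_only': True,
        'limit': 25,
        'offset': None,
    }
    return req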
|
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path, subprocess
from ..mesonlib import EnvironmentException, version_compare
from .compilers import (
GCC_STANDARD,
d_dmd_buildtype_args,
d_gdc_buildtype_args,
d_ldc_buildtype_args,
get_gcc_soname_args,
gnu_color_args,
Compiler,
CompilerArgs,
)
class DCompiler(Compiler):
def __init__(self, exelist, version, is_cross):
self.language = 'd'
super().__init__(exelist, version)
self.id = 'unknown'
self.is_cross = is_cross
def sanity_check(self, work_dir, environment):
source_name = os.path.join(work_dir, 'sanity.d')
output_name = os.path.join(work_dir, 'dtest')
with open(source_name, 'w') as ofile:
ofile.write('''void main() {
}
''')
pc = subprocess.Popen(self.exelist + self.get_output_args(output_name) + [source_name], cwd=work_dir)
pc.wait()
if pc.returncode != 0:
raise EnvironmentException('D compiler %s can not compile programs.' % self.name_string())
if subprocess.call(output_name) != 0:
raise EnvironmentException('Executables created by D compiler %s are not runnable.' % self.name_string())
def needs_static_linker(self):
return True
def name_string(self):
return ' '.join(self.exelist)
def get_linker_exelist(self):
return self.exelist[:]
def get_preprocess_only_args(self):
return ['-E']
def get_compile_only_args(self):
return ['-c']
def depfile_for_object(self, objfile):
return objfile + '.' + self.get_depfile_suffix()
def get_depfile_suffix(self):
return 'dep'
def get_pic_args(self):
return ['-fPIC']
def get_std_shared_lib_link_args(self):
return ['-shared']
def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, is_shared_module):
# FIXME: Make this work for Windows, MacOS and cross-compiling
return get_gcc_soname_args(GCC_STANDARD, prefix, shlib_name, suffix, path, soversion, is_shared_module)
def get_unittest_args(self):
return ['-unittest']
def get_buildtype_linker_args(self, buildtype):
return []
def get_std_exe_link_args(self):
return []
def build_rpath_args(self, build_dir, from_dir, rpath_paths, install_rpath):
# This method is to be used by LDC and DMD.
# GDC can deal with the verbatim flags.
if not rpath_paths and not install_rpath:
return []
paths = ':'.join([os.path.join(build_dir, p) for p in rpath_paths])
if len(paths) < len(install_rpath):
padding = 'X' * (len(install_rpath) - len(paths))
if not paths:
paths = padding
else:
paths = paths + ':' + padding
return ['-L-rpath={}'.format(paths)]
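    # Worked example (illustrative, not in upstream Meson): with
    # build_dir='/b', rpath_paths=['sub'] and install_rpath='/opt/proj/lib',
    # paths is '/b/sub' (6 chars), shorter than the 13-char install rpath, so
    # it is padded with seven 'X's to '/b/sub:XXXXXXX'. Keeping the build-time
    # rpath at least as long as the install rpath lets it be rewritten in
    # place at install time without relinking.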
def _get_compiler_check_args(self, env, extra_args, dependencies, mode='compile'):
if extra_args is None:
extra_args = []
elif isinstance(extra_args, str):
extra_args = [extra_args]
if dependencies is None:
dependencies = []
elif not isinstance(dependencies, list):
dependencies = [dependencies]
# Collect compiler arguments
args = CompilerArgs(self)
for d in dependencies:
# Add compile flags needed by dependencies
args += d.get_compile_args()
if mode == 'link':
# Add link flags needed to find dependencies
args += d.get_link_args()
if mode == 'compile':
# Add DFLAGS from the env
args += env.coredata.external_args[self.language]
elif mode == 'link':
# Add LDFLAGS from the env
args += env.coredata.external_link_args[self.language]
# extra_args must override all other arguments, so we add them last
args += extra_args
return args
def compiles(self, code, env, extra_args=None, dependencies=None, mode='compile'):
args = self._get_compiler_check_args(env, extra_args, dependencies, mode)
with self.compile(code, args, mode) as p:
return p.returncode == 0
def has_multi_arguments(self, args, env):
return self.compiles('int i;\n', env, extra_args=args)
@classmethod
def translate_args_to_nongnu(cls, args):
dcargs = []
# Translate common arguments to flags the LDC/DMD compilers
# can understand.
# The flags might have been added by pkg-config files,
# and are therefore out of the user's control.
for arg in args:
if arg == '-pthread':
continue
if arg.startswith('-Wl,'):
linkargs = arg[arg.index(',') + 1:].split(',')
for la in linkargs:
dcargs.append('-L' + la.strip())
continue
elif arg.startswith('-l'):
# translate library link flag
dcargs.append('-L' + arg)
continue
elif arg.startswith('-L/') or arg.startswith('-L./'):
# we need to handle cases where -L is set by e.g. a pkg-config
# setting to select a linker search path. We can however not
# unconditionally prefix '-L' with '-L' because the user might
# have set this flag too to do what it is intended to for this
# compiler (pass flag through to the linker)
# Hence, we guess here whether the flag was intended to pass
# a linker search path.
dcargs.append('-L' + arg)
continue
dcargs.append(arg)
return dcargs
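def _example_translate_args_to_nongnu():
    # Illustrative sketch (not part of upstream Meson): demonstrates how the
    # classmethod above rewrites GCC-style flags for LDC/DMD.
    args = ['-pthread', '-Wl,-O1,--as-needed', '-lz', '-L/usr/lib']
    translated = DCompiler.translate_args_to_nongnu(args)
    # '-pthread' is dropped, '-Wl,...' is split into per-flag '-L' arguments,
    # '-lz' becomes '-L-lz', and the linker search path gains a second '-L'.
    assert translated == ['-L-O1', '-L--as-needed', '-L-lz', '-L-L/usr/lib']
    return translated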
class GnuDCompiler(DCompiler):
def __init__(self, exelist, version, is_cross):
DCompiler.__init__(self, exelist, version, is_cross)
self.id = 'gcc'
default_warn_args = ['-Wall', '-Wdeprecated']
self.warn_args = {'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra', '-Wpedantic']}
self.base_options = ['b_colorout', 'b_sanitize', 'b_staticpic']
def get_colorout_args(self, colortype):
if version_compare(self.version, '>=4.9.0'):
return gnu_color_args[colortype][:]
return []
def get_dependency_gen_args(self, outtarget, outfile):
return ['-fmake-deps=' + outfile]
def get_output_args(self, target):
return ['-o', target]
def get_linker_output_args(self, target):
return ['-o', target]
def get_include_args(self, path, is_system):
return ['-I' + path]
def get_warn_args(self, level):
return self.warn_args[level]
def get_werror_args(self):
return ['-Werror']
def get_linker_search_args(self, dirname):
return ['-L' + dirname]
def get_buildtype_args(self, buildtype):
return d_gdc_buildtype_args[buildtype]
def build_rpath_args(self, build_dir, from_dir, rpath_paths, install_rpath):
return self.build_unix_rpath_args(build_dir, from_dir, rpath_paths, install_rpath)
def get_unittest_args(self):
return ['-funittest']
class LLVMDCompiler(DCompiler):
def __init__(self, exelist, version, is_cross):
DCompiler.__init__(self, exelist, version, is_cross)
self.id = 'llvm'
self.base_options = ['b_coverage', 'b_colorout']
def get_colorout_args(self, colortype):
if colortype == 'always':
return ['-enable-color']
return []
def get_dependency_gen_args(self, outtarget, outfile):
# LDC using the -deps flag returns a non-Makefile dependency-info file, which
# the backends can not use. So we disable this feature for now.
return []
def get_output_args(self, target):
return ['-of', target]
def get_linker_output_args(self, target):
return ['-of', target]
def get_include_args(self, path, is_system):
return ['-I' + path]
def get_warn_args(self, level):
if level == '2' or level == '3':
return ['-wi', '-dw']
else:
return ['-wi']
def get_werror_args(self):
return ['-w']
def get_coverage_args(self):
return ['-cov']
def get_buildtype_args(self, buildtype):
return d_ldc_buildtype_args[buildtype]
def get_pic_args(self):
return ['-relocation-model=pic']
def get_linker_search_args(self, dirname):
# -L is recognized as "add this to the search path" by the linker,
# while the compiler recognizes it as "pass to linker". So, the first
# -L is for the compiler, telling it to pass the second -L to the linker.
return ['-L-L' + dirname]
@classmethod
def unix_args_to_native(cls, args):
return cls.translate_args_to_nongnu(args)
class DmdDCompiler(DCompiler):
def __init__(self, exelist, version, is_cross):
DCompiler.__init__(self, exelist, version, is_cross)
self.id = 'dmd'
self.base_options = ['b_coverage', 'b_colorout']
def get_colorout_args(self, colortype):
if colortype == 'always':
return ['-color=on']
return []
def get_dependency_gen_args(self, outtarget, outfile):
        # DMD's -deps flag (like LDC's) returns a non-Makefile dependency-info
        # file, which the backends can not use. So we disable this feature for now.
return []
def get_output_args(self, target):
return ['-of' + target]
def get_werror_args(self):
return ['-w']
def get_linker_output_args(self, target):
return ['-of' + target]
def get_include_args(self, path, is_system):
return ['-I' + path]
def get_warn_args(self, level):
return ['-wi']
def get_coverage_args(self):
return ['-cov']
def get_linker_search_args(self, dirname):
# -L is recognized as "add this to the search path" by the linker,
# while the compiler recognizes it as "pass to linker". So, the first
# -L is for the compiler, telling it to pass the second -L to the linker.
return ['-L-L' + dirname]
def get_buildtype_args(self, buildtype):
return d_dmd_buildtype_args[buildtype]
def get_std_shared_lib_link_args(self):
return ['-shared', '-defaultlib=libphobos2.so']
@classmethod
def unix_args_to_native(cls, args):
return cls.translate_args_to_nongnu(args)
|
|
#!/usr/bin/env python
"""Tests for the hunt database api."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
import random
from future.utils import text_type
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats
from grr_response_core.lib.rdfvalues import stats as rdf_stats
from grr_response_core.lib.util import compatibility
from grr_response_server import flow
from grr_response_server.databases import db
from grr_response_server.databases import db_test_utils
from grr_response_server.output_plugins import email_plugin
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
from grr_response_server.rdfvalues import hunt_objects as rdf_hunt_objects
from grr_response_server.rdfvalues import objects as rdf_objects
from grr_response_server.rdfvalues import output_plugin as rdf_output_plugin
class DatabaseTestHuntMixin(object):
"""An abstract class for testing db.Database implementations.
This mixin adds methods to test the handling of hunts.
"""
def _SetupHuntClientAndFlow(self,
client_id=None,
hunt_id=None,
flow_id=None,
**additional_flow_args):
client_id = db_test_utils.InitializeClient(self.db, client_id=client_id)
# Top-level hunt-induced flows should have hunt's id.
flow_id = flow_id or hunt_id
self.db.WriteClientMetadata(client_id, fleetspeak_enabled=False)
rdf_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_id=flow_id,
parent_hunt_id=hunt_id,
create_time=rdfvalue.RDFDatetime.Now(),
**additional_flow_args)
self.db.WriteFlowObject(rdf_flow)
return client_id, flow_id
def testWritingAndReadingHuntObjectWorks(self):
then = rdfvalue.RDFDatetime.Now()
hunt_obj = rdf_hunt_objects.Hunt(creator="Foo", description="Lorem ipsum.")
self.db.WriteHuntObject(hunt_obj)
read_hunt_obj = self.db.ReadHuntObject(hunt_obj.hunt_id)
self.assertEqual(read_hunt_obj.creator, "Foo")
self.assertEqual(read_hunt_obj.description, "Lorem ipsum.")
self.assertGreater(read_hunt_obj.create_time, then)
self.assertGreater(read_hunt_obj.last_update_time, then)
def testHuntObjectCannotBeOverwritten(self):
hunt_id = "ABCDEF42"
hunt_obj_v1 = rdf_hunt_objects.Hunt(hunt_id=hunt_id, description="foo")
hunt_obj_v2 = rdf_hunt_objects.Hunt(hunt_id=hunt_id, description="bar")
self.db.WriteHuntObject(hunt_obj_v1)
with self.assertRaises(db.DuplicatedHuntError) as context:
self.db.WriteHuntObject(hunt_obj_v2)
self.assertEqual(context.exception.hunt_id, hunt_id)
def testHuntObjectCannotBeWrittenInNonPausedState(self):
hunt_object = rdf_hunt_objects.Hunt(
hunt_state=rdf_hunt_objects.Hunt.HuntState.STARTED)
with self.assertRaises(ValueError):
self.db.WriteHuntObject(hunt_object)
def testReadingNonExistentHuntObjectRaises(self):
with self.assertRaises(db.UnknownHuntError):
self.db.ReadHuntObject(rdf_hunt_objects.RandomHuntId())
def testUpdateHuntObjectRaisesIfHuntDoesNotExist(self):
with self.assertRaises(db.UnknownHuntError):
self.db.UpdateHuntObject(
rdf_hunt_objects.RandomHuntId(),
hunt_state=rdf_hunt_objects.Hunt.HuntState.STARTED)
def testUpdateHuntObjectCorrectlyUpdatesHuntObject(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
self.db.UpdateHuntObject(
hunt_obj.hunt_id,
duration=rdfvalue.Duration.From(1, rdfvalue.WEEKS),
client_rate=33,
client_limit=48,
hunt_state=rdf_hunt_objects.Hunt.HuntState.STOPPED,
hunt_state_comment="foo",
start_time=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(43),
num_clients_at_start_time=44)
updated_hunt_obj = self.db.ReadHuntObject(hunt_obj.hunt_id)
self.assertEqual(updated_hunt_obj.duration,
rdfvalue.Duration.From(1, rdfvalue.WEEKS))
self.assertEqual(updated_hunt_obj.client_rate, 33)
self.assertEqual(updated_hunt_obj.client_limit, 48)
self.assertEqual(updated_hunt_obj.hunt_state,
rdf_hunt_objects.Hunt.HuntState.STOPPED)
self.assertEqual(updated_hunt_obj.hunt_state_comment, "foo")
self.assertEqual(updated_hunt_obj.init_start_time,
rdfvalue.RDFDatetime.FromSecondsSinceEpoch(43))
self.assertEqual(updated_hunt_obj.last_start_time,
rdfvalue.RDFDatetime.FromSecondsSinceEpoch(43))
self.assertEqual(updated_hunt_obj.num_clients_at_start_time, 44)
def testUpdateHuntObjectCorrectlyUpdatesInitAndLastStartTime(self):
hunt_object = rdf_hunt_objects.Hunt(description="Lorem ipsum.")
self.db.WriteHuntObject(hunt_object)
timestamp_1 = rdfvalue.RDFDatetime.Now()
self.db.UpdateHuntObject(hunt_object.hunt_id, start_time=timestamp_1)
timestamp_2 = rdfvalue.RDFDatetime.Now()
self.db.UpdateHuntObject(hunt_object.hunt_id, start_time=timestamp_2)
updated_hunt_object = self.db.ReadHuntObject(hunt_object.hunt_id)
self.assertEqual(updated_hunt_object.init_start_time, timestamp_1)
self.assertEqual(updated_hunt_object.last_start_time, timestamp_2)
def testDeletingHuntObjectWorks(self):
hunt_obj = rdf_hunt_objects.Hunt()
self.db.WriteHuntObject(hunt_obj)
# This shouldn't raise.
self.db.ReadHuntObject(hunt_obj.hunt_id)
self.db.DeleteHuntObject(hunt_obj.hunt_id)
# The hunt is deleted: this should raise now.
with self.assertRaises(db.UnknownHuntError):
self.db.ReadHuntObject(hunt_obj.hunt_id)
def testReadHuntObjectsReturnsEmptyListWhenNoHunts(self):
self.assertEqual(self.db.ReadHuntObjects(offset=0, count=db.MAX_COUNT), [])
def _CreateMultipleHunts(self):
result = []
for i in range(10):
if i < 5:
creator = "user-a"
else:
creator = "user-b"
hunt_obj = rdf_hunt_objects.Hunt(
description="foo_%d" % i, creator=creator)
self.db.WriteHuntObject(hunt_obj)
result.append(self.db.ReadHuntObject(hunt_obj.hunt_id))
return result
def testReadHuntObjectsWithoutFiltersReadsAllHunts(self):
expected = self._CreateMultipleHunts()
got = self.db.ReadHuntObjects(0, db.MAX_COUNT)
self.assertListEqual(got, list(reversed(expected)))
def testReadHuntObjectsWithCreatorFilterIsAppliedCorrectly(self):
all_hunts = self._CreateMultipleHunts()
got = self.db.ReadHuntObjects(0, db.MAX_COUNT, with_creator="user-a")
self.assertListEqual(got, list(reversed(all_hunts[:5])))
got = self.db.ReadHuntObjects(0, db.MAX_COUNT, with_creator="user-b")
self.assertListEqual(got, list(reversed(all_hunts[5:])))
def testReadHuntObjectsCreatedAfterFilterIsAppliedCorrectly(self):
all_hunts = self._CreateMultipleHunts()
got = self.db.ReadHuntObjects(
0,
db.MAX_COUNT,
created_after=all_hunts[0].create_time -
rdfvalue.Duration.From(1, rdfvalue.SECONDS))
self.assertListEqual(got, list(reversed(all_hunts)))
got = self.db.ReadHuntObjects(
0, db.MAX_COUNT, created_after=all_hunts[2].create_time)
self.assertListEqual(got, list(reversed(all_hunts[3:])))
got = self.db.ReadHuntObjects(
0, db.MAX_COUNT, created_after=all_hunts[-1].create_time)
self.assertEmpty(got)
def testReadHuntObjectsWithDescriptionMatchFilterIsAppliedCorrectly(self):
all_hunts = self._CreateMultipleHunts()
got = self.db.ReadHuntObjects(
0, db.MAX_COUNT, with_description_match="foo_")
self.assertListEqual(got, list(reversed(all_hunts)))
got = self.db.ReadHuntObjects(
0, db.MAX_COUNT, with_description_match="blah")
self.assertEmpty(got)
got = self.db.ReadHuntObjects(
0, db.MAX_COUNT, with_description_match="foo_3")
self.assertListEqual(got, [all_hunts[3]])
def testReadHuntObjectsCombinationsOfFiltersAreAppliedCorrectly(self):
expected = self._CreateMultipleHunts()
self.DoFilterCombinationsAndOffsetCountTest(
self.db.ReadHuntObjects,
conditions=dict(
with_creator="user-a",
created_after=expected[2].create_time,
with_description_match="foo_4"),
error_desc="ReadHuntObjects")
def testListHuntObjectsReturnsEmptyListWhenNoHunts(self):
self.assertEqual(self.db.ListHuntObjects(offset=0, count=db.MAX_COUNT), [])
def testListHuntObjectsWithoutFiltersReadsAllHunts(self):
expected = [
rdf_hunt_objects.HuntMetadata.FromHunt(h)
for h in self._CreateMultipleHunts()
]
got = self.db.ListHuntObjects(0, db.MAX_COUNT)
self.assertListEqual(got, list(reversed(expected)))
def testListHuntObjectsWithCreatorFilterIsAppliedCorrectly(self):
all_hunts = [
rdf_hunt_objects.HuntMetadata.FromHunt(h)
for h in self._CreateMultipleHunts()
]
got = self.db.ListHuntObjects(0, db.MAX_COUNT, with_creator="user-a")
self.assertListEqual(got, list(reversed(all_hunts[:5])))
got = self.db.ListHuntObjects(0, db.MAX_COUNT, with_creator="user-b")
self.assertListEqual(got, list(reversed(all_hunts[5:])))
def testListHuntObjectsCreatedAfterFilterIsAppliedCorrectly(self):
all_hunts = [
rdf_hunt_objects.HuntMetadata.FromHunt(h)
for h in self._CreateMultipleHunts()
]
got = self.db.ListHuntObjects(
0,
db.MAX_COUNT,
created_after=all_hunts[0].create_time -
rdfvalue.Duration.From(1, rdfvalue.SECONDS))
self.assertListEqual(got, list(reversed(all_hunts)))
got = self.db.ListHuntObjects(
0, db.MAX_COUNT, created_after=all_hunts[2].create_time)
self.assertListEqual(got, list(reversed(all_hunts[3:])))
got = self.db.ListHuntObjects(
0, db.MAX_COUNT, created_after=all_hunts[-1].create_time)
self.assertEmpty(got)
def testListHuntObjectsWithDescriptionMatchFilterIsAppliedCorrectly(self):
all_hunts = [
rdf_hunt_objects.HuntMetadata.FromHunt(h)
for h in self._CreateMultipleHunts()
]
got = self.db.ListHuntObjects(
0, db.MAX_COUNT, with_description_match="foo_")
self.assertListEqual(got, list(reversed(all_hunts)))
got = self.db.ListHuntObjects(
0, db.MAX_COUNT, with_description_match="blah")
self.assertEmpty(got)
got = self.db.ListHuntObjects(
0, db.MAX_COUNT, with_description_match="foo_3")
self.assertListEqual(got, [all_hunts[3]])
def testListHuntObjectsCombinationsOfFiltersAreAppliedCorrectly(self):
expected = self._CreateMultipleHunts()
self.DoFilterCombinationsAndOffsetCountTest(
self.db.ListHuntObjects,
conditions=dict(
with_creator="user-a",
created_after=expected[2].create_time,
with_description_match="foo_4"),
error_desc="ListHuntObjects")
def testWritingAndReadingHuntOutputPluginsStatesWorks(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
plugin_name=compatibility.GetName(email_plugin.EmailOutputPlugin),
plugin_args=email_plugin.EmailOutputPluginArgs(emails_limit=42))
state_1 = rdf_flow_runner.OutputPluginState(
plugin_descriptor=plugin_descriptor, plugin_state={})
plugin_descriptor = rdf_output_plugin.OutputPluginDescriptor(
plugin_name=compatibility.GetName(email_plugin.EmailOutputPlugin),
plugin_args=email_plugin.EmailOutputPluginArgs(emails_limit=43))
state_2 = rdf_flow_runner.OutputPluginState(
plugin_descriptor=plugin_descriptor, plugin_state={})
written_states = [state_1, state_2]
self.db.WriteHuntOutputPluginsStates(hunt_obj.hunt_id, written_states)
read_states = self.db.ReadHuntOutputPluginsStates(hunt_obj.hunt_id)
self.assertEqual(read_states, written_states)
def testReadingHuntOutputPluginsReturnsThemInOrderOfWriting(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
states = []
for i in range(100):
states.append(
rdf_flow_runner.OutputPluginState(
plugin_descriptor=rdf_output_plugin.OutputPluginDescriptor(
plugin_name="DummyHuntOutputPlugin_%d" % i),
plugin_state={}))
random.shuffle(states)
self.db.WriteHuntOutputPluginsStates(hunt_obj.hunt_id, states)
read_states = self.db.ReadHuntOutputPluginsStates(hunt_obj.hunt_id)
self.assertEqual(read_states, states)
def testWritingHuntOutputStatesForZeroPlugins(self):
# Passing an empty list of states is always a no-op so this should not
# raise, even if the hunt does not exist.
self.db.WriteHuntOutputPluginsStates(rdf_hunt_objects.RandomHuntId(), [])
def testWritingHuntOutputStatesForUnknownHuntRaises(self):
state = rdf_flow_runner.OutputPluginState(
plugin_descriptor=rdf_output_plugin.OutputPluginDescriptor(
plugin_name="DummyHuntOutputPlugin1"),
plugin_state={})
with self.assertRaises(db.UnknownHuntError):
self.db.WriteHuntOutputPluginsStates(rdf_hunt_objects.RandomHuntId(),
[state])
def testReadingHuntOutputPluginsWithoutStates(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
res = self.db.ReadHuntOutputPluginsStates(hunt_obj.hunt_id)
self.assertEqual(res, [])
def testReadingHuntOutputStatesForUnknownHuntRaises(self):
with self.assertRaises(db.UnknownHuntError):
self.db.ReadHuntOutputPluginsStates(rdf_hunt_objects.RandomHuntId())
def testUpdatingHuntOutputStateForUnknownHuntRaises(self):
with self.assertRaises(db.UnknownHuntError):
self.db.UpdateHuntOutputPluginState(rdf_hunt_objects.RandomHuntId(),
0, lambda x: x)
def testUpdatingHuntOutputStateWorksCorrectly(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
state_1 = rdf_flow_runner.OutputPluginState(
plugin_descriptor=rdf_output_plugin.OutputPluginDescriptor(
plugin_name="DummyHuntOutputPlugin1"),
plugin_state={})
state_2 = rdf_flow_runner.OutputPluginState(
plugin_descriptor=rdf_output_plugin.OutputPluginDescriptor(
plugin_name="DummyHuntOutputPlugin2"),
plugin_state={})
self.db.WriteHuntOutputPluginsStates(hunt_obj.hunt_id, [state_1, state_2])
def Update(s):
s["foo"] = "bar"
return s
self.db.UpdateHuntOutputPluginState(hunt_obj.hunt_id, 0, Update)
states = self.db.ReadHuntOutputPluginsStates(hunt_obj.hunt_id)
self.assertEqual(states[0].plugin_state, {"foo": "bar"})
self.assertEqual(states[1].plugin_state, {})
self.db.UpdateHuntOutputPluginState(hunt_obj.hunt_id, 1, Update)
states = self.db.ReadHuntOutputPluginsStates(hunt_obj.hunt_id)
self.assertEqual(states[0].plugin_state, {"foo": "bar"})
self.assertEqual(states[1].plugin_state, {"foo": "bar"})
def testReadHuntLogEntriesReturnsEntryFromSingleHuntFlow(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
client_id, flow_id = self._SetupHuntClientAndFlow(
client_id="C.12345678901234aa", hunt_id=hunt_obj.hunt_id)
self.db.WriteFlowLogEntries([
rdf_flow_objects.FlowLogEntry(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_obj.hunt_id,
message="blah")
])
hunt_log_entries = self.db.ReadHuntLogEntries(hunt_obj.hunt_id, 0, 10)
self.assertLen(hunt_log_entries, 1)
self.assertIsInstance(hunt_log_entries[0], rdf_flow_objects.FlowLogEntry)
self.assertEqual(hunt_log_entries[0].hunt_id, hunt_obj.hunt_id)
self.assertEqual(hunt_log_entries[0].client_id, client_id)
self.assertEqual(hunt_log_entries[0].flow_id, flow_id)
self.assertEqual(hunt_log_entries[0].message, "blah")
def _WriteNestedAndNonNestedLogEntries(self, hunt_obj):
client_id, flow_id = self._SetupHuntClientAndFlow(hunt_id=hunt_obj.hunt_id)
self.db.WriteFlowLogEntries([
# Top-level hunt-induced flows should have the hunt's ID.
rdf_flow_objects.FlowLogEntry(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_obj.hunt_id,
message="blah_a"),
rdf_flow_objects.FlowLogEntry(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_obj.hunt_id,
message="blah_b")
])
for i in range(10):
_, nested_flow_id = self._SetupHuntClientAndFlow(
client_id=client_id,
parent_flow_id=flow_id,
hunt_id=hunt_obj.hunt_id,
flow_id=flow.RandomFlowId())
self.db.WriteFlowLogEntries([
rdf_flow_objects.FlowLogEntry(
client_id=client_id,
flow_id=nested_flow_id,
hunt_id=hunt_obj.hunt_id,
message="blah_a_%d" % i),
rdf_flow_objects.FlowLogEntry(
client_id=client_id,
flow_id=nested_flow_id,
hunt_id=hunt_obj.hunt_id,
message="blah_b_%d" % i)
])
def testReadHuntLogEntriesIgnoresNestedFlows(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
self._WriteNestedAndNonNestedLogEntries(hunt_obj)
hunt_log_entries = self.db.ReadHuntLogEntries(hunt_obj.hunt_id, 0, 10)
self.assertLen(hunt_log_entries, 2)
self.assertEqual(hunt_log_entries[0].message, "blah_a")
self.assertEqual(hunt_log_entries[1].message, "blah_b")
def testCountHuntLogEntriesIgnoresNestedFlows(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
self._WriteNestedAndNonNestedLogEntries(hunt_obj)
num_hunt_log_entries = self.db.CountHuntLogEntries(hunt_obj.hunt_id)
self.assertEqual(num_hunt_log_entries, 2)
def _WriteHuntLogEntries(self, msg="blah"):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
for i in range(10):
client_id, flow_id = self._SetupHuntClientAndFlow(
client_id="C.12345678901234a%d" % i, hunt_id=hunt_obj.hunt_id)
self.db.WriteFlowLogEntries([
rdf_flow_objects.FlowLogEntry(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_obj.hunt_id,
message="%s%d" % (msg, i))
])
return hunt_obj
def testReadHuntLogEntriesReturnsEntryFromMultipleHuntFlows(self):
hunt_obj = self._WriteHuntLogEntries()
hunt_log_entries = self.db.ReadHuntLogEntries(hunt_obj.hunt_id, 0, 100)
self.assertLen(hunt_log_entries, 10)
# Make sure messages are returned in timestamps-ascending order.
for i, e in enumerate(hunt_log_entries):
self.assertEqual(e.message, "blah%d" % i)
def testReadHuntLogEntriesCorrectlyAppliesOffsetAndCountFilters(self):
hunt_obj = self._WriteHuntLogEntries()
for i in range(10):
hunt_log_entries = self.db.ReadHuntLogEntries(hunt_obj.hunt_id, i, 1)
self.assertLen(hunt_log_entries, 1)
self.assertEqual(hunt_log_entries[0].message, "blah%d" % i)
def testReadHuntLogEntriesCorrectlyAppliesWithSubstringFilter(self):
hunt_obj = self._WriteHuntLogEntries()
hunt_log_entries = self.db.ReadHuntLogEntries(
hunt_obj.hunt_id, 0, 100, with_substring="foo")
self.assertEmpty(hunt_log_entries)
hunt_log_entries = self.db.ReadHuntLogEntries(
hunt_obj.hunt_id, 0, 100, with_substring="blah")
self.assertLen(hunt_log_entries, 10)
# Make sure messages are returned in timestamps-ascending order.
for i, e in enumerate(hunt_log_entries):
self.assertEqual(e.message, "blah%d" % i)
hunt_log_entries = self.db.ReadHuntLogEntries(
hunt_obj.hunt_id, 0, 100, with_substring="blah1")
self.assertLen(hunt_log_entries, 1)
self.assertEqual(hunt_log_entries[0].message, "blah1")
def testReadHuntLogEntriesSubstringFilterIsCorrectlyEscaped(self):
hunt_obj = self._WriteHuntLogEntries("ABC%1")
hunt_log_entries = self.db.ReadHuntLogEntries(
hunt_obj.hunt_id, 0, 100, with_substring="BC%1")
self.assertLen(hunt_log_entries, 10)
hunt_log_entries = self.db.ReadHuntLogEntries(
hunt_obj.hunt_id, 0, 100, with_substring="B%1")
self.assertLen(hunt_log_entries, 0)
def testReadHuntLogEntriesCorrectlyAppliesCombinationOfFilters(self):
hunt_obj = self._WriteHuntLogEntries()
hunt_log_entries = self.db.ReadHuntLogEntries(
hunt_obj.hunt_id, 0, 1, with_substring="blah")
self.assertLen(hunt_log_entries, 1)
self.assertEqual(hunt_log_entries[0].message, "blah0")
def testCountHuntLogEntriesReturnsCorrectHuntLogEntriesCount(self):
hunt_obj = self._WriteHuntLogEntries()
num_entries = self.db.CountHuntLogEntries(hunt_obj.hunt_id)
self.assertEqual(num_entries, 10)
def _WriteHuntResults(self, sample_results=None):
for r in sample_results:
self.db.WriteFlowResults([r])
    # Update num_replies_sent for all flows referenced in sample_results,
    # since some DB implementations may rely on this data when counting
    # results.
results_per_flow = collections.Counter()
for r in sample_results:
results_per_flow[(r.client_id, r.flow_id)] += 1
for (client_id, flow_id), delta in results_per_flow.items():
f_obj = self.db.ReadFlowObject(client_id, flow_id)
f_obj.num_replies_sent += delta
self.db.UpdateFlow(client_id, flow_id, flow_obj=f_obj)
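  # Illustrative note: writing two results for (client_a, flow_1) and one for
  # (client_b, flow_2) produces a Counter of
  # {(client_a, flow_1): 2, (client_b, flow_2): 1}, so flow_1's
  # num_replies_sent is bumped by 2 and flow_2's by 1 before hunt result
  # counts are read back.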
def _SampleSingleTypeHuntResults(self,
client_id=None,
flow_id=None,
hunt_id=None,
serial_number=None,
count=10):
self.assertIsNotNone(client_id)
self.assertIsNotNone(flow_id)
self.assertIsNotNone(hunt_id)
res = []
for i in range(count):
res.append(
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_id,
tag="tag_%d" % i,
payload=rdf_client.ClientSummary(
client_id=client_id,
system_manufacturer="manufacturer_%d" % i,
serial_number=serial_number,
install_date=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10 +
i))))
return res
def _SampleTwoTypeHuntResults(self,
client_id=None,
flow_id=None,
hunt_id=None,
serial_number=None,
count_per_type=5,
timestamp_start=10):
self.assertIsNotNone(client_id)
self.assertIsNotNone(flow_id)
self.assertIsNotNone(hunt_id)
res = []
for i in range(count_per_type):
res.append(
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_id,
tag="tag_%d" % i,
payload=rdf_client.ClientSummary(
client_id=client_id,
system_manufacturer="manufacturer_%d" % i,
serial_number=serial_number,
install_date=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
timestamp_start + i))))
for i in range(count_per_type):
res.append(
rdf_flow_objects.FlowResult(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_id,
tag="tag_%d" % i,
payload=rdf_client.ClientCrash(
client_id=client_id,
timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
timestamp_start + i))))
return res
def testReadHuntResultsReadsSingleResultOfSingleType(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
client_id, flow_id = self._SetupHuntClientAndFlow(hunt_id=hunt_obj.hunt_id)
sample_results = self._SampleSingleTypeHuntResults(
client_id=client_id, flow_id=flow_id, hunt_id=hunt_obj.hunt_id, count=1)
self._WriteHuntResults(sample_results)
results = self.db.ReadHuntResults(hunt_obj.hunt_id, 0, 10)
self.assertLen(results, 1)
self.assertEqual(results[0].hunt_id, hunt_obj.hunt_id)
self.assertEqual(results[0].payload, sample_results[0].payload)
def testReadHuntResultsReadsMultipleResultOfSingleType(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
client_id, flow_id = self._SetupHuntClientAndFlow(hunt_id=hunt_obj.hunt_id)
sample_results = self._SampleSingleTypeHuntResults(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_obj.hunt_id,
count=10)
self._WriteHuntResults(sample_results)
results = self.db.ReadHuntResults(hunt_obj.hunt_id, 0, 1000)
self.assertLen(results, 10)
for i in range(10):
self.assertEqual(results[i].hunt_id, hunt_obj.hunt_id)
self.assertEqual(results[i].payload, sample_results[i].payload)
def testReadHuntResultsReadsMultipleResultOfMultipleTypes(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
client_id_1, flow_id_1 = self._SetupHuntClientAndFlow(
hunt_id=hunt_obj.hunt_id)
sample_results_1 = self._SampleTwoTypeHuntResults(
client_id=client_id_1, flow_id=flow_id_1, hunt_id=hunt_obj.hunt_id)
self._WriteHuntResults(sample_results_1)
client_id_2, flow_id_2 = self._SetupHuntClientAndFlow(
hunt_id=hunt_obj.hunt_id)
sample_results_2 = self._SampleTwoTypeHuntResults(
client_id=client_id_2, flow_id=flow_id_2, hunt_id=hunt_obj.hunt_id)
self._WriteHuntResults(sample_results_2)
sample_results = sample_results_1 + sample_results_2
results = self.db.ReadHuntResults(hunt_obj.hunt_id, 0, 1000)
self.assertLen(results, len(sample_results))
self.assertListEqual([i.payload for i in results],
[i.payload for i in sample_results])
def testReadHuntResultsCorrectlyAppliedOffsetAndCountFilters(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
sample_results = []
for i in range(10):
client_id, flow_id = self._SetupHuntClientAndFlow(
hunt_id=hunt_obj.hunt_id)
results = self._SampleSingleTypeHuntResults(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_obj.hunt_id,
count=1)
sample_results.extend(results)
self._WriteHuntResults(results)
for l in range(1, 11):
for i in range(10):
results = self.db.ReadHuntResults(hunt_obj.hunt_id, i, l)
expected = sample_results[i:i + l]
result_payloads = [x.payload for x in results]
expected_payloads = [x.payload for x in expected]
self.assertEqual(
result_payloads, expected_payloads,
"Results differ from expected (from %d, size %d): %s vs %s" %
(i, l, result_payloads, expected_payloads))
def testReadHuntResultsCorrectlyAppliesWithTagFilter(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
client_id, flow_id = self._SetupHuntClientAndFlow(hunt_id=hunt_obj.hunt_id)
sample_results = self._SampleSingleTypeHuntResults(
client_id=client_id, flow_id=flow_id, hunt_id=hunt_obj.hunt_id)
self._WriteHuntResults(sample_results)
results = self.db.ReadHuntResults(hunt_obj.hunt_id, 0, 100, with_tag="blah")
self.assertFalse(results)
results = self.db.ReadHuntResults(hunt_obj.hunt_id, 0, 100, with_tag="tag")
self.assertFalse(results)
results = self.db.ReadHuntResults(
hunt_obj.hunt_id, 0, 100, with_tag="tag_1")
self.assertEqual([i.payload for i in results],
[i.payload for i in sample_results if i.tag == "tag_1"])
def testReadHuntResultsCorrectlyAppliesWithTypeFilter(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
sample_results = []
for i in range(10):
client_id, flow_id = self._SetupHuntClientAndFlow(
hunt_id=hunt_obj.hunt_id)
results = self._SampleTwoTypeHuntResults(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_obj.hunt_id,
count_per_type=1)
sample_results.extend(results)
self._WriteHuntResults(results)
results = self.db.ReadHuntResults(
hunt_obj.hunt_id,
0,
100,
with_type=compatibility.GetName(rdf_client.ClientInformation))
self.assertFalse(results)
results = self.db.ReadHuntResults(
hunt_obj.hunt_id,
0,
100,
with_type=compatibility.GetName(rdf_client.ClientSummary))
self.assertCountEqual(
[i.payload for i in results],
[
i.payload
for i in sample_results
if isinstance(i.payload, rdf_client.ClientSummary)
],
)
def testReadHuntResultsCorrectlyAppliesWithSubstringFilter(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
client_id, flow_id = self._SetupHuntClientAndFlow(hunt_id=hunt_obj.hunt_id)
sample_results = self._SampleSingleTypeHuntResults(
client_id=client_id, flow_id=flow_id, hunt_id=hunt_obj.hunt_id)
self._WriteHuntResults(sample_results)
results = self.db.ReadHuntResults(
hunt_obj.hunt_id, 0, 100, with_substring="blah")
self.assertEmpty(results)
results = self.db.ReadHuntResults(
hunt_obj.hunt_id, 0, 100, with_substring="manufacturer")
self.assertEqual(
[i.payload for i in results],
[i.payload for i in sample_results],
)
results = self.db.ReadHuntResults(
hunt_obj.hunt_id, 0, 100, with_substring="manufacturer_1")
self.assertEqual([i.payload for i in results], [sample_results[1].payload])
def testReadHuntResultsSubstringFilterIsCorrectlyEscaped(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
client_id, flow_id = self._SetupHuntClientAndFlow(hunt_id=hunt_obj.hunt_id)
sample_results = self._SampleSingleTypeHuntResults(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_obj.hunt_id,
serial_number="ABC%123")
self._WriteHuntResults(sample_results)
results = self.db.ReadHuntResults(
hunt_obj.hunt_id, 0, 100, with_substring="ABC%123")
self.assertLen(results, 10)
results = self.db.ReadHuntResults(
hunt_obj.hunt_id, 0, 100, with_substring="AB%23")
self.assertLen(results, 0)
def testReadHuntResultsCorrectlyAppliesVariousCombinationsOfFilters(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
sample_results = []
for i in range(10):
client_id, flow_id = self._SetupHuntClientAndFlow(
hunt_id=hunt_obj.hunt_id)
results = self._SampleTwoTypeHuntResults(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_obj.hunt_id,
count_per_type=5)
sample_results.extend(results)
self._WriteHuntResults(results)
tags = {
None: list(sample_results),
"tag_1": [s for s in sample_results if s.tag == "tag_1"]
}
substrings = {
None:
list(sample_results),
"manufacturer": [
s for s in sample_results
if "manufacturer" in getattr(s.payload, "system_manufacturer", "")
],
"manufacturer_1": [
s for s in sample_results
if "manufacturer_1" in getattr(s.payload, "system_manufacturer", "")
]
}
types = {
None:
list(sample_results),
compatibility.GetName(rdf_client.ClientSummary): [
s for s in sample_results
if isinstance(s.payload, rdf_client.ClientSummary)
]
}
for tag_value, tag_expected in tags.items():
for substring_value, substring_expected in substrings.items():
for type_value, type_expected in types.items():
expected = [
e for e in tag_expected
if e in substring_expected and e in type_expected
]
results = self.db.ReadHuntResults(
hunt_obj.hunt_id,
0,
100,
with_tag=tag_value,
with_type=type_value,
with_substring=substring_value)
self.assertCountEqual(
[i.payload for i in expected], [i.payload for i in results],
"Result items do not match for "
"(tag=%s, type=%s, substring=%s): %s vs %s" %
(tag_value, type_value, substring_value, expected, results))
def testReadHuntResultsReturnsPayloadWithMissingTypeAsSpecialValue(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
client_id, flow_id = self._SetupHuntClientAndFlow(hunt_id=hunt_obj.hunt_id)
sample_results = self._SampleSingleTypeHuntResults(
client_id=client_id, flow_id=flow_id, hunt_id=hunt_obj.hunt_id)
self._WriteHuntResults(sample_results)
type_name = compatibility.GetName(rdf_client.ClientSummary)
try:
cls = rdfvalue.RDFValue.classes.pop(type_name)
results = self.db.ReadHuntResults(hunt_obj.hunt_id, 0, 100)
finally:
rdfvalue.RDFValue.classes[type_name] = cls
self.assertLen(sample_results, len(results))
for r in results:
self.assertTrue(
isinstance(r.payload, rdf_objects.SerializedValueOfUnrecognizedType))
self.assertEqual(r.payload.type_name, type_name)
def testCountHuntResultsReturnsCorrectResultsCount(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
client_id, flow_id = self._SetupHuntClientAndFlow(hunt_id=hunt_obj.hunt_id)
sample_results = self._SampleSingleTypeHuntResults(
client_id=client_id, flow_id=flow_id, hunt_id=hunt_obj.hunt_id)
self._WriteHuntResults(sample_results)
num_results = self.db.CountHuntResults(hunt_obj.hunt_id)
self.assertEqual(num_results, len(sample_results))
def testCountHuntResultsCorrectlyAppliesWithTagFilter(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
client_id, flow_id = self._SetupHuntClientAndFlow(hunt_id=hunt_obj.hunt_id)
sample_results = self._SampleSingleTypeHuntResults(
client_id=client_id, flow_id=flow_id, hunt_id=hunt_obj.hunt_id)
self._WriteHuntResults(sample_results)
num_results = self.db.CountHuntResults(hunt_obj.hunt_id, with_tag="blah")
self.assertEqual(num_results, 0)
num_results = self.db.CountHuntResults(hunt_obj.hunt_id, with_tag="tag_1")
self.assertEqual(num_results, 1)
def testCountHuntResultsCorrectlyAppliesWithTypeFilter(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
sample_results = []
for _ in range(10):
client_id, flow_id = self._SetupHuntClientAndFlow(
hunt_id=hunt_obj.hunt_id)
results = self._SampleTwoTypeHuntResults(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_obj.hunt_id,
count_per_type=1)
sample_results.extend(results)
self._WriteHuntResults(results)
num_results = self.db.CountHuntResults(
hunt_obj.hunt_id,
with_type=compatibility.GetName(rdf_client.ClientInformation))
self.assertEqual(num_results, 0)
num_results = self.db.CountHuntResults(
hunt_obj.hunt_id,
with_type=compatibility.GetName(rdf_client.ClientSummary))
self.assertEqual(num_results, 10)
num_results = self.db.CountHuntResults(
hunt_obj.hunt_id,
with_type=compatibility.GetName(rdf_client.ClientCrash))
self.assertEqual(num_results, 10)
def testCountHuntResultsCorrectlyAppliesWithTagAndWithTypeFilters(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
sample_results = []
for _ in range(10):
client_id, flow_id = self._SetupHuntClientAndFlow(
hunt_id=hunt_obj.hunt_id)
results = self._SampleTwoTypeHuntResults(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_obj.hunt_id,
count_per_type=5)
sample_results.extend(results)
self._WriteHuntResults(results)
num_results = self.db.CountHuntResults(
hunt_obj.hunt_id,
with_tag="tag_1",
with_type=compatibility.GetName(rdf_client.ClientSummary))
self.assertEqual(num_results, 10)
  def testReadHuntResultsCorrectlyAppliesWithTimestampFilter(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
sample_results = []
for _ in range(10):
client_id, flow_id = self._SetupHuntClientAndFlow(
hunt_id=hunt_obj.hunt_id)
sample_results = self._SampleSingleTypeHuntResults(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_obj.hunt_id,
count=10)
self._WriteHuntResults(sample_results[:5])
self._WriteHuntResults(sample_results[5:])
hunt_results = self.db.ReadHuntResults(hunt_obj.hunt_id, 0, 10)
for hr in hunt_results:
self.assertEqual([hr],
self.db.ReadHuntResults(
hunt_obj.hunt_id, 0, 10,
with_timestamp=hr.timestamp))
def testCountHuntResultsByTypeGroupsResultsCorrectly(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
client_id, flow_id = self._SetupHuntClientAndFlow(hunt_id=hunt_obj.hunt_id)
results = self._SampleTwoTypeHuntResults(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_obj.hunt_id,
count_per_type=5)
self._WriteHuntResults(results)
counts = self.db.CountHuntResultsByType(hunt_obj.hunt_id)
for key in counts:
self.assertIsInstance(key, text_type)
self.assertEqual(
counts, {
compatibility.GetName(rdf_client.ClientSummary): 5,
compatibility.GetName(rdf_client.ClientCrash): 5
})
def testReadHuntFlowsReturnsEmptyListWhenNoFlows(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
self.assertEmpty(self.db.ReadHuntFlows(hunt_obj.hunt_id, 0, 10))
def testReadHuntFlowsReturnsAllHuntFlowsWhenNoFilterCondition(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
_, flow_id_1 = self._SetupHuntClientAndFlow(hunt_id=hunt_obj.hunt_id)
_, flow_id_2 = self._SetupHuntClientAndFlow(hunt_id=hunt_obj.hunt_id)
flows = self.db.ReadHuntFlows(hunt_obj.hunt_id, 0, 10)
self.assertCountEqual([f.flow_id for f in flows], [flow_id_1, flow_id_2])
def _BuildFilterConditionExpectations(self, hunt_obj):
_, running_flow_id = self._SetupHuntClientAndFlow(
flow_state=rdf_flow_objects.Flow.FlowState.RUNNING,
hunt_id=hunt_obj.hunt_id)
_, succeeded_flow_id = self._SetupHuntClientAndFlow(
flow_state=rdf_flow_objects.Flow.FlowState.FINISHED,
hunt_id=hunt_obj.hunt_id)
_, failed_flow_id = self._SetupHuntClientAndFlow(
flow_state=rdf_flow_objects.Flow.FlowState.ERROR,
hunt_id=hunt_obj.hunt_id)
_, crashed_flow_id = self._SetupHuntClientAndFlow(
flow_state=rdf_flow_objects.Flow.FlowState.CRASHED,
hunt_id=hunt_obj.hunt_id)
client_id, flow_with_results_id = self._SetupHuntClientAndFlow(
hunt_id=hunt_obj.hunt_id)
sample_results = self._SampleSingleTypeHuntResults(
client_id=client_id,
flow_id=flow_with_results_id,
hunt_id=hunt_obj.hunt_id)
self._WriteHuntResults(sample_results)
return {
db.HuntFlowsCondition.UNSET: [
running_flow_id, succeeded_flow_id, failed_flow_id, crashed_flow_id,
flow_with_results_id
],
db.HuntFlowsCondition.FAILED_FLOWS_ONLY: [failed_flow_id],
db.HuntFlowsCondition.SUCCEEDED_FLOWS_ONLY: [succeeded_flow_id],
db.HuntFlowsCondition.COMPLETED_FLOWS_ONLY: [
failed_flow_id, succeeded_flow_id
],
db.HuntFlowsCondition.FLOWS_IN_PROGRESS_ONLY: [running_flow_id],
db.HuntFlowsCondition.CRASHED_FLOWS_ONLY: [crashed_flow_id],
}
def testReadHuntFlowsAppliesFilterConditionCorrectly(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
expectations = self._BuildFilterConditionExpectations(hunt_obj)
for filter_condition, expected in expectations.items():
results = self.db.ReadHuntFlows(
hunt_obj.hunt_id, 0, 10, filter_condition=filter_condition)
results_ids = [r.flow_id for r in results]
self.assertCountEqual(
results_ids, expected, "Result items do not match for "
"(filter_condition=%d): %s vs %s" %
(filter_condition, expected, results_ids))
def testReadHuntFlowsCorrectlyAppliesOffsetAndCountFilters(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
expectations = self._BuildFilterConditionExpectations(hunt_obj)
for filter_condition, _ in expectations.items():
full_results = self.db.ReadHuntFlows(
hunt_obj.hunt_id, 0, 1024, filter_condition=filter_condition)
full_results_ids = [r.flow_id for r in full_results]
for index in range(0, 2):
for count in range(1, 3):
results = self.db.ReadHuntFlows(
hunt_obj.hunt_id, index, count, filter_condition=filter_condition)
results_ids = [r.flow_id for r in results]
expected_ids = full_results_ids[index:index + count]
self.assertCountEqual(
results_ids, expected_ids, "Result items do not match for "
"(filter_condition=%d, index=%d, count=%d): %s vs %s" %
(filter_condition, index, count, expected_ids, results_ids))
def testReadHuntFlowsIgnoresSubflows(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
hunt_id = hunt_obj.hunt_id
_, flow_id = self._SetupHuntClientAndFlow(
hunt_id=hunt_id, flow_state=rdf_flow_objects.Flow.FlowState.RUNNING)
# Whatever state the subflow is in, it should be ignored.
self._SetupHuntClientAndFlow(
hunt_id=hunt_id,
flow_id=flow.RandomFlowId(),
parent_flow_id=flow_id,
flow_state=rdf_flow_objects.Flow.FlowState.ERROR)
self._SetupHuntClientAndFlow(
hunt_id=hunt_id,
flow_id=flow.RandomFlowId(),
parent_flow_id=flow_id,
flow_state=rdf_flow_objects.Flow.FlowState.FINISHED)
self._SetupHuntClientAndFlow(
hunt_id=hunt_id,
flow_id=flow.RandomFlowId(),
parent_flow_id=flow_id,
flow_state=rdf_flow_objects.Flow.FlowState.RUNNING)
    for state, expected_results in [
        (db.HuntFlowsCondition.COMPLETED_FLOWS_ONLY, 0),
        (db.HuntFlowsCondition.SUCCEEDED_FLOWS_ONLY, 0),
        (db.HuntFlowsCondition.FLOWS_IN_PROGRESS_ONLY, 1)
    ]:
      results = self.db.ReadHuntFlows(hunt_id, 0, 10, filter_condition=state)
      self.assertLen(results, expected_results)
def testCountHuntFlowsIgnoresSubflows(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
hunt_id = hunt_obj.hunt_id
_, flow_id = self._SetupHuntClientAndFlow(
hunt_id=hunt_id, flow_state=rdf_flow_objects.Flow.FlowState.RUNNING)
# Whatever state the subflow is in, it should be ignored.
self._SetupHuntClientAndFlow(
hunt_id=hunt_id,
flow_id=flow.RandomFlowId(),
parent_flow_id=flow_id,
flow_state=rdf_flow_objects.Flow.FlowState.ERROR)
self._SetupHuntClientAndFlow(
hunt_id=hunt_id,
flow_id=flow.RandomFlowId(),
parent_flow_id=flow_id,
flow_state=rdf_flow_objects.Flow.FlowState.FINISHED)
self._SetupHuntClientAndFlow(
hunt_id=hunt_id,
flow_id=flow.RandomFlowId(),
parent_flow_id=flow_id,
flow_state=rdf_flow_objects.Flow.FlowState.RUNNING)
self.assertEqual(self.db.CountHuntFlows(hunt_id), 1)
def testCountHuntFlowsReturnsEmptyListWhenNoFlows(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
self.assertEqual(self.db.CountHuntFlows(hunt_obj.hunt_id), 0)
def testCountHuntFlowsReturnsAllHuntFlowsWhenNoFilterCondition(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
self._SetupHuntClientAndFlow(hunt_id=hunt_obj.hunt_id)
self._SetupHuntClientAndFlow(hunt_id=hunt_obj.hunt_id)
self.assertEqual(self.db.CountHuntFlows(hunt_obj.hunt_id), 2)
def testCountHuntFlowsAppliesFilterConditionCorrectly(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
expectations = self._BuildFilterConditionExpectations(hunt_obj)
for filter_condition, expected in expectations.items():
result = self.db.CountHuntFlows(
hunt_obj.hunt_id, filter_condition=filter_condition)
self.assertEqual(
result, len(expected), "Result count does not match for "
"(filter_condition=%d): %d vs %d" %
(filter_condition, len(expected), result))
def testReadHuntCountersForNewHunt(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
hunt_counters = self.db.ReadHuntCounters(hunt_obj.hunt_id)
self.assertEqual(hunt_counters.num_clients, 0)
self.assertEqual(hunt_counters.num_successful_clients, 0)
self.assertEqual(hunt_counters.num_failed_clients, 0)
self.assertEqual(hunt_counters.num_clients_with_results, 0)
self.assertEqual(hunt_counters.num_crashed_clients, 0)
self.assertEqual(hunt_counters.num_results, 0)
self.assertEqual(hunt_counters.total_cpu_seconds, 0)
self.assertEqual(hunt_counters.total_network_bytes_sent, 0)
def testReadHuntCountersCorrectlyAggregatesResultsAmongDifferentFlows(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
expectations = self._BuildFilterConditionExpectations(hunt_obj)
hunt_counters = self.db.ReadHuntCounters(hunt_obj.hunt_id)
self.assertEqual(hunt_counters.num_clients,
len(expectations[db.HuntFlowsCondition.UNSET]))
self.assertEqual(
hunt_counters.num_successful_clients,
len(expectations[db.HuntFlowsCondition.SUCCEEDED_FLOWS_ONLY]))
self.assertEqual(hunt_counters.num_failed_clients,
len(expectations[db.HuntFlowsCondition.FAILED_FLOWS_ONLY]))
# _BuildFilterConditionExpectations writes 10 sample results for one client.
self.assertEqual(hunt_counters.num_clients_with_results, 1)
self.assertEqual(
hunt_counters.num_crashed_clients,
len(expectations[db.HuntFlowsCondition.CRASHED_FLOWS_ONLY]))
# _BuildFilterConditionExpectations writes 10 sample results.
self.assertEqual(hunt_counters.num_results, 10)
self.assertEqual(hunt_counters.total_cpu_seconds, 0)
self.assertEqual(hunt_counters.total_network_bytes_sent, 0)
# Check that after adding a flow with resource metrics, total counters
# get updated.
self._SetupHuntClientAndFlow(
flow_state=rdf_flow_objects.Flow.FlowState.FINISHED,
cpu_time_used=rdf_client_stats.CpuSeconds(
user_cpu_time=4.5, system_cpu_time=10),
network_bytes_sent=42,
hunt_id=hunt_obj.hunt_id)
hunt_counters = self.db.ReadHuntCounters(hunt_obj.hunt_id)
self.assertAlmostEqual(hunt_counters.total_cpu_seconds, 14.5)
self.assertEqual(hunt_counters.total_network_bytes_sent, 42)
def testReadHuntClientResourcesStatsIgnoresSubflows(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
client_id, flow_id = self._SetupHuntClientAndFlow(
flow_state=rdf_flow_objects.Flow.FlowState.FINISHED,
cpu_time_used=rdf_client_stats.CpuSeconds(
user_cpu_time=100, system_cpu_time=200),
network_bytes_sent=300,
hunt_id=hunt_obj.hunt_id)
# Create a subflow that used some resources too. This resource usage is
# already accounted for in the parent flow so the overall hunt resource
# usage should ignore those numbers.
sub_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_id="12345678",
parent_flow_id=flow_id,
parent_hunt_id=hunt_obj.hunt_id,
create_time=rdfvalue.RDFDatetime.Now(),
cpu_time_used=rdf_client_stats.CpuSeconds(
user_cpu_time=10, system_cpu_time=20),
network_bytes_sent=30)
self.db.WriteFlowObject(sub_flow)
usage_stats = self.db.ReadHuntClientResourcesStats(hunt_obj.hunt_id)
network_bins = usage_stats.network_bytes_sent_stats.histogram.bins
user_cpu_bins = usage_stats.user_cpu_stats.histogram.bins
system_cpu_bins = usage_stats.system_cpu_stats.histogram.bins
self.assertEqual(usage_stats.network_bytes_sent_stats.sum, 300)
self.assertEqual(sum([b.num for b in network_bins]), 1)
self.assertEqual(usage_stats.system_cpu_stats.sum, 200)
self.assertEqual(sum([b.num for b in system_cpu_bins]), 1)
self.assertEqual(usage_stats.user_cpu_stats.sum, 100)
self.assertEqual(sum([b.num for b in user_cpu_bins]), 1)
self.assertLen(usage_stats.worst_performers, 1)
def testReadHuntClientResourcesStatsCorrectlyAggregatesData(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
flow_data = []
expected_user_cpu_histogram = rdf_stats.StatsHistogram.FromBins(
rdf_stats.ClientResourcesStats.CPU_STATS_BINS)
expected_system_cpu_histogram = rdf_stats.StatsHistogram.FromBins(
rdf_stats.ClientResourcesStats.CPU_STATS_BINS)
expected_network_histogram = rdf_stats.StatsHistogram.FromBins(
rdf_stats.ClientResourcesStats.NETWORK_STATS_BINS)
for i in range(10):
user_cpu_time = 4.5 + i
system_cpu_time = 10 + i * 2
network_bytes_sent = 42 + i * 3
client_id, flow_id = self._SetupHuntClientAndFlow(
flow_state=rdf_flow_objects.Flow.FlowState.FINISHED,
cpu_time_used=rdf_client_stats.CpuSeconds(
user_cpu_time=user_cpu_time, system_cpu_time=system_cpu_time),
network_bytes_sent=network_bytes_sent,
hunt_id=hunt_obj.hunt_id)
expected_user_cpu_histogram.RegisterValue(user_cpu_time)
expected_system_cpu_histogram.RegisterValue(system_cpu_time)
expected_network_histogram.RegisterValue(network_bytes_sent)
flow_data.append((client_id, flow_id, (user_cpu_time, system_cpu_time,
network_bytes_sent)))
usage_stats = self.db.ReadHuntClientResourcesStats(hunt_obj.hunt_id)
self.assertEqual(usage_stats.user_cpu_stats.num, 10)
self.assertAlmostEqual(usage_stats.user_cpu_stats.mean, 9)
self.assertAlmostEqual(usage_stats.user_cpu_stats.stddev, 2.87228, 5)
self.assertLen(usage_stats.user_cpu_stats.histogram.bins,
len(expected_user_cpu_histogram.bins))
for b, model_b in zip(usage_stats.user_cpu_stats.histogram.bins,
expected_user_cpu_histogram.bins):
self.assertAlmostEqual(b.range_max_value, model_b.range_max_value)
self.assertEqual(b.num, model_b.num)
self.assertEqual(usage_stats.system_cpu_stats.num, 10)
self.assertAlmostEqual(usage_stats.system_cpu_stats.mean, 19)
self.assertAlmostEqual(usage_stats.system_cpu_stats.stddev, 5.74456, 5)
self.assertLen(usage_stats.system_cpu_stats.histogram.bins,
len(expected_system_cpu_histogram.bins))
for b, model_b in zip(usage_stats.system_cpu_stats.histogram.bins,
expected_system_cpu_histogram.bins):
self.assertAlmostEqual(b.range_max_value, model_b.range_max_value)
self.assertEqual(b.num, model_b.num)
self.assertEqual(usage_stats.network_bytes_sent_stats.num, 10)
self.assertAlmostEqual(usage_stats.network_bytes_sent_stats.mean, 55.5)
self.assertAlmostEqual(usage_stats.network_bytes_sent_stats.stddev, 8.6168,
4)
self.assertLen(usage_stats.network_bytes_sent_stats.histogram.bins,
len(expected_network_histogram.bins))
for b, model_b in zip(usage_stats.network_bytes_sent_stats.histogram.bins,
expected_network_histogram.bins):
self.assertAlmostEqual(b.range_max_value, model_b.range_max_value)
self.assertEqual(b.num, model_b.num)
self.assertLen(usage_stats.worst_performers, 10)
for worst_performer, flow_d in zip(usage_stats.worst_performers,
reversed(flow_data)):
client_id, flow_id, (user_cpu_time, system_cpu_time,
network_bytes_sent) = flow_d
self.assertEqual(worst_performer.client_id.Basename(), client_id)
self.assertAlmostEqual(worst_performer.cpu_usage.user_cpu_time,
user_cpu_time)
self.assertAlmostEqual(worst_performer.cpu_usage.system_cpu_time,
system_cpu_time)
self.assertEqual(worst_performer.network_bytes_sent, network_bytes_sent)
self.assertEqual(worst_performer.session_id.Path(),
"/%s/%s" % (client_id, flow_id))
def testReadHuntClientResourcesStatsCorrectlyAggregatesVeryLargeNumbers(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
self._SetupHuntClientAndFlow(
flow_state=rdf_flow_objects.Flow.FlowState.FINISHED,
cpu_time_used=rdf_client_stats.CpuSeconds(
user_cpu_time=3810072130, system_cpu_time=3810072130),
network_bytes_sent=3810072130,
hunt_id=hunt_obj.hunt_id)
self._SetupHuntClientAndFlow(
flow_state=rdf_flow_objects.Flow.FlowState.FINISHED,
cpu_time_used=rdf_client_stats.CpuSeconds(
user_cpu_time=2143939532, system_cpu_time=2143939532),
network_bytes_sent=2143939532,
hunt_id=hunt_obj.hunt_id)
usage_stats = self.db.ReadHuntClientResourcesStats(hunt_obj.hunt_id)
self.assertEqual(usage_stats.user_cpu_stats.num, 2)
self.assertAlmostEqual(usage_stats.user_cpu_stats.mean, 2977005831, 5)
self.assertAlmostEqual(usage_stats.user_cpu_stats.stddev, 833066299, 5)
self.assertAlmostEqual(usage_stats.system_cpu_stats.mean, 2977005831, 5)
self.assertAlmostEqual(usage_stats.system_cpu_stats.stddev, 833066299, 5)
self.assertAlmostEqual(usage_stats.network_bytes_sent_stats.mean,
2977005831, 5)
self.assertAlmostEqual(usage_stats.network_bytes_sent_stats.stddev,
833066299, 5)
def testReadHuntFlowsStatesAndTimestampsWorksCorrectlyForMultipleFlows(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
expected = []
for i in range(10):
client_id, flow_id = self._SetupHuntClientAndFlow(
hunt_id=hunt_obj.hunt_id)
if i % 2 == 0:
flow_state = rdf_flow_objects.Flow.FlowState.RUNNING
else:
flow_state = rdf_flow_objects.Flow.FlowState.FINISHED
self.db.UpdateFlow(client_id, flow_id, flow_state=flow_state)
flow_obj = self.db.ReadFlowObject(client_id, flow_id)
expected.append(
db.FlowStateAndTimestamps(
flow_state=flow_obj.flow_state,
create_time=flow_obj.create_time,
last_update_time=flow_obj.last_update_time))
state_and_times = self.db.ReadHuntFlowsStatesAndTimestamps(hunt_obj.hunt_id)
self.assertCountEqual(state_and_times, expected)
def testReadHuntFlowsStatesAndTimestampsIgnoresNestedFlows(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
client_id, flow_id = self._SetupHuntClientAndFlow(hunt_id=hunt_obj.hunt_id)
self._SetupHuntClientAndFlow(
hunt_id=hunt_obj.hunt_id, parent_flow_id=flow_id)
state_and_times = self.db.ReadHuntFlowsStatesAndTimestamps(hunt_obj.hunt_id)
self.assertLen(state_and_times, 1)
flow_obj = self.db.ReadFlowObject(client_id, flow_id)
self.assertEqual(
state_and_times[0],
db.FlowStateAndTimestamps(
flow_state=flow_obj.flow_state,
create_time=flow_obj.create_time,
last_update_time=flow_obj.last_update_time))
def testReadHuntOutputPluginLogEntriesReturnsEntryFromSingleHuntFlow(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
output_plugin_id = "1"
client_id, flow_id = self._SetupHuntClientAndFlow(
client_id="C.12345678901234aa", hunt_id=hunt_obj.hunt_id)
self.db.WriteFlowOutputPluginLogEntries([
rdf_flow_objects.FlowOutputPluginLogEntry(
client_id=client_id,
flow_id=flow_id,
output_plugin_id=output_plugin_id,
hunt_id=hunt_obj.hunt_id,
message="blah")
])
hunt_op_log_entries = self.db.ReadHuntOutputPluginLogEntries(
hunt_obj.hunt_id, output_plugin_id, 0, 10)
self.assertLen(hunt_op_log_entries, 1)
self.assertIsInstance(hunt_op_log_entries[0],
rdf_flow_objects.FlowOutputPluginLogEntry)
self.assertEqual(hunt_op_log_entries[0].hunt_id, hunt_obj.hunt_id)
self.assertEqual(hunt_op_log_entries[0].client_id, client_id)
self.assertEqual(hunt_op_log_entries[0].flow_id, flow_id)
self.assertEqual(hunt_op_log_entries[0].message, "blah")
def _WriteHuntOutputPluginLogEntries(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
output_plugin_id = "1"
for i in range(10):
client_id, flow_id = self._SetupHuntClientAndFlow(
client_id="C.12345678901234a%d" % i, hunt_id=hunt_obj.hunt_id)
enum = rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType
if i % 3 == 0:
log_entry_type = enum.ERROR
else:
log_entry_type = enum.LOG
self.db.WriteFlowOutputPluginLogEntries([
rdf_flow_objects.FlowOutputPluginLogEntry(
client_id=client_id,
flow_id=flow_id,
hunt_id=hunt_obj.hunt_id,
output_plugin_id=output_plugin_id,
log_entry_type=log_entry_type,
message="blah%d" % i)
])
return hunt_obj
def testReadHuntOutputPluginLogEntriesReturnsEntryFromMultipleHuntFlows(self):
hunt_obj = self._WriteHuntOutputPluginLogEntries()
hunt_op_log_entries = self.db.ReadHuntOutputPluginLogEntries(
hunt_obj.hunt_id, "1", 0, 100)
self.assertLen(hunt_op_log_entries, 10)
# Make sure messages are returned in timestamps-ascending order.
for i, e in enumerate(hunt_op_log_entries):
self.assertEqual(e.message, "blah%d" % i)
def testReadHuntOutputPluginLogEntriesCorrectlyAppliesOffsetAndCountFilters(
self):
hunt_obj = self._WriteHuntOutputPluginLogEntries()
for i in range(10):
hunt_op_log_entries = self.db.ReadHuntOutputPluginLogEntries(
hunt_obj.hunt_id, "1", i, 1)
self.assertLen(hunt_op_log_entries, 1)
self.assertEqual(hunt_op_log_entries[0].message, "blah%d" % i)
def testReadHuntOutputPluginLogEntriesCorrectlyAppliesWithTypeFilter(self):
hunt_obj = self._WriteHuntOutputPluginLogEntries()
hunt_op_log_entries = self.db.ReadHuntOutputPluginLogEntries(
hunt_obj.hunt_id,
"1",
0,
100,
with_type=rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.UNSET)
self.assertEmpty(hunt_op_log_entries)
hunt_op_log_entries = self.db.ReadHuntOutputPluginLogEntries(
hunt_obj.hunt_id,
"1",
0,
100,
with_type=rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.ERROR)
self.assertLen(hunt_op_log_entries, 4)
hunt_op_log_entries = self.db.ReadHuntOutputPluginLogEntries(
hunt_obj.hunt_id,
"1",
0,
100,
with_type=rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.LOG)
self.assertLen(hunt_op_log_entries, 6)
def testReadHuntOutputPluginLogEntriesCorrectlyAppliesCombinationOfFilters(
self):
hunt_obj = self._WriteHuntOutputPluginLogEntries()
hunt_log_entries = self.db.ReadHuntOutputPluginLogEntries(
hunt_obj.hunt_id,
"1",
0,
1,
with_type=rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.LOG)
self.assertLen(hunt_log_entries, 1)
self.assertEqual(hunt_log_entries[0].message, "blah1")
def testCountHuntOutputPluginLogEntriesReturnsCorrectCount(self):
hunt_obj = self._WriteHuntOutputPluginLogEntries()
num_entries = self.db.CountHuntOutputPluginLogEntries(hunt_obj.hunt_id, "1")
self.assertEqual(num_entries, 10)
def testCountHuntOutputPluginLogEntriesRespectsWithTypeFilter(self):
hunt_obj = self._WriteHuntOutputPluginLogEntries()
num_entries = self.db.CountHuntOutputPluginLogEntries(
hunt_obj.hunt_id,
"1",
with_type=rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.LOG)
self.assertEqual(num_entries, 6)
num_entries = self.db.CountHuntOutputPluginLogEntries(
hunt_obj.hunt_id,
"1",
with_type=rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.ERROR)
self.assertEqual(num_entries, 4)
def testFlowStateUpdateUsingUpdateFlow(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
hunt_id = hunt_obj.hunt_id
client_id, flow_id = self._SetupHuntClientAndFlow(
hunt_id=hunt_id, flow_state=rdf_flow_objects.Flow.FlowState.RUNNING)
results = self.db.ReadHuntFlows(
hunt_id,
0,
10,
filter_condition=db.HuntFlowsCondition.FLOWS_IN_PROGRESS_ONLY)
self.assertLen(results, 1)
results = self.db.ReadHuntFlows(
hunt_id,
0,
10,
filter_condition=db.HuntFlowsCondition.COMPLETED_FLOWS_ONLY)
self.assertEmpty(results)
rdf_flow = self.db.ReadFlowObject(client_id, flow_id)
rdf_flow.flow_state = rdf_flow_objects.Flow.FlowState.FINISHED
self.db.UpdateFlow(client_id, rdf_flow.flow_id, flow_obj=rdf_flow)
results = self.db.ReadHuntFlows(
hunt_id,
0,
10,
filter_condition=db.HuntFlowsCondition.FLOWS_IN_PROGRESS_ONLY)
self.assertEmpty(results)
results = self.db.ReadHuntFlows(
hunt_id,
0,
10,
filter_condition=db.HuntFlowsCondition.COMPLETED_FLOWS_ONLY)
self.assertLen(results, 1)
def testFlowStateUpdateUsingReleaseProcessedFlow(self):
hunt_obj = rdf_hunt_objects.Hunt(description="foo")
self.db.WriteHuntObject(hunt_obj)
hunt_id = hunt_obj.hunt_id
client_id, flow_id = self._SetupHuntClientAndFlow(hunt_id=hunt_id)
flow_obj = self.db.LeaseFlowForProcessing(
client_id, flow_id, rdfvalue.Duration.From(1, rdfvalue.MINUTES))
self.assertEqual(flow_obj.flow_state, rdf_flow_objects.Flow.FlowState.UNSET)
flow_obj.flow_state = rdf_flow_objects.Flow.FlowState.ERROR
self.db.ReleaseProcessedFlow(flow_obj)
results = self.db.ReadHuntFlows(
hunt_id,
0,
10,
filter_condition=db.HuntFlowsCondition.FAILED_FLOWS_ONLY)
self.assertLen(results, 1)
# This file is a test library and thus does not require a __main__ block.
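# A minimal sketch of how a test library like this is typically consumed
# (all names below are hypothetical, for illustration only): a concrete
# database test case mixes these methods in and provides the self.db fixture
# that every test above relies on.
#
#   class MyDatabaseHuntTest(HuntTestMixin, absltest.TestCase):
#
#     def setUp(self):
#       super(MyDatabaseHuntTest, self).setUp()
#       self.db = my_db_module.MyDatabase()  # concrete database implementation
#
# The mixin's test methods then run unchanged against self.db.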
|
|
"""
Provides utility functions that standardize behavior across Python scripts.
In addition, many of the functions are designed to significantly ease the
burden of producing a new script for validation, transformation, or conversion.
"""
import sys
import os
import logging
import pydoc
import time
import re
import io
import gzip
import bz2
import tarfile
import zipfile
import argparse
import simplejson
import magic
import ftputil
import requests
from requests_toolbelt import MultipartEncoder
import subprocess
try:
from biokbase.HandleService.Client import HandleService
except:
from biokbase.AbstractHandle.Client import AbstractHandle as HandleService
import biokbase.workspace.client
# override default ArgumentParser behavior
class ArgumentParser(argparse.ArgumentParser):
def exit(self, status=1, message=None):
if message:
self._print_message(message, sys.stderr)
sys.exit(status)
def get_token():
"""
Retrieve the KBase token in the local shell environment.
"""
token = os.environ.get("KB_AUTH_TOKEN")
if token is None:
try:
            # Note: shell=True combined with an argument list would drop the
            # "-t" flag, so the command is run directly instead.
            task = subprocess.Popen(["kbase-whoami", "-t"],
                                    stdout=subprocess.PIPE)
            stdout, stderr = task.communicate()
            if stderr is not None:
                raise Exception("Unable to retrieve user token : {0}".format(stderr))
            elif stdout is not None:
                if 'You are not logged in' in stdout or 'command not found' in stdout:
                    raise Exception("Unable to retrieve user token, " +
                                    "login with CLI or export KB_AUTH_TOKEN")
                return stdout.strip()
            else:
                raise Exception("Unable to retrieve user token, " +
                                "no output from kbase-whoami")
except:
raise Exception("Unable to find token, export KB_AUTH_TOKEN")
else:
return token
def stderrlogger(name, level=logging.INFO):
"""
Return a standard python logger with a stderr handler attached and using a prefix
format that will make logging consistent between scripts.
"""
logger = logging.getLogger(name)
logger.setLevel(level)
# send messages to sys.stderr
streamHandler = logging.StreamHandler(sys.stderr)
formatter = logging.Formatter("%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s")
formatter.converter = time.gmtime
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
return logger
def stdoutlogger(name, level=logging.INFO):
"""
Return a standard python logger with a stdout handler attached and using a prefix
format that will make logging consistent between scripts.
"""
logger = logging.getLogger(name)
logger.setLevel(level)
    # send messages to sys.stdout
streamHandler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter("%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s")
formatter.converter = time.gmtime
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
return logger
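# Example (illustrative only, not executed): a logger created by either
# factory above emits UTC-timestamped lines of the form
#   2015-06-01 12:00:00,000 - my_script.py - 42 - INFO - starting conversion
# e.g.
#   logger = stderrlogger(__file__)
#   logger.info("starting conversion")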
def parse_docs(docstring=None):
"""
Parses the docstring of a function and returns a dictionary of the elements.
"""
# TODO, revisit this, probably can use other ways of doing this
script_details = dict()
keys = ["Authors","Returns","Args"]
remainder = docstring[:]
for k in keys:
remainder, script_details[k] = remainder.split(k+":",1)
script_details[k] = script_details[k].strip()
script_details["Description"] = remainder
# special treatment for Args since we want a dict, split on :, then cleanup whitespace
# keep the : in the keys to do clean splits when getting the values
argument_keys = [x.strip() for x in re.findall(".*:",script_details["Args"])]
# split on the string in reverse by the keys, then wash out the extra whitespace
remainder = script_details["Args"]
argument_values = list()
for k in reversed(argument_keys):
remainder, value = remainder.split(k)
argument_values.append(" ".join([x.strip() for x in value.split("\n")]))
    # create the dict using the keys without :, then get the values in the correct order
script_details["Args"] = dict(zip([x.replace(":","") for x in argument_keys], reversed(argument_values)))
return script_details
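# Example (illustrative): parse_docs() expects the sections to appear in the
# order Description, Args, Returns, Authors, e.g. for a docstring such as
#
#   Converts a FASTA file to a KBase object.
#
#   Args:
#       input_file: Path to the FASTA file.
#       workspace: Target workspace name.
#
#   Returns:
#       Nothing, saves the object to the workspace.
#
#   Authors:
#       Jane Doe
#
# it returns a dict with keys "Description", "Args" (itself a dict keyed by
# argument name), "Returns" and "Authors".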
def extract_data(logger = stderrlogger(__file__), filePath = None, chunkSize = 2**30):
"""
Unpack a data file that may be compressed or an archive.
"""
def extract_tar(tarPath):
if not tarfile.is_tarfile(tarPath):
raise Exception("Inavalid tar file " + tarPath)
with tarfile.open(tarPath, 'r') as tarDataFile:
memberlist = tarDataFile.getmembers()
for member in memberlist:
memberPath = os.path.join(os.path.dirname(os.path.abspath(tarPath)),os.path.basename(os.path.abspath(member.name)))
if member.isfile():
with io.open(memberPath, 'wb') as f:
inputFile = tarDataFile.extractfile(member.name)
f.write(inputFile.read(chunkSize))
os.remove(tarPath)
mimeType = None
with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as m:
mimeType = m.id_filename(filePath)
logger.info("Extracting {0} as {1}".format(filePath, mimeType))
if mimeType == "application/x-gzip" or mimeType == "application/gzip":
outFile = os.path.splitext(filePath)[0]
with gzip.GzipFile(filePath, 'rb') as gzipDataFile, io.open(outFile, 'wb') as f:
for chunk in gzipDataFile:
f.write(chunk)
os.remove(filePath)
outPath = os.path.dirname(filePath)
# check for tar
with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as m:
mimeType = m.id_filename(outFile)
if mimeType == "application/x-tar":
logger.info("Extracting {0} as tar".format(outFile))
extract_tar(outFile)
elif mimeType == "application/x-bzip2":
outFile = os.path.splitext(filePath)[0]
with bz2.BZ2File(filePath, 'r') as bz2DataFile, io.open(outFile, 'wb') as f:
for chunk in bz2DataFile:
f.write(chunk)
os.remove(filePath)
outPath = os.path.dirname(filePath)
# check for tar
with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as m:
mimeType = m.id_filename(outFile)
if mimeType == "application/x-tar":
logger.info("Extracting {0} as tar".format(outFile))
extract_tar(outFile)
elif mimeType == "application/zip":
if not zipfile.is_zipfile(filePath):
raise Exception("Invalid zip file!")
outPath = os.path.dirname(filePath)
with zipfile.ZipFile(filePath, 'r') as zipDataFile:
bad = zipDataFile.testzip()
if bad is not None:
raise Exception("Encountered a bad file in the zip : " + str(bad))
infolist = zipDataFile.infolist()
# perform sanity check on file names, extract each file individually
for x in infolist:
infoPath = os.path.join(outPath, os.path.basename(os.path.abspath(x.filename)))
                if not os.path.exists(os.path.dirname(infoPath)):
                    os.makedirs(os.path.dirname(infoPath))
                if os.path.exists(infoPath):
                    raise Exception("Extracting zip contents will overwrite an existing file!")
with io.open(infoPath, 'wb') as f:
f.write(zipDataFile.read(x.filename))
os.remove(filePath)
elif mimeType == "application/x-gtar":
if not tarfile.is_tarfile(filePath):
raise Exception("Inavalid tar file " + filePath)
outPath = os.path.dirname(filePath)
with tarfile.open(filePath, 'r|*') as tarDataFile:
memberlist = tarDataFile.getmembers()
# perform sanity check on file names, extract each file individually
for member in memberlist:
memberPath = os.path.join(outPath, os.path.basename(os.path.abspath(member.name)))
                if not os.path.exists(os.path.dirname(memberPath)):
                    os.makedirs(os.path.dirname(memberPath))
                if os.path.exists(memberPath):
                    raise Exception("Extracting tar contents will overwrite an existing file!")
if member.isfile():
with io.open(memberPath, 'wb') as f, tarDataFile.extractfile(member.name) as inputFile:
f.write(inputFile.read(chunkSize))
os.remove(filePath)
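# Example (illustrative): unpack a compressed archive in place; the original
# compressed file is removed once its contents have been extracted.
#   extract_data(logger=stderrlogger(__file__),
#                filePath="/tmp/reads.fastq.tar.gz")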
def download_file_from_shock(logger = stderrlogger(__file__),
shock_service_url = None,
shock_id = None,
filename = None,
directory = None,
token = None):
"""
Given a SHOCK instance URL and a SHOCK node id, download the contents of that node
to a file on disk.
"""
header = dict()
header["Authorization"] = "Oauth {0}".format(token)
logger.info("Downloading shock node {0}/node/{1}".format(shock_service_url,shock_id))
metadata_response = requests.get("{0}/node/{1}?verbosity=metadata".format(shock_service_url, shock_id), headers=header, stream=True, verify=True)
shock_metadata = metadata_response.json()['data']
shockFileName = shock_metadata['file']['name']
shockFileSize = shock_metadata['file']['size']
metadata_response.close()
download_url = "{0}/node/{1}?download_raw".format(shock_service_url, shock_id)
data = requests.get(download_url, headers=header, stream=True, verify=True)
if filename is not None:
shockFileName = filename
if directory is not None:
filePath = os.path.join(directory, shockFileName)
else:
filePath = shockFileName
chunkSize = shockFileSize/4
maxChunkSize = 2**30
if chunkSize > maxChunkSize:
chunkSize = maxChunkSize
f = io.open(filePath, 'wb')
try:
for chunk in data.iter_content(chunkSize):
f.write(chunk)
finally:
data.close()
f.close()
extract_data(logger, filePath)
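# Example (illustrative; the service URL and node id below are hypothetical):
#   download_file_from_shock(logger=stderrlogger(__file__),
#                            shock_service_url="https://kbase.us/services/shock-api",
#                            shock_id="0123abcd-4567-89ef-0123-456789abcdef",
#                            directory="/tmp/input",
#                            token=get_token())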
def upload_file_to_shock(logger = stderrlogger(__file__),
shock_service_url = None,
filePath = None,
ssl_verify = True,
token = None):
"""
Use HTTP multi-part POST to save a file to a SHOCK instance.
"""
if token is None:
raise Exception("Authentication token required!")
#build the header
header = dict()
header["Authorization"] = "Oauth {0}".format(token)
if filePath is None:
raise Exception("No file given for upload to SHOCK!")
    # Open in binary mode so that compressed or binary files upload intact.
    dataFile = open(os.path.abspath(filePath), 'rb')
m = MultipartEncoder(fields={'upload': (os.path.split(filePath)[-1], dataFile)})
header['Content-Type'] = m.content_type
logger.info("Sending {0} to {1}".format(filePath,shock_service_url))
try:
response = requests.post(shock_service_url + "/node", headers=header, data=m, allow_redirects=True, verify=ssl_verify)
dataFile.close()
except:
dataFile.close()
raise
if not response.ok:
response.raise_for_status()
result = response.json()
if result['error']:
raise Exception(result['error'][0])
else:
return result["data"]
def getHandles(logger = None,
shock_service_url = None,
handle_service_url = None,
shock_ids = None,
handle_ids = None,
token = None):
"""
Retrieve KBase handles for a list of shock ids or a list of handle ids.
"""
if token is None:
raise Exception("Authentication token required!")
hs = HandleService(url=handle_service_url, token=token)
handles = list()
if shock_ids is not None:
header = dict()
header["Authorization"] = "Oauth {0}".format(token)
for sid in shock_ids:
info = None
try:
logger.info("Found shock id {0}, retrieving information about the data.".format(sid))
response = requests.get("{0}/node/{1}".format(shock_service_url, sid), headers=header, verify=True)
info = response.json()["data"]
except:
logger.error("There was an error retrieving information about the shock node id {0} from url {1}".format(sid, shock_service_url))
try:
logger.info("Retrieving a handle id for the data.")
handle = hs.persist_handle({"id" : sid,
"type" : "shock",
"url" : shock_service_url,
"file_name": info["file"]["name"],
"remote_md5": info["file"]["checksum"]["md5"]})
handles.append(handle)
except:
try:
handle_id = hs.ids_to_handles([sid])[0]["hid"]
single_handle = hs.hids_to_handles([handle_id])
assert len(single_handle) != 0
if info is not None:
single_handle[0]["file_name"] = info["file"]["name"]
single_handle[0]["remote_md5"] = info["file"]["checksum"]["md5"]
logger.debug(single_handle)
handles.append(single_handle[0])
except:
logger.error("The input shock node id {} is already registered or could not be registered".format(sid))
hs = HandleService(url=handle_service_url, token=token)
all_handles = hs.list_handles()
for x in all_handles:
if x[0] == sid:
logger.info("FOUND shock id as existing handle")
logger.info(x)
break
else:
logger.info("Unable to find a handle containing shock id")
logger.info("Trying again to get a handle id for the data.")
handle_id = hs.persist_handle({"id" : sid,
"type" : "shock",
"url" : shock_service_url,
"file_name": info["file"]["name"],
"remote_md5": info["file"]["checksum"]["md5"]})
handles.append(handle_id)
raise
elif handle_ids is not None:
for hid in handle_ids:
try:
single_handle = hs.hids_to_handles([hid])
assert len(single_handle) != 0
handles.append(single_handle[0])
except:
logger.error("Invalid handle id {0}".format(hid))
raise
return handles
def download_from_urls(logger = stderrlogger(__file__),
working_directory = os.getcwd(),
urls = None,
ssl_verify = True,
token = None,
chunkSize = 2**30):
"""
Downloads urls defined by key names in a dictionary, with each key name getting
its own subdirectory and the contents of the url for that key deposited in the
subdirectory that matches the key name. Key names are defined by developers in
a config file per upload conversion.
"""
if token is None:
raise Exception("Unable to find token!")
if not os.path.exists(working_directory):
os.mkdir(working_directory)
elif not os.path.isdir(working_directory):
raise Exception("Attempting to process downloads using a path that is not a directory!")
if urls is None:
raise Exception("No urls to upload from!")
    if not isinstance(urls, dict):
        raise Exception("Expected dictionary of urls, instead found {0}".format(type(urls)))
assert len(urls.keys()) != 0
for name, url in urls.items():
data = None
download_directory = os.path.join(working_directory, name)
os.mkdir(download_directory)
# detect url type
if url.startswith("ftp://"):
            threshold = 1024
# check if file or directory
host = url.split("ftp://")[1].split("/")[0]
path = url.split("ftp://")[1].split("/", 1)[1]
ftp_connection = ftputil.FTPHost(host, 'anonymous', 'anonymous@')
            if ftp_connection.path.isdir(path):
                file_list = ftp_connection.listdir(path)
            elif ftp_connection.path.isfile(path):
                file_list = [path]
            else:
                raise Exception("FTP path is neither a file nor a directory : " + path)
if len(file_list) > 1:
if len(file_list) > threshold:
raise Exception("Too many files to process, found so far {0:d}".format(len(file_list)))
if len(path.split("/")[-1]) == 0:
dirname = os.path.join(download_directory, path.split("/")[-2])
else:
dirname = os.path.join(download_directory, path.split("/")[-1])
all_files = list()
check = file_list[:]
while len(check) > 0:
x = check.pop()
new_files = ftp_connection.listdir(x)
for n in new_files:
if ftp_connection.path.isfile(n):
all_files.append(n)
elif ftp_connection.path.isdir(n):
check.append(n)
if len(all_files) > threshold:
raise Exception("Too many files to process, found so far {0:d}".format(len(all_files)))
os.mkdir(dirname)
for x in all_files:
filePath = os.path.join(os.path.abspath(dirname), os.path.basename(x))
logger.info("Downloading {0}".format(host + x))
ftp_connection.download(x, filePath)
extract_data(logger, filePath)
else:
filePath = os.path.join(download_directory, os.path.split(path)[-1])
logger.info("Downloading {0}".format(url))
ftp_connection.download(path, filePath)
extract_data(logger, filePath)
ftp_connection.close()
elif url.startswith("http://") or url.startswith("https://"):
logger.info("Downloading {0}".format(url))
download_url = None
fileSize = 0
fileName = None
header = dict()
header["Authorization"] = "Oauth {0}".format(token)
# check for a shock url
try:
                shock_id = re.search(r'^https?://.*/node/([a-fA-F0-9\-]+).*', url).group(1)
                shock_download_url = re.search(r'^(https?://.*)/node/[a-fA-F0-9\-]+.*', url).group(1)
except Exception, e:
shock_id = None
if shock_id is None:
download_url = url
fileName = url.split('/')[-1]
else:
metadata_response = requests.get("{0}/node/{1}?verbosity=metadata".format(shock_download_url, shock_id), headers=header, stream=True, verify=ssl_verify)
shock_metadata = metadata_response.json()['data']
fileName = shock_metadata['file']['name']
fileSize = shock_metadata['file']['size']
metadata_response.close()
download_url = "{0}/node/{1}?download_raw".format(shock_download_url, shock_id)
data = None
size = 0
try:
data = requests.get(download_url, headers=header, stream=True, verify=ssl_verify)
if 'content-length' in data.headers:
size = int(data.headers['content-length'])
if 'content-disposition' in data.headers:
fileName = data.headers['content-disposition'].split("filename=")[-1].replace("\\","").replace("\"","")
except Exception, e:
raise
if size > 0 and fileSize == 0:
fileSize = size
filePath = os.path.join(download_directory, fileName)
logger.info("Writing out {0}".format(fileName))
f = io.open(filePath, 'wb')
try:
for chunk in data.iter_content(chunkSize):
f.write(chunk)
finally:
data.close()
f.close()
            extract_data(logger, filePath)
else:
raise Exception("Unrecognized protocol or url format : " + url)
def save_json_to_workspace(logger = None,
workspace_service_url = None,
json_file = None,
kbase_info_file = None,
object_details = None,
token = None):
"""
Saves an object to a workspace given a JSON data file.
TODO
Optionally if this data was originally from KBase, a KBase info file can
be given that will be saved to the workspace with the object.
"""
f = open(json_file, 'r')
data = simplejson.loads(f.read())
f.close()
    workspaceClient = biokbase.workspace.client.Workspace(workspace_service_url, token=token)
workspaceClient.save_objects({"workspace": object_details["workspace_name"],
"objects": [{
"type": object_details["kbase_type"],
"data": data,
"name": object_details["object_name"],
"meta": object_details["object_meta"],
"provenance": object_details["provenance"]}
]})
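# Example (illustrative) of the object_details dictionary consumed above;
# the concrete values are hypothetical:
#   object_details = {
#       "workspace_name": "my_workspace",
#       "kbase_type": "KBaseGenomes.Genome",
#       "object_name": "my_genome",
#       "object_meta": {},
#       "provenance": [{"service": "my_conversion_script"}],
#   }
#   save_json_to_workspace(logger=stderrlogger(__file__),
#                          workspace_service_url="https://kbase.us/services/ws",
#                          json_file="genome.json",
#                          object_details=object_details,
#                          token=get_token())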
|
|
# (C) Copyright 2020 IBM Corp.
# (C) Copyright 2020 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for _pywbemcli_operations module.
"""
from __future__ import absolute_import, print_function
import sys
import os
import io
import glob
import re
import warnings
import packaging.version
import urllib3
import pytest
import pywbem
from pywbemtools.pywbemcli._pywbemcli_operations import PYWBEMCLIFakedConnection
from pywbemtools._utils import ensure_unicode, \
DEFAULT_CONNECTIONS_FILE
from pywbemtools.pywbemcli.mockscripts import DeprecatedSetupWarning, \
SetupNotSupportedError
from ..pytest_extensions import simplified_test_function
from ..utils import captured_output
OK = True
FAIL = False
PDB = "pdb"
PYWBEM_VERSION = packaging.version.parse(pywbem.__version__)
# Click (as of 7.1.2) raises UnsupportedOperation in click.echo() when
# the pytest capsys fixture is used. That happens only on Windows.
# See Click issue https://github.com/pallets/click/issues/1590. This
# run condition skips the testcases on Windows.
CLICK_ISSUE_1590 = sys.platform == 'win32'
SCRIPT_DIR = os.path.dirname(__file__)
USER_CONNECTIONS_FILE = os.path.join(SCRIPT_DIR, '.user_connections_file.yaml')
# Backup of default connections file
DEFAULT_CONNECTIONS_FILE_BAK = DEFAULT_CONNECTIONS_FILE + \
'.test_pywbemcli_operations.bak'
# Flag indicating that the new-style setup approach with a setup() function
# is supported.
NEWSTYLE_SUPPORTED = sys.version_info[0:2] >= (3, 5)
# RETRY_DEPRECATION: Flag indicating that pywbem raises a DeprecationWarning
# for urllib3.Retry.
# urllib3 1.26.0 started issuing a DeprecationWarning for using the
# 'method_whitelist' init parameter of Retry and announced its removal in
# version 2.0. The replacement parameter is 'allowed_methods'.
# pywbem >=1.1.0 avoids the warning, but pywbem <1.1.0 issues it.
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
urllib3.Retry(method_whitelist={})
except (DeprecationWarning, TypeError):
RETRY_DEPRECATION = PYWBEM_VERSION.release < (1, 1)
else:
RETRY_DEPRECATION = False
def save_default_connections_file():
"""
Save the default connections file.
"""
if os.path.exists(DEFAULT_CONNECTIONS_FILE):
if os.path.exists(DEFAULT_CONNECTIONS_FILE_BAK):
os.remove(DEFAULT_CONNECTIONS_FILE_BAK)
os.rename(DEFAULT_CONNECTIONS_FILE, DEFAULT_CONNECTIONS_FILE_BAK)
def restore_default_connections_file():
"""
Restore the default connections file.
"""
if os.path.exists(DEFAULT_CONNECTIONS_FILE_BAK):
if os.path.exists(DEFAULT_CONNECTIONS_FILE):
os.remove(DEFAULT_CONNECTIONS_FILE)
os.rename(DEFAULT_CONNECTIONS_FILE_BAK, DEFAULT_CONNECTIONS_FILE)
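# Typical usage (illustrative): wrap a test that touches the default
# connections file so the user's file is preserved:
#   save_default_connections_file()
#   try:
#       ...  # run code that writes DEFAULT_CONNECTIONS_FILE
#   finally:
#       restore_default_connections_file()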
def get_mockcache_dir(connection_name):
"""
Return mock cache directory path for a connection name.
We assume the connection name is unique across all connection files.
"""
mockcache_rootdir = os.path.join(os.path.expanduser('~'),
'.pywbemcli_mockcache')
if os.path.isdir(mockcache_rootdir):
for _dir in os.listdir(mockcache_rootdir):
if _dir.endswith('.' + connection_name):
dir_path = os.path.join(mockcache_rootdir, _dir)
return dir_path
    raise ValueError(
        "No mock cache directory found for connection {0!r}".format(
            connection_name))
def remove_mockcache(connection_name):
"""
Remove mock cache for a connection name.
"""
try:
mockcache_dir = get_mockcache_dir(connection_name)
except ValueError:
return
file_list = glob.glob(os.path.join(mockcache_dir, '*'))
for _file in file_list:
os.remove(_file)
os.rmdir(mockcache_dir)
# Testcase parameters for simple model with old-style method provider
SIMPLE_V1_OLD_NAMESPACE = pywbem.DEFAULT_NAMESPACE
SIMPLE_V1_OLD_MOCK_FILES = [
'tests/unit/pywbemcli/simple_mock_model.mof',
'tests/unit/pywbemcli/simple_mock_invokemethod_v1old.py',
]
SIMPLE_V1_OLD_EXP_CLASSES = [
(SIMPLE_V1_OLD_NAMESPACE, 'CIM_Foo'),
(SIMPLE_V1_OLD_NAMESPACE, 'CIM_Foo_sub'),
(SIMPLE_V1_OLD_NAMESPACE, 'CIM_Foo_sub_sub'),
(SIMPLE_V1_OLD_NAMESPACE, 'CIM_Foo_sub2'),
]
SIMPLE_V1_OLD_EXP_PROVIDERS = [
(SIMPLE_V1_OLD_NAMESPACE, 'CIM_Foo', 'method', 'CIM_FooMethodProvider'),
]
# Testcase parameters for simple model with new-style method provider
SIMPLE_V1_NEW_NAMESPACE = pywbem.DEFAULT_NAMESPACE
SIMPLE_V1_NEW_MOCK_FILES = [
'tests/unit/pywbemcli/simple_mock_model.mof',
'tests/unit/pywbemcli/simple_mock_invokemethod_v1new.py',
]
SIMPLE_V1_NEW_EXP_CLASSES = [
(SIMPLE_V1_NEW_NAMESPACE, 'CIM_Foo'),
(SIMPLE_V1_NEW_NAMESPACE, 'CIM_Foo_sub'),
(SIMPLE_V1_NEW_NAMESPACE, 'CIM_Foo_sub_sub'),
(SIMPLE_V1_NEW_NAMESPACE, 'CIM_Foo_sub2'),
]
SIMPLE_V1_NEW_EXP_PROVIDERS = [
(SIMPLE_V1_NEW_NAMESPACE, 'CIM_Foo', 'method', 'CIM_FooMethodProvider'),
]
# Testcase parameters for standalone mock script
STANDALONE_NAMESPACE = pywbem.DEFAULT_NAMESPACE
STANDALONE_MOCK_FILES = [
'tests/unit/pywbemcli/standalone_mock_script.py',
]
STANDALONE_EXP_CLASSES = [
(STANDALONE_NAMESPACE, 'CIM_Foo'),
(STANDALONE_NAMESPACE, 'CIM_Foo_sub'),
(STANDALONE_NAMESPACE, 'CIM_Foo_sub_sub'),
(STANDALONE_NAMESPACE, 'CIM_Foo_sub2'),
]
STANDALONE_EXP_PROVIDERS = [
(STANDALONE_NAMESPACE, 'CIM_Foo', 'method', 'CIM_FooMethodProvider'),
]
TESTCASES_BUILD_MOCKENV = [
# TESTCASES for BuildMockenvMixin.build_mockenv()
#
# Each list item is a testcase tuple with these items:
# * desc: Short testcase description.
# * kwargs: Keyword arguments for the test function and response:
# * test_mode: Test mode, with the following values:
# - build: Test build of non-existing cache
# - load: Test load of up-to-date cache
# - load_rebuild_missing_pklfile: Test load resulting in rebuild due to
# missing pkl file
# - load_rebuild_missing_md5file: Test load resulting in rebuild due to
# missing md5 file
# - load_rebuild_changed_moffile: Test load resulting in rebuild due to
# changed MOF file
# - load_rebuild_changed_pyfile: Test load resulting in rebuild due to
# missing mock script
# * verbose: verbose flag to be used in test.
# * connections_file: Path name of connections file to use.
# * default_namespace: Default namespace for the mock connection.
# * mock_files: List of file paths of mock scripts and MOF files.
# * exp_dep_files: List of expected file paths of dependent files
# registered.
# * exp_classes: List of expected classes in mock environment, as
# tuple(namespace, classname).
# * exp_providers: List of expected providers in mock environment, as
# tuple(namespace, classname, provider_type, provider_classname).
# * exp_stdout_lines: List of lines expected to be written to stdout
# during execution of the code to be tested. Each line is a regexp.
# * exp_stdout_lines_all: Boolean indicating that the lines in
# exp_stdout_lines are all expected lines, i.e. any additional lines
# will cause a test failure.
# * exp_stderr_lines: List of lines expected to be written to stderr
# during execution of the code to be tested. Each line is a regexp.
# * exp_exc_types: Expected exception type(s), or None.
# * exp_warn_types: Expected warning type(s), or None.
# * condition: Boolean condition for testcase to run, or 'pdb' for debugger
# Testcases with verbose disabled.
(
"Mock env with MOF file and old-style mock script, "
"cache does not exist",
dict(
test_mode='build',
verbose=False,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_OLD_NAMESPACE,
mock_files=SIMPLE_V1_OLD_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_OLD_EXP_CLASSES,
exp_providers=SIMPLE_V1_OLD_EXP_PROVIDERS,
exp_stdout_lines=[],
exp_stdout_lines_all=True,
exp_stderr_lines=[],
),
None, DeprecatedSetupWarning, OK
),
(
"Mock env with MOF file and old-style mock script, "
"cache exists, and load succeeds",
dict(
test_mode='load',
verbose=False,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_OLD_NAMESPACE,
mock_files=SIMPLE_V1_OLD_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_OLD_EXP_CLASSES,
exp_providers=SIMPLE_V1_OLD_EXP_PROVIDERS,
exp_stdout_lines=[],
exp_stdout_lines_all=True,
exp_stderr_lines=[],
),
None, DeprecatedSetupWarning, OK
),
(
"Mock env with MOF file and old-style mock script, "
"cache exists, but load results in rebuild due to missing pkl file",
dict(
test_mode='load_rebuild_missing_pklfile',
verbose=False,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_OLD_NAMESPACE,
mock_files=SIMPLE_V1_OLD_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_OLD_EXP_CLASSES,
exp_providers=SIMPLE_V1_OLD_EXP_PROVIDERS,
exp_stdout_lines=[],
exp_stdout_lines_all=True,
exp_stderr_lines=[],
),
# This testcase removes the pkl file from the mock cache as a
# preparation for executing the code to be tested. If the mock env is
# not cached, there is no pkl file that can be removed, so this
# testcase is skipped when the mock env cannot be cached.
None, DeprecatedSetupWarning, NEWSTYLE_SUPPORTED
),
(
"Mock env with MOF file and old-style mock script, "
"cache exists, but load results in rebuild due to missing md5 file",
dict(
test_mode='load_rebuild_missing_md5file',
verbose=False,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_OLD_NAMESPACE,
mock_files=SIMPLE_V1_OLD_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_OLD_EXP_CLASSES,
exp_providers=SIMPLE_V1_OLD_EXP_PROVIDERS,
exp_stdout_lines=[],
exp_stdout_lines_all=True,
exp_stderr_lines=[],
),
# This testcase removes the md5 file from the mock cache as a
# preparation for executing the code to be tested. If the mock env is
# not cached, there is no md5 file that can be removed, so this
# testcase is skipped when the mock env cannot be cached.
None, DeprecatedSetupWarning, NEWSTYLE_SUPPORTED
),
(
"Mock env with MOF file and old-style mock script, "
"cache exists, but load results in rebuild due to changed MOF file",
dict(
test_mode='load_rebuild_changed_moffile',
verbose=False,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_OLD_NAMESPACE,
mock_files=SIMPLE_V1_OLD_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_OLD_EXP_CLASSES,
exp_providers=SIMPLE_V1_OLD_EXP_PROVIDERS,
exp_stdout_lines=[],
exp_stdout_lines_all=True,
exp_stderr_lines=[],
),
None, DeprecatedSetupWarning, OK
),
(
"Mock env with MOF file and old-style mock script, "
"cache exists, but load results in rebuild due to changed .py file",
dict(
test_mode='load_rebuild_changed_pyfile',
verbose=False,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_OLD_NAMESPACE,
mock_files=SIMPLE_V1_OLD_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_OLD_EXP_CLASSES,
exp_providers=SIMPLE_V1_OLD_EXP_PROVIDERS,
exp_stdout_lines=[],
exp_stdout_lines_all=True,
exp_stderr_lines=[],
),
None, DeprecatedSetupWarning, OK
),
(
"Mock env with MOF file and new-style mock script, "
"cache does not exist",
dict(
test_mode='build',
verbose=False,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_NEW_NAMESPACE,
mock_files=SIMPLE_V1_NEW_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_NEW_EXP_CLASSES,
exp_providers=SIMPLE_V1_NEW_EXP_PROVIDERS,
exp_stdout_lines=[],
exp_stdout_lines_all=True,
exp_stderr_lines=[],
),
None if NEWSTYLE_SUPPORTED else SetupNotSupportedError,
DeprecationWarning if RETRY_DEPRECATION else None,
OK
),
(
"Mock env with MOF file and new-style mock script, "
"cache exists, and load succeeds",
dict(
test_mode='load',
verbose=False,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_NEW_NAMESPACE,
mock_files=SIMPLE_V1_NEW_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_NEW_EXP_CLASSES,
exp_providers=SIMPLE_V1_NEW_EXP_PROVIDERS,
exp_stdout_lines=[],
exp_stdout_lines_all=True,
exp_stderr_lines=[],
),
None if NEWSTYLE_SUPPORTED else SetupNotSupportedError,
DeprecationWarning if RETRY_DEPRECATION else None,
OK
),
(
"Mock env with MOF file and new-style mock script, "
"cache exists, but load results in rebuild due to missing pkl file",
dict(
test_mode='load_rebuild_missing_pklfile',
verbose=False,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_NEW_NAMESPACE,
mock_files=SIMPLE_V1_NEW_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_NEW_EXP_CLASSES,
exp_providers=SIMPLE_V1_NEW_EXP_PROVIDERS,
exp_stdout_lines=[],
exp_stdout_lines_all=True,
exp_stderr_lines=[],
),
None if NEWSTYLE_SUPPORTED else SetupNotSupportedError,
DeprecationWarning if RETRY_DEPRECATION else None,
OK
),
(
"Mock env with MOF file and new-style mock script, "
"cache exists, but load results in rebuild due to missing md5 file",
dict(
test_mode='load_rebuild_missing_md5file',
verbose=False,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_NEW_NAMESPACE,
mock_files=SIMPLE_V1_NEW_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_NEW_EXP_CLASSES,
exp_providers=SIMPLE_V1_NEW_EXP_PROVIDERS,
exp_stdout_lines=[],
exp_stdout_lines_all=True,
exp_stderr_lines=[],
),
None if NEWSTYLE_SUPPORTED else SetupNotSupportedError,
DeprecationWarning if RETRY_DEPRECATION else None,
OK
),
(
"Mock env with MOF file and new-style mock script, "
"cache exists, but load results in rebuild due to missing dep file",
dict(
test_mode='load_rebuild_missing_depfile',
verbose=False,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_NEW_NAMESPACE,
mock_files=SIMPLE_V1_NEW_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_NEW_EXP_CLASSES,
exp_providers=SIMPLE_V1_NEW_EXP_PROVIDERS,
exp_stdout_lines=[],
exp_stdout_lines_all=True,
exp_stderr_lines=[],
),
None if NEWSTYLE_SUPPORTED else SetupNotSupportedError,
DeprecationWarning if RETRY_DEPRECATION else None,
OK
),
(
"Mock env with MOF file and new-style mock script, "
"cache exists, but load results in rebuild due to changed MOF file",
dict(
test_mode='load_rebuild_changed_moffile',
verbose=False,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_NEW_NAMESPACE,
mock_files=SIMPLE_V1_NEW_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_NEW_EXP_CLASSES,
exp_providers=SIMPLE_V1_NEW_EXP_PROVIDERS,
exp_stdout_lines=[],
exp_stdout_lines_all=True,
exp_stderr_lines=[],
),
None if NEWSTYLE_SUPPORTED else SetupNotSupportedError,
DeprecationWarning if RETRY_DEPRECATION else None,
OK
),
(
"Mock env with MOF file and new-style mock script, "
"cache exists, but load results in rebuild due to changed .py file",
dict(
test_mode='load_rebuild_changed_pyfile',
verbose=False,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_NEW_NAMESPACE,
mock_files=SIMPLE_V1_NEW_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_NEW_EXP_CLASSES,
exp_providers=SIMPLE_V1_NEW_EXP_PROVIDERS,
exp_stdout_lines=[],
exp_stdout_lines_all=True,
exp_stderr_lines=[],
),
None if NEWSTYLE_SUPPORTED else SetupNotSupportedError,
DeprecationWarning if RETRY_DEPRECATION else None,
OK
),
# Testcases with verbose enabled.
(
"Mock env with MOF file and old-style mock script, "
"cache does not exist",
dict(
test_mode='build',
verbose=True,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_OLD_NAMESPACE,
mock_files=SIMPLE_V1_OLD_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_OLD_EXP_CLASSES,
exp_providers=SIMPLE_V1_OLD_EXP_PROVIDERS,
exp_stdout_lines=[
"Mock environment .* will be built because it was not cached.",
"Mock environment .* has been written to cache.",
] if NEWSTYLE_SUPPORTED else [
"Mock environment .* will be built because it was not cached.",
"Mock environment .* will be built because it is not "
"cacheable",
],
exp_stdout_lines_all=False,
exp_stderr_lines=[],
),
None, DeprecatedSetupWarning, not CLICK_ISSUE_1590
),
(
"Mock env with MOF file and old-style mock script, "
"cache exists, and load succeeds",
dict(
test_mode='load',
verbose=True,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_OLD_NAMESPACE,
mock_files=SIMPLE_V1_OLD_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_OLD_EXP_CLASSES,
exp_providers=SIMPLE_V1_OLD_EXP_PROVIDERS,
exp_stdout_lines=[
"Mock environment .* will be rebuilt because it is not "
"cacheable",
] if NEWSTYLE_SUPPORTED else [
"Mock environment .* will be built because it was not cached.",
"Mock environment .* will be built because it is not "
"cacheable",
],
exp_stdout_lines_all=False,
exp_stderr_lines=[],
),
None, DeprecatedSetupWarning, not CLICK_ISSUE_1590
),
(
"Mock env with MOF file and old-style mock script, "
"cache exists, but load results in rebuild due to missing pkl file",
dict(
test_mode='load_rebuild_missing_pklfile',
verbose=True,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_OLD_NAMESPACE,
mock_files=SIMPLE_V1_OLD_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_OLD_EXP_CLASSES,
exp_providers=SIMPLE_V1_OLD_EXP_PROVIDERS,
exp_stdout_lines=[ # Only NEWSTYLE_SUPPORTED
"Mock environment .* will be built because it was not cached.",
"Mock environment .* has been written to cache.",
],
exp_stdout_lines_all=False,
exp_stderr_lines=[],
),
# This testcase removes the pkl file from the mock cache as a
# preparation for executing the code to be tested. If the mock env is
# not cached, there is no pkl file that can be removed, so this
# testcase is skipped when the mock env cannot be cached.
None, DeprecatedSetupWarning,
NEWSTYLE_SUPPORTED and not CLICK_ISSUE_1590
),
(
"Mock env with MOF file and old-style mock script, "
"cache exists, but load results in rebuild due to missing md5 file",
dict(
test_mode='load_rebuild_missing_md5file',
verbose=True,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_OLD_NAMESPACE,
mock_files=SIMPLE_V1_OLD_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_OLD_EXP_CLASSES,
exp_providers=SIMPLE_V1_OLD_EXP_PROVIDERS,
exp_stdout_lines=[ # Only NEWSTYLE_SUPPORTED
"Mock environment .* will be built because it was not cached.",
"Mock environment .* has been written to cache.",
],
exp_stdout_lines_all=False,
exp_stderr_lines=[],
),
# This testcase removes the md5 file from the mock cache as a
# preparation for executing the code to be tested. If the mock env is
# not cached, there is no md5 file that can be removed, so this
# testcase is skipped when the mock env cannot be cached.
None, DeprecatedSetupWarning,
NEWSTYLE_SUPPORTED and not CLICK_ISSUE_1590
),
(
"Mock env with MOF file and old-style mock script, "
"cache exists, but load results in rebuild due to missing dep file",
dict(
test_mode='load_rebuild_missing_depfile',
verbose=True,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_OLD_NAMESPACE,
mock_files=SIMPLE_V1_OLD_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_OLD_EXP_CLASSES,
exp_providers=SIMPLE_V1_OLD_EXP_PROVIDERS,
exp_stdout_lines=[ # Only NEWSTYLE_SUPPORTED
"Mock environment .* will be built because it was not cached.",
"Mock environment .* has been written to cache.",
],
exp_stdout_lines_all=False,
exp_stderr_lines=[],
),
# This testcase removes the dep file from the mock cache as a
# preparation for executing the code to be tested. If the mock env is
# not cached, there is no dep file that can be removed, so this
# testcase is skipped when the mock env cannot be cached.
None, DeprecatedSetupWarning,
NEWSTYLE_SUPPORTED and not CLICK_ISSUE_1590
),
(
"Mock env with MOF file and old-style mock script, "
"cache exists, but load results in rebuild due to changed MOF file",
dict(
test_mode='load_rebuild_changed_moffile',
verbose=True,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_OLD_NAMESPACE,
mock_files=SIMPLE_V1_OLD_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_OLD_EXP_CLASSES,
exp_providers=SIMPLE_V1_OLD_EXP_PROVIDERS,
exp_stdout_lines=[
"Mock environment .* will be rebuilt because the mock files "
"have changed.",
"Mock environment .* has been written to cache.",
] if NEWSTYLE_SUPPORTED else [
"Mock environment .* will be built because it was not cached.",
"Mock environment .* will be built because it is not "
"cacheable",
],
exp_stdout_lines_all=False,
exp_stderr_lines=[],
),
None, DeprecatedSetupWarning, not CLICK_ISSUE_1590
),
(
"Mock env with MOF file and old-style mock script, "
"cache exists, but load results in rebuild due to changed .py file",
dict(
test_mode='load_rebuild_changed_pyfile',
verbose=True,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_OLD_NAMESPACE,
mock_files=SIMPLE_V1_OLD_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_OLD_EXP_CLASSES,
exp_providers=SIMPLE_V1_OLD_EXP_PROVIDERS,
exp_stdout_lines=[
"Mock environment .* will be rebuilt because the mock files "
"have changed.",
"Mock environment .* has been written to cache.",
] if NEWSTYLE_SUPPORTED else [
"Mock environment .* will be built because it was not cached.",
"Mock environment .* will be built because it is not "
"cacheable",
],
exp_stdout_lines_all=False,
exp_stderr_lines=[],
),
None, DeprecatedSetupWarning, not CLICK_ISSUE_1590
),
(
"Mock env with MOF file and new-style mock script, "
"cache does not exist",
dict(
test_mode='build',
verbose=True,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_NEW_NAMESPACE,
mock_files=SIMPLE_V1_NEW_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_NEW_EXP_CLASSES,
exp_providers=SIMPLE_V1_NEW_EXP_PROVIDERS,
exp_stdout_lines=[
"Mock environment .* will be built because it was not cached.",
"Mock environment .* has been written to cache.",
],
exp_stdout_lines_all=False,
exp_stderr_lines=[],
),
None if NEWSTYLE_SUPPORTED else SetupNotSupportedError,
DeprecationWarning if RETRY_DEPRECATION else None,
not CLICK_ISSUE_1590
),
(
"Mock env with MOF file and new-style mock script, "
"cache exists, and load succeeds",
dict(
test_mode='load',
verbose=True,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_NEW_NAMESPACE,
mock_files=SIMPLE_V1_NEW_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_NEW_EXP_CLASSES,
exp_providers=SIMPLE_V1_NEW_EXP_PROVIDERS,
exp_stdout_lines=[ # Only NEWSTYLE_SUPPORTED
"Mock environment .* has been loaded from cache.",
],
exp_stdout_lines_all=False,
exp_stderr_lines=[],
),
None if NEWSTYLE_SUPPORTED else SetupNotSupportedError,
DeprecationWarning if RETRY_DEPRECATION else None,
not CLICK_ISSUE_1590
),
(
"Mock env with MOF file and new-style mock script, "
"cache exists, but load results in rebuild due to missing pkl file",
dict(
test_mode='load_rebuild_missing_pklfile',
verbose=True,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_NEW_NAMESPACE,
mock_files=SIMPLE_V1_NEW_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_NEW_EXP_CLASSES,
exp_providers=SIMPLE_V1_NEW_EXP_PROVIDERS,
exp_stdout_lines=[ # Only NEWSTYLE_SUPPORTED
"Mock environment .* will be built because it was not cached.",
"Mock environment .* has been written to cache.",
],
exp_stdout_lines_all=False,
exp_stderr_lines=[],
),
None if NEWSTYLE_SUPPORTED else SetupNotSupportedError,
DeprecationWarning if RETRY_DEPRECATION else None,
not CLICK_ISSUE_1590
),
(
"Mock env with MOF file and new-style mock script, "
"cache exists, but load results in rebuild due to missing md5 file",
dict(
test_mode='load_rebuild_missing_md5file',
verbose=True,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_NEW_NAMESPACE,
mock_files=SIMPLE_V1_NEW_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_NEW_EXP_CLASSES,
exp_providers=SIMPLE_V1_NEW_EXP_PROVIDERS,
exp_stdout_lines=[ # Only NEWSTYLE_SUPPORTED
"Mock environment .* will be built because it was not cached.",
"Mock environment .* has been written to cache.",
],
exp_stdout_lines_all=False,
exp_stderr_lines=[],
),
None if NEWSTYLE_SUPPORTED else SetupNotSupportedError,
DeprecationWarning if RETRY_DEPRECATION else None,
not CLICK_ISSUE_1590
),
(
"Mock env with MOF file and new-style mock script, "
"cache exists, but load results in rebuild due to changed MOF file",
dict(
test_mode='load_rebuild_changed_moffile',
verbose=True,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_NEW_NAMESPACE,
mock_files=SIMPLE_V1_NEW_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_NEW_EXP_CLASSES,
exp_providers=SIMPLE_V1_NEW_EXP_PROVIDERS,
exp_stdout_lines=[ # Only NEWSTYLE_SUPPORTED
"Mock environment .* will be rebuilt because the mock files "
"have changed.",
"Mock environment .* has been written to cache.",
],
exp_stdout_lines_all=False,
exp_stderr_lines=[],
),
None if NEWSTYLE_SUPPORTED else SetupNotSupportedError,
DeprecationWarning if RETRY_DEPRECATION else None,
not CLICK_ISSUE_1590
),
(
"Mock env with MOF file and new-style mock script, "
"cache exists, but load results in rebuild due to changed .py file",
dict(
test_mode='load_rebuild_changed_pyfile',
verbose=True,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_NEW_NAMESPACE,
mock_files=SIMPLE_V1_NEW_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_NEW_EXP_CLASSES,
exp_providers=SIMPLE_V1_NEW_EXP_PROVIDERS,
exp_stdout_lines=[ # Only NEWSTYLE_SUPPORTED
"Mock environment .* will be rebuilt because the mock files "
"have changed.",
"Mock environment .* has been written to cache.",
],
exp_stdout_lines_all=False,
exp_stderr_lines=[],
),
None if NEWSTYLE_SUPPORTED else SetupNotSupportedError,
DeprecationWarning if RETRY_DEPRECATION else None,
not CLICK_ISSUE_1590
),
# Testcases with non-cacheable user-specified connections file
(
"Mock env with user-specified connections file",
dict(
test_mode='build',
verbose=True,
connections_file=USER_CONNECTIONS_FILE,
default_namespace=SIMPLE_V1_OLD_NAMESPACE,
mock_files=SIMPLE_V1_OLD_MOCK_FILES,
exp_dep_files=[],
exp_classes=SIMPLE_V1_OLD_EXP_CLASSES,
exp_providers=SIMPLE_V1_OLD_EXP_PROVIDERS,
exp_stdout_lines=[
"Mock environment .* will be built because user-specified "
"connections files are not cached",
] if NEWSTYLE_SUPPORTED else [
"Mock environment .* will be built because user-specified "
"connections files are not cached",
"Mock environment .* will be built because it is not "
"cacheable",
],
exp_stdout_lines_all=False,
exp_stderr_lines=[],
),
None, DeprecatedSetupWarning, not CLICK_ISSUE_1590
),
# Testcases with standalone mock script that has dependents
(
"Mock env with standalone mock script with deps; normal build",
dict(
test_mode='build',
verbose=True,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=STANDALONE_NAMESPACE,
mock_files=STANDALONE_MOCK_FILES,
exp_dep_files=[],
exp_classes=STANDALONE_EXP_CLASSES,
exp_providers=STANDALONE_EXP_PROVIDERS,
exp_stdout_lines=[
"Mock environment .* will be built because it was not cached.",
"Mock environment .* has been written to cache.",
],
exp_stdout_lines_all=False,
exp_stderr_lines=[],
),
None if NEWSTYLE_SUPPORTED else SetupNotSupportedError,
DeprecationWarning if RETRY_DEPRECATION else None,
not CLICK_ISSUE_1590
),
(
"Mock env with standalone mock script with deps; change dependent file",
dict(
test_mode='load_rebuild_changed_depfile',
verbose=True,
connections_file=DEFAULT_CONNECTIONS_FILE,
default_namespace=STANDALONE_NAMESPACE,
mock_files=STANDALONE_MOCK_FILES,
exp_dep_files=['tests/unit/pywbemcli/simple_mock_model.mof'],
exp_classes=STANDALONE_EXP_CLASSES,
exp_providers=STANDALONE_EXP_PROVIDERS,
exp_stdout_lines=[
"Mock environment .* will be rebuilt because the mock files "
"have changed.",
"Mock environment .* has been written to cache.",
],
exp_stdout_lines_all=False,
exp_stderr_lines=[],
),
None if NEWSTYLE_SUPPORTED else SetupNotSupportedError,
DeprecationWarning if RETRY_DEPRECATION else None,
not CLICK_ISSUE_1590
),
]
@pytest.mark.parametrize(
"desc, kwargs, exp_exc_types, exp_warn_types, condition",
TESTCASES_BUILD_MOCKENV)
@simplified_test_function
def test_build_mockenv(testcase, test_mode, verbose, connections_file,
default_namespace, mock_files, exp_dep_files,
exp_classes, exp_providers, exp_stdout_lines,
exp_stdout_lines_all, exp_stderr_lines):
"""
Test function for BuildMockenvMixin.build_mockenv().
"""
# The connections file is used by PywbemServer() and its build_mockenv()
# method only as a file path, and is never actually created or accessed.
connection_name = 'test_build_mockenv'
# Make sure the mock cache does not exist
remove_mockcache(connection_name)
if connections_file == DEFAULT_CONNECTIONS_FILE:
save_default_connections_file()
try:
if test_mode == 'build':
conn = PYWBEMCLIFakedConnection(default_namespace=default_namespace)
server = pywbem.WBEMServer(conn)
with captured_output() as captured:
# The code to be tested
conn.build_mockenv(server, mock_files, connections_file,
connection_name, verbose)
if NEWSTYLE_SUPPORTED and \
connections_file == DEFAULT_CONNECTIONS_FILE:
mockcache_dir = get_mockcache_dir(connection_name)
assert os.path.isdir(mockcache_dir)
elif test_mode == 'load':
# This test only makes sense when caching is possible
assert connections_file == DEFAULT_CONNECTIONS_FILE
conn = PYWBEMCLIFakedConnection(default_namespace=default_namespace)
server = pywbem.WBEMServer(conn)
conn.build_mockenv(server, mock_files, connections_file,
connection_name, False)
conn = PYWBEMCLIFakedConnection(default_namespace=default_namespace)
server = pywbem.WBEMServer(conn)
with captured_output() as captured:
# The code to be tested
conn.build_mockenv(server, mock_files, connections_file,
connection_name, verbose)
elif test_mode == 'load_rebuild_missing_pklfile':
# This test only makes sense when caching is possible
assert connections_file == DEFAULT_CONNECTIONS_FILE
conn = PYWBEMCLIFakedConnection(default_namespace=default_namespace)
server = pywbem.WBEMServer(conn)
conn.build_mockenv(server, mock_files, connections_file,
connection_name, False)
mockcache_dir = get_mockcache_dir(connection_name)
pkl_file = os.path.join(mockcache_dir, 'mockenv.pkl')
os.remove(pkl_file)
conn = PYWBEMCLIFakedConnection(default_namespace=default_namespace)
server = pywbem.WBEMServer(conn)
with captured_output() as captured:
# The code to be tested
conn.build_mockenv(server, mock_files, connections_file,
connection_name, verbose)
elif test_mode == 'load_rebuild_missing_md5file':
# This test only makes sense when caching is possible
assert connections_file == DEFAULT_CONNECTIONS_FILE
conn = PYWBEMCLIFakedConnection(default_namespace=default_namespace)
server = pywbem.WBEMServer(conn)
conn.build_mockenv(server, mock_files, connections_file,
connection_name, False)
mockcache_dir = get_mockcache_dir(connection_name)
md5_file = os.path.join(mockcache_dir, 'mockfiles.md5')
os.remove(md5_file)
conn = PYWBEMCLIFakedConnection(default_namespace=default_namespace)
server = pywbem.WBEMServer(conn)
with captured_output() as captured:
# The code to be tested
conn.build_mockenv(server, mock_files, connections_file,
connection_name, verbose)
elif test_mode == 'load_rebuild_missing_depfile':
# This test only makes sense when caching is possible
assert connections_file == DEFAULT_CONNECTIONS_FILE
conn = PYWBEMCLIFakedConnection(default_namespace=default_namespace)
server = pywbem.WBEMServer(conn)
conn.build_mockenv(server, mock_files, connections_file,
connection_name, False)
mockcache_dir = get_mockcache_dir(connection_name)
dep_file = os.path.join(mockcache_dir, 'depreg.pkl')
os.remove(dep_file)
conn = PYWBEMCLIFakedConnection(default_namespace=default_namespace)
server = pywbem.WBEMServer(conn)
with captured_output() as captured:
# The code to be tested
conn.build_mockenv(server, mock_files, connections_file,
connection_name, verbose)
elif test_mode == 'load_rebuild_changed_moffile':
# This test only makes sense when caching is possible
assert connections_file == DEFAULT_CONNECTIONS_FILE
conn = PYWBEMCLIFakedConnection(default_namespace=default_namespace)
server = pywbem.WBEMServer(conn)
conn.build_mockenv(server, mock_files, connections_file,
connection_name, False)
# Change the MOF file
mof_file = [mf for mf in mock_files if mf.endswith('.mof')][0]
mof_size = os.stat(mof_file).st_size
with io.open(mof_file, 'a', encoding='utf-8') as fp:
fp.write(u'\n// test_build_mockenv: Dummy line\n')
conn = PYWBEMCLIFakedConnection(default_namespace=default_namespace)
server = pywbem.WBEMServer(conn)
try:
with captured_output() as captured:
# The code to be tested
conn.build_mockenv(server, mock_files, connections_file,
connection_name, verbose)
finally:
# Undo change to the MOF file
with io.open(mof_file, 'ab') as fp:
fp.truncate(mof_size)
elif test_mode == 'load_rebuild_changed_pyfile':
# This test only makes sense when caching is possible
assert connections_file == DEFAULT_CONNECTIONS_FILE
conn = PYWBEMCLIFakedConnection(default_namespace=default_namespace)
server = pywbem.WBEMServer(conn)
conn.build_mockenv(server, mock_files, connections_file,
connection_name, False)
# Change the mock script file
py_file = [mf for mf in mock_files if mf.endswith('.py')][0]
py_size = os.stat(py_file).st_size
with io.open(py_file, 'a', encoding='utf-8') as fp:
fp.write(u'\n# test_build_mockenv: Dummy line\n')
conn = PYWBEMCLIFakedConnection(default_namespace=default_namespace)
server = pywbem.WBEMServer(conn)
try:
with captured_output() as captured:
# The code to be tested
conn.build_mockenv(server, mock_files, connections_file,
connection_name, verbose)
finally:
# Undo change to the mock script file
with io.open(py_file, 'ab') as fp:
fp.truncate(py_size)
elif test_mode == 'load_rebuild_changed_depfile':
# This test only makes sense when caching is possible
assert connections_file == DEFAULT_CONNECTIONS_FILE
conn = PYWBEMCLIFakedConnection(default_namespace=default_namespace)
server = pywbem.WBEMServer(conn)
conn.build_mockenv(server, mock_files, connections_file,
connection_name, False)
# Change the first dependent file (must be a MOF file in this test)
assert len(exp_dep_files) > 0
dep_file = exp_dep_files[0]
assert dep_file.endswith('.mof')
dep_size = os.stat(dep_file).st_size
with io.open(dep_file, 'a', encoding='utf-8') as fp:
fp.write(u'\n// test_build_mockenv: Dummy line\n')
conn = PYWBEMCLIFakedConnection(default_namespace=default_namespace)
server = pywbem.WBEMServer(conn)
try:
with captured_output() as captured:
# The code to be tested
conn.build_mockenv(server, mock_files, connections_file,
connection_name, verbose)
finally:
# Undo change to the dependent file
with io.open(dep_file, 'ab') as fp:
fp.truncate(dep_size)
finally:
# Clean up the mock cache
remove_mockcache(connection_name)
if connections_file == DEFAULT_CONNECTIONS_FILE:
restore_default_connections_file()
# Ensure that exceptions raised in the remainder of this function
# are not mistaken as expected exceptions
assert testcase.exp_exc_types is None
for ns, cln in exp_classes:
class_store = conn.cimrepository.get_class_store(ns)
assert class_store.object_exists(cln)
for ns, cln, pt, _ in exp_providers:
# pylint: disable=protected-access
assert conn._provider_registry.get_registered_provider(ns, pt, cln)
if exp_stdout_lines_all:
if captured.stdout == '':
stdout_lines = []
else:
stdout_lines = captured.stdout.strip('\n').split('\n')
assert len(stdout_lines) == len(exp_stdout_lines), \
"Unexpected number of lines on stdout:\n" \
"Testcase: {}\n" \
"---- Actually: {} lines:\n{}" \
"---- Expected: {} lines (regexp):\n{}" \
"---- End\n". \
format(testcase.desc,
len(stdout_lines),
''.join([ensure_unicode(ln) + '\n'
for ln in stdout_lines]),
len(exp_stdout_lines),
''.join([ensure_unicode(ln) + '\n'
for ln in exp_stdout_lines]))
for i, regexp in enumerate(exp_stdout_lines):
line = stdout_lines[i]
assert re.search(regexp, line), \
"Unexpected line #{} on stdout:\n" \
"Testcase: {}\n" \
"---- Actually:\n" \
"{}\n" \
"---- Expected (regexp):\n" \
"{}\n" \
"---- End\n". \
format(i + 1, testcase.desc, line, regexp)
else:
for regexp in exp_stdout_lines:
assert re.search(regexp, captured.stdout), \
"Missing line on stdout:\n" \
"Testcase: {}\n" \
"---- Actual stdout:\n" \
"{}\n" \
"---- Expected line (regexp):\n" \
"{}\n" \
"---- End\n". \
format(testcase.desc, captured.stdout, regexp)
if captured.stderr == '':
stderr_lines = []
else:
stderr_lines = captured.stderr.strip('\n').split('\n')
assert len(stderr_lines) == len(exp_stderr_lines), \
"Unexpected number of lines on stderr:\n" \
"Testcase: {}\n" \
"---- Actually: {} lines:\n{}" \
"---- Expected: {} lines (regexp):\n{}" \
"---- End\n". \
format(testcase.desc,
len(stderr_lines),
''.join([ensure_unicode(ln) + '\n'
for ln in stderr_lines]),
len(exp_stderr_lines),
''.join([ensure_unicode(ln) + '\n'
for ln in exp_stderr_lines]))
for i, regexp in enumerate(exp_stderr_lines):
line = stderr_lines[i]
assert re.search(regexp, line), \
"Unexpected line #{} on stderr:\n" \
"Testcase: {}\n" \
"---- Actually:\n" \
"{}\n" \
"---- Expected (regexp):\n" \
"{}\n" \
"---- End\n". \
format(i + 1, testcase.desc, line, regexp)
|
|
#!/usr/bin/env python3
import json
import os
import subprocess
import enum
import sys
from xml.sax import saxutils
class PAMResult:
"""
A test result from pam.
"""
class Status(enum.Enum):
PASS = 1
FAIL = 2
ERROR = 3
def __init__(self, class_name, name, status, description='', message=''):
self.class_name = class_name
self.name = name
self.status = status
self.description = description
self.message = message
class PAMWrapper:
"""
A base wrapper class to run the Python AutoMarker (pam - https://github.com/ProjectAT/uam).
"""
def __init__(self, path_to_uam, test_files, test_timeout=5, global_timeout=20, path_to_virtualenv=None,
result_filename='result.json', timeout_filename='timedout'):
"""
Initializes the various parameters to run pam.
:param path_to_uam: The path to the uam installation.
:param test_files: A list of test files to be run by pam.
:param test_timeout: The max time to run a single test on the student submission.
:param global_timeout: The max time to run all tests on the student submission.
:param path_to_virtualenv: The path to the virtualenv to be used to run pam, can be None if all necessary
dependencies are installed system-wide.
:param result_filename: The file name of pam's json output.
:param timeout_filename: The file name of pam's output when a test times out.
"""
self.path_to_uam = path_to_uam
self.path_to_pam = path_to_uam + '/pam/pam.py'
self.test_files = test_files
self.test_timeout = test_timeout
self.global_timeout = global_timeout
self.path_to_virtualenv = path_to_virtualenv
self.result_filename = result_filename
self.timeout_filename = timeout_filename
def collect_results(self):
"""
Collects pam results.
:return: A list of results (possibly empty), or None if the tests timed out.
"""
results = None
try:
with open(self.result_filename) as result_file:
results = []
result = json.load(result_file)
for test_class_name, test_class_result in result['results'].items():
if 'passes' in test_class_result:
for test_name, test_desc in test_class_result['passes'].items():
results.append(
PAMResult(class_name=test_class_name.partition('.')[2], name=test_name,
status=PAMResult.Status.PASS, description=test_desc))
if 'failures' in test_class_result:
for test_name, test_stack in test_class_result['failures'].items():
results.append(
PAMResult(class_name=test_class_name.partition('.')[2], name=test_name,
status=PAMResult.Status.FAIL, description=test_stack['description'],
message=test_stack['message']))
if 'errors' in test_class_result:
for test_name, test_stack in test_class_result['errors'].items():
results.append(
PAMResult(class_name=test_class_name.partition('.')[2], name=test_name,
status=PAMResult.Status.ERROR, description=test_stack['description'],
message=test_stack['message']))
except OSError:
if not os.path.isfile(self.timeout_filename):
print('Test framework error: no result or time out generated', file=sys.stderr)
exit(1)
return results
def print_results(self, results):
"""
Prints pam results: must be overridden.
:param results: A list of results (possibly empty), or None if the tests timed out.
"""
pass
def run(self):
"""
Runs pam.
"""
if self.path_to_virtualenv is None:
shell_command = [self.path_to_pam, '-t', str(self.test_timeout), self.result_filename]
shell_command.extend(self.test_files)
shell = False
else:
shell_command = '''
. {cmd.path_to_virtualenv}/bin/activate;
{cmd.path_to_pam} -t {cmd.test_timeout} {cmd.result_filename} {files}
'''.format(cmd=self, files=' '.join(self.test_files))
shell = True
try:
env = os.environ.copy() # need to add path to uam libs
if 'PYTHONPATH' in env:
env['PYTHONPATH'] = "{systempath}:{pampath}".format(systempath=env['PYTHONPATH'],
pampath=self.path_to_uam)
else:
env['PYTHONPATH'] = self.path_to_uam
subprocess.run(shell_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True, shell=shell,
env=env, timeout=self.global_timeout)
# use the following with Python < 3.5
# subprocess.check_call(shell_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=shell,
# env=env, timeout=self.global_timeout)
results = self.collect_results()
self.print_results(results)
except subprocess.TimeoutExpired:
self.print_results(None)
exit(1)
except subprocess.CalledProcessError as e:
print('Test framework error: stdout: {stdout}, stderr: {stderr}'.format(stdout=e.stdout, stderr=e.stderr),
file=sys.stderr)
# use the following with Python < 3.5
# print('Test framework error', file=sys.stderr)
exit(1)
except Exception as e:
print('Test framework error: {exception}'.format(exception=e), file=sys.stderr)
exit(1)
class MarkusPAMWrapper(PAMWrapper):
"""
A wrapper to run the Python AutoMarker (pam - https://github.com/ProjectAT/uam) within Markus' test framework.
"""
def print_result(self, name, input, expected, actual, marks, status):
"""
Prints one pam result in the format expected by Markus' test framework.
"""
print('''
<test>
<name>{name}</name>
<input>{input}</input>
<expected>{expected}</expected>
<actual>{actual}</actual>
<marks_earned>{marks}</marks_earned>
<status>{status}</status>
</test>
'''.format(name=name, input=input, expected=expected, actual=actual, marks=marks, status=status))
def print_results(self, results):
"""
Prints pam results.
:param results: A list of results (possibly empty), or None if the tests timed out.
"""
if results is None:
self.print_result(name='All tests', input='', expected='', actual='Timeout', marks=0, status='error')
else:
for result in results:
marks = 1 if result.status == PAMResult.Status.PASS else 0
status = 'pass' if result.status == PAMResult.Status.PASS else 'fail'
name = result.name if not result.description else '{name} ({desc})'.format(name=result.name,
desc=result.description)
self.print_result(name=name, input='', expected='',
actual=saxutils.escape(result.message, entities={"'": '&apos;'}),
marks=marks, status=status)
if __name__ == '__main__':
# Modify uppercase variables with your settings
# The path to the UAM root folder
PATH_TO_UAM = '/path/to/uam'
# A list of test files uploaded as support files to be executed against the student submission
MARKUS_TEST_FILES = ['test.py']
# The max time to run a single test on the student submission.
TEST_TIMEOUT = 5
# The max time to run all tests on the student submission.
GLOBAL_TIMEOUT = 20
# The path to a Python virtualenv that has the test dependencies
# (if None, dependencies must be installed system-wide)
PATH_TO_VIRTUALENV = None
wrapper = MarkusPAMWrapper(path_to_uam=PATH_TO_UAM, test_files=MARKUS_TEST_FILES, test_timeout=TEST_TIMEOUT,
global_timeout=GLOBAL_TIMEOUT, path_to_virtualenv=PATH_TO_VIRTUALENV)
wrapper.run()
# use with markusapi.py if needed (import markusapi)
ROOT_URL = sys.argv[1]
API_KEY = sys.argv[2]
ASSIGNMENT_ID = sys.argv[3]
GROUP_ID = sys.argv[4]
# FILE_NAME = 'result.json'
# api = markusapi.Markus(API_KEY, ROOT_URL)
# with open(FILE_NAME) as open_file:
# api.upload_feedback_file(ASSIGNMENT_ID, GROUP_ID, FILE_NAME, open_file.read())
|
|
#!/usr/bin/env python
#
# Copyright (c) 2013-2016, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
import scheduling
import logging
import model
import queue
import config
# Number of messages to send to a remote NUMA node before stopping to
# do so
NUM_STOP_REMOTE=1
# --------------------------------------------------
class SchedAdaptive(scheduling.Scheduling):
"""Scheduler supporting dynamic creation of broadcast tree
"""
"""For each node <s>, Stores all outgoing connections (cost, r) that
are used in the schedule, where r is the receiver and cost the
cost associated with sending a message from s to r.
Deprecated: <cost> is not used anywhere any longer, so not
maintaining it in here would simplify our life considerably.
"""
store = dict()
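# Illustrative example (core IDs and costs are made up):
# store = {0: [(40, 8), (12, 3)]} means core 0 first sends to core 8
# (link cost 40) and then to core 3 (link cost 12).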
num = 0
finished = False
def __init__(self, graph, mod, overlay):
assert isinstance(mod, model.Model)
self.mod = mod
self.overlay = overlay
self.store = {key: [] for key in range(self.mod.get_num_cores())}
super(SchedAdaptive, self).__init__(graph)
def _numa_domain_active(self, core, nodes_active):
"""
Return whether at least NUM_STOP_REMOTE cores of the given
core's NUMA domain are active
"""
num = 0
for c in self.mod.get_numa_node(core):
if c in nodes_active:
num += 1
return num >= NUM_STOP_REMOTE
def get_parents(self):
"""Return a dictionary core -> parent identifying each core's parent
in the current tree.
"""
_p = {}
for s, children in self.store.items():
for (_, child) in children:
assert not child in _p # Each core has only one parent
_p[child] = s
return _p
def get_root(self):
"""Returns the root of the current tree
The root does not have a parent. So we need to find a node
without parents.
"""
parent = self.get_parents()
for s in self.mod.get_cores(True):
if not s in parent:
return s
raise Exception('Could not find root')
def assert_history(self):
"""Sanity check to verify that the send history as known to the
machine model matches the order as given from the adaptivetree.
If this is true, we can use the built-in function
get_send_cost() with the corrected option, as the send history
as stored in the model matches the one in the adaptive tree.
"""
for sender, children in self.store.items():
# Retrieve send history for that node
sh_them = self.mod.send_history.get(sender, [])
# Drop the costs
sh_us = [ n for (_, n) in children ]
assert len(sh_them) == len(sh_us) # same number of children
for us, them in zip(sh_us, sh_them):
assert us == them # send history matches in each element
logging.info(('Send history is correct :-)'))
def cost_tree(self):
return self.cost_subtree()[self.get_root()]
def cost_subtree(self):
"""Determine the cost of each node's subtree.
Returns the cost as a dictionary core -> cost at subtree.
"""
r = {}
for n in self.store.keys():
_, t_avail = self.simulate_current(start=n)
# Select the maximum t_avail from all calculated nodes
_, r[n] = sorted(t_avail.items(), key=lambda x: x[1], reverse=True)[0]
return r
def reorder(self):
"""Optimize the current schedule.
This does NOT change the topology, but only the send order in
each node. Rather than sending on the most expensive _link_
first, as generated in the initial unoptimized adaptive tree,
we here change the schedule to send to the child with the most
expensive _subtree_ first.
However, the send history is affected by this. So, this
function rewrites it while "simulating".
Needs to be executed in 2 steps in order for the send
histories to be consistent.
"""
# Determine the cost of each subtree
cost = self.cost_subtree()
logging.info(('Re-ordering: subtree costs are:', str(cost)))
# reset send history in machine model
self.mod.reset()
new_store = {}
# Find new order
# ------------------------------
for core, _children in self.store.items():
# Determine the children of a node
children = [ c for (_, c) in _children ]
# Get cost of each child's subtree
children_cost = [ cost[c] for c in children ]
# Sort, most expensive first
children_sorted = sorted(zip(children, children_cost), \
key=lambda x: x[1], reverse=True)
new_store[core] = [ (_cost, _node) for (_node, _cost) in children_sorted ]
self.store = {key: [] for key in range(self.mod.get_num_cores())}
# Apply new order
# ------------------------------
for core, _children in new_store.items():
for (cost, node) in _children:
self.mod.add_send_history(core, node)
self.store[core] = self.store.get(core, []) + [(cost, node)]
logging.info(('Storing new send order', self.store.get(core, [])))
# Verify the rewritten send history
self.assert_history()
def get_slowest(self):
_, t_avail = self.simulate_current()
return sorted(t_avail.items(), key=lambda x: x[1], reverse=True)[0]
def simulate_current(self, start=None, visu=None):
"""Simulate the current tree starting at node <start>.
Use root if <start> is not given."""
# Send history
send_history = {}
_start = start
if _start is None:
_start = self.get_root()
# Calculate cost of subtree
q = queue.Queue()
q.put((_start, 0))
# Dictionary core c -> time when message is availabe on each
# core, after receiving
t_avail = {}
# Dictionary core c -> time when core is idle, i.e. after
# sending the last message. If no message is sent, this equals t_avail
t_idle = {}
# Determine the time where the message is available in each node
# --------------------------------------------------
while not q.empty():
c, time = q.get() # calculate next core
assert not c in t_avail
t_avail[c] = time
# Get an order list of neighbors
for _, nb in self.store[c]:
# Send time as perceived by the client
t_send = self.mod.get_send_cost_for_history(c, nb, send_history.get(c, []))
assert t_send > 0
# Actual send time
t_send_propagate = max(self.mod.get_send_cost(c, nb, False, False), t_send)
t_receive = self.mod.get_receive_cost(c, nb)
# Visualize send and receive events
if visu:
assert time + t_send <= time + t_send_propagate
visu.send(c, nb, time, t_send)
visu.receive(nb, c, time + t_send_propagate, t_receive)
q.put((nb, time + t_send_propagate + t_receive))
time += t_send
send_history[c] = send_history.get(c, []) + [nb]
assert not c in t_idle
t_idle[c] = time # time after sending all messages OR
# after receiving if no message is sent.
# Either the tree is fully connected, or we started the tree
# traversal somewhere other than the root of the tree.
assert _start!=self.get_root() or (len(t_avail)==len(self.store)) or len(t_avail)==self.mod.get_num_cores(True)
assert _start!=self.get_root() or (len(t_idle) ==len(self.store)) or len(t_avail)==self.mod.get_num_cores(True)
return t_idle, t_avail
def optimize_scheduling(self):
"""Find optimizations for current schedule.
This is a 2-step process:
1) Optimize the schedule - sort by the cost of each subtree rather
than by the cost of each individual link. The send history is
rebuilt after this.
Invariant: send history remains intact.
"""
assert (len(self.store) == sum([len(c) for (s, c) in self.store.items()])+1) or \
(self.mod.get_num_cores(True) == sum([len(c) for (s, c) in self.store.items()])+1)
# Store old state
cost_old = self.cost_tree()
store_old = self.store
history_old = self.mod.send_history
logging.info(('root is', self.get_root()))
for core, children in self.store.items():
logging.info((core, '->', [ core for (_, core) in children ]))
# --------------------------------------------------
# REORDER - for each core, reorder messages
# most expensive subgraph first
# --------------------------------------------------
self.reorder()
self.assert_history()
# Sanity checks
# --------------------------------------------------
for sender, cld in sorted(self.store.items(), key=lambda x: x[0]):
cld_old = store_old[sender]
# We change only the order of the neighbors, so if we sort
# each node's children, the lists should be the same
cld_c = [c for _,c in cld]
cld_old_c = [c for _,c in cld_old]
assert sorted(cld_c) == sorted(cld_old_c)
if cld_c != cld_old_c:
logging.info(('Changed schedule in node %4d from %20s to %20s'%\
(sender, str(cld_old_c), str(cld_c))))
else:
logging.info(('Unchanged schedule in node %2d' % sender))
# XXX I think the problem here is that the cost in the very
# first iteration is given by the external evaluation, rather
# than internally by the adaptive scheduler - and they don't
# match!
# Two options:
# 1) make consistent
# 2) recalculate before optimizing
#
# Strong preference for 2)
# Sanity check: cost of the new tree has to be smaller
cost_new = self.cost_tree()
# XXX OKAY - for some reason, the re-ordered schedule is
# sometimes slower. I think this is to be expected in some
# cases, but on gruyere after the first re-ordering, the cost
# of subtrees (5) and (8) seem to be calculated wrong, from
# looking at the visualization.
# assert cost_new <= cost_old
if cost_new > cost_old:
self.mod.send_history = history_old
self.store = store_old
self.assert_history()
return self.simulate_current()
def replace(self, sender, receiver):
"""Updates the adaptive tree
Remove (x, receiver) and add (sender, receiver) instead.
"""
logging.info(('ADAPTIVE before' + str(self.store)))
# Determine send cost before messing around with the send histories
self.assert_history() # still intact, but we also don't use them elsewhere
cost = self.mod.get_send_cost(sender, receiver)
# Remove previous sender
for (s, l_receivers) in self.store.items():
logging.info(('ADAPTIVE', 'looking for ', receiver, 'in', l_receivers))
self.store[s] = [ (c, r) for (c, r) in l_receivers if \
r != receiver ]
# Add new pair
self.store[sender].append((cost, receiver))
logging.info(('ADAPTIVE after' + str(self.store)))
def find_schedule(self, sending_node, cores_active=None):
"""
Find a schedule for the given node.
@param sending_node Sending node for which to determine scheduling
@param cores_active List of active nodes
@return A list of (cost, receiver) pairs: the previously computed
schedule once finished, otherwise at most one newly chosen receiver.
"""
# We calculate the adaptive tree only once. After we finished
# this, we just return whatever tree was generated in the
# first iteration.
if self.finished:
return self.store[sending_node]
assert cores_active is not None
assert sending_node in cores_active
cheap_first = self.overlay.options.get('min', False)
self.num += 1
# Find cores that ...
# Principles are:
# * The filter makes sure that we do _not_ send unnecessary messages
# across NUMA domains
# * Send expensive messages first
cores = self.graph.nodes()
inactive_nodes = []
for c in cores:
# Never send to ourself
if c == sending_node:
continue
# Do not consider any nodes that are not in the multicast
if not c in self.mod.get_cores(True):
continue
logging.info('Sending node from %d to %d' % (sending_node, c))
# Is the target node active already?
node_active = self._numa_domain_active(c, cores_active)
# Is the target core on the sender's local node?
# (whether a message was already sent there is checked below)
same_node = self.mod.on_same_numa_node(sending_node, c)
# Should this node be considered for sending?
if cheap_first:
# Consider all cores to which we are not sending a message yet
consider = not c in cores_active
else:
# Yes, if receiver is on inactive node, or on local node
consider = not node_active or same_node
# Do not resend messages to local cores. The local node is
# already active at that point, so remote nodes will not
# resend messages to any of the cores on that node.
#
# What remains to be checked is whether any of the other
# cores on the same node already sent a message.
if same_node or True:
# Check if somebody else sent a message there already
for othercores in self.mod.get_numa_node(sending_node):
if c in [s for (_,s) in self.store[othercores]]:
logging.info('Not considering %d, as message was already sent' % c)
consider = False
# Check if the node is already active (e.g. the root,
# to which no one sent a message)
if c in cores_active:
logging.info('Not considering %d, already active' % c)
consider = False
# If we consider this core, its node is inactive, which
# means that c is inactive itself.
assert not consider or not c in cores_active
if consider:
# build (cost, core) tuple
# Here, we use send + receive time as the cost, as we are
# looking for a metric that estimates the total cost of
# sending a message across - not just from the perspective
# of the sender
inactive_nodes.append((self.mod.get_send_cost(sending_node, c, False, False)+\
self.mod.get_receive_cost(sending_node, c), c))
logging.info('%s %d -> %d, as node_active=%d and same_node=%d' % \
('Considering' if consider else 'Not sending', \
sending_node, c, node_active, same_node))
logging.info("inactive_nodes from %d with cost: %s" % \
(sending_node, inactive_nodes))
# Prefer expensive links
should_reverse = False if cheap_first else True
inactive_nodes.sort(key=lambda tup: tup[0], reverse=should_reverse)
logging.info(" sorted: %s" % (inactive_nodes))
if len(inactive_nodes)==0:
return []
# Return only one node
(_, next_r) = inactive_nodes[0]
logging.info('Choosing %d' % next_r)
# Replace target core (which is the most expensive node in the
# system) with the cheapest core on that node, for remote nodes.
# For local nodes, just send to the previously selected one,
# which is already the most expensive one on that node.
# Assumption: fully-connected model
# All nodes on receivers node and their cost for current sender
# Here, we only consider the send time, as we want to minimize time
# spent on the sender
if self.mod.on_same_numa_node(next_r, sending_node):
# LOCAL SEND
# --------------------------------------------------
next_hop = (self.mod.get_send_cost(sending_node, next_r, False, False), next_r)
else:
# REMOTE SEND
# --------------------------------------------------
# Add other cores from same remote node, but ONLY if they are
# multicast members. Essentially, this means that we selected
# the NUMA node to send to and now, we want to select the
# cheapest core on that node.
# --------------------------------------------------
c_all = self.mod.filter_active_cores(self.mod.get_numa_node(next_r), True)
c_all = [ c for c in c_all if not c in cores_active ]
c_cost = [ (self.mod.get_send_cost(sending_node, r, False, False), r) \
for r in c_all if r != sending_node ]
# Sort, cheapest node first
c_cost.sort(key=lambda tup: tup[0])
logging.info(('Other cores on that node: %s ' % str(c_cost)))
# Pick first - but list returned needs to have same length
# as number of inactive nodes
next_hop = (c_cost[0][0], c_cost[0][1])
# Remember choice
assert next_hop not in self.store[sending_node]
# Otherwise, we already sent a message to the same core
self.store[sending_node].append(next_hop)
logging.info(("Targets from", sending_node, ":", self.store[sending_node]))
return [next_hop]
def get_final_schedule(self, sending_node, active_nodes=None):
"""Return schedule previously found by iterative find_schedule calls.
Note: the final schedule does NOT need the cost! Also,
reordering does NOT need the cost. The cost here is what was
previously stored in self.store
"""
try:
res = [(None, r) for (_, r) in self.store[sending_node]]
logging.info(('Node', sending_node, 'is sending a message to', \
[ r for (_, r) in res ]))
return res
except KeyError:
logging.info(('Node', sending_node, 'is not sending any message'))
return []
def next_eval(self):
logging.info(('--------------------------------------------------'))
logging.info(('FINISHED'))
logging.info(('--------------------------------------------------'))
self.finished = True
def visualize(self, m, topo):
if not config.args.debug:
return
import draw
d = draw.Output('visu.tex', m, topo)
_, t_avail = self.simulate_current(visu=d)
d.finalize(int(max([ t for (__, t) in t_avail.items()])))
d.generate_image()
raw_input("Press the <ENTER> key to continue...")
import subprocess, shlex
subprocess.check_call(shlex.split('cp visu.png visu-old.png'))
|
|
# Author: David Decotigny 2008 Oct 3
# @brief Routines to determine which new objects are reachable
# between 2 points in the code
import gc, cPickle as pickle, weakref, sys, traceback
#
# Method 1: use weak ref to track new live objects
# Advantages: we have live pointers to the new live objects. And fast
# Drawbacks: doesn't track many types (such as list, dict, etc.) but
# generally this is not a problem because, if they contain
# sub-objects, those sub-objects are most probably track-able
#
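# Minimal usage sketch for method 1 (see _test1 below for a full example):
#   tracker = RefTracker()
#   ...  # run the code under test, which allocates objects
#   tracker.scan_and_print_new_objs("after running the code under test")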
class RefTracker(object):
"""
The scan() method will apply the given callback to the list of
new objects created since last call to scan() (or since the
construction, for the 1st time).
"""
def __init__(self):
self._not_tracked_types = set()
self._current_refs = dict()
self.scan()
def _get_objects(self):
return gc.get_objects()
def _scan(self, callback_new_object):
"""
This is NOT MT-safe and will not work for most builtin types
"""
objs = self._get_objects()
# First: remove the objects that are not available anymore
to_remove = []
for oid, ref in self._current_refs.iteritems():
if ref() is None:
to_remove.append(oid)
for oid in to_remove:
del self._current_refs[oid]
del to_remove
# Create the list of objects that are brand new:
for obj in objs:
try:
my_ref = self._current_refs[id(obj)]
# The object was already recorded last time.
# If the recorded object were not the current one,
# it would mean that the recorded object had been
# deallocated... this is caught by the previous loop
#
# Do some sanity checks, just to make sure:
assert my_ref() is not None
assert my_ref() is obj  # identity, not mere equality
except KeyError:
# This is a new object. Try to make a weak-ref out of it:
try:
wref = weakref.ref(obj)
except TypeError:
# Track only weak-ref-friendly objects, remember
# the types of the objects we couldn't weak-reference:
self._not_tracked_types.add(str(type(obj)))
continue
# Ok, good, we have a weak ref. Record it:
self._current_refs[id(obj)] = wref
# We also want to know that it's a new thing
try:
if callback_new_object:
callback_new_object(obj)
del obj
except:
traceback.print_exc()
def scan(self, callback_new_object = None):
"""Call the callback on each new object"""
# We need this in order to free the refs still held
# by _scan due to the callback (approx explanation...)
gc.collect()
self._scan(callback_new_object)
gc.collect()
@property
def not_tracked_types(self):
"""Return the list of type names of the objects that could not
be tracked"""
return self._not_tracked_types
@staticmethod
def _print_new_obj(obj):
"""Callback used by scan_and_print_new_objs"""
print "New obj:", repr(obj)
def scan_and_print_new_objs(self, msg = None):
# Print list of new objs, making sure that the list is
# correctly garbage-collected by the GC
print "\n# -- %s:" % (msg or "New objects")
self.scan(self._print_new_obj)
print "# ---------------\n"
#
# Method 2: Keep track of the garbage list
# Advantages: we have live pointers to the new live objects. And fast
# Drawbacks: will only show the objects /after/ the GC has tried to
# reclaim them, not as soon as they have been
# created. Still useful to debug leaks... But: are we sure
# that lost objects are only found in cycles ??? Same
# type restrictions as for method 1 ???
#
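# Usage is identical to RefTracker (see _test2 below); the only
# difference is that _get_objects() scans gc.garbage instead of all
# objects known to the collector.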
class GarbageTracker(RefTracker):
def _get_objects(self):
return gc.garbage
#
# Method 3: approximate method storing signatures of objects to a file
# and comparing the signatures. The signature consists of a pair
# object id / str(type(obj))
# Advantages: all object types can potentially be tracked. Can allow
# basic offline analysis
# Drawbacks: might not see some new objects if they are at the same address
# as previous ones having the same signature. Slow
#
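# Sketch of the intended offline workflow (the file name is illustrative):
#   make_gc_snapShot('/tmp/gc-snapshot', 'before')
#   ...  # run the code under test
#   make_gc_snapShot('/tmp/gc-snapshot', 'after')
#   snaps = read_snapshots('/tmp/gc-snapshot')
#   new_signatures = snaps['after'] - snaps['before']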
first_time = True
def make_gc_snapShot(filename, name):
"""Append the signatures to a file, giving them the given
'name'. A signature is a pair object_id / type_name"""
global first_time
if first_time:
gc.collect()
first_time = False
contents = []
for o in gc.get_objects():
try:
tname = o.__class__.__name__
except AttributeError:
tname = str(type(o))
contents.append((id(o), tname))
del tname
f = open(filename, 'a')
pickle.dump((name, contents), f)
f.close()
del contents
del f
class GCSnapshot(object):
"""Used to read a set of signatures from the file"""
def __init__(self, stream):
self.name, contents = pickle.load(stream)
self._contents = set(contents)
def __sub__(self, other):
"""Give the differences between 2 sets of
signatures. Return a set of pairs object_id /
type_name"""
return self._contents - other._contents
def reach(self, ids):
"""
\param ids Iterable of object id, as returned by x[0],
with x in the result of (snapshot2 - snapshot1)
Return a dict id -> object with that id currently known.
The objects recorded with these ids might have been
replaced by new ones... so we might end up seeing objects
that don't correspond to the original ones. This is
especially true after a gc.collect()
"""
result = dict()
for obj in gc.get_objects():
if id(obj) in ids:
result[id(obj)] = obj
return result
def read_snapshots(filename):
"""Sequentially reads the sets of signatures from a file. For
each set of signatures, a GCSnapshot is created with the
stored name. Return a dict mapping set name -> GCSnapshot object"""
result = dict()
f = open(filename, 'r')
while 1:
try:
snap = GCSnapshot(f)
result[snap.name] = snap
except (EOFError, pickle.UnpicklingError):
break
f.close()
return result
#### BEGIN: ONLY FOR THE TESTS
class Dummy:
def __init__(self):
print "INFO: ctor", self
def __del__(self):
print "INFO: dtor", self
# A pair of mutually-referencing objects with __del__ methods
# See http://docs.python.org/library/gc.html#gc.garbage
# for an explanation why they are not automatically reclaimable
class ObjectReferencer:
def __init__( self, obj ):
print "INFO: ctor", self
self.reference = obj
def __del__(self):
print "INFO: dtor", self
class ReferencerCreator:
def __init__( self ):
print "INFO: ctor", self
self.attribute = ObjectReferencer( self )
def __del__(self):
print "INFO: dtor", self
def break_cycle(self):
# Necessary to break the cycle that prevents the GC from
# doing its job
print "INFO: break_cycle", self
self.attribute = None
def _test1():
"""Tests for method 1 (RefTracker)"""
print "*** Method 1 (RefTracker) ***"
r = RefTracker()
d = Dummy()
print "del dummy now..."
del d
r.scan_and_print_new_objs("After creation/del of Dummy()")
# Contains a cycle: will not be freed by GC...
o = ReferencerCreator()
print "del obj now..."
del o
r.scan_and_print_new_objs("After creation/del of ReferencerCreator")
# The same, but we break the cycle
o = ReferencerCreator()
print "break_cycle now..."
o.break_cycle()
print "del obj now..."
del o
r.scan_and_print_new_objs("After creation/break_cycle/del of ReferencerCreator")
print "Types not tracked:"
for typ in r.not_tracked_types:
print " %s" % typ
print "End of test method 1."
def _test2():
"""Tests for method 2 (GarbageTracker)"""
print "*** Method 2 (GarbageTracker) ***"
r = GarbageTracker()
d = Dummy()
print "del dummy now..."
del d
r.scan_and_print_new_objs("After creation/del of Dummy()")
# Contains a cycle: will not be freed by GC...
o = ReferencerCreator()
print "del obj now..."
del o
r.scan_and_print_new_objs("After creation/del of ReferencerCreator")
# The same, but we break the cycle
o = ReferencerCreator()
print "break_cycle now..."
o.break_cycle()
print "del obj now..."
del o
r.scan_and_print_new_objs("After creation/break_cycle/del of ReferencerCreator")
print "Types not tracked:"
for typ in r.not_tracked_types:
print " %s" % typ
print "End of test method 2."
def _test3():
"""Tests for method 3 (compare signatures)"""
import os
print "*** Method 3 (compare signatures) ***"
fname = "/tmp/gc-%s-snapshot" % os.environ["USER"]
make_gc_snapShot(fname, "0")
make_gc_snapShot(fname, "1")
l = list()
l.append(l)
make_gc_snapShot(fname, "2")
l.append(42)
t = ReferencerCreator()
make_gc_snapShot(fname, "3")
# Now analyzing
snaps = read_snapshots(fname)
os.remove(fname)
print "Between 2 and 1, diff is:"
diff21 = snaps["2"] - snaps["1"]
for d in diff21:
print " ", d
print "Between 2 and 1, diff as live objects is:"
for obj in snaps["3"].reach([d[0] for d in diff21]).itervalues():
print " ", obj
print "Between 3 and 2, diff is:"
diff32 = snaps["3"] - snaps["2"]
for d in diff32:
print " ", d
print "Between 3 and 2, diff as live objects is:"
for obj in snaps["3"].reach([d[0] for d in diff32]).itervalues():
print " ", obj
print "Between 3 and 1, diff is:"
diff31 = snaps["3"] - snaps["1"]
for d in diff31:
print " ", d
print "Between 3 and 1, diff as live objects is:"
for obj in snaps["3"].reach([d[0] for d in diff31]).itervalues():
print " ", obj
print "End of test method 3."
#### END: ONLY FOR THE TESTS
if __name__ == "__main__":
_test1()
_test2()
_test3()
print "Bye."
|
|
#!/usr/bin/python
# Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A package is a JSON file describing a list of package archives."""
import json
import os
import posixpath
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import pynacl.file_tools
import pynacl.gsd_storage
import archive_info
import error
PACKAGE_KEY_ARCHIVES = 'archives'
PACKAGE_KEY_VERSION = 'version'
CURRENT_PACKAGE_VERSION = 1
def ReadPackageFile(package_file):
"""Returns a PackageInfoTuple representation of a JSON package file."""
with open(package_file, 'rt') as f:
json_value = json.load(f)
  # TODO(dyen): Temporarily support the old format, where a package was a
  # plain list of archives.
if isinstance(json_value, list):
return { PACKAGE_KEY_ARCHIVES: json_value, PACKAGE_KEY_VERSION: 0 }
else:
return json_value
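# For illustration (hypothetical contents): an old-format package file is a
# plain JSON list such as ["archive1", "archive2"], which ReadPackageFile
# normalizes to {"archives": ["archive1", "archive2"], "version": 0}, while
# a current-format file already stores that dictionary form directly.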
def GetFileBaseName(filename):
"""Removes all extensions from a file.
(Note: os.path.splitext() only removes the last extension).
"""
first_ext = filename.find('.')
if first_ext != -1:
filename = filename[:first_ext]
return filename
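# Example (hypothetical filename): GetFileBaseName('toolchain.tar.gz')
# returns 'toolchain', whereas os.path.splitext() alone would only strip
# '.gz' and leave 'toolchain.tar'.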
def GetLocalPackageName(local_package_file):
"""Returns the package name given a local package file."""
package_basename = os.path.basename(local_package_file)
return GetFileBaseName(package_basename)
def GetRemotePackageName(remote_package_file):
"""Returns the package name given a remote posix based package file."""
package_basename = posixpath.basename(remote_package_file)
return GetFileBaseName(package_basename)
def DownloadPackageInfoFiles(local_package_file, remote_package_file,
downloader=None):
"""Downloads all package info files from a downloader.
Downloads a package file from the cloud along with all of the archive
info files. Archive info files are expected to be in a directory with the
name of the package along side the package file. Files will be downloaded
in the same structure.
Args:
local_package_file: Local package file where root file will live.
remote_package_file: Remote package URL to download from.
downloader: Optional downloader if standard HTTP one should not be used.
"""
if downloader is None:
downloader = pynacl.gsd_storage.HttpDownload
pynacl.file_tools.MakeParentDirectoryIfAbsent(local_package_file)
downloader(remote_package_file, local_package_file)
if not os.path.isfile(local_package_file):
raise error.Error('Could not download package file: %s.' %
remote_package_file)
package_data = ReadPackageFile(local_package_file)
archive_list = package_data[PACKAGE_KEY_ARCHIVES]
local_package_name = GetLocalPackageName(local_package_file)
remote_package_name = GetRemotePackageName(remote_package_file)
local_archive_dir = os.path.join(os.path.dirname(local_package_file),
local_package_name)
remote_archive_dir = posixpath.join(posixpath.dirname(remote_package_file),
remote_package_name)
pynacl.file_tools.MakeDirectoryIfAbsent(local_archive_dir)
for archive in archive_list:
archive_file = archive + '.json'
local_archive_file = os.path.join(local_archive_dir, archive_file)
remote_archive_file = posixpath.join(remote_archive_dir, archive_file)
downloader(remote_archive_file, local_archive_file)
if not os.path.isfile(local_archive_file):
raise error.Error('Could not download archive file: %s.' %
remote_archive_file)
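# Sketch of the resulting layout (all names hypothetical): downloading the
# remote package file '.../packages/nacl_x86.json' to the local path
# 'out/packages/nacl_x86.json' also fetches one info file per archive into
# a sibling directory named after the package:
#   out/packages/nacl_x86.json
#   out/packages/nacl_x86/<archive>.json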
def UploadPackageInfoFiles(storage, package_target, package_name,
remote_package_file, local_package_file,
skip_missing=False, annotate=False):
"""Uploads all package info files from a downloader.
Uploads a package file to the cloud along with all of the archive info
files. Archive info files are expected to be in a directory with the
name of the package along side the package file. Files will be uploaded
using the same file structure.
Args:
storage: Cloud storage object to store the files to.
remote_package_file: Remote package URL to upload to.
local_package_file: Local package file where root file lives.
skip_missing: Whether to skip missing archive files or error.
annotate: Whether to annotate build bot links.
Returns:
The URL where the root package file is located.
"""
package_data = ReadPackageFile(local_package_file)
archive_list = package_data[PACKAGE_KEY_ARCHIVES]
local_package_name = GetLocalPackageName(local_package_file)
remote_package_name = GetRemotePackageName(remote_package_file)
local_archive_dir = os.path.join(os.path.dirname(local_package_file),
local_package_name)
remote_archive_dir = posixpath.join(posixpath.dirname(remote_package_file),
remote_package_name)
num_archives = len(archive_list)
for index, archive in enumerate(archive_list):
archive_file = archive + '.json'
local_archive_file = os.path.join(local_archive_dir, archive_file)
remote_archive_file = posixpath.join(remote_archive_dir, archive_file)
if skip_missing and not os.path.isfile(local_archive_file):
continue
archive_url = storage.PutFile(local_archive_file, remote_archive_file)
if annotate:
print ('@@@STEP_LINK@download (%s/%s/%s.json [%d/%d])@%s@@@' %
(package_target, package_name, archive, index+1, num_archives,
archive_url))
package_url = storage.PutFile(local_package_file, remote_package_file)
if annotate:
print ('@@@STEP_LINK@download (%s/%s.json)@%s@@@' %
(package_target, package_name, package_url))
return package_url
class PackageInfo(object):
"""A package file is a list of package archives (usually .tar or .tgz files).
  PackageInfo will contain a list of ArchiveInfo objects; each ArchiveInfo
  will contain all the necessary information for an archive (name, URL,
  hash, etc.).
"""
def __init__(self, package_file=None, skip_missing=False):
self._archive_list = []
self._package_version = CURRENT_PACKAGE_VERSION
if package_file is not None:
self.LoadPackageFile(package_file, skip_missing)
def __eq__(self, other):
if type(self) != type(other):
return False
elif self.GetPackageVersion() != other.GetPackageVersion():
return False
archives1 = [archive.GetArchiveData() for archive in self.GetArchiveList()]
archives2 = [archive.GetArchiveData() for archive in other.GetArchiveList()]
return set(archives1) == set(archives2)
def __repr__(self):
return 'PackageInfo(%s)' % self.DumpPackageJson()
def LoadPackageFile(self, package_file, skip_missing=False):
"""Loads a package file into this object.
Args:
package_file: Filename or JSON dictionary.
"""
archive_names = None
self._archive_list = []
    # TODO(dyen): Temporarily support the old format, where a package was a
    # plain list of archives.
if isinstance(package_file, list) or isinstance(package_file, dict):
if isinstance(package_file, list):
self._package_version = 0
archive_list = package_file
else:
self._package_version = package_file[PACKAGE_KEY_VERSION]
archive_list = package_file[PACKAGE_KEY_ARCHIVES]
if archive_list:
if isinstance(archive_list[0], archive_info.ArchiveInfo):
# Setting a list of ArchiveInfo objects, no need to interpret JSON.
self._archive_list = archive_list
else:
# Assume to be JSON.
for archive_json in archive_list:
archive = archive_info.ArchiveInfo(archive_info_file=archive_json)
self._archive_list.append(archive)
elif isinstance(package_file, basestring):
package_data = ReadPackageFile(package_file)
self._package_version = package_data[PACKAGE_KEY_VERSION]
archive_names = package_data[PACKAGE_KEY_ARCHIVES]
package_name = GetLocalPackageName(package_file)
archive_dir = os.path.join(os.path.dirname(package_file), package_name)
for archive in archive_names:
arch_file = archive + '.json'
arch_path = os.path.join(archive_dir, arch_file)
if not os.path.isfile(arch_path):
if not skip_missing:
raise error.Error(
'Package (%s) points to invalid archive file (%s).' %
(package_file, arch_path))
archive_desc = archive_info.ArchiveInfo(name=archive)
else:
archive_desc = archive_info.ArchiveInfo(archive_info_file=arch_path)
self._archive_list.append(archive_desc)
else:
raise error.Error('Invalid load package file type (%s): %s.' %
(type(package_file), package_file))
def SavePackageFile(self, package_file):
"""Saves this object as a serialized JSON file.
Args:
package_file: File path where JSON file will be saved.
"""
package_name = GetLocalPackageName(package_file)
archive_dir = os.path.join(os.path.dirname(package_file), package_name)
pynacl.file_tools.RemoveDirectoryIfPresent(archive_dir)
os.makedirs(archive_dir)
archive_list = []
for archive in self.GetArchiveList():
archive_data = archive.GetArchiveData()
archive_list.append(archive_data.name)
archive_file = archive_data.name + '.json'
archive_path = os.path.join(archive_dir, archive_file)
archive.SaveArchiveInfoFile(archive_path)
package_json = {
PACKAGE_KEY_ARCHIVES: archive_list,
PACKAGE_KEY_VERSION: self._package_version
}
with open(package_file, 'wt') as f:
json.dump(package_json, f, sort_keys=True,
indent=2, separators=(',', ': '))
def DumpPackageJson(self):
"""Returns a dictionary representation of the JSON of this object."""
archives = [archive.DumpArchiveJson() for archive in self.GetArchiveList()]
return {
PACKAGE_KEY_ARCHIVES: archives,
PACKAGE_KEY_VERSION: self._package_version
}
def ClearArchiveList(self):
"""Clears this object so it represents no archives."""
self._archive_list = []
def AppendArchive(self, archive_info):
"""Append a package archive into this object"""
self._archive_list.append(archive_info)
def GetArchiveList(self):
"""Returns the sorted list of ARCHIVE_INFOs this object represents."""
return sorted(self._archive_list,
key=lambda archive : archive.GetArchiveData().name)
def GetPackageVersion(self):
"""Returns the version of this package."""
return self._package_version
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from migrate.changeset import UniqueConstraint
import sqlalchemy
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
from fusion.openstack.common.gettextutils import _ # noqa
from fusion.openstack.common import log as logging
from fusion.openstack.common import timeutils
LOG = logging.getLogger(__name__)
_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")
def sanitize_db_url(url):
match = _DBURL_REGEX.match(url)
if match:
return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):])
return url
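# For example, credentials in a URL matching the regex above are masked
# while the rest of the URL is preserved:
#   sanitize_db_url('mysql://user:secret@host/db')
#   -> 'mysql://****:****@host/db'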
class InvalidSortKey(Exception):
message = _("Sort key supplied was not valid.")
# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
sort_dir=None, sort_dirs=None):
"""Returns a query with sorting / pagination criteria added.
Pagination works by requiring a unique sort_key, specified by sort_keys.
(If sort_keys is not unique, then we risk looping through values.)
We use the last row in the previous page as the 'marker' for pagination.
So we must return values that follow the passed marker in the order.
With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-valued sort_key (k1, k2, k3), we must do this to repeat
the lexicographical ordering:
(k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
We also have to cope with different sort_directions.
Typically, the id of the last row is used as the client-facing pagination
marker, then the actual marker object must be fetched from the db and
passed in to us as marker.
:param query: the query object to which we should add paging/sorting
:param model: the ORM model class
:param limit: maximum number of items to return
:param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                    results after this value.
:param sort_dir: direction in which results should be sorted (asc, desc)
:param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
:rtype: sqlalchemy.orm.query.Query
:return: The query with sorting/pagination added.
"""
if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming it is 'id'
LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))
# Default the sort direction to ascending
if sort_dirs is None and sort_dir is None:
sort_dir = 'asc'
# Ensure a per-column sort direction
if sort_dirs is None:
sort_dirs = [sort_dir for _sort_key in sort_keys]
assert(len(sort_dirs) == len(sort_keys))
# Add sorting
for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
try:
sort_dir_func = {
'asc': sqlalchemy.asc,
'desc': sqlalchemy.desc,
}[current_sort_dir]
except KeyError:
raise ValueError(_("Unknown sort direction, "
"must be 'desc' or 'asc'"))
try:
sort_key_attr = getattr(model, current_sort_key)
except AttributeError:
raise InvalidSortKey()
query = query.order_by(sort_dir_func(sort_key_attr))
# Add pagination
if marker is not None:
marker_values = []
for sort_key in sort_keys:
v = getattr(marker, sort_key)
marker_values.append(v)
# Build up an array of sort criteria as in the docstring
criteria_list = []
for i in range(0, len(sort_keys)):
crit_attrs = []
for j in range(0, i):
model_attr = getattr(model, sort_keys[j])
crit_attrs.append((model_attr == marker_values[j]))
model_attr = getattr(model, sort_keys[i])
if sort_dirs[i] == 'desc':
crit_attrs.append((model_attr < marker_values[i]))
else:
crit_attrs.append((model_attr > marker_values[i]))
criteria = sqlalchemy.sql.and_(*crit_attrs)
criteria_list.append(criteria)
f = sqlalchemy.sql.or_(*criteria_list)
query = query.filter(f)
if limit is not None:
query = query.limit(limit)
return query
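# A minimal usage sketch (the model, session and marker names are
# hypothetical): fetch the next page of 20 rows ordered by
# (created_at, id), where `marker` is the ORM object for the last row of
# the previous page.
#
#   query = session.query(Instance)
#   page = paginate_query(query, Instance, limit=20,
#                         sort_keys=['created_at', 'id'],
#                         marker=marker, sort_dir='asc').all()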
def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db.
Needed because the models don't work for us in migrations
as models will be far out of sync with the current data.
"""
metadata = MetaData()
metadata.bind = engine
return Table(name, metadata, autoload=True)
class InsertFromSelect(UpdateBase):
"""Form the base for `INSERT INTO table (SELECT ... )` statement."""
def __init__(self, table, select):
self.table = table
self.select = select
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
"""Form the `INSERT INTO table (SELECT ... )` statement."""
return "INSERT INTO %s %s" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.select))
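# Usage sketch: copy every row of `table` into `new_table` with a single
# statement, exactly as the sqlite migration helpers below do.
#
#   ins = InsertFromSelect(new_table, table.select())
#   migrate_engine.execute(ins)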
class ColumnError(Exception):
"""Error raised when no column or an invalid column is found."""
def _get_not_supported_column(col_name_col_instance, column_name):
try:
column = col_name_col_instance[column_name]
except KeyError:
msg = _("Please specify column %s in col_name_col_instance "
"param. It is required because column has unsupported "
"type by sqlite).")
raise ColumnError(msg % column_name)
if not isinstance(column, Column):
msg = _("col_name_col_instance param has wrong type of "
"column instance for column %s It should be instance "
"of sqlalchemy.Column.")
raise ColumnError(msg % column_name)
return column
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
**col_name_col_instance):
"""Drop unique constraint from table.
    This method drops the UC from the table and works for mysql, postgresql
    and sqlite. In mysql and postgresql we are able to use the "alter table"
    construct.
    Sqlalchemy doesn't support some sqlite column types and replaces their
    type with NullType in metadata. We process these columns and replace
    NullType with the correct column type.
    :param migrate_engine: sqlalchemy engine
    :param table_name: name of the table that contains the unique constraint.
    :param uc_name: name of the unique constraint that will be dropped.
    :param columns: columns that are in the unique constraint.
    :param col_name_col_instance: contains pairs column_name=column_instance.
                           column_instance is an instance of Column. These
                           params are required only for columns that have
                           types unsupported by sqlite. For example
                           BigInteger.
    """
meta = MetaData()
meta.bind = migrate_engine
t = Table(table_name, meta, autoload=True)
if migrate_engine.name == "sqlite":
override_cols = [
_get_not_supported_column(col_name_col_instance, col.name)
for col in t.columns
if isinstance(col.type, NullType)
]
for col in override_cols:
t.columns.replace(col)
uc = UniqueConstraint(*columns, table=t, name=uc_name)
uc.drop()
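# A usage sketch (table, constraint and column names are hypothetical):
#
#   drop_unique_constraint(migrate_engine, 'volumes',
#                          'uniq_volumes0name', 'name')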
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
use_soft_delete, *uc_column_names):
"""Drop all old rows having the same values for columns in uc_columns.
This method drop (or mark ad `deleted` if use_soft_delete is True) old
duplicate rows form table with name `table_name`.
:param migrate_engine: Sqlalchemy engine
:param table_name: Table with duplicates
:param use_soft_delete: If True - values will be marked as `deleted`,
if False - values will be removed from table
:param uc_column_names: Unique constraint columns
"""
meta = MetaData()
meta.bind = migrate_engine
table = Table(table_name, meta, autoload=True)
columns_for_group_by = [table.c[name] for name in uc_column_names]
columns_for_select = [func.max(table.c.id)]
columns_for_select.extend(columns_for_group_by)
duplicated_rows_select = select(columns_for_select,
group_by=columns_for_group_by,
having=func.count(table.c.id) > 1)
for row in migrate_engine.execute(duplicated_rows_select):
# NOTE(boris-42): Do not remove row that has the biggest ID.
delete_condition = table.c.id != row[0]
is_none = None # workaround for pyflakes
delete_condition &= table.c.deleted_at == is_none
for name in uc_column_names:
delete_condition &= table.c[name] == row[name]
rows_to_delete_select = select([table.c.id]).where(delete_condition)
for row in migrate_engine.execute(rows_to_delete_select).fetchall():
LOG.info(_("Deleting duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name))
if use_soft_delete:
delete_statement = table.update().\
where(delete_condition).\
values({
'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()
})
else:
delete_statement = table.delete().where(delete_condition)
migrate_engine.execute(delete_statement)
def _get_default_deleted_value(table):
if isinstance(table.c.id.type, Integer):
return 0
if isinstance(table.c.id.type, String):
return ""
raise ColumnError(_("Unsupported id columns type"))
def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
table = get_table(migrate_engine, table_name)
insp = reflection.Inspector.from_engine(migrate_engine)
real_indexes = insp.get_indexes(table_name)
existing_index_names = dict(
[(index['name'], index['column_names']) for index in real_indexes])
# NOTE(boris-42): Restore indexes on `deleted` column
for index in indexes:
if 'deleted' not in index['column_names']:
continue
name = index['name']
if name in existing_index_names:
column_names = [table.c[c] for c in existing_index_names[name]]
old_index = Index(name, *column_names, unique=index["unique"])
old_index.drop(migrate_engine)
column_names = [table.c[c] for c in index['column_names']]
new_index = Index(index["name"], *column_names, unique=index["unique"])
new_index.create(migrate_engine)
def change_deleted_column_type_to_boolean(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_boolean_sqlite(
migrate_engine, table_name, **col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
old_deleted = Column('old_deleted', Boolean, default=False)
old_deleted.create(table, populate_default=False)
table.update().\
where(table.c.deleted == table.c.id).\
values(old_deleted=True).\
execute()
table.c.deleted.drop()
table.c.old_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
**col_name_col_instance):
insp = reflection.Inspector.from_engine(migrate_engine)
table = get_table(migrate_engine, table_name)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', Boolean, default=0)
columns.append(column_copy)
constraints = [constraint.copy() for constraint in table.constraints]
meta = table.metadata
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
c_select = []
for c in table.c:
if c.name != "deleted":
c_select.append(c)
else:
c_select.append(table.c.deleted == table.c.id)
ins = InsertFromSelect(new_table, select(c_select))
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
new_table.update().\
where(new_table.c.deleted == new_table.c.id).\
values(deleted=True).\
execute()
def change_deleted_column_type_to_id_type(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_id_type_sqlite(
migrate_engine, table_name, **col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
new_deleted = Column('new_deleted', table.c.id.type,
default=_get_default_deleted_value(table))
new_deleted.create(table, populate_default=True)
deleted = True # workaround for pyflakes
table.update().\
where(table.c.deleted == deleted).\
values(new_deleted=table.c.id).\
execute()
table.c.deleted.drop()
table.c.new_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
**col_name_col_instance):
    # NOTE(boris-42): sqlalchemy-migrate can't drop a column with check
    #                 constraints in a sqlite DB and our `deleted` column has
    #                 2 check constraints. So there is only one way to remove
    #                 these constraints:
# 1) Create new table with the same columns, constraints
# and indexes. (except deleted column).
# 2) Copy all data from old to new table.
# 3) Drop old table.
# 4) Rename new table to old table name.
insp = reflection.Inspector.from_engine(migrate_engine)
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
default_deleted_value = _get_default_deleted_value(table)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', table.c.id.type,
default=default_deleted_value)
columns.append(column_copy)
def is_deleted_column_constraint(constraint):
        # NOTE(boris-42): There is no other way to check whether a
        #                 CheckConstraint is associated with the deleted column.
if not isinstance(constraint, CheckConstraint):
return False
sqltext = str(constraint.sqltext)
return (sqltext.endswith("deleted in (0, 1)") or
sqltext.endswith("deleted IN (:deleted_1, :deleted_2)"))
constraints = []
for constraint in table.constraints:
if not is_deleted_column_constraint(constraint):
constraints.append(constraint.copy())
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
ins = InsertFromSelect(new_table, table.select())
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
deleted = True # workaround for pyflakes
new_table.update().\
where(new_table.c.deleted == deleted).\
values(deleted=new_table.c.id).\
execute()
# NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
deleted = False # workaround for pyflakes
new_table.update().\
where(new_table.c.deleted == deleted).\
values(deleted=default_deleted_value).\
execute()
|
|
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
from pandas import (
DataFrame,
DatetimeIndex,
Series,
)
import pandas._testing as tm
from pandas.core.window import Expanding
def test_doc_string():
df = DataFrame({"B": [0, 1, 2, np.nan, 4]})
df
df.expanding(2).sum()
@pytest.mark.filterwarnings(
"ignore:The `center` argument on `expanding` will be removed in the future"
)
def test_constructor(frame_or_series):
# GH 12669
c = frame_or_series(range(5)).expanding
# valid
c(min_periods=1)
c(min_periods=1, center=True)
c(min_periods=1, center=False)
@pytest.mark.parametrize("w", [2.0, "foo", np.array([2])])
@pytest.mark.filterwarnings(
"ignore:The `center` argument on `expanding` will be removed in the future"
)
def test_constructor_invalid(frame_or_series, w):
# not valid
c = frame_or_series(range(5)).expanding
msg = "min_periods must be an integer"
with pytest.raises(ValueError, match=msg):
c(min_periods=w)
msg = "center must be a boolean"
with pytest.raises(ValueError, match=msg):
c(min_periods=1, center=w)
@pytest.mark.parametrize("method", ["std", "mean", "sum", "max", "min", "var"])
def test_numpy_compat(method):
# see gh-12811
e = Expanding(Series([2, 4, 6]))
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(dtype=np.float64)
@pytest.mark.parametrize(
"expander",
[
1,
pytest.param(
"ls",
marks=pytest.mark.xfail(
reason="GH#16425 expanding with offset not supported"
),
),
],
)
def test_empty_df_expanding(expander):
# GH 15819 Verifies that datetime and integer expanding windows can be
# applied to empty DataFrames
expected = DataFrame()
result = DataFrame().expanding(expander).sum()
tm.assert_frame_equal(result, expected)
# Verifies that datetime and integer expanding windows can be applied
# to empty DataFrames with datetime index
expected = DataFrame(index=DatetimeIndex([]))
result = DataFrame(index=DatetimeIndex([])).expanding(expander).sum()
tm.assert_frame_equal(result, expected)
def test_missing_minp_zero():
# https://github.com/pandas-dev/pandas/pull/18921
# minp=0
x = Series([np.nan])
result = x.expanding(min_periods=0).sum()
expected = Series([0.0])
tm.assert_series_equal(result, expected)
# minp=1
result = x.expanding(min_periods=1).sum()
expected = Series([np.nan])
tm.assert_series_equal(result, expected)
def test_expanding_axis(axis_frame):
# see gh-23372.
df = DataFrame(np.ones((10, 20)))
axis = df._get_axis_number(axis_frame)
if axis == 0:
expected = DataFrame(
{i: [np.nan] * 2 + [float(j) for j in range(3, 11)] for i in range(20)}
)
else:
# axis == 1
expected = DataFrame([[np.nan] * 2 + [float(i) for i in range(3, 21)]] * 10)
result = df.expanding(3, axis=axis_frame).sum()
tm.assert_frame_equal(result, expected)
def test_expanding_count_with_min_periods(frame_or_series):
# GH 26996
result = frame_or_series(range(5)).expanding(min_periods=3).count()
expected = frame_or_series([np.nan, np.nan, 3.0, 4.0, 5.0])
tm.assert_equal(result, expected)
def test_expanding_count_default_min_periods_with_null_values(frame_or_series):
# GH 26996
values = [1, 2, 3, np.nan, 4, 5, 6]
expected_counts = [1.0, 2.0, 3.0, 3.0, 4.0, 5.0, 6.0]
result = frame_or_series(values).expanding().count()
expected = frame_or_series(expected_counts)
tm.assert_equal(result, expected)
def test_expanding_count_with_min_periods_exceeding_series_length(frame_or_series):
# GH 25857
result = frame_or_series(range(5)).expanding(min_periods=6).count()
expected = frame_or_series([np.nan, np.nan, np.nan, np.nan, np.nan])
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"df,expected,min_periods",
[
(
DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),
],
3,
),
(
DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),
],
2,
),
(
DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),
],
1,
),
(DataFrame({"A": [1], "B": [4]}), [], 2),
(DataFrame(), [({}, [])], 1),
(
DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
[
({"A": [1.0], "B": [np.nan]}, [0]),
({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),
],
3,
),
(
DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
[
({"A": [1.0], "B": [np.nan]}, [0]),
({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),
],
2,
),
(
DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
[
({"A": [1.0], "B": [np.nan]}, [0]),
({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),
],
1,
),
],
)
def test_iter_expanding_dataframe(df, expected, min_periods):
# GH 11704
expected = [DataFrame(values, index=index) for (values, index) in expected]
for (expected, actual) in zip(expected, df.expanding(min_periods)):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize(
"ser,expected,min_periods",
[
(Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], 3),
(Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], 2),
(Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], 1),
(Series([1, 2]), [([1], [0]), ([1, 2], [0, 1])], 2),
(Series([np.nan, 2]), [([np.nan], [0]), ([np.nan, 2], [0, 1])], 2),
(Series([], dtype="int64"), [], 2),
],
)
def test_iter_expanding_series(ser, expected, min_periods):
# GH 11704
expected = [Series(values, index=index) for (values, index) in expected]
for (expected, actual) in zip(expected, ser.expanding(min_periods)):
tm.assert_series_equal(actual, expected)
def test_center_deprecate_warning():
# GH 20647
df = DataFrame()
with tm.assert_produces_warning(FutureWarning):
df.expanding(center=True)
with tm.assert_produces_warning(FutureWarning):
df.expanding(center=False)
with tm.assert_produces_warning(None):
df.expanding()
def test_expanding_sem(frame_or_series):
# GH: 26476
obj = frame_or_series([0, 1, 2])
result = obj.expanding().sem()
if isinstance(result, DataFrame):
result = Series(result[0].values)
expected = Series([np.nan] + [0.707107] * 2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("method", ["skew", "kurt"])
def test_expanding_skew_kurt_numerical_stability(method):
# GH: 6929
s = Series(np.random.rand(10))
expected = getattr(s.expanding(3), method)()
s = s + 5000
result = getattr(s.expanding(3), method)()
tm.assert_series_equal(result, expected)
|
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GDB support for Chrome types.
Add this to your gdb by amending your ~/.gdbinit as follows:
python
import sys
sys.path.insert(0, "/path/to/tools/gdb/")
import gdb_chrome
end
This module relies on the WebKit gdb module already existing in
your Python path.
Use
(gdb) p /r any_variable
to print |any_variable| without using any printers.
"""
import datetime
import gdb
import gdb.printing
import webkit
# When debugging this module, set the below variable to True, and then use
# (gdb) python del sys.modules['gdb_chrome']
# (gdb) python import gdb_chrome
# to reload.
_DEBUGGING = False
pp_set = gdb.printing.RegexpCollectionPrettyPrinter("chromium")
def typed_ptr(ptr):
"""Prints a pointer along with its exact type.
By default, gdb would print just the address, which takes more
steps to interpret.
"""
# Returning this as a cast expression surrounded by parentheses
# makes it easier to cut+paste inside of gdb.
return '((%s)%s)' % (ptr.dynamic_type, ptr)
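# For example, a pointer gdb would print as 0x7fff5c00 is instead rendered
# as ((content::RenderProcessHostImpl *) 0x7fff5c00), which can be pasted
# straight back into gdb expressions (the address here is made up).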
def yield_fields(val):
"""Use this in a printer's children() method to print an object's fields.
e.g.
def children():
for result in yield_fields(self.val):
yield result
"""
try:
fields = val.type.target().fields()
except:
fields = val.type.fields()
for field in fields:
if field.is_base_class:
yield (field.name, val.cast(gdb.lookup_type(field.name)))
else:
yield (field.name, val[field.name])
class Printer(object):
def __init__(self, val):
self.val = val
class StringPrinter(Printer):
def display_hint(self):
return 'string'
class String16Printer(StringPrinter):
def to_string(self):
return webkit.ustring_to_string(self.val['_M_dataplus']['_M_p'])
pp_set.add_printer(
'string16',
'^string16|std::basic_string<(unsigned short|base::char16).*>$',
    String16Printer)
class GURLPrinter(StringPrinter):
def to_string(self):
return self.val['spec_']
pp_set.add_printer('GURL', '^GURL$', GURLPrinter)
class FilePathPrinter(StringPrinter):
def to_string(self):
return self.val['path_']['_M_dataplus']['_M_p']
pp_set.add_printer('FilePath', '^FilePath$', FilePathPrinter)
class SizePrinter(Printer):
def to_string(self):
return '%sx%s' % (self.val['width_'], self.val['height_'])
pp_set.add_printer('gfx::Size', '^gfx::(Size|SizeF|SizeBase<.*>)$', SizePrinter)
class PointPrinter(Printer):
def to_string(self):
return '%s,%s' % (self.val['x_'], self.val['y_'])
pp_set.add_printer('gfx::Point', '^gfx::(Point|PointF|PointBase<.*>)$',
PointPrinter)
class RectPrinter(Printer):
def to_string(self):
return '%s %s' % (self.val['origin_'], self.val['size_'])
pp_set.add_printer('gfx::Rect', '^gfx::(Rect|RectF|RectBase<.*>)$',
RectPrinter)
class SmartPtrPrinter(Printer):
def to_string(self):
return '%s%s' % (self.typename, typed_ptr(self.ptr()))
class ScopedRefPtrPrinter(SmartPtrPrinter):
typename = 'scoped_refptr'
def ptr(self):
return self.val['ptr_']
pp_set.add_printer('scoped_refptr', '^scoped_refptr<.*>$', ScopedRefPtrPrinter)
class LinkedPtrPrinter(SmartPtrPrinter):
typename = 'linked_ptr'
def ptr(self):
return self.val['value_']
pp_set.add_printer('linked_ptr', '^linked_ptr<.*>$', LinkedPtrPrinter)
class WeakPtrPrinter(SmartPtrPrinter):
typename = 'base::WeakPtr'
def ptr(self):
flag = ScopedRefPtrPrinter(self.val['ref_']['flag_']).ptr()
if flag and flag['is_valid_']:
return self.val['ptr_']
return gdb.Value(0).cast(self.val['ptr_'].type)
pp_set.add_printer('base::WeakPtr', '^base::WeakPtr<.*>$', WeakPtrPrinter)
class CallbackPrinter(Printer):
"""Callbacks provide no usable information so reduce the space they take."""
def to_string(self):
return '...'
pp_set.add_printer('base::Callback', '^base::Callback<.*>$', CallbackPrinter)
class LocationPrinter(Printer):
def to_string(self):
return '%s()@%s:%s' % (self.val['function_name_'].string(),
self.val['file_name_'].string(),
self.val['line_number_'])
pp_set.add_printer('tracked_objects::Location', '^tracked_objects::Location$',
LocationPrinter)
class PendingTaskPrinter(Printer):
def to_string(self):
return 'From %s' % (self.val['posted_from'],)
def children(self):
for result in yield_fields(self.val):
if result[0] not in ('task', 'posted_from'):
yield result
pp_set.add_printer('base::PendingTask', '^base::PendingTask$',
PendingTaskPrinter)
class LockPrinter(Printer):
def to_string(self):
try:
if self.val['owned_by_thread_']:
return 'Locked by thread %s' % self.val['owning_thread_id_']
else:
return 'Unlocked'
except gdb.error:
return 'Unknown state'
pp_set.add_printer('base::Lock', '^base::Lock$', LockPrinter)
class TimeDeltaPrinter(object):
def __init__(self, val):
self._timedelta = datetime.timedelta(microseconds=int(val['delta_']))
def timedelta(self):
return self._timedelta
def to_string(self):
return str(self._timedelta)
pp_set.add_printer('base::TimeDelta', '^base::TimeDelta$', TimeDeltaPrinter)
class TimeTicksPrinter(TimeDeltaPrinter):
def __init__(self, val):
self._timedelta = datetime.timedelta(microseconds=int(val['ticks_']))
pp_set.add_printer('base::TimeTicks', '^base::TimeTicks$', TimeTicksPrinter)
class TimePrinter(object):
def __init__(self, val):
timet_offset = gdb.parse_and_eval(
'base::Time::kTimeTToMicrosecondsOffset')
self._datetime = (datetime.datetime.fromtimestamp(0) +
datetime.timedelta(microseconds=
int(val['us_'] - timet_offset)))
def datetime(self):
return self._datetime
def to_string(self):
return str(self._datetime)
pp_set.add_printer('base::Time', '^base::Time$', TimePrinter)
class IpcMessagePrinter(Printer):
def header(self):
return self.val['header_'].cast(
gdb.lookup_type('IPC::Message::Header').pointer())
def to_string(self):
message_type = self.header()['type']
return '%s of kind %s line %s' % (
self.val.dynamic_type,
(message_type >> 16).cast(gdb.lookup_type('IPCMessageStart')),
message_type & 0xffff)
def children(self):
yield ('header_', self.header().dereference())
yield ('capacity_after_header_', self.val['capacity_after_header_'])
for field in self.val.type.fields():
if field.is_base_class:
continue
yield (field.name, self.val[field.name])
pp_set.add_printer('IPC::Message', '^IPC::Message$', IpcMessagePrinter)
class NotificationRegistrarPrinter(Printer):
def to_string(self):
try:
registrations = self.val['registered_']
vector_finish = registrations['_M_impl']['_M_finish']
vector_start = registrations['_M_impl']['_M_start']
if vector_start == vector_finish:
return 'Not watching notifications'
if vector_start.dereference().type.sizeof == 0:
# Incomplete type: b/8242773
return 'Watching some notifications'
return ('Watching %s notifications; '
'print %s->registered_ for details') % (
int(vector_finish - vector_start),
typed_ptr(self.val.address))
except gdb.error:
return 'NotificationRegistrar'
pp_set.add_printer('content::NotificationRegistrar',
'^content::NotificationRegistrar$',
NotificationRegistrarPrinter)
class SiteInstanceImplPrinter(object):
def __init__(self, val):
self.val = val.cast(val.dynamic_type)
def to_string(self):
return 'SiteInstanceImpl@%s for %s' % (
self.val.address, self.val['site_'])
def children(self):
yield ('id_', self.val['id_'])
yield ('has_site_', self.val['has_site_'])
if self.val['browsing_instance_']['ptr_']:
yield ('browsing_instance_', self.val['browsing_instance_']['ptr_'])
if self.val['process_']:
yield ('process_', typed_ptr(self.val['process_']))
if self.val['render_process_host_factory_']:
yield ('render_process_host_factory_',
self.val['render_process_host_factory_'])
pp_set.add_printer('content::SiteInstanceImpl', '^content::SiteInstanceImpl$',
SiteInstanceImplPrinter)
class RenderProcessHostImplPrinter(object):
def __init__(self, val):
self.val = val.cast(val.dynamic_type)
def to_string(self):
pid = ''
try:
child_process_launcher_ptr = (
self.val['child_process_launcher_']['impl_']['data_']['ptr'])
if child_process_launcher_ptr:
context = (child_process_launcher_ptr['context_']['ptr_'])
if context:
pid = ' PID %s' % str(context['process_']['process_'])
except gdb.error:
# The definition of the Context type may not be available.
# b/8242773
pass
return 'RenderProcessHostImpl@%s%s' % (self.val.address, pid)
def children(self):
yield ('id_', self.val['id_'])
yield ('listeners_',
self.val['listeners_']['data_'])
yield ('worker_ref_count_', self.val['worker_ref_count_'])
yield ('fast_shutdown_started_', self.val['fast_shutdown_started_'])
yield ('deleting_soon_', self.val['deleting_soon_'])
yield ('pending_views_', self.val['pending_views_'])
yield ('visible_widgets_', self.val['visible_widgets_'])
yield ('backgrounded_', self.val['backgrounded_'])
yield ('widget_helper_', self.val['widget_helper_'])
yield ('is_initialized_', self.val['is_initialized_'])
yield ('browser_context_', typed_ptr(self.val['browser_context_']))
yield ('sudden_termination_allowed_',
self.val['sudden_termination_allowed_'])
yield ('ignore_input_events_', self.val['ignore_input_events_'])
yield ('is_guest_', self.val['is_guest_'])
pp_set.add_printer('content::RenderProcessHostImpl',
'^content::RenderProcessHostImpl$',
RenderProcessHostImplPrinter)
gdb.printing.register_pretty_printer(gdb, pp_set, replace=_DEBUGGING)
|
|
"""
HTML Widget classes
"""
import copy
import datetime
import re
import warnings
from collections import defaultdict
from itertools import chain
from django.conf import settings
from django.forms.utils import to_current_timezone
from django.templatetags.static import static
from django.utils import datetime_safe, formats
from django.utils.datastructures import OrderedSet
from django.utils.dates import MONTHS
from django.utils.formats import get_format
from django.utils.html import format_html, html_safe
from django.utils.safestring import mark_safe
from django.utils.topological_sort import (
CyclicDependencyError, stable_topological_sort,
)
from django.utils.translation import gettext_lazy as _
from .renderers import get_default_renderer
__all__ = (
'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'NumberInput',
'EmailInput', 'URLInput', 'PasswordInput', 'HiddenInput',
'MultipleHiddenInput', 'FileInput', 'ClearableFileInput', 'Textarea',
'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select',
'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget',
'SplitHiddenDateTimeWidget', 'SelectDateWidget',
)
MEDIA_TYPES = ('css', 'js')
class MediaOrderConflictWarning(RuntimeWarning):
pass
@html_safe
class Media:
def __init__(self, media=None, css=None, js=None):
if media is not None:
css = getattr(media, 'css', {})
js = getattr(media, 'js', [])
else:
if css is None:
css = {}
if js is None:
js = []
self._css_lists = [css]
self._js_lists = [js]
def __repr__(self):
return 'Media(css=%r, js=%r)' % (self._css, self._js)
def __str__(self):
return self.render()
@property
def _css(self):
css = defaultdict(list)
for css_list in self._css_lists:
for medium, sublist in css_list.items():
css[medium].append(sublist)
return {medium: self.merge(*lists) for medium, lists in css.items()}
@property
def _js(self):
return self.merge(*self._js_lists)
def render(self):
return mark_safe('\n'.join(chain.from_iterable(getattr(self, 'render_' + name)() for name in MEDIA_TYPES)))
def render_js(self):
return [
format_html(
'<script type="text/javascript" src="{}"></script>',
self.absolute_path(path)
) for path in self._js
]
def render_css(self):
# To keep rendering order consistent, we can't just iterate over items().
# We need to sort the keys, and iterate over the sorted list.
media = sorted(self._css)
return chain.from_iterable([
format_html(
'<link href="{}" type="text/css" media="{}" rel="stylesheet">',
self.absolute_path(path), medium
) for path in self._css[medium]
] for medium in media)
def absolute_path(self, path):
"""
Given a relative or absolute path to a static asset, return an absolute
path. An absolute path will be returned unchanged while a relative path
will be passed to django.templatetags.static.static().
"""
if path.startswith(('http://', 'https://', '/')):
return path
return static(path)
def __getitem__(self, name):
"""Return a Media object that only contains media of the given type."""
if name in MEDIA_TYPES:
return Media(**{str(name): getattr(self, '_' + name)})
raise KeyError('Unknown media type "%s"' % name)
@staticmethod
def merge(*lists):
"""
Merge lists while trying to keep the relative order of the elements.
Warn if the lists have the same elements in a different relative order.
For static assets it can be important to have them included in the DOM
in a certain order. In JavaScript you may not be able to reference a
global or in CSS you might want to override a style.
"""
dependency_graph = defaultdict(set)
all_items = OrderedSet()
for list_ in filter(None, lists):
head = list_[0]
# The first items depend on nothing but have to be part of the
# dependency graph to be included in the result.
dependency_graph.setdefault(head, set())
for item in list_:
all_items.add(item)
# No self dependencies
if head != item:
dependency_graph[item].add(head)
head = item
try:
return stable_topological_sort(all_items, dependency_graph)
except CyclicDependencyError:
warnings.warn(
'Detected duplicate Media files in an opposite order: {}'.format(
', '.join(repr(l) for l in lists)
), MediaOrderConflictWarning,
)
return list(all_items)
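    # A small illustration of merge(): shared elements pin the relative
    # order, so merge(['a.js', 'b.js'], ['b.js', 'c.js']) yields
    # ['a.js', 'b.js', 'c.js'], while conflicting orders such as
    # merge(['a.js', 'b.js'], ['b.js', 'a.js']) emit a
    # MediaOrderConflictWarning and fall back to first-seen order.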
def __add__(self, other):
combined = Media()
combined._css_lists = self._css_lists + other._css_lists
combined._js_lists = self._js_lists + other._js_lists
return combined
def media_property(cls):
def _media(self):
# Get the media property of the superclass, if it exists
sup_cls = super(cls, self)
try:
base = sup_cls.media
except AttributeError:
base = Media()
# Get the media definition for this class
definition = getattr(cls, 'Media', None)
if definition:
extend = getattr(definition, 'extend', True)
if extend:
if extend is True:
m = base
else:
m = Media()
for medium in extend:
m = m + base[medium]
return m + Media(definition)
return Media(definition)
return base
return property(_media)
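# Sketch of the resulting behaviour (widget and asset names hypothetical):
#
#   class MyWidget(TextInput):
#       class Media:
#           js = ['extra.js']
#
# MyWidget().media includes the parent's assets plus 'extra.js' because
# `extend` defaults to True; setting `extend = False` inside Media would
# start from an empty Media() instead.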
class MediaDefiningClass(type):
"""
Metaclass for classes that can have media definitions.
"""
def __new__(mcs, name, bases, attrs):
new_class = super(MediaDefiningClass, mcs).__new__(mcs, name, bases, attrs)
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
class Widget(metaclass=MediaDefiningClass):
    needs_multipart_form = False  # Determines whether this widget needs a multipart form
is_localized = False
is_required = False
supports_microseconds = True
def __init__(self, attrs=None):
self.attrs = {} if attrs is None else attrs.copy()
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
memo[id(self)] = obj
return obj
@property
def is_hidden(self):
return self.input_type == 'hidden' if hasattr(self, 'input_type') else False
def subwidgets(self, name, value, attrs=None):
context = self.get_context(name, value, attrs)
yield context['widget']
def format_value(self, value):
"""
Return a value as it should appear when rendered in a template.
"""
if value == '' or value is None:
return None
if self.is_localized:
return formats.localize_input(value)
return str(value)
def get_context(self, name, value, attrs):
context = {}
context['widget'] = {
'name': name,
'is_hidden': self.is_hidden,
'required': self.is_required,
'value': self.format_value(value),
'attrs': self.build_attrs(self.attrs, attrs),
'template_name': self.template_name,
}
return context
def render(self, name, value, attrs=None, renderer=None):
"""Render the widget as an HTML string."""
context = self.get_context(name, value, attrs)
return self._render(self.template_name, context, renderer)
def _render(self, template_name, context, renderer=None):
if renderer is None:
renderer = get_default_renderer()
return mark_safe(renderer.render(template_name, context))
def build_attrs(self, base_attrs, extra_attrs=None):
"""Build an attribute dictionary."""
return {**base_attrs, **(extra_attrs or {})}
def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, return the value
of this widget or None if it's not provided.
"""
return data.get(name)
def value_omitted_from_data(self, data, files, name):
return name not in data
def id_for_label(self, id_):
"""
Return the HTML ID attribute of this Widget for use by a <label>,
given the ID of the field. Return None if no ID is available.
This hook is necessary because some widgets have multiple HTML
elements and, thus, multiple IDs. In that case, this method should
return an ID value that corresponds to the first ID in the widget's
tags.
"""
return id_
def use_required_attribute(self, initial):
return not self.is_hidden
class Input(Widget):
"""
Base class for all <input> widgets.
"""
input_type = None # Subclasses must define this.
template_name = 'django/forms/widgets/input.html'
def __init__(self, attrs=None):
if attrs is not None:
attrs = attrs.copy()
self.input_type = attrs.pop('type', self.input_type)
super().__init__(attrs)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget']['type'] = self.input_type
return context
class TextInput(Input):
input_type = 'text'
template_name = 'django/forms/widgets/text.html'
class NumberInput(Input):
input_type = 'number'
template_name = 'django/forms/widgets/number.html'
class EmailInput(Input):
input_type = 'email'
template_name = 'django/forms/widgets/email.html'
class URLInput(Input):
input_type = 'url'
template_name = 'django/forms/widgets/url.html'
class PasswordInput(Input):
input_type = 'password'
template_name = 'django/forms/widgets/password.html'
def __init__(self, attrs=None, render_value=False):
super().__init__(attrs)
self.render_value = render_value
def get_context(self, name, value, attrs):
if not self.render_value:
value = None
return super().get_context(name, value, attrs)
class HiddenInput(Input):
input_type = 'hidden'
template_name = 'django/forms/widgets/hidden.html'
class MultipleHiddenInput(HiddenInput):
"""
Handle <input type="hidden"> for fields that have a list
of values.
"""
template_name = 'django/forms/widgets/multiple_hidden.html'
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
final_attrs = context['widget']['attrs']
id_ = context['widget']['attrs'].get('id')
subwidgets = []
for index, value_ in enumerate(context['widget']['value']):
widget_attrs = final_attrs.copy()
if id_:
# An ID attribute was given. Add a numeric index as a suffix
# so that the inputs don't all have the same ID attribute.
widget_attrs['id'] = '%s_%s' % (id_, index)
widget = HiddenInput()
widget.is_required = self.is_required
subwidgets.append(widget.get_context(name, value_, widget_attrs)['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def value_from_datadict(self, data, files, name):
try:
getter = data.getlist
except AttributeError:
getter = data.get
return getter(name)
def format_value(self, value):
return [] if value is None else value
class FileInput(Input):
input_type = 'file'
needs_multipart_form = True
template_name = 'django/forms/widgets/file.html'
def format_value(self, value):
"""File input never renders a value."""
return
def value_from_datadict(self, data, files, name):
"File widgets take data from FILES, not POST"
return files.get(name)
def value_omitted_from_data(self, data, files, name):
return name not in files
FILE_INPUT_CONTRADICTION = object()
class ClearableFileInput(FileInput):
clear_checkbox_label = _('Clear')
initial_text = _('Currently')
input_text = _('Change')
template_name = 'django/forms/widgets/clearable_file_input.html'
def clear_checkbox_name(self, name):
"""
Given the name of the file input, return the name of the clear checkbox
input.
"""
return name + '-clear'
def clear_checkbox_id(self, name):
"""
Given the name of the clear checkbox input, return the HTML id for it.
"""
return name + '_id'
def is_initial(self, value):
"""
        Return whether value is considered to be an initial value.
"""
return bool(value and getattr(value, 'url', False))
def format_value(self, value):
"""
Return the file object if it has a defined url attribute.
"""
if self.is_initial(value):
return value
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
context['widget'].update({
'checkbox_name': checkbox_name,
'checkbox_id': checkbox_id,
'is_initial': self.is_initial(value),
'input_text': self.input_text,
'initial_text': self.initial_text,
'clear_checkbox_label': self.clear_checkbox_label,
})
return context
def value_from_datadict(self, data, files, name):
upload = super().value_from_datadict(data, files, name)
if not self.is_required and CheckboxInput().value_from_datadict(
data, files, self.clear_checkbox_name(name)):
if upload:
# If the user contradicts themselves (uploads a new file AND
# checks the "clear" checkbox), we return a unique marker
# object that FileField will turn into a ValidationError.
return FILE_INPUT_CONTRADICTION
# False signals to clear any existing value, as opposed to just None
return False
return upload
def use_required_attribute(self, initial):
return super().use_required_attribute(initial) and not initial
def value_omitted_from_data(self, data, files, name):
return (
super().value_omitted_from_data(data, files, name) and
self.clear_checkbox_name(name) not in data
)
class Textarea(Widget):
template_name = 'django/forms/widgets/textarea.html'
def __init__(self, attrs=None):
# Use slightly better defaults than HTML's 20x2 box
default_attrs = {'cols': '40', 'rows': '10'}
if attrs:
default_attrs.update(attrs)
super().__init__(default_attrs)
class DateTimeBaseInput(TextInput):
format_key = ''
supports_microseconds = False
def __init__(self, attrs=None, format=None):
super().__init__(attrs)
self.format = format or None
def format_value(self, value):
return formats.localize_input(value, self.format or formats.get_format(self.format_key)[0])
class DateInput(DateTimeBaseInput):
format_key = 'DATE_INPUT_FORMATS'
template_name = 'django/forms/widgets/date.html'
class DateTimeInput(DateTimeBaseInput):
format_key = 'DATETIME_INPUT_FORMATS'
template_name = 'django/forms/widgets/datetime.html'
class TimeInput(DateTimeBaseInput):
format_key = 'TIME_INPUT_FORMATS'
template_name = 'django/forms/widgets/time.html'
# Defined at module level so that CheckboxInput is picklable (#17976)
def boolean_check(v):
return not (v is False or v is None or v == '')
class CheckboxInput(Input):
input_type = 'checkbox'
template_name = 'django/forms/widgets/checkbox.html'
def __init__(self, attrs=None, check_test=None):
super().__init__(attrs)
# check_test is a callable that takes a value and returns True
# if the checkbox should be checked for that value.
self.check_test = boolean_check if check_test is None else check_test
def format_value(self, value):
"""Only return the 'value' attribute if value isn't empty."""
if value is True or value is False or value is None or value == '':
return
return str(value)
def get_context(self, name, value, attrs):
if self.check_test(value):
if attrs is None:
attrs = {}
attrs['checked'] = True
return super().get_context(name, value, attrs)
def value_from_datadict(self, data, files, name):
if name not in data:
# A missing value means False because HTML form submission does not
# send results for unselected checkboxes.
return False
value = data.get(name)
# Translate true and false strings to boolean values.
values = {'true': True, 'false': False}
if isinstance(value, str):
value = values.get(value.lower(), value)
return bool(value)
def value_omitted_from_data(self, data, files, name):
# HTML checkboxes don't appear in POST data if not checked, so it's
# never known if the value is actually omitted.
return False
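# A small sketch (not part of Django) of the datadict behavior documented in
# the comments above: an absent key means unchecked, and 'true'/'false'
# strings are translated to booleans.
def _checkbox_input_example():
    cb = CheckboxInput()
    assert cb.value_from_datadict({}, {}, 'agree') is False            # omitted
    assert cb.value_from_datadict({'agree': 'false'}, {}, 'agree') is False
    assert cb.value_from_datadict({'agree': 'on'}, {}, 'agree') is True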
class ChoiceWidget(Widget):
allow_multiple_selected = False
input_type = None
template_name = None
option_template_name = None
add_id_index = True
checked_attribute = {'checked': True}
option_inherits_attrs = True
def __init__(self, attrs=None, choices=()):
super().__init__(attrs)
# choices can be any iterable, but we may need to render this widget
# multiple times. Thus, collapse it into a list so it can be consumed
# more than once.
self.choices = list(choices)
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
obj.choices = copy.copy(self.choices)
memo[id(self)] = obj
return obj
def subwidgets(self, name, value, attrs=None):
"""
Yield all "subwidgets" of this widget. Used to enable iterating
options from a BoundField for choice widgets.
"""
value = self.format_value(value)
yield from self.options(name, value, attrs)
def options(self, name, value, attrs=None):
"""Yield a flat list of options for this widgets."""
for group in self.optgroups(name, value, attrs):
yield from group[1]
def optgroups(self, name, value, attrs=None):
"""Return a list of optgroups for this widget."""
groups = []
has_selected = False
for index, (option_value, option_label) in enumerate(self.choices):
if option_value is None:
option_value = ''
subgroup = []
if isinstance(option_label, (list, tuple)):
group_name = option_value
subindex = 0
choices = option_label
else:
group_name = None
subindex = None
choices = [(option_value, option_label)]
groups.append((group_name, subgroup, index))
for subvalue, sublabel in choices:
selected = (
str(subvalue) in value and
(not has_selected or self.allow_multiple_selected)
)
has_selected |= selected
subgroup.append(self.create_option(
name, subvalue, sublabel, selected, index,
subindex=subindex, attrs=attrs,
))
if subindex is not None:
subindex += 1
return groups
def create_option(self, name, value, label, selected, index, subindex=None, attrs=None):
index = str(index) if subindex is None else "%s_%s" % (index, subindex)
if attrs is None:
attrs = {}
option_attrs = self.build_attrs(self.attrs, attrs) if self.option_inherits_attrs else {}
if selected:
option_attrs.update(self.checked_attribute)
if 'id' in option_attrs:
option_attrs['id'] = self.id_for_label(option_attrs['id'], index)
return {
'name': name,
'value': value,
'label': label,
'selected': selected,
'index': index,
'attrs': option_attrs,
'type': self.input_type,
'template_name': self.option_template_name,
'wrap_label': True,
}
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget']['optgroups'] = self.optgroups(name, context['widget']['value'], attrs)
return context
def id_for_label(self, id_, index='0'):
"""
Use an incremented id for each option where the main widget
references the zero index.
"""
if id_ and self.add_id_index:
id_ = '%s_%s' % (id_, index)
return id_
def value_from_datadict(self, data, files, name):
getter = data.get
if self.allow_multiple_selected:
try:
getter = data.getlist
except AttributeError:
pass
return getter(name)
def format_value(self, value):
"""Return selected values as a list."""
if value is None and self.allow_multiple_selected:
return []
if not isinstance(value, (tuple, list)):
value = [value]
return [str(v) if v is not None else '' for v in value]
class Select(ChoiceWidget):
input_type = 'select'
template_name = 'django/forms/widgets/select.html'
option_template_name = 'django/forms/widgets/select_option.html'
add_id_index = False
checked_attribute = {'selected': True}
option_inherits_attrs = False
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
if self.allow_multiple_selected:
context['widget']['attrs']['multiple'] = True
return context
@staticmethod
def _choice_has_empty_value(choice):
"""Return True if the choice's value is empty string or None."""
value, _ = choice
return value is None or value == ''
def use_required_attribute(self, initial):
"""
Don't render 'required' if the first <option> has a value, as that's
invalid HTML.
"""
use_required_attribute = super().use_required_attribute(initial)
# 'required' is always okay for <select multiple>.
if self.allow_multiple_selected:
return use_required_attribute
first_choice = next(iter(self.choices), None)
return use_required_attribute and first_choice is not None and self._choice_has_empty_value(first_choice)
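# A minimal sketch (not part of Django) of how grouped choices flow through
# ChoiceWidget.optgroups: each (group_name, options, index) triple feeds the
# optgroup template, and a nested choice list becomes an <optgroup>.
def _select_optgroups_example():
    widget = Select(choices=[
        ('', '---------'),
        ('Audio', [('vinyl', 'Vinyl'), ('cd', 'CD')]),  # an <optgroup>
    ])
    groups = widget.optgroups('media', widget.format_value('cd'))
    flat = [opt for _, options, _ in groups for opt in options]
    assert [opt['value'] for opt in flat] == ['', 'vinyl', 'cd']
    assert [opt['selected'] for opt in flat] == [False, False, True]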
class NullBooleanSelect(Select):
"""
A Select Widget intended to be used with NullBooleanField.
"""
def __init__(self, attrs=None):
choices = (
('unknown', _('Unknown')),
('true', _('Yes')),
('false', _('No')),
)
super().__init__(attrs, choices)
def format_value(self, value):
try:
return {
True: 'true', False: 'false',
'true': 'true', 'false': 'false',
# For backwards compatibility with Django < 2.2.
'2': 'true', '3': 'false',
}[value]
except KeyError:
return 'unknown'
def value_from_datadict(self, data, files, name):
value = data.get(name)
return {
True: True,
'True': True,
'False': False,
False: False,
'true': True,
'false': False,
# For backwards compatibility with Django < 2.2.
'2': True,
'3': False,
}.get(value)
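# Illustrative mapping (not part of Django): unrecognized inputs fall back to
# 'unknown' when rendering, and the legacy '2'/'3' values still deserialize:
#   format_value(True) -> 'true'      format_value('bogus') -> 'unknown'
#   value_from_datadict({'f': '2'}, {}, 'f') -> True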
class SelectMultiple(Select):
allow_multiple_selected = True
def value_from_datadict(self, data, files, name):
try:
getter = data.getlist
except AttributeError:
getter = data.get
return getter(name)
def value_omitted_from_data(self, data, files, name):
# An unselected <select multiple> doesn't appear in POST data, so it's
# never known if the value is actually omitted.
return False
class RadioSelect(ChoiceWidget):
input_type = 'radio'
template_name = 'django/forms/widgets/radio.html'
option_template_name = 'django/forms/widgets/radio_option.html'
class CheckboxSelectMultiple(ChoiceWidget):
allow_multiple_selected = True
input_type = 'checkbox'
template_name = 'django/forms/widgets/checkbox_select.html'
option_template_name = 'django/forms/widgets/checkbox_option.html'
def use_required_attribute(self, initial):
# Don't use the 'required' attribute because browser validation would
# require all checkboxes to be checked instead of at least one.
return False
def value_omitted_from_data(self, data, files, name):
# HTML checkboxes don't appear in POST data if not checked, so it's
# never known if the value is actually omitted.
return False
def id_for_label(self, id_, index=None):
""""
Don't include for="field_0" in <label> because clicking such a label
would toggle the first checkbox.
"""
if index is None:
return ''
return super().id_for_label(id_, index)
class MultiWidget(Widget):
"""
A widget that is composed of multiple widgets.
In addition to the values added by Widget.get_context(), this widget
adds a list of subwidgets to the context as widget['subwidgets'].
These can be looped over and rendered like normal widgets.
You'll probably want to use this class with MultiValueField.
"""
template_name = 'django/forms/widgets/multiwidget.html'
def __init__(self, widgets, attrs=None):
self.widgets = [w() if isinstance(w, type) else w for w in widgets]
super().__init__(attrs)
@property
def is_hidden(self):
return all(w.is_hidden for w in self.widgets)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
final_attrs = context['widget']['attrs']
input_type = final_attrs.pop('type', None)
id_ = final_attrs.get('id')
subwidgets = []
for i, widget in enumerate(self.widgets):
if input_type is not None:
widget.input_type = input_type
widget_name = '%s_%s' % (name, i)
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
widget_attrs = final_attrs.copy()
widget_attrs['id'] = '%s_%s' % (id_, i)
else:
widget_attrs = final_attrs
subwidgets.append(widget.get_context(widget_name, widget_value, widget_attrs)['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def id_for_label(self, id_):
if id_:
id_ += '_0'
return id_
def value_from_datadict(self, data, files, name):
return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]
def value_omitted_from_data(self, data, files, name):
return all(
widget.value_omitted_from_data(data, files, name + '_%s' % i)
for i, widget in enumerate(self.widgets)
)
def decompress(self, value):
"""
Return a list of decompressed values for the given compressed value.
The given value can be assumed to be valid, but not necessarily
non-empty.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _get_media(self):
"""
Media for a multiwidget is the combination of all media of the
subwidgets.
"""
media = Media()
for w in self.widgets:
media = media + w.media
return media
media = property(_get_media)
def __deepcopy__(self, memo):
obj = super().__deepcopy__(memo)
obj.widgets = copy.deepcopy(self.widgets)
return obj
@property
def needs_multipart_form(self):
return any(w.needs_multipart_form for w in self.widgets)
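# Sketch of a concrete MultiWidget (hypothetical, not part of Django): two
# text inputs whose compressed value is a "lat,lng" string. decompress() is
# the one method subclasses must supply.
class _CoordinateWidget(MultiWidget):
    def __init__(self, attrs=None):
        super().__init__([TextInput(), TextInput()], attrs)
    def decompress(self, value):
        # Return one value per subwidget, in order.
        if value:
            return value.split(',')
        return [None, None]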
class SplitDateTimeWidget(MultiWidget):
"""
A widget that splits datetime input into two <input type="text"> boxes.
"""
supports_microseconds = False
template_name = 'django/forms/widgets/splitdatetime.html'
def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None):
widgets = (
DateInput(
attrs=attrs if date_attrs is None else date_attrs,
format=date_format,
),
TimeInput(
attrs=attrs if time_attrs is None else time_attrs,
format=time_format,
),
)
super().__init__(widgets)
def decompress(self, value):
if value:
value = to_current_timezone(value)
return [value.date(), value.time()]
return [None, None]
class SplitHiddenDateTimeWidget(SplitDateTimeWidget):
"""
A widget that splits datetime input into two <input type="hidden"> inputs.
"""
template_name = 'django/forms/widgets/splithiddendatetime.html'
def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None):
super().__init__(attrs, date_format, time_format, date_attrs, time_attrs)
for widget in self.widgets:
widget.input_type = 'hidden'
class SelectDateWidget(Widget):
"""
A widget that splits date input into three <select> boxes.
This also serves as an example of a Widget that has more than one HTML
element and hence implements value_from_datadict.
"""
none_value = ('', '---')
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
template_name = 'django/forms/widgets/select_date.html'
input_type = 'select'
select_widget = Select
date_re = re.compile(r'(\d{4}|0)-(\d\d?)-(\d\d?)$')
def __init__(self, attrs=None, years=None, months=None, empty_label=None):
self.attrs = attrs or {}
# Optional list or tuple of years to use in the "year" select box.
if years:
self.years = years
else:
this_year = datetime.date.today().year
self.years = range(this_year, this_year + 10)
# Optional dict of months to use in the "month" select box.
if months:
self.months = months
else:
self.months = MONTHS
# Optional string, list, or tuple to use as empty_label.
if isinstance(empty_label, (list, tuple)):
if not len(empty_label) == 3:
raise ValueError('empty_label list/tuple must have 3 elements.')
self.year_none_value = ('', empty_label[0])
self.month_none_value = ('', empty_label[1])
self.day_none_value = ('', empty_label[2])
else:
if empty_label is not None:
self.none_value = ('', empty_label)
self.year_none_value = self.none_value
self.month_none_value = self.none_value
self.day_none_value = self.none_value
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
date_context = {}
year_choices = [(i, str(i)) for i in self.years]
if not self.is_required:
year_choices.insert(0, self.year_none_value)
year_name = self.year_field % name
date_context['year'] = self.select_widget(attrs, choices=year_choices).get_context(
name=year_name,
value=context['widget']['value']['year'],
attrs={**context['widget']['attrs'], 'id': 'id_%s' % year_name},
)
month_choices = list(self.months.items())
if not self.is_required:
month_choices.insert(0, self.month_none_value)
month_name = self.month_field % name
date_context['month'] = self.select_widget(attrs, choices=month_choices).get_context(
name=month_name,
value=context['widget']['value']['month'],
attrs={**context['widget']['attrs'], 'id': 'id_%s' % month_name},
)
day_choices = [(i, i) for i in range(1, 32)]
if not self.is_required:
day_choices.insert(0, self.day_none_value)
day_name = self.day_field % name
        date_context['day'] = self.select_widget(attrs, choices=day_choices).get_context(
name=day_name,
value=context['widget']['value']['day'],
attrs={**context['widget']['attrs'], 'id': 'id_%s' % day_name},
)
subwidgets = []
for field in self._parse_date_fmt():
subwidgets.append(date_context[field]['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def format_value(self, value):
"""
Return a dict containing the year, month, and day of the current value.
Use dict instead of a datetime to allow invalid dates such as February
31 to display correctly.
"""
year, month, day = None, None, None
if isinstance(value, (datetime.date, datetime.datetime)):
year, month, day = value.year, value.month, value.day
elif isinstance(value, str):
match = self.date_re.match(value)
if match:
# Convert any zeros in the date to empty strings to match the
# empty option value.
year, month, day = [int(val) or '' for val in match.groups()]
elif settings.USE_L10N:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
d = datetime.datetime.strptime(value, input_format)
except ValueError:
pass
else:
year, month, day = d.year, d.month, d.day
return {'year': year, 'month': month, 'day': day}
@staticmethod
def _parse_date_fmt():
fmt = get_format('DATE_FORMAT')
escaped = False
for char in fmt:
if escaped:
escaped = False
elif char == '\\':
escaped = True
elif char in 'Yy':
yield 'year'
elif char in 'bEFMmNn':
yield 'month'
elif char in 'dj':
yield 'day'
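    # For example, with DATE_FORMAT = 'N j, Y', _parse_date_fmt() yields
    # 'month', 'day', 'year', so the three <select> boxes render in that
    # order.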
def id_for_label(self, id_):
for first_select in self._parse_date_fmt():
return '%s_%s' % (id_, first_select)
return '%s_month' % id_
def value_from_datadict(self, data, files, name):
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == m == d == '':
return None
if y is not None and m is not None and d is not None:
if settings.USE_L10N:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
date_value = datetime.date(int(y), int(m), int(d))
except ValueError:
pass
else:
date_value = datetime_safe.new_date(date_value)
return date_value.strftime(input_format)
# Return pseudo-ISO dates with zeros for any unselected values,
# e.g. '2017-0-23'.
return '%s-%s-%s' % (y or 0, m or 0, d or 0)
return data.get(name)
def value_omitted_from_data(self, data, files, name):
return not any(
('{}_{}'.format(name, interval) in data)
for interval in ('year', 'month', 'day')
)
# quick_info/controllers.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .models import QuickInfo, QuickInfoManager, QuickInfoMasterManager
from ballot.models import OFFICE, CANDIDATE, POLITICIAN, MEASURE
from candidate.models import CandidateManager
from config.base import get_environment_variable
from django.contrib import messages
from django.http import HttpResponse
from exception.models import handle_record_not_found_exception, handle_record_not_saved_exception
from organization.models import OrganizationManager
import json
from voter.models import fetch_voter_id_from_voter_device_link, VoterManager
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, is_voter_device_id_valid, positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
WE_VOTE_API_KEY = get_environment_variable("WE_VOTE_API_KEY")
QUICK_INFO_URL = get_environment_variable("QUICK_INFO_URL")
def quick_info_save_for_api( # TODO to be converted
voter_device_id, quick_info_id, quick_info_we_vote_id,
organization_we_vote_id,
public_figure_we_vote_id,
voter_we_vote_id,
google_civic_election_id,
ballot_item_display_name,
office_we_vote_id,
candidate_we_vote_id,
measure_we_vote_id,
stance,
statement_text,
statement_html,
more_info_url
):
quick_info_id = convert_to_int(quick_info_id)
quick_info_we_vote_id = quick_info_we_vote_id.strip().lower()
existing_unique_identifier_found = positive_value_exists(quick_info_id) \
or positive_value_exists(quick_info_we_vote_id)
new_unique_identifier_found = positive_value_exists(organization_we_vote_id) \
and positive_value_exists(google_civic_election_id) and (
positive_value_exists(office_we_vote_id) or
positive_value_exists(candidate_we_vote_id) or
positive_value_exists(measure_we_vote_id)
)
    unique_identifier_found = existing_unique_identifier_found or new_unique_identifier_found
    # We must have these variables in order to create a new entry; the
    # requirement is the same set of fields checked just above.
    required_variables_for_new_entry = new_unique_identifier_found
if not unique_identifier_found:
results = {
'status': "QUICK_INFO_REQUIRED_UNIQUE_IDENTIFIER_VARIABLES_MISSING",
'success': False,
'voter_device_id': voter_device_id,
'quick_info_id': quick_info_id,
'quick_info_we_vote_id': quick_info_we_vote_id,
'new_quick_info_created': False,
'ballot_item_display_name': ballot_item_display_name,
'is_support': False,
'is_oppose': False,
'is_information_only': False,
'organization_we_vote_id': organization_we_vote_id,
'google_civic_election_id': google_civic_election_id,
'voter_id': 0,
'office_we_vote_id': office_we_vote_id,
'candidate_we_vote_id': candidate_we_vote_id,
'measure_we_vote_id': measure_we_vote_id,
'stance': stance,
'statement_text': statement_text,
'statement_html': statement_html,
'more_info_url': more_info_url,
'last_updated': '',
}
return results
elif not existing_unique_identifier_found and not required_variables_for_new_entry:
results = {
'status': "NEW_QUICK_INFO_REQUIRED_VARIABLES_MISSING",
'success': False,
'voter_device_id': voter_device_id,
'quick_info_id': quick_info_id,
'quick_info_we_vote_id': quick_info_we_vote_id,
'new_quick_info_created': False,
'ballot_item_display_name': ballot_item_display_name,
'is_support': False,
'is_oppose': False,
'is_information_only': False,
'organization_we_vote_id': organization_we_vote_id,
'google_civic_election_id': google_civic_election_id,
'voter_id': 0,
'office_we_vote_id': office_we_vote_id,
'candidate_we_vote_id': candidate_we_vote_id,
'measure_we_vote_id': measure_we_vote_id,
'stance': stance,
'statement_text': statement_text,
'statement_html': statement_html,
'more_info_url': more_info_url,
'last_updated': '',
}
return results
quick_info_manager = QuickInfoManager()
save_results = quick_info_manager.update_or_create_quick_info(
quick_info_id=quick_info_id,
quick_info_we_vote_id=quick_info_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
public_figure_we_vote_id=public_figure_we_vote_id,
voter_we_vote_id=voter_we_vote_id,
google_civic_election_id=google_civic_election_id,
ballot_item_display_name=ballot_item_display_name,
office_we_vote_id=office_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
measure_we_vote_id=measure_we_vote_id,
stance=stance,
statement_text=statement_text,
statement_html=statement_html,
more_info_url=more_info_url,
)
if save_results['success']:
quick_info = save_results['quick_info']
results = {
'success': save_results['success'],
'status': save_results['status'],
'voter_device_id': voter_device_id,
'quick_info_id': quick_info.id,
'quick_info_we_vote_id': quick_info.we_vote_id,
'new_quick_info_created': save_results['new_quick_info_created'],
'ballot_item_display_name': quick_info.ballot_item_display_name,
'is_support': quick_info.is_support(),
'is_oppose': quick_info.is_oppose(),
'is_information_only': quick_info.is_information_only(),
'organization_we_vote_id': quick_info.organization_we_vote_id,
'google_civic_election_id': quick_info.google_civic_election_id,
'voter_id': quick_info.voter_id,
'office_we_vote_id': '', # quick_info.office_we_vote_id,
'candidate_we_vote_id': quick_info.candidate_campaign_we_vote_id,
'measure_we_vote_id': quick_info.contest_measure_we_vote_id,
'stance': quick_info.stance,
'statement_text': quick_info.statement_text,
'statement_html': quick_info.statement_html,
'more_info_url': quick_info.more_info_url,
'last_updated': '',
}
return results
else:
results = {
'success': False,
'status': save_results['status'],
'voter_device_id': voter_device_id,
'quick_info_id': quick_info_id,
'quick_info_we_vote_id': quick_info_we_vote_id,
'new_quick_info_created': False,
'ballot_item_display_name': '',
'is_support': False,
'is_oppose': False,
'is_information_only': False,
'organization_we_vote_id': organization_we_vote_id,
'google_civic_election_id': google_civic_election_id,
'voter_id': 0,
'office_we_vote_id': office_we_vote_id,
'candidate_we_vote_id': candidate_we_vote_id,
'measure_we_vote_id': measure_we_vote_id,
'stance': stance,
'statement_text': statement_text,
'statement_html': statement_html,
'more_info_url': more_info_url,
'last_updated': '',
}
return results
def quick_info_import_from_sample_file(request=None): # , load_from_uri=False # TODO to be converted
"""
Get the json data, and either create new entries or update existing
:return:
"""
# if load_from_uri:
# # Request json file from We Vote servers
# messages.add_message(request, messages.INFO, "Loading quick_info from We Vote Master servers")
# request = requests.get(QUICK_INFO_URL, params={
# "key": WE_VOTE_API_KEY, # This comes from an environment variable
# })
# structured_json = json.loads(request.text)
# else:
# Load saved json from local file
with open("quick_info/import_data/quick_info_sample.json") as json_data:
structured_json = json.load(json_data)
quick_info_saved = 0
quick_info_updated = 0
quick_info_not_processed = 0
for one_quick_info in structured_json:
# Make sure we have the minimum required variables
if not positive_value_exists(one_quick_info["we_vote_id"]) \
or not positive_value_exists(one_quick_info["organization_we_vote_id"])\
or not positive_value_exists(one_quick_info["candidate_campaign_we_vote_id"]):
quick_info_not_processed += 1
continue
# Check to see if this quick_info is already being used anywhere
quick_info_found = False
try:
if len(one_quick_info["we_vote_id"]) > 0:
quick_info_query = QuickInfo.objects.filter(we_vote_id=one_quick_info["we_vote_id"])
if len(quick_info_query):
quick_info = quick_info_query[0]
quick_info_found = True
        except QuickInfo.DoesNotExist as e:
            handle_record_not_found_exception(e, logger=logger)
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
# We need to look up the local organization_id based on the newly saved we_vote_id
organization_manager = OrganizationManager()
organization_id = organization_manager.fetch_organization_id(one_quick_info["organization_we_vote_id"])
# We need to look up the local candidate_campaign_id
candidate_manager = CandidateManager()
candidate_campaign_id = candidate_manager.fetch_candidate_id_from_we_vote_id(
one_quick_info["candidate_campaign_we_vote_id"])
# Find the google_civic_candidate_name so we have a backup way to link quick_info if the we_vote_id is lost
google_civic_candidate_name = one_quick_info["google_civic_candidate_name"] if \
"google_civic_candidate_name" in one_quick_info else ''
if not positive_value_exists(google_civic_candidate_name):
google_civic_candidate_name = candidate_manager.fetch_google_civic_candidate_name_from_we_vote_id(
one_quick_info["candidate_campaign_we_vote_id"])
# TODO We need to look up contest_measure_id
contest_measure_id = 0
try:
if quick_info_found:
# Update
quick_info.we_vote_id = one_quick_info["we_vote_id"]
quick_info.organization_id = organization_id
quick_info.organization_we_vote_id = one_quick_info["organization_we_vote_id"]
quick_info.candidate_campaign_id = candidate_campaign_id
quick_info.candidate_campaign_we_vote_id = one_quick_info["candidate_campaign_we_vote_id"]
quick_info.google_civic_candidate_name = google_civic_candidate_name
quick_info.contest_measure_id = contest_measure_id
quick_info.date_entered = one_quick_info["date_entered"]
quick_info.google_civic_election_id = one_quick_info["google_civic_election_id"]
quick_info.stance = one_quick_info["stance"]
quick_info.more_info_url = one_quick_info["more_info_url"]
quick_info.statement_text = one_quick_info["statement_text"]
quick_info.statement_html = one_quick_info["statement_html"]
quick_info.save()
quick_info_updated += 1
# messages.add_message(request, messages.INFO, u"QuickInfo updated: {we_vote_id}".format(
# we_vote_id=one_quick_info["we_vote_id"]))
else:
# Create new
quick_info = QuickInfo(
we_vote_id=one_quick_info["we_vote_id"],
organization_id=organization_id,
organization_we_vote_id=one_quick_info["organization_we_vote_id"],
candidate_campaign_id=candidate_campaign_id,
candidate_campaign_we_vote_id=one_quick_info["candidate_campaign_we_vote_id"],
google_civic_candidate_name=google_civic_candidate_name,
contest_measure_id=contest_measure_id,
date_entered=one_quick_info["date_entered"],
google_civic_election_id=one_quick_info["google_civic_election_id"],
stance=one_quick_info["stance"],
more_info_url=one_quick_info["more_info_url"],
statement_text=one_quick_info["statement_text"],
statement_html=one_quick_info["statement_html"],
)
quick_info.save()
quick_info_saved += 1
# messages.add_message(request, messages.INFO, u"New quick_info imported: {we_vote_id}".format(
# we_vote_id=one_quick_info["we_vote_id"]))
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
if request is not None:
messages.add_message(request, messages.ERROR,
u"Could not save/update quick_info, "
u"quick_info_found: {quick_info_found}, "
u"we_vote_id: {we_vote_id}, "
u"organization_we_vote_id: {organization_we_vote_id}, "
u"candidate_campaign_we_vote_id: {candidate_campaign_we_vote_id}".format(
quick_info_found=quick_info_found,
we_vote_id=one_quick_info["we_vote_id"],
organization_we_vote_id=one_quick_info["organization_we_vote_id"],
candidate_campaign_we_vote_id=one_quick_info["candidate_campaign_we_vote_id"],
))
quick_info_not_processed += 1
quick_info_results = {
'saved': quick_info_saved,
'updated': quick_info_updated,
'not_processed': quick_info_not_processed,
}
return quick_info_results
# Retrieve the quick info for one ballot item. This could be limited to the
# stance, but for now we retrieve all of the data.
def quick_info_retrieve_for_api(kind_of_ballot_item, ballot_item_we_vote_id):
ballot_item_we_vote_id = ballot_item_we_vote_id.strip().lower()
    # Reject a missing OR unrecognized kind_of_ballot_item up front.
    if not positive_value_exists(kind_of_ballot_item) or \
            kind_of_ballot_item not in (OFFICE, CANDIDATE, POLITICIAN, MEASURE):
json_data = {
'status': "QUICK_INFO_RETRIEVE_KIND_OF_BALLOT_ITEM_NOT_SPECIFIED",
'success': False,
'quick_info_id': 0,
'quick_info_we_vote_id': '',
'kind_of_ballot_item': kind_of_ballot_item,
'ballot_item_we_vote_id': ballot_item_we_vote_id,
'quick_info_found': False,
'language': '',
'info_text': '',
'info_html': '',
'ballot_item_display_name': '',
'more_info_credit_text': '',
'more_info_url': '',
'last_updated': '',
'last_editor_we_vote_id': '',
'office_we_vote_id': '',
'candidate_we_vote_id': '',
'politician_we_vote_id': '',
'measure_we_vote_id': '',
'quick_info_master_we_vote_id': '',
'google_civic_election_id': '',
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
if kind_of_ballot_item == OFFICE:
office_we_vote_id = ballot_item_we_vote_id
candidate_we_vote_id = ""
politician_we_vote_id = ""
measure_we_vote_id = ""
elif kind_of_ballot_item == CANDIDATE:
office_we_vote_id = ""
candidate_we_vote_id = ballot_item_we_vote_id
politician_we_vote_id = ""
measure_we_vote_id = ""
elif kind_of_ballot_item == POLITICIAN:
office_we_vote_id = ""
candidate_we_vote_id = ""
politician_we_vote_id = ballot_item_we_vote_id
measure_we_vote_id = ""
elif kind_of_ballot_item == MEASURE:
office_we_vote_id = ""
candidate_we_vote_id = ""
politician_we_vote_id = ""
measure_we_vote_id = ballot_item_we_vote_id
else:
office_we_vote_id = ""
candidate_we_vote_id = ""
politician_we_vote_id = ""
measure_we_vote_id = ""
if not positive_value_exists(office_we_vote_id) and \
not positive_value_exists(candidate_we_vote_id) and \
not positive_value_exists(politician_we_vote_id) and \
not positive_value_exists(measure_we_vote_id):
json_data = {
'status': "QUICK_INFO_RETRIEVE_MISSING_BALLOT_ITEM_ID",
'success': False,
'quick_info_id': 0,
'quick_info_we_vote_id': '',
'kind_of_ballot_item': kind_of_ballot_item,
'ballot_item_we_vote_id': ballot_item_we_vote_id,
'quick_info_found': False,
'language': '',
'info_text': '',
'info_html': '',
'ballot_item_display_name': '',
'more_info_credit_text': '',
'more_info_url': '',
'last_updated': '',
'last_editor_we_vote_id': '',
'office_we_vote_id': office_we_vote_id,
'candidate_we_vote_id': candidate_we_vote_id,
'politician_we_vote_id': politician_we_vote_id,
'measure_we_vote_id': measure_we_vote_id,
'quick_info_master_we_vote_id': '',
'google_civic_election_id': '',
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
    quick_info_manager = QuickInfoManager()
    if positive_value_exists(office_we_vote_id):
        results = quick_info_manager.retrieve_contest_office_quick_info(office_we_vote_id)
    elif positive_value_exists(candidate_we_vote_id):
        results = quick_info_manager.retrieve_candidate_quick_info(candidate_we_vote_id)
    elif positive_value_exists(measure_we_vote_id):
        results = quick_info_manager.retrieve_contest_measure_quick_info(measure_we_vote_id)
    else:
        # The politician path has no retrieve method yet; without this branch
        # `results` would be undefined below and raise a NameError.
        results = {'status': 'QUICK_INFO_RETRIEVE_KIND_OF_BALLOT_ITEM_NOT_SUPPORTED',
                   'quick_info_found': False}
# retrieve_quick_info results
# results = {
# 'success': success,
# 'status': status,
# 'error_result': error_result,
# 'DoesNotExist': exception_does_not_exist,
# 'MultipleObjectsReturned': exception_multiple_object_returned,
# 'quick_info_found': True if quick_info_id > 0 else False,
# 'quick_info_id': quick_info_id,
# 'quick_info_we_vote_id': quick_info_on_stage.we_vote_id,
# 'quick_info': quick_info_on_stage,
# 'is_chinese': quick_info_on_stage.is_chinese(),
# 'is_english': quick_info_on_stage.is_english(),
# 'is_spanish': quick_info_on_stage.is_spanish(),
# 'is_tagalog': quick_info_on_stage.is_tagalog(),
# 'is_vietnamese': quick_info_on_stage.is_vietnamese(),
# }
if results['quick_info_found']:
quick_info = results['quick_info']
if positive_value_exists(quick_info.quick_info_master_we_vote_id):
# If here, we are looking at a master entry
quick_info_master_manager = QuickInfoMasterManager()
quick_info_master_results = quick_info_master_manager.retrieve_quick_info_master_from_we_vote_id(
quick_info.quick_info_master_we_vote_id)
if quick_info_master_results['quick_info_master_found']:
quick_info_master = quick_info_master_results['quick_info_master']
info_text = quick_info_master.info_text
info_html = quick_info_master.info_html
more_info_url = quick_info_master.more_info_url
more_info_credit_text = quick_info_master.more_info_credit_text()
else:
info_text = ""
info_html = ""
more_info_url = ""
more_info_credit_text = ""
results['status'] += ", " + quick_info_master_results['status']
else:
# If here, we are looking at a unique entry
info_text = quick_info.info_text
info_html = quick_info.info_html
more_info_url = quick_info.more_info_url
more_info_credit_text = quick_info.more_info_credit_text()
json_data = {
'success': True,
'status': results['status'],
'quick_info_found': True,
'quick_info_id': quick_info.id,
'quick_info_we_vote_id': quick_info.we_vote_id,
'kind_of_ballot_item': kind_of_ballot_item,
'ballot_item_we_vote_id': ballot_item_we_vote_id,
'ballot_item_display_name': quick_info.ballot_item_display_name,
'language': quick_info.language,
'info_text': info_text,
'info_html': info_html,
'more_info_url': more_info_url,
'more_info_credit_text': more_info_credit_text,
'last_updated': str(quick_info.last_updated),
'last_editor_we_vote_id': quick_info.last_editor_we_vote_id,
'office_we_vote_id': quick_info.contest_office_we_vote_id,
'candidate_we_vote_id': quick_info.candidate_campaign_we_vote_id,
'politician_we_vote_id': quick_info.politician_we_vote_id,
'measure_we_vote_id': quick_info.contest_measure_we_vote_id,
'quick_info_master_we_vote_id': quick_info.quick_info_master_we_vote_id,
'google_civic_election_id': quick_info.google_civic_election_id,
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
else:
json_data = {
'status': results['status'],
'success': False,
'quick_info_id': 0,
'quick_info_we_vote_id': '',
'kind_of_ballot_item': kind_of_ballot_item,
'ballot_item_we_vote_id': ballot_item_we_vote_id,
'quick_info_found': False,
'language': '',
'info_text': '',
'info_html': '',
'ballot_item_display_name': '',
'more_info_credit_text': '',
'more_info_url': '',
'last_updated': '',
'last_editor_we_vote_id': '',
'contest_office_we_vote_id': office_we_vote_id,
'candidate_campaign_we_vote_id': candidate_we_vote_id,
'politician_we_vote_id': politician_we_vote_id,
'contest_measure_we_vote_id': measure_we_vote_id,
'quick_info_master_we_vote_id': '',
'google_civic_election_id': '',
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
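# Illustrative only (not existing We Vote wiring): a thin Django view that
# delegates to quick_info_retrieve_for_api. The GET parameter names here are
# assumptions for the sketch.
def quick_info_retrieve_view_sketch(request):
    kind_of_ballot_item = request.GET.get('kind_of_ballot_item', '')
    ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', '')
    return quick_info_retrieve_for_api(kind_of_ballot_item, ballot_item_we_vote_id)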
def quick_info_text_save_for_api( # TODO to be converted
voter_device_id, quick_info_id, quick_info_we_vote_id,
google_civic_election_id,
office_we_vote_id,
candidate_we_vote_id,
measure_we_vote_id,
statement_text,
statement_html
):
results = is_voter_device_id_valid(voter_device_id)
if not results['success']:
json_data_from_results = results['json_data']
json_data = {
'status': json_data_from_results['status'],
'success': False,
'voter_device_id': voter_device_id,
'quick_info_id': quick_info_id,
'quick_info_we_vote_id': quick_info_we_vote_id,
'new_quick_info_created': False,
'is_support': False,
'is_oppose': False,
'is_information_only': False,
'google_civic_election_id': google_civic_election_id,
'office_we_vote_id': office_we_vote_id,
'candidate_we_vote_id': candidate_we_vote_id,
'measure_we_vote_id': measure_we_vote_id,
'statement_text': statement_text,
'statement_html': statement_html,
'last_updated': '',
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
voter_manager = VoterManager()
voter_results = voter_manager.retrieve_voter_from_voter_device_id(voter_device_id)
voter_id = voter_results['voter_id']
if not positive_value_exists(voter_id):
json_data = {
'status': "VOTER_NOT_FOUND_FROM_VOTER_DEVICE_ID",
'success': False,
'voter_device_id': voter_device_id,
'quick_info_id': quick_info_id,
'quick_info_we_vote_id': quick_info_we_vote_id,
'new_quick_info_created': False,
'is_support': False,
'is_oppose': False,
'is_information_only': False,
'google_civic_election_id': google_civic_election_id,
'office_we_vote_id': office_we_vote_id,
'candidate_we_vote_id': candidate_we_vote_id,
'measure_we_vote_id': measure_we_vote_id,
'statement_text': statement_text,
'statement_html': statement_html,
'last_updated': '',
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
voter = voter_results['voter']
quick_info_id = convert_to_int(quick_info_id)
quick_info_we_vote_id = quick_info_we_vote_id.strip().lower()
existing_unique_identifier_found = positive_value_exists(quick_info_id) \
or positive_value_exists(quick_info_we_vote_id)
new_unique_identifier_found = positive_value_exists(voter_id) \
and positive_value_exists(google_civic_election_id) and (
positive_value_exists(office_we_vote_id) or
positive_value_exists(candidate_we_vote_id) or
positive_value_exists(measure_we_vote_id)
)
    unique_identifier_found = existing_unique_identifier_found or new_unique_identifier_found
    # We must have these variables in order to create a new entry; the
    # requirement is the same set of fields checked just above.
    required_variables_for_new_entry = new_unique_identifier_found
if not unique_identifier_found:
results = {
'status': "QUICK_INFO_REQUIRED_UNIQUE_IDENTIFIER_VARIABLES_MISSING",
'success': False,
'voter_device_id': voter_device_id,
'quick_info_id': quick_info_id,
'quick_info_we_vote_id': quick_info_we_vote_id,
'new_quick_info_created': False,
'is_support': False,
'is_oppose': False,
'is_information_only': False,
'google_civic_election_id': google_civic_election_id,
'office_we_vote_id': office_we_vote_id,
'candidate_we_vote_id': candidate_we_vote_id,
'measure_we_vote_id': measure_we_vote_id,
'statement_text': statement_text,
'statement_html': statement_html,
'last_updated': '',
}
return results
elif not existing_unique_identifier_found and not required_variables_for_new_entry:
results = {
'status': "NEW_QUICK_INFO_REQUIRED_VARIABLES_MISSING",
'success': False,
'voter_device_id': voter_device_id,
'quick_info_id': quick_info_id,
'quick_info_we_vote_id': quick_info_we_vote_id,
'new_quick_info_created': False,
'is_support': False,
'is_oppose': False,
'is_information_only': False,
'google_civic_election_id': google_civic_election_id,
'office_we_vote_id': office_we_vote_id,
'candidate_we_vote_id': candidate_we_vote_id,
'measure_we_vote_id': measure_we_vote_id,
'statement_text': statement_text,
'statement_html': statement_html,
'last_updated': '',
}
return results
quick_info_manager = QuickInfoManager()
save_results = quick_info_manager.update_or_create_quick_info(
quick_info_id=quick_info_id,
quick_info_we_vote_id=quick_info_we_vote_id,
voter_we_vote_id=voter.we_vote_id,
google_civic_election_id=google_civic_election_id,
office_we_vote_id=office_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
measure_we_vote_id=measure_we_vote_id,
statement_text=statement_text,
statement_html=statement_html,
)
if save_results['success']:
quick_info = save_results['quick_info']
results = {
'success': save_results['success'],
'status': save_results['status'],
'voter_device_id': voter_device_id,
'quick_info_id': quick_info.id,
'quick_info_we_vote_id': quick_info.we_vote_id,
'new_quick_info_created': save_results['new_quick_info_created'],
'is_support': quick_info.is_support(),
'is_oppose': quick_info.is_oppose(),
'is_information_only': quick_info.is_information_only(),
'google_civic_election_id': quick_info.google_civic_election_id,
'office_we_vote_id': quick_info.contest_office_we_vote_id,
'candidate_we_vote_id': quick_info.candidate_campaign_we_vote_id,
'measure_we_vote_id': quick_info.contest_measure_we_vote_id,
'statement_text': quick_info.statement_text,
'statement_html': quick_info.statement_html,
'last_updated': '',
}
return results
else:
results = {
'success': False,
'status': save_results['status'],
'voter_device_id': voter_device_id,
'quick_info_id': quick_info_id,
'quick_info_we_vote_id': quick_info_we_vote_id,
'new_quick_info_created': False,
'is_support': False,
'is_oppose': False,
'is_information_only': False,
'google_civic_election_id': google_civic_election_id,
'office_we_vote_id': office_we_vote_id,
'candidate_we_vote_id': candidate_we_vote_id,
'measure_we_vote_id': measure_we_vote_id,
'statement_text': statement_text,
'statement_html': statement_html,
'last_updated': '',
}
return results
# Copyright 2014, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from networking_powervm.plugins.ibm.agent.powervm import sea_agent
from networking_powervm.tests.unit.plugins.ibm.powervm import base
from neutron_lib import constants as q_const
from oslo_config import cfg
from pypowervm.wrappers import logical_partition as pvm_lpar
from pypowervm.wrappers import network as pvm_net
from pypowervm.wrappers import virtual_io_server as pvm_vios
def fake_nb(uuid, pvid, tagged_vlans, addl_vlans):
return mock.MagicMock(
uuid=uuid,
load_grps=[mock.MagicMock(pvid=pvid, tagged_vlans=tagged_vlans)],
list_vlans=mock.Mock(return_value=[pvid] + tagged_vlans + addl_vlans))
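# For example, fake_nb('nb_uuid', 20, [21, 22], [30]) mocks a network bridge
# whose list_vlans() returns [20, 21, 22, 30] -- the PVID plus the tagged
# plus the additional VLANs.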
class FakeException(Exception):
"""Used to indicate an error in an API the agent calls."""
pass
class SEAAgentTest(base.BasePVMTestCase):
def setUp(self):
super(SEAAgentTest, self).setUp()
self.agtfx = self.useFixture(base.AgentFx())
# Mock the mgmt uuid
self.useFixture(fixtures.MockPatch(
'pypowervm.tasks.partition.get_mgmt_partition')
).mock.return_value = mock.MagicMock(uuid='mgmt_uuid')
self.mock_parse_sea_mappings = self.useFixture(fixtures.MockPatch(
'networking_powervm.plugins.ibm.agent.powervm.utils.'
'parse_sea_mappings')).mock
self.mock_parse_sea_mappings.return_value = {'default': 'nb_uuid'}
cfg.CONF.set_override('bridge_mappings', 'the_bridge_maps',
group='AGENT')
self.agent = sea_agent.SharedEthernetNeutronAgent()
def test_init(self):
"""Verifies the integrity of the agent after being initialized."""
self.assertEqual('networking-powervm-sharedethernet-agent',
self.agent.agent_state.get('binary'))
self.assertEqual(q_const.L2_AGENT_TOPIC,
self.agent.agent_state.get('topic'))
self.mock_parse_sea_mappings.assert_called_once_with(
self.agent.adapter, self.agent.host_uuid, 'the_bridge_maps')
self.assertEqual(
{'default': 'nb_uuid'},
self.agent.agent_state['configurations']['bridge_mappings'])
self.assertEqual('PowerVM Shared Ethernet agent',
self.agent.agent_state.get('agent_type'))
self.assertEqual(True, self.agent.agent_state.get('start_flag'))
# Other @propertys
self.assertEqual('sea-agent-%s' % cfg.CONF.host, self.agent.agent_id)
self.assertEqual(pvm_net.CNA, self.agent.vif_wrapper_class)
@mock.patch('pypowervm.tasks.network_bridger.ensure_vlans_on_nb')
@mock.patch('networking_powervm.plugins.ibm.agent.powervm.agent_base.'
'BasePVMNeutronAgent.provision_devices')
def test_provision_devices(self, mock_base_prov, mock_ensure):
"""Validates that the provision is invoked with batched VLANs."""
preq1 = base.mk_preq('plug', 'aa', segment_id=20,
phys_network='default', vif_type='pvm_sea')
preq2 = base.mk_preq('plug', 'bb', segment_id=22,
phys_network='default', vif_type='pvm_sea')
preq3 = base.mk_preq('unplug', 'cc', segment_id=24,
phys_network='default', vif_type='pvm_sea')
# Invoke
self.agent.provision_devices({preq1, preq2, preq3})
# Validate that both VLANs are in one call
mock_ensure.assert_called_once_with(
self.agent.adapter, self.agent.host_uuid, 'nb_uuid', {20, 22})
# Validate that the devices were marked up
mock_base_prov.assert_called_once_with({preq1, preq2})
# Validate the behavior of a failed VLAN provision.
mock_ensure.reset_mock()
mock_base_prov.reset_mock()
# Have the ensure throw some exception
mock_ensure.side_effect = FakeException()
# Invoke
self.assertRaises(FakeException, self.agent.provision_devices,
{preq1, preq2, preq3})
        # Validate that both VLANs are in one call. The ensure should still
        # have been attempted even though it raised.
mock_ensure.assert_called_once_with(
self.agent.adapter, self.agent.host_uuid, 'nb_uuid', {20, 22})
# However, the port update should not be invoked.
mock_base_prov.assert_not_called()
@mock.patch('pypowervm.tasks.network_bridger.remove_vlan_from_nb')
@mock.patch('networking_powervm.plugins.ibm.agent.powervm.sea_agent.'
'SharedEthernetNeutronAgent._get_nb_and_vlan')
@mock.patch('networking_powervm.plugins.ibm.agent.powervm.sea_agent.'
'SharedEthernetNeutronAgent.provision_devices')
@mock.patch('networking_powervm.plugins.ibm.agent.powervm.utils.'
'get_vswitch_map')
@mock.patch('networking_powervm.plugins.ibm.agent.powervm.utils.list_vifs')
@mock.patch('networking_powervm.plugins.ibm.agent.powervm.utils.'
'list_bridges')
@mock.patch('networking_powervm.plugins.ibm.agent.powervm.utils.'
'find_nb_for_cna')
@mock.patch('networking_powervm.plugins.ibm.agent.powervm.prov_req.'
'ProvisionRequest.for_wrappers')
def test_heal_and_optimize(
self, mock_preq, mock_find_nb_for_cna, mock_list_bridges,
mock_list_cnas, mock_vs_map, mock_prov_devs, mock_get_nb_and_vlan,
mock_nbr_remove):
"""Validates the heal and optimization code. Limited to 3 deletes."""
# Fake adapters already on system.
mgmt_lpar = mock.Mock(spec=pvm_lpar.LPAR, is_mgmt_partition=True)
reg_lpar = mock.Mock(spec=pvm_lpar.LPAR, is_mgmt_partition=False)
mgmt_vios = mock.Mock(spec=pvm_vios.VIOS, is_mgmt_partition=True)
reg_vios = mock.Mock(spec=pvm_vios.VIOS, is_mgmt_partition=False)
cna1 = mock.MagicMock(mac='00', pvid=30, tagged_vlans=[])
cna2 = mock.MagicMock(mac='11', pvid=31, tagged_vlans=[32, 33, 34])
mock_list_cnas.return_value = {
mgmt_lpar: [cna1], reg_lpar: [cna2], mgmt_vios: [], reg_vios: []}
# The neutron data. These will be 'ensured' on the bridge.
preq1 = base.mk_preq('plug', '00', segment_id=20,
phys_network='default')
preq2 = base.mk_preq('plug', '22', segment_id=22,
phys_network='default')
preq3 = base.mk_preq('unplug', '55', segment_id=55,
phys_network='default')
mock_preq.return_value = [preq1, preq2, preq3]
# Mock a provision request
mock_get_nb_and_vlan.return_value = ('nb2_uuid', 23)
# Mock up network bridges.
mock_nb1 = fake_nb('nb_uuid', 20, [], [])
mock_nb2 = fake_nb('nb2_uuid', 40, [41, 42, 43], [44, 45, 46, 47, 48])
mock_list_bridges.return_value = [mock_nb1, mock_nb2]
mock_find_nb_for_cna.return_value = mock_nb2
# Invoke
self.agent.heal_and_optimize()
mock_list_cnas.assert_called_once_with(self.agent.adapter, pvm_net.CNA,
include_vios_and_mgmt=True)
# Filtered down to the non-mgmt LPAR
mock_preq.assert_called_once_with(self.agent, {reg_lpar: [cna2]},
'plug')
mock_list_bridges.assert_called_once_with(self.agent.adapter,
self.agent.host_uuid)
mock_prov_devs.assert_called_with([preq1, preq2, preq3])
mock_get_nb_and_vlan.assert_has_calls(
[mock.call(req.rpc_device, emit_warnings=False) for req in
(preq1, preq2, preq3)])
mock_vs_map.assert_called_once_with(self.agent.adapter,
self.agent.host_uuid)
mock_find_nb_for_cna.assert_has_calls(
[mock.call(mock_list_bridges.return_value, cna,
mock_vs_map.return_value) for cna in (cna1, cna2)],
any_order=True)
# One remove call per net bridge, up to a max of 3.
self.assertEqual(3, mock_nbr_remove.call_count)
# VLANs 44, 45, 46, 47, and 48 are not required by anything, so the
# first three of those should be deleted
mock_nbr_remove.assert_has_calls(
[mock.call(
self.agent.adapter, self.agent.host_uuid, 'nb2_uuid', vlan)
for vlan in (44, 45, 46)], any_order=True)
# Update mocks to show 44, 45, and 46 were removed
mock_nb2 = fake_nb('nb2_uuid', 40, [41, 42, 43], [47, 48])
mock_list_bridges.return_value = [mock_nb1, mock_nb2]
mock_find_nb_for_cna.return_value = mock_nb2
# Validate no removes if we disable cleanup.
mock_nbr_remove.reset_mock()
mock_prov_devs.reset_mock()
cfg.CONF.set_override('automated_powervm_vlan_cleanup', False,
group='AGENT')
# Invoke
self.agent.heal_and_optimize()
# Verify. One ensure call per net bridge. Zero for the remove as that
# has been flagged to not clean up.
mock_nbr_remove.assert_not_called()
mock_prov_devs.assert_called_with([preq1, preq2, preq3])
# Now change the CONF back and validate we can remove the remainder
cfg.CONF.set_override('automated_powervm_vlan_cleanup', True,
group='AGENT')
# Invoke
self.agent.heal_and_optimize()
# Should only be two left to remove
self.assertEqual(2, mock_nbr_remove.call_count)
        # VLANs 47 and 48 should be the ones that are removed
mock_nbr_remove.assert_has_calls(
[mock.call(
self.agent.adapter, self.agent.host_uuid, 'nb2_uuid', vlan)
for vlan in (47, 48)], any_order=True)
def test_get_nb_and_vlan(self):
"""Be sure nb uuid and vlan parsed from dev properly."""
self.assertEqual(('nb_uuid', 100), self.agent._get_nb_and_vlan(
{'physical_network': 'default', 'segmentation_id': 100}))
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from datetime import datetime
from .forms import CategoryForm, PageForm, UserForm, UserProfileForm
from .models import Category, Page, UserProfile
from .search import run_query
def encode_url(s):
    return s.replace(' ', '-')
def decode_url(s):
    return s.replace('-', ' ')
def get_category_list(max_results=0, starts_with=''):
cat_list = []
if starts_with:
cat_list = Category.objects.filter(name__startswith=starts_with)
else:
cat_list = Category.objects.all()
if max_results > 0:
if (len(cat_list) > max_results):
cat_list = cat_list[:max_results]
for cat in cat_list:
cat.url = encode_url(cat.name)
return cat_list
def index(request):
context = RequestContext(request)
top_category_list = Category.objects.order_by('-likes')[:5]
for category in top_category_list:
category.url = encode_url(category.name)
context_dict = {'categories': top_category_list}
cat_list = get_category_list()
context_dict['cat_list'] = cat_list
page_list = Page.objects.order_by('-views')[:5]
context_dict['pages'] = page_list
    if request.session.get('last_visit'):
        last_visit_time = request.session.get('last_visit')
        visits = request.session.get('visits', 0)
        if (datetime.now() - datetime.strptime(last_visit_time[:-7], "%Y-%m-%d %H:%M:%S")).days > 0:
            request.session['visits'] = visits + 1
            # Refresh the timestamp; otherwise the counter increments on
            # every request once the stored visit is a day old.
            request.session['last_visit'] = str(datetime.now())
    else:
        request.session['last_visit'] = str(datetime.now())
        request.session['visits'] = 1
return render_to_response('rango/index.html', context_dict, context)
def about(request):
context = RequestContext(request)
context_dict = {}
cat_list = get_category_list()
context_dict['cat_list'] = cat_list
count = request.session.get('visits',0)
context_dict['visit_count'] = count
return render_to_response('rango/about.html', context_dict , context)
def category(request, category_name_url):
context = RequestContext(request)
category_name = decode_url(category_name_url)
context_dict = {'category_name': category_name, 'category_name_url': category_name_url}
cat_list = get_category_list()
context_dict['cat_list'] = cat_list
try:
category = Category.objects.get(name__iexact=category_name)
context_dict['category'] = category
pages = Page.objects.filter(category=category).order_by('-views')
context_dict['pages'] = pages
except Category.DoesNotExist:
pass
if request.method == 'POST':
query = request.POST.get('query')
if query:
query = query.strip()
result_list = run_query(query)
context_dict['result_list'] = result_list
return render_to_response('rango/category.html', context_dict, context)
def add_category(request):
context = RequestContext(request)
cat_list = get_category_list()
context_dict = {}
context_dict['cat_list'] = cat_list
if request.method == 'POST':
form = CategoryForm(request.POST)
if form.is_valid():
form.save(commit=True)
return index(request)
else:
print form.errors
else:
form = CategoryForm()
context_dict['form'] = form
return render_to_response('rango/add_category.html', context_dict, context)
@login_required
def add_page(request, category_name_url):
context = RequestContext(request)
cat_list = get_category_list()
context_dict = {}
context_dict['cat_list'] = cat_list
category_name = decode_url(category_name_url)
if request.method == 'POST':
form = PageForm(request.POST)
if form.is_valid():
page = form.save(commit=False)
try:
cat = Category.objects.get(name=category_name)
page.category = cat
except Category.DoesNotExist:
                return render_to_response('rango/add_page.html', context_dict, context)
page.views = 0
page.save()
return category(request, category_name_url)
else:
print form.errors
else:
form = PageForm()
context_dict['category_name_url']= category_name_url
context_dict['category_name'] = category_name
context_dict['form'] = form
return render_to_response('rango/add_page.html', context_dict, context)
def register(request):
context = RequestContext(request)
cat_list = get_category_list()
context_dict = {}
context_dict['cat_list'] = cat_list
registered = False
if request.method == 'POST':
user_form = UserForm(data=request.POST)
profile_form = UserProfileForm(data=request.POST)
if user_form.is_valid() and profile_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
profile = profile_form.save(commit=False)
profile.user = user
if 'picture' in request.FILES:
profile.picture = request.FILES['picture']
profile.save()
registered = True
else:
print user_form.errors, profile_form.errors
else:
user_form = UserForm()
profile_form = UserProfileForm()
context_dict['user_form'] = user_form
context_dict['profile_form']= profile_form
context_dict['registered'] = registered
return render_to_response('rango/register.html', context_dict, context)
def user_login(request):
context = RequestContext(request)
cat_list = get_category_list()
context_dict = {}
context_dict['cat_list'] = cat_list
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect(reverse('index'))
else:
context_dict['disabled_account'] = True
return render_to_response('rango/login.html', context_dict, context)
else:
print "Invalid login details: {0}, {1}".format(username, password)
context_dict['bad_details'] = True
return render_to_response('rango/login.html', context_dict, context)
else:
return render_to_response('rango/login.html', context_dict, context)
@login_required
def restricted(request):
context = RequestContext(request)
cat_list = get_category_list()
context_dict = {}
context_dict['cat_list'] = cat_list
return HttpResponse("Since you're logged in, you can see this text!")
@login_required
def user_logout(request):
logout(request)
return HttpResponseRedirect(reverse('index'))
def search(request):
context = RequestContext(request)
cat_list = get_category_list()
context_dict = {}
context_dict['cat_list'] = cat_list
result_list = []
if request.method == 'POST':
query = request.POST['query'].strip()
if query:
result_list = run_query(query)
context_dict['result_list'] = result_list
return render_to_response('rango/search.html', context_dict, context)
@login_required
def profile(request):
context = RequestContext(request)
cat_list = get_category_list()
context_dict = {'cat_list': cat_list}
    u = User.objects.get(username=request.user)
    try:
        up = UserProfile.objects.get(user=u)
    except UserProfile.DoesNotExist:
        up = None
context_dict['user'] = u
context_dict['userprofile'] = up
return render_to_response('rango/profile.html', context_dict, context)
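# The view below is typically mapped to a "goto" URL such as
# /rango/goto/?page_id=<page-url> (the exact route is an assumption here);
# it bumps the page's view counter and then redirects to the page.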
def track_url(request):
context = RequestContext(request)
page_id = None
url = reverse('index')
if request.method == 'GET':
if 'page_id' in request.GET:
page_id = request.GET['page_id']
            try:
                page = Page.objects.get(url__iexact=page_id)
                page.views = page.views + 1
                page.save()
                url = page.url
            except Page.DoesNotExist, e:
                # ValueError is never raised here; the realistic failure is a
                # missing Page.
                print e
return redirect(url)
@login_required
def like_category(request):
context = RequestContext(request)
cat_id = None
if request.method == 'GET':
cat_id = request.GET['category_id']
likes = 0
if cat_id:
category = Category.objects.get(id=int(cat_id))
if category:
likes = category.likes + 1
category.likes = likes
category.save()
return HttpResponse(likes)
def suggest_category(request):
context = RequestContext(request)
cat_list = []
starts_with = ''
if request.method == 'GET':
starts_with = request.GET['suggestion']
else:
starts_with = request.POST['suggestion']
cat_list = get_category_list(8, starts_with)
return render_to_response('rango/category_list.html', {'cat_list': cat_list }, context)
@login_required
def auto_add_page(request):
context = RequestContext(request)
cat_id = None
url = None
title = None
context_dict = {}
if request.method == 'GET':
cat_id = request.GET['category_id']
url = request.GET['url']
title = request.GET['title']
if cat_id:
category = Category.objects.get(id=int(cat_id))
p = Page.objects.get_or_create(category=category, title=title, url=url)
pages = Page.objects.filter(category=category).order_by('-views')
context_dict['pages'] = pages
return render_to_response('rango/page_list.html', context_dict, context)
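# A minimal sketch of how these views could be wired up in the app's urls.py.
# The URL patterns and names below are illustrative assumptions, not taken
# from this file:
#
# from django.conf.urls import patterns, url
# from rango import views
#
# urlpatterns = patterns('',
#     url(r'^register/$', views.register, name='register'),
#     url(r'^login/$', views.user_login, name='login'),
#     url(r'^restricted/$', views.restricted, name='restricted'),
#     url(r'^logout/$', views.user_logout, name='logout'),
#     url(r'^search/$', views.search, name='search'),
#     url(r'^goto/$', views.track_url, name='track_url'),
#     url(r'^suggest_category/$', views.suggest_category, name='suggest_category'),
#     url(r'^auto_add_page/$', views.auto_add_page, name='auto_add_page'),
# )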
|
|
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from boot_config import *
import os  # used by Scanner.start_scan (os.walk)
import re
import webbrowser
from functools import partial
from distutils.version import LooseVersion
from os.path import join, basename, splitext, isfile
from pprint import pprint
if QT4: # ___ ______________ DEPENDENCIES __________________________
from PySide.QtCore import Qt, Slot, QObject, Signal, QSize, QPoint, QEvent
from PySide.QtGui import (QApplication, QMessageBox, QIcon, QFileDialog, QTableWidgetItem,
QDialog, QWidget, QMovie, QFont, QMenu, QAction, QTableWidget,
QCheckBox, QToolButton, QActionGroup, QCursor)
else:
from PySide2.QtCore import QObject, Qt, Signal, QPoint, Slot, QSize, QEvent
from PySide2.QtGui import QFont, QMovie, QIcon, QCursor
from PySide2.QtWidgets import (QTableWidgetItem, QTableWidget, QMessageBox,
QApplication, QWidget, QDialog, QFileDialog,
QActionGroup, QMenu, QAction, QToolButton, QCheckBox)
import requests
from bs4 import BeautifulSoup
def _(text): # for future gettext support
return text
__all__ = ("XTableWidgetIntItem", "XTableWidgetPercentItem", "XTableWidgetTitleItem",
"DropTableWidget", "XMessageBox", "About", "AutoInfo", "ToolBar", "TextDialog",
"Status", "LogStream", "Scanner", "HighlightScanner", "ReLoader", "DBLoader",
"XToolButton")
# ___ _______________________ SUBCLASSING ___________________________
class XTableWidgetIntItem(QTableWidgetItem):
""" Sorts numbers writen as strings (after 1 is 2 not 11)
"""
def __lt__(self, value):
try:
return int(self.data(Qt.DisplayRole)) < int(value.data(Qt.DisplayRole))
except ValueError: # no text
this_text = self.data(Qt.DisplayRole)
if not this_text:
this_text = "0"
that_text = value.data(Qt.DisplayRole)
if not that_text:
that_text = "0"
return int(this_text) < int(that_text)
class XTableWidgetPercentItem(QTableWidgetItem):
""" Sorts percentages writen as strings (e.g. 35%)
"""
def __lt__(self, value):
return int(self.data(Qt.DisplayRole)[:-1]) < int(value.data(Qt.DisplayRole)[:-1])
class XTableWidgetTitleItem(QTableWidgetItem):
""" Sorts titles ignoring the leading "A" or "The"
"""
def __lt__(self, value):
t1 = self.data(Qt.DisplayRole).lower()
t1 = (t1[2:] if t1.startswith("a ") else
t1[4:] if t1.startswith("the ") else
t1[3:] if t1.startswith("an ") else t1)
t2 = value.data(Qt.DisplayRole).lower()
t2 = (t2[2:] if t2.startswith("a ") else
t2[4:] if t2.startswith("the ") else
t2[3:] if t2.startswith("an ") else t2)
return t1 < t2
class DropTableWidget(QTableWidget):
fileDropped = Signal(list)
def __init__(self, parent=None):
super(DropTableWidget, self).__init__(parent)
# noinspection PyArgumentList
self.app = QApplication.instance()
def dragEnterEvent(self, event):
        if event.mimeData().hasUrls() and not self.app.base.db_mode:
event.accept()
return True
else:
event.ignore()
return False
def dragMoveEvent(self, event):
        if event.mimeData().hasUrls():
event.accept()
return True
else:
event.ignore()
return False
def dropEvent(self, event):
        if event.mimeData().hasUrls():
links = []
for url in event.mimeData().urls():
links.append(url.toLocalFile())
self.fileDropped.emit(links)
event.accept()
return True
else:
event.ignore()
return False
class XMessageBox(QMessageBox):
""" A QMessageBox with a QCheckBox
"""
def __init__(self, parent=None):
super(XMessageBox, self).__init__(parent)
self.check_box = QCheckBox()
self.checked = False
if QT4:
# Access the Layout of the MessageBox to add a Checkbox
layout = self.layout()
layout.addWidget(self.check_box, 1, 1)
else:
self.setCheckBox(self.check_box)
def exec_(self, *args, **kwargs):
""" Override the exec_ method so
you can return the value of the checkbox
"""
return QMessageBox.exec_(self, *args, **kwargs), self.check_box.isChecked()
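# Usage sketch for the overridden exec_ (the texts below are illustrative):
#
#   box = XMessageBox()
#   box.setText(_("Delete the selected files?"))
#   box.check_box.setText(_("Do not ask again"))
#   answer, checked = box.exec_()  # returns (button result, checkbox state)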
class XToolButton(QToolButton):
right_clicked = Signal()
def __init__(self, parent=None):
super(XToolButton, self).__init__(parent)
self.installEventFilter(self)
# def mousePressEvent(self, QMouseEvent):
# if QMouseEvent.button() == Qt.RightButton:
# # do what you want here
# print("Right Button Clicked")
# QMouseEvent.accept()
def eventFilter(self, obj, event):
if obj.objectName() == "db_btn":
if event.type() == QEvent.ContextMenu:
self.right_clicked.emit()
return True
else:
return False
else:
# pass the event on to the parent class
return QToolButton.eventFilter(self, obj, event)
# ___ _______________________ WORKERS _______________________________
class LogStream(QObject):
append_to_log = Signal(str)
# def __init__(self):
# super(LogStream, self).__init__()
# # noinspection PyArgumentList
# self.base = QtGui.QApplication.instance().base
def write(self, text):
self.append_to_log.emit(text)
class Scanner(QObject):
found = Signal(unicode)
finished = Signal()
def __init__(self, path):
super(Scanner, self).__init__()
self.path = path
def process(self):
self.start_scan()
self.finished.emit()
def start_scan(self):
try:
for dir_tuple in os.walk(self.path):
dir_path = dir_tuple[0]
if dir_path.lower().endswith(".sdr"): # a book's metadata folder
if dir_path.lower().endswith("evernote.sdr"):
continue
for file_ in dir_tuple[2]: # get the .lua file not the .old (backup)
if splitext(file_)[1].lower() == ".lua":
self.found.emit(join(dir_path, file_))
# older metadata storage or android history folder
elif (dir_path.lower().endswith(join("koreader", "history"))
or basename(dir_path).lower() == "history"):
for file_ in dir_tuple[2]:
if splitext(file_)[1].lower() == ".lua":
self.found.emit(join(dir_path, file_))
continue
except UnicodeDecodeError: # os.walk error
pass
class ReLoader(QObject):
found = Signal(unicode)
finished = Signal()
def __init__(self, paths):
super(ReLoader, self).__init__()
self.paths = paths
def process(self):
for path in self.paths:
self.found.emit(path)
self.finished.emit()
class DBLoader(QObject):
found = Signal(unicode, dict, unicode)
finished = Signal()
def __init__(self, books):
super(DBLoader, self).__init__()
self.books = books
def process(self):
for book in self.books:
self.found.emit(book["path"], book["data"], book["date"])
self.finished.emit()
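# These QObject workers are meant to run inside their own QThread, following
# the usual Qt worker pattern. A minimal sketch of the wiring a caller might
# use (the thread setup and the slot name are assumptions, not from this file):
#
#   thread = QThread()
#   scanner = Scanner(path)
#   scanner.moveToThread(thread)
#   thread.started.connect(scanner.process)
#   scanner.found.connect(on_metadata_found)  # hypothetical slot
#   scanner.finished.connect(thread.quit)
#   thread.start()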
class HighlightScanner(QObject):
found = Signal(dict)
finished = Signal()
def __init__(self):
super(HighlightScanner, self).__init__()
# noinspection PyArgumentList
self.base = QApplication.instance().base
def process(self):
for row in range(self.base.file_table.rowCount()):
data = self.base.file_table.item(row, TITLE).data(Qt.UserRole)
path = self.base.file_table.item(row, TYPE).data(Qt.UserRole)[0]
self.get_book_highlights(data, path)
self.finished.emit()
def get_book_highlights(self, data, path):
""" Finds all the highlights from a book
:type data: dict
:param data: The book data (converted from the lua file)
:type path: str|unicode
:param path: The book path
"""
highlights = self.base.parse_highlights(data, path)
for highlight in highlights:
self.found.emit(highlight)
# ___ _______________________ GUI STUFF _____________________________
from gui_about import Ui_About
from gui_auto_info import Ui_AutoInfo
from gui_toolbar import Ui_ToolBar
from gui_status import Ui_Status
from gui_edit import Ui_TextDialog
class ToolBar(QWidget, Ui_ToolBar):
def __init__(self, parent=None):
super(ToolBar, self).__init__(parent)
self.setupUi(self)
self.base = parent
self.buttons = (self.check_btn, self.scan_btn, self.export_btn, self.open_btn,
self.merge_btn, self.delete_btn, self.clear_btn, self.about_btn,
self.books_view_btn, self.high_view_btn)
self.size_menu = self.create_size_menu()
for btn in [self.loaded_btn, self.db_btn,
self.books_view_btn, self.high_view_btn]:
btn.clicked.connect(self.change_view)
self.check_btn.clicked.connect(parent.on_check_btn)
self.check_btn.hide()
@Slot(QPoint)
def on_tool_frame_customContextMenuRequested(self, point):
""" The Toolbar is right-clicked
:type point: QPoint
:param point: The point where the right-click happened
"""
self.size_menu.exec_(self.tool_frame.mapToGlobal(point))
def create_size_menu(self):
""" Create the toolbar's buttons size menu
"""
menu = QMenu(self)
group = QActionGroup(self)
sizes = (_("Tiny"), 16), (_("Small"), 32), (_("Medium"), 48), (_("Big"), 64)
for name, size in sizes:
action = QAction(name, menu)
action.setCheckable(True)
if size == self.base.toolbar_size:
action.setChecked(True)
action.triggered.connect(partial(self.set_btn_size, size))
group.addAction(action)
menu.addAction(action)
return menu
def set_btn_size(self, size):
""" Changes the Toolbar's icons size
:type size: int
:param size: The Icons' size preset
"""
self.base.toolbar_size = size
button_size = QSize(size, size)
        half_size = QSize(size // 2, size // 2)  # QSize expects ints; true division is active via __future__
for btn in self.buttons:
btn.setMinimumWidth(size + 10)
btn.setIconSize(button_size)
for btn in [self.loaded_btn, self.db_btn]:
# btn.setMinimumWidth(size + 10)
btn.setIconSize(half_size)
# noinspection PyArgumentList
QApplication.processEvents()
@Slot()
def on_scan_btn_clicked(self):
""" The `Scan Directory` button is pressed
"""
path = QFileDialog.getExistingDirectory(self.base,
_("Select a directory with books or "
"your eReader's drive"),
self.base.last_dir,
QFileDialog.ShowDirsOnly)
if path:
self.base.last_dir = path
self.base.high_list.clear()
self.base.reload_highlights = True
self.base.loading_thread(Scanner, path, self.base.kor_text, clear=False)
@Slot()
def on_export_btn_clicked(self):
""" The `Export` button is pressed
"""
self.base.on_export()
@Slot()
def on_open_btn_clicked(self):
""" The `Open Book` button is pressed
"""
if self.base.current_view == BOOKS_VIEW:
try:
idx = self.base.sel_indexes[-1]
except IndexError: # nothing selected
return
item = self.base.file_table.item(idx.row(), 0)
self.base.on_file_table_itemDoubleClicked(item)
if self.base.current_view == HIGHLIGHTS_VIEW:
try:
idx = self.base.sel_high_view[-1]
except IndexError: # nothing selected
return
data = self.base.high_table.item(idx.row(), HIGHLIGHT_H).data(Qt.UserRole)
self.base.open_file(data["path"])
@Slot()
def on_merge_btn_clicked(self):
""" The `Merge` button is pressed
"""
data = [self.base.file_table.item(idx.row(), idx.column()).data(Qt.UserRole)
for idx in self.base.sel_indexes]
if self.base.same_cre_version(data):
self.base.on_merge_highlights()
else:
self.base.wrong_cre_version()
@Slot()
def on_delete_btn_clicked(self):
""" The `Delete` button is pressed
"""
self.base.delete_actions(0)
@Slot()
def on_clear_btn_clicked(self):
""" The `Clear List` button is pressed
"""
if self.base.current_view == HIGHLIGHTS_VIEW:
(self.base.high_table.model() # clear Books view too
.removeRows(0, self.base.high_table.rowCount()))
self.base.loaded_paths.clear()
self.base.reload_highlights = True
self.base.file_table.model().removeRows(0, self.base.file_table.rowCount())
self.activate_buttons()
@Slot()
def on_db_btn_right_clicked(self):
""" The context menu of the "Archived" button is pressed
"""
menu = self.create_db_menu()
# noinspection PyArgumentList
menu.exec_(QCursor.pos())
def create_db_menu(self):
""" Create the database menu
"""
menu = QMenu(self)
action = QAction(_("Create new database"), menu)
action.setIcon(self.base.ico_db_add)
action.triggered.connect(partial(self.base.change_db, NEW_DB))
menu.addAction(action)
action = QAction(_("Reload database"), menu)
action.setIcon(self.base.ico_refresh)
action.triggered.connect(partial(self.base.change_db, RELOAD_DB))
menu.addAction(action)
action = QAction(_("Change database"), menu)
action.setIcon(self.base.ico_db_open)
action.triggered.connect(partial(self.base.change_db, CHANGE_DB))
menu.addAction(action)
return menu
def change_view(self):
""" Changes what is shown in the app
"""
new = self.update_archived() if self.db_btn.isChecked() else self.update_loaded()
if self.books_view_btn.isChecked(): # Books view
# self.add_btn_menu(self.base.toolbar.export_btn)
if self.base.sel_idx:
item = self.base.file_table.item(self.base.sel_idx.row(),
self.base.sel_idx.column())
self.base.on_file_table_itemClicked(item, reset=False)
else: # Highlights view
for btn in [self.base.toolbar.export_btn, self.base.toolbar.delete_btn]:
self.remove_btn_menu(btn)
if self.base.reload_highlights and not new:
self.base.scan_highlights_thread()
self.base.current_view = (BOOKS_VIEW if self.books_view_btn.isChecked()
else HIGHLIGHTS_VIEW)
self.base.views.setCurrentIndex(self.base.current_view)
self.setup_buttons()
self.activate_buttons()
def update_loaded(self):
""" Reloads the previously scanned metadata
"""
if self.base.db_mode:
self.base.db_mode = False
self.base.reload_highlights = True
self.base.loading_thread(ReLoader, self.base.books2reload, self.base.kor_text)
return True
def update_archived(self):
""" Reloads the archived metadata from the db
"""
if not self.base.db_mode:
self.base.books2reload = self.base.loaded_paths.copy()
self.base.db_mode = True
self.base.reload_highlights = True
self.base.read_books_from_db()
text = _("Loading {} database").format(APP_NAME)
self.base.loading_thread(DBLoader, self.base.books, text)
if not len(self.base.books): # no books in the db
text = _('There are no books currently in the archive.\nTo add/'
'update one or more books, select them in the "Loaded" '
'view and in their right-click menu, press "Archive".')
self.base.popup(_("Info"), text, icon=QMessageBox.Question)
return True
def setup_buttons(self):
""" Shows/Hides toolbar's buttons based on the view selected
"""
books_view = self.books_view_btn.isChecked()
db_mode = self.db_btn.isChecked()
self.scan_btn.setVisible(not db_mode)
self.merge_btn.setVisible(books_view and not db_mode)
self.delete_btn.setVisible(books_view)
self.clear_btn.setVisible(not db_mode)
if self.base.db_mode:
self.remove_btn_menu(self.base.toolbar.delete_btn)
else:
self.add_btn_menu(self.base.toolbar.delete_btn)
self.base.status.setVisible(books_view)
def activate_buttons(self):
""" Enables/Disables toolbar's buttons based on selection/view
"""
if self.base.high_table.isVisible(): # Highlights view
try:
idx = self.base.sel_high_view[-1]
except IndexError:
idx = None
count = self.base.high_table.rowCount()
else:
idx = self.base.sel_idx
count = self.base.file_table.rowCount()
if idx:
row = idx.row()
if self.base.high_table.isVisible(): # Highlights view
data = self.base.high_table.item(row, HIGHLIGHT_H).data(Qt.UserRole)
book_exists = isfile(data["path"])
else:
book_exists = self.base.file_table.item(row, TYPE).data(Qt.UserRole)[1]
else:
book_exists = False
self.export_btn.setEnabled(bool(idx))
self.open_btn.setEnabled(book_exists)
self.delete_btn.setEnabled(bool(idx))
self.clear_btn.setEnabled(bool(count))
self.merge_btn.setEnabled(False)
if len(self.base.sel_indexes) == 2: # check if we can sync/merge
idx1, idx2 = self.base.sel_indexes
data1 = self.base.file_table.item(idx1.row(), idx1.column()).data(Qt.UserRole)
path1 = self.base.file_table.item(idx1.row(), TYPE).data(Qt.UserRole)[0]
data2 = self.base.file_table.item(idx2.row(), idx2.column()).data(Qt.UserRole)
path2 = self.base.file_table.item(idx2.row(), TYPE).data(Qt.UserRole)[0]
self.merge_btn.setEnabled(self.base.same_book(data1, data2, path1, path2))
@staticmethod
def add_btn_menu(btn):
""" Adds a menu arrow to a toolbar button
:type btn: QToolButton
:param btn: The button to change
"""
btn.setStyleSheet("")
btn.setPopupMode(QToolButton.MenuButtonPopup)
@staticmethod
def remove_btn_menu(btn):
""" Removes the menu arrow from a toolbar button
:type btn: QToolButton
:param btn: The button to change
"""
btn.setStyleSheet("QToolButton::menu-indicator{width:0px;}")
btn.setPopupMode(QToolButton.DelayedPopup)
@Slot()
def on_about_btn_clicked(self):
""" The `About` button is pressed
"""
self.base.about.create_text()
self.base.about.show()
class About(QDialog, Ui_About):
def __init__(self, parent=None):
super(About, self).__init__(parent)
self.setupUi(self)
# Remove the question mark widget from dialog
self.setWindowFlags(self.windowFlags() ^
Qt.WindowContextHelpButtonHint)
self.setWindowTitle(_("About {}").format(APP_NAME))
self.base = parent
@Slot()
def on_about_qt_btn_clicked(self):
""" The `About Qt` button is pressed
"""
# noinspection PyCallByClass
QMessageBox.aboutQt(self, title=_("About Qt"))
@Slot()
def on_updates_btn_clicked(self):
""" The `Check for Updates` button is pressed
"""
self.check_for_updates()
def check_for_updates(self):
""" Checks the web site for the current version
"""
version_new = self.get_online_version()
if not version_new:
self.base.popup(_("No response!"), _("Version info is unreachable!\n"
"Please, try again later..."), buttons=1)
return
version = LooseVersion(self.base.version)
if version_new > version:
popup = self.base.popup(_("Newer version exists!"),
_("There is a newer version (v.{}) online.\n"
"Open the site to download it now?")
.format(version_new),
icon=QMessageBox.Information, buttons=2)
if popup.clickedButton().text() == "OK":
webbrowser.open("http://www.noembryo.com/apps.php?katalib")
self.close()
elif version_new == version:
self.base.popup(_("No newer version exists!"),
_("{} is up to date (v.{})").format(APP_NAME, version),
icon=QMessageBox.Information, buttons=1)
elif version_new < version:
self.base.popup(_("No newer version exists!"),
_("It seems that you are using a newer version ({0})\n"
"than the one online ({1})!").format(version, version_new),
icon=QMessageBox.Question, buttons=1)
@staticmethod
def get_online_version():
header = {"User-Agent": "Mozilla/5.0 (Windows NT 5.1; rv:14.0) "
"Gecko/20100101 Firefox/24.0.1",
"Referer": "http://whateveritis.com"}
url = "http://www.noembryo.com/apps.php?kohighlights"
try:
html_text = requests.get(url, headers=header).content
except requests.exceptions.ConnectionError:
return
soup_text = BeautifulSoup(html_text, "html.parser")
results = soup_text.findAll(name="p")
results = "".join([str(i) for i in results])
match = re.search(r"\d+\.\d+\.\d+\.\d+", results, re.DOTALL)
try:
version_new = match.group(0)
except AttributeError: # no match found
version_new = "0.0.0.0"
return LooseVersion(version_new)
def create_text(self):
# color = self.palette().color(QPalette.WindowText).name() # for links
splash = ":/stuff/logo.png"
paypal = ":/stuff/paypal.png"
info = _("""<body style="font-size:10pt; font-weight:400; font-style:normal">
<center>
<table width="100%" border="0">
<tr>
<p align="center"><img src="{0}" width="256" height ="212"></p>
<p align="center"><b>{3}</b> is a utility for viewing
<a href="https://github.com/koreader/koreader">Koreader</a>'s
highlights<br/>and/or export them to simple text</p>
<p align="center">Version {1}</p>
<p align="center">Visit
<a href="https://github.com/noEmbryo/KoHighlights">
{3} page at GitHub</a>, or</p>
<p align="center"><a href="http://www.noEmbryo.com"> noEmbryo's page</a>
with more Apps and stuff...</p>
<p align="center">Use it and if you like it, consider to
<p align="center"><a href="https://www.paypal.com/cgi-bin/webscr?
cmd=_s-xclick &hosted_button_id=RBYLVRYG9RU2S">
<img src="{2}" alt="PayPal Button"
width="142" height="27" border="0"></a></p>
<p align="center"> </p></td>
</tr>
</table>
</center>
</body>""").format(splash, self.base.version, paypal, APP_NAME)
self.text_lbl.setText(info)
class AutoInfo(QDialog, Ui_AutoInfo):
def __init__(self, parent=None):
super(AutoInfo, self).__init__(parent)
self.setupUi(self)
# Remove the question mark widget from dialog
# self.setWindowFlags(self.windowFlags() ^
# Qt.WindowContextHelpButtonHint)
self.setWindowFlags(Qt.Tool | Qt.FramelessWindowHint)
self.hide()
font = QFont()
font.setBold(True)
        font.setPointSize(QFont().pointSize() + 3)
self.label.setFont(font)
def set_text(self, text):
self.label.setText(text)
class TextDialog(QDialog, Ui_TextDialog):
def __init__(self, parent=None):
super(TextDialog, self).__init__(parent)
# Remove the question mark widget from dialog
self.setWindowFlags(self.windowFlags() ^ Qt.WindowContextHelpButtonHint)
self.setupUi(self)
self.base = parent
self.on_ok = None
@Slot()
def on_ok_btn_clicked(self):
""" The OK button is pressed
"""
self.on_ok()
class Status(QWidget, Ui_Status):
def __init__(self, parent=None):
super(Status, self).__init__(parent)
self.setupUi(self)
self.base = parent
self.wait_anim = QMovie(":/stuff/wait.gif")
self.anim_lbl.setMovie(self.wait_anim)
self.anim_lbl.hide()
self.show_menu = QMenu(self)
for i in [self.act_page, self.act_date, self.act_text, self.act_comment]:
self.show_menu.addAction(i)
# noinspection PyUnresolvedReferences
i.triggered.connect(self.on_show_items)
i.setChecked(True)
sort_menu = QMenu(self)
ico_sort = QIcon(":/stuff/sort.png")
group = QActionGroup(self)
action = QAction(_("Date"), sort_menu)
action.setCheckable(True)
action.setChecked(not self.base.high_by_page)
action.triggered.connect(self.base.set_highlight_sort)
action.setData(False)
group.addAction(action)
sort_menu.addAction(action)
action = QAction(_("Page"), sort_menu)
action.setCheckable(True)
action.setChecked(self.base.high_by_page)
action.triggered.connect(self.base.set_highlight_sort)
action.setData(True)
group.addAction(action)
sort_menu.addAction(action)
sort_menu.setIcon(ico_sort)
sort_menu.setTitle(_("Sort by"))
self.show_menu.addMenu(sort_menu)
self.show_items_btn.setMenu(self.show_menu)
def on_show_items(self):
""" Show/Hide elements of the highlight info
"""
try:
idx = self.base.file_table.selectionModel().selectedRows()[-1]
except IndexError: # nothing selected
return
item = self.base.file_table.item(idx.row(), 0)
self.base.on_file_table_itemClicked(item)
def animation(self, run):
""" Creates or deletes temporary files and folders
:type run: bool
:param run: Start/stop animation
"""
# if action == "start":
if run:
self.anim_lbl.show()
self.wait_anim.start()
else:
self.anim_lbl.hide()
self.wait_anim.stop()
# if __name__ == "__main__":
# with open("secondary.py", str("r")) as py_text:
# import re
# script = py_text.read()
# result = tuple(re.findall(r"class (.+)\(", script))
# print("__all__ = {}".format(result))
|
|
############################################################################
# Copyright(c) Open Law Library. All rights reserved. #
# See ThirdPartyNotices.txt in the project root for additional notices. #
# #
# Licensed under the Apache License, Version 2.0 (the "License") #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http: // www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import asyncio
import logging
import re
import sys
from concurrent.futures import Future, ThreadPoolExecutor
from multiprocessing.pool import ThreadPool
from threading import Event
from typing import Any, Callable, List, Optional, TypeVar
from pygls import IS_WIN
from pygls.lsp.types import (ApplyWorkspaceEditResponse, ClientCapabilities, ConfigCallbackType,
ConfigurationParams, Diagnostic, MessageType, RegistrationParams,
ServerCapabilities, TextDocumentSyncKind, UnregistrationParams,
WorkspaceEdit)
from pygls.protocol import LanguageServerProtocol
from pygls.workspace import Workspace
logger = logging.getLogger(__name__)
F = TypeVar('F', bound=Callable)
async def aio_readline(loop, executor, stop_event, rfile, proxy):
"""Reads data from stdin in separate thread (asynchronously)."""
CONTENT_LENGTH_PATTERN = re.compile(rb'^Content-Length: (\d+)\r\n$')
# Initialize message buffer
message = []
content_length = 0
while not stop_event.is_set() and not rfile.closed:
# Read a header line
header = await loop.run_in_executor(executor, rfile.readline)
if not header:
break
message.append(header)
# Extract content length if possible
if not content_length:
match = CONTENT_LENGTH_PATTERN.fullmatch(header)
if match:
content_length = int(match.group(1))
logger.debug('Content length: %s', content_length)
# Check if all headers have been read (as indicated by an empty line \r\n)
if content_length and not header.strip():
# Read body
body = await loop.run_in_executor(executor, rfile.read, content_length)
if not body:
break
message.append(body)
# Pass message to language server protocol
proxy(b''.join(message))
# Reset the buffer
message = []
content_length = 0
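# For reference, a single framed message on the wire looks like this (the JSON
# body is an illustrative LSP example, not produced by this function):
#
#   Content-Length: 52\r\n
#   \r\n
#   {"jsonrpc": "2.0", "method": "initialized", ...}
#
# aio_readline() collects the header lines, extracts Content-Length, reads
# exactly that many bytes of body, and hands the complete frame to `proxy`.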
class StdOutTransportAdapter:
"""Protocol adapter which overrides write method.
Write method sends data to stdout.
"""
def __init__(self, rfile, wfile):
self.rfile = rfile
self.wfile = wfile
def close(self):
self.rfile.close()
self.wfile.close()
def write(self, data):
self.wfile.write(data)
self.wfile.flush()
class Server:
"""Class that represents async server. It can be started using TCP or IO.
Args:
protocol_cls(Protocol): Protocol implementation that must be derived
from `asyncio.Protocol`
loop(AbstractEventLoop): asyncio event loop
max_workers(int, optional): Number of workers for `ThreadPool` and
`ThreadPoolExecutor`
sync_kind(TextDocumentSyncKind): Text document synchronization option
- NONE(0): no synchronization
- FULL(1): replace whole text
- INCREMENTAL(2): replace text within a given range
Attributes:
_max_workers(int): Number of workers for thread pool executor
_server(Server): Server object which can be used to stop the process
_stop_event(Event): Event used for stopping `aio_readline`
_thread_pool(ThreadPool): Thread pool for executing methods decorated
with `@ls.thread()` - lazy instantiated
_thread_pool_executor(ThreadPoolExecutor): Thread pool executor
passed to `run_in_executor`
- lazy instantiated
"""
def __init__(self, protocol_cls, loop=None, max_workers=2,
sync_kind=TextDocumentSyncKind.INCREMENTAL):
if not issubclass(protocol_cls, asyncio.Protocol):
raise TypeError('Protocol class should be subclass of asyncio.Protocol')
self._max_workers = max_workers
self._server = None
self._stop_event = None
self._thread_pool = None
self._thread_pool_executor = None
self.sync_kind = sync_kind
if IS_WIN:
asyncio.set_event_loop(asyncio.ProactorEventLoop())
else:
asyncio.set_event_loop(asyncio.SelectorEventLoop())
self.loop = loop or asyncio.get_event_loop()
try:
asyncio.get_child_watcher().attach_loop(self.loop)
except NotImplementedError:
pass
self.lsp = protocol_cls(self)
def shutdown(self):
"""Shutdown server."""
logger.info('Shutting down the server')
self._stop_event.set()
if self._thread_pool:
self._thread_pool.terminate()
self._thread_pool.join()
if self._thread_pool_executor:
self._thread_pool_executor.shutdown()
if self._server:
self._server.close()
self.loop.run_until_complete(self._server.wait_closed())
logger.info('Closing the event loop.')
self.loop.close()
def start_io(self, stdin=None, stdout=None):
"""Starts IO server."""
logger.info('Starting IO server')
self._stop_event = Event()
transport = StdOutTransportAdapter(stdin or sys.stdin.buffer,
stdout or sys.stdout.buffer)
self.lsp.connection_made(transport)
try:
self.loop.run_until_complete(
aio_readline(self.loop,
self.thread_pool_executor,
self._stop_event,
stdin or sys.stdin.buffer,
self.lsp.data_received))
except BrokenPipeError:
logger.error('Connection to the client is lost! Shutting down the server.')
except (KeyboardInterrupt, SystemExit):
pass
finally:
self.shutdown()
def start_tcp(self, host, port):
"""Starts TCP server."""
logger.info('Starting server on %s:%s', host, port)
self._stop_event = Event()
self._server = self.loop.run_until_complete(
self.loop.create_server(self.lsp, host, port)
)
try:
self.loop.run_forever()
except (KeyboardInterrupt, SystemExit):
pass
finally:
self.shutdown()
@property
def thread_pool(self) -> ThreadPool:
"""Returns thread pool instance (lazy initialization)."""
if not self._thread_pool:
self._thread_pool = ThreadPool(processes=self._max_workers)
return self._thread_pool
@property
def thread_pool_executor(self) -> ThreadPoolExecutor:
"""Returns thread pool instance (lazy initialization)."""
if not self._thread_pool_executor:
self._thread_pool_executor = \
ThreadPoolExecutor(max_workers=self._max_workers)
return self._thread_pool_executor
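# A minimal usage sketch (the subclass choice, host and port are illustrative):
#
#   server = LanguageServer()
#   server.start_io()                    # serve over stdin/stdout, or
#   server.start_tcp('127.0.0.1', 2087)  # listen on a TCP socket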
class LanguageServer(Server):
"""A class that represents Language server using Language Server Protocol.
This class can be extended and it can be passed as a first argument to
registered commands/features.
Args:
protocol_cls(LanguageServerProtocol): LSP or any subclass of it
max_workers(int, optional): Number of workers for `ThreadPool` and
`ThreadPoolExecutor`
"""
def __init__(self, loop=None, protocol_cls=LanguageServerProtocol, max_workers: int = 2):
if not issubclass(protocol_cls, LanguageServerProtocol):
raise TypeError('Protocol class should be subclass of LanguageServerProtocol')
super().__init__(protocol_cls, loop, max_workers)
    def apply_edit(self, edit: WorkspaceEdit, label: Optional[str] = None) -> ApplyWorkspaceEditResponse:
"""Sends apply edit request to the client."""
return self.lsp.apply_edit(edit, label)
def command(self, command_name: str) -> Callable[[F], F]:
"""Decorator used to register custom commands.
Example:
@ls.command('myCustomCommand')
def my_cmd(ls, a, b, c):
pass
"""
return self.lsp.fm.command(command_name)
@property
def client_capabilities(self) -> ClientCapabilities:
"""Return client capabilities."""
return self.lsp.client_capabilities
def feature(
self, feature_name: str, options: Optional[Any] = None,
) -> Callable[[F], F]:
"""Decorator used to register LSP features.
Example:
@ls.feature('textDocument/completion', triggerCharacters=['.'])
def completions(ls, params: CompletionRequest):
return CompletionList(False, [CompletionItem("Completion 1")])
"""
return self.lsp.fm.feature(feature_name, options)
def get_configuration(self, params: ConfigurationParams,
callback: ConfigCallbackType = None) -> Future:
"""Gets the configuration settings from the client."""
return self.lsp.get_configuration(params, callback)
def get_configuration_async(self, params: ConfigurationParams) -> asyncio.Future:
"""Gets the configuration settings from the client. Should be called with `await`"""
return self.lsp.get_configuration_async(params)
def publish_diagnostics(self, doc_uri: str, diagnostics: List[Diagnostic]):
"""Sends diagnostic notification to the client."""
self.lsp.publish_diagnostics(doc_uri, diagnostics)
def register_capability(self, params: RegistrationParams, callback):
"""Register a new capability on the client."""
return self.lsp.register_capability(params, callback)
def register_capability_async(self, params: RegistrationParams):
"""Register a new capability on the client. Should be called with `await`"""
return self.lsp.register_capability_async(params)
def send_notification(self, method: str, params: object = None) -> None:
"""Sends notification to the client."""
self.lsp.notify(method, params)
@property
def server_capabilities(self) -> ServerCapabilities:
"""Return server capabilities."""
return self.lsp.server_capabilities
def show_message(self, message, msg_type=MessageType.Info) -> None:
"""Sends message to the client to display message."""
self.lsp.show_message(message, msg_type)
def show_message_log(self, message, msg_type=MessageType.Log) -> None:
"""Sends message to the client's output channel."""
self.lsp.show_message_log(message, msg_type)
def thread(self) -> Callable[[F], F]:
"""Decorator that mark function to execute it in a thread."""
return self.lsp.thread()
def unregister_capability(self, params: UnregistrationParams, callback):
"""Unregister a new capability on the client."""
return self.lsp.unregister_capability(params, callback)
def unregister_capability_async(self, params: UnregistrationParams):
"""Unregister a new capability on the client. Should be called with `await`"""
return self.lsp.unregister_capability_async(params)
@property
def workspace(self) -> Workspace:
"""Returns in-memory workspace."""
return self.lsp.workspace
|
|
# pylint: disable=too-many-instance-attributes,too-many-locals
# pylint: disable=too-many-branches,too-many-statements,too-many-arguments
"""Executor group is a convenient tool for managing a group of executors."""
import logging
import numpy as np
from .. import context as ctx
from .. import ndarray as nd
from ..io import DataDesc
from ..executor_manager import _split_input_slice
def _load_general(data, targets, major_axis):
"""Load a list of arrays into a list of arrays specified by slices."""
for d_src, d_targets, axis in zip(data, targets, major_axis):
if isinstance(d_targets, nd.NDArray):
d_src.copyto(d_targets)
elif isinstance(d_src, (list, tuple)):
for src, dst in zip(d_src, d_targets):
src.copyto(dst)
else:
for slice_idx, d_dst in d_targets:
if axis >= 0:
# copy slice
shape = d_src.shape
begin = np.zeros(len(shape), dtype=int)
end = np.array(shape)
begin[axis] = slice_idx.start
end[axis] = slice_idx.stop
# pylint: disable=no-member,protected-access
if d_src.context == d_dst.context:
nd.crop(d_src, begin=tuple(begin), end=tuple(end), out=d_dst)
else:
# on different device, crop and then do cross device copy
d_dst_copy = nd.crop(d_src, begin=tuple(begin), end=tuple(end))
d_dst_copy.copyto(d_dst)
# pylint: enable=no-member,protected-access
else:
d_src.copyto(d_dst)
def _load_data(batch, targets, major_axis):
"""Load data into sliced arrays."""
_load_general(batch.data, targets, major_axis)
def _load_label(batch, targets, major_axis):
"""Load label into sliced arrays."""
_load_general(batch.label, targets, major_axis)
def _merge_multi_context(outputs, major_axis):
"""Merge outputs that lives on multiple context into one, so that they look
like living on one context.
"""
rets = []
for tensors, axis in zip(outputs, major_axis):
if axis >= 0:
# pylint: disable=no-member,protected-access
if len(tensors) == 1:
rets.append(tensors[0])
else:
# Concatenate if necessary
rets.append(nd.concat(*[tensor.as_in_context(tensors[0].context)
for tensor in tensors],
dim=axis))
# pylint: enable=no-member,protected-access
else:
            # negative axis means there is no batch_size axis, and all the
# results should be the same on each device. We simply take the
# first one, without checking they are actually the same
rets.append(tensors[0])
return rets
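# Sketch of the slice/merge round trip these helpers implement (the batch size
# and the two-device setup are illustrative):
#
#   slices = _split_input_slice(8, [1, 1])   # -> [slice(0, 4), slice(4, 8)]
#   # _load_general() copies data[0:4] to device 0 and data[4:8] to device 1;
#   # after the forward pass, _merge_multi_context() concatenates the
#   # per-device outputs back along the batch axis into one (8, ...) NDArray.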
class DataParallelExecutorGroup(object):
"""A group of executors that lives on a group of devices.
This is a helper class used to implement data parallelization. Each mini-batch will
be split and run on the devices.
Parameters
----------
symbol : Symbol
The common symbolic computation graph for all executors.
contexts : list
A list of contexts.
workload : list
If not ``None``, could be a list of numbers that specify the workload to be assigned
        to different contexts. Larger numbers indicate heavier workloads.
data_shapes : list
Should be a list of (name, shape) tuples, for the shapes of data. Note the order is
        important and should be the same as the order in which the `DataIter` provides the data.
label_shapes : list
Should be a list of (name, shape) tuples, for the shapes of label. Note the order is
        important and should be the same as the order in which the `DataIter` provides the label.
param_names : list
A list of strings, indicating the names of parameters (e.g. weights, filters, etc.)
in the computation graph.
for_training : bool
        Indicate whether the executors should be bound for training. When not doing training,
the memory for gradients will not be allocated.
inputs_need_grad : bool
Indicate whether the gradients for the input data should be computed. This is currently
not used. It will be useful for implementing composition of modules.
shared_group : DataParallelExecutorGroup
        Defaults to ``None``. This is used in bucketing. When not ``None``, it should be an
        executor group corresponding to a different bucket. In other words, it will correspond
        to a different symbol but with the same set of parameters (e.g. unrolled RNNs with
        different lengths). In this case, much of the memory will be shared.
logger : Logger
Default is `logging`.
fixed_param_names: list of str
        Parameters to be fixed during training. For these parameters, no gradients
will be calculated and thus no space will be allocated for the gradient.
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
"""
def __init__(self, symbol, contexts, workload, data_shapes, label_shapes, param_names,
for_training, inputs_need_grad, shared_group=None, logger=logging,
fixed_param_names=None, grad_req='write', state_names=None):
self.param_names = param_names
self.arg_names = symbol.list_arguments()
self.aux_names = symbol.list_auxiliary_states()
self.symbol = symbol
self.contexts = contexts
self.workload = workload
self.for_training = for_training
self.inputs_need_grad = inputs_need_grad
self.logger = logger
        # In the future we should have a better way to profile memory per device (haibin)
self._total_exec_bytes = 0
self.fixed_param_names = fixed_param_names
if self.fixed_param_names is None:
self.fixed_param_names = []
self.state_names = state_names
if self.state_names is None:
self.state_names = []
if not for_training:
grad_req = 'null'
data_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in data_shapes]
if label_shapes is not None:
label_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in label_shapes]
data_names = [x.name for x in data_shapes]
if isinstance(grad_req, str):
self.grad_req = {}
for k in self.arg_names:
if k in self.param_names:
self.grad_req[k] = 'null' if k in self.fixed_param_names else grad_req
elif k in data_names:
self.grad_req[k] = grad_req if self.inputs_need_grad else 'null'
else:
self.grad_req[k] = 'null'
elif isinstance(grad_req, (list, tuple)):
assert len(grad_req) == len(self.arg_names)
self.grad_req = dict(zip(self.arg_names, grad_req))
elif isinstance(grad_req, dict):
self.grad_req = {}
for k in self.arg_names:
if k in self.param_names:
self.grad_req[k] = 'null' if k in self.fixed_param_names else 'write'
elif k in data_names:
self.grad_req[k] = 'write' if self.inputs_need_grad else 'null'
else:
self.grad_req[k] = 'null'
self.grad_req.update(grad_req)
else:
raise ValueError("grad_req must be one of str, list, tuple, or dict.")
if shared_group is not None:
self.shared_data_arrays = shared_group.shared_data_arrays
else:
self.shared_data_arrays = [{} for _ in contexts]
# initialize some instance variables
self.batch_size = None
self.slices = None
self.execs = []
self._default_execs = None
self.data_arrays = None
self.label_arrays = None
self.param_arrays = None
self.state_arrays = None
self.grad_arrays = None
self.aux_arrays = None
self.input_grad_arrays = None
self.data_shapes = None
self.label_shapes = None
self.data_layouts = None
self.label_layouts = None
self.output_layouts = [DataDesc.get_batch_axis(self.symbol[name].attr('__layout__'))
for name in self.symbol.list_outputs()]
self.bind_exec(data_shapes, label_shapes, shared_group)
def decide_slices(self, data_shapes):
"""Decide the slices for each context according to the workload.
Parameters
----------
data_shapes : list
list of (name, shape) specifying the shapes for the input data or label.
"""
assert len(data_shapes) > 0
major_axis = [DataDesc.get_batch_axis(x.layout) for x in data_shapes]
for (name, shape), axis in zip(data_shapes, major_axis):
if axis == -1:
continue
batch_size = shape[axis]
if self.batch_size is not None:
assert batch_size == self.batch_size, ("all data must have the same batch size: "
+ ("batch_size = %d, but " % self.batch_size)
+ ("%s has shape %s" % (name, shape)))
else:
self.batch_size = batch_size
self.slices = _split_input_slice(self.batch_size, self.workload)
return major_axis
def _collect_arrays(self):
"""Collect internal arrays from executors."""
# convenient data structures
self.data_arrays = [[(self.slices[i], e.arg_dict[name]) for i, e in enumerate(self.execs)]
for name, _ in self.data_shapes]
self.state_arrays = [[e.arg_dict[name] for e in self.execs]
for name in self.state_names]
if self.label_shapes is not None:
self.label_arrays = [[(self.slices[i], e.arg_dict[name])
for i, e in enumerate(self.execs)]
for name, _ in self.label_shapes]
else:
self.label_arrays = None
self.param_arrays = [[exec_.arg_arrays[i] for exec_ in self.execs]
for i, name in enumerate(self.arg_names)
if name in self.param_names]
if self.for_training:
self.grad_arrays = [[exec_.grad_arrays[i] for exec_ in self.execs]
for i, name in enumerate(self.arg_names)
if name in self.param_names]
else:
self.grad_arrays = None
data_names = [x[0] for x in self.data_shapes]
if self.inputs_need_grad:
self.input_grad_arrays = [[exec_.grad_arrays[self.arg_names.index(name)]
for exec_ in self.execs]
for name in data_names if name in self.arg_names]
else:
self.input_grad_arrays = None
self.aux_arrays = [[exec_.aux_arrays[i] for exec_ in self.execs]
for i in range(len(self.aux_names))]
def bind_exec(self, data_shapes, label_shapes, shared_group=None, reshape=False):
"""Bind executors on their respective devices.
Parameters
----------
data_shapes : list
label_shapes : list
shared_group : DataParallelExecutorGroup
reshape : bool
"""
assert reshape or not self.execs
self.batch_size = None
# calculate workload and bind executors
self.data_layouts = self.decide_slices(data_shapes)
if label_shapes is not None:
            # call it to make sure labels have the same batch size as data
self.label_layouts = self.decide_slices(label_shapes)
for i in range(len(self.contexts)):
data_shapes_i = self._sliced_shape(data_shapes, i, self.data_layouts)
if label_shapes is not None:
label_shapes_i = self._sliced_shape(label_shapes, i, self.label_layouts)
else:
label_shapes_i = []
if reshape:
self.execs[i] = self._default_execs[i].reshape(
allow_up_sizing=True, **dict(data_shapes_i + label_shapes_i))
else:
self.execs.append(self._bind_ith_exec(i, data_shapes_i, label_shapes_i,
shared_group))
self.data_shapes = data_shapes
self.label_shapes = label_shapes
self._collect_arrays()
def reshape(self, data_shapes, label_shapes):
"""Reshape executors.
Parameters
----------
data_shapes : list
label_shapes : list
"""
if data_shapes == self.data_shapes and label_shapes == self.label_shapes:
return
if self._default_execs is None:
self._default_execs = [i for i in self.execs]
self.bind_exec(data_shapes, label_shapes, reshape=True)
def set_params(self, arg_params, aux_params):
"""Assign, i.e. copy parameters to all the executors.
Parameters
----------
arg_params : dict
A dictionary of name to `NDArray` parameter mapping.
aux_params : dict
A dictionary of name to `NDArray` auxiliary variable mapping.
"""
for exec_ in self.execs:
exec_.copy_params_from(arg_params, aux_params)
def get_params(self, arg_params, aux_params):
""" Copy data from each executor to `arg_params` and `aux_params`.
Parameters
----------
        arg_params : dict of str to NDArray
            Target parameter arrays.
        aux_params : dict of str to NDArray
            Target aux arrays.
Notes
-----
        - This function will update the NDArrays in arg_params and aux_params in place.
"""
for name, block in zip(self.param_names, self.param_arrays):
weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
weight.astype(arg_params[name].dtype).copyto(arg_params[name])
for name, block in zip(self.aux_names, self.aux_arrays):
weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
weight.astype(aux_params[name].dtype).copyto(aux_params[name])
def forward(self, data_batch, is_train=None):
"""Split `data_batch` according to workload and run forward on each devices.
Parameters
----------
data_batch : DataBatch
Or could be any object implementing similar interface.
is_train : bool
            The hint for the backend, indicating whether we are in the training phase.
            Default is ``None``, in which case the value of ``self.for_training`` will be used.
        """
_load_data(data_batch, self.data_arrays, self.data_layouts)
if is_train is None:
is_train = self.for_training
if self.label_arrays is not None:
assert not is_train or data_batch.label
if data_batch.label:
_load_label(data_batch, self.label_arrays, self.label_layouts)
for exec_ in self.execs:
exec_.forward(is_train=is_train)
def get_output_shapes(self):
"""Get the shapes of the outputs."""
outputs = self.execs[0].outputs
shapes = [out.shape for out in outputs]
concat_shapes = []
for key, the_shape, axis in zip(self.symbol.list_outputs(), shapes, self.output_layouts):
the_shape = list(the_shape)
if axis >= 0:
the_shape[axis] = self.batch_size
concat_shapes.append((key, tuple(the_shape)))
return concat_shapes
def get_outputs(self, merge_multi_context=True):
"""Get outputs of the previous forward computation.
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the outputs
            will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look as if they came from a
            single executor.
Returns
-------
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`.
"""
outputs = [[exec_.outputs[i] for exec_ in self.execs]
for i in range(len(self.execs[0].outputs))]
if merge_multi_context:
outputs = _merge_multi_context(outputs, self.output_layouts)
return outputs
def get_states(self, merge_multi_context=True):
"""Get states from all devices.
Parameters
----------
merge_multi_context : bool
Default is ``True``. In the case when data-parallelism is used, the states
            will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look as if they came from a
            single executor.
Returns
-------
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`.
"""
assert not merge_multi_context, \
"merge_multi_context=True is not supported for get_states yet."
return self.state_arrays
def set_states(self, states=None, value=None):
"""Set value for states. Only one of states & value can be specified.
Parameters
----------
states : list of list of NDArrays
source states arrays formatted like [[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]].
value : number
a single scalar value for all state arrays.
"""
if states is not None:
assert value is None, "Only one of states & value can be specified."
_load_general(states, self.state_arrays, (0,)*len(states))
else:
assert value is not None, "At least one of states & value must be specified."
assert states is None, "Only one of states & value can be specified."
for d_dst in self.state_arrays:
for dst in d_dst:
dst[:] = value
def get_input_grads(self, merge_multi_context=True):
"""Get the gradients with respect to the inputs of the module.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the outputs
            will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look as if they came from a
            single executor.
Returns
-------
If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements are `NDArray`.
"""
assert self.inputs_need_grad
if merge_multi_context:
return _merge_multi_context(self.input_grad_arrays, self.data_layouts)
return self.input_grad_arrays
def backward(self, out_grads=None):
"""Run backward on all devices. A backward should be called after
a call to the forward function. Backward cannot be called unless
``self.for_training`` is ``True``.
Parameters
----------
out_grads : NDArray or list of NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
"""
assert self.for_training, 're-bind with for_training=True to run backward'
if out_grads is None:
out_grads = []
for i, (exec_, islice) in enumerate(zip(self.execs, self.slices)):
out_grads_slice = []
for grad, axis in zip(out_grads, self.output_layouts):
if axis >= 0:
# pylint: disable=no-member
og_my_slice = nd.slice_axis(grad, axis=axis, begin=islice.start,
end=islice.stop)
# pylint: enable=no-member
out_grads_slice.append(og_my_slice.as_in_context(self.contexts[i]))
else:
out_grads_slice.append(grad.copyto(self.contexts[i]))
exec_.backward(out_grads=out_grads_slice)
def update_metric(self, eval_metric, labels):
"""Accumulate the performance according to `eval_metric` on all devices.
Parameters
----------
eval_metric : EvalMetric
The metric used for evaluation.
labels : list of NDArray
Typically comes from `label` of a `DataBatch`.
"""
for texec, islice in zip(self.execs, self.slices):
labels_slice = []
for label, axis in zip(labels, self.label_layouts):
if axis == 0:
# slicing NDArray along axis 0 can avoid copying
labels_slice.append(label[islice])
elif axis > 0:
# pylint: disable=no-member
label_my_slice = nd.slice_axis(label, axis=axis, begin=islice.start,
end=islice.stop).as_in_context(label.context)
# pylint: enable=no-member
labels_slice.append(label_my_slice)
else:
labels_slice.append(label)
eval_metric.update(labels_slice, texec.outputs)
def _bind_ith_exec(self, i, data_shapes, label_shapes, shared_group):
"""Internal utility function to bind the i-th executor.
"""
shared_exec = None if shared_group is None else shared_group.execs[i]
context = self.contexts[i]
shared_data_arrays = self.shared_data_arrays[i]
input_shapes = dict(data_shapes)
if label_shapes is not None:
input_shapes.update(dict(label_shapes))
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None, "shape inference failed"
input_types = {x.name: x.dtype for x in data_shapes}
if label_shapes is not None:
input_types.update({x.name: x.dtype for x in label_shapes})
arg_types, _, aux_types = self.symbol.infer_type(**input_types)
assert arg_types is not None, "type inference failed"
arg_arrays = []
grad_arrays = {} if self.for_training else None
def _get_or_reshape(name, shared_data_arrays, arg_shape, arg_type, context, logger):
"""Internal helper to get a memory block or re-use by re-shaping."""
if name in shared_data_arrays:
arg_arr = shared_data_arrays[name]
if np.prod(arg_arr.shape) >= np.prod(arg_shape):
# nice, we can directly re-use this data blob
assert arg_arr.dtype == arg_type
arg_arr = arg_arr.reshape(arg_shape)
else:
                    logger.warning('bucketing: data "%s" has shape %s, which is larger '
                                   'than the already allocated shape %s. Need to '
                                   're-allocate. Consider setting default_bucket_key '
                                   'to the bucket taking the largest input, for '
                                   'better memory sharing.',
                                   name, arg_shape, arg_arr.shape)
arg_arr = nd.zeros(arg_shape, context, dtype=arg_type)
# replace existing shared array because the new one is bigger
shared_data_arrays[name] = arg_arr
else:
arg_arr = nd.zeros(arg_shape, context, dtype=arg_type)
shared_data_arrays[name] = arg_arr
return arg_arr
# create or borrow arguments and gradients
for j in range(len(self.arg_names)):
name = self.arg_names[j]
if name in self.param_names: # model parameters
if shared_exec is None:
arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
if self.grad_req[name] != 'null':
grad_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
grad_arrays[name] = grad_arr
else:
arg_arr = shared_exec.arg_dict[name]
assert arg_arr.shape == arg_shapes[j]
assert arg_arr.dtype == arg_types[j]
if self.grad_req[name] != 'null':
grad_arrays[name] = shared_exec.grad_dict[name]
else: # data, label, or states
arg_arr = _get_or_reshape(name, shared_data_arrays, arg_shapes[j], arg_types[j],
context, self.logger)
# data might also need grad if inputs_need_grad is True
if self.grad_req[name] != 'null':
grad_arrays[name] = _get_or_reshape('grad of ' + name, shared_data_arrays,
arg_shapes[j], arg_types[j], context,
self.logger)
arg_arrays.append(arg_arr)
# create or borrow aux variables
if shared_exec is None:
aux_arrays = [nd.zeros(s, context, dtype=t) for s, t in zip(aux_shapes, aux_types)]
else:
for j, arr in enumerate(shared_exec.aux_arrays):
assert aux_shapes[j] == arr.shape
assert aux_types[j] == arr.dtype
aux_arrays = shared_exec.aux_arrays[:]
executor = self.symbol.bind(ctx=context, args=arg_arrays,
args_grad=grad_arrays, aux_states=aux_arrays,
grad_req=self.grad_req, shared_exec=shared_exec)
# Get the total bytes allocated for this executor
self._total_exec_bytes += int(executor.debug_str().split('\n')[-3].split()[1])
return executor
def _sliced_shape(self, shapes, i, major_axis):
"""Get the sliced shapes for the i-th executor.
Parameters
----------
shapes : list of (str, tuple)
The original (name, shape) pairs.
i : int
Which executor we are dealing with.
"""
sliced_shapes = []
for desc, axis in zip(shapes, major_axis):
shape = list(desc.shape)
if axis >= 0:
shape[axis] = self.slices[i].stop - self.slices[i].start
sliced_shapes.append(DataDesc(desc.name, tuple(shape), desc.dtype, desc.layout))
return sliced_shapes
def install_monitor(self, mon):
"""Install monitor on all executors"""
for exe in self.execs:
mon.install(exe)
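# A minimal usage sketch (the symbol, contexts and shapes are illustrative, and
# `mx` is assumed to be the top-level mxnet module):
#
#   param_names = [n for n in net.list_arguments() if n != 'data']
#   group = DataParallelExecutorGroup(
#       net, [mx.cpu(0), mx.cpu(1)], workload=[1, 1],
#       data_shapes=[DataDesc('data', (8, 100))], label_shapes=None,
#       param_names=param_names, for_training=True, inputs_need_grad=False)
#   group.forward(data_batch, is_train=True)
#   group.backward()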
|
|
"""The tests for the demo climate component."""
import pytest
import voluptuous as vol
from homeassistant.components.climate.const import (
ATTR_AUX_HEAT,
ATTR_CURRENT_HUMIDITY,
ATTR_CURRENT_TEMPERATURE,
ATTR_FAN_MODE,
ATTR_HUMIDITY,
ATTR_HVAC_ACTION,
ATTR_HVAC_MODE,
ATTR_HVAC_MODES,
ATTR_MAX_HUMIDITY,
ATTR_MAX_TEMP,
ATTR_MIN_HUMIDITY,
ATTR_MIN_TEMP,
ATTR_PRESET_MODE,
ATTR_SWING_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
DOMAIN,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_ECO,
SERVICE_SET_AUX_HEAT,
SERVICE_SET_FAN_MODE,
SERVICE_SET_HUMIDITY,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_PRESET_MODE,
SERVICE_SET_SWING_MODE,
SERVICE_SET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_TEMPERATURE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.setup import async_setup_component
from homeassistant.util.unit_system import METRIC_SYSTEM
ENTITY_CLIMATE = "climate.hvac"
ENTITY_ECOBEE = "climate.ecobee"
ENTITY_HEATPUMP = "climate.heatpump"
@pytest.fixture(autouse=True)
async def setup_demo_climate(hass):
"""Initialize setup demo climate."""
hass.config.units = METRIC_SYSTEM
assert await async_setup_component(hass, DOMAIN, {"climate": {"platform": "demo"}})
await hass.async_block_till_done()
def test_setup_params(hass):
"""Test the initial parameters."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_COOL
assert state.attributes.get(ATTR_TEMPERATURE) == 21
assert state.attributes.get(ATTR_CURRENT_TEMPERATURE) == 22
assert state.attributes.get(ATTR_FAN_MODE) == "On High"
assert state.attributes.get(ATTR_HUMIDITY) == 67
assert state.attributes.get(ATTR_CURRENT_HUMIDITY) == 54
assert state.attributes.get(ATTR_SWING_MODE) == "Off"
    assert state.attributes.get(ATTR_AUX_HEAT) == STATE_OFF
assert state.attributes.get(ATTR_HVAC_MODES) == [
"off",
"heat",
"cool",
"auto",
"dry",
"fan_only",
]
def test_default_setup_params(hass):
"""Test the setup with default parameters."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_MIN_TEMP) == 7
assert state.attributes.get(ATTR_MAX_TEMP) == 35
assert state.attributes.get(ATTR_MIN_HUMIDITY) == 30
assert state.attributes.get(ATTR_MAX_HUMIDITY) == 99
async def test_set_only_target_temp_bad_attr(hass):
"""Test setting the target temperature without required attribute."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_TEMPERATURE) == 21
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_TEMPERATURE: None},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_TEMPERATURE) == 21
async def test_set_only_target_temp(hass):
"""Test the setting of the target temperature."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_TEMPERATURE) == 21
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_TEMPERATURE: 30},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_TEMPERATURE) == 30.0
async def test_set_only_target_temp_with_convert(hass):
"""Test the setting of the target temperature."""
state = hass.states.get(ENTITY_HEATPUMP)
assert state.attributes.get(ATTR_TEMPERATURE) == 20
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: ENTITY_HEATPUMP, ATTR_TEMPERATURE: 21},
blocking=True,
)
state = hass.states.get(ENTITY_HEATPUMP)
assert state.attributes.get(ATTR_TEMPERATURE) == 21.0
async def test_set_target_temp_range(hass):
"""Test the setting of the target temperature with range."""
state = hass.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_TEMPERATURE) is None
assert state.attributes.get(ATTR_TARGET_TEMP_LOW) == 21.0
assert state.attributes.get(ATTR_TARGET_TEMP_HIGH) == 24.0
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: ENTITY_ECOBEE,
ATTR_TARGET_TEMP_LOW: 20,
ATTR_TARGET_TEMP_HIGH: 25,
},
blocking=True,
)
state = hass.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_TEMPERATURE) is None
assert state.attributes.get(ATTR_TARGET_TEMP_LOW) == 20.0
assert state.attributes.get(ATTR_TARGET_TEMP_HIGH) == 25.0
async def test_set_target_temp_range_bad_attr(hass):
"""Test setting the target temperature range without attribute."""
state = hass.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_TEMPERATURE) is None
assert state.attributes.get(ATTR_TARGET_TEMP_LOW) == 21.0
assert state.attributes.get(ATTR_TARGET_TEMP_HIGH) == 24.0
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: ENTITY_ECOBEE,
ATTR_TARGET_TEMP_LOW: None,
ATTR_TARGET_TEMP_HIGH: None,
},
blocking=True,
)
state = hass.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_TEMPERATURE) is None
assert state.attributes.get(ATTR_TARGET_TEMP_LOW) == 21.0
assert state.attributes.get(ATTR_TARGET_TEMP_HIGH) == 24.0
async def test_set_target_humidity_bad_attr(hass):
"""Test setting the target humidity without required attribute."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_HUMIDITY) == 67
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HUMIDITY,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_HUMIDITY: None},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_HUMIDITY) == 67
async def test_set_target_humidity(hass):
"""Test the setting of the target humidity."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_HUMIDITY) == 67
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HUMIDITY,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_HUMIDITY: 64},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_HUMIDITY) == 64.0
async def test_set_fan_mode_bad_attr(hass):
"""Test setting fan mode without required attribute."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_FAN_MODE) == "On High"
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_FAN_MODE: None},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_FAN_MODE) == "On High"
async def test_set_fan_mode(hass):
"""Test setting of new fan mode."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_FAN_MODE) == "On High"
await hass.services.async_call(
DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_FAN_MODE: "On Low"},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_FAN_MODE) == "On Low"
async def test_set_swing_mode_bad_attr(hass):
"""Test setting swing mode without required attribute."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_SWING_MODE) == "Off"
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_SWING_MODE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_SWING_MODE: None},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_SWING_MODE) == "Off"
async def test_set_swing(hass):
"""Test setting of new swing mode."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_SWING_MODE) == "Off"
await hass.services.async_call(
DOMAIN,
SERVICE_SET_SWING_MODE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_SWING_MODE: "Auto"},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_SWING_MODE) == "Auto"
async def test_set_hvac_bad_attr_and_state(hass):
"""Test setting hvac mode without required attribute.
Also check the state.
"""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_HVAC_ACTION) == CURRENT_HVAC_COOL
assert state.state == HVAC_MODE_COOL
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_HVAC_MODE: None},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_HVAC_ACTION) == CURRENT_HVAC_COOL
assert state.state == HVAC_MODE_COOL
async def test_set_hvac(hass):
"""Test setting of new hvac mode."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_COOL
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_HVAC_MODE: HVAC_MODE_HEAT},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_HEAT
async def test_set_hold_mode_away(hass):
"""Test setting the hold mode away."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ECOBEE, ATTR_PRESET_MODE: PRESET_AWAY},
blocking=True,
)
state = hass.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_AWAY
async def test_set_hold_mode_eco(hass):
"""Test setting the hold mode eco."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ECOBEE, ATTR_PRESET_MODE: PRESET_ECO},
blocking=True,
)
state = hass.states.get(ENTITY_ECOBEE)
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_ECO
async def test_set_aux_heat_bad_attr(hass):
"""Test setting the auxiliary heater without required attribute."""
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_AUX_HEAT) == STATE_OFF
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_AUX_HEAT,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_AUX_HEAT: None},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_AUX_HEAT) == STATE_OFF
async def test_set_aux_heat_on(hass):
"""Test setting the axillary heater on/true."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_AUX_HEAT,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_AUX_HEAT: True},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_AUX_HEAT) == STATE_ON
async def test_set_aux_heat_off(hass):
"""Test setting the auxiliary heater off/false."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_AUX_HEAT,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_AUX_HEAT: False},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.attributes.get(ATTR_AUX_HEAT) == STATE_OFF
async def test_turn_on(hass):
"""Test turn on device."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_HVAC_MODE: HVAC_MODE_OFF},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_OFF
await hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_CLIMATE}, blocking=True
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_HEAT
async def test_turn_off(hass):
"""Test turn on device."""
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_CLIMATE, ATTR_HVAC_MODE: HVAC_MODE_HEAT},
blocking=True,
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_HEAT
await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_CLIMATE}, blocking=True
)
state = hass.states.get(ENTITY_CLIMATE)
assert state.state == HVAC_MODE_OFF
|
|
"""Representation of a deCONZ gateway."""
import asyncio
import async_timeout
from pydeconz import DeconzSession, errors
from homeassistant.config_entries import SOURCE_REAUTH
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_PORT
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
CONF_ALLOW_CLIP_SENSOR,
CONF_ALLOW_DECONZ_GROUPS,
CONF_ALLOW_NEW_DEVICES,
CONF_MASTER_GATEWAY,
DEFAULT_ALLOW_CLIP_SENSOR,
DEFAULT_ALLOW_DECONZ_GROUPS,
DEFAULT_ALLOW_NEW_DEVICES,
DOMAIN as DECONZ_DOMAIN,
LOGGER,
NEW_GROUP,
NEW_LIGHT,
NEW_SCENE,
NEW_SENSOR,
SUPPORTED_PLATFORMS,
)
from .deconz_event import async_setup_events, async_unload_events
from .errors import AuthenticationRequired, CannotConnect
@callback
def get_gateway_from_config_entry(hass, config_entry):
"""Return gateway with a matching bridge id."""
return hass.data[DECONZ_DOMAIN][config_entry.unique_id]
class DeconzGateway:
"""Manages a single deCONZ gateway."""
def __init__(self, hass, config_entry) -> None:
"""Initialize the system."""
self.hass = hass
self.config_entry = config_entry
self.api = None
self.available = True
self.ignore_state_updates = False
self.deconz_ids = {}
self.entities = {}
self.events = []
self.listeners = []
@property
def bridgeid(self) -> str:
"""Return the unique identifier of the gateway."""
return self.config_entry.unique_id
@property
def host(self) -> str:
"""Return the host of the gateway."""
return self.config_entry.data[CONF_HOST]
@property
def master(self) -> bool:
"""Gateway which is used with deCONZ services without defining id."""
return self.config_entry.options[CONF_MASTER_GATEWAY]
# Options
@property
def option_allow_clip_sensor(self) -> bool:
"""Allow loading clip sensor from gateway."""
return self.config_entry.options.get(
CONF_ALLOW_CLIP_SENSOR, DEFAULT_ALLOW_CLIP_SENSOR
)
@property
def option_allow_deconz_groups(self) -> bool:
"""Allow loading deCONZ groups from gateway."""
return self.config_entry.options.get(
CONF_ALLOW_DECONZ_GROUPS, DEFAULT_ALLOW_DECONZ_GROUPS
)
@property
def option_allow_new_devices(self) -> bool:
"""Allow automatic adding of new devices."""
return self.config_entry.options.get(
CONF_ALLOW_NEW_DEVICES, DEFAULT_ALLOW_NEW_DEVICES
)
# Signals
@property
def signal_reachable(self) -> str:
"""Gateway specific event to signal a change in connection status."""
return f"deconz-reachable-{self.bridgeid}"
@callback
def async_signal_new_device(self, device_type) -> str:
"""Gateway specific event to signal new device."""
new_device = {
NEW_GROUP: f"deconz_new_group_{self.bridgeid}",
NEW_LIGHT: f"deconz_new_light_{self.bridgeid}",
NEW_SCENE: f"deconz_new_scene_{self.bridgeid}",
NEW_SENSOR: f"deconz_new_sensor_{self.bridgeid}",
}
return new_device[device_type]
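    # A usage sketch: async_signal_new_device(NEW_LIGHT) yields
    # "deconz_new_light_<bridgeid>"; platform setups listen on that signal
    # via async_dispatcher_connect to create new entities.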
# Callbacks
@callback
def async_connection_status_callback(self, available) -> None:
"""Handle signals of gateway connection status."""
self.available = available
self.ignore_state_updates = False
async_dispatcher_send(self.hass, self.signal_reachable, True)
@callback
def async_add_device_callback(
self, device_type, device=None, force: bool = False
) -> None:
"""Handle event of new device creation in deCONZ."""
if not force and not self.option_allow_new_devices:
return
args = []
if device is not None and not isinstance(device, list):
args.append([device])
async_dispatcher_send(
self.hass,
self.async_signal_new_device(device_type),
*args, # Don't send device if None, it would override default value in listeners
)
async def async_update_device_registry(self) -> None:
"""Update device registry."""
device_registry = await self.hass.helpers.device_registry.async_get_registry()
# Host device
device_registry.async_get_or_create(
config_entry_id=self.config_entry.entry_id,
connections={(CONNECTION_NETWORK_MAC, self.api.config.mac)},
)
# Gateway service
device_registry.async_get_or_create(
config_entry_id=self.config_entry.entry_id,
identifiers={(DECONZ_DOMAIN, self.api.config.bridgeid)},
manufacturer="Dresden Elektronik",
model=self.api.config.modelid,
name=self.api.config.name,
sw_version=self.api.config.swversion,
via_device=(CONNECTION_NETWORK_MAC, self.api.config.mac),
)
async def async_setup(self) -> bool:
"""Set up a deCONZ gateway."""
try:
self.api = await get_gateway(
self.hass,
self.config_entry.data,
self.async_add_device_callback,
self.async_connection_status_callback,
)
except CannotConnect as err:
raise ConfigEntryNotReady from err
except AuthenticationRequired:
self.hass.async_create_task(
self.hass.config_entries.flow.async_init(
DECONZ_DOMAIN,
context={"source": SOURCE_REAUTH},
data=self.config_entry.data,
)
)
return False
for component in SUPPORTED_PLATFORMS:
self.hass.async_create_task(
self.hass.config_entries.async_forward_entry_setup(
self.config_entry, component
)
)
await async_setup_events(self)
self.api.start()
self.config_entry.add_update_listener(self.async_config_entry_updated)
return True
@staticmethod
async def async_config_entry_updated(hass, entry) -> None:
"""Handle signals of config entry being updated.
        This is a static method because a class method (bound method) cannot be used with weak references.
        This is triggered either by discovery updating the host address or by config entry options changing.
"""
gateway = get_gateway_from_config_entry(hass, entry)
if gateway.api.host != gateway.host:
gateway.api.close()
gateway.api.host = gateway.host
gateway.api.start()
return
await gateway.options_updated()
async def options_updated(self):
"""Manage entities affected by config entry options."""
deconz_ids = []
if self.option_allow_clip_sensor:
self.async_add_device_callback(NEW_SENSOR)
else:
deconz_ids += [
sensor.deconz_id
for sensor in self.api.sensors.values()
if sensor.type.startswith("CLIP")
]
if self.option_allow_deconz_groups:
self.async_add_device_callback(NEW_GROUP)
else:
deconz_ids += [group.deconz_id for group in self.api.groups.values()]
entity_registry = await self.hass.helpers.entity_registry.async_get_registry()
for entity_id, deconz_id in self.deconz_ids.items():
if deconz_id in deconz_ids and entity_registry.async_is_registered(
entity_id
):
                # Removing an entity from the entity registry will also remove it
                # from Home Assistant
entity_registry.async_remove(entity_id)
@callback
def shutdown(self, event) -> None:
"""Wrap the call to deconz.close.
Used as an argument to EventBus.async_listen_once.
"""
self.api.close()
async def async_reset(self):
"""Reset this gateway to default state."""
self.api.async_connection_status_callback = None
self.api.close()
for component in SUPPORTED_PLATFORMS:
await self.hass.config_entries.async_forward_entry_unload(
self.config_entry, component
)
for unsub_dispatcher in self.listeners:
unsub_dispatcher()
self.listeners = []
async_unload_events(self)
self.deconz_ids = {}
return True
async def get_gateway(
hass, config, async_add_device_callback, async_connection_status_callback
) -> DeconzSession:
"""Create a gateway object and verify configuration."""
session = aiohttp_client.async_get_clientsession(hass)
deconz = DeconzSession(
session,
config[CONF_HOST],
config[CONF_PORT],
config[CONF_API_KEY],
async_add_device=async_add_device_callback,
connection_status=async_connection_status_callback,
)
try:
with async_timeout.timeout(10):
await deconz.initialize()
return deconz
except errors.Unauthorized as err:
LOGGER.warning("Invalid key for deCONZ at %s", config[CONF_HOST])
raise AuthenticationRequired from err
except (asyncio.TimeoutError, errors.RequestError) as err:
LOGGER.error("Error connecting to deCONZ gateway at %s", config[CONF_HOST])
raise CannotConnect from err
|
|
#!/usr/bin/env python
# coding=utf-8
"""
Edit a pdb file to provide missing data
"""
from __future__ import print_function
import os
import sys
import argparse
import numpy as np
from md_utils.md_common import list_to_file, InvalidDataError, warning, process_cfg, create_out_fname, read_csv_dict, \
print_qm_kind, create_element_dict, print_qm_links, list_to_csv
try:
# noinspection PyCompatibility
from ConfigParser import ConfigParser
except ImportError:
# noinspection PyCompatibility
from configparser import ConfigParser
__author__ = 'hmayes'
# Error Codes
# The good status code
GOOD_RET = 0
INPUT_ERROR = 1
IO_ERROR = 2
INVALID_DATA = 3
# Constants #
# Config File Sections
MAIN_SEC = 'main'
# Config keys
PDB_FILE = 'pdb_file'
PDB_NEW_FILE = 'new_pdb_name'
ATOM_REORDER_FILE = 'atom_reorder_old_new_file'
MOL_RENUM_FILE = 'mol_renum_old_new_file'
RENUM_MOL = 'mol_renum_flag'
# TODO: if desired, make an option to add a chain label
# CHAIN_LABEL = 'chain_label_flag'
FIRST_ADD_ELEM = 'first_atom_add_element'
LAST_ADD_ELEM = 'last_atom_add_element'
FIRST_WAT_ID = 'first_wat_atom'
LAST_WAT_ID = 'last_wat_atom'
ADD_ELEMENTS = 'add_element_types'
ELEMENT_DICT_FILE = 'atom_type_element_dict_file'
OUT_BASE_DIR = 'output_directory'
RESID_QMMM = 'resids_qmmm_ca_cb_link'
# PDB file info
PDB_LINE_TYPE_LAST_CHAR = 'pdb_line_type_last_char'
PDB_ATOM_NUM_LAST_CHAR = 'pdb_atom_num_last_char'
PDB_ATOM_TYPE_LAST_CHAR = 'pdb_atom_type_last_char'
PDB_RES_TYPE_LAST_CHAR = 'pdb_res_type_last_char'
PDB_MOL_NUM_LAST_CHAR = 'pdb_mol_num_last_char'
PDB_X_LAST_CHAR = 'pdb_x_last_char'
PDB_Y_LAST_CHAR = 'pdb_y_last_char'
PDB_Z_LAST_CHAR = 'pdb_z_last_char'
PDB_LAST_T_CHAR = 'pdb_last_temp_char'
PDB_LAST_ELEM_CHAR = 'pdb_last_element_char'
PDB_FORMAT = 'pdb_print_format'
# Defaults
DEF_CFG_FILE = 'pdb_edit.ini'
DEF_ELEM_DICT_FILE = os.path.join(os.path.dirname(__file__), 'cfg', 'charmm36_atoms_elements.txt')
DEF_CFG_VALS = {ATOM_REORDER_FILE: None,
MOL_RENUM_FILE: None,
ELEMENT_DICT_FILE: None,
RENUM_MOL: False,
FIRST_ADD_ELEM: 1,
LAST_ADD_ELEM: np.inf,
FIRST_WAT_ID: np.nan,
LAST_WAT_ID: np.nan,
OUT_BASE_DIR: None,
PDB_NEW_FILE: None,
PDB_FORMAT: '{:6s}{:>5}{:^6s}{:5s}{:>4} {:8.3f}{:8.3f}{:8.3f}{:22s}{:>2s}{:s}',
PDB_LINE_TYPE_LAST_CHAR: 6,
PDB_ATOM_NUM_LAST_CHAR: 11,
PDB_ATOM_TYPE_LAST_CHAR: 17,
PDB_RES_TYPE_LAST_CHAR: 22,
PDB_MOL_NUM_LAST_CHAR: 28,
PDB_X_LAST_CHAR: 38,
PDB_Y_LAST_CHAR: 46,
PDB_Z_LAST_CHAR: 54,
PDB_LAST_T_CHAR: 76,
PDB_LAST_ELEM_CHAR: 78,
ADD_ELEMENTS: False,
RESID_QMMM: []
}
REQ_KEYS = {PDB_FILE: str,
}
HEAD_CONTENT = 'head_content'
ATOMS_CONTENT = 'atoms_content'
TAIL_CONTENT = 'tail_content'
# These are used when atom types need to be added to a PDB file
C_ATOMS = ' C'
O_ATOMS = ' O'
H_ATOMS = ' H'
N_ATOMS = ' N'
P_ATOMS = ' P'
S_ATOMS = ' S'
CL_ATOMS = 'CL'
NA_ATOMS = 'NA'
K_ATOMS = ' K'
LI_ATOMS = 'LI'
MG_ATOMS = 'MG'
CA_ATOMS = 'CA'
RB_ATOMS = 'RB'
CS_ATOMS = 'CS'
BA_ATOMS = 'BA'
ZN_ATOMS = 'ZN'
CD_ATOMS = 'CD'
# Atom types; used for making QMMM input
C_ALPHA = ' CA '
C_BETA = ' CB '
SKIP_ATOM_TYPES = [' C ', ' O ', ' NT ', ' HNT ', ' CAT ', ' HT1 ', ' HT2 ', ' HT3 ', ' HA ', ' CAY ',
' HY1 ', ' HY2 ', ' HY3 ', ' CY ', ' OY ', ' N ', ' HN ', ]
def read_cfg(f_loc, cfg_proc=process_cfg):
"""
Reads the given configuration file, returning a dict with the converted values supplemented by default values.
:param f_loc: The location of the file to read.
:param cfg_proc: The processor to use for the raw configuration values. Uses default values when the raw
value is missing.
:return: A dict of the processed configuration file's data.
"""
config = ConfigParser()
good_files = config.read(f_loc)
if not good_files:
raise IOError('Could not read file: {}'.format(f_loc))
main_proc = cfg_proc(dict(config.items(MAIN_SEC)), DEF_CFG_VALS, REQ_KEYS)
# Assume that elements should be added if a dict file is given
if (main_proc[ADD_ELEMENTS] or len(main_proc[RESID_QMMM]) > 0) and main_proc[ELEMENT_DICT_FILE] is None:
main_proc[ELEMENT_DICT_FILE] = DEF_ELEM_DICT_FILE
if main_proc[ELEMENT_DICT_FILE] is not None:
main_proc[ADD_ELEMENTS] = True
return main_proc
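# A minimal configuration sketch (hypothetical file names) using the keys
# defined above; only 'pdb_file' is required:
#
#   [main]
#   pdb_file = protein.pdb
#   mol_renum_flag = True
#   add_element_types = True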
def parse_cmdline(argv):
"""
Returns the parsed argument list and return code.
`argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
"""
if argv is None:
argv = sys.argv[1:]
# initialize the parser object:
parser = argparse.ArgumentParser(description='Creates a new version of a pdb file. Atoms will be numbered '
'starting from one. Options include renumbering molecules.')
parser.add_argument("-c", "--config", help="The location of the configuration file in ini format. "
"The default file name is {}, located in the "
"base directory where the program as run.".format(DEF_CFG_FILE),
default=DEF_CFG_FILE, type=read_cfg)
args = None
try:
args = parser.parse_args(argv)
except IOError as e:
warning(e)
parser.print_help()
return args, IO_ERROR
except (KeyError, InvalidDataError, SystemExit) as e:
if hasattr(e, 'code') and e.code == 0:
return args, GOOD_RET
warning("Input data missing:", e)
parser.print_help()
return args, INPUT_ERROR
return args, GOOD_RET
def pdb_atoms_to_file(pdb_format, list_val, fname, mode='w'):
"""
Writes the list of sequences to the given file in the specified format for a PDB
@param pdb_format: provides correct formatting
@param list_val: The list of sequences to write.
@param fname: The location of the file to write.
    @param mode: write mode ('w') by default; pass 'a' to append
"""
with open(fname, mode) as w_file:
for line in list_val:
w_file.write(pdb_format.format(*line) + '\n')
def print_pdb(head_data, atoms_data, tail_data, file_name, file_format):
list_to_file(head_data, file_name)
pdb_atoms_to_file(file_format, atoms_data, file_name, mode='a')
list_to_file(tail_data, file_name, mode='a', print_message=False)
def process_pdb(cfg, atom_num_dict, mol_num_dict, element_dict):
pdb_loc = cfg[PDB_FILE]
pdb_data = {HEAD_CONTENT: [], ATOMS_CONTENT: [], TAIL_CONTENT: []}
# to allow warning to be printed once and only once
missing_types = []
qmmm_elem_id_dict = {}
ca_res_atom_id_dict = {}
cb_res_atom_id_dict = {}
atoms_for_vmd = []
with open(pdb_loc) as f:
wat_count = 0
atom_count = 0
mol_count = 1
current_mol = None
last_mol_num = None
atoms_content = []
for line in f:
line = line.strip()
line_len = len(line)
if line_len == 0:
continue
line_head = line[:cfg[PDB_LINE_TYPE_LAST_CHAR]]
            # head_content to contain everything before the 'Atoms' section
# also capture the number of atoms
if line_head == 'REMARK' or line_head == 'CRYST1':
pdb_data[HEAD_CONTENT].append(line)
# atoms_content to contain everything but the xyz
elif line_head == 'ATOM ':
                # My template PDB has ***** after atom_count 99999. Thus, I'm renumbering. Otherwise, use this:
                # atom_num = line[cfg[PDB_LINE_TYPE_LAST_CHAR]:cfg[PDB_ATOM_NUM_LAST_CHAR]]
                # For renumbering, making sure it prints in the correct format, including the number of characters:
atom_count += 1
# For reordering atoms
if atom_count in atom_num_dict:
atom_id = atom_num_dict[atom_count]
else:
atom_id = atom_count
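                # PDB atom serial numbers occupy a 5-character field, so ids
                # above 99999 are written in hex, e.g. format(100000, 'x') == '186a0'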
if atom_id > 99999:
atom_num = format(atom_id, 'x')
if len(atom_num) > 5:
warning("Hex representation of {} is {}, which is greater than 5 characters. This"
"will affect the PDB output formatting.".format(atom_id, atom_num))
else:
atom_num = '{:5d}'.format(atom_id)
atom_type = line[cfg[PDB_ATOM_NUM_LAST_CHAR]:cfg[PDB_ATOM_TYPE_LAST_CHAR]]
res_type = line[cfg[PDB_ATOM_TYPE_LAST_CHAR]:cfg[PDB_RES_TYPE_LAST_CHAR]]
mol_num = int(line[cfg[PDB_RES_TYPE_LAST_CHAR]:cfg[PDB_MOL_NUM_LAST_CHAR]])
pdb_x = float(line[cfg[PDB_MOL_NUM_LAST_CHAR]:cfg[PDB_X_LAST_CHAR]])
pdb_y = float(line[cfg[PDB_X_LAST_CHAR]:cfg[PDB_Y_LAST_CHAR]])
pdb_z = float(line[cfg[PDB_Y_LAST_CHAR]:cfg[PDB_Z_LAST_CHAR]])
occ_t = line[cfg[PDB_Z_LAST_CHAR]:cfg[PDB_LAST_T_CHAR]]
element = line[cfg[PDB_LAST_T_CHAR]:cfg[PDB_LAST_ELEM_CHAR]]
last_cols = line[cfg[PDB_LAST_ELEM_CHAR]:]
# For user-specified changing of molecule number
if mol_num in mol_num_dict:
mol_num = mol_num_dict[mol_num]
# If doing water molecule checking...
if cfg[FIRST_WAT_ID] <= atom_count <= cfg[LAST_WAT_ID]:
if (wat_count % 3) == 0:
current_mol = mol_num
if atom_type != ' OH2 ':
warning('Expected an OH2 atom to be the first atom of a water molecule. '
'Check line: {}'.format(line))
# last_cols = ' 0.00 0.00 S2 O'
else:
if current_mol != mol_num:
warning('Water not in order on line:', line)
if (wat_count % 3) == 1:
if atom_type != ' H1 ':
warning('Expected an H1 atom to be the second atom of a water molecule. '
'Check line: {}'.format(line))
else:
if atom_type != ' H2 ':
                            warning('Expected an H2 atom to be the third atom of a water molecule. '
                                    'Check line: {}'.format(line))
wat_count += 1
if mol_num in cfg[RESID_QMMM] and atom_type not in SKIP_ATOM_TYPES:
if atom_type == C_ALPHA:
ca_res_atom_id_dict[mol_num] = atom_id
else:
if atom_type == C_BETA:
cb_res_atom_id_dict[mol_num] = atom_id
if atom_type in element_dict:
element = element_dict[atom_type]
else:
raise InvalidDataError("Did not find atom type '{}' in the element dictionary. Please "
"provide a new atom type, element dictionary (using keyword {} "
"in the configuration file) that includes all atom types in the "
"residues identified with the '{}' key."
"".format(atom_type, ELEMENT_DICT_FILE, RESID_QMMM))
if element in qmmm_elem_id_dict:
qmmm_elem_id_dict[element].append(atom_id)
else:
qmmm_elem_id_dict[element] = [atom_id]
atoms_for_vmd.append(atom_id - 1)
if cfg[ADD_ELEMENTS] and atom_count <= cfg[LAST_ADD_ELEM]:
if atom_type in element_dict:
element = element_dict[atom_type]
else:
if atom_type not in missing_types:
warning("Please add atom type '{}' to dictionary of elements. Will not write/overwrite "
"element type in the pdb output.".format(atom_type))
missing_types.append(atom_type)
# For numbering molecules from 1 to end
if cfg[RENUM_MOL]:
if last_mol_num is None:
last_mol_num = mol_num
if mol_num != last_mol_num:
last_mol_num = mol_num
mol_count += 1
if mol_count == 10000:
warning("Molecule numbers greater than 9999 will be printed in hex")
                    # Due to PDB format constraints, need to print in hex once there are more than 9999 molecules.
if mol_count > 9999:
mol_num = format(mol_count, 'x')
if len(mol_num) > 4:
warning("Hex representation of {} is {}, which is greater than 4 characters. This"
"will affect the PDB output formatting.".format(atom_id, atom_num))
else:
mol_num = '{:4d}'.format(mol_count)
line_struct = [line_head, atom_num, atom_type, res_type, mol_num, pdb_x, pdb_y, pdb_z,
occ_t, element, last_cols]
atoms_content.append(line_struct)
# tail_content to contain everything after the 'Atoms' section
else:
pdb_data[TAIL_CONTENT].append(line)
# Only sort if there is renumbering
if len(atom_num_dict) > 0:
pdb_data[ATOMS_CONTENT] = sorted(atoms_content, key=lambda entry: entry[1])
else:
pdb_data[ATOMS_CONTENT] = atoms_content
if cfg[PDB_NEW_FILE] is None:
f_name = create_out_fname(cfg[PDB_FILE], suffix="_new", base_dir=cfg[OUT_BASE_DIR])
else:
f_name = create_out_fname(cfg[PDB_NEW_FILE], base_dir=cfg[OUT_BASE_DIR])
print_pdb(pdb_data[HEAD_CONTENT], pdb_data[ATOMS_CONTENT], pdb_data[TAIL_CONTENT],
f_name, cfg[PDB_FORMAT])
if len(cfg[RESID_QMMM]) > 0:
f_name = create_out_fname('amino_id.dat', base_dir=cfg[OUT_BASE_DIR])
print_mode = "w"
for elem in sorted(qmmm_elem_id_dict):
print_qm_kind(qmmm_elem_id_dict[elem], elem, f_name, mode=print_mode)
print_mode = 'a'
print_qm_links(ca_res_atom_id_dict, cb_res_atom_id_dict, f_name, mode=print_mode)
f_name = create_out_fname('vmd_protein_atoms.dat', base_dir=cfg[OUT_BASE_DIR])
list_to_csv([atoms_for_vmd], f_name, delimiter=' ')
def main(argv=None):
# Read input
args, ret = parse_cmdline(argv)
if ret != GOOD_RET or args is None:
return ret
cfg = args.config
# Read and process pdb files
try:
atom_num_dict = read_csv_dict(cfg[ATOM_REORDER_FILE])
mol_num_dict = read_csv_dict(cfg[MOL_RENUM_FILE], one_to_one=False)
element_dict = create_element_dict(cfg[ELEMENT_DICT_FILE])
process_pdb(cfg, atom_num_dict, mol_num_dict, element_dict)
except IOError as e:
warning("Problems reading file:", e)
return IO_ERROR
except (InvalidDataError, ValueError) as e:
warning("Problems with input:", e)
return INVALID_DATA
return GOOD_RET # success
if __name__ == '__main__':
status = main()
sys.exit(status)
|
|
"""
Tagging utilities - from user tag input parsing to tag cloud
calculation.
"""
import math
from django.db.models.query import QuerySet
from django.utils import six
from django.utils.encoding import force_text
from django.utils.translation import ugettext as _
# Font size distribution algorithms
LOGARITHMIC, LINEAR = 1, 2
def parse_tag_input(input):
"""
Parses tag input, with multiple word input being activated and
delineated by commas and double quotes. Quotes take precedence, so
they may contain commas.
Returns a sorted list of unique tag names.
"""
if not input:
return []
input = force_text(input)
# Special case - if there are no commas or double quotes in the
# input, we don't *do* a recall... I mean, we know we only need to
# split on spaces.
if ',' not in input and '"' not in input:
words = list(set(split_strip(input, ' ')))
words.sort()
return words
words = []
buffer = []
# Defer splitting of non-quoted sections until we know if there are
# any unquoted commas.
to_be_split = []
saw_loose_comma = False
open_quote = False
i = iter(input)
try:
while 1:
c = next(i)
if c == '"':
if buffer:
to_be_split.append(''.join(buffer))
buffer = []
# Find the matching quote
open_quote = True
c = next(i)
while c != '"':
buffer.append(c)
c = next(i)
if buffer:
word = ''.join(buffer).strip()
if word:
words.append(word)
buffer = []
open_quote = False
else:
if not saw_loose_comma and c == ',':
saw_loose_comma = True
buffer.append(c)
except StopIteration:
# If we were parsing an open quote which was never closed treat
# the buffer as unquoted.
if buffer:
if open_quote and ',' in buffer:
saw_loose_comma = True
to_be_split.append(''.join(buffer))
if to_be_split:
if saw_loose_comma:
delimiter = ','
else:
delimiter = ' '
for chunk in to_be_split:
words.extend(split_strip(chunk, delimiter))
words = list(set(words))
words.sort()
return words
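# A minimal usage sketch (hypothetical tag strings) of the parsing rules above:
#
#   >>> parse_tag_input('apple ball cat')
#   ['apple', 'ball', 'cat']
#   >>> parse_tag_input('"has, comma", sweet fig')
#   ['has, comma', 'sweet fig']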
def split_strip(input, delimiter=','):
"""
Splits ``input`` on ``delimiter``, stripping each resulting string
and returning a list of non-empty strings.
"""
if not input:
return []
words = [w.strip() for w in input.split(delimiter)]
return [w for w in words if w]
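# e.g. split_strip(' a , b ,, c ') -> ['a', 'b', 'c']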
def edit_string_for_tags(tags):
"""
Given list of ``Tag`` instances, creates a string representation of
the list suitable for editing by the user, such that submitting the
given string representation back without changing it will give the
same list of tags.
Tag names which contain commas will be double quoted.
If any tag name which isn't being quoted contains whitespace, the
resulting string of tag names will be comma-delimited, otherwise
it will be space-delimited.
"""
names = []
use_commas = False
for tag in tags:
name = tag.name
if ',' in name:
names.append('"%s"' % name)
continue
elif ' ' in name:
if not use_commas:
use_commas = True
names.append(name)
if use_commas:
glue = ', '
else:
glue = ' '
return glue.join(names)
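# e.g. tags named ['plum', 'red fruit', 'sweet, ripe'] are rendered as
# 'plum, red fruit, "sweet, ripe"', which parse_tag_input round-trips to the
# same three names.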
def get_queryset_and_model(queryset_or_model):
"""
Given a ``QuerySet`` or a ``Model``, returns a two-tuple of
(queryset, model).
If a ``Model`` is given, the ``QuerySet`` returned will be created
using its default manager.
"""
try:
return queryset_or_model, queryset_or_model.model
except AttributeError:
return queryset_or_model._default_manager.all(), queryset_or_model
def get_tag_list(tags):
"""
Utility function for accepting tag input in a flexible manner.
If a ``Tag`` object is given, it will be returned in a list as
its single occupant.
If given, the tag names in the following will be used to create a
``Tag`` ``QuerySet``:
* A string, which may contain multiple tag names.
* A list or tuple of strings corresponding to tag names.
* A list or tuple of integers corresponding to tag ids.
If given, the following will be returned as-is:
* A list or tuple of ``Tag`` objects.
* A ``Tag`` ``QuerySet``.
"""
from tagging.models import Tag
if isinstance(tags, Tag):
return [tags]
elif isinstance(tags, QuerySet) and tags.model is Tag:
return tags
elif isinstance(tags, six.string_types):
return Tag.objects.filter(name__in=parse_tag_input(tags))
elif isinstance(tags, (list, tuple)):
if len(tags) == 0:
return tags
contents = set()
for item in tags:
if isinstance(item, six.string_types):
contents.add('string')
elif isinstance(item, Tag):
contents.add('tag')
elif isinstance(item, six.integer_types):
contents.add('int')
if len(contents) == 1:
if 'string' in contents:
return Tag.objects.filter(name__in=[force_text(tag) \
for tag in tags])
elif 'tag' in contents:
return tags
elif 'int' in contents:
return Tag.objects.filter(id__in=tags)
else:
raise ValueError(_('If a list or tuple of tags is provided, they must all be tag names, Tag objects or Tag ids.'))
else:
raise ValueError(_('The tag input given was invalid.'))
def get_tag(tag):
"""
Utility function for accepting single tag input in a flexible
manner.
If a ``Tag`` object is given it will be returned as-is; if a
string or integer are given, they will be used to lookup the
appropriate ``Tag``.
If no matching tag can be found, ``None`` will be returned.
"""
from tagging.models import Tag
if isinstance(tag, Tag):
return tag
try:
if isinstance(tag, six.string_types):
return Tag.objects.get(name=tag)
elif isinstance(tag, six.integer_types):
return Tag.objects.get(id=tag)
except Tag.DoesNotExist:
pass
return None
def _calculate_thresholds(min_weight, max_weight, steps):
delta = (max_weight - min_weight) / float(steps)
return [min_weight + i * delta for i in range(1, steps + 1)]
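# e.g. _calculate_thresholds(1.0, 8.0, 4) -> [2.75, 4.5, 6.25, 8.0]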
def _calculate_tag_weight(weight, max_weight, distribution):
"""
Logarithmic tag weight calculation is based on code from the
`Tag Cloud`_ plugin for Mephisto, by Sven Fuchs.
.. _`Tag Cloud`: http://www.artweb-design.de/projects/mephisto-plugin-tag-cloud
"""
if distribution == LINEAR or max_weight == 1:
return weight
elif distribution == LOGARITHMIC:
return math.log(weight) * max_weight / math.log(max_weight)
raise ValueError(_('Invalid distribution algorithm specified: %s.') % distribution)
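# e.g. with max_weight=8, a count of 4 maps to log(4) * 8 / log(8) ~= 5.33
# under LOGARITHMIC, compressing the gap between mid and top counts.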
def calculate_cloud(tags, steps=4, distribution=LOGARITHMIC):
"""
Add a ``font_size`` attribute to each tag according to the
frequency of its use, as indicated by its ``count``
attribute.
``steps`` defines the range of font sizes - ``font_size`` will
be an integer between 1 and ``steps`` (inclusive).
``distribution`` defines the type of font size distribution
algorithm which will be used - logarithmic or linear. It must be
one of ``tagging.utils.LOGARITHMIC`` or ``tagging.utils.LINEAR``.
"""
if len(tags) > 0:
counts = [tag.count for tag in tags]
min_weight = float(min(counts))
max_weight = float(max(counts))
thresholds = _calculate_thresholds(min_weight, max_weight, steps)
for tag in tags:
font_set = False
tag_weight = _calculate_tag_weight(tag.count, max_weight, distribution)
for i in range(steps):
if not font_set and tag_weight <= thresholds[i]:
tag.font_size = i + 1
font_set = True
return tags
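# A self-contained sketch (hypothetical Tag stand-in) of calculate_cloud; any
# object exposing a ``count`` attribute works in place of a real Tag.
if __name__ == '__main__':  # illustration only
    class _FakeTag(object):
        def __init__(self, name, count):
            self.name, self.count = name, count

    cloud = calculate_cloud([_FakeTag('django', 8), _FakeTag('python', 4),
                             _FakeTag('tagging', 1)], steps=4)
    # Expected (logarithmic): django -> 4, python -> 3, tagging -> 1
    print([(t.name, t.font_size) for t in cloud])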
|
|
# Copyright 2016-2021 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import unittest
from itertools import chain
from pathlib import Path
import mesonbuild.mlog
import mesonbuild.depfile
import mesonbuild.dependencies.base
import mesonbuild.dependencies.factory
import mesonbuild.envconfig
import mesonbuild.environment
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
MachineChoice, OptionKey
)
from mesonbuild.compilers import (
detect_c_compiler, detect_cpp_compiler
)
import mesonbuild.modules.pkgconfig
from run_tests import (
FakeBuild, get_fake_env
)
from .helpers import *
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
hashcounter = re.compile('^ *(#)+')
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open(encoding='utf-8') as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
Builtin-Options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env()
# FIXME: Support other compilers
cc = detect_c_compiler(env, MachineChoice.HOST)
cpp = detect_cpp_compiler(env, MachineChoice.HOST)
for comp in (cc, cpp):
for opt in comp.get_options():
self.assertIn(str(opt), md)
for opt in comp.base_options:
self.assertIn(str(opt), md)
self.assertNotIn('b_unknown', md)
@staticmethod
def _get_section_content(name, sections, md):
for section in sections:
if section and section.group(1) == name:
try:
next_section = next(sections)
end = next_section.start()
except StopIteration:
end = len(md)
# Extract the content for this section
return md[section.end():end]
raise RuntimeError(f'Could not find "{name}" heading')
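    # A usage sketch of the helper above: ``sections`` is an iterator from
    # re.finditer(r"^## (.+)$", md, re.MULTILINE); the helper returns the
    # slice of ``md`` between the named heading and the next one.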
def test_builtin_options_documented(self):
'''
Test that universal options and base options are documented in
Builtin-Options.md.
'''
from itertools import tee
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
found_entries = set()
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
# Extract the content for this section
content = self._get_section_content("Universal options", sections, md)
subsections = tee(re.finditer(r"^### (.+)$", content, re.MULTILINE))
subcontent1 = self._get_section_content("Directories", subsections[0], content)
subcontent2 = self._get_section_content("Core options", subsections[1], content)
subcontent3 = self._get_section_content("Module options", sections, md)
for subcontent in (subcontent1, subcontent2, subcontent3):
# Find the option names
options = set()
# Match either a table row or a table heading separator: | ------ |
rows = re.finditer(r"^\|(?: (\w+) .* | *-+ *)\|", subcontent, re.MULTILINE)
# Skip the header of the first table
next(rows)
# Skip the heading separator of the first table
next(rows)
for m in rows:
value = m.group(1)
# End when the `buildtype` table starts
if value is None:
break
options.add(value)
self.assertEqual(len(found_entries & options), 0)
found_entries |= options
self.assertEqual(found_entries, {
*(str(k.evolve(module=None)) for k in mesonbuild.coredata.BUILTIN_OPTIONS),
*(str(k.evolve(module=None)) for k in mesonbuild.coredata.BUILTIN_OPTIONS_PER_MACHINE),
})
# Check that `buildtype` table inside `Core options` matches how
# setting of builtin options behaves
#
# Find all tables inside this subsection
tables = re.finditer(r"^\| (\w+) .* \|\n\| *[-|\s]+ *\|$", subcontent2, re.MULTILINE)
# Get the table we want using the header of the first column
table = self._get_section_content('buildtype', tables, subcontent2)
# Get table row data
rows = re.finditer(r"^\|(?: (\w+)\s+\| (\w+)\s+\| (\w+) .* | *-+ *)\|", table, re.MULTILINE)
env = get_fake_env()
for m in rows:
buildtype, debug, opt = m.groups()
if debug == 'true':
debug = True
elif debug == 'false':
debug = False
else:
raise RuntimeError(f'Invalid debug value {debug!r} in row:\n{m.group()}')
env.coredata.set_option(OptionKey('buildtype'), buildtype)
self.assertEqual(env.coredata.options[OptionKey('buildtype')].value, buildtype)
self.assertEqual(env.coredata.options[OptionKey('optimization')].value, opt)
self.assertEqual(env.coredata.options[OptionKey('debug')].value, debug)
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
content = self._get_section_content("CPU families", sections, md)
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
        Test that each markdown file in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions and not f.startswith('_include'):
self.assertIn(f, toc)
def test_modules_in_navbar(self):
'''
Test that each module is referenced in navbar_links.html
'''
with open("docs/theme/extra/templates/navbar_links.html", encoding='utf-8') as f:
html = f.read().lower()
self.assertIsNotNone(html)
for f in Path('mesonbuild/modules').glob('*.py'):
if f.name in {'modtest.py', 'qt.py', '__init__.py'}:
continue
name = f'{f.stem}-module.html'
name = name.replace('unstable_', '')
name = name.replace('python3', 'python-3')
name = name.replace('_', '-')
self.assertIn(name, html)
def test_vim_syntax_highlighting(self):
'''
Ensure that vim syntax highlighting files were updated for new
functions in the global namespace in build files.
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim', encoding='utf-8') as f:
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
def test_all_functions_defined_in_ast_interpreter(self):
'''
        Ensure that all functions defined in the Interpreter are also defined
in the AstInterpreter (and vice versa).
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
astint = AstInterpreter('.', '', '')
self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
|
|
from __future__ import print_function, division
import inspect
from sympy.core.cache import cacheit
from sympy.core.singleton import S
from sympy.core.sympify import _sympify
from sympy.logic.boolalg import Boolean
from sympy.utilities.source import get_class
from contextlib import contextmanager
class AssumptionsContext(set):
"""Set representing assumptions.
This is used to represent global assumptions, but you can also use this
class to create your own local assumptions contexts. It is basically a thin
wrapper to Python's set, so see its documentation for advanced usage.
Examples
========
>>> from sympy import AppliedPredicate, Q
>>> from sympy.assumptions.assume import global_assumptions
>>> global_assumptions
AssumptionsContext()
>>> from sympy.abc import x
>>> global_assumptions.add(Q.real(x))
>>> global_assumptions
AssumptionsContext({Q.real(x)})
>>> global_assumptions.remove(Q.real(x))
>>> global_assumptions
AssumptionsContext()
>>> global_assumptions.clear()
"""
def add(self, *assumptions):
"""Add an assumption."""
for a in assumptions:
super(AssumptionsContext, self).add(a)
def _sympystr(self, printer):
if not self:
return "%s()" % self.__class__.__name__
return "%s(%s)" % (self.__class__.__name__, printer._print_set(self))
global_assumptions = AssumptionsContext()
class AppliedPredicate(Boolean):
"""The class of expressions resulting from applying a Predicate.
Examples
========
>>> from sympy import Q, Symbol
>>> x = Symbol('x')
>>> Q.integer(x)
Q.integer(x)
>>> type(Q.integer(x))
<class 'sympy.assumptions.assume.AppliedPredicate'>
"""
__slots__ = []
def __new__(cls, predicate, arg):
arg = _sympify(arg)
return Boolean.__new__(cls, predicate, arg)
is_Atom = True # do not attempt to decompose this
@property
def arg(self):
"""
Return the expression used by this assumption.
Examples
========
>>> from sympy import Q, Symbol
>>> x = Symbol('x')
>>> a = Q.integer(x + 1)
>>> a.arg
x + 1
"""
return self._args[1]
@property
def args(self):
return self._args[1:]
@property
def func(self):
return self._args[0]
@cacheit
def sort_key(self, order=None):
return (self.class_key(), (2, (self.func.name, self.arg.sort_key())),
S.One.sort_key(), S.One)
def __eq__(self, other):
if type(other) is AppliedPredicate:
return self._args == other._args
return False
def __hash__(self):
return super(AppliedPredicate, self).__hash__()
def _eval_ask(self, assumptions):
return self.func.eval(self.arg, assumptions)
@property
def binary_symbols(self):
from sympy.core.relational import Eq, Ne
if self.func.name in ['is_true', 'is_false']:
i = self.arg
if i.is_Boolean or i.is_Symbol or isinstance(i, (Eq, Ne)):
return i.binary_symbols
return set()
class Predicate(Boolean):
"""A predicate is a function that returns a boolean value.
Predicates merely wrap their argument and remain unevaluated:
>>> from sympy import Q, ask
>>> type(Q.prime)
<class 'sympy.assumptions.assume.Predicate'>
>>> Q.prime.name
'prime'
>>> Q.prime(7)
Q.prime(7)
>>> _.func.name
'prime'
To obtain the truth value of an expression containing predicates, use
the function ``ask``:
>>> ask(Q.prime(7))
True
The tautological predicate ``Q.is_true`` can be used to wrap other objects:
>>> from sympy.abc import x
>>> Q.is_true(x > 1)
Q.is_true(x > 1)
"""
is_Atom = True
def __new__(cls, name, handlers=None):
obj = Boolean.__new__(cls)
obj.name = name
obj.handlers = handlers or []
return obj
def _hashable_content(self):
return (self.name,)
def __getnewargs__(self):
return (self.name,)
def __call__(self, expr):
return AppliedPredicate(self, expr)
def add_handler(self, handler):
self.handlers.append(handler)
def remove_handler(self, handler):
self.handlers.remove(handler)
@cacheit
def sort_key(self, order=None):
return self.class_key(), (1, (self.name,)), S.One.sort_key(), S.One
def eval(self, expr, assumptions=True):
"""
Evaluate self(expr) under the given assumptions.
This uses only direct resolution methods, not logical inference.
"""
res, _res = None, None
mro = inspect.getmro(type(expr))
for handler in self.handlers:
cls = get_class(handler)
for subclass in mro:
eval_ = getattr(cls, subclass.__name__, None)
if eval_ is None:
continue
res = eval_(expr, assumptions)
# Do not stop if value returned is None
# Try to check for higher classes
if res is None:
continue
if _res is None:
_res = res
elif res is None:
# since first resolutor was conclusive, we keep that value
res = _res
else:
# only check consistency if both resolutors have concluded
if _res != res:
raise ValueError('incompatible resolutors')
break
return res
@contextmanager
def assuming(*assumptions):
""" Context manager for assumptions
Examples
========
>>> from sympy.assumptions import assuming, Q, ask
>>> from sympy.abc import x, y
>>> print(ask(Q.integer(x + y)))
None
>>> with assuming(Q.integer(x), Q.integer(y)):
... print(ask(Q.integer(x + y)))
True
"""
old_global_assumptions = global_assumptions.copy()
global_assumptions.update(assumptions)
try:
yield
finally:
global_assumptions.clear()
global_assumptions.update(old_global_assumptions)
|
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Decode the trained Attention outputs (TIMIT corpus)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, abspath
import sys
import tensorflow as tf
import yaml
import argparse
sys.path.append(abspath('../../../'))
from experiments.timit.data.load_dataset_attention import Dataset
from models.attention.attention_seq2seq import AttentionSeq2Seq
from utils.io.labels.character import Idx2char
from utils.io.labels.phone import Idx2phone
from utils.evaluation.edit_distance import wer_align
parser = argparse.ArgumentParser()
parser.add_argument('--epoch', type=int, default=-1,
help='the epoch to restore')
parser.add_argument('--model_path', type=str,
help='path to the model to evaluate')
parser.add_argument('--beam_width', type=int, default=20,
help='beam_width (int, optional): beam width for beam search.' +
                    ' 1 disables beam search, which means greedy decoding.')
parser.add_argument('--eval_batch_size', type=int, default=1,
help='the size of mini-batch in evaluation')
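# Example invocation (hypothetical paths):
#   python decode.py --model_path /path/to/saved_model --epoch -1 --beam_width 1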
def do_decode(model, params, epoch, beam_width, eval_batch_size):
"""Decode the Attention outputs.
Args:
model: the model to restore
params (dict): A dictionary of parameters
epoch (int): the epoch to restore
beam_width (int): beam width for beam search.
            1 disables beam search, which means greedy decoding.
        eval_batch_size (int): the size of mini-batch used in evaluation
"""
map_file_path = '../metrics/mapping_files/' + \
params['label_type'] + '.txt'
# Load dataset
test_data = Dataset(
data_type='test', label_type=params['label_type'],
batch_size=eval_batch_size, map_file_path=map_file_path,
splice=params['splice'],
num_stack=params['num_stack'], num_skip=params['num_skip'],
shuffle=False, progressbar=True)
# Define placeholders
model.create_placeholders()
# Add to the graph each operation (including model definition)
_, _, decoder_outputs_train, decoder_outputs_infer = model.compute_loss(
model.inputs_pl_list[0],
model.labels_pl_list[0],
model.inputs_seq_len_pl_list[0],
model.labels_seq_len_pl_list[0],
model.keep_prob_encoder_pl_list[0],
model.keep_prob_decoder_pl_list[0],
model.keep_prob_embedding_pl_list[0])
_, decode_op_infer = model.decode(
decoder_outputs_train,
decoder_outputs_infer)
# Create a saver for writing training checkpoints
saver = tf.train.Saver()
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(model.save_path)
# If check point exists
if ckpt:
model_path = ckpt.model_checkpoint_path
if epoch != -1:
model_path = model_path.split('/')[:-1]
model_path = '/'.join(model_path) + '/model.ckpt-' + str(epoch)
saver.restore(sess, model_path)
print("Model restored: " + model_path)
else:
            raise ValueError('There are no checkpoints.')
# Visualize
decode(session=sess,
decode_op=decode_op_infer,
model=model,
dataset=test_data,
label_type=params['label_type'],
is_test=True,
save_path=None)
# save_path=model.save_path)
def decode(session, decode_op, model, dataset, label_type,
is_test=False, save_path=None):
"""Visualize label outputs of Attention-based model.
Args:
session: session of training model
decode_op: operation for decoding
model: the model to evaluate
dataset: An instance of a `Dataset` class
label_type (string): phone39 or phone48 or phone61 or character or
character_capital_divide
is_test (bool, optional):
save_path (string): path to save decoding results
"""
if label_type == 'character':
map_fn = Idx2char(
map_file_path='../metrics/mapping_files/character.txt')
elif label_type == 'character_capital_divide':
map_fn = Idx2char(
map_file_path='../metrics/mapping_files/character_capital_divide.txt',
capital_divide=True)
else:
map_fn = Idx2phone(
map_file_path='../metrics/mapping_files/' + label_type + '.txt')
if save_path is not None:
sys.stdout = open(join(model.model_dir, 'decode.txt'), 'w')
for data, is_new_epoch in dataset:
# Create feed dictionary for next mini batch
inputs, labels_true, inputs_seq_len, labels_seq_len, input_names = data
feed_dict = {
model.inputs_pl_list[0]: inputs[0],
model.inputs_seq_len_pl_list[0]: inputs_seq_len[0],
model.keep_prob_encoder_pl_list[0]: 1.0,
model.keep_prob_decoder_pl_list[0]: 1.0,
model.keep_prob_embedding_pl_list[0]: 1.0
}
batch_size = inputs[0].shape[0]
labels_pred = session.run(decode_op, feed_dict=feed_dict)
for i_batch in range(batch_size):
print('----- wav: %s -----' % input_names[0][i_batch])
if is_test:
str_true = labels_true[0][i_batch][0]
else:
str_true = map_fn(
labels_true[0][i_batch][1:labels_seq_len[0][i_batch] - 1])
# NOTE: Exclude <SOS> and <EOS>
str_pred = map_fn(labels_pred[i_batch]).split('>')[0]
            # NOTE: Truncate by <EOS>
if 'phone' in label_type:
# Remove the last space
if str_pred[-1] == ' ':
str_pred = str_pred[:-1]
print('Ref: %s' % str_true)
print('Hyp: %s' % str_pred)
if is_new_epoch:
break
def main():
args = parser.parse_args()
# Load config file
with open(join(args.model_path, 'config.yml'), "r") as f:
config = yaml.load(f)
params = config['param']
    # Not counting the <SOS> and <EOS> classes
if params['label_type'] == 'phone61':
params['num_classes'] = 61
elif params['label_type'] == 'phone48':
params['num_classes'] = 48
elif params['label_type'] == 'phone39':
params['num_classes'] = 39
elif params['label_type'] == 'character':
params['num_classes'] = 28
elif params['label_type'] == 'character_capital_divide':
params['num_classes'] = 72
    else:
        raise TypeError("Unexpected label_type: {}".format(params['label_type']))
# Model setting
model = AttentionSeq2Seq(
input_size=params['input_size'] * params['num_stack'],
encoder_type=params['encoder_type'],
encoder_num_units=params['encoder_num_units'],
encoder_num_layers=params['encoder_num_layers'],
encoder_num_proj=params['encoder_num_proj'],
attention_type=params['attention_type'],
attention_dim=params['attention_dim'],
decoder_type=params['decoder_type'],
decoder_num_units=params['decoder_num_units'],
decoder_num_layers=params['decoder_num_layers'],
embedding_dim=params['embedding_dim'],
num_classes=params['num_classes'],
sos_index=params['num_classes'],
eos_index=params['num_classes'] + 1,
max_decode_length=params['max_decode_length'],
lstm_impl='LSTMBlockCell',
use_peephole=params['use_peephole'],
parameter_init=params['weight_init'],
clip_grad_norm=params['clip_grad_norm'],
clip_activation_encoder=params['clip_activation_encoder'],
clip_activation_decoder=params['clip_activation_decoder'],
weight_decay=params['weight_decay'],
time_major=True,
sharpening_factor=params['sharpening_factor'],
logits_temperature=params['logits_temperature'])
model.save_path = args.model_path
do_decode(model=model, params=params,
              epoch=args.epoch, beam_width=args.beam_width,
eval_batch_size=args.eval_batch_size)
if __name__ == '__main__':
main()
|
|
#-------------------------------------------------------------------------------
# bindlibrary.py
#
# library for get bind information.
#
#
# Copyright (C) 2015, Ryosuke Fukatani
# License: Apache 2.0
#-------------------------------------------------------------------------------
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from pyverilog.dataflow.dataflow import *
class BindLibrary(object):
""" [CLASSES]
Library for using dataflow information.
"""
def __init__(self, binddict, terms):
def make_scope_dict(terms):
""" [FUNCTIONS] for getScopeChaindict
            make {string: ScopeChain, ...} from terms
"""
scope_dict = {}
for scope in terms.keys():
scope_dict[str(scope)] = scope
return scope_dict
self._binddict = binddict
self._terms = terms
self.scope_dict = make_scope_dict(terms)
self.cache = {}
self.gnb_cache = {}
def dfx_memoize(f):
""" [FUNCTIONS]
Memoize for extract_all_dfxxx.
Using self.cache.
"""
def helper(self, target_tree, tree_list, bit, dftype):
if dftype == DFTerminal:
if (target_tree, bit) not in self.cache:
self.cache[(target_tree, bit)] = f(self, target_tree, set([]), bit, dftype)
return tree_list.union(self.cache[(target_tree, bit)])
else:
return f(self, target_tree, tree_list, bit, dftype)
return helper
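    # Note: dfx_memoize caches only DFTerminal lookups, keyed by
    # (target_tree, bit); other node types recurse uncached because their
    # results depend on the accumulated tree_list.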
@dfx_memoize
def extract_all_dfxxx(self, target_tree, tree_list, bit, dftype):
"""[FUNCTIONS]
return set of DFXXX
target_tree:DF***
tree_list:{(type, DF***, bit),(type, DF***, bit),...}
bit: signal bit pointer
dftype: DFOperator or DFIntConst or ,...
"""
if dftype == DFTerminal and isinstance(target_tree, DFTerminal):
target_scope = self.get_scope(target_tree)
if target_scope in self._binddict.keys():
target_bind, target_term_lsb = self.get_next_bind(target_scope, bit)
if not target_bind.isCombination():
tree_list.add((target_tree, bit + target_term_lsb))
else: #TOP Input port
tree_list.add((target_tree, bit + eval_value(self._terms[self.scope_dict[str(target_tree)]].lsb)))
else:
if isinstance(target_tree, dftype):
tree_list.add((target_tree, bit))
if hasattr(target_tree, "nextnodes"):
if isinstance(target_tree, DFConcat):
now_max_bit = 0
now_min_bit = 0
for nextnode in reversed(target_tree.nextnodes):
now_max_bit = now_min_bit + self.get_bit_width_from_tree(nextnode) - 1
if now_min_bit <= bit <= now_max_bit:
tree_list = self.extract_all_dfxxx(nextnode, tree_list, bit - now_min_bit, dftype)
break
now_min_bit = now_max_bit + 1
else:
for nextnode in target_tree.nextnodes:
if isinstance(target_tree, DFBranch) and nextnode == target_tree.condnode:
tree_list = self.extract_all_dfxxx(nextnode, tree_list, 0, dftype)
else:
tree_list = self.extract_all_dfxxx(nextnode, tree_list, bit, dftype)
elif isinstance(target_tree, DFBranch):
tree_list = self.extract_all_dfxxx(target_tree.condnode, tree_list, 0, dftype)
tree_list = self.extract_all_dfxxx(target_tree.truenode, tree_list, bit, dftype)
tree_list = self.extract_all_dfxxx(target_tree.falsenode, tree_list, bit, dftype)
elif isinstance(target_tree, DFTerminal):
target_scope = self.get_scope(target_tree)
if target_scope in self._binddict.keys():
target_bind, target_term_lsb = self.get_next_bind(target_scope, bit)
if target_bind.isCombination():
tree_list = self.extract_all_dfxxx(target_bind.tree, tree_list, bit, dftype)
elif isinstance(target_tree, DFPartselect):
ref_bit = eval_value(target_tree.lsb) + bit - eval_value(self._terms[self.scope_dict[str(target_tree.var)]].lsb)
tree_list = self.extract_all_dfxxx(target_tree.var, tree_list, ref_bit, dftype)
return tree_list
def search_combloop(self, target_tree, bit, start_tree, start_bit, find_cnt=0, rec_call_cnt=0):
"""[FUNCTIONS]
target_tree:DF***
bit: signal bit pointer
start_tree:DF***
"""
if (str(target_tree), bit) == (start_tree, start_bit):
find_cnt += 1
if find_cnt == 2:
raise CombLoopException('Combinational loop is found @' + str(start_tree))
rec_call_cnt += 1
if rec_call_cnt > 1000:
raise CombLoopException(str(start_tree) + ' may be combinational loop, or too complex logic (over 1000 variable).')
if hasattr(target_tree, "nextnodes"):
if isinstance(target_tree, DFConcat):
now_max_bit = 0
now_min_bit = 0
for nextnode in reversed(target_tree.nextnodes):
now_max_bit = now_min_bit + self.get_bit_width_from_tree(nextnode) - 1
if now_min_bit <= bit <= now_max_bit:
self.search_combloop(nextnode, bit - now_min_bit, start_tree, start_bit, find_cnt, rec_call_cnt)
break
now_min_bit = now_max_bit + 1
else:
for nextnode in target_tree.nextnodes:
if isinstance(target_tree, DFBranch) and nextnode == target_tree.condnode:
self.search_combloop(nextnode, 0, start_tree, start_bit, find_cnt, rec_call_cnt)
else:
self.search_combloop(nextnode, bit, start_tree, start_bit, find_cnt, rec_call_cnt)
elif isinstance(target_tree, DFBranch):
self.search_combloop(target_tree.condnode, 0, start_tree, start_bit, find_cnt, rec_call_cnt)
self.search_combloop(target_tree.truenode, bit, start_tree, start_bit, find_cnt, rec_call_cnt)
self.search_combloop(target_tree.falsenode, bit, start_tree, start_bit, find_cnt, rec_call_cnt)
elif isinstance(target_tree, DFTerminal):
target_scope = self.get_scope(target_tree)
if target_scope in self._binddict.keys():
target_bind, target_term_lsb = self.get_next_bind(target_scope, bit)
if target_bind.isCombination():
self.search_combloop(target_bind.tree, bit, start_tree, start_bit, find_cnt, rec_call_cnt)
elif isinstance(target_tree, DFPartselect):
ref_bit = eval_value(target_tree.lsb) + bit - eval_value(self._terms[self.scope_dict[str(target_tree.var)]].lsb)
self.search_combloop(target_tree.var, ref_bit, start_tree, start_bit, find_cnt, rec_call_cnt)
return
def delete_all_cache(self):
self.cache = {}
        self.gnb_cache = {}
def gnb_memoize(f):
def helper(self, y, z):
if (y, z) not in self.gnb_cache:
self.gnb_cache[(y, z)] = f(self, y, z)
return self.gnb_cache[(y, z)]
return helper
@gnb_memoize
def get_next_bind(self, scope, bit):
"""[FUNCTIONS] get root bind.(mainly use at 'Rename' terminal.)
"""
if scope in self._binddict.keys():
target_binds = self._binddict[scope]
target_bind_index = self.get_bind_index(target_binds, bit + eval_value(self._terms[scope].lsb), self._terms[scope])
target_bind = target_binds[target_bind_index]
return target_bind, eval_value(self._terms[scope].lsb)
        else:
            return None, eval_value(self._terms[scope].lsb)
def get_bind_index(self, binds=None, bit=None, term=None, scope=None):
"""[FUNCTIONS] get bind index in that target bit exists.
"""
if 'Rename' in term.termtype:
return 0
else:
if scope is not None:
binds = self._binddict[scope]
term = self._terms[scope]
for index, bind in enumerate(binds):
if bind.lsb is None:
return 0
if self.get_bind_lsb(bind) <= bit <= self.get_bind_msb(bind):
return index
else:
raise IRREGAL_CODE_FORM("unexpected bind @"+binds[0].tostr())
def get_bit_width_from_tree(self, tree):
onebit_comb = ('Ulnot', 'Unot', 'Eq', 'Ne',
'Lor', 'Land', 'Unand', 'Uor', 'Unor', 'Uxor', 'Uxnor')
if isinstance(tree, DFTerminal):
term = self._terms[self.get_scope(tree)]
return eval_value(term.msb) + 1
elif isinstance(tree, DFPartselect):
return eval_value(tree.msb) - eval_value(tree.lsb) + 1
elif isinstance(tree, DFOperator):
if tree.operator in onebit_comb:
return 1
else:
each_sizes = (self.get_bit_width_from_tree(nextnode) for nextnode in tree.nextnodes)
return min(each_sizes)
elif isinstance(tree, DFIntConst):
return tree.width()
elif isinstance(tree, DFConcat):
return sum([self.get_bit_width_from_tree(nextnode) for nextnode in tree.nextnodes])
elif isinstance(tree, DFEvalValue):
return tree.width
else:
raise IRREGAL_CODE_FORM("unexpected concat node")
def walk_reg_each_bit(self):
for tk, tv in sorted(self._terms.items(), key=lambda x: len(x[0])):
if tk in self._binddict.keys():
for bvi in self._binddict[tk]: #process for each always block
bind_lsb = self.get_bind_lsb(bvi)
bind_msb = self.get_bind_msb(bvi)
for bit in range(bind_lsb, bind_msb + 1):
yield tv, tk, bvi, bit, bind_lsb
def walk_signal(self):
for tk, tv in sorted(self._terms.items(), key=lambda x: len(x[0])):
yield tv, tk
def get_bind_lsb(self, bind):
if bind.lsb:
return bind.lsb.value
else:
return 0
def get_bind_msb(self, bind):
if bind.msb:
return bind.msb.value
else:
return 0
def get_scope(self, tree):
name = str(tree)
if name in self.scope_dict.keys():
return self.scope_dict[name]
else:
return None
class CombLoopException(Exception): pass
class MothernodeSetter(BindLibrary):
"""[CLASSES]
set mother node for all nodes.
need expressly call destructer.
"""
def __init__(self, bind_library):
self._binddict = bind_library._binddict
self._terms = bind_library._terms
self.scope_dict = bind_library.scope_dict
self.cache = bind_library.cache
self.gnb_cache = bind_library.gnb_cache
#self.disable_dfxxx_eq()
def __del__(self):
self.enable_dfxxx_eq()
def set_mother_node(f):
def helper(self, target_tree, tree_list, bit, dftype):
tree_list = f(self, target_tree, tree_list, bit, dftype)
if tree_list:
for tree, bit in tree_list:
#if hasattr(tree, 'mother_node'): continue
#if str(tree) == str(target_tree): continue
tree.mother_node = target_tree
return tree_list
return helper
@set_mother_node
def extract_all_dfxxx(self, target_tree, tree_list, bit, dftype):
return BindLibrary.extract_all_dfxxx(self, target_tree, tree_list, bit, dftype)
def disable_dfxxx_eq(self):
""" [FUNCTIONS]
Chenge df***.__eq__()method to identify each tree.
"""
self.DFConstant__eq__org = DFConstant.__eq__
self.DFEvalValue__eq__org = DFEvalValue.__eq__
self.DFUndefined__eq__org = DFUndefined.__eq__
self.DFHighImpedance__eq__org = DFHighImpedance.__eq__
self.DFTerminal__eq__org = DFTerminal.__eq__
self.DFBranch__eq__org = DFBranch.__eq__
self.DFOperator__eq__org = DFOperator.__eq__
self.DFPartselect__eq__org = DFPartselect.__eq__
self.DFPointer__eq__org = DFPointer.__eq__
self.DFConcat__eq__org = DFConcat.__eq__
DFConstant.__eq__ = return_false.__get__(DFConstant)
DFEvalValue.__eq__ = return_false.__get__(DFEvalValue)
DFUndefined.__eq__ = return_false.__get__(DFUndefined)
DFHighImpedance.__eq__ = return_false.__get__(DFHighImpedance)
DFTerminal.__eq__ = return_false.__get__(DFTerminal)
DFBranch.__eq__ = return_false.__get__(DFBranch)
DFOperator.__eq__ = return_false.__get__(DFOperator)
DFPartselect.__eq__ = return_false.__get__(DFPartselect)
DFPointer.__eq__ = return_false.__get__(DFPointer)
DFConcat.__eq__ = return_false.__get__(DFConcat)
#DFDelay.__eq__ = MethodType(return_false, None, DFDelay)
#DFSyscall.__eq__ = MethodType(return_false, None, DFSyscall)
    def enable_dfxxx_eq(self):
        # Restore only if disable_dfxxx_eq() actually saved the originals;
        # __del__ calls this unconditionally while disable is commented out above.
        if not hasattr(self, 'DFConstant__eq__org'):
            return
        DFConstant.__eq__ = self.DFConstant__eq__org
DFEvalValue.__eq__ = self.DFEvalValue__eq__org
DFUndefined.__eq__ = self.DFUndefined__eq__org
DFHighImpedance.__eq__ = self.DFHighImpedance__eq__org
DFTerminal.__eq__ = self.DFTerminal__eq__org
DFBranch.__eq__ = self.DFBranch__eq__org
DFOperator.__eq__ = self.DFOperator__eq__org
DFPartselect.__eq__ = self.DFPartselect__eq__org
DFPointer.__eq__ = self.DFPointer__eq__org
DFConcat.__eq__ = self.DFConcat__eq__org
def return_false(self, other):
return False
def DFConstant_eq_org(self, other):
if type(self) != type(other): return False
return self.value == other.value
def DFEvalValue_eq_org(self, other):
if type(self) != type(other): return False
return (self.value == other.value and self.width == other.width and
self.isfloat == other.isfloat and self.isstring == other.isstring)
def DFUndefined_eq_org(self, other):
if type(self) != type(other): return False
return self.width == other.width
def DFHighImpedance_eq_org(self, other):
if type(self) != type(other): return False
return self.width == other.width
def DFTerminal_eq_org(self, other):
if type(self) != type(other): return False
return self.name == other.name
def DFBranch_eq_org(self, other):
if type(self) != type(other): return False
return (self.condnode == other.condnode and self.truenode == other.truenode and
self.falsenode == other.falsenode)
def DFOperator_eq_org(self, other):
if type(self) != type(other): return False
return self.operator == other.operator and self.nextnodes == other.nextnodes
def DFPartselect_eq_org(self, other):
if type(self) != type(other): return False
return self.var == other.var and self.msb == other.msb and self.lsb == other.lsb
def DFPointer_eq_org(self, other):
if type(self) != type(other): return False
return self.var == other.var and self.ptr == other.ptr
def DFConcat_eq_org(self, other):
if type(self) != type(other): return False
return self.nextnodes == other.nextnodes
def eval_value(tree):
    if isinstance(tree, DFOperator):
        for nextnode in tree.nextnodes:
            assert(isinstance(nextnode, DFEvalValue)
                   or isinstance(nextnode, DFIntConst)
                   or isinstance(nextnode, DFOperator)
                   or isinstance(nextnode, DFTerminal))
        if tree.operator == 'Plus':
            return eval_value(tree.nextnodes[0]) + eval_value(tree.nextnodes[1])
        elif tree.operator == 'Minus':
            return eval_value(tree.nextnodes[0]) - eval_value(tree.nextnodes[1])
        elif tree.operator == 'Times':
            return eval_value(tree.nextnodes[0]) * eval_value(tree.nextnodes[1])
        else:
            raise Exception('unimplemented operator for tree type ' + str(type(tree)))
    elif isinstance(tree, DFTerminal):
        # Evaluating a bare terminal requires bind information, which this
        # module-level helper has no access to.
        raise Exception('Cannot evaluate DFTerminal without bind information @bindlibrary')
    elif isinstance(tree, DFIntConst):
        return tree.eval()
    elif isinstance(tree, DFEvalValue):
        return tree.value
    elif tree is None:
        return 0
    else:
        raise Exception('Unexpected error@bindlibrary')
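# Usage sketch (not part of the original module): how a BindLibrary might be
# driven from pyverilog's dataflow analyzer. The file name 'top.v' and the
# top-module name 'top' are placeholders; the analyzer calls follow
# pyverilog's standard VerilogDataflowAnalyzer API.
if __name__ == '__main__':
    from pyverilog.dataflow.dataflow_analyzer import VerilogDataflowAnalyzer
    analyzer = VerilogDataflowAnalyzer(['top.v'], 'top')
    analyzer.generate()
    library = BindLibrary(analyzer.getBinddict(), analyzer.getTerms())
    # For every bound bit, collect the terminals that combinationally feed it.
    for term, scope, bind, bit, bind_lsb in library.walk_reg_each_bit():
        sources = library.extract_all_dfxxx(bind.tree, set([]),
                                            bit - bind_lsb, DFTerminal)
        print(str(scope) + '[%d] <- %d source terminal(s)' % (bit, len(sources)))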
|
|
#!/usr/bin/env python
'''======================================================
Created by: D. Spencer Maughan
Last updated: May 2015
File name: IRIS_origin_IP.py
Organization: RISC Lab, Utah State University
Notes:
This file is meant for realtime tuning of waypoint PID gains.
======================================================'''
import roslib; roslib.load_manifest('risc_msgs')
import rospy
from math import *
import numpy as np
import time
import cv2
import rospkg
#=======================#
# Messages Needed #
#=======================#
from risc_msgs.msg import *
from std_msgs.msg import Bool
from roscopter.msg import Status
#==========================#
# Trackbar Variables #
#==========================#
# X Position
kpx = 330
kix = 33
kdx = 356
# Y Position
kpy = 0
kiy = 0
kdy = 0
# Z Position
kpz = 200
kiz = 54
kdz = 193
#========================#
# Globals #
#========================#
nominal_thrust = 0 # thrust necessary to maintain hover given battery level
ctrl_status = False
states = Cortex()
states.Obj = [States()]*1
euler_max = 45*np.pi/180
max_yaw_rate = .3490659 #in radians/sec
rate = 45 # Hz
image = 0
start_time = 0
#==================#
# Publishers #
#==================#
pub_ctrl = rospy.Publisher('/wp_controls', Controls, queue_size = 1)
pub_traj = rospy.Publisher('/trajectory', Trajectories, queue_size = 1)
#============================#
# Integrator Threshold #
#============================#
Integrator = np.asmatrix(np.zeros((7,1)))
x_int_thresh = 10
y_int_thresh = 10
z_int_thresh = 10
xdot_int_thresh = 1
ydot_int_thresh = 1
zdot_int_thresh = 1
psi_int_thresh = 0
Int_thresh = np.matrix([ [x_int_thresh], [y_int_thresh], [z_int_thresh],\
[xdot_int_thresh],[ydot_int_thresh],[zdot_int_thresh],\
[psi_int_thresh]])
#=========================#
# Trackbar Functions #
#=========================#
# X Position
def Fkpx(x):
global kpx
kpx = x
def Fkix(x):
global kix
kix = x
def Fkdx(x):
global kdx
kdx = x
# Y Position
def Fkpy(x):
global kpy
kpy = x
def Fkiy(x):
global kiy
kiy = x
def Fkdy(x):
global kdy
kdy = x
# Z Position
def Fkpz(x):
global kpz
kpz = x
def Fkiz(x):
global kiz
kiz = x
def Fkdz(x):
global kdz
kdz = x
#=====================#
# Integrator Cap #
#=====================#
def IntegratorCap(I):
global Int_thresh
good_terms = np.multiply(I,abs(I)<Int_thresh) # leave as is and set others to zero
bad_terms = abs(I)>Int_thresh # set bad terms to 1 and others to 0
Int = good_terms + np.multiply(np.sign(I),np.multiply(bad_terms,Int_thresh))
return Int
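# Worked example (illustrative): with a threshold entry of 10, an accumulated
# integrator value of 13.7 is clamped to +10 and -13.7 to -10, while entries
# strictly below the threshold in magnitude pass through unchanged. (Entries
# exactly equal to the threshold fall through both masks and come out as 0.)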
#========================#
# Get Cortex States #
#========================#
def GetStates(S):
global states
states = S
#=========================#
# Get Battery Status #
#=========================#
def GetBatt(S):
global nominal_thrust
B = S.battery_remaining
# coefficients for fourth order fit
# determined 11 May 2015 by Spencer Maughan and Ishmaal Erekson
c0 = 0.491674747062374
c1 = -0.024809293286468
c2 = 0.000662710609466
c3 = -0.000008160593348
c4 = 0.000000033699651
nominal_thrust = c0+c1*B+c2*B**2+c3*B**3+c4*B**4
#============================#
# Get Controller Status #
#============================#
def GetStatus(S):
global ctrl_status
ctrl_status = S.data
#========================#
# Basic Controller #
#========================#
def Basic_Controller():
global states, euler_max, max_yaw_rate, pub_ctrl,K
Ctrl = Controls()
Ctrl.Obj = [Control()]*1
Ctrl.header.stamp = states.header.stamp
    g = 9.80665 # standard gravitational acceleration in m/s^2
m = 1.282 # IRIS mass in kg
#===================================#
# Get State Trajectory Errors #
#===================================#
if states.Obj[0].visible:
X = np.asmatrix(np.zeros((7,1)))
X[0] = -states.Obj[0].x
X[1] = -states.Obj[0].y
X[2] = .5-states.Obj[0].z
X[3] = -states.Obj[0].u
X[4] = -states.Obj[0].v
X[5] = -states.Obj[0].w
X[6] = -states.Obj[0].psi*np.pi/180
#======================================#
# Only Integrate When Autonomous #
#======================================#
global Integrator,ctrl_status
if ctrl_status:
#=======================#
# Integrator Term #
#=======================#
Integrator = Integrator + np.divide(X,rate)
#======================#
# Integrator Cap #
#======================#
global Int_thresh
Integrator = IntegratorCap(Integrator)
elif not ctrl_status:
Integrator = np.asmatrix(np.zeros((7,1)))
#===================#
# Gain Matrix #
#===================#
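        # NOTE: the second (Y-position) row below reuses the X gains kpx/kdx,
        # so the kpy and kdy trackbars have no effect on K; also, under
        # Python 2 these integer divisions by 100 truncate (e.g. 330/100 -> 3).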
K = np.matrix([[ kpx/100, 0, 0, kdx/100, 0, 0, 0],\
[ 0, kpx/100, 0, 0, kdx/100, 0, 0],\
[ 0, 0, kpz/100, 0, 0, kdz/100, 0],\
[ 0, 0, 0, 0, 0, 0,.5]])
#========================#
# Integrator gains #
#========================#
K_Int = np.matrix([[ kix/100, 0, 0, 0, 0, 0, 0],\
[ 0, kiy/100, 0, 0, 0, 0, 0],\
[ 0, 0, kiz/100, 0, 0, 0, 0],\
[ 0, 0, 0, 0, 0, 0, 0]])
#============================================#
# Differential Flatness Control Input #
#============================================#
# LQR input
utilde = -K*X - K_Int*Integrator
# required input
u_r = np.asmatrix(np.zeros((4,1)))
u = utilde+u_r-np.matrix([[0],[0],[9.81],[0]])
#==================================#
# Rotate to Vehicle 1 Frame #
#==================================#
psi = states.Obj[0].psi*np.pi/180
rotZ = np.matrix([[cos(psi), sin(psi), 0],[-sin(psi), cos(psi), 0],[0, 0, 1]])
Cart = np.matrix([[1, 0, 0],[0, -1, 0],[0, 0, -1]])
u[:-1] = Cart*rotZ*u[:-1]
#===================================#
# Normalize given the Thrust #
#===================================#
T = sqrt(u[0:3].T*u[0:3])
u[:-1] = np.divide(u[:-1],-T)
#==================#
# Set Controls #
#==================#
# Controls for Ardrone
# -phi = right... +phi = left
# -theta = back... +theta = forward
# -psi = right... +psi = left
global phi_trim,theta_trim,phi_scale,theta_scale
phi_d = (asin(u[1,-1]))
theta_d = (-asin(u[0,-1]))
ctrl = Control()
ctrl.name = states.Obj[0].name
        ctrl.phi = phi_d*6/np.pi  # 1 = 30 deg
        ctrl.theta = theta_d*6/np.pi
ctrl.psi = -u[3,-1]/max_yaw_rate
global nominal_thrust
T_d = nominal_thrust+(T-g)/g
ctrl.T = T_d
Ctrl.Obj[0] = ctrl
Ctrl.header = states.header
#rospy.loginfo("latency = %f",states.header.stamp.to_sec()-rospy.get_time())
#Ctrl.header.stamp = rospy.Time.now()
pub_ctrl.publish(Ctrl)
cv2.imshow("gains", image)
cv2.waitKey(1)
#===================#
# Main #
#===================#
if __name__=='__main__':
import sys
rospy.init_node('IRIS_Origin_Hold')
#===============================#
# Get Image Using RosPack #
#===============================#
rospack = rospkg.RosPack()
path = rospack.get_path('risc_control')
image = cv2.imread(path+'/mario.jpg')
    image = cv2.resize(image,(321,123))
cv2.namedWindow("gains")
#========================#
# Create Trackbars #
#========================#
cv2.createTrackbar("kpx", "gains", kpx, 3000, Fkpx)
cv2.createTrackbar("kix", "gains", kix, 1000, Fkix)
cv2.createTrackbar("kdx", "gains", kdx, 3000, Fkdx)
cv2.createTrackbar("kpy", "gains", kpy, 1000, Fkpy)
cv2.createTrackbar("kiy", "gains", kiy, 1000, Fkiy)
cv2.createTrackbar("kdy", "gains", kdy, 1000, Fkdy)
cv2.createTrackbar("kpz", "gains", kpz, 1000, Fkpz)
cv2.createTrackbar("kiz", "gains", kiz, 1000, Fkiz)
cv2.createTrackbar("kdz", "gains", kdz, 1000, Fkdz)
#=====================================#
# Set up Publish/Subscribe Loop #
#=====================================#
r = rospy.Rate(rate)
while not rospy.is_shutdown():
sub_cortex = rospy.Subscriber('/cortex_raw' , Cortex, GetStates, queue_size=1, buff_size=2**12)
sub_Batt = rospy.Subscriber('/apm/status' , Status, GetBatt, queue_size=1, buff_size=2**12)
sub_status = rospy.Subscriber('/controller_status' , Bool, GetStatus, queue_size=1, buff_size=2**12)
Basic_Controller()
r.sleep()
|
|
from datetime import datetime, timedelta
from manager_rest.test.base_test import BaseServerTestCase
from cloudify_rest_client.exceptions import CloudifyClientError
class ExecutionSchedulesTestCase(BaseServerTestCase):
DEPLOYMENT_ID = 'deployment'
fmt = '%Y-%m-%dT%H:%M:%S.%fZ'
an_hour_from_now = \
datetime.utcnow().replace(microsecond=0) + timedelta(hours=1)
two_hours_from_now = \
datetime.utcnow().replace(microsecond=0) + timedelta(hours=2)
three_hours_from_now = \
datetime.utcnow().replace(microsecond=0) + timedelta(hours=3)
three_weeks_from_now = \
datetime.utcnow().replace(microsecond=0) + timedelta(weeks=3)
deployment_id = None
def setUp(self):
super(ExecutionSchedulesTestCase, self).setUp()
_, self.deployment_id, _, _ = self.put_deployment(self.DEPLOYMENT_ID)
def test_schedule_create(self):
schedule_id = 'sched-1'
workflow_id = 'install'
schedule = self.client.execution_schedules.create(
schedule_id, self.deployment_id, workflow_id,
since=self.an_hour_from_now, recurrence='1 minutes', count=5)
self.assertEqual(schedule.id, schedule_id)
self.assertEqual(schedule.deployment_id, self.deployment_id)
self.assertEqual(schedule.workflow_id, workflow_id)
self.assertEqual(datetime.strptime(schedule.since, self.fmt),
self.an_hour_from_now)
self.assertEqual(len(schedule['all_next_occurrences']), 5)
self.assertEqual(
datetime.strptime(schedule['next_occurrence'], self.fmt),
self.an_hour_from_now)
self.assertEqual(schedule['slip'], 0)
self.assertEqual(schedule['stop_on_fail'], False)
def test_schedule_create_weekdays(self):
schedule = self.client.execution_schedules.create(
'sched-weekdays', self.deployment_id, 'install',
since=self.an_hour_from_now, until=self.three_weeks_from_now,
recurrence='1 days', weekdays=['mo', 'tu', 'we', 'th'])
self.assertEqual(len(schedule['all_next_occurrences']), 12) # 3w * 4d
def test_schedules_list(self):
schedule_ids = ['sched-1', 'sched-2']
for schedule_id in schedule_ids:
self.client.execution_schedules.create(
schedule_id, self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='1 minutes', count=5)
schedules = self.client.execution_schedules.list()
self.assertEqual(len(schedules), 2)
self.assertSetEqual({s.id for s in schedules}, set(schedule_ids))
def test_schedule_delete(self):
self.client.execution_schedules.create(
'delete-me', self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='1 minutes', count=5)
self.assertEqual(len(self.client.execution_schedules.list()), 1)
self.client.execution_schedules.delete('delete-me', self.deployment_id)
self.assertEqual(len(self.client.execution_schedules.list()), 0)
def test_schedule_update(self):
schedule = self.client.execution_schedules.create(
'update-me', self.deployment_id, 'install',
since=self.an_hour_from_now, until=self.two_hours_from_now,
recurrence='1 minutes')
# `until` is inclusive
self.assertEqual(len(schedule['all_next_occurrences']), 61)
self.assertEqual(schedule['rule']['recurrence'], '1 minutes')
self.assertEqual(schedule['slip'], 0)
self.client.execution_schedules.update(
'update-me', self.deployment_id, recurrence='5 minutes', slip=30)
# get the schedule from the DB and not directly from .update endpoint
schedule = self.client.execution_schedules.get('update-me',
self.deployment_id)
self.assertEqual(len(schedule['all_next_occurrences']), 13) # 60/5+1
self.assertEqual(schedule['rule']['recurrence'], '5 minutes')
self.assertEqual(schedule['slip'], 30)
self.client.execution_schedules.update(
'update-me', self.deployment_id, until=self.three_hours_from_now)
schedule = self.client.execution_schedules.get('update-me',
self.deployment_id)
self.assertEqual(len(schedule['all_next_occurrences']), 25) # 2*60/5+1
def test_schedule_get_invalid_id(self):
self.assertRaisesRegex(
CloudifyClientError,
'404: Requested `ExecutionSchedule` .* was not found',
self.client.execution_schedules.get,
'nonsuch',
self.deployment_id
)
def test_schedule_create_no_since(self):
self.assertRaises(
AssertionError,
self.client.execution_schedules.create,
'some_id', self.deployment_id, 'some_workflow',
recurrence='1 minutes', count=5
)
def test_schedule_create_invalid_time_format(self):
self.assertRaisesRegex(
AttributeError,
"'str' object has no attribute 'isoformat'",
self.client.execution_schedules.create,
'some_id', self.deployment_id, 'install',
since='long ago', recurrence='1 minutes', count=5
)
def test_schedule_create_invalid_workflow(self):
self.assertRaisesRegex(
CloudifyClientError,
'400: Workflow some_workflow does not exist',
self.client.execution_schedules.create,
'some_id', self.deployment_id, 'some_workflow',
since=self.an_hour_from_now, recurrence='1 minutes', count=5,
)
def test_schedule_invalid_weekdays(self):
self.assertRaisesRegex(
CloudifyClientError,
'400:.* invalid weekday',
self.client.execution_schedules.create,
'bad-weekdays', self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='4 hours',
weekdays=['oneday', 'someday']
)
self.client.execution_schedules.create(
'good-weekdays', self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='4 hours', count=6,
weekdays=['mo', 'tu']
)
self.assertRaisesRegex(
CloudifyClientError,
'400:.* invalid weekday',
self.client.execution_schedules.update,
'good-weekdays', self.deployment_id, weekdays=['oneday', 'someday']
)
def test_schedule_create_invalid_complex_weekdays(self):
self.assertRaisesRegex(
CloudifyClientError,
'400:.* invalid weekday',
self.client.execution_schedules.create,
'bad-complex-wd', self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='4 hours',
weekdays=['5tu']
)
def test_schedule_create_invalid_recurrence_with_complex_weekdays(self):
self.assertRaisesRegex(
CloudifyClientError,
'400:.* complex weekday expression',
self.client.execution_schedules.create,
'bad-complex-wd', self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='4 hours',
weekdays=['2mo', 'l-tu']
)
def test_schedule_invalid_repetition_without_recurrence(self):
recurrence_error = \
'400: recurrence must be specified for execution count ' \
'larger than 1'
self.assertRaisesRegex(
CloudifyClientError,
recurrence_error,
self.client.execution_schedules.create,
'no-recurrence-no-count', self.deployment_id, 'uninstall',
since=self.an_hour_from_now, weekdays=['su', 'mo', 'tu'],
)
self.client.execution_schedules.create(
'no-recurrence-count-1', self.deployment_id, 'install',
since=self.an_hour_from_now, count=1,
)
self.assertRaisesRegex(
CloudifyClientError,
recurrence_error,
self.client.execution_schedules.update,
'no-recurrence-count-1', self.deployment_id, count=2
)
def test_schedule_create_invalid_recurrence(self):
self.assertRaisesRegex(
CloudifyClientError,
'400: `10 doboshes` is not a legal recurrence expression.',
self.client.execution_schedules.create,
'bad-freq', self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='10 doboshes'
)
|
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import abc
import json
from enum import Enum
from typing import Callable, Tuple, List
class MetricGroup(abc.ABC):
"""
A MetricGroup is a named container for metrics and further metric subgroups.
Instances of this class can be used to register new metrics with Flink and to create a nested
hierarchy based on the group names.
    A MetricGroup is uniquely identified by its place in the hierarchy and name.
.. versionadded:: 1.11.0
"""
def add_group(self, name: str, extra: str = None) -> 'MetricGroup':
"""
        Creates a new MetricGroup and adds it to this group's sub-groups.
If extra is not None, creates a new key-value MetricGroup pair.
The key group is added to this group's sub-groups, while the value
group is added to the key group's sub-groups. In this case,
the value group will be returned and a user variable will be defined.
.. versionadded:: 1.11.0
"""
pass
def counter(self, name: str) -> 'Counter':
"""
Registers a new `Counter` with Flink.
.. versionadded:: 1.11.0
"""
pass
def gauge(self, name: str, obj: Callable[[], int]) -> None:
"""
Registers a new `Gauge` with Flink.
.. versionadded:: 1.11.0
"""
pass
def meter(self, name: str, time_span_in_seconds: int = 60) -> 'Meter':
"""
Registers a new `Meter` with Flink.
.. versionadded:: 1.11.0
"""
# There is no meter type in Beam, use counter to implement meter
pass
def distribution(self, name: str) -> 'Distribution':
"""
Registers a new `Distribution` with Flink.
.. versionadded:: 1.11.0
"""
pass
class MetricGroupType(Enum):
"""
Indicate the type of MetricGroup.
"""
generic = 0
key = 1
value = 2
class GenericMetricGroup(MetricGroup):
def __init__(
self,
parent,
name,
metric_group_type=MetricGroupType.generic):
self._parent = parent
self._sub_groups = []
self._name = name
self._metric_group_type = metric_group_type
self._flink_gauge = {}
self._beam_gauge = {}
def _add_group(self, name: str, metric_group_type: MetricGroupType) \
-> 'GenericMetricGroup':
for group in self._sub_groups:
if name == group._name and metric_group_type == group._metric_group_type:
# we don't create same metric group repeatedly
return group
sub_group = GenericMetricGroup(
self,
name,
metric_group_type)
self._sub_groups.append(sub_group)
return sub_group
def add_group(self, name: str, extra: str = None) -> 'MetricGroup':
if extra is None:
return self._add_group(name, MetricGroupType.generic)
else:
return self._add_group(name, MetricGroupType.key)\
._add_group(extra, MetricGroupType.value)
def counter(self, name: str) -> 'Counter':
from apache_beam.metrics.metric import Metrics
return Counter(Metrics.counter(self._get_namespace(), name))
def gauge(self, name: str, obj: Callable[[], int]) -> None:
from apache_beam.metrics.metric import Metrics
self._flink_gauge[name] = obj
self._beam_gauge[name] = Metrics.gauge(self._get_namespace(), name)
def meter(self, name: str, time_span_in_seconds: int = 60) -> 'Meter':
from apache_beam.metrics.metric import Metrics
# There is no meter type in Beam, use counter to implement meter
return Meter(Metrics.counter(self._get_namespace(time_span_in_seconds), name))
def distribution(self, name: str) -> 'Distribution':
from apache_beam.metrics.metric import Metrics
return Distribution(Metrics.distribution(self._get_namespace(), name))
def _get_metric_group_names_and_types(self) -> Tuple[List[str], List[str]]:
if self._name is None:
return [], []
else:
names, types = self._parent._get_metric_group_names_and_types()
names.append(self._name)
types.append(str(self._metric_group_type))
return names, types
def _get_namespace(self, time=None) -> str:
names, metric_group_type = self._get_metric_group_names_and_types()
names.extend(metric_group_type)
if time is not None:
names.append(str(time))
return json.dumps(names)
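# Illustrative sketch: the namespace is a JSON list of the group names
# followed by their group types, e.g.
#   root = GenericMetricGroup(None, None)
#   root.add_group('op').add_group('k', 'v')._get_namespace()
# yields '["op", "k", "v", "MetricGroupType.generic", "MetricGroupType.key",
# "MetricGroupType.value"]'.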
class Metric(object):
"""
Base interface of a metric object.
.. versionadded:: 1.11.0
"""
pass
class Counter(Metric):
"""
Counter metric interface. Allows a count to be incremented/decremented
during pipeline execution.
.. versionadded:: 1.11.0
"""
def __init__(self, inner_counter):
self._inner_counter = inner_counter
def inc(self, n: int = 1):
"""
Increment the current count by the given value.
.. versionadded:: 1.11.0
"""
self._inner_counter.inc(n)
def dec(self, n: int = 1):
"""
        Decrement the current count by the given value.
.. versionadded:: 1.11.0
"""
self.inc(-n)
def get_count(self) -> int:
"""
Returns the current count.
.. versionadded:: 1.11.0
"""
from apache_beam.metrics.execution import MetricsEnvironment
container = MetricsEnvironment.current_container()
return container.get_counter(self._inner_counter.metric_name).get_cumulative()
class Distribution(Metric):
"""
Distribution Metric interface.
Allows statistics about the distribution of a variable to be collected during
pipeline execution.
.. versionadded:: 1.11.0
"""
def __init__(self, inner_distribution):
self._inner_distribution = inner_distribution
def update(self, value):
"""
Updates the distribution value.
.. versionadded:: 1.11.0
"""
self._inner_distribution.update(value)
class Meter(Metric):
"""
Meter Metric interface.
Metric for measuring throughput.
.. versionadded:: 1.11.0
"""
def __init__(self, inner_counter):
self._inner_counter = inner_counter
def mark_event(self, value: int = 1):
"""
Mark occurrence of the specified number of events.
.. versionadded:: 1.11.0
"""
self._inner_counter.inc(value)
def get_count(self) -> int:
"""
Get number of events marked on the meter.
.. versionadded:: 1.11.0
"""
from apache_beam.metrics.execution import MetricsEnvironment
container = MetricsEnvironment.current_container()
return container.get_counter(self._inner_counter.metric_name).get_cumulative()
|
|
import mock
from mock import Mock
from pamqp import specification
import amqpstorm
from amqpstorm import AMQPChannelError
from amqpstorm import AMQPConnectionError
from amqpstorm import AMQPInvalidArgument
from amqpstorm import AMQPMessageError
from amqpstorm import Channel
from amqpstorm import exception
from amqpstorm.tests.utility import FakeConnection
from amqpstorm.tests.utility import TestFramework
class ChannelExceptionTests(TestFramework):
def test_chanel_invalid_close_parameter(self):
channel = Channel(0, Mock(name='Connection'), 360)
self.assertRaisesRegexp(
AMQPInvalidArgument,
'reply_code should be an integer',
channel.close, 'travis-ci', 'travis-ci'
)
self.assertRaisesRegexp(
AMQPInvalidArgument,
'reply_text should be a string',
channel.close, 200, 200
)
def test_chanel_callback_not_set(self):
channel = Channel(0, Mock(name='Connection'), 360)
self.assertRaisesRegexp(
AMQPChannelError,
'no consumer callback defined',
channel.process_data_events
)
def test_channel_throw_exception_check_for_error(self):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(channel.OPEN)
channel.exceptions.append(AMQPConnectionError('travis-ci'))
self.assertRaisesRegexp(
AMQPConnectionError,
'travis-ci',
channel.check_for_errors
)
def test_channel_check_error_no_exception(self):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(Channel.OPEN)
self.assertIsNone(channel.check_for_errors())
def test_channel_check_error_when_closed(self):
channel = Channel(0, FakeConnection(), 360)
self.assertRaisesRegexp(
exception.AMQPChannelError,
'channel was closed',
channel.check_for_errors
)
def test_channel_check_error_connection_closed(self):
channel = Channel(0, FakeConnection(FakeConnection.CLOSED), 360)
self.assertRaisesRegexp(
exception.AMQPConnectionError,
'connection was closed',
channel.check_for_errors
)
def test_channel_raises_when_closed(self):
channel = Channel(0, FakeConnection(FakeConnection.OPEN), 360)
channel.set_state(channel.CLOSED)
self.assertFalse(channel.is_open)
self.assertRaisesRegexp(
exception.AMQPChannelError,
'channel was closed',
channel.check_for_errors
)
self.assertTrue(channel.is_closed)
def test_channel_closed_after_connection_closed(self):
channel = Channel(0, FakeConnection(FakeConnection.CLOSED), 360)
channel.set_state(channel.OPEN)
self.assertTrue(channel.is_open)
self.assertRaisesRegexp(
exception.AMQPConnectionError,
'connection was closed',
channel.check_for_errors
)
self.assertTrue(channel.is_closed)
def test_channel_closed_after_connection_exception(self):
connection = amqpstorm.Connection('localhost', 'guest', 'guest',
lazy=True)
channel = Channel(0, connection, 360)
connection.exceptions.append(AMQPConnectionError('travis-ci'))
channel.set_state(channel.OPEN)
self.assertTrue(connection.is_closed)
self.assertTrue(channel.is_open)
self.assertRaisesRegexp(
exception.AMQPConnectionError,
'travis-ci',
channel.check_for_errors
)
self.assertTrue(channel.is_closed)
def test_channel_consume_exception_when_recoverable(self):
connection = amqpstorm.Connection('localhost', 'guest', 'guest',
lazy=True)
connection.set_state(connection.OPEN)
channel = Channel(0, connection, 360)
channel.set_state(channel.OPEN)
channel.exceptions.append(AMQPChannelError('no-route'))
self.assertTrue(connection.is_open)
self.assertTrue(channel.is_open)
self.assertRaisesRegexp(
exception.AMQPChannelError,
'no-route',
channel.check_for_errors
)
self.assertTrue(channel.is_open)
channel.check_for_errors()
@mock.patch('amqpstorm.Channel._build_message',
side_effect=AMQPChannelError())
def test_channel_build_inbound_raises(self, _):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(Channel.OPEN)
generator = channel.build_inbound_messages(break_on_empty=False)
if hasattr(generator, 'next'):
self.assertRaises(AMQPChannelError, generator.next)
else:
self.assertRaises(AMQPChannelError, generator.__next__)
def test_channel_build_inbound_raises_in_loop(self):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(Channel.OPEN)
self.first = True
def raise_after_one(**_):
if not self.first:
channel.exceptions.append(AMQPChannelError())
self.first = False
return None
with mock.patch('amqpstorm.Channel._build_message',
side_effect=raise_after_one):
generator = channel.build_inbound_messages(break_on_empty=False)
if hasattr(generator, 'next'):
self.assertRaises(AMQPChannelError, generator.next)
else:
self.assertRaises(AMQPChannelError, generator.__next__)
def test_channel_raises_with_return_reply_code_500(self):
channel = Channel(0, FakeConnection(), 360)
channel.set_state(channel.OPEN)
basic_return = specification.Basic.Return(
reply_code=500,
reply_text='Error'
)
channel._basic_return(basic_return)
self.assertRaisesRegexp(
AMQPMessageError,
"Message not delivered: Error \(500\) to queue "
"'' from exchange ''",
channel.check_for_errors
)
def test_channel_raise_with_close_reply_code_500(self):
connection = FakeConnection()
channel = Channel(0, connection, 360)
# Set up Fake Channel.
channel._inbound = [1, 2, 3]
channel.set_state(channel.OPEN)
channel._consumer_tags = [1, 2, 3]
close_frame = specification.Channel.Close(
reply_code=500,
reply_text='travis-ci'
)
channel._close_channel(close_frame)
self.assertEqual(channel._inbound, [])
self.assertEqual(channel._consumer_tags, [])
self.assertEqual(channel._state, channel.CLOSED)
self.assertIsInstance(
connection.get_last_frame(),
specification.Channel.CloseOk
)
self.assertRaisesRegexp(
AMQPChannelError,
'Channel 0 was closed by remote server: travis-ci',
channel.check_for_errors
)
|
|
#
# PgMain.py -- web application threading help routines.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from __future__ import print_function
import sys, traceback
import os
import threading
import logging
import ginga.util.six as six
if six.PY2:
import thread
import Queue
else:
import _thread as thread
import queue as Queue
import tornado.web
import tornado.template
import tornado.ioloop
from ginga.web.pgw import templates, js, PgHelp
from ginga.misc import Task, Future, Callback
class PgMain(Callback.Callbacks):
def __init__(self, queue=None, logger=None, ev_quit=None,
host='localhost', port=9909, app=None):
super(PgMain, self).__init__()
self.enable_callback('shutdown')
# You can pass in a queue if you prefer to do so
if not queue:
queue = Queue.Queue()
self.gui_queue = queue
# You can pass in a logger if you prefer to do so
if logger is None:
logger = logging.getLogger('PgMain')
self.logger = logger
if not ev_quit:
ev_quit = threading.Event()
self.ev_quit = ev_quit
self.host = host
self.port = port
self.app = app
self.gui_thread_id = None
# Get screen size
## desktop = self.app.desktop()
## #rect = desktop.screenGeometry()
## rect = desktop.availableGeometry()
## size = rect.size()
## self.screen_wd = size.width()
## self.screen_ht = size.height()
self.screen_wd = 600
self.screen_ht = 800
def get_widget(self):
return self.app
def get_screen_size(self):
return (self.screen_wd, self.screen_ht)
def update_pending(self, timeout=0.0):
#print "1. PROCESSING OUT-BAND"
try:
#self.app.processEvents()
pass
except Exception as e:
self.logger.error(str(e))
# TODO: traceback!
done = False
while not done:
#print "2. PROCESSING IN-BAND len=%d" % self.gui_queue.qsize()
# Process "in-band" Qt events
try:
future = self.gui_queue.get(block=True,
timeout=timeout)
# Execute the GUI method
try:
try:
res = future.thaw(suppress_exception=False)
except Exception as e:
future.resolve(e)
self.logger.error("gui error: %s" % str(e))
try:
(type, value, tb) = sys.exc_info()
tb_str = "".join(traceback.format_tb(tb))
self.logger.error("Traceback:\n%s" % (tb_str))
except Exception as e:
self.logger.error("Traceback information unavailable.")
finally:
pass
except Queue.Empty:
done = True
except Exception as e:
self.logger.error("Main GUI loop error: %s" % str(e))
#pass
# Process "out-of-band" events
#print "3. PROCESSING OUT-BAND"
try:
#self.app.processEvents()
pass
except Exception as e:
self.logger.error(str(e))
# TODO: traceback!
def gui_do(self, method, *args, **kwdargs):
"""General method for asynchronously calling into the GUI.
It makes a future to call the given (method) with the given (args)
and (kwdargs) inside the gui thread. If the calling thread is a
non-gui thread the future is returned.
"""
future = Future.Future()
future.freeze(method, *args, **kwdargs)
self.gui_queue.put(future)
my_id = thread.get_ident()
if my_id != self.gui_thread_id:
return future
def gui_call(self, method, *args, **kwdargs):
"""General method for synchronously calling into the GUI.
This waits until the method has completed before returning.
"""
my_id = thread.get_ident()
if my_id == self.gui_thread_id:
return method(*args, **kwdargs)
else:
future = self.gui_do(method, *args, **kwdargs)
return future.wait()
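    # Usage sketch (names are illustrative): from a worker thread,
    #   future = main.gui_do(viewer.set_image, img)  # async; returns a Future
    #   size = main.gui_call(viewer.get_size)        # sync; waits for result
    # gui_call() executes the method directly when already in the GUI thread.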
def gui_do_future(self, future):
self.gui_queue.put(future)
return future
def nongui_do(self, method, *args, **kwdargs):
task = Task.FuncTask(method, args, kwdargs, logger=self.logger)
return self.nongui_do_task(task)
def nongui_do_cb(self, tup, method, *args, **kwdargs):
task = Task.FuncTask(method, args, kwdargs, logger=self.logger)
task.register_callback(tup[0], args=tup[1:])
return self.nongui_do_task(task)
def nongui_do_future(self, future):
task = Task.FuncTask(future.thaw, (), {}, logger=self.logger)
return self.nongui_do_task(task)
def nongui_do_task(self, task):
try:
task.init_and_start(self)
return task
except Exception as e:
self.logger.error("Error starting task: %s" % (str(e)))
            raise e
def assert_gui_thread(self):
my_id = thread.get_ident()
assert my_id == self.gui_thread_id, \
Exception("Non-GUI thread (%d) is executing GUI code!" % (
my_id))
def assert_nongui_thread(self):
my_id = thread.get_ident()
assert my_id != self.gui_thread_id, \
Exception("GUI thread (%d) is executing non-GUI code!" % (
my_id))
def mainloop(self, timeout=0.001):
# Mark our thread id
self.gui_thread_id = thread.get_ident()
## while not self.ev_quit.isSet():
## self.update_pending(timeout=timeout)
self.start(use_thread=False)
def start(self, use_thread=False, no_ioloop=False):
#self.thread_pool.startall()
js_path = os.path.dirname(js.__file__)
# create and run the app
self.server = tornado.web.Application([
(r"/js/(.*\.js)", tornado.web.StaticFileHandler,
{"path": js_path}),
(r"/app", PgHelp.WindowHandler,
dict(name='Application', url='/app', app=self.app)),
(r"/app/socket", PgHelp.ApplicationHandler,
dict(name='ApplicationSocketInterface', app=self.app)),
],
app=self.app, logger=self.logger)
self.server.listen(self.port, self.host)
self.base_url = "http://%s:%d/app" % (self.host, self.port)
self.logger.info("ginga web now running at " + self.base_url)
if no_ioloop:
self.t_ioloop = None
else:
self.t_ioloop = tornado.ioloop.IOLoop.instance()
if use_thread:
task = Task.FuncTask2(self.t_ioloop.start)
self.thread_pool.addTask(task)
else:
self.t_ioloop.start()
def stop(self):
# how to stop tornado server?
        if self.t_ioloop is not None:
self.t_ioloop.stop()
self.thread_pool.stopall()
self.ev_quit.set()
def gui_quit(self):
"Call this to cause the GUI thread to quit the mainloop."""
self.ev_quit.set()
self.make_callback('shutdown')
#self.app.quit()
def _quit(self):
self.gui_quit()
# END
|
|
from datetime import date, timedelta
from ..core import WesternCalendar, MON, TUE, SAT, SUN
from ..registry_tools import iso_register
@iso_register('AU')
class Australia(WesternCalendar):
"Australia"
include_good_friday = True
include_easter_monday = True
include_queens_birthday = False
include_labour_day_october = False
include_boxing_day = True
# Shall we shift Anzac Day?
shift_anzac_day = True
ANZAC_SHIFT_DAYS = (SAT, SUN)
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(1, 26, "Australia Day"),
)
def get_canberra_day(self, year):
return (
Australia.get_nth_weekday_in_month(year, 3, MON, 2),
"Canberra Day"
)
def get_queens_birthday(self, year):
return (
Australia.get_nth_weekday_in_month(year, 6, MON, 2),
"Queen's Birthday"
)
def get_labour_day_october(self, year):
return (
Australia.get_nth_weekday_in_month(year, 10, MON),
'Labour Day'
)
def get_anzac_day(self, year):
anzac_day = date(year, 4, 25)
if not self.shift_anzac_day:
return anzac_day, "Anzac Day"
if anzac_day.weekday() in self.ANZAC_SHIFT_DAYS:
anzac_day = self.find_following_working_day(anzac_day)
return anzac_day, "Anzac Day"
def get_variable_days(self, year):
# usual variable days
days = super().get_variable_days(year)
january_first = date(year, 1, 1)
if january_first.weekday() in self.get_weekend_days():
days.append((
self.find_following_working_day(january_first),
"New Year's Day shift")
)
australia_day = date(year, 1, 26)
if australia_day.weekday() in self.get_weekend_days():
days.append((
self.find_following_working_day(australia_day),
"Australia Day shift")
)
# was fixed, but might be shifted
days.append(self.get_anzac_day(year))
if self.include_queens_birthday:
days.append(self.get_queens_birthday(year))
if self.include_labour_day_october:
days.append(self.get_labour_day_october(year))
christmas = date(year, 12, 25)
boxing_day = date(year, 12, 26)
if christmas.weekday() in self.get_weekend_days():
shift = self.find_following_working_day(christmas)
days.append((shift, "Christmas Shift"))
days.append((shift + timedelta(days=1), "Boxing Day Shift"))
elif boxing_day.weekday() in self.get_weekend_days():
shift = self.find_following_working_day(boxing_day)
days.append((shift, "Boxing Day Shift"))
return days
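# Worked example for get_variable_days() above: in 2021, Christmas (Sat) and
# Boxing Day (Sun) both fall on the weekend, so the method adds "Christmas
# Shift" on Mon 27 Dec and "Boxing Day Shift" on Tue 28 Dec 2021.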
@iso_register('AU-ACT')
class AustralianCapitalTerritory(Australia):
"Australian Capital Territory"
include_easter_saturday = True
include_queens_birthday = True
include_labour_day_october = True
include_boxing_day = True
_family_community_label = "Family & Community Day"
def get_family_community_day(self, year):
"""
Return Family & Community Day.
see: https://en.wikipedia.org/wiki/Family_Day#Australia
"""
        # Since this day is picked using the school year calendar, there's no
        # mathematical way yet to compute it.
        # This public holiday was declared in 2007. [..]
        # Per the Holidays (Reconciliation Day) Amendment Bill 2017, 2017 is
        # the last year that the ACT will celebrate Family & Community Day.
        # It is being replaced by Reconciliation Day.
if year < 2007 or year > 2018:
# This would be interpreted as "this holiday must not be added"
return None
# Family & Community Day was celebrated on the first Tuesday of
# November in 2007, 2008 and 2009
if year in (2007, 2008, 2009):
day = AustralianCapitalTerritory.get_nth_weekday_in_month(
year, 11, TUE)
        # Family & Community Day was celebrated on the last Monday of
        # September in 2010, 2013, 2014, 2015, 2016, 2017
elif year in (2010, 2013, 2014, 2015, 2016, 2017):
day = AustralianCapitalTerritory.get_last_weekday_in_month(
year, 9, MON)
# Family & Community Day was celebrated on the second Monday of
# October in 2011 and 2012
elif year in (2011, 2012):
day = AustralianCapitalTerritory.get_nth_weekday_in_month(
year, 10, MON, 2)
        else:
            # Remaining years in the 2007-2018 window with no defined rule
            # (i.e. 2018 itself): no Family & Community Day.
            return None
return day, self._family_community_label
def get_reconciliation_day(self, year):
"""
        Return Reconciliation Day.
As of 2018, it replaces Family & Community Day.
"""
if year < 2018:
return None
reconciliation_day = date(year, 5, 27)
if reconciliation_day.weekday() == MON:
return reconciliation_day, "Reconciliation Day"
else:
shift = AustralianCapitalTerritory.get_first_weekday_after(
reconciliation_day, MON)
return shift, "Reconciliation Day Shift"
def get_variable_days(self, year):
days = super().get_variable_days(year)
days.append(self.get_canberra_day(year))
family_community_day = self.get_family_community_day(year)
if family_community_day is not None:
days.append(family_community_day)
reconciliation_day = self.get_reconciliation_day(year)
if reconciliation_day is not None:
days.append(reconciliation_day)
return days
@iso_register('AU-NSW')
class NewSouthWales(Australia):
"New South Wales"
include_queens_birthday = True
include_easter_saturday = True
include_easter_sunday = True
include_labour_day_october = True
include_boxing_day = True
ANZAC_SHIFT_DAYS = (SUN,)
@iso_register('AU-NT')
class NorthernTerritory(Australia):
"Northern Territory"
include_easter_saturday = True
include_queens_birthday = True
include_boxing_day = True
ANZAC_SHIFT_DAYS = (SUN,)
def get_may_day(self, year):
return (
NorthernTerritory.get_nth_weekday_in_month(year, 5, MON),
"May Day"
)
def get_picnic_day(self, year):
return (
NorthernTerritory.get_nth_weekday_in_month(year, 8, MON),
"Picnic Day"
)
def get_variable_days(self, year):
days = super().get_variable_days(year)
days.extend([
self.get_may_day(year),
self.get_picnic_day(year),
])
return days
@iso_register('AU-QLD')
class Queensland(Australia):
"Queensland"
include_easter_saturday = True
include_queens_birthday = True
include_boxing_day = True
ANZAC_SHIFT_DAYS = (SUN,)
def get_labour_day_may(self, year):
return (
Queensland.get_nth_weekday_in_month(year, 5, MON),
"Labour Day"
)
def get_variable_days(self, year):
days = super().get_variable_days(year)
days.append(self.get_labour_day_may(year))
return days
@iso_register('AU-SA')
class SouthAustralia(Australia):
"South Australia"
include_easter_saturday = True
include_queens_birthday = True
include_labour_day_october = True
ANZAC_SHIFT_DAYS = (SUN,)
def get_adelaides_cup(self, year):
return (
SouthAustralia.get_nth_weekday_in_month(year, 3, MON, 2),
"Adelaide's cup"
)
def get_proclamation_day(self, year):
return date(year, 12, 26), "Proclamation Day"
def get_variable_days(self, year):
days = super().get_variable_days(year)
days.extend([
self.get_adelaides_cup(year),
self.get_proclamation_day(year),
])
return days
@iso_register('AU-TAS')
class Tasmania(Australia):
"Tasmania"
include_queens_birthday = True
include_boxing_day = True
shift_anzac_day = False
@property
def has_recreation_day(self):
return True
def get_eight_hours_day(self, year):
return (
Tasmania.get_nth_weekday_in_month(year, 3, MON, 2),
"Eight hours Day"
)
def get_recreation_day(self, year):
return (
Tasmania.get_nth_weekday_in_month(year, 11, MON),
"Recreation Day"
)
def get_variable_days(self, year):
days = super().get_variable_days(year)
days.append(self.get_eight_hours_day(year))
if self.has_recreation_day:
days.append(self.get_recreation_day(year))
return days
class Hobart(Tasmania):
"Hobart"
@property
def has_recreation_day(self):
return False
def get_hobart(self, year):
return (
Hobart.get_nth_weekday_in_month(year, 2, MON, 2),
"Royal Hobart Regatta"
)
def get_variable_days(self, year):
days = super().get_variable_days(year)
days.append(self.get_hobart(year))
return days
@iso_register('AU-VIC')
class Victoria(Australia):
"Victoria"
include_easter_saturday = True
include_queens_birthday = True
include_boxing_day = True
shift_anzac_day = False
def get_labours_day_in_march(self, year):
return (
Victoria.get_nth_weekday_in_month(year, 3, MON, 2),
"Labour Day"
)
def get_melbourne_cup(self, year):
return (
Victoria.get_nth_weekday_in_month(year, 11, TUE),
"Melbourne Cup"
)
def get_variable_days(self, year):
days = super().get_variable_days(year)
days.append(self.get_labours_day_in_march(year))
days.append(self.get_melbourne_cup(year))
return days
@iso_register('AU-WA')
class WesternAustralia(Australia):
"Western Australia"
include_boxing_day = True
def get_labours_day_in_march(self, year):
return (
WesternAustralia.get_nth_weekday_in_month(year, 3, MON),
"Labour Day"
)
def get_western_australia_day(self, year):
return (
WesternAustralia.get_nth_weekday_in_month(year, 6, MON),
"Western Australia Day"
)
def get_variable_days(self, year):
        # It is not possible to reliably compute the Queen's Birthday holiday
        # for Western Australia, since it is based on a decision by the
        # Governor (typically the last Monday of September or the first
        # Monday of October).
days = super().get_variable_days(year)
days.append(self.get_labours_day_in_march(year))
days.append(self.get_western_australia_day(year))
return days
|
|
from director import callbacks
from director.fieldcontainer import FieldContainer
from director.timercallback import TimerCallback
import re
import numpy as np
from collections import OrderedDict
def cleanPropertyName(s):
"""
Generate a valid python property name by replacing all non-alphanumeric characters with underscores and adding an initial underscore if the first character is a digit
"""
return re.sub(r'\W|^(?=\d)','_',s).lower() # \W matches non-alphanumeric, ^(?=\d) matches the first position if followed by a digit
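# e.g. cleanPropertyName('Point Size') -> 'point_size',
#      cleanPropertyName('3D Color')   -> '_3d_color'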
class PropertyAttributes(FieldContainer):
def __init__(self, **kwargs):
self._add_fields(
decimals = 5,
minimum = -1e4,
maximum = 1e4,
singleStep = 1,
hidden = False,
enumNames = None,
readOnly = False,
)
self._set_fields(**kwargs)
from PythonQt import QtGui
def fromQColor(propertyName, propertyValue):
if isinstance(propertyValue, QtGui.QColor):
return [propertyValue.red()/255.0, propertyValue.green()/255.0, propertyValue.blue()/255.0]
else:
return propertyValue
def toQProperty(propertyName, propertyValue):
if 'color' in propertyName.lower() and isinstance(propertyValue, (list, tuple)) and len(propertyValue) == 3:
return QtGui.QColor(propertyValue[0]*255.0, propertyValue[1]*255.0, propertyValue[2]*255.0)
elif isinstance(propertyValue, np.float):
return float(propertyValue)
elif isinstance(propertyValue, (list, tuple, np.ndarray)) and len(propertyValue) and isinstance(propertyValue[0], np.float):
return [float(x) for x in propertyValue]
else:
return propertyValue
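# Illustrative round trip: fromQColor('color', QtGui.QColor(255, 0, 0))
# -> [1.0, 0.0, 0.0], and toQProperty('color', [1.0, 0.0, 0.0]) maps the
# normalized list back to a QtGui.QColor.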
class PropertySet(object):
PROPERTY_CHANGED_SIGNAL = 'PROPERTY_CHANGED_SIGNAL'
PROPERTY_ADDED_SIGNAL = 'PROPERTY_ADDED_SIGNAL'
PROPERTY_ATTRIBUTE_CHANGED_SIGNAL = 'PROPERTY_ATTRIBUTE_CHANGED_SIGNAL'
def __getstate__(self):
d = dict(_properties=self._properties, _attributes=self._attributes)
return d
def __setstate__(self, state):
self.__init__()
attrs = state['_attributes']
for propName, propValue in state['_properties'].iteritems():
self.addProperty(propName, propValue, attributes=attrs.get(propName))
def __init__(self):
self.callbacks = callbacks.CallbackRegistry([self.PROPERTY_CHANGED_SIGNAL,
self.PROPERTY_ADDED_SIGNAL,
self.PROPERTY_ATTRIBUTE_CHANGED_SIGNAL])
self._properties = OrderedDict()
self._attributes = {}
self._alternateNames = {}
def propertyNames(self):
return self._properties.keys()
def hasProperty(self, propertyName):
return propertyName in self._properties
def connectPropertyChanged(self, func):
return self.callbacks.connect(self.PROPERTY_CHANGED_SIGNAL, func)
def disconnectPropertyChanged(self, callbackId):
self.callbacks.disconnect(callbackId)
def connectPropertyAdded(self, func):
return self.callbacks.connect(self.PROPERTY_ADDED_SIGNAL, func)
def disconnectPropertyAdded(self, callbackId):
self.callbacks.disconnect(callbackId)
def connectPropertyAttributeChanged(self, func):
return self.callbacks.connect(self.PROPERTY_ATTRIBUTE_CHANGED_SIGNAL, func)
def disconnectPropertyAttributeChanged(self, callbackId):
self.callbacks.disconnect(callbackId)
def getProperty(self, propertyName):
return self._properties[propertyName]
def getPropertyEnumValue(self, propertyName):
attributes = self._attributes[propertyName]
return attributes.enumNames[self._properties[propertyName]]
def removeProperty(self, propertyName):
del self._properties[propertyName]
del self._attributes[propertyName]
del self._alternateNames[cleanPropertyName(propertyName)]
def addProperty(self, propertyName, propertyValue, attributes=None):
alternateName = cleanPropertyName(propertyName)
if propertyName not in self._properties and alternateName in self._alternateNames:
raise ValueError('Adding this property would conflict with a different existing property with alternate name {:s}'.format(alternateName))
propertyValue = fromQColor(propertyName, propertyValue)
self._properties[propertyName] = propertyValue
self._attributes[propertyName] = attributes or PropertyAttributes()
self._alternateNames[alternateName] = propertyName
self.callbacks.process(self.PROPERTY_ADDED_SIGNAL, self, propertyName)
def setPropertyIndex(self, propertyName, newIndex):
assert self.hasProperty(propertyName)
currentIndex = self._properties.keys().index(propertyName)
inds = range(len(self._properties))
inds.remove(currentIndex)
inds.insert(newIndex, currentIndex)
items = self._properties.items()
self._properties = OrderedDict([items[i] for i in inds])
def setProperty(self, propertyName, propertyValue):
previousValue = self._properties[propertyName]
propertyValue = fromQColor(propertyName, propertyValue)
if propertyValue == previousValue:
return
names = self.getPropertyAttribute(propertyName, 'enumNames')
if names and type(propertyValue) != int:
propertyValue = names.index(propertyValue)
self._properties[propertyName] = propertyValue
self.callbacks.process(self.PROPERTY_CHANGED_SIGNAL, self, propertyName)
def getPropertyAttribute(self, propertyName, propertyAttribute):
attributes = self._attributes[propertyName]
return attributes[propertyAttribute]
def setPropertyAttribute(self, propertyName, propertyAttribute, value):
attributes = self._attributes[propertyName]
if attributes[propertyAttribute] != value:
attributes[propertyAttribute] = value
self.callbacks.process(self.PROPERTY_ATTRIBUTE_CHANGED_SIGNAL, self, propertyName, propertyAttribute)
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError as exc:
alternateNames = object.__getattribute__(self, '_alternateNames')
if name in alternateNames:
return object.__getattribute__(self, 'getProperty')(alternateNames[name])
else:
raise exc
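# Illustrative sketch (not part of the original module): a PropertySet keeps an
# ordered mapping of properties plus per-property attributes, and also exposes
# each property under a "cleaned" alternate name through __getattribute__.
# Assuming cleanPropertyName (defined elsewhere in this module) lower-cases the
# name and replaces spaces, usage would look roughly like:
#
#   ps = PropertySet()
#   ps.addProperty('Point Size', 8)
#   ps.getProperty('Point Size')   # -> 8
#   ps.point_size                  # -> 8, via the alternate-name lookup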
class PropertyPanelHelper(object):
@staticmethod
    def addPropertiesToPanel(properties, panel, propertyNamesToAdd=None):
for propertyName in properties.propertyNames():
value = properties.getProperty(propertyName)
attributes = properties._attributes[propertyName]
if value is not None and not attributes.hidden:
                if propertyNamesToAdd is not None and propertyName not in propertyNamesToAdd:
                    continue
                PropertyPanelHelper._addProperty(panel, propertyName, attributes, value)
@staticmethod
def onPropertyValueChanged(panel, properties, propertyName):
prop = panel.getProperty(propertyName)
if prop is not None:
propertyValue = properties.getProperty(propertyName)
propertyValue = toQProperty(propertyName, propertyValue)
if isinstance(propertyValue, list):
for i, subValue in enumerate(propertyValue):
panel.getSubProperty(prop, i).setValue(subValue)
groupName = PropertyPanelHelper.getPropertyGroupName(propertyName, propertyValue)
prop.setPropertyName(groupName)
else:
prop.setValue(propertyValue)
@staticmethod
def setPropertyFromPanel(prop, propertiesPanel, propertySet):
if prop.isSubProperty():
if not propertiesPanel.getParentProperty(prop):
return
propertyIndex = propertiesPanel.getSubPropertyIndex(prop)
propertyName = prop.propertyName()
propertyName = propertyName[:propertyName.index('[')]
propertyValue = propertySet.getProperty(propertyName)
propertyValue = list(propertyValue)
propertyValue[propertyIndex] = prop.value()
propertySet.setProperty(propertyName, propertyValue)
groupName = PropertyPanelHelper.getPropertyGroupName(propertyName, propertyValue)
propertiesPanel.getParentProperty(prop).setPropertyName(groupName)
else:
propertyName = prop.propertyName()
propertyValue = prop.value()
propertyValue = fromQColor(propertyName, propertyValue)
propertySet.setProperty(propertyName, propertyValue)
@staticmethod
def _setPropertyAttributes(prop, attributes):
prop.setAttribute('decimals', attributes.decimals)
prop.setAttribute('minimum', attributes.minimum)
prop.setAttribute('maximum', attributes.maximum)
prop.setAttribute('singleStep', attributes.singleStep)
if attributes.enumNames:
prop.setAttribute('enumNames', attributes.enumNames)
@staticmethod
def getPropertyGroupName(name, value):
return '%s [%s]' % (name, ', '.join(['%.2f' % v if isinstance(v, float) else str(v) for v in value]))
@staticmethod
def _addProperty(panel, name, attributes, value):
value = toQProperty(name, value)
if isinstance(value, list):
groupName = PropertyPanelHelper.getPropertyGroupName(name, value)
groupProp = panel.addGroup(name, groupName)
for v in value:
p = panel.addSubProperty(name, v, groupProp)
PropertyPanelHelper._setPropertyAttributes(p, attributes)
return groupProp
elif attributes.enumNames:
p = panel.addEnumProperty(name, value)
PropertyPanelHelper._setPropertyAttributes(p, attributes)
p.setValue(value)
return p
else:
p = panel.addProperty(name, value)
PropertyPanelHelper._setPropertyAttributes(p, attributes)
return p
class PropertyPanelConnector(object):
def __init__(self, propertySet, propertiesPanel, propertyNamesToAdd=None):
self.propertySet = propertySet
self.propertyNamesToAdd = propertyNamesToAdd
self.propertiesPanel = propertiesPanel
self.connections = []
self.connections.append(self.propertySet.connectPropertyAdded(self._onPropertyAdded))
self.connections.append(self.propertySet.connectPropertyChanged(self._onPropertyChanged))
self.connections.append(self.propertySet.connectPropertyAttributeChanged(self._onPropertyAttributeChanged))
self.propertiesPanel.connect('propertyValueChanged(QtVariantProperty*)', self._onPanelPropertyChanged)
self.timer = TimerCallback()
self.timer.callback = self._rebuildNow
self._blockSignals = True
PropertyPanelHelper.addPropertiesToPanel(self.propertySet, self.propertiesPanel, self.propertyNamesToAdd)
self._blockSignals = False
def cleanup(self):
self.timer.callback = None
self.propertiesPanel.disconnect('propertyValueChanged(QtVariantProperty*)', self._onPanelPropertyChanged)
for connection in self.connections:
self.propertySet.callbacks.disconnect(connection)
def _rebuild(self):
if not self.timer.singleShotTimer.isActive():
self.timer.singleShot(0)
def _rebuildNow(self):
self._blockSignals = True
self.propertiesPanel.clear()
PropertyPanelHelper.addPropertiesToPanel(self.propertySet, self.propertiesPanel)
self._blockSignals = False
def _onPropertyAdded(self, propertySet, propertyName):
self._rebuild()
def _onPropertyAttributeChanged(self, propertySet, propertyName, propertyAttribute):
self._rebuild()
def _onPropertyChanged(self, propertySet, propertyName):
self._blockSignals = True
PropertyPanelHelper.onPropertyValueChanged(self.propertiesPanel, propertySet, propertyName)
self._blockSignals = False
def _onPanelPropertyChanged(self, panelProperty):
if not self._blockSignals:
PropertyPanelHelper.setPropertyFromPanel(panelProperty, self.propertiesPanel, self.propertySet)
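# Minimal usage sketch (illustrative; the panel object comes from the Qt UI):
# the connector keeps a PropertySet and a QtVariantProperty-based panel in sync
# in both directions, using _blockSignals to avoid feedback loops while either
# side is being updated:
#
#   ps = PropertySet()
#   ps.addProperty('Alpha', 1.0)
#   connector = PropertyPanelConnector(ps, propertiesPanel)
#   ps.setProperty('Alpha', 0.5)  # panel widget updates via the callbacks
#   connector.cleanup()           # disconnect before tearing down the panel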
|
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import operator
import re
import threading
import openerp
from openerp.osv import fields, osv
from openerp import api, tools
from openerp.http import request
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
MENU_ITEM_SEPARATOR = "/"
class ir_ui_menu(osv.osv):
_name = 'ir.ui.menu'
def __init__(self, *args, **kwargs):
super(ir_ui_menu, self).__init__(*args, **kwargs)
self.pool['ir.model.access'].register_cache_clearing_method(self._name, 'clear_caches')
@api.model
@tools.ormcache('frozenset(self.env.user.groups_id.ids)', 'debug')
def _visible_menu_ids(self, debug=False):
""" Return the ids of the menu items visible to the user. """
# retrieve all menus, and determine which ones are visible
context = {'ir.ui.menu.full_list': True}
menus = self.with_context(context).search([])
groups = self.env.user.groups_id if debug else self.env.user.groups_id - self.env.ref('base.group_no_one')
# first discard all menus with groups the user does not have
menus = menus.filtered(
lambda menu: not menu.groups_id or menu.groups_id & groups)
# take apart menus that have an action
action_menus = menus.filtered(lambda m: m.action and m.action.exists())
folder_menus = menus - action_menus
visible = self.browse()
# process action menus, check whether their action is allowed
access = self.env['ir.model.access']
model_fname = {
'ir.actions.act_window': 'res_model',
'ir.actions.report.xml': 'model',
'ir.actions.server': 'model_id',
}
for menu in action_menus:
fname = model_fname.get(menu.action._name)
if not fname or not menu.action[fname] or \
access.check(menu.action[fname], 'read', False):
# make menu visible, and its folder ancestors, too
visible += menu
menu = menu.parent_id
while menu and menu in folder_menus and menu not in visible:
visible += menu
menu = menu.parent_id
return set(visible.ids)
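    # The @ormcache decorator above memoizes the visible-id set per (user
    # groups, debug) combination; create/write/unlink below call clear_caches()
    # so the cache is invalidated whenever the menu tree changes.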
@api.multi
@api.returns('self')
def _filter_visible_menus(self):
""" Filter `self` to only keep the menu items that should be visible in
the menu hierarchy of the current user.
Uses a cache for speeding up the computation.
"""
visible_ids = self._visible_menu_ids(request.debug if request else False)
return self.filtered(lambda menu: menu.id in visible_ids)
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
ids = super(ir_ui_menu, self).search(cr, uid, args, offset=0,
limit=None, order=order, context=context, count=False)
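        # NOTE: offset/limit/count are deliberately not forwarded to the
        # super() call: the visibility filtering below may drop records, so
        # pagination and counting are applied after filtering instead.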
if not ids:
if count:
return 0
return []
# menu filtering is done only on main menu tree, not other menu lists
if context.get('ir.ui.menu.full_list'):
result = ids
else:
result = self._filter_visible_menus(cr, uid, ids, context=context)
if offset:
result = result[long(offset):]
if limit:
result = result[:long(limit)]
if count:
return len(result)
return result
def name_get(self, cr, uid, ids, context=None):
res = []
for id in ids:
elmt = self.browse(cr, uid, id, context=context)
res.append((id, self._get_one_full_name(elmt)))
return res
def _get_full_name(self, cr, uid, ids, name=None, args=None, context=None):
if context is None:
context = {}
res = {}
for elmt in self.browse(cr, uid, ids, context=context):
res[elmt.id] = self._get_one_full_name(elmt)
return res
def _get_one_full_name(self, elmt, level=6):
        if level <= 0:
return '...'
if elmt.parent_id:
parent_path = self._get_one_full_name(elmt.parent_id, level-1) + MENU_ITEM_SEPARATOR
else:
parent_path = ''
return parent_path + elmt.name
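    # Illustrative example: for a menu nested as Settings/Technical/Sequences,
    # _get_one_full_name returns "Settings/Technical/Sequences"; ancestors
    # beyond the `level` cutoff are abbreviated, e.g. ".../Technical/Sequences".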
def create(self, cr, uid, values, context=None):
self.clear_caches()
return super(ir_ui_menu, self).create(cr, uid, values, context=context)
def write(self, cr, uid, ids, values, context=None):
self.clear_caches()
return super(ir_ui_menu, self).write(cr, uid, ids, values, context=context)
def unlink(self, cr, uid, ids, context=None):
# Detach children and promote them to top-level, because it would be unwise to
# cascade-delete submenus blindly. We also can't use ondelete=set null because
# that is not supported when _parent_store is used (would silently corrupt it).
# TODO: ideally we should move them under a generic "Orphans" menu somewhere?
if isinstance(ids, (int, long)):
ids = [ids]
local_context = dict(context or {})
local_context['ir.ui.menu.full_list'] = True
direct_children_ids = self.search(cr, uid, [('parent_id', 'in', ids)], context=local_context)
if direct_children_ids:
self.write(cr, uid, direct_children_ids, {'parent_id': False})
result = super(ir_ui_menu, self).unlink(cr, uid, ids, context=context)
self.clear_caches()
return result
def copy(self, cr, uid, id, default=None, context=None):
res = super(ir_ui_menu, self).copy(cr, uid, id, default=default, context=context)
        datas = self.read(cr, uid, [res], ['name'])[0]
        rex = re.compile(r'\([0-9]+\)')
        concat = rex.findall(datas['name'])
        if concat:
            next_num = int(concat[0]) + 1
            datas['name'] = rex.sub('(%d)' % next_num, datas['name'])
        else:
            datas['name'] += '(1)'
        self.write(cr, uid, [res], {'name': datas['name']})
return res
    def read_image(self, path):
        if not path:
            return False
        path_info = path.split(',')
        icon_path = openerp.modules.get_module_resource(path_info[0], path_info[1])
        icon_image = False
        if icon_path:
            # open first, then guard the read: otherwise a failed open would
            # leave `icon_file` undefined when the finally clause runs
            icon_file = tools.file_open(icon_path, 'rb')
            try:
                icon_image = base64.encodestring(icon_file.read())
            finally:
                icon_file.close()
        return icon_image
def get_needaction_data(self, cr, uid, ids, context=None):
""" Return for each menu entry of ids :
- if it uses the needaction mechanism (needaction_enabled)
- the needaction counter of the related action, taking into account
the action domain
"""
if context is None:
context = {}
res = {}
menu_ids = set()
for menu in self.browse(cr, uid, ids, context=context):
menu_ids.add(menu.id)
ctx = None
if menu.action and menu.action.type in ('ir.actions.act_window', 'ir.actions.client') and menu.action.context:
try:
# use magical UnquoteEvalContext to ignore undefined client-side variables such as `active_id`
eval_ctx = tools.UnquoteEvalContext(**context)
ctx = eval(menu.action.context, locals_dict=eval_ctx, nocopy=True) or None
except Exception:
# if the eval still fails for some reason, we'll simply skip this menu
pass
menu_ref = ctx and ctx.get('needaction_menu_ref')
if menu_ref:
if not isinstance(menu_ref, list):
menu_ref = [menu_ref]
model_data_obj = self.pool.get('ir.model.data')
for menu_data in menu_ref:
try:
model, id = model_data_obj.get_object_reference(cr, uid, menu_data.split('.')[0], menu_data.split('.')[1])
if (model == 'ir.ui.menu'):
menu_ids.add(id)
except Exception:
pass
menu_ids = list(menu_ids)
for menu in self.browse(cr, uid, menu_ids, context=context):
res[menu.id] = {
'needaction_enabled': False,
'needaction_counter': False,
}
if menu.action and menu.action.type in ('ir.actions.act_window', 'ir.actions.client') and menu.action.res_model:
if menu.action.res_model in self.pool:
obj = self.pool[menu.action.res_model]
if obj._needaction:
if menu.action.type == 'ir.actions.act_window':
eval_context = self.pool['ir.actions.act_window']._get_eval_context(cr, uid, context=context)
dom = menu.action.domain and eval(menu.action.domain, eval_context) or []
else:
dom = eval(menu.action.params_store or '{}', {'uid': uid}).get('domain')
res[menu.id]['needaction_enabled'] = obj._needaction
res[menu.id]['needaction_counter'] = obj._needaction_count(cr, uid, dom, context=context)
return res
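    # Shape of the returned mapping (values here are illustrative):
    #   {42: {'needaction_enabled': True, 'needaction_counter': 7},
    #    43: {'needaction_enabled': False, 'needaction_counter': False}}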
def get_user_roots(self, cr, uid, context=None):
""" Return all root menu ids visible for the user.
:return: the root menu ids
:rtype: list(int)
"""
menu_domain = [('parent_id', '=', False)]
return self.search(cr, uid, menu_domain, context=context)
@api.cr_uid_context
@tools.ormcache_context('uid', keys=('lang',))
def load_menus_root(self, cr, uid, context=None):
fields = ['name', 'sequence', 'parent_id', 'action', 'web_icon_data']
menu_root_ids = self.get_user_roots(cr, uid, context=context)
menu_roots = self.read(cr, uid, menu_root_ids, fields, context=context) if menu_root_ids else []
return {
'id': False,
'name': 'root',
'parent_id': [-1, ''],
'children': menu_roots,
'all_menu_ids': menu_root_ids,
}
@api.cr_uid_context
@tools.ormcache_context('uid', 'debug', keys=('lang',))
def load_menus(self, cr, uid, debug, context=None):
""" Loads all menu items (all applications and their sub-menus).
:return: the menu root
:rtype: dict('children': menu_nodes)
"""
fields = ['name', 'sequence', 'parent_id', 'action', 'web_icon_data']
menu_root_ids = self.get_user_roots(cr, uid, context=context)
menu_roots = self.read(cr, uid, menu_root_ids, fields, context=context) if menu_root_ids else []
menu_root = {
'id': False,
'name': 'root',
'parent_id': [-1, ''],
'children': menu_roots,
'all_menu_ids': menu_root_ids,
}
if not menu_roots:
return menu_root
        # menus are loaded fully, unlike a regular tree view, because there is
        # a limited number of items (752 when all 6.1 addons are installed)
menu_ids = self.search(cr, uid, [('id', 'child_of', menu_root_ids)], 0, False, False, context=context)
menu_items = self.read(cr, uid, menu_ids, fields, context=context)
# adds roots at the end of the sequence, so that they will overwrite
# equivalent menu items from full menu read when put into id:item
# mapping, resulting in children being correctly set on the roots.
menu_items.extend(menu_roots)
menu_root['all_menu_ids'] = menu_ids # includes menu_root_ids!
# make a tree using parent_id
menu_items_map = dict(
(menu_item["id"], menu_item) for menu_item in menu_items)
for menu_item in menu_items:
if menu_item['parent_id']:
parent = menu_item['parent_id'][0]
else:
parent = False
if parent in menu_items_map:
menu_items_map[parent].setdefault(
'children', []).append(menu_item)
# sort by sequence a tree using parent_id
for menu_item in menu_items:
menu_item.setdefault('children', []).sort(
key=operator.itemgetter('sequence'))
return menu_root
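    # The returned structure is a nested dict tree, roughly (illustrative):
    #   {'id': False, 'name': 'root', 'all_menu_ids': [...], 'children': [
    #       {'id': 1, 'name': 'Sales', 'children': [...]},
    #       ...]}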
_columns = {
'name': fields.char('Menu', required=True, translate=True),
'sequence': fields.integer('Sequence'),
'child_id': fields.one2many('ir.ui.menu', 'parent_id', 'Child IDs'),
'parent_id': fields.many2one('ir.ui.menu', 'Parent Menu', select=True, ondelete="restrict"),
'parent_left': fields.integer('Parent Left', select=True),
'parent_right': fields.integer('Parent Right', select=True),
'groups_id': fields.many2many('res.groups', 'ir_ui_menu_group_rel',
'menu_id', 'gid', 'Groups', help="If you have groups, the visibility of this menu will be based on these groups. "\
"If this field is empty, Odoo will compute visibility based on the related object's read access."),
'complete_name': fields.function(_get_full_name, string='Full Path', type='char'),
'web_icon': fields.char('Web Icon File'),
'action': fields.reference('Action', selection=[
('ir.actions.report.xml', 'ir.actions.report.xml'),
('ir.actions.act_window', 'ir.actions.act_window'),
('ir.actions.act_url', 'ir.actions.act_url'),
('ir.actions.server', 'ir.actions.server'),
('ir.actions.client', 'ir.actions.client'),
]),
}
web_icon_data = openerp.fields.Binary('Web Icon Image',
compute="_compute_web_icon", store=True, attachment=True)
@api.depends('web_icon')
def _compute_web_icon(self):
for menu in self:
menu.web_icon_data = self.read_image(menu.web_icon)
_constraints = [
(osv.osv._check_recursion, 'Error ! You can not create recursive Menu.', ['parent_id'])
]
_defaults = {
'sequence': 10,
}
_order = "sequence,id"
_parent_store = True
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from nova.compute import flavors
from nova.compute import power_state
from nova.conductor.tasks import live_migrate
from nova import db
from nova import exception
from nova import test
class LiveMigrationTaskTestCase(test.TestCase):
def setUp(self):
super(LiveMigrationTaskTestCase, self).setUp()
self.context = "context"
self.instance_host = "host"
self.instance_uuid = "uuid"
self.instance_image = "image_ref"
self.instance = {
"host": self.instance_host,
"uuid": self.instance_uuid,
"power_state": power_state.RUNNING,
"memory_mb": 512,
"image_ref": self.instance_image}
self.destination = "destination"
self.block_migration = "bm"
self.disk_over_commit = "doc"
self.select_hosts_callback = self._select_hosts_callback
self._generate_task()
def _generate_task(self):
self.task = live_migrate.LiveMigrationTask(self.context,
self.instance, self.destination, self.block_migration,
self.disk_over_commit, self.select_hosts_callback)
def _select_hosts_callback(self, *args):
return ["host1"]
def test_execute_with_destination(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task, '_check_requested_destination')
self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration')
self.task._check_host_is_up(self.instance_host)
self.task._check_requested_destination()
self.task.compute_rpcapi.live_migration(self.context,
host=self.instance_host,
instance=self.instance,
dest=self.destination,
block_migration=self.block_migration,
migrate_data=None).AndReturn("bob")
self.mox.ReplayAll()
self.assertEqual("bob", self.task.execute())
def test_execute_without_destination(self):
self.destination = None
self._generate_task()
self.assertEqual(None, self.task.destination)
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task, '_find_destination')
self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration')
self.task._check_host_is_up(self.instance_host)
self.task._find_destination().AndReturn("found_host")
self.task.compute_rpcapi.live_migration(self.context,
host=self.instance_host,
instance=self.instance,
dest="found_host",
block_migration=self.block_migration,
migrate_data=None).AndReturn("bob")
self.mox.ReplayAll()
self.assertEqual("bob", self.task.execute())
def test_check_instance_is_running_passes(self):
self.task._check_instance_is_running()
def test_check_instance_is_running_fails_when_shutdown(self):
self.task.instance['power_state'] = power_state.SHUTDOWN
self.assertRaises(exception.InstanceNotRunning,
self.task._check_instance_is_running)
def test_check_instance_host_is_up(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
db.service_get_by_compute_host(self.context,
"host").AndReturn("service")
self.task.servicegroup_api.service_is_up("service").AndReturn(True)
self.mox.ReplayAll()
self.task._check_host_is_up("host")
def test_check_instance_host_is_up_fails_if_not_up(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
db.service_get_by_compute_host(self.context,
"host").AndReturn("service")
self.task.servicegroup_api.service_is_up("service").AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.task._check_host_is_up, "host")
def test_check_instance_host_is_up_fails_if_not_found(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
db.service_get_by_compute_host(self.context,
"host").AndRaise(exception.NotFound)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.task._check_host_is_up, "host")
def test_check_requested_destination(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(self.task, '_get_compute_info')
self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
self.mox.StubOutWithMock(self.task.compute_rpcapi,
'check_can_live_migrate_destination')
db.service_get_by_compute_host(self.context,
self.destination).AndReturn("service")
self.task.servicegroup_api.service_is_up("service").AndReturn(True)
hypervisor_details = {
"hypervisor_type": "a",
"hypervisor_version": 6.1,
"free_ram_mb": 513
}
self.task._get_compute_info(self.destination)\
.AndReturn(hypervisor_details)
self.task._get_compute_info(self.instance_host)\
.AndReturn(hypervisor_details)
self.task._get_compute_info(self.destination)\
.AndReturn(hypervisor_details)
self.task.compute_rpcapi.check_can_live_migrate_destination(
self.context, self.instance, self.destination,
self.block_migration, self.disk_over_commit).AndReturn(
"migrate_data")
self.mox.ReplayAll()
self.task._check_requested_destination()
self.assertEqual("migrate_data", self.task.migrate_data)
def test_check_requested_destination_fails_with_same_dest(self):
self.task.destination = "same"
self.task.source = "same"
self.assertRaises(exception.UnableToMigrateToSelf,
self.task._check_requested_destination)
    def test_check_requested_destination_fails_when_destination_not_found(self):
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
db.service_get_by_compute_host(self.context,
self.destination).AndRaise(exception.NotFound)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.task._check_requested_destination)
def test_check_requested_destination_fails_with_not_enough_memory(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.task._check_host_is_up(self.destination)
db.service_get_by_compute_host(self.context,
self.destination).AndReturn({
"compute_node": [{"free_ram_mb": 511}]
})
self.mox.ReplayAll()
self.assertRaises(exception.MigrationPreCheckError,
self.task._check_requested_destination)
def test_check_requested_destination_fails_with_hypervisor_diff(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task,
'_check_destination_has_enough_memory')
self.mox.StubOutWithMock(self.task, '_get_compute_info')
self.task._check_host_is_up(self.destination)
self.task._check_destination_has_enough_memory()
self.task._get_compute_info(self.instance_host).AndReturn({
"hypervisor_type": "b"
})
self.task._get_compute_info(self.destination).AndReturn({
"hypervisor_type": "a"
})
self.mox.ReplayAll()
self.assertRaises(exception.InvalidHypervisorType,
self.task._check_requested_destination)
def test_check_requested_destination_fails_with_hypervisor_too_old(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task,
'_check_destination_has_enough_memory')
self.mox.StubOutWithMock(self.task, '_get_compute_info')
self.task._check_host_is_up(self.destination)
self.task._check_destination_has_enough_memory()
self.task._get_compute_info(self.instance_host).AndReturn({
"hypervisor_type": "a",
"hypervisor_version": 7
})
self.task._get_compute_info(self.destination).AndReturn({
"hypervisor_type": "a",
"hypervisor_version": 6
})
self.mox.ReplayAll()
self.assertRaises(exception.DestinationHypervisorTooOld,
self.task._check_requested_destination)
def test_find_destination_works(self):
self.mox.StubOutWithMock(self.task.image_service, 'show')
self.mox.StubOutWithMock(flavors, 'extract_flavor')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
self.task.image_service.show(self.context,
self.instance_image).AndReturn("image")
flavors.extract_flavor(self.instance).AndReturn("inst_type")
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")
self.mox.ReplayAll()
self.assertEqual("host1", self.task._find_destination())
def test_find_destination_no_image_works(self):
self.instance['image_ref'] = ''
self.mox.StubOutWithMock(flavors, 'extract_flavor')
self.mox.StubOutWithMock(self.task, 'select_hosts_callback')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
flavors.extract_flavor(self.instance).AndReturn("inst_type")
# request_spec with no image set
request_spec = {'instance_properties': self.instance,
'instance_type': "inst_type",
'instance_uuids': [self.instance['uuid']]}
self.task.select_hosts_callback(self.context,
request_spec, mox.IgnoreArg()).AndReturn(["host1"])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")
self.mox.ReplayAll()
self.assertEqual("host1", self.task._find_destination())
def _test_find_destination_retry_hypervisor_raises(self, error):
self.mox.StubOutWithMock(self.task.image_service, 'show')
self.mox.StubOutWithMock(flavors, 'extract_flavor')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
self.task.image_service.show(self.context,
self.instance_image).AndReturn("image")
flavors.extract_flavor(self.instance).AndReturn("inst_type")
self.task._check_compatible_with_source_hypervisor("host1")\
.AndRaise(error)
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")
self.mox.ReplayAll()
self.assertEqual("host1", self.task._find_destination())
def test_find_destination_retry_with_old_hypervisor(self):
self._test_find_destination_retry_hypervisor_raises(
exception.DestinationHypervisorTooOld)
def test_find_destination_retry_with_invalid_hypervisor_type(self):
self._test_find_destination_retry_hypervisor_raises(
exception.InvalidHypervisorType)
def test_find_destination_retry_with_invalid_livem_checks(self):
self.mox.StubOutWithMock(self.task.image_service, 'show')
self.mox.StubOutWithMock(flavors, 'extract_flavor')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
self.task.image_service.show(self.context,
self.instance_image).AndReturn("image")
flavors.extract_flavor(self.instance).AndReturn("inst_type")
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")\
.AndRaise(exception.Invalid)
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")
self.mox.ReplayAll()
self.assertEqual("host1", self.task._find_destination())
def test_find_destination_retry_exceeds_max(self):
self.flags(scheduler_max_attempts=1)
self.mox.StubOutWithMock(self.task.image_service, 'show')
self.mox.StubOutWithMock(flavors, 'extract_flavor')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
self.task.image_service.show(self.context,
self.instance_image).AndReturn("image")
flavors.extract_flavor(self.instance).AndReturn("inst_type")
self.task._check_compatible_with_source_hypervisor("host1")\
.AndRaise(exception.DestinationHypervisorTooOld)
self.mox.ReplayAll()
self.assertRaises(exception.NoValidHost, self.task._find_destination)
def test_not_implemented_rollback(self):
self.assertRaises(NotImplementedError, self.task.rollback)
|
|
import datetime
import os
import tba_config
from google.appengine.api import memcache
from google.appengine.ext import ndb
from google.appengine.ext.webapp import template
from base_controller import CacheableHandler
from consts.district_type import DistrictType
from helpers.match_helper import MatchHelper
from helpers.award_helper import AwardHelper
from helpers.team_helper import TeamHelper
from helpers.event_helper import EventHelper
from models.event import Event
class EventList(CacheableHandler):
"""
List all Events.
"""
VALID_YEARS = list(reversed(range(1992, tba_config.MAX_YEAR + 1)))
CACHE_VERSION = 4
CACHE_KEY_FORMAT = "event_list_{}_{}" # (year, explicit_year)
def __init__(self, *args, **kw):
super(EventList, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24
def get(self, year=None, explicit_year=False):
if year == '':
return self.redirect("/events")
if year:
if not year.isdigit():
self.abort(404)
year = int(year)
if year not in self.VALID_YEARS:
self.abort(404)
explicit_year = True
else:
year = datetime.datetime.now().year
explicit_year = False
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(year, explicit_year)
super(EventList, self).get(year, explicit_year)
def _render(self, year=None, explicit_year=False):
event_keys = Event.query(Event.year == year).fetch(1000, keys_only=True)
events = ndb.get_multi(event_keys)
EventHelper.sort_events(events)
week_events = EventHelper.groupByWeek(events)
district_enums = set()
for event in events:
if event.event_district_enum is not None and event.event_district_enum != DistrictType.NO_DISTRICT:
district_enums.add(event.event_district_enum)
districts = [] # a tuple of (district abbrev, district name)
for district_enum in district_enums:
districts.append((DistrictType.type_abbrevs[district_enum],
DistrictType.type_names[district_enum]))
districts = sorted(districts, key=lambda d: d[1])
self.template_values.update({
"events": events,
"explicit_year": explicit_year,
"selected_year": year,
"valid_years": self.VALID_YEARS,
"week_events": week_events,
"districts": districts,
})
path = os.path.join(os.path.dirname(__file__), '../templates/event_list.html')
return template.render(path, self.template_values)
def memcacheFlush(self):
year = datetime.datetime.now().year
keys = [self.CACHE_KEY_FORMAT.format(year, True), self.CACHE_KEY_FORMAT.format(year, False)]
memcache.delete_multi(keys)
return keys
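# Cache note (illustrative): EventList pages are cached per (year,
# explicit_year) pair, so memcacheFlush must delete both variants for the
# current year, e.g. "event_list_2014_True" and "event_list_2014_False".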
class EventDetail(CacheableHandler):
"""
Show an Event.
event_code like "2010ct"
"""
LONG_CACHE_EXPIRATION = 60 * 60 * 24
SHORT_CACHE_EXPIRATION = 60 * 5
CACHE_VERSION = 4
CACHE_KEY_FORMAT = "event_detail_{}" # (event_key)
def __init__(self, *args, **kw):
super(EventDetail, self).__init__(*args, **kw)
self._cache_expiration = self.LONG_CACHE_EXPIRATION
def get(self, event_key):
if not event_key:
return self.redirect("/events")
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(event_key)
super(EventDetail, self).get(event_key)
def _render(self, event_key):
event = Event.get_by_id(event_key)
if not event:
self.abort(404)
event.prepAwardsMatchesTeams()
awards = AwardHelper.organizeAwards(event.awards)
if event.within_a_day:
cleaned_matches = event.matches
else:
cleaned_matches = MatchHelper.deleteInvalidMatches(event.matches)
matches = MatchHelper.organizeMatches(cleaned_matches)
teams = TeamHelper.sortTeams(event.teams)
num_teams = len(teams)
middle_value = num_teams / 2
if num_teams % 2 != 0:
middle_value += 1
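        # Split the team list into two display columns; with an odd count the
        # first column gets the extra team (e.g. 25 teams -> 13 + 12).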
teams_a, teams_b = teams[:middle_value], teams[middle_value:]
        oprs = list(event.matchstats['oprs'].items()) if (event.matchstats is not None and 'oprs' in event.matchstats) else []
oprs = sorted(oprs, key=lambda t: t[1], reverse=True) # sort by OPR
oprs = oprs[:15] # get the top 15 OPRs
if event.now:
matches_recent = MatchHelper.recentMatches(cleaned_matches)
matches_upcoming = MatchHelper.upcomingMatches(cleaned_matches)
else:
matches_recent = None
matches_upcoming = None
bracket_table = MatchHelper.generateBracket(matches, event.alliance_selections)
if event.year == 2015:
playoff_advancement = MatchHelper.generatePlayoffAdvancement2015(matches, event.alliance_selections)
for comp_level in ['qf', 'sf']:
if comp_level in bracket_table:
del bracket_table[comp_level]
else:
playoff_advancement = None
district_points_sorted = None
if event.district_points:
district_points_sorted = sorted(event.district_points['points'].items(), key=lambda (team, points): -points['total'])
self.template_values.update({
"event": event,
"matches": matches,
"matches_recent": matches_recent,
"matches_upcoming": matches_upcoming,
"awards": awards,
"teams_a": teams_a,
"teams_b": teams_b,
"num_teams": num_teams,
"oprs": oprs,
"bracket_table": bracket_table,
"playoff_advancement": playoff_advancement,
"district_points_sorted": district_points_sorted,
})
if event.within_a_day:
self._cache_expiration = self.SHORT_CACHE_EXPIRATION
path = os.path.join(os.path.dirname(__file__), '../templates/event_details.html')
return template.render(path, self.template_values)
class EventRss(CacheableHandler):
"""
    Generates an RSS feed for the matches in an event.
"""
CACHE_VERSION = 2
CACHE_KEY_FORMAT = "event_rss_{}" # (event_key)
def __init__(self, *args, **kw):
super(EventRss, self).__init__(*args, **kw)
self._cache_expiration = 60 * 5
def get(self, event_key):
if not event_key:
return self.redirect("/events")
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(event_key)
super(EventRss, self).get(event_key)
def _render(self, event_key):
event = Event.get_by_id(event_key)
if not event:
self.abort(404)
matches = MatchHelper.organizeMatches(event.matches)
self.template_values.update({
"event": event,
"matches": matches,
"datetime": datetime.datetime.now()
})
path = os.path.join(os.path.dirname(__file__), '../templates/event_rss.xml')
self.response.headers['content-type'] = 'application/xml; charset=UTF-8'
return template.render(path, self.template_values)
|
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import sys
sys.path.append("..")
from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
import paddle.nn as nn
import paddle
from paddle.nn.functional import interpolate
from test_nearest_interp_v2_op import nearest_neighbor_interp_np
paddle.enable_static()
class TestNearestInterpOp(OpTest):
def set_npu(self):
self.__class__.use_npu = True
self.place = paddle.NPUPlace(0)
def setUp(self):
self.set_npu()
self.out_size = None
self.actual_shape = None
self.data_layout = 'NCHW'
self.init_test_case()
self.op_type = "nearest_interp_v2"
input_np = np.random.random(self.input_shape).astype("float32")
if self.data_layout == "NCHW":
in_h = self.input_shape[2]
in_w = self.input_shape[3]
else:
in_h = self.input_shape[1]
in_w = self.input_shape[2]
scale_h = 0
scale_w = 0
if self.scale:
            if isinstance(self.scale, (int, float)):
if self.scale > 0:
scale_h = scale_w = float(self.scale)
if isinstance(self.scale, list) and len(self.scale) == 1:
scale_w = scale_h = self.scale[0]
elif isinstance(self.scale, list) and len(self.scale) > 1:
scale_w = self.scale[1]
scale_h = self.scale[0]
output_h = int(in_h * scale_h)
output_w = int(in_w * scale_w)
else:
output_h = self.out_h
output_w = self.out_w
output_np = nearest_neighbor_interp_np(
input_np, output_h, output_w, scale_h, scale_w, self.out_size,
self.actual_shape, self.align_corners, self.data_layout)
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
if self.actual_shape is not None:
self.inputs['OutSize'] = self.actual_shape
self.attrs = {
'out_h': self.out_h,
'out_w': self.out_w,
'interp_method': self.interp_method,
'align_corners': self.align_corners,
'data_layout': self.data_layout
}
if self.scale:
            if isinstance(self.scale, (int, float)):
if self.scale > 0:
self.scale = [self.scale]
if isinstance(self.scale, list) and len(self.scale) == 1:
self.scale = [self.scale[0], self.scale[0]]
self.attrs['scale'] = self.scale
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(
self.place, ['X'], 'Out', in_place=True, max_relative_error=0.006)
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [2, 3, 4, 5]
self.out_h = 2
self.out_w = 2
self.scale = 0.
self.out_size = np.array([3, 3]).astype("int32")
self.align_corners = False
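# Shape bookkeeping used by these tests (illustrative): with NCHW input
# [2, 3, 4, 5] and a positive scale, output_h/w = int(in_h/w * scale), e.g.
# scale=2.0 on a 7x5 map yields 14x10; when out_size is set (as above, [3, 3])
# the reference helper uses it in place of out_h/out_w.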
class TestNearestNeighborInterpCase1(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [4, 1, 7, 8]
self.out_h = 1
self.out_w = 1
self.scale = 0.
self.align_corners = False
class TestNearestNeighborInterpCase2(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.
self.align_corners = False
class TestNearestNeighborInterpCase3(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = 0.
self.align_corners = False
class TestNearestNeighborInterpCase4(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [4, 1, 7, 8]
self.out_h = 1
self.out_w = 1
self.scale = 0.
self.out_size = np.array([2, 2]).astype("int32")
self.align_corners = False
class TestNearestNeighborInterpCase5(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.
self.out_size = np.array([11, 11]).astype("int32")
self.align_corners = False
class TestNearestNeighborInterpCase6(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = 0.
self.out_size = np.array([65, 129]).astype("int32")
self.align_corners = False
class TestNearestNeighborInterpSame(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [2, 3, 32, 64]
self.out_h = 32
self.out_w = 64
self.scale = 0.
self.align_corners = False
class TestNearestNeighborInterpActualShape(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 2, 32, 16]
self.out_h = 64
self.out_w = 32
self.scale = 0.
self.out_size = np.array([66, 40]).astype("int32")
self.align_corners = False
class TestNearestNeighborInterpScale1(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 2, 7, 5]
self.out_h = 64
self.out_w = 32
self.scale = 2.
self.out_size = None
self.align_corners = False
class TestNearestNeighborInterpScale2(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 2, 5, 7]
self.out_h = 64
self.out_w = 32
self.scale = 1.5
self.out_size = None
self.align_corners = False
class TestNearestNeighborInterpScale3(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 2, 7, 5]
self.out_h = 64
self.out_w = 32
self.scale = [2.0, 3.0]
self.out_size = None
self.align_corners = False
class TestNearestInterpOp_attr_tensor(OpTest):
def set_npu(self):
self.__class__.use_npu = True
self.place = paddle.NPUPlace(0)
def setUp(self):
self.set_npu()
self.out_size = None
self.actual_shape = None
self.init_test_case()
self.op_type = "nearest_interp_v2"
self.shape_by_1Dtensor = False
self.scale_by_1Dtensor = False
self.attrs = {
'interp_method': self.interp_method,
'align_corners': self.align_corners,
}
input_np = np.random.random(self.input_shape).astype("float32")
self.inputs = {'X': input_np}
if self.scale_by_1Dtensor:
self.inputs['Scale'] = np.array([self.scale]).astype("float32")
elif self.scale:
            if isinstance(self.scale, (int, float)):
if self.scale > 0:
scale_h = scale_w = float(self.scale)
if isinstance(self.scale, list) and len(self.scale) == 1:
scale_w = scale_h = self.scale[0]
elif isinstance(self.scale, list) and len(self.scale) > 1:
scale_w = self.scale[1]
scale_h = self.scale[0]
out_h = int(self.input_shape[2] * scale_h)
out_w = int(self.input_shape[3] * scale_w)
else:
out_h = self.out_h
out_w = self.out_w
if self.shape_by_1Dtensor:
self.inputs['OutSize'] = self.out_size
elif self.out_size is not None:
size_tensor = []
for index, ele in enumerate(self.out_size):
size_tensor.append(("x" + str(index), np.ones(
(1)).astype('int32') * ele))
self.inputs['SizeTensor'] = size_tensor
self.attrs['out_h'] = self.out_h
self.attrs['out_w'] = self.out_w
if self.scale:
            if isinstance(self.scale, (int, float)):
if self.scale > 0:
self.scale = [self.scale]
if isinstance(self.scale, list) and len(self.scale) == 1:
self.scale = [self.scale[0], self.scale[0]]
self.attrs['scale'] = self.scale
output_np = nearest_neighbor_interp_np(input_np, out_h, out_w, 0, 0,
self.out_size, self.actual_shape,
self.align_corners)
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(self.place, ['X'], 'Out', in_place=True)
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [2, 5, 4, 4]
self.out_h = 3
self.out_w = 3
self.scale = 0.
self.out_size = [3, 3]
self.align_corners = False
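# SizeTensor entries are built in setUp above as
#   ("x" + str(index), np.ones((1)).astype('int32') * ele)
# i.e. one named 1-D int32 tensor per output dimension; the subclasses below
# exercise the out_size-list, 1-D OutSize tensor and 1-D Scale tensor paths.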
# out_size is a tensor list
class TestNearestInterp_attr_tensor_Case1(TestNearestInterpOp_attr_tensor):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.
self.out_size = [8, 12]
self.align_corners = False
# out_size is a 1-D tensor
class TestNearestInterp_attr_tensor_Case2(TestNearestInterpOp_attr_tensor):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 2, 32, 16]
self.out_h = 64
self.out_w = 32
self.scale = 0.
self.out_size = np.array([66, 40]).astype("int32")
self.align_corners = False
self.shape_by_1Dtensor = True
# scale is a 1-D tensor
class TestNearestInterp_attr_tensor_Case3(TestNearestInterpOp_attr_tensor):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 2, 32, 16]
self.out_h = 64
self.out_w = 32
self.scale = 2.0
self.out_size = None
self.align_corners = False
self.scale_by_1Dtensor = True
class TestNearestInterpOpAPI_dy(unittest.TestCase):
def test_case(self):
if core.is_compiled_with_npu():
place = core.NPUPlace(0)
else:
place = core.CPUPlace()
with fluid.dygraph.guard(place):
input_data = np.random.random((2, 3, 6, 6)).astype("float32")
scale_np = np.array([2, 2]).astype("int64")
input_x = paddle.to_tensor(input_data)
scale = paddle.to_tensor(scale_np)
expect_res = nearest_neighbor_interp_np(
input_data, out_h=12, out_w=12, align_corners=False)
out = interpolate(
x=input_x,
scale_factor=scale,
mode="nearest",
align_corners=False)
self.assertTrue(np.allclose(out.numpy(), expect_res))
if __name__ == "__main__":
unittest.main()
|
|
import kernelwidget
import bufferswidget
import displaywidget
import newdialog
import stylesheet
import settings
import icons
import utils
import pygpuip
import os
from PySide import QtGui, QtCore
class MainWindow(QtGui.QMainWindow):
def __init__(self, path, settings = None):
super(MainWindow, self).__init__()
self.setPath(path)
self.setWindowIcon(icons.get("pug"))
self.setStyleSheet(stylesheet.data)
# Start in center of the screen, covering 80%
r = QtGui.QDesktopWidget().availableGeometry()
self.setGeometry(r.width()*0.10, r.height() * 0.10,
r.width() * 0.80, r.height() * 0.80)
self.toolbarIconSize = QtCore.QSize(32,32)
self.interactive = False
self.createMenuAndActions()
self.createDockWidgets()
# Central tab widget (main part of the gui)
self.kernelTabWidget = QtGui.QTabWidget(self)
self.setCentralWidget(self.kernelTabWidget)
self.reset()
self.settings = settings
if self.settings:
self.initFromSettings()
self.needsBuild = True
self.needsAllocate = True
self.needsImport = True
def setPath(self, path):
self.path = path
if path:
self.setWindowTitle("gpuip - %s" % self.path)
else:
self.setWindowTitle("gpuip")
def new(self):
dialog = newdialog.NewDialog(self)
if dialog.exec_():
self.setPath(None)
self.settings = dialog.getSettings()
self.initFromSettings()
self.log("Creating a new session")
def newFromExisting(self):
f = QtGui.QFileDialog.getOpenFileName(
None, "New from existing", QtCore.QDir.currentPath(), "ip (*ip)")
if f[0]:
s = settings.Settings()
s.read(f[0])
dialog = newdialog.NewDialog(self)
dialog.initFromSettings(s)
if dialog.exec_():
self.setPath(None)
self.settings = dialog.getSettings()
self.initFromSettings()
self.log("Creating new session from previous " + f[0])
def open(self):
f = QtGui.QFileDialog.getOpenFileName(
self, "Open", QtCore.QDir.currentPath(), "ip (*ip)")
if f[0]:
self.settings = settings.Settings()
self.settings.read(f[0])
self.initFromSettings()
self.setPath(f[0])
self.log("Opening " + f[0])
def save(self):
if self.path:
self.updateSettings()
self.settings.write(self.path)
self.log("Saved current session to %s" % self.path)
else:
self.saveAs()
def saveAs(self):
f = QtGui.QFileDialog.getSaveFileName(
self, "Save", QtCore.QDir.currentPath(), "ip (*ip)")
if f[0]:
self.setPath(f[0])
self.save()
def updateSettings(self):
# Get buffer input and outputs
for b in self.settings.buffers:
b.input = self.buffersWidget.getBufferInput(b.name)
b.output = self.buffersWidget.getBufferOutput(b.name)
# Get in buffers, out buffers and param values
for k in self.settings.kernels:
kw = self.kernelWidgets[k.name]
k.code = str(kw.codeEditor.toPlainText())
for inb in k.inBuffers:
inb.buffer = str(kw.inBuffers[inb.name].cbox.currentText())
for outb in k.outBuffers:
outb.buffer = str(kw.outBuffers[outb.name].cbox.currentText())
for p in k.params:
kernelParam = kw.params[p.name]
p.value = utils.safeEval(kernelParam.lineEdit.text())
def initFromSettings(self):
self.reset()
self.ip, self.buffers, self.kernels = self.settings.create()
self.displayWidget.setBuffers(self.buffers)
bufferNames = [b.name for b in self.settings.buffers]
for b in self.settings.buffers:
bufinputpath = b.input
if bufinputpath != "" and not os.path.isfile(bufinputpath):
bufinputpath = ""
self.logError("Buffer %s input path not valid: %s" %
(b.name, b.input))
self.buffersWidget.addBuffer(b.name, b.type,
b.channels, bufinputpath, b.output)
self.buffersWidget.layout.addStretch()
setBoilerPlate = True
self.kernelWidgets = {}
for k in self.settings.kernels:
w = kernelwidget.KernelWidget(self.kernelTabWidget,
self.interactiveProcess)
for inb in k.inBuffers:
w.addInBuffer(inb.name, inb.buffer, bufferNames)
for outb in k.outBuffers:
w.addOutBuffer(outb.name, outb.buffer, bufferNames)
for p in k.params:
w.addParameter(p.name, p.value, p.default, p.min, p.max, p.type)
self.kernelTabWidget.addTab(w, k.name)
self.kernelWidgets[k.name] = w
if k.code != "":
w.codeEditor.setText(k.code)
setBoilerPlate = False
if setBoilerPlate:
self.setBoilerplateCode(True)
def reset(self):
self.logBrowser.clear()
self.ip = None
self.bufferData = None
self.kernels = []
self.buffers = {}
self.kernelWidgets = {}
# Re-add GUI components for buffers widget
scroll = QtGui.QScrollArea(self)
scroll.setWidgetResizable(True)
self.buffersWidget = bufferswidget.BuffersWidget(scroll)
scroll.setWidget(self.buffersWidget)
self.dockBuffers.setWidget(scroll)
self.buffersWidget.show()
# Remove all kernel widgets from the kernel tab widget
for i in range(self.kernelTabWidget.count()):
self.kernelTabWidget.removeTab(0)
def build(self):
kernelNames = ""
for kernel in self.kernels:
kernelWidget = self.kernelWidgets[kernel.name]
kernel.code = str(kernelWidget.codeEditor.toPlainText())
kernelNames += kernel.name + ", "
self.log("Building kernels [ <i>%s</i> ] ..." % kernelNames[:-2])
clock = utils.StopWatch()
err = self.ip.Build()
if not err:
self.logSuccess("All kernels were built.", clock)
self.needsBuild = False
return True
else:
self.logError(err)
QtGui.QMessageBox.critical(self, self.tr("Kernel Build Error"),
self.tr(err), QtGui.QMessageBox.Ok,
QtGui.QMessageBox.Ok)
return False
def import_from_images(self):
self.updateSettings()
clock = utils.StopWatch()
for b in self.settings.buffers:
if b.input:
self.log("Importing data from image <i>%s</i> to <i>%s</i>." \
% (b.input, b.name))
err = self.buffers[b.name].Read(b.input, utils.getNumCores())
if err:
self.logError(err)
return False
self.logSuccess("Image data imported", clock)
self.displayWidget.refreshDisplay()
self.needsImport = False
return True
def allocate(self):
self.updateSettings()
clock = utils.StopWatch()
bufferNames = [b.name for b in self.settings.buffers]
self.log("Allocating buffers <i> %s </i> ..." % bufferNames)
width, height = utils.allocateBufferData(self.buffers)
self.ip.SetDimensions(width, height)
err = self.ip.Allocate()
if err:
self.logError(err)
return False
else:
self.logSuccess("All buffers were allocated.", clock)
self.needsAllocate = False
clock = utils.StopWatch()
for b in self.settings.buffers:
if b.input:
err = self.ip.WriteBufferToGPU(self.buffers[b.name])
if err:
self.logError(err)
return False
self.logSuccess("Data transfered to GPU.", clock)
return True
def interactiveProcess(self):
if self.interactive:
self.run()
def run(self):
self.updateSettings()
        # Run any prerequisite steps first; if one of them fails, abort.
if (self.needsBuild and not self.build()) or \
(self.needsAllocate and not self.allocate()) or \
(self.needsImport and not self.import_from_images()):
return False
self.log("Running kernels...")
self.settings.updateKernels(self.kernels, self.buffers)
clock = utils.StopWatch()
err = self.ip.Run()
if err:
self.logError(err)
return False
self.logSuccess("All kernels processed.", clock)
clock = utils.StopWatch()
for b in self.buffers:
err = self.ip.ReadBufferFromGPU(self.buffers[b])
if err:
self.logError(err)
return False
self.logSuccess("Data transfered from GPU.", clock)
self.displayWidget.refreshDisplay()
return True
def export_to_images(self):
self.updateSettings()
clock = utils.StopWatch()
for b in self.settings.buffers:
if b.output:
self.log("Exporting data from buffer <i>%s</i> to <i>%s</i>." \
% (b.name, b.output))
err = self.buffers[b.name].Write(b.output, utils.getNumCores())
if err:
self.logError(err)
return False
self.logSuccess("Buffer data transfered to images.", clock)
return True
def run_all_steps(self):
for f in ["build","import_from_images","allocate","process","export_to_images"]:
getattr(self,f)() # run func
QtGui.QApplication.instance().processEvents() # update gui
return True
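    # Pipeline order matters: kernels must be built and buffers imported and
    # allocated before Run() can execute. run() above re-runs any stale step
    # automatically via the needsBuild/needsAllocate/needsImport flags.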
def log(self, msg):
self.logBrowser.append(utils.getTimeStr() + msg)
def logSuccess(self, msg, clock):
success = "<font color='green'>Success: </font>"
clockStr= "<i> " + str(clock) + "</i>"
self.logBrowser.append(utils.getTimeStr() + success + msg + clockStr)
def logError(self, msg):
error = "<font color='red'>Error: </font>"
self.logBrowser.append(utils.getTimeStr() + error + msg)
def toggleInteractive(self):
self.interactive = not self.interactive
def refreshCodeFromFile(self):
ret = QtGui.QMessageBox.warning(
self, self.tr("Refresh Code From File"),
self.tr("Refreshing the code will not save current"+\
" code. \nDo you want to continue?"),
QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel,
QtGui.QMessageBox.Cancel)
if ret != QtGui.QMessageBox.StandardButton.Cancel:
self.settings.updateCode()
for k in self.settings.kernels:
editor = self.kernelWidgets[k.name].codeEditor
editor.clear()
editor.setText(k.code)
def setBoilerplateCode(self, skipDialog = False):
if not skipDialog:
ret = QtGui.QMessageBox.warning(
self, self.tr("Set Boilerplate Code"),
self.tr("Setting the boilerplate code will remove previous"+\
" code. \nDo you want to continue?"),
QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel,
QtGui.QMessageBox.Cancel)
if ret == QtGui.QMessageBox.StandardButton.Cancel:
return
for kernel in self.kernels:
editor = self.kernelWidgets[kernel.name].codeEditor
if skipDialog and str(editor.toPlainText()) != "":
return
code = self.ip.BoilerplateCode(kernel)
editor.clear()
editor.setText(code)
def createDockWidgets(self):
LEFT = QtCore.Qt.LeftDockWidgetArea
RIGHT = QtCore.Qt.RightDockWidgetArea
# Create Log dock
dock = QtGui.QDockWidget("Log", self)
self.logBrowser = QtGui.QTextBrowser(dock)
dock.setAllowedAreas(QtCore.Qt.BottomDockWidgetArea)
dock.setWidget(self.logBrowser)
self.addDockWidget(QtCore.Qt.BottomDockWidgetArea, dock)
self.windowsMenu.addAction(dock.toggleViewAction())
# Create buffers dock
self.dockBuffers = QtGui.QDockWidget("Buffers", self)
self.dockBuffers.setAllowedAreas(LEFT | RIGHT)
self.buffersWidget = bufferswidget.BuffersWidget(self)
self.dockBuffers.setWidget(self.buffersWidget)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dockBuffers)
self.windowsMenu.addAction(self.dockBuffers.toggleViewAction())
# Create display dock
dock = QtGui.QDockWidget("Display", self)
dock.setAllowedAreas(LEFT | RIGHT)
self.displayWidget = displaywidget.DisplayWidget(dock)
checkBox = self.displayWidget.interactiveCheckBox
checkBox.stateChanged.connect(self.toggleInteractive)
dock.setWidget(self.displayWidget)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, dock)
self.windowsMenu.addAction(dock.toggleViewAction())
# The buffers tab starts with being stacked on the display dock
self.tabifyDockWidget(dock, self.dockBuffers)
def createMenuAndActions(self):
menuNames = ["&File", "&Editor", "&Run", "&Windows", "&Help"]
fileMenu, editorMenu, runMenu, self.windowsMenu, helpMenu = \
[self.menuBar().addMenu(name) for name in menuNames]
toolBar = self.addToolBar("Toolbar")
toolBar.setIconSize(self.toolbarIconSize)
def _addAction(icon, actionName, shortcut, func, menu, toolbar):
action = QtGui.QAction(icon, actionName, self)
action.triggered.connect(func)
if shortcut:
action.setShortcut(shortcut)
menu.addAction(action)
if toolbar:
toolbar.addAction(action)
_addAction(icons.get("new"), "&New", QtGui.QKeySequence.New,
self.new, fileMenu, toolBar)
_addAction(icons.get("newExisting"), "&New from existing", None,
self.newFromExisting, fileMenu, toolBar),
_addAction(icons.get("open"), "&Open", QtGui.QKeySequence.Open,
self.open, fileMenu, toolBar),
_addAction(icons.get("save"), "&Save", QtGui.QKeySequence.Save,
self.save, fileMenu, toolBar),
_addAction(icons.get("save"), "&Save As", QtGui.QKeySequence.SaveAs,
self.saveAs, fileMenu, None),
_addAction(QtGui.QIcon(""), "&Quit", "Ctrl+Q",
self.close, fileMenu, None),
toolBar.addSeparator()
_addAction(icons.get("refresh"), "&Refresh Code From File", "Ctrl+R",
self.refreshCodeFromFile,editorMenu,toolBar),
_addAction(icons.get("boilerplate"), "&Set Boilerplate Code", "Ctrl+L",
self.setBoilerplateCode,editorMenu,toolBar),
toolBar.addSeparator()
_addAction(icons.get("build"), "1. &Build", "Ctrl+B",
self.build, runMenu, toolBar),
_addAction(icons.get("import"), "2. &Import from images", "Ctrl+W",
self.import_from_images, runMenu, toolBar),
_addAction(icons.get("init"), "3. &Allocate", "Ctrl+I",
self.allocate, runMenu, toolBar),
_addAction(icons.get("process"), "4. &Run", "Ctrl+P",
self.run, runMenu, toolBar),
_addAction(icons.get("export"), "5. &Export to images", "Ctrl+E",
self.export_to_images, runMenu, toolBar),
_addAction(QtGui.QIcon(""), "&All steps", "Ctrl+A",
self.run_all_steps, runMenu, None),
_addAction(QtGui.QIcon(""), "About &Qt", None,
QtGui.qApp.aboutQt, helpMenu, None)
|
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=unused-argument
import errno
import logging
import os
import re
import shutil
import tempfile
import threading
import time
from devil.android import decorators
from devil.android import device_errors
from devil.android.sdk import adb_wrapper
from devil.utils import reraiser_thread
class LogcatMonitor(object):
_WAIT_TIME = 0.2
_THREADTIME_RE_FORMAT = (
r'(?P<date>\S*) +(?P<time>\S*) +(?P<proc_id>%s) +(?P<thread_id>%s) +'
r'(?P<log_level>%s) +(?P<component>%s) *: +(?P<message>%s)$')
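  # Illustrative example (not from the original source): a 'threadtime'
  # logcat line that the pattern above is built to parse looks like
  #   01-01 12:00:00.123  1234  5678 I MyTag : hello world
  # yielding date='01-01', time='12:00:00.123', proc_id='1234',
  # thread_id='5678', log_level='I', component='MyTag', message='hello world'.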
def __init__(self, adb, clear=True, filter_specs=None, output_file=None):
"""Create a LogcatMonitor instance.
Args:
adb: An instance of adb_wrapper.AdbWrapper.
clear: If True, clear the logcat when monitoring starts.
filter_specs: An optional list of '<tag>[:priority]' strings.
output_file: File path to save recorded logcat.
"""
if isinstance(adb, adb_wrapper.AdbWrapper):
self._adb = adb
else:
      raise ValueError('Unsupported type passed for argument "adb"')
self._clear = clear
self._filter_specs = filter_specs
self._output_file = output_file
self._record_file = None
self._record_thread = None
self._stop_recording_event = threading.Event()
@decorators.WithTimeoutAndRetriesDefaults(10, 0)
def WaitFor(self, success_regex, failure_regex=None, timeout=None,
retries=None):
"""Wait for a matching logcat line or until a timeout occurs.
This will attempt to match lines in the logcat against both |success_regex|
and |failure_regex| (if provided). Note that this calls re.search on each
logcat line, not re.match, so the provided regular expressions don't have
to match an entire line.
Args:
success_regex: The regular expression to search for.
failure_regex: An optional regular expression that, if hit, causes this
to stop looking for a match. Can be None.
timeout: timeout in seconds
retries: number of retries
Returns:
A match object if |success_regex| matches a part of a logcat line, or
None if |failure_regex| matches a part of a logcat line.
Raises:
CommandFailedError on logcat failure (NOT on a |failure_regex| match).
CommandTimeoutError if no logcat line matching either |success_regex| or
|failure_regex| is found in |timeout| seconds.
DeviceUnreachableError if the device becomes unreachable.
LogcatMonitorCommandError when calling |WaitFor| while not recording
logcat.
"""
if self._record_thread is None:
raise LogcatMonitorCommandError(
'Must be recording logcat when calling |WaitFor|',
device_serial=str(self._adb))
if isinstance(success_regex, basestring):
success_regex = re.compile(success_regex)
if isinstance(failure_regex, basestring):
failure_regex = re.compile(failure_regex)
logging.debug('Waiting %d seconds for "%s"', timeout, success_regex.pattern)
# NOTE This will continue looping until:
# - success_regex matches a line, in which case the match object is
# returned.
# - failure_regex matches a line, in which case None is returned
# - the timeout is hit, in which case a CommandTimeoutError is raised.
with open(self._record_file.name, 'r') as f:
while True:
line = f.readline()
if line:
m = success_regex.search(line)
if m:
return m
if failure_regex and failure_regex.search(line):
return None
else:
time.sleep(self._WAIT_TIME)
def FindAll(self, message_regex, proc_id=None, thread_id=None, log_level=None,
component=None):
"""Finds all lines in the logcat that match the provided constraints.
Args:
message_regex: The regular expression that the <message> section must
match.
proc_id: The process ID to match. If None, matches any process ID.
thread_id: The thread ID to match. If None, matches any thread ID.
log_level: The log level to match. If None, matches any log level.
component: The component to match. If None, matches any component.
Raises:
LogcatMonitorCommandError when calling |FindAll| before recording logcat.
Yields:
A match object for each matching line in the logcat. The match object
will always contain, in addition to groups defined in |message_regex|,
the following named groups: 'date', 'time', 'proc_id', 'thread_id',
'log_level', 'component', and 'message'.
"""
if self._record_file is None:
raise LogcatMonitorCommandError(
'Must have recorded or be recording a logcat to call |FindAll|',
device_serial=str(self._adb))
if proc_id is None:
proc_id = r'\d+'
if thread_id is None:
thread_id = r'\d+'
if log_level is None:
log_level = r'[VDIWEF]'
if component is None:
component = r'[^\s:]+'
# pylint: disable=protected-access
threadtime_re = re.compile(
type(self)._THREADTIME_RE_FORMAT % (
proc_id, thread_id, log_level, component, message_regex))
with open(self._record_file.name, 'r') as f:
for line in f:
m = re.match(threadtime_re, line)
if m:
yield m
def _StartRecording(self):
"""Starts recording logcat to file.
Function spawns a thread that records logcat to file and will not die
until |StopRecording| is called.
"""
def record_to_file():
# Write the log with line buffering so the consumer sees each individual
# line.
with open(self._record_file.name, 'a', 1) as f:
for data in self._adb.Logcat(filter_specs=self._filter_specs,
logcat_format='threadtime'):
if self._stop_recording_event.isSet():
f.flush()
return
f.write(data + '\n')
self._stop_recording_event.clear()
if not self._record_thread:
self._record_thread = reraiser_thread.ReraiserThread(record_to_file)
self._record_thread.start()
def _StopRecording(self):
"""Finish recording logcat."""
if self._record_thread:
self._stop_recording_event.set()
self._record_thread.join()
self._record_thread.ReraiseIfException()
self._record_thread = None
def Start(self):
"""Starts the logcat monitor.
Clears the logcat if |clear| was set in |__init__|.
"""
if self._clear:
self._adb.Logcat(clear=True)
if not self._record_file:
self._record_file = tempfile.NamedTemporaryFile()
self._StartRecording()
def Stop(self):
"""Stops the logcat monitor.
Stops recording the logcat. Copies currently recorded logcat to
|self._output_file|.
"""
self._StopRecording()
if self._record_file and self._output_file:
try:
os.makedirs(os.path.dirname(self._output_file))
except OSError as e:
if e.errno != errno.EEXIST:
raise
shutil.copy(self._record_file.name, self._output_file)
def Close(self):
"""Closes logcat recording file.
Should be called when finished using the logcat monitor.
"""
if self._record_file:
self._record_file.close()
self._record_file = None
def __enter__(self):
"""Starts the logcat monitor."""
self.Start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Stops the logcat monitor."""
self.Stop()
def __del__(self):
"""Closes logcat recording file in case |Close| was never called."""
if self._record_file:
logging.warning('Need to call |Close| on the logcat monitor when done!')
self._record_file.close()
class LogcatMonitorCommandError(device_errors.CommandFailedError):
"""Exception for errors with logcat monitor commands."""
pass
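# Hedged usage sketch (not part of the original module). Assumes `adb` is an
# adb_wrapper.AdbWrapper instance, as required by LogcatMonitor.__init__;
# the 'MyApp' tag and the regex are placeholders.
#
#   with LogcatMonitor(adb, filter_specs=['MyApp:V']) as monitor:
#     # ... trigger device activity that logs 'ready' under the MyApp tag ...
#     match = monitor.WaitFor(r'ready', timeout=30)
#   monitor.Close()  # release the temporary record file when done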
|
|
import pdb
import numpy as np
import math
import time
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import cuda
from util import gaussian_kl_divergence_standard
from util import gaussian_logp
from util import gaussian_logp0
from util import bernoulli_logp
class VAE(chainer.Chain):
def __init__(self, dim_in, dim_hidden, dim_latent, num_layers, temperature, num_zsamples=1):
super(VAE, self).__init__()
# initialise first encoder and decoder hidden layer separately because
# the input and output dims differ from the other hidden layers
# auxiliary variable
self.qlina0 = L.Linear(dim_in, dim_hidden)
self.plina0 = L.Linear(dim_latent, dim_hidden)
self._children.append('qlina0')
self._children.append('plina0')
# z and x variable
self.qlinz0 = L.Linear(dim_in+dim_latent, dim_hidden)
self.plinx0 = L.Linear(dim_latent+dim_latent, dim_hidden)
self._children.append('qlinz0')
self._children.append('plinx0')
# Set up the auxiliary inference model q(a|x) and the latent inference model q(z|a,x)
for i in range(num_layers-1):
# encoder for a
layer_name = 'qlina' + str(i+1)
setattr(self, layer_name, L.Linear(2*dim_hidden, dim_hidden))
self._children.append(layer_name)
# decoder for a
layer_name = 'plina' + str(i+1)
setattr(self, layer_name, L.Linear(2*dim_hidden, dim_hidden))
self._children.append(layer_name)
# encoder for z
layer_name = 'qlinz' + str(i+1)
setattr(self, layer_name, L.Linear(2*dim_hidden, dim_hidden))
self._children.append(layer_name)
# decoder for z
layer_name = 'plinx' + str(i+1)
setattr(self, layer_name, L.Linear(2*dim_hidden, dim_hidden))
self._children.append(layer_name)
# initialise the encoder and decoder output layer separately because
# the input and output dims differ from the other hidden layers
self.qlina_mu = L.Linear(2*dim_hidden, dim_latent)
self.qlina_ln_var = L.Linear(2*dim_hidden, dim_latent)
self.qlinz_mu = L.Linear(2*dim_hidden, dim_latent)
self.qlinz_ln_var = L.Linear(2*dim_hidden, dim_latent)
self.plina_mu = L.Linear(2*dim_hidden, dim_latent)
self.plina_ln_var = L.Linear(2*dim_hidden, dim_latent)
self.plinx_ber_prob = L.Linear(2*dim_hidden, dim_in)
self._children.append('qlina_mu')
self._children.append('qlina_ln_var')
self._children.append('qlinz_mu')
self._children.append('qlinz_ln_var')
self._children.append('plina_mu')
self._children.append('plina_ln_var')
self._children.append('plinx_ber_prob')
self.num_layers = num_layers
self.temperature = temperature
self.num_zsamples = num_zsamples
self.epochs_seen = 0
def encode_a(self, x):
a_params = F.crelu(self.qlina0(x))
for i in range(self.num_layers-1):
layer_name = 'qlina' + str(i+1)
a_params = F.crelu(self[layer_name](a_params))
self.qmu_a = self.qlina_mu(a_params)
self.qln_var_a = self.qlina_ln_var(a_params)
return self.qmu_a, self.qln_var_a
def encode_z(self, x, a):
        # a = F.gaussian(self.qmu_a, self.qln_var_a)  # This should be done outside the encoding function; pass `a` in as an argument instead.
net_input = F.concat((x,a), axis=1)
h = F.crelu(self.qlinz0(net_input))
for i in range(self.num_layers-1):
layer_name = 'qlinz' + str(i+1)
h = F.crelu(self[layer_name](h))
self.qmu_z = self.qlinz_mu(h)
self.qln_var_z = self.qlinz_ln_var(h)
return self.qmu_z, self.qln_var_z
def decode_a(self, z):
# net_input = F.concat((x,z), axis=1)
h = F.crelu(self.plina0(z))
for i in range(self.num_layers-1):
layer_name = 'plina' + str(i+1)
h = F.crelu(self[layer_name](h))
self.pmu_a = self.plina_mu(h)
self.pln_var_a = self.plina_ln_var(h)
return self.pmu_a, self.pln_var_a
def decode(self,z):
# pdb.set_trace()
a = self.a_enc
# If this function is coming from the sampling call, the batch size of z and a won't match. Manually handle that here.
if (a.shape[0]!=z.shape[0]):
a.volatile = 'ON'
batch_size = z.shape[0]
a.data = a.data[0:batch_size,:]
net_input = F.concat((z,a), axis=1)
h = F.crelu(self.plinx0(net_input))
for i in range(self.num_layers-1):
layer_name = 'plinx' + str(i+1)
h = F.crelu(self[layer_name](h))
self.p_ber_prob_logit = self.plinx_ber_prob(h)
return self.p_ber_prob_logit
def __call__(self, x):
# Compute parameters for q(z|x, a)
encoding_time_1 = time.time()
qmu_a, qln_var_a = self.encode_a(x)
encoding_time_1 = float(time.time() - encoding_time_1)
a_enc = F.gaussian(qmu_a, qln_var_a)
self.a_enc = a_enc
encoding_time_2 = time.time()
qmu_z, qln_var_z = self.encode_z(x, a_enc)
encoding_time_2 = float(time.time() - encoding_time_2)
encoding_time = encoding_time_1 + encoding_time_2
decoding_time_average = 0.
self.kl = 0
self.logp = 0
logp_a_z = 0
logp_x_az = 0
logp_z = 0
logq_a_x = 0
logq_z_ax = 0
current_temperature = min(self.temperature['value'],1.0)
self.temperature['value'] += self.temperature['increment']
for j in xrange(self.num_zsamples):
# z ~ q(z|x, a)
z = F.gaussian(self.qmu_z, self.qln_var_z)
# Compute p(x|z)
decoding_time = time.time()
pmu_a, pln_var_a = self.decode_a(z)
p_ber_prob_logit = self.decode(z)
decoding_time = time.time() - decoding_time
decoding_time_average += decoding_time
logp_a_z += gaussian_logp(a_enc, pmu_a, pln_var_a)
logp_x_az += bernoulli_logp(x, p_ber_prob_logit)
logp_z += current_temperature*gaussian_logp0(z)
logq_a_x += gaussian_logp(a_enc, qmu_a, qln_var_a)
logq_z_ax += current_temperature*gaussian_logp(z, qmu_z, qln_var_z)
logp_a_z /= self.num_zsamples
logp_x_az /= self.num_zsamples
logp_z /= self.num_zsamples
logq_a_x /= self.num_zsamples
logq_z_ax /= self.num_zsamples
decoding_time_average /= self.num_zsamples
self.logp /= self.num_zsamples
self.obj_batch = logp_a_z + logp_x_az + logp_z - logq_a_x - logq_z_ax
self.kl = logq_z_ax - logp_z
self.logp = logp_x_az
self.timing_info = np.array([encoding_time,decoding_time_average])
batch_size = self.obj_batch.shape[0]
self.obj = -F.sum(self.obj_batch)/batch_size
return self.obj
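# Hedged usage sketch (not part of the original file): one training step with
# a chainer optimizer. `x_batch` and the constructor arguments are placeholders.
#
#   model = VAE(dim_in=784, dim_hidden=500, dim_latent=50, num_layers=2,
#               temperature={'value': 0.0, 'increment': 1e-4})
#   optimizer = chainer.optimizers.Adam()
#   optimizer.setup(model)
#   loss = model(x_batch)   # returns the negative ELBO averaged over the batch
#   model.cleargrads()
#   loss.backward()
#   optimizer.update()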
|
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import uuid
import os
from knack.log import get_logger
from azure.cli.core.commands import LongRunningOperation
from azure.cli.command_modules.vm.custom import set_vm, _compute_client_factory, _is_linux_os
from azure.cli.command_modules.vm._vm_utils import get_key_vault_base_url, create_keyvault_data_plane_client
_DATA_VOLUME_TYPE = 'DATA'
_ALL_VOLUME_TYPE = 'ALL'
_STATUS_ENCRYPTED = 'Encrypted'
logger = get_logger(__name__)
vm_extension_info = {
'Linux': {
'publisher': os.environ.get('ADE_TEST_EXTENSION_PUBLISHER') or 'Microsoft.Azure.Security',
'name': os.environ.get('ADE_TEST_EXTENSION_NAME') or 'AzureDiskEncryptionForLinux',
'version': '1.1',
'legacy_version': '0.1'
},
'Windows': {
'publisher': os.environ.get('ADE_TEST_EXTENSION_PUBLISHER') or 'Microsoft.Azure.Security',
'name': os.environ.get('ADE_TEST_EXTENSION_NAME') or 'AzureDiskEncryption',
'version': '2.2',
'legacy_version': '1.1'
}
}
def _find_existing_ade(vm, use_instance_view=False, ade_ext_info=None):
if not ade_ext_info:
ade_ext_info = vm_extension_info['Linux'] if _is_linux_os(vm) else vm_extension_info['Windows']
if use_instance_view:
exts = vm.instance_view.extensions or []
r = next((e for e in exts if e.type and e.type.lower().startswith(ade_ext_info['publisher'].lower()) and
e.name.lower() == ade_ext_info['name'].lower()), None)
else:
exts = vm.resources or []
r = next((e for e in exts if (e.publisher.lower() == ade_ext_info['publisher'].lower() and
e.type_properties_type.lower() == ade_ext_info['name'].lower())), None)
return r
def _detect_ade_status(vm):
if vm.storage_profile.os_disk.encryption_settings:
return False, True
ade_ext_info = vm_extension_info['Linux'] if _is_linux_os(vm) else vm_extension_info['Windows']
ade = _find_existing_ade(vm, ade_ext_info=ade_ext_info)
if ade is None:
return False, False
if ade.type_handler_version.split('.')[0] == ade_ext_info['legacy_version'].split('.')[0]:
return False, True
return True, False # we believe impossible to have both old & new ADE
def encrypt_vm(cmd, resource_group_name, vm_name, # pylint: disable=too-many-locals, too-many-statements
disk_encryption_keyvault,
aad_client_id=None,
aad_client_secret=None, aad_client_cert_thumbprint=None,
key_encryption_keyvault=None,
key_encryption_key=None,
key_encryption_algorithm='RSA-OAEP',
volume_type=None,
encrypt_format_all=False,
force=False):
from msrestazure.tools import parse_resource_id
from knack.util import CLIError
# pylint: disable=no-member
compute_client = _compute_client_factory(cmd.cli_ctx)
vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
is_linux = _is_linux_os(vm)
backup_encryption_settings = vm.storage_profile.os_disk.encryption_settings
vm_encrypted = backup_encryption_settings.enabled if backup_encryption_settings else False
_, has_old_ade = _detect_ade_status(vm)
use_new_ade = not aad_client_id and not has_old_ade
extension = vm_extension_info['Linux' if is_linux else 'Windows']
if not use_new_ade and not aad_client_id:
raise CLIError('Please provide --aad-client-id')
# 1. First validate arguments
if not use_new_ade and not aad_client_cert_thumbprint and not aad_client_secret:
raise CLIError('Please provide either --aad-client-cert-thumbprint or --aad-client-secret')
if volume_type is None:
if not is_linux:
volume_type = _ALL_VOLUME_TYPE
elif vm.storage_profile.data_disks:
raise CLIError('VM has data disks, please supply --volume-type')
else:
volume_type = 'OS'
# sequence_version should be unique
sequence_version = uuid.uuid4()
# retrieve keyvault details
disk_encryption_keyvault_url = get_key_vault_base_url(
cmd.cli_ctx, (parse_resource_id(disk_encryption_keyvault))['name'])
# disk encryption key itself can be further protected, so let us verify
if key_encryption_key:
key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault
# to avoid bad server errors, ensure the vault has the right configurations
_verify_keyvault_good_for_encryption(cmd.cli_ctx, disk_encryption_keyvault, key_encryption_keyvault, vm, force)
# if key name and not key url, get url.
if key_encryption_key and '://' not in key_encryption_key: # if key name and not key url
key_encryption_key = _get_keyvault_key_url(
cmd.cli_ctx, (parse_resource_id(key_encryption_keyvault))['name'], key_encryption_key)
# 2. we are ready to provision/update the disk encryption extensions
# The following logic was mostly ported from xplat-cli
public_config = {
'KeyVaultURL': disk_encryption_keyvault_url,
'VolumeType': volume_type,
'EncryptionOperation': 'EnableEncryption' if not encrypt_format_all else 'EnableEncryptionFormatAll',
'KeyEncryptionKeyURL': key_encryption_key,
'KeyEncryptionAlgorithm': key_encryption_algorithm,
'SequenceVersion': sequence_version,
}
if use_new_ade:
public_config.update({
"KeyVaultResourceId": disk_encryption_keyvault,
"KekVaultResourceId": key_encryption_keyvault if key_encryption_key else '',
})
else:
public_config.update({
'AADClientID': aad_client_id,
'AADClientCertThumbprint': aad_client_cert_thumbprint,
})
ade_legacy_private_config = {
'AADClientSecret': aad_client_secret if is_linux else (aad_client_secret or '')
}
VirtualMachineExtension, DiskEncryptionSettings, KeyVaultSecretReference, KeyVaultKeyReference, SubResource = \
cmd.get_models('VirtualMachineExtension', 'DiskEncryptionSettings', 'KeyVaultSecretReference',
'KeyVaultKeyReference', 'SubResource')
ext = VirtualMachineExtension(
location=vm.location, # pylint: disable=no-member
publisher=extension['publisher'],
type_properties_type=extension['name'],
protected_settings=None if use_new_ade else ade_legacy_private_config,
type_handler_version=extension['version'] if use_new_ade else extension['legacy_version'],
settings=public_config,
auto_upgrade_minor_version=True)
poller = compute_client.virtual_machine_extensions.begin_create_or_update(
resource_group_name, vm_name, extension['name'], ext)
LongRunningOperation(cmd.cli_ctx)(poller)
poller.result()
# verify the extension was ok
extension_result = compute_client.virtual_machine_extensions.get(
resource_group_name, vm_name, extension['name'], 'instanceView')
if extension_result.provisioning_state != 'Succeeded':
raise CLIError('Extension needed for disk encryption was not provisioned correctly')
if not use_new_ade:
if not (extension_result.instance_view.statuses and
extension_result.instance_view.statuses[0].message):
raise CLIError('Could not find url pointing to the secret for disk encryption')
# 3. update VM's storage profile with the secrets
status_url = extension_result.instance_view.statuses[0].message
vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
secret_ref = KeyVaultSecretReference(secret_url=status_url,
source_vault=SubResource(id=disk_encryption_keyvault))
key_encryption_key_obj = None
if key_encryption_key:
key_encryption_key_obj = KeyVaultKeyReference(key_url=key_encryption_key,
source_vault=SubResource(id=key_encryption_keyvault))
disk_encryption_settings = DiskEncryptionSettings(disk_encryption_key=secret_ref,
key_encryption_key=key_encryption_key_obj,
enabled=True)
if vm_encrypted:
# stop the vm before update if the vm is already encrypted
logger.warning("Deallocating the VM before updating encryption settings...")
compute_client.virtual_machines.deallocate(resource_group_name, vm_name).result()
vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings
set_vm(cmd, vm)
if vm_encrypted:
# and start after the update
logger.warning("Restarting the VM after the update...")
compute_client.virtual_machines.start(resource_group_name, vm_name).result()
if is_linux and volume_type != _DATA_VOLUME_TYPE:
old_ade_msg = "If you see 'VMRestartPending', please restart the VM, and the encryption will finish shortly"
logger.warning("The encryption request was accepted. Please use 'show' command to monitor "
"the progress. %s", "" if use_new_ade else old_ade_msg)
def decrypt_vm(cmd, resource_group_name, vm_name, volume_type=None, force=False):
from knack.util import CLIError
compute_client = _compute_client_factory(cmd.cli_ctx)
vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
has_new_ade, has_old_ade = _detect_ade_status(vm)
if not has_new_ade and not has_old_ade:
logger.warning('Azure Disk Encryption is not enabled')
return
is_linux = _is_linux_os(vm)
# pylint: disable=no-member
# 1. be nice, figure out the default volume type and also verify VM will not be busted
if is_linux:
if volume_type:
if not force and volume_type != _DATA_VOLUME_TYPE:
raise CLIError("Only Data disks can have encryption disabled in a Linux VM. "
"Use '--force' to ignore the warning")
else:
volume_type = _DATA_VOLUME_TYPE
elif volume_type is None:
volume_type = _ALL_VOLUME_TYPE
extension = vm_extension_info['Linux' if is_linux else 'Windows']
    # sequence_version must differ from the one used by the earlier encryption,
    # so generate a fresh value
sequence_version = uuid.uuid4()
# 2. update the disk encryption extension
# The following logic was mostly ported from xplat-cli
public_config = {
'VolumeType': volume_type,
'EncryptionOperation': 'DisableEncryption',
'SequenceVersion': sequence_version,
}
VirtualMachineExtension, DiskEncryptionSettings = cmd.get_models(
'VirtualMachineExtension', 'DiskEncryptionSettings')
ext = VirtualMachineExtension(
location=vm.location, # pylint: disable=no-member
publisher=extension['publisher'],
virtual_machine_extension_type=extension['name'],
type_handler_version=extension['version'] if has_new_ade else extension['legacy_version'],
settings=public_config,
auto_upgrade_minor_version=True)
poller = compute_client.virtual_machine_extensions.begin_create_or_update(resource_group_name,
vm_name,
extension['name'], ext)
LongRunningOperation(cmd.cli_ctx)(poller)
poller.result()
extension_result = compute_client.virtual_machine_extensions.get(resource_group_name, vm_name,
extension['name'],
'instanceView')
if extension_result.provisioning_state != 'Succeeded':
raise CLIError("Extension updating didn't succeed")
if not has_new_ade:
# 3. Remove the secret from VM's storage profile
vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
disk_encryption_settings = DiskEncryptionSettings(enabled=False)
vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings
set_vm(cmd, vm)
def _show_vm_encryption_status_thru_new_ade(vm_instance_view):
ade = _find_existing_ade(vm_instance_view, use_instance_view=True)
disk_infos = []
for div in vm_instance_view.instance_view.disks or []:
disk_infos.append({
'name': div.name,
'encryptionSettings': div.encryption_settings,
'statuses': [x for x in (div.statuses or []) if (x.code or '').startswith('EncryptionState')],
})
return {
'status': ade.statuses if ade else None,
'substatus': ade.substatuses if ade else None,
'disks': disk_infos
}
def show_vm_encryption_status(cmd, resource_group_name, vm_name):
encryption_status = {
'osDisk': 'NotEncrypted',
'osDiskEncryptionSettings': None,
'dataDisk': 'NotEncrypted',
'osType': None
}
compute_client = _compute_client_factory(cmd.cli_ctx)
vm = compute_client.virtual_machines.get(resource_group_name, vm_name, 'instanceView')
has_new_ade, has_old_ade = _detect_ade_status(vm)
if not has_new_ade and not has_old_ade:
logger.warning('Azure Disk Encryption is not enabled')
return None
if has_new_ade:
return _show_vm_encryption_status_thru_new_ade(vm)
is_linux = _is_linux_os(vm)
# pylint: disable=no-member
# The following logic was mostly ported from xplat-cli
os_type = 'Linux' if is_linux else 'Windows'
encryption_status['osType'] = os_type
extension = vm_extension_info[os_type]
extension_result = compute_client.virtual_machine_extensions.get(resource_group_name,
vm_name,
extension['name'],
'instanceView')
logger.debug(extension_result)
if extension_result.instance_view and extension_result.instance_view.statuses:
encryption_status['progressMessage'] = extension_result.instance_view.statuses[0].message
substatus_message = None
if getattr(extension_result.instance_view, 'substatuses', None):
substatus_message = extension_result.instance_view.substatuses[0].message
encryption_status['osDiskEncryptionSettings'] = vm.storage_profile.os_disk.encryption_settings
import json
if is_linux:
try:
message_object = json.loads(substatus_message)
except Exception: # pylint: disable=broad-except
message_object = None # might be from outdated extension
if message_object and ('os' in message_object):
encryption_status['osDisk'] = message_object['os']
else:
encryption_status['osDisk'] = 'Unknown'
if message_object and 'data' in message_object:
encryption_status['dataDisk'] = message_object['data']
else:
encryption_status['dataDisk'] = 'Unknown'
else:
# Windows - get os and data volume encryption state from the vm model
if (encryption_status['osDiskEncryptionSettings'] and
encryption_status['osDiskEncryptionSettings'].enabled and
encryption_status['osDiskEncryptionSettings'].disk_encryption_key and
encryption_status['osDiskEncryptionSettings'].disk_encryption_key.secret_url):
encryption_status['osDisk'] = _STATUS_ENCRYPTED
else:
encryption_status['osDisk'] = 'Unknown'
if extension_result.provisioning_state == 'Succeeded':
volume_type = extension_result.settings.get('VolumeType', None)
about_data_disk = not volume_type or volume_type.lower() != 'os'
if about_data_disk and extension_result.settings.get('EncryptionOperation', None) == 'EnableEncryption':
encryption_status['dataDisk'] = _STATUS_ENCRYPTED
return encryption_status
def _get_keyvault_key_url(cli_ctx, keyvault_name, key_name):
client = create_keyvault_data_plane_client(cli_ctx)
result = client.get_key(get_key_vault_base_url(cli_ctx, keyvault_name), key_name, '')
return result.key.kid # pylint: disable=no-member
def _handles_default_volume_type_for_vmss_encryption(is_linux, volume_type, force):
if is_linux:
volume_type = volume_type or _DATA_VOLUME_TYPE
if volume_type != _DATA_VOLUME_TYPE:
            msg = 'OS disk encryption is not yet supported for Linux VM scale sets'
if force:
logger.warning(msg)
else:
from knack.util import CLIError
raise CLIError(msg)
else:
volume_type = volume_type or _ALL_VOLUME_TYPE
return volume_type
def encrypt_vmss(cmd, resource_group_name, vmss_name, # pylint: disable=too-many-locals, too-many-statements
disk_encryption_keyvault,
key_encryption_keyvault=None,
key_encryption_key=None,
key_encryption_algorithm='RSA-OAEP',
volume_type=None,
force=False):
from msrestazure.tools import parse_resource_id
# pylint: disable=no-member
UpgradeMode, VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = cmd.get_models(
'UpgradeMode', 'VirtualMachineScaleSetExtension', 'VirtualMachineScaleSetExtensionProfile')
compute_client = _compute_client_factory(cmd.cli_ctx)
vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
is_linux = _is_linux_os(vmss.virtual_machine_profile)
extension = vm_extension_info['Linux' if is_linux else 'Windows']
# 1. First validate arguments
volume_type = _handles_default_volume_type_for_vmss_encryption(is_linux, volume_type, force)
# retrieve keyvault details
disk_encryption_keyvault_url = get_key_vault_base_url(cmd.cli_ctx,
(parse_resource_id(disk_encryption_keyvault))['name'])
# disk encryption key itself can be further protected, so let us verify
if key_encryption_key:
key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault
# to avoid bad server errors, ensure the vault has the right configurations
_verify_keyvault_good_for_encryption(cmd.cli_ctx, disk_encryption_keyvault, key_encryption_keyvault, vmss, force)
# if key name and not key url, get url.
if key_encryption_key and '://' not in key_encryption_key:
key_encryption_key = _get_keyvault_key_url(
cmd.cli_ctx, (parse_resource_id(key_encryption_keyvault))['name'], key_encryption_key)
# 2. we are ready to provision/update the disk encryption extensions
public_config = {
'KeyVaultURL': disk_encryption_keyvault_url,
'KeyEncryptionKeyURL': key_encryption_key or '',
"KeyVaultResourceId": disk_encryption_keyvault,
"KekVaultResourceId": key_encryption_keyvault if key_encryption_key else '',
'KeyEncryptionAlgorithm': key_encryption_algorithm if key_encryption_key else '',
'VolumeType': volume_type,
'EncryptionOperation': 'EnableEncryption'
}
ext = VirtualMachineScaleSetExtension(name=extension['name'],
publisher=extension['publisher'],
type_properties_type=extension['name'],
type_handler_version=extension['version'],
settings=public_config,
auto_upgrade_minor_version=True,
force_update_tag=uuid.uuid4())
exts = [ext]
# remove any old ade extensions set by this command and add the new one.
vmss_ext_profile = vmss.virtual_machine_profile.extension_profile
if vmss_ext_profile and vmss_ext_profile.extensions:
exts.extend(old_ext for old_ext in vmss.virtual_machine_profile.extension_profile.extensions
if old_ext.type != ext.type or old_ext.name != ext.name)
vmss.virtual_machine_profile.extension_profile = VirtualMachineScaleSetExtensionProfile(extensions=exts)
# Avoid unnecessary permission error
vmss.virtual_machine_profile.storage_profile.image_reference = None
poller = compute_client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name, vmss_name, vmss)
LongRunningOperation(cmd.cli_ctx)(poller)
_show_post_action_message(resource_group_name, vmss.name, vmss.upgrade_policy.mode == UpgradeMode.manual, True)
def decrypt_vmss(cmd, resource_group_name, vmss_name, volume_type=None, force=False):
UpgradeMode, VirtualMachineScaleSetExtension = cmd.get_models('UpgradeMode', 'VirtualMachineScaleSetExtension')
compute_client = _compute_client_factory(cmd.cli_ctx)
vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
is_linux = _is_linux_os(vmss.virtual_machine_profile)
extension = vm_extension_info['Linux' if is_linux else 'Windows']
# 1. be nice, figure out the default volume type
volume_type = _handles_default_volume_type_for_vmss_encryption(is_linux, volume_type, force)
# 2. update the disk encryption extension
public_config = {
'VolumeType': volume_type,
'EncryptionOperation': 'DisableEncryption',
}
ext = VirtualMachineScaleSetExtension(name=extension['name'],
publisher=extension['publisher'],
type_properties_type=extension['name'],
type_handler_version=extension['version'],
settings=public_config,
auto_upgrade_minor_version=True,
force_update_tag=uuid.uuid4())
if (not vmss.virtual_machine_profile.extension_profile or
not vmss.virtual_machine_profile.extension_profile.extensions):
extensions = []
else:
extensions = vmss.virtual_machine_profile.extension_profile.extensions
ade_extension = [x for x in extensions if
x.type_properties_type.lower() == extension['name'].lower() and x.publisher.lower() == extension['publisher'].lower()] # pylint: disable=line-too-long
if not ade_extension:
from knack.util import CLIError
raise CLIError("VM scale set '{}' was not encrypted".format(vmss_name))
index = vmss.virtual_machine_profile.extension_profile.extensions.index(ade_extension[0])
vmss.virtual_machine_profile.extension_profile.extensions[index] = ext
# Avoid unnecessary permission error
vmss.virtual_machine_profile.storage_profile.image_reference = None
poller = compute_client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name, vmss_name, vmss)
LongRunningOperation(cmd.cli_ctx)(poller)
_show_post_action_message(resource_group_name, vmss.name, vmss.upgrade_policy.mode == UpgradeMode.manual, False)
def _show_post_action_message(resource_group_name, vmss_name, manual_mode, enable):
    msg = ''
    if manual_mode:
msg = ("With manual upgrade mode, you will need to run 'az vmss update-instances -g {} -n {} "
"--instance-ids \"*\"' to propagate the change.\n".format(resource_group_name, vmss_name))
msg += ("Note, {} encryption will take a while to finish. Please query the status using "
"'az vmss encryption show -g {} -n {}'. For Linux VM, you will lose the access during the period".format(
'enabling' if enable else 'disabling', resource_group_name, vmss_name))
logger.warning(msg)
def show_vmss_encryption_status(cmd, resource_group_name, vmss_name):
client = _compute_client_factory(cmd.cli_ctx)
vm_instances = list(client.virtual_machine_scale_set_vms.list(resource_group_name, vmss_name,
select='instanceView', expand='instanceView'))
result = []
for instance in vm_instances:
view = instance.instance_view
disk_infos = []
vm_enc_info = {
'id': instance.id,
'disks': disk_infos
}
for div in view.disks:
disk_infos.append({
'name': div.name,
'encryptionSettings': div.encryption_settings,
'statuses': [x for x in (div.statuses or []) if (x.code or '').startswith('EncryptionState')]
})
result.append(vm_enc_info)
return result
def _verify_keyvault_good_for_encryption(cli_ctx, disk_vault_id, kek_vault_id, vm_or_vmss, force):
def _report_client_side_validation_error(msg):
if force:
logger.warning("WARNING: %s %s", msg, "Encryption might fail.")
else:
from knack.util import CLIError
raise CLIError("ERROR: {}".format(msg))
resource_type = "VMSS" if vm_or_vmss.type.lower().endswith("virtualmachinescalesets") else "VM"
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType
from msrestazure.tools import parse_resource_id
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_KEYVAULT).vaults
disk_vault_resource_info = parse_resource_id(disk_vault_id)
key_vault = client.get(disk_vault_resource_info['resource_group'], disk_vault_resource_info['name'])
# ensure vault has 'EnabledForDiskEncryption' permission
if not key_vault.properties or not key_vault.properties.enabled_for_disk_encryption:
_report_client_side_validation_error("Keyvault '{}' is not enabled for disk encryption.".format(
disk_vault_resource_info['resource_name']))
if kek_vault_id:
kek_vault_info = parse_resource_id(kek_vault_id)
if disk_vault_resource_info['name'].lower() != kek_vault_info['name'].lower():
client.get(kek_vault_info['resource_group'], kek_vault_info['name'])
    # verify the subscription matches
vm_vmss_resource_info = parse_resource_id(vm_or_vmss.id)
if vm_vmss_resource_info['subscription'].lower() != disk_vault_resource_info['subscription'].lower():
_report_client_side_validation_error("{} {}'s subscription does not match keyvault's subscription."
.format(resource_type, vm_vmss_resource_info['name']))
# verify region matches
if key_vault.location.replace(' ', '').lower() != vm_or_vmss.location.replace(' ', '').lower():
_report_client_side_validation_error(
"{} {}'s region does not match keyvault's region.".format(resource_type, vm_vmss_resource_info['name']))
|
|
#!/usr/bin/python
# 1. Iterate through all the image pairs and triangulate the match points.
# 2. Set the 3d location of features to triangulated position (possibly
#    averaged if the feature is included in multiple matches)
# 3. Compute new camera poses with solvePnP() using triangulated point locations
# 4. Repeat
import sys
sys.path.insert(0, "/usr/local/lib/python2.7/site-packages/")
import argparse
import commands
import cPickle as pickle
import cv2
import fnmatch
import json
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import os.path
from progress.bar import Bar
import scipy.spatial
sys.path.append('../lib')
import Matcher
import Pose
import ProjectMgr
import SRTM
import transformations
# constants
d2r = math.pi / 180.0
parser = argparse.ArgumentParser(description='Keypoint projection.')
parser.add_argument('--project', required=True, help='project directory')
parser.add_argument('--strategy', default='my_triangulate',
choices=['my_triangulate', 'triangulate', 'dem'], help='projection strategy')
parser.add_argument('--iterations', type=int, help='stop after this many solver iterations')
parser.add_argument('--target-mre', type=float, help='stop when mre meets this threshold')
parser.add_argument('--plot', action='store_true', help='plot the solution state')
args = parser.parse_args()
proj = ProjectMgr.ProjectMgr(args.project)
proj.load_image_info()
proj.load_features()
proj.undistort_keypoints()
matches_direct = pickle.load( open( args.project + "/matches_direct", "rb" ) )
print "unique features:", len(matches_direct)
# compute keypoint usage map
proj.compute_kp_usage_new(matches_direct)
# setup SRTM ground interpolator
ref = proj.ned_reference_lla
sss = SRTM.NEDGround( ref, 2000, 2000, 30 )
start_mre = -1.0
# iterate through the matches list and triangulate the 3d location for
# all feature points, given the associated camera poses. Updates the
# point positions in matches_direct in place.
import LineSolver
def my_triangulate(matches_direct, cam_dict):
IK = np.linalg.inv( proj.cam.get_K() )
for match in matches_direct:
#print match
points = []
vectors = []
for m in match[1:]:
image = proj.image_list[m[0]]
cam2body = image.get_cam2body()
body2ned = image.rvec_to_body2ned(cam_dict[image.name]['rvec'])
uv_list = [ image.uv_list[m[1]] ] # just one uv element
vec_list = proj.projectVectors(IK, body2ned, cam2body, uv_list)
points.append( cam_dict[image.name]['ned'] )
vectors.append( vec_list[0] )
#print ' ', image.name
#print ' ', uv_list
#print ' ', vec_list
p = LineSolver.ls_lines_intersection(points, vectors, transpose=True).tolist()
#print p, p[0]
match[0] = [ p[0][0], p[1][0], p[2][0] ]
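# Hedged sketch of the least-squares intersection that LineSolver is assumed
# to implement: for rays p_i + t*d_i (unit d_i), the point x minimizing
# sum_i ||(I - d_i d_i^T)(x - p_i)||^2 solves a single 3x3 linear system.
#
#   def ls_lines_intersection_sketch(points, vectors):
#       A = np.zeros((3,3)); b = np.zeros(3)
#       for p, d in zip(points, vectors):
#           d = np.array(d, dtype=float); d /= np.linalg.norm(d)
#           P = np.eye(3) - np.outer(d, d)   # projector orthogonal to d
#           A += P; b += P.dot(np.array(p, dtype=float))
#       return np.linalg.solve(A, b)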
# iterate through the project image list and triangulate the 3d
# location for all feature points, given the current camera poses.
# Returns the matches_direct structure with updated point positions.
def triangulate(matches_direct, cam_dict):
IK = np.linalg.inv( proj.cam.get_K() )
match_pairs = proj.generate_match_pairs(matches_direct)
# zero the match NED coordinate and initialize the corresponding
# count array
counters = []
for match in matches_direct:
match[0] = np.array( [0.0, 0.0, 0.0] )
counters.append( 0)
for i, i1 in enumerate(proj.image_list):
#rvec1, tvec1 = i1.get_proj()
rvec1 = cam_dict[i1.name]['rvec']
tvec1 = cam_dict[i1.name]['tvec']
R1, jac = cv2.Rodrigues(rvec1)
PROJ1 = np.concatenate((R1, tvec1), axis=1)
for j, i2 in enumerate(proj.image_list):
matches = match_pairs[i][j]
if (j <= i) or (len(matches) == 0):
continue
# distance between two cameras
ned1 = np.array(cam_dict[i1.name]['ned'])
ned2 = np.array(cam_dict[i2.name]['ned'])
dist = np.linalg.norm(ned2 - ned1)
if dist < 40:
# idea: the closer together two poses are, the greater
# the triangulation error will be relative to small
                # attitude errors. If we only compare more distant
# camera views the solver will be more stable.
continue
#rvec2, tvec2 = i2.get_proj()
rvec2 = cam_dict[i2.name]['rvec']
tvec2 = cam_dict[i2.name]['tvec']
R2, jac = cv2.Rodrigues(rvec2)
PROJ2 = np.concatenate((R2, tvec2), axis=1)
uv1 = []; uv2 = []; indices = []
for pair in matches:
p1 = i1.kp_list[ pair[0] ].pt
p2 = i2.kp_list[ pair[1] ].pt
uv1.append( [p1[0], p1[1], 1.0] )
uv2.append( [p2[0], p2[1], 1.0] )
# pair[2] is the index back into the matches_direct structure
indices.append( pair[2] )
pts1 = IK.dot(np.array(uv1).T)
pts2 = IK.dot(np.array(uv2).T)
points = cv2.triangulatePoints(PROJ1, PROJ2, pts1[:2], pts2[:2])
points /= points[3]
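            # triangulatePoints returns 4xN homogeneous coordinates; dividing
            # by the fourth row converts them to Euclidean 3d points.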
#print "points:\n", points[0:3].T
# fixme: need to update result, sum_dict is no longer used
print "%s vs %s" % (i1.name, i2.name)
for k, p in enumerate(points[0:3].T):
match = matches_direct[indices[k]]
match[0] += p
counters[indices[k]] += 1
# divide each NED coordinate (sum of triangulated point locations)
# of matches_direct_dict by the count of references to produce an
# average NED coordinate for each match.
for i, match in enumerate(matches_direct):
if counters[i] > 0:
match[0] /= counters[i]
else:
print 'invalid match from images too close to each other:', match
for j in range(1, len(match)):
match[j] = [-1, -1]
# return the new match structure
return matches_direct
# Iterate through the project image list and run solvePnP on each
# image's feature set to derive new estimated camera locations
cam1 = []
def solvePnP(matches_direct):
# start with a clean slate
for image in proj.image_list:
image.img_pts = []
image.obj_pts = []
# build a new cam_dict that is a copy of the current one
cam_dict = {}
for image in proj.image_list:
cam_dict[image.name] = {}
rvec, tvec = image.get_proj()
ned, ypr, quat = image.get_camera_pose()
cam_dict[image.name]['rvec'] = rvec
cam_dict[image.name]['tvec'] = tvec
cam_dict[image.name]['ned'] = ned
# iterate through the match dictionary and build a per image list of
# obj_pts and img_pts
for match in matches_direct:
ned = match[0]
for p in match[1:]:
image = proj.image_list[ p[0] ]
kp = image.kp_list[ p[1] ]
image.img_pts.append( kp.pt )
image.obj_pts.append( ned )
camw, camh = proj.cam.get_image_params()
for image in proj.image_list:
# print image.name
if len(image.img_pts) < 4:
continue
scale = float(image.width) / float(camw)
K = proj.cam.get_K(scale)
rvec, tvec = image.get_proj()
(result, rvec, tvec) \
= cv2.solvePnP(np.float32(image.obj_pts),
np.float32(image.img_pts),
K, None,
rvec, tvec, useExtrinsicGuess=True)
# The idea of using the Ransac version of solvePnP() is to
# look past outliers instead of being affected by them. We
# don't use the outlier information at this point in the
# process for outlier rejection. However, it appears that
# this process leads to divergence, not convergence.
# (rvec, tvec, inliers) \
# = cv2.solvePnPRansac(np.float32(image.obj_pts),
# np.float32(image.img_pts),
# K, None,
# rvec, tvec, useExtrinsicGuess=True)
#print "rvec=", rvec
#print "tvec=", tvec
Rned2cam, jac = cv2.Rodrigues(rvec)
#print "Rraw (from SolvePNP):\n", Rraw
ned = image.camera_pose['ned']
#print "original ned = ", ned
#tvec = -np.matrix(R[:3,:3]) * np.matrix(ned).T
#print "tvec =", tvec
pos = -np.matrix(Rned2cam[:3,:3]).T * np.matrix(tvec)
newned = pos.T[0].tolist()[0]
#print "new ned =", newned
# Our Rcam matrix (in our ned coordinate system) is body2cam * Rned,
# so solvePnP returns this combination. We can extract Rned by
# premultiplying by cam2body aka inv(body2cam).
cam2body = image.get_cam2body()
Rned2body = cam2body.dot(Rned2cam)
#print "R (after M * R):\n", R
ypr = image.camera_pose['ypr']
#print "original ypr = ", ypr
Rbody2ned = np.matrix(Rned2body).T
IRo = transformations.euler_matrix(ypr[0]*d2r, ypr[1]*d2r, ypr[2]*d2r, 'rzyx')
IRq = transformations.quaternion_matrix(image.camera_pose['quat'])
#print "Original IR:\n", IRo
#print "Original IR (from quat)\n", IRq
#print "IR (from SolvePNP):\n", IR
(yaw, pitch, roll) = transformations.euler_from_matrix(Rbody2ned, 'rzyx')
#print "ypr =", [yaw/d2r, pitch/d2r, roll/d2r]
#image.set_camera_pose( pos.T[0].tolist(), [yaw/d2r, pitch/d2r, roll/d2r] )
#print "Proj =", np.concatenate((R, tvec), axis=1)
cam_dict[image.name] = {}
cam_dict[image.name]['rvec'] = rvec
cam_dict[image.name]['tvec'] = tvec
cam_dict[image.name]['ned'] = newned
return cam_dict
# return a 3d affine transformation between fitted camera locations and
# original camera locations.
def get_recenter_affine(cam_dict):
src = [[], [], [], []] # current camera locations
dst = [[], [], [], []] # original camera locations
for image in proj.image_list:
if image.feature_count > 0:
newned = cam_dict[image.name]['ned']
src[0].append(newned[0])
src[1].append(newned[1])
src[2].append(newned[2])
src[3].append(1.0)
origned, ypr, quat = image.get_camera_pose()
dst[0].append(origned[0])
dst[1].append(origned[1])
dst[2].append(origned[2])
dst[3].append(1.0)
#print image.name, '%s -> %s' % (origned, newned)
A = transformations.superimposition_matrix(src, dst, scale=True)
print "Affine 3D:\n", A
return A
# transform the camera ned positions with the provided affine matrix
# to keep all the camera poses best fitted to the original camera
# locations. Also rotate the camera poses by the rotational portion
# of the affine matrix to update the camera alignment.
def transform_cams(A, cam_dict):
# construct an array of camera positions
src = [[], [], [], []]
for image in proj.image_list:
new = cam_dict[image.name]['ned']
src[0].append(new[0])
src[1].append(new[1])
src[2].append(new[2])
src[3].append(1.0)
# extract the rotational portion of the affine matrix
scale, shear, angles, trans, persp = transformations.decompose_matrix(A)
R = transformations.euler_matrix(*angles)
#print "R:\n", R
    # fully transform the camera ned positions to best align with the
# original locations
update_cams = A.dot( np.array(src) )
#print update_cams[:3]
for i, p in enumerate(update_cams.T):
key = proj.image_list[i].name
if not key in cam_dict:
cam_dict[key] = {}
ned = [ p[0], p[1], p[2] ]
# print "ned:", ned
cam_dict[key]['ned'] = ned
# adjust the camera projection matrix (rvec) to rotate by the
# amount of the affine transformation as well
rvec = cam_dict[key]['rvec']
tvec = cam_dict[key]['tvec']
Rcam, jac = cv2.Rodrigues(rvec)
# print "Rcam:\n", Rcam
Rcam_new = R[:3,:3].dot(Rcam)
# print "Rcam_new:\n", Rcam_new
rvec, jac = cv2.Rodrigues(Rcam_new)
cam_dict[key]['rvec'] = rvec
tvec = -np.matrix(Rcam_new) * np.matrix(ned).T
cam_dict[key]['tvec'] = tvec
# transform all the match point locations
def transform_points( A, pts_dict ):
src = [[], [], [], []]
for key in pts_dict:
p = pts_dict[key]
src[0].append(p[0])
src[1].append(p[1])
src[2].append(p[2])
src[3].append(1.0)
dst = A.dot( np.array(src) )
result_dict = {}
for i, key in enumerate(pts_dict):
result_dict[key] = [ dst[0][i], dst[1][i], dst[2][i] ]
return result_dict
# mark items that exceed the cutoff reprojection error for deletion
def mark_outliers(result_list, cutoff, matches_direct):
print " marking outliers..."
mark_count = 0
for line in result_list:
# print "line:", line
if line[0] > cutoff:
print " outlier index %d-%d err=%.2f" % (line[1], line[2],
line[0])
#if args.show:
# draw_match(line[1], line[2])
match = matches_direct[line[1]]
match[line[2]+1] = [-1, -1]
mark_count += 1
# mark matches not referencing images in the main group
def mark_non_group(main_group, matches_direct):
# construct set of image indices in main_group
group_dict = {}
for image in main_group:
for i, i1 in enumerate(proj.image_list):
if image == i1:
group_dict[i] = True
#print 'group_dict:', group_dict
print " marking non group..."
mark_sum = 0
for match in matches_direct:
for j, p in enumerate(match[1:]):
if not p[0] in group_dict:
match[j+1] = [-1, -1]
mark_sum += 1
print 'marked:', mark_sum, 'matches for deletion'
# delete marked matches
def delete_marked_matches(matches_direct):
print " deleting marked items..."
for i in reversed(range(len(matches_direct))):
match = matches_direct[i]
has_bad_elem = False
for j in reversed(range(1, len(match))):
p = match[j]
if p == [-1, -1]:
has_bad_elem = True
match.pop(j)
if len(match) < 4:
print "deleting match that is now in less than 3 images:", match
matches_direct.pop(i)
# any image with fewer than 25 matches has all of its matches marked for
# deletion
def mark_weak_images(matches_direct):
# count how many features show up in each image
for i in proj.image_list:
i.feature_count = 0
for i, match in enumerate(matches_direct):
for j, p in enumerate(match[1:]):
if p[1] != [-1, -1]:
image = proj.image_list[ p[0] ]
image.feature_count += 1
# make a dict of all images with less than 25 feature matches
weak_dict = {}
for i, img in enumerate(proj.image_list):
if img.feature_count < 25:
weak_dict[i] = True
if img.feature_count > 0:
print 'new weak image:', img.name
img.feature_count = 0 # will be zero very soon
print 'weak images:', weak_dict
# mark any features in the weak images list
mark_sum = 0
for i, match in enumerate(matches_direct):
#print 'before:', match
for j, p in enumerate(match[1:]):
if p[0] in weak_dict:
match[j+1] = [-1, -1]
mark_sum += 1
#print 'after:', match
def plot(surface0, cam0, surface1, cam1):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xs = []; ys = []; zs = []
for p in surface0:
xs.append(p[0])
ys.append(p[1])
zs.append(p[2])
ax.scatter(np.array(xs), np.array(ys), np.array(zs), c='r', marker='.')
xs = []; ys = []; zs = []
for p in surface1:
xs.append(p[0])
ys.append(p[1])
zs.append(p[2])
ax.scatter(np.array(xs), np.array(ys), np.array(zs), c='b', marker='.')
xs = []; ys = []; zs = []
for p in cam0:
xs.append(p[0])
ys.append(p[1])
zs.append(p[2])
ax.scatter(np.array(xs), np.array(ys), np.array(zs), c='y', marker='^')
xs = []; ys = []; zs = []
for p in cam1:
xs.append(p[0])
ys.append(p[1])
zs.append(p[2])
ax.scatter(np.array(xs), np.array(ys), np.array(zs), c='b', marker='^')
plt.show()
# temporary testing ....
# match_pairs = proj.generate_match_pairs(matches_direct)
# group_list = Matcher.groupByConnections(proj.image_list, matches_direct, match_pairs)
# mark_non_group(group_list[0], matches_direct)
# quit()
# iterate through the match dictionary and build a simple list of
# starting surface points
surface0 = []
for match in matches_direct:
ned = match[0]
surface0.append( [ned[1], ned[0], -ned[2]] )
cam0 = []
for image in proj.image_list:
ned, ypr, quat = image.get_camera_pose()
cam0.append( [ned[1], ned[0], -ned[2]] )
# iterate through the image list and build the camera pose dictionary
# (and a simple list of camera locations for plotting)
# cam_dict = {}
# for image in proj.image_list:
# rvec, tvec, ned = image.get_proj()
# cam_dict[image.name] = {}
# cam_dict[image.name]['rvec'] = rvec
# cam_dict[image.name]['tvec'] = tvec
# cam_dict[image.name]['ned'] = ned
count = 0
while True:
# find the 'best fit' camera poses for the triangulation averaged
# together.
cam_dict = solvePnP(matches_direct)
# measure our current mean reprojection error and trim mre
# outliers from the match set (any points with mre 4x stddev) as
# well as any weak images with < 25 matches.
(result_list, mre, stddev) \
= proj.compute_reprojection_errors(cam_dict, matches_direct)
if start_mre < 0.0: start_mre = mre
print "mre = %.4f stddev = %.4f features = %d" % (mre, stddev, len(matches_direct))
cull_outliers = False
if cull_outliers:
mark_outliers(result_list, mre + stddev*4, matches_direct)
mark_weak_images(matches_direct)
delete_marked_matches(matches_direct)
# after outlier deletion, re-evalute matched pairs and connection
# cycles.
match_pairs = proj.generate_match_pairs(matches_direct)
group_list = Matcher.groupByConnections(proj.image_list, matches_direct, match_pairs)
mark_non_group(group_list[0], matches_direct)
delete_marked_matches(matches_direct)
else:
# keep accounting structures happy
mark_weak_images(matches_direct)
# get the affine transformation required to bring the new camera
    # locations back into a best fit with the original camera
# locations
A = get_recenter_affine(cam_dict)
# thought #1: if we are triangulating, this could be done once at the
# end to fix up the solution, not every iteration? But it doesn't
# seem to harm the triangulation.
# thought #2: if we are projecting onto the dem surface, we
# probably shouldn't transform the cams back to the original
# because this could perpetually pull things out of convergence
transform_cams(A, cam_dict)
if args.strategy == 'my_triangulate':
# run the triangulation step (modifies NED coordinates in
# place). This computes a best fit for all the feature
# locations based on the current best camera poses.
my_triangulate(matches_direct, cam_dict)
elif args.strategy == 'triangulate':
# run the triangulation step (modifies NED coordinates in
# place). This computes a best fit for all the feature
# locations based on the current best camera poses.
triangulate(matches_direct, cam_dict)
elif args.strategy == 'dem':
# project the keypoints back onto the DEM surface from the
# updated camera poses.
proj.fastProjectKeypointsTo3d(sss, cam_dict)
# estimate new world coordinates for each match point
for match in matches_direct:
sum = np.array( [0.0, 0.0, 0.0] )
for p in match[1:]:
sum += proj.image_list[ p[0] ].coord_list[ p[1] ]
ned = sum / len(match[1:])
# print "avg =", ned
match[0] = ned.tolist()
else:
print 'unknown triangulation strategy, script will probably fail to do anything useful'
surface1 = []
for match in matches_direct:
ned = match[0]
print ned
surface1.append( [ned[1], ned[0], -ned[2]] )
# transform all the feature points by the affine matrix (modifies
# matches_direct NED coordinates in place)
# fixme: transform_points(A, matches_direct)
# fixme: transform camera locations and orientations as well
# run solvePnP now on the updated points (hopefully this will
# naturally reorient the cameras as needed.)
# 9/6/2016: shouldn't be needed since transform_points() now rotates
# the camera orientation as well?
# cam_dict = solvePnP(newpts_dict)
cam1 = []
for key in cam_dict:
p = cam_dict[key]['ned']
cam1.append( [ p[1], p[0], -p[2] ] )
if args.plot:
plot(surface0, cam0, surface1, cam1)
count += 1
# test stop conditions
if args.iterations:
if count >= args.iterations:
print 'Stopping (by request) after', count, 'iterations.'
break
elif args.target_mre:
if mre <= args.target_mre:
print 'Stopping (by request) with mre:', mre
break
else:
print 'No stop condition specified, running one iteration and stopping.'
break
(result_list, mre, stddev) \
= proj.compute_reprojection_errors(cam_dict, matches_direct)
print 'Start mre:', start_mre, 'end mre:', mre
result = raw_input('Update matches and camera poses? (y/n):')
if result == 'y' or result == 'Y':
print 'Writing direct matches...'
pickle.dump(matches_direct, open(args.project+"/matches_direct", "wb"))
print 'Updating and saving camera poses...'
for image in proj.image_list:
pose = cam_dict[image.name]
Rned2cam, jac = cv2.Rodrigues(pose['rvec'])
pos = -np.matrix(Rned2cam[:3,:3]).T * np.matrix(pose['tvec'])
ned = pos.T[0].tolist()[0]
# Our Rcam matrix (in our ned coordinate system) is body2cam * Rned,
# so solvePnP returns this combination. We can extract Rned by
# premultiplying by cam2body aka inv(body2cam).
cam2body = image.get_cam2body()
Rned2body = cam2body.dot(Rned2cam)
Rbody2ned = np.matrix(Rned2body).T
(yaw, pitch, roll) \
= transformations.euler_from_matrix(Rbody2ned, 'rzyx')
# print "ypr =", [yaw/d2r, pitch/d2r, roll/d2r]
print 'orig:', image.get_camera_pose()
image.set_camera_pose( ned, [yaw/d2r, pitch/d2r, roll/d2r] )
print 'new: ', image.get_camera_pose()
image.save_meta()
|
|
from __future__ import annotations
import asyncio
import logging
import uuid
from collections import defaultdict, deque
from collections.abc import Container
from math import log2
from time import time
from tlz import topk
from tornado.ioloop import PeriodicCallback
import dask
from dask.utils import parse_timedelta
from .comm.addressing import get_address_host
from .core import CommClosedError, Status
from .diagnostics.plugin import SchedulerPlugin
from .utils import log_errors, recursive_to_dict
# Stealing requires multiple network bounces and if successful also task
# submission which may include code serialization. Therefore, be very
# conservative in the latency estimation to suppress too aggressive stealing
# of small tasks
LATENCY = 0.1
logger = logging.getLogger(__name__)
LOG_PDB = dask.config.get("distributed.admin.pdb-on-err")
_WORKER_STATE_CONFIRM = {
"ready",
"constrained",
"waiting",
}
_WORKER_STATE_REJECT = {
"memory",
"executing",
"long-running",
"cancelled",
"resumed",
}
_WORKER_STATE_UNDEFINED = {
"released",
None,
}
class WorkStealing(SchedulerPlugin):
def __init__(self, scheduler):
self.scheduler = scheduler
# { level: { task states } }
self.stealable_all = [set() for i in range(15)]
# { worker: { level: { task states } } }
self.stealable = dict()
# { task state: (worker, level) }
self.key_stealable = dict()
self.cost_multipliers = [1 + 2 ** (i - 6) for i in range(15)]
self.cost_multipliers[0] = 1
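        # cost_multipliers[i] = 1 + 2**(i - 6), so level 6 doubles the cost and
        # level 14 multiplies it by 257; level 0 is pinned to exactly 1.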
for worker in scheduler.workers:
self.add_worker(worker=worker)
self._callback_time = parse_timedelta(
dask.config.get("distributed.scheduler.work-stealing-interval"),
default="ms",
)
# `callback_time` is in milliseconds
self.scheduler.add_plugin(self)
self.scheduler.extensions["stealing"] = self
self.scheduler.events["stealing"] = deque(maxlen=100000)
self.count = 0
# { task state: <stealing info dict> }
self.in_flight = dict()
# { worker state: occupancy }
self.in_flight_occupancy = defaultdict(lambda: 0)
self._in_flight_event = asyncio.Event()
self.scheduler.stream_handlers["steal-response"] = self.move_task_confirm
async def start(self, scheduler=None):
"""Start the background coroutine to balance the tasks on the cluster.
Idempotent.
The scheduler argument is ignored. It is merely required to satisify the
plugin interface. Since this class is simultaneouly an extension, the
scheudler instance is already registered during initialization
"""
if "stealing" in self.scheduler.periodic_callbacks:
return
pc = PeriodicCallback(
callback=self.balance, callback_time=self._callback_time * 1000
)
pc.start()
self.scheduler.periodic_callbacks["stealing"] = pc
self._in_flight_event.set()
async def stop(self):
"""Stop the background task balancing tasks on the cluster.
This will block until all currently running stealing requests are
        finished. Idempotent.
"""
pc = self.scheduler.periodic_callbacks.pop("stealing", None)
if pc:
pc.stop()
await self._in_flight_event.wait()
def _to_dict_no_nest(self, *, exclude: Container[str] = ()) -> dict:
"""Dictionary representation for debugging purposes.
Not type stable and not intended for roundtrips.
See also
--------
Client.dump_cluster_state
distributed.utils.recursive_to_dict
"""
return recursive_to_dict(self, exclude=exclude, members=True)
def log(self, msg):
return self.scheduler.log_event("stealing", msg)
def add_worker(self, scheduler=None, worker=None):
self.stealable[worker] = [set() for i in range(15)]
def remove_worker(self, scheduler=None, worker=None):
del self.stealable[worker]
def teardown(self):
pcs = self.scheduler.periodic_callbacks
if "stealing" in pcs:
pcs["stealing"].stop()
del pcs["stealing"]
def transition(
self, key, start, finish, compute_start=None, compute_stop=None, *args, **kwargs
):
if finish == "processing":
ts = self.scheduler.tasks[key]
self.put_key_in_stealable(ts)
elif start == "processing":
ts = self.scheduler.tasks[key]
self.remove_key_from_stealable(ts)
d = self.in_flight.pop(ts, None)
if d:
thief = d["thief"]
victim = d["victim"]
self.in_flight_occupancy[thief] -= d["thief_duration"]
self.in_flight_occupancy[victim] += d["victim_duration"]
if not self.in_flight:
self.in_flight_occupancy.clear()
self._in_flight_event.set()
def recalculate_cost(self, ts):
if ts not in self.in_flight:
self.remove_key_from_stealable(ts)
self.put_key_in_stealable(ts)
def put_key_in_stealable(self, ts):
cost_multiplier, level = self.steal_time_ratio(ts)
if cost_multiplier is not None:
ws = ts.processing_on
worker = ws.address
self.stealable_all[level].add(ts)
self.stealable[worker][level].add(ts)
self.key_stealable[ts] = (worker, level)
def remove_key_from_stealable(self, ts):
result = self.key_stealable.pop(ts, None)
if result is None:
return
worker, level = result
try:
self.stealable[worker][level].remove(ts)
except KeyError:
pass
try:
self.stealable_all[level].remove(ts)
except KeyError:
pass
def steal_time_ratio(self, ts):
"""The compute to communication time ratio of a key
Returns
-------
cost_multiplier: The increased cost from moving this task as a factor.
For example a result of zero implies a task without dependencies.
level: The location within a stealable list to place this value
"""
split = ts.prefix.name
if split in fast_tasks:
return None, None
if not ts.dependencies: # no dependencies fast path
return 0, 0
ws = ts.processing_on
compute_time = ws.processing[ts]
if compute_time < 0.005: # 5ms, just give up
return None, None
nbytes = ts.get_nbytes_deps()
transfer_time = nbytes / self.scheduler.bandwidth + LATENCY
cost_multiplier = transfer_time / compute_time
if cost_multiplier > 100:
return None, None
level = int(round(log2(cost_multiplier) + 6))
if level < 1:
level = 1
return cost_multiplier, level
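    # Worked example (exposition only, hypothetical numbers): a task whose
    # dependencies take transfer_time = 0.2s (bandwidth term + LATENCY) and
    # whose compute_time is 0.1s has cost_multiplier = 0.2 / 0.1 = 2.0, so
    # level = int(round(log2(2.0) + 6)) = 7 and put_key_in_stealable() buckets
    # it into stealable[worker][7] and stealable_all[7].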
def move_task_request(self, ts, victim, thief) -> str:
try:
if ts in self.in_flight:
return "in-flight"
# Stimulus IDs are used to verify the response, see
# `move_task_confirm`. Therefore, this must be truly unique.
stimulus_id = f"steal-{uuid.uuid4().hex}"
key = ts.key
self.remove_key_from_stealable(ts)
logger.debug(
"Request move %s, %s: %2f -> %s: %2f",
key,
victim,
victim.occupancy,
thief,
thief.occupancy,
)
victim_duration = victim.processing[ts]
thief_duration = self.scheduler.get_task_duration(
ts
) + self.scheduler.get_comm_cost(ts, thief)
self.scheduler.stream_comms[victim.address].send(
{"op": "steal-request", "key": key, "stimulus_id": stimulus_id}
)
self.in_flight[ts] = {
"victim": victim, # guaranteed to be processing_on
"thief": thief,
"victim_duration": victim_duration,
"thief_duration": thief_duration,
"stimulus_id": stimulus_id,
}
self._in_flight_event.clear()
self.in_flight_occupancy[victim] -= victim_duration
self.in_flight_occupancy[thief] += thief_duration
return stimulus_id
except CommClosedError:
logger.info("Worker comm %r closed while stealing: %r", victim, ts)
return "comm-closed"
except Exception as e: # pragma: no cover
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
async def move_task_confirm(self, *, key, state, stimulus_id, worker=None):
try:
ts = self.scheduler.tasks[key]
except KeyError:
logger.debug("Key released between request and confirm: %s", key)
return
try:
d = self.in_flight.pop(ts)
if d["stimulus_id"] != stimulus_id:
self.log(("stale-response", key, state, worker, stimulus_id))
self.in_flight[ts] = d
return
except KeyError:
self.log(("already-aborted", key, state, worker, stimulus_id))
return
thief = d["thief"]
victim = d["victim"]
logger.debug("Confirm move %s, %s -> %s. State: %s", key, victim, thief, state)
self.in_flight_occupancy[thief] -= d["thief_duration"]
self.in_flight_occupancy[victim] += d["victim_duration"]
if not self.in_flight:
self.in_flight_occupancy.clear()
self._in_flight_event.set()
if self.scheduler.validate:
assert ts.processing_on == victim
try:
_log_msg = [key, state, victim.address, thief.address, stimulus_id]
if ts.state != "processing":
self.scheduler._reevaluate_occupancy_worker(thief)
self.scheduler._reevaluate_occupancy_worker(victim)
elif (
state in _WORKER_STATE_UNDEFINED
or state in _WORKER_STATE_CONFIRM
and thief.address not in self.scheduler.workers
):
self.log(
(
"reschedule",
thief.address not in self.scheduler.workers,
*_log_msg,
)
)
self.scheduler.reschedule(key)
# Victim had already started execution
elif state in _WORKER_STATE_REJECT:
self.log(("already-computing", *_log_msg))
# Victim was waiting, has given up task, enact steal
elif state in _WORKER_STATE_CONFIRM:
self.remove_key_from_stealable(ts)
ts.processing_on = thief
duration = victim.processing.pop(ts)
victim.occupancy -= duration
self.scheduler.total_occupancy -= duration
if not victim.processing:
self.scheduler.total_occupancy -= victim.occupancy
victim.occupancy = 0
thief.processing[ts] = d["thief_duration"]
thief.occupancy += d["thief_duration"]
self.scheduler.total_occupancy += d["thief_duration"]
self.put_key_in_stealable(ts)
self.scheduler.send_task_to_worker(thief.address, ts)
self.log(("confirm", *_log_msg))
else:
raise ValueError(f"Unexpected task state: {state}")
except Exception as e: # pragma: no cover
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
finally:
self.scheduler.check_idle_saturated(thief)
self.scheduler.check_idle_saturated(victim)
def balance(self):
s = self.scheduler
def combined_occupancy(ws):
return ws.occupancy + self.in_flight_occupancy[ws]
def maybe_move_task(level, ts, sat, idl, duration, cost_multiplier):
occ_idl = combined_occupancy(idl)
occ_sat = combined_occupancy(sat)
if occ_idl + cost_multiplier * duration <= occ_sat - duration / 2:
self.move_task_request(ts, sat, idl)
log.append(
(
start,
level,
ts.key,
duration,
sat.address,
occ_sat,
idl.address,
occ_idl,
)
)
s.check_idle_saturated(sat, occ=occ_sat)
s.check_idle_saturated(idl, occ=occ_idl)
with log_errors():
i = 0
# Paused and closing workers must never become thieves
idle = [ws for ws in s.idle.values() if ws.status == Status.running]
if not idle or len(idle) == len(s.workers):
return
log = []
start = time()
saturated = s.saturated
if not saturated:
saturated = topk(10, s.workers.values(), key=combined_occupancy)
saturated = [
ws
for ws in saturated
if combined_occupancy(ws) > 0.2 and len(ws.processing) > ws.nthreads
]
elif len(saturated) < 20:
saturated = sorted(saturated, key=combined_occupancy, reverse=True)
if len(idle) < 20:
idle = sorted(idle, key=combined_occupancy)
for level, cost_multiplier in enumerate(self.cost_multipliers):
if not idle:
break
for sat in list(saturated):
stealable = self.stealable[sat.address][level]
if not stealable or not idle:
continue
for ts in list(stealable):
if ts not in self.key_stealable or ts.processing_on is not sat:
stealable.discard(ts)
continue
i += 1
if not idle:
break
if _has_restrictions(ts):
thieves = [ws for ws in idle if _can_steal(ws, ts, sat)]
else:
thieves = idle
if not thieves:
break
thief = thieves[i % len(thieves)]
duration = sat.processing.get(ts)
if duration is None:
stealable.discard(ts)
continue
maybe_move_task(
level, ts, sat, thief, duration, cost_multiplier
)
if self.cost_multipliers[level] < 20: # don't steal from public at cost
stealable = self.stealable_all[level]
for ts in list(stealable):
if not idle:
break
if ts not in self.key_stealable:
stealable.discard(ts)
continue
sat = ts.processing_on
if sat is None:
stealable.discard(ts)
continue
if combined_occupancy(sat) < 0.2:
continue
if len(sat.processing) <= sat.nthreads:
continue
i += 1
if _has_restrictions(ts):
thieves = [ws for ws in idle if _can_steal(ws, ts, sat)]
else:
thieves = idle
if not thieves:
continue
thief = thieves[i % len(thieves)]
duration = sat.processing[ts]
maybe_move_task(
level, ts, sat, thief, duration, cost_multiplier
)
if log:
self.log(log)
self.count += 1
stop = time()
if s.digests:
s.digests["steal-duration"].add(stop - start)
def restart(self, scheduler):
for stealable in self.stealable.values():
for s in stealable:
s.clear()
for s in self.stealable_all:
s.clear()
self.key_stealable.clear()
def story(self, *keys):
keys = {key.key if not isinstance(key, str) else key for key in keys}
out = []
for _, L in self.scheduler.get_events(topic="stealing"):
if not isinstance(L, list):
L = [L]
for t in L:
if any(x in keys for x in t):
out.append(t)
return out
def _has_restrictions(ts):
"""Determine whether the given task has restrictions and whether these
restrictions are strict.
"""
return not ts.loose_restrictions and (
ts.host_restrictions or ts.worker_restrictions or ts.resource_restrictions
)
def _can_steal(thief, ts, victim):
"""Determine whether worker ``thief`` can steal task ``ts`` from worker
``victim``.
Assumes that `ts` has some restrictions.
"""
if (
ts.host_restrictions
and get_address_host(thief.address) not in ts.host_restrictions
):
return False
elif ts.worker_restrictions and thief.address not in ts.worker_restrictions:
return False
if not ts.resource_restrictions:
return True
for resource, value in ts.resource_restrictions.items():
try:
supplied = thief.resources[resource]
except KeyError:
return False
else:
if supplied < value:
return False
return True
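# Minimal usage sketch (exposition only, not part of distributed): exercising
# _can_steal with stand-in objects whose attributes mimic the scheduler state
# the function reads. All names and values below are hypothetical.
def _example_can_steal():
    from types import SimpleNamespace
    thief = SimpleNamespace(address="tcp://10.0.0.2:1234", resources={"GPU": 1})
    ts = SimpleNamespace(host_restrictions=None,
                         worker_restrictions=None,
                         resource_restrictions={"GPU": 2})
    # the thief supplies only 1 GPU while the task requires 2 -> False
    return _can_steal(thief, ts, victim=None)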
fast_tasks = {"split-shuffle"}
|
|
import asyncio
import concurrent.futures
import copy
import glob
import logging
import os
import sys
import time
import types
from collections import OrderedDict
from functools import partial
from importlib import import_module
from pprint import pformat
from types import SimpleNamespace
import aiocron
import asyncssh
from biothings.utils.configuration import *
def _config_for_app(config_mod=None):
if not config_mod:
config_name = os.environ.get("HUB_CONFIG", "config")
config_mod = import_module(config_name)
if not isinstance(config_mod, (types.ModuleType, SimpleNamespace)):
raise TypeError(type(config_mod))
for attr in dir(config_mod):
value = getattr(config_mod, attr)
if isinstance(value, ConfigurationError):
raise ConfigurationError("%s: %s" % (attr, str(value)))
try:
app_path = os.path.split(config_mod.__file__)[0]
sys.path.insert(0, app_path)
except Exception:
logging.exception(config_mod)
app_path = "" # TODO
wrapper = ConfigurationWrapper(config_mod)
wrapper.APP_PATH = app_path
if not hasattr(config_mod, "HUB_DB_BACKEND"):
raise AttributeError("HUB_DB_BACKEND Not Found.")
# this will create a "biothings.config" module
# so "from biothings from config" will get app config at lib level
biothings = import_module("biothings")
biothings.config = wrapper
globals()["config"] = wrapper
    import biothings.utils.hub_db  # the order of the following commands matters
wrapper.hub_db = import_module(config_mod.HUB_DB_BACKEND["module"])
biothings.utils.hub_db.setup(wrapper)
wrapper._db = biothings.utils.hub_db.get_hub_config()
# setup logging
from biothings.utils.loggers import EventRecorder
logger = logging.getLogger()
fmt = logging.Formatter(
'%(asctime)s [%(process)d:%(threadName)s] - %(name)s - %(levelname)s -- %(message)s',
datefmt="%H:%M:%S")
erh = EventRecorder()
erh.name = "event_recorder"
erh.setFormatter(fmt)
if erh.name not in [h.name for h in logger.handlers]:
logger.addHandler(erh)
_config_for_app()
# FOR DEVELOPMENT USAGE
# --------------------------
# try:
# _config_for_app()
# except Exception:
# logging.exception("Fallback to local DB.")
# _config = SimpleNamespace()
# _config.HUB_DB_BACKEND = {
# "module": "biothings.utils.sqlite3",
# "sqlite_db_folder": "."}
# _config.DATA_HUB_DB_DATABASE = ".hubdb"
# _config_for_app(_config)
from biothings.utils.common import get_class_from_classpath
from biothings.utils.hub import (AlreadyRunningException, CommandDefinition,
CommandError, HubShell, get_hub_reloader,
pending)
from biothings.utils.jsondiff import make as jsondiff
from biothings.utils.loggers import (ShellLogger, WSLogHandler, WSShellHandler,
get_logger)
from biothings.utils.version import check_new_version, get_version
# adjust some loggers...
if os.environ.get("HUB_VERBOSE", "0") != "1":
logging.getLogger("elasticsearch").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
logging.getLogger("requests").setLevel(logging.ERROR)
logging.getLogger("botocore").setLevel(logging.ERROR)
logging.getLogger("boto3").setLevel(logging.ERROR)
logging.getLogger("git").setLevel(logging.ERROR)
def get_loop(max_workers=None):
loop = asyncio.get_event_loop()
executor = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)
loop.set_default_executor(executor)
return loop
# Keys used as category in pinfo (description of jobs submitted to JobManager)
# Those are used in different places
DUMPER_CATEGORY = "dumper"
UPLOADER_CATEGORY = "uploader"
BUILDER_CATEGORY = "builder"
INDEXER_CATEGORY = "indexer"
INDEXMANAGER_CATEGORY = "indexmanager"
RELEASEMANAGER_CATEGORY = "releasemanager"
RELEASER_CATEGORY = "releaser"
SNAPSHOTMANAGER_CATEGORY = "snapshotmanager"
SNAPSHOOTER_CATEGORY = "snapshooter"
DIFFER_CATEGORY = "differ"
DIFFMANAGER_CATEGORY = "diffmanager"
SYNCER_CATEGORY = "syncer"
INSPECTOR_CATEGORY = "inspector"
HUB_REFRESH_COMMANDS = getattr(
config, "HUB_REFRESH_COMMANDS", "* * * * * *" # every sec
)
# Check for new code update from app and biothings Git repo
HUB_CHECK_UPGRADE = getattr(
config, "HUB_CHECK_UPGRADE", "0 * * * *" # every hour
)
class JobRenderer(object):
def __init__(self):
self.rendered = {
types.FunctionType: self.render_func,
types.MethodType: self.render_method,
partial: self.render_partial,
types.LambdaType: self.render_lambda,
}
def render(self, job):
r = self.rendered.get(type(job._callback))
rstr = r(job._callback)
delta = job._when - job._loop.time()
days = None
if delta > 86400:
days = int(delta / 86400)
delta = delta - 86400
strdelta = time.strftime("%Hh:%Mm:%Ss", time.gmtime(int(delta)))
if days:
strdelta = "%d day(s) %s" % (days, strdelta)
return "%s {run in %s}" % (rstr, strdelta)
def render_partial(self, p):
# class.method(args)
return self.rendered[type(p.func)](p.func) + "%s" % str(p.args)
def render_cron(self, c):
# func type associated to cron can vary
return self.rendered[type(c.func)](c.func) + " [%s]" % c.spec
def render_func(self, f):
return f.__name__
def render_method(self, m):
# what is self ? cron ?
# if type(m.__self__) == aiocron.Cron: # TODO: delete if confirmed
if isinstance(m.__self__, aiocron.Cron):
return self.render_cron(m.__self__)
else:
return "%s.%s" % (m.__self__.__class__.__name__, m.__name__)
def render_lambda(self, l):
return l.__name__
renderer = JobRenderer()
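# Usage sketch (exposition only; names and values hypothetical): for a
# TimerHandle created via loop.call_later(3600, hello), renderer.render(handle)
# returns something like "hello {run in 01h:00m:00s}".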
def status(managers):
"""
    Return a global hub status (number of sources, documents, etc...)
according to available managers
"""
total_srcs = None
total_docs = None
total_confs = None
total_builds = None
total_apis = None
total_running_apis = None
if managers.get("source_manager"):
try:
srcm = managers["source_manager"]
srcs = srcm.get_sources()
total_srcs = len(srcs)
total_docs = sum([s["upload"]["sources"][subs].get("count", 0) or 0
for s in srcs
for subs in s.get("upload", {}).get("sources", {})
if s.get("upload")])
except Exception:
logging.exception("Can't get stats for sources:")
try:
bm = managers["build_manager"]
total_confs = len(bm.build_config_info())
except Exception:
logging.exception("Can't get total number of build configurations:")
try:
total_builds = len(bm.build_info())
except Exception:
logging.exception("Can't get total number of builds:")
try:
am = managers["api_manager"]
apis = am.get_apis()
total_apis = len(apis)
total_running_apis = len(
[a for a in apis if a.get("status") == "running"])
except Exception:
logging.exception("Can't get stats for APIs:")
return {
"source": {
"total": total_srcs,
"documents": total_docs
},
"build": {
"total": total_builds
},
"build_conf": {
"total": total_confs
},
"api": {
"total": total_apis,
"running": total_running_apis
},
}
def get_schedule(loop):
"""try to render job in a human-readable way..."""
out = []
for sch in loop._scheduled:
if type(sch) != asyncio.events.TimerHandle:
continue
if sch._cancelled:
continue
try:
info = renderer.render(sch)
out.append(info)
except Exception:
import traceback
traceback.print_exc()
out.append(sch)
return "\n".join(out)
async def start_ssh_server(loop,
name,
passwords,
keys=['bin/ssh_host_key'],
shell=None,
host='',
port=8022):
for key in keys:
assert os.path.exists(
key
), "Missing key '%s' (use: 'ssh-keygen -f %s' to generate it" % (key,
key)
HubSSHServer.PASSWORDS = passwords
HubSSHServer.NAME = name
HubSSHServer.SHELL = shell
aiocron.crontab(HUB_REFRESH_COMMANDS,
func=shell.__class__.refresh_commands,
start=True,
loop=loop)
await asyncssh.create_server(HubSSHServer,
host,
port,
#loop=loop,
server_host_keys=keys)
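# Usage sketch (exposition only; values hypothetical), mirroring how
# HubServer.start() launches the SSH server further below:
#   server = start_ssh_server(loop, "myhub", passwords={"guest": "s3cret"},
#                             shell=shell, port=8022)
#   loop.run_until_complete(server)
#   loop.run_forever()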
class HubCommands(OrderedDict):
def __setitem__(self, k, v):
if k in self:
raise ValueError("Command '%s' already defined" % k)
super().__setitem__(k, v)
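# Behavior sketch (exposition only): HubCommands rejects re-definition, so a
# command name can only be registered once:
#   cmds = HubCommands()
#   cmds["dump"] = some_func    # ok
#   cmds["dump"] = other_func   # raises ValueError("Command 'dump' already defined")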
class HubServer(object):
DEFAULT_FEATURES = [
"config", "job", "dump", "upload", "dataplugin", "source", "build",
"diff", "index", "snapshot", "release", "inspect", "sync", "api",
"terminal", "reloader", "dataupload", "ws", "readonly", "upgrade",
"autohub", "hooks",
]
DEFAULT_MANAGERS_ARGS = {"upload": {"poll_schedule": "* * * * * */10"}}
DEFAULT_RELOADER_CONFIG = {
"folders": None, # will use default one
"managers": ["source_manager", "assistant_manager"],
"reload_func": None
} # will use default one
DEFAULT_DATAUPLOAD_CONFIG = {
"upload_root": getattr(config, "DATA_UPLOAD_FOLDER", None)
}
DEFAULT_WEBSOCKET_CONFIG = {}
DEFAULT_API_CONFIG = {}
DEFAULT_AUTOHUB_CONFIG = {
"version_urls": getattr(config, "VERSION_URLS", []),
"indexer_factory": getattr(config, "AUTOHUB_INDEXER_FACTORY", None),
"es_host": getattr(config, "AUTOHUB_ES_HOST", None),
}
def __init__(self,
source_list,
features=None,
name="BioThings Hub",
managers_custom_args={},
api_config=None,
reloader_config=None,
dataupload_config=None,
websocket_config=None,
autohub_config=None):
"""
Helper to setup and instantiate common managers usually used in a hub
(eg. dumper manager, uploader manager, etc...)
"source_list" is either:
- a list of string corresponding to paths to datasources modules
- a package containing sub-folders with datasources modules
        Specific managers can be retrieved by adjusting the "features" parameter,
        where each feature corresponds to one or more managers. The parameter
        defaults to all available features. Managers are configured/initialized
        in the same order as the list, so if a manager (eg. job_manager) is
        required by all others, it must be the first in the list.
        "managers_custom_args" is an optional dict used to pass specific arguments
        while initializing managers:
            managers_custom_args={"upload" : {"poll_schedule" : "*/5 * * * *"}}
        will set the poll schedule to check uploads every 5min (instead of the
        default 10s).
        "reloader_config", "dataupload_config", "autohub_config" and "websocket_config"
        can be used to customize the reloader, dataupload, autohub and websocket
        features. If None, the default config is used. If explicitly False, the
        feature is deactivated.
"""
self.name = name
self.source_list = source_list
self.logger, self.logfile = get_logger("hub")
self._passed_features = features
self._passed_managers_custom_args = managers_custom_args
self.features = self.clean_features(features or self.DEFAULT_FEATURES)
self.managers_custom_args = managers_custom_args
self.reloader_config = reloader_config or self.DEFAULT_RELOADER_CONFIG
self.dataupload_config = dataupload_config or self.DEFAULT_DATAUPLOAD_CONFIG
self.websocket_config = websocket_config or self.DEFAULT_WEBSOCKET_CONFIG
self.autohub_config = autohub_config or self.DEFAULT_AUTOHUB_CONFIG
self.ws_listeners = [
] # collect listeners that should be connected (push data through) to websocket
self.api_config = api_config or self.DEFAULT_API_CONFIG
# set during configure()
self.managers = None
self.api_endpoints = {}
self.readonly_api_endpoints = None
self.shell = None
self.commands = None # default "public" commands
self.extra_commands = None # "hidden" commands, but still useful for advanced usage
self.hook_files = None # user-defined commands as hook files
self.routes = []
self.readonly_routes = []
self.ws_urls = [] # only one set, shared between r/w and r/o hub api server
# flag "do we need to configure?"
self.configured = False
def clean_features(self, features):
"""
Sanitize (ie. remove duplicates) features
"""
# we can't just use "set()" because we need to preserve order
ordered = OrderedDict()
for feat in features:
if feat not in ordered:
ordered[feat] = None
return list(ordered.keys())
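    # e.g. (exposition only): clean_features(["job", "dump", "job", "upload"])
    # returns ["job", "dump", "upload"], preserving first-seen order.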
def before_configure(self):
"""
Hook triggered before configure(),
used eg. to adjust features list
"""
pass
def configure_readonly_api_endpoints(self):
"""
        Assuming read-write API endpoints have previously been defined
        (self.api_endpoints is set), extract commands and their endpoint
        definitions only when the method is GET. That is, for any given API
        definition honoring REST principles for HTTP verbs, generate endpoints
        only for actions that are read-only.
"""
        assert self.api_endpoints, "Can't derive a read-only API if no read-write endpoints are defined"
self.readonly_api_endpoints = {}
for cmd, api_endpoints in self.api_endpoints.items():
if not isinstance(api_endpoints, list):
api_endpoints = [api_endpoints]
for endpoint in api_endpoints:
if endpoint["method"].lower() != "get":
self.logger.debug("Skipping %s: %s for read-only API" % (cmd, endpoint))
continue
else:
self.readonly_api_endpoints.setdefault(cmd, []).append(endpoint)
def configure(self):
self.before_configure()
self.remaining_features = copy.deepcopy(
self.features) # keep track of what's been configured
self.configure_ioloop()
self.configure_managers()
# setup the shell
self.shell = HubShell(self.managers["job_manager"])
self.shell.register_managers(self.managers)
self.shell.server = self # propagate server instance in shell
# so it's accessible from the console if needed
self.configure_remaining_features()
self.configure_commands()
self.configure_extra_commands()
self.shell.set_commands(self.commands, self.extra_commands)
self.ingest_hooks()
# setapi
if self.api_config is not False:
self.configure_api_endpoints(
) # after shell setup as it adds some default commands
            # we want to expose through the api
from biothings.hub.api import generate_api_routes
from biothings.hub.api.handlers.base import RootHandler
# First deal with read-only API
if "readonly" in self.features:
self.configure_readonly_api_endpoints()
self.readonly_routes.extend(
generate_api_routes(self.shell, self.readonly_api_endpoints))
# we don't want to expose feature read-only for the API that is *not*
# read-only. "readonly" feature means we're running another webapp for
# a specific readonly API. UI can then query the root handler and see
# if the API is readonly or not, and adjust the components & actions
ro_features = copy.deepcopy(self.features)
# terminal feature certainly not allowed in read-only server...
if "terminal" in self.features:
ro_features.remove("terminal")
# if we have readonly feature, it means another non-readonly server is running
self.features.remove("readonly")
hub_name = getattr(config, "HUB_NAME", "Hub") + " (read-only)"
self.readonly_routes.append(("/", RootHandler, {
"features": ro_features, "hub_name": hub_name
}))
# Then deal with read-write API
self.routes.extend(
generate_api_routes(self.shell, self.api_endpoints))
from biothings.hub.api.handlers.log import (HubLogDirHandler,
HubLogFileHandler)
self.routes.append(("/logs/(.*)", HubLogDirHandler, {"path": config.LOG_FOLDER}))
self.routes.append(("/log/(.+)", HubLogFileHandler, {"path": config.LOG_FOLDER}))
self.routes.append(("/", RootHandler, {
"features": self.features,
}))
# done
self.configured = True
def configure_ioloop(self):
import tornado.platform.asyncio
tornado.platform.asyncio.AsyncIOMainLoop().install()
def before_start(self):
pass
def start(self):
if not self.configured:
self.configure()
self.logger.info("Starting '%s'", self.name)
# can't use asyncio.get_event_loop() if python < 3.5.3 as it would return
# another instance of aio loop, take it from job_manager to make sure
# we share the same one
loop = self.managers["job_manager"].loop
if self.routes:
self.logger.info("Starting Hub API server on port %s" % config.HUB_API_PORT)
#self.logger.info(self.routes)
import tornado.web
# register app into current event loop
api = tornado.web.Application(self.routes)
self.extra_commands["api"] = api
from biothings.hub.api import start_api
start_api(api,
config.HUB_API_PORT,
settings=getattr(config, "TORNADO_SETTINGS", {})
)
if self.readonly_routes:
if not getattr(config, "READONLY_HUB_API_PORT", None):
self.logger.warning("Read-only Hub API feature is set but READONLY_HUB_API_PORT "
+ "isn't set in configuration")
else:
self.logger.info("Starting read-only Hub API server on port %s" % config.READONLY_HUB_API_PORT)
#self.logger.info(self.readonly_routes)
ro_api = tornado.web.Application(self.readonly_routes)
start_api(ro_api,
config.READONLY_HUB_API_PORT,
settings=getattr(config, "TORNADO_SETTINGS", {}))
else:
self.logger.info("No route defined, API server won't start")
# at this point, everything is ready/set, last call for customizations
self.before_start()
self.logger.info("Starting Hub SSH server on port %s" % config.HUB_SSH_PORT)
self.ssh_server = start_ssh_server(loop,
self.name,
passwords=config.HUB_PASSWD,
port=config.HUB_SSH_PORT,
shell=self.shell)
try:
loop.run_until_complete(self.ssh_server)
except (OSError, asyncssh.Error) as exc:
sys.exit('Error starting server: ' + str(exc))
loop.run_forever()
def mixargs(self, feat, params=None):
params = params or {}
args = {}
for p in params:
args[p] = self.managers_custom_args.get(feat, {}).pop(
p, None) or params[p]
# mix remaining
args.update(self.managers_custom_args.get(feat, {}))
return args
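    # e.g. (exposition only): with managers_custom_args={"upload": {"poll_schedule": "*/5 * * * *"}},
    # mixargs("upload", {"poll_schedule": "* * * * * */10"}) returns
    # {"poll_schedule": "*/5 * * * *"}: per-feature custom args take precedence
    # over the defaults passed by the configure_*_manager() methods.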
def configure_job_manager(self):
import asyncio
loop = asyncio.get_event_loop()
from biothings.utils.manager import JobManager
args = self.mixargs(
"job", {
"num_workers": config.HUB_MAX_WORKERS,
"max_memory_usage": config.HUB_MAX_MEM_USAGE
})
job_manager = JobManager(loop, **args)
self.managers["job_manager"] = job_manager
def configure_dump_manager(self):
from biothings.hub.dataload.dumper import DumperManager
args = self.mixargs("dump")
dmanager = DumperManager(job_manager=self.managers["job_manager"],
**args)
self.managers["dump_manager"] = dmanager
def configure_upload_manager(self):
from biothings.hub.dataload.uploader import UploaderManager
args = self.mixargs("upload", {"poll_schedule": "* * * * * */10"})
upload_manager = UploaderManager(
job_manager=self.managers["job_manager"], **args)
self.managers["upload_manager"] = upload_manager
def configure_dataplugin_manager(self):
from biothings.hub.dataplugin.manager import DataPluginManager
dp_manager = DataPluginManager(
job_manager=self.managers["job_manager"])
self.managers["dataplugin_manager"] = dp_manager
from biothings.hub.dataplugin.assistant import AssistantManager
args = self.mixargs("dataplugin")
assistant_manager = AssistantManager(
data_plugin_manager=dp_manager,
dumper_manager=self.managers["dump_manager"],
uploader_manager=self.managers["upload_manager"],
job_manager=self.managers["job_manager"],
**args)
self.managers["assistant_manager"] = assistant_manager
def configure_build_manager(self):
from biothings.hub.databuild.builder import BuilderManager
args = self.mixargs("build")
build_manager = BuilderManager(
job_manager=self.managers["job_manager"], **args)
build_manager.configure()
self.managers["build_manager"] = build_manager
build_manager.poll()
def configure_diff_manager(self):
from biothings.hub.databuild.differ import (DifferManager,
SelfContainedJsonDiffer)
args = self.mixargs("diff")
diff_manager = DifferManager(job_manager=self.managers["job_manager"],
poll_schedule="* * * * * */10",
**args)
diff_manager.configure([
SelfContainedJsonDiffer,
])
diff_manager.poll(
"diff", lambda doc: diff_manager.diff(
"jsondiff-selfcontained", old=None, new=doc["_id"]))
self.managers["diff_manager"] = diff_manager
def configure_index_manager(self):
from biothings.hub.dataindex.indexer import IndexManager
args = self.mixargs("index")
index_manager = IndexManager(job_manager=self.managers["job_manager"], **args)
index_manager.configure(config.INDEX_CONFIG)
self.managers["index_manager"] = index_manager
def configure_snapshot_manager(self):
assert "index" in self.features, "'snapshot' feature requires 'index'"
from biothings.hub.dataindex.snapshooter import SnapshotManager
args = self.mixargs("snapshot")
snapshot_manager = SnapshotManager(
index_manager=self.managers["index_manager"],
job_manager=self.managers["job_manager"],
poll_schedule="* * * * * */10", **args)
snapshot_manager.configure(config.SNAPSHOT_CONFIG)
snapshot_manager.poll("snapshot", snapshot_manager.snapshot_a_build)
self.managers["snapshot_manager"] = snapshot_manager
def configure_release_manager(self):
assert "diff" in self.features, "'release' feature requires 'diff'"
assert "snapshot" in self.features, "'release' feature requires 'snapshot'"
from biothings.hub.datarelease.publisher import ReleaseManager
args = self.mixargs("release")
release_manager = ReleaseManager(
diff_manager=self.managers["diff_manager"],
snapshot_manager=self.managers["snapshot_manager"],
job_manager=self.managers["job_manager"],
poll_schedule="* * * * * */10",
**args)
release_manager.configure(config.RELEASE_CONFIG)
release_manager.poll("release_note", release_manager.create_release_note_from_build)
release_manager.poll("publish", release_manager.publish_build)
self.managers["release_manager"] = release_manager
def configure_sync_manager(self):
from biothings.hub.databuild.syncer import SyncerManager
args = self.mixargs("sync")
sync_manager = SyncerManager(job_manager=self.managers["job_manager"],
**args)
sync_manager.configure()
self.managers["sync_manager"] = sync_manager
def configure_inspect_manager(self):
assert "upload" in self.features, "'inspect' feature requires 'upload'"
assert "build" in self.features, "'inspect' feature requires 'build'"
from biothings.hub.datainspect.inspector import InspectorManager
args = self.mixargs("inspect")
inspect_manager = InspectorManager(
upload_manager=self.managers["upload_manager"],
build_manager=self.managers["build_manager"],
job_manager=self.managers["job_manager"],
**args)
self.managers["inspect_manager"] = inspect_manager
def configure_api_manager(self):
assert "index" in self.features, "'api' feature requires 'index'"
from biothings.hub.api.manager import APIManager
args = self.mixargs("api")
api_manager = APIManager(**args)
self.managers["api_manager"] = api_manager
def configure_source_manager(self):
if "dump" in self.features or "upload" in self.features:
self.mixargs("source")
from biothings.hub.dataload.source import SourceManager
source_manager = SourceManager(
source_list=self.source_list,
dump_manager=self.managers["dump_manager"],
upload_manager=self.managers["upload_manager"],
data_plugin_manager=self.managers.get("dataplugin_manager"),
)
self.managers["source_manager"] = source_manager
# init data plugin once source_manager has been set (it inits dumper and uploader
# managers, if assistant_manager is configured/loaded before, datasources won't appear
# in dumper/uploader managers as they were not ready yet)
if "dataplugin" in self.features:
self.managers["assistant_manager"].configure()
self.managers["assistant_manager"].load()
# now that we have the source manager setup, we can schedule and poll
if "dump" in self.features and not getattr(
config, "SKIP_DUMPER_SCHEDULE", False):
self.managers["dump_manager"].schedule_all()
if "upload" in self.features and not getattr(
config, "SKIP_UPLOADER_POLL", False):
self.managers["upload_manager"].poll(
'upload', lambda doc: self.shell.launch(
partial(self.managers["upload_manager"].upload_src, doc[
"_id"])))
def configure_autohub_feature(self):
"""
See bt.hub.standalone.AutoHubFeature
"""
# "autohub" feature is based on "dump","upload" and "sync" features.
# If autohub is running on its own (standalone instance only for instance)
# we don't list them in DEFAULT_FEATURES as we don't want them to produce
# commands such as dump() or upload() as these are renamed for clarity
# that said, those managers could still exist *if* autohub is mixed
# with "standard" hub, so we don't want to override them if already configured
if not self.managers.get("dump_manager"):
self.configure_dump_manager()
if not self.managers.get("upload_manager"):
self.configure_upload_manager()
if not self.managers.get("sync_manager"):
self.configure_sync_manager()
        # Originally, autohub was a hub server on its own; it's now converted to
        # a feature. To avoid mixins and bringing complexity into this HubServer
        # definition, we use composition, pointing to an instance of that feature
        # which encapsulates that complexity
from biothings.hub.standalone import AutoHubFeature
        # only pass required managers
autohub_managers = {
"dump_manager": self.managers["dump_manager"],
"upload_manager": self.managers["upload_manager"],
"sync_manager": self.managers["sync_manager"],
"job_manager": self.managers["job_manager"]
}
version_urls = self.autohub_config["version_urls"]
indexer_factory = self.autohub_config["indexer_factory"]
es_host = self.autohub_config["es_host"]
factory = None
if indexer_factory:
assert es_host, "indexer_factory set but es_host not set (AUTOHUB_ES_HOST), can't know which ES server to use"
try:
factory_class = get_class_from_classpath(indexer_factory)
factory = factory_class(version_urls, es_host)
except (ImportError, ModuleNotFoundError) as e:
self.logger.error("Couldn't find indexer factory class from '%s': %s" % (indexer_factory, e))
self.autohub_feature = AutoHubFeature(autohub_managers, version_urls, factory)
try:
self.autohub_feature.configure()
self.autohub_feature.configure_auto_release(config)
except Exception as e:
self.logger.error("Could't configure feature 'autohub', will be deactivated: %s" % e)
self.features.remove("autohub")
def configure_hooks_feature(self):
"""
Ingest user-defined commands into hub namespace, giving access
to all pre-defined commands (commands, extra_commands).
        This method prepares the hooks, but the ingestion is done later,
        once all commands are defined
"""
hooks_folder = getattr(config, "HOOKS_FOLDER", "./hooks")
if not os.path.exists(hooks_folder):
self.logger.info("Hooks folder '%s' doesn't exist, creating it" % hooks_folder)
os.makedirs(hooks_folder)
self.hook_files = glob.glob(os.path.join(hooks_folder, "*.py"))
def ingest_hooks(self):
if not self.hook_files:
return
for pyfile in self.hook_files:
try:
self.logger.info("Processing hook file '%s'" % pyfile)
self.process_hook_file(pyfile)
except Exception as e:
self.logger.exception("Can't process hook file: %s" % e)
def process_hook_file(self, hook_file):
        with open(hook_file) as fh:
            strcode = fh.read()
code = compile(strcode, "<string>", "exec")
eval(code, self.shell.extra_ns, self.shell.extra_ns)
def configure_managers(self):
if self.managers is not None:
raise Exception("Managers have already been configured")
self.managers = {}
self.logger.info("Setting up managers for following features: %s",
self.features)
assert "job" in self.features, "'job' feature is mandatory"
if "source" in self.features:
assert "dump" in self.features and "upload" in self.features, "'source' feature requires both 'dump' and 'upload' features"
if "dataplugin" in self.features:
assert "source" in self.features, "'dataplugin' feature requires 'source' feature"
# specific order, eg. job_manager is used by all managers
for feat in self.features:
if hasattr(self, "configure_%s_manager" % feat):
self.logger.info("Configuring feature '%s'", feat)
getattr(self, "configure_%s_manager" % feat)()
self.remaining_features.remove(feat)
elif hasattr(self, "configure_%s_feature" % feat):
# see configure_remaining_features()
pass # this is configured after managers but should not produce an error
else:
raise AttributeError(
"Feature '%s' listed but no 'configure_%s_{manager|feature}' method found"
% (feat, feat))
self.logger.info("Active manager(s): %s" % pformat(self.managers))
def configure_config_feature(self):
# just a placeholder
pass
def configure_upgrade_feature(self):
"""
        Allows a Hub to check for new versions (new commits to apply on the
        running branch) and apply them to the current code base
"""
if not getattr(config, "app_folder", None) or not getattr(config, "biothings_folder", None):
self.logger.warning("Can't schedule check for new code updates, "
+ "app folder and/or biothings folder not defined")
return
from biothings.hub.upgrade import (ApplicationSystemUpgrade,
BioThingsSystemUpgrade)
def get_upgrader(klass, folder):
version = get_version(folder)
if version:
klass.SRC_ROOT_FOLDER = folder
klass.GIT_REPO_URL = version["giturl"]
klass.DEFAULT_BRANCH = version["branch"]
return klass
else:
# set a flag to skip version checks, folder is likely not a git folder
_skip_list = getattr(self, 'upgrader_skip_folders', [])
if folder not in _skip_list:
_skip_list.append(folder)
setattr(self, 'upgrader_skip_folders', _skip_list)
bt_upgrader_class = get_upgrader(BioThingsSystemUpgrade, config.biothings_folder)
app_upgrader_class = get_upgrader(ApplicationSystemUpgrade, config.app_folder)
self.managers["dump_manager"].register_classes(
[cls for cls in [bt_upgrader_class, app_upgrader_class] if cls]
)
        async def check_code_upgrade():
_skip_list = getattr(self, 'upgrader_skip_folders', [])
if _skip_list and config.biothings_folder in _skip_list and config.app_folder in _skip_list:
# both folders cannot be checked for versions, exit now
return
self.logger.info("Checking for new code updates")
if config.biothings_folder in _skip_list:
bt_new = None
else:
bt_new = check_new_version(config.biothings_folder)
if config.app_folder in _skip_list:
app_new = None
else:
try:
app_new = check_new_version(config.app_folder)
except Exception as e:
self.logger.warning("Can't check for new version: %s" % e)
return
# enrich existing version information with an "upgrade" field.
# note: we do that on config._module, the actual config.py module,
# *not* directly on config as it's a wrapper over config._module
for (name, new, param) in (("app", app_new, "APP_VERSION"), ("biothings", bt_new, "BIOTHINGS_VERSION")):
if new:
self.logger.info("Found updates for %s:\n%s" % (name, pformat(new)))
getattr(config._module, param)["upgrade"] = new
else:
# just in case, we pop out the key
val = getattr(config._module, param)
if val:
val.pop("upgrade", None)
loop = self.managers.get("job_manager") and self.managers[
"job_manager"].loop or asyncio.get_event_loop()
# check at startup, then regularly
asyncio.ensure_future(check_code_upgrade())
aiocron.crontab(HUB_CHECK_UPGRADE,
func=check_code_upgrade,
start=True,
loop=loop)
def get_websocket_urls(self):
if self.ws_urls:
return self.ws_urls
import biothings.hub.api.handlers.ws as ws
import sockjs.tornado
from biothings.utils.hub_db import ChangeWatcher
# monitor change in database to report activity in webapp
self.db_listener = ws.HubDBListener()
ChangeWatcher.add(self.db_listener)
ChangeWatcher.publish()
self.log_listener = ws.LogListener()
# push log statements to the webapp
        # careful, asyncio logger will trigger log statements while in the
        # handler (ie. infinite loop); root logger not recommended
        root_logger = logging.getLogger()
root_logger.addHandler(WSLogHandler(self.log_listener))
self.ws_listeners.extend([self.db_listener, self.log_listener])
ws_router = sockjs.tornado.SockJSRouter(
partial(ws.WebSocketConnection, listeners=self.ws_listeners),
'/ws')
self.ws_urls = ws_router.urls
return self.ws_urls
def configure_ws_feature(self):
# add websocket endpoint
ws_urls = self.get_websocket_urls()
self.routes.extend(ws_urls)
def configure_terminal_feature(self):
assert "ws" in self.features, "'terminal' feature requires 'ws'"
assert "ws" in self.remaining_features, "'terminal' feature should configured before 'ws'"
# shell logger/listener to communicate between webapp and hub ssh console
import biothings.hub.api.handlers.ws as ws
shell_listener = ws.LogListener()
shell_logger = logging.getLogger("shell")
assert isinstance(shell_logger,
ShellLogger), "shell_logger isn't properly set"
shell_logger.addHandler(WSShellHandler(shell_listener))
self.ws_listeners.append(shell_listener)
# webapp terminal to hub shell connection through /shell endpoint
from biothings.hub.api.handlers.shell import ShellHandler
shell_endpoint = ("/shell", ShellHandler, {
"shell": self.shell,
"shellog": shell_logger
})
self.routes.append(shell_endpoint)
def configure_dataupload_feature(self):
assert "ws" in self.features, "'dataupload' feature requires 'ws'"
assert "ws" in self.remaining_features, "'dataupload' feature should configured before 'ws'"
# this one is not bound to a specific command
from biothings.hub.api.handlers.upload import UploadHandler
# tuple type = interpreted as a route handler
self.routes.append(
(r"/dataupload/([\w\.-]+)?", UploadHandler, self.dataupload_config))
def configure_reloader_feature(self):
monitored_folders = self.reloader_config["folders"] or [
"hub/dataload/sources",
getattr(config, "DATA_PLUGIN_FOLDER", None),
getattr(config, "HOOKS_FOLDER", "./hooks"),
]
reload_func = self.reloader_config["reload_func"] or partial(
self.shell.restart, force=True)
reloader = get_hub_reloader(monitored_folders,
reload_func=reload_func)
reloader and reloader.monitor()
def configure_readonly_feature(self):
"""
Define then expose read-only Hub API endpoints
so Hub can be accessed without any risk of modifying data
"""
        assert self.api_config is not False, "api_config (read/write API) is required " \
            + "to define a read-only API (it's derived from it)"
        # first, websocket URLs (we only fetch data from websockets, so no
        # risk of write operations there)
ws_urls = self.get_websocket_urls()
self.readonly_routes.extend(ws_urls)
# the rest of the readonly feature setup is done as the end, when starting the server
def configure_remaining_features(self):
self.logger.info("Setting up remaining features: %s",
self.remaining_features)
# specific order, eg. job_manager is used by all managers
        for feat in copy.deepcopy(self.remaining_features):
            if hasattr(self, "configure_%s_feature" % feat):
                getattr(self, "configure_%s_feature" % feat)()
                self.remaining_features.remove(feat)
else:
raise AttributeError(
"Feature '%s' listed but no 'configure_%s_feature' method found"
% (feat, feat))
def configure_commands(self):
"""
Configure hub commands according to available managers
"""
assert self.managers, "No managers configured"
self.commands = HubCommands()
self.commands["status"] = CommandDefinition(command=partial(status, self.managers),
tracked=False)
if "config" in self.features:
self.commands["config"] = CommandDefinition(command=config.show,
tracked=False)
self.commands["setconf"] = config.store_value_to_db
self.commands["resetconf"] = config.reset
# getting info
if self.managers.get("source_manager"):
self.commands["source_info"] = CommandDefinition(
command=self.managers["source_manager"].get_source,
tracked=False)
self.commands["source_reset"] = CommandDefinition(
command=self.managers["source_manager"].reset, tracked=True)
# dump commands
if self.managers.get("dump_manager"):
self.commands["dump"] = self.managers["dump_manager"].dump_src
self.commands["dump_all"] = self.managers["dump_manager"].dump_all
# upload commands
if self.managers.get("upload_manager"):
self.commands["upload"] = self.managers[
"upload_manager"].upload_src
self.commands["upload_all"] = self.managers[
"upload_manager"].upload_all
# building/merging
if self.managers.get("build_manager"):
self.commands["whatsnew"] = CommandDefinition(
command=self.managers["build_manager"].whatsnew, tracked=False)
self.commands["lsmerge"] = self.managers[
"build_manager"].list_merge
self.commands["rmmerge"] = self.managers[
"build_manager"].delete_merge
self.commands["merge"] = self.managers["build_manager"].merge
self.commands["archive"] = self.managers[
"build_manager"].archive_merge
if hasattr(config, "INDEX_CONFIG"):
self.commands["index_config"] = config.INDEX_CONFIG
if hasattr(config, "SNAPSHOT_CONFIG"):
self.commands["snapshot_config"] = config.SNAPSHOT_CONFIG
if hasattr(config, "PUBLISH_CONFIG"):
self.commands["publish_config"] = config.PUBLISH_CONFIG
# diff
if self.managers.get("diff_manager"):
self.commands["diff"] = self.managers["diff_manager"].diff
self.commands["report"] = self.managers["diff_manager"].diff_report
# indexing commands
if self.managers.get("index_manager"):
self.commands["index"] = self.managers["index_manager"].index
self.commands["index_cleanup"] = self.managers["index_manager"].cleanup
if self.managers.get("snapshot_manager"):
self.commands["snapshot"] = self.managers["snapshot_manager"].snapshot
self.commands["snapshot_cleanup"] = self.managers["snapshot_manager"].cleanup
# data release commands
if self.managers.get("release_manager"):
self.commands["create_release_note"] = self.managers[
"release_manager"].create_release_note
self.commands["get_release_note"] = CommandDefinition(
command=self.managers["release_manager"].get_release_note,
tracked=False)
self.commands["publish"] = self.managers["release_manager"].publish
self.commands["publish_diff"] = self.managers[
"release_manager"].publish_diff
self.commands["publish_snapshot"] = self.managers[
"release_manager"].publish_snapshot
if self.managers.get("sync_manager"):
self.commands["sync"] = CommandDefinition(
command=self.managers["sync_manager"].sync)
# inspector
if self.managers.get("inspect_manager"):
self.commands["inspect"] = self.managers["inspect_manager"].inspect
# data plugins
if self.managers.get("assistant_manager"):
self.commands["register_url"] = partial(
self.managers["assistant_manager"].register_url)
self.commands["unregister_url"] = partial(
self.managers["assistant_manager"].unregister_url)
self.commands["export_plugin"] = partial(
self.managers["assistant_manager"].export)
if self.managers.get("dataplugin_manager"):
self.commands["dump_plugin"] = self.managers[
"dataplugin_manager"].dump_src
if "autohub" in self.DEFAULT_FEATURES:
self.commands["list"] = CommandDefinition(command=self.autohub_feature.list_biothings, tracked=False)
# dump commands
self.commands["versions"] = partial(self.managers["dump_manager"].call, method_name="versions")
self.commands["check"] = partial(self.managers["dump_manager"].dump_src, check_only=True)
self.commands["info"] = partial(self.managers["dump_manager"].call, method_name="info")
self.commands["download"] = partial(self.managers["dump_manager"].dump_src)
# upload commands
self.commands["apply"] = partial(self.managers["upload_manager"].upload_src)
self.commands["install"] = partial(self.autohub_feature.install)
self.commands["backend"] = partial(self.managers["dump_manager"].call, method_name="get_target_backend")
self.commands["reset_backend"] = partial(self.managers["dump_manager"].call, method_name="reset_target_backend")
logging.info("Registered commands: %s", list(self.commands.keys()))
def configure_extra_commands(self):
"""
Same as configure_commands() but commands are not exposed publicly in the shell
(they are shortcuts or commands for API endpoints, supporting commands, etc...)
"""
assert self.managers, "No managers configured"
self.extra_commands = {} # unordered since not exposed, we don't care
loop = self.managers.get("job_manager") and self.managers[
"job_manager"].loop or asyncio.get_event_loop()
self.extra_commands["g"] = CommandDefinition(command=globals(),
tracked=False)
self.extra_commands["sch"] = CommandDefinition(command=partial(get_schedule, loop),
tracked=False)
        # expose constant so no need to put quotes (eg. top(pending) instead of top("pending"))
self.extra_commands["pending"] = CommandDefinition(command=pending,
tracked=False)
self.extra_commands["loop"] = CommandDefinition(command=loop,
tracked=False)
if self.managers.get("job_manager"):
self.extra_commands["pqueue"] = CommandDefinition(
command=self.managers["job_manager"].process_queue,
tracked=False)
self.extra_commands["tqueue"] = CommandDefinition(
command=self.managers["job_manager"].thread_queue,
tracked=False)
self.extra_commands["jm"] = CommandDefinition(
command=self.managers["job_manager"], tracked=False)
self.extra_commands["top"] = CommandDefinition(
command=self.managers["job_manager"].top, tracked=False)
self.extra_commands["job_info"] = CommandDefinition(
command=self.managers["job_manager"].job_info, tracked=False)
self.extra_commands["schedule"] = CommandDefinition(
command=self.managers["job_manager"].schedule, tracked=False)
if self.managers.get("source_manager"):
self.extra_commands["sm"] = CommandDefinition(
command=self.managers["source_manager"], tracked=False)
self.extra_commands["sources"] = CommandDefinition(
command=self.managers["source_manager"].get_sources,
tracked=False)
self.extra_commands["source_save_mapping"] = CommandDefinition(
command=self.managers["source_manager"].save_mapping)
if self.managers.get("dump_manager"):
self.extra_commands["dm"] = CommandDefinition(
command=self.managers["dump_manager"], tracked=False)
self.extra_commands["dump_info"] = CommandDefinition(
command=self.managers["dump_manager"].dump_info, tracked=False)
if self.managers.get("dataplugin_manager"):
self.extra_commands["dpm"] = CommandDefinition(
command=self.managers["dataplugin_manager"], tracked=False)
if self.managers.get("assistant_manager"):
self.extra_commands["am"] = CommandDefinition(
command=self.managers["assistant_manager"], tracked=False)
if self.managers.get("upload_manager"):
self.extra_commands["um"] = CommandDefinition(
command=self.managers["upload_manager"], tracked=False)
self.extra_commands["upload_info"] = CommandDefinition(
command=self.managers["upload_manager"].upload_info,
tracked=False)
if self.managers.get("build_manager"):
self.extra_commands["bm"] = CommandDefinition(
command=self.managers["build_manager"], tracked=False)
self.extra_commands["builds"] = CommandDefinition(
command=self.managers["build_manager"].build_info,
tracked=False)
self.extra_commands["build"] = CommandDefinition(
command=lambda id: self.managers["build_manager"].build_info(
id=id),
tracked=False)
self.extra_commands["build_config_info"] = CommandDefinition(
command=self.managers["build_manager"].build_config_info,
tracked=False)
self.extra_commands["build_save_mapping"] = CommandDefinition(
command=self.managers["build_manager"].save_mapping)
self.extra_commands["create_build_conf"] = CommandDefinition(
command=self.managers["build_manager"].
create_build_configuration)
self.extra_commands["update_build_conf"] = CommandDefinition(
command=self.managers["build_manager"].
update_build_configuration)
self.extra_commands["delete_build_conf"] = CommandDefinition(
command=self.managers["build_manager"].
delete_build_configuration)
if self.managers.get("diff_manager"):
self.extra_commands["dim"] = CommandDefinition(
command=self.managers["diff_manager"], tracked=False)
self.extra_commands["diff_info"] = CommandDefinition(
command=self.managers["diff_manager"].diff_info, tracked=False)
self.extra_commands["jsondiff"] = CommandDefinition(
command=jsondiff, tracked=False)
if self.managers.get("sync_manager"):
self.extra_commands["sym"] = CommandDefinition(
command=self.managers["sync_manager"], tracked=False)
if self.managers.get("index_manager"):
self.extra_commands["im"] = CommandDefinition(
command=self.managers["index_manager"], tracked=False)
self.extra_commands["index_info"] = CommandDefinition(
command=self.managers["index_manager"].index_info,
tracked=False)
self.extra_commands["validate_mapping"] = CommandDefinition(
command=self.managers["index_manager"].validate_mapping)
self.extra_commands["update_metadata"] = CommandDefinition(
command=self.managers["index_manager"].update_metadata)
if self.managers.get("snapshot_manager"):
self.extra_commands["ssm"] = CommandDefinition(
command=self.managers["snapshot_manager"], tracked=False)
self.extra_commands["snapshot_info"] = CommandDefinition(
command=self.managers["snapshot_manager"].snapshot_info,
tracked=False)
if self.managers.get("release_manager"):
self.extra_commands["rm"] = CommandDefinition(
command=self.managers["release_manager"], tracked=False)
self.extra_commands["release_info"] = CommandDefinition(
command=self.managers["release_manager"].release_info,
tracked=False)
self.extra_commands["reset_synced"] = CommandDefinition(
command=self.managers["release_manager"].reset_synced,
tracked=True)
if self.managers.get("inspect_manager"):
self.extra_commands["ism"] = CommandDefinition(
command=self.managers["inspect_manager"], tracked=False)
if self.managers.get("api_manager"):
self.extra_commands["api"] = CommandDefinition(
command=self.managers["api_manager"], tracked=False)
self.extra_commands["get_apis"] = CommandDefinition(
command=self.managers["api_manager"].get_apis, tracked=False)
self.extra_commands["delete_api"] = CommandDefinition(
command=self.managers["api_manager"].delete_api)
self.extra_commands["create_api"] = CommandDefinition(
command=self.managers["api_manager"].create_api)
self.extra_commands["start_api"] = CommandDefinition(
command=self.managers["api_manager"].start_api)
self.extra_commands["stop_api"] = self.managers[
"api_manager"].stop_api
if "upgrade" in self.DEFAULT_FEATURES:
def upgrade(code_base): # just a wrapper over dumper
"""Upgrade (git pull) repository for given code base name ("biothings_sdk" or "application")"""
assert code_base in ("application", "biothings_sdk"), "Unknown code base '%s'" % code_base
return self.managers["dump_manager"].dump_src("__" + code_base)
self.commands["upgrade"] = CommandDefinition(command=upgrade)
self.extra_commands["expose"] = self.add_api_endpoint
logging.debug("Registered extra (private) commands: %s",
list(self.extra_commands.keys()))
def add_api_endpoint(self, endpoint_name, command_name, method, **kwargs):
"""
Add an API endpoint to expose command named "command_name"
using HTTP method "method". **kwargs are used to specify
more arguments for EndpointDefinition
"""
if self.configured:
raise Exception("API endpoint creation must be done before Hub is configured")
from biothings.hub.api import EndpointDefinition
endpoint = EndpointDefinition(name=command_name, method=method, **kwargs)
self.api_endpoints[endpoint_name] = endpoint
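    # Usage sketch (exposition only; endpoint/command names hypothetical):
    # expose the existing "dump" command as PUT /trigger_dump, before the hub
    # is configured:
    #   server.add_api_endpoint("trigger_dump", "dump", "put", force_bodyargs=True)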
def configure_api_endpoints(self):
cmdnames = list(self.commands.keys())
if self.extra_commands:
cmdnames.extend(list(self.extra_commands.keys()))
from biothings.hub.api import EndpointDefinition
self.api_endpoints["config"] = []
if "config" in cmdnames:
self.api_endpoints["config"].append(
EndpointDefinition(name="config", method="get"))
self.api_endpoints["config"].append(
EndpointDefinition(name="setconf",
method="put",
force_bodyargs=True))
self.api_endpoints["config"].append(
EndpointDefinition(name="resetconf",
method="delete",
force_bodyargs=True))
if not self.api_endpoints["config"]:
self.api_endpoints.pop("config")
if "builds" in cmdnames:
self.api_endpoints["builds"] = EndpointDefinition(name="builds",
method="get")
self.api_endpoints["build"] = []
if "build" in cmdnames:
self.api_endpoints["build"].append(
EndpointDefinition(method="get", name="build"))
if "archive" in cmdnames:
self.api_endpoints["build"].append(
EndpointDefinition(method="post",
name="archive",
suffix="archive"))
if "rmmerge" in cmdnames:
self.api_endpoints["build"].append(
EndpointDefinition(method="delete", name="rmmerge"))
if "merge" in cmdnames:
self.api_endpoints["build"].append(
EndpointDefinition(name="merge", method="put", suffix="new"))
if "build_save_mapping" in cmdnames:
self.api_endpoints["build"].append(
EndpointDefinition(name="build_save_mapping",
method="put",
suffix="mapping"))
if not self.api_endpoints["build"]:
self.api_endpoints.pop("build")
self.api_endpoints["publish"] = []
if "publish_diff" in cmdnames:
self.api_endpoints["publish"].append(
EndpointDefinition(name="publish_diff",
method="post",
suffix="incremental",
force_bodyargs=True))
if "publish_snapshot" in cmdnames:
self.api_endpoints["publish"].append(
EndpointDefinition(name="publish_snapshot",
method="post",
suffix="full",
force_bodyargs=True))
if not self.api_endpoints["publish"]:
self.api_endpoints.pop("publish")
if "diff" in cmdnames:
self.api_endpoints["diff"] = EndpointDefinition(
name="diff", method="put", force_bodyargs=True)
if "job_info" in cmdnames:
self.api_endpoints["job_manager"] = EndpointDefinition(
name="job_info", method="get")
if "dump_info" in cmdnames:
self.api_endpoints["dump_manager"] = EndpointDefinition(
name="dump_info", method="get")
if "upload_info" in cmdnames:
self.api_endpoints["upload_manager"] = EndpointDefinition(
name="upload_info", method="get")
if "build_config_info" in cmdnames:
self.api_endpoints["build_manager"] = EndpointDefinition(
name="build_config_info", method="get")
if "index_info" in cmdnames:
self.api_endpoints["index_manager"] = EndpointDefinition(
name="index_info", method="get")
if "snapshot_info" in cmdnames:
self.api_endpoints["snapshot_manager"] = EndpointDefinition(
name="snapshot_info", method="get")
if "release_info" in cmdnames:
self.api_endpoints["release_manager"] = EndpointDefinition(
name="release_info", method="get")
if "reset_synced" in cmdnames:
self.api_endpoints[
"release_manager/reset_synced"] = EndpointDefinition(
name="reset_synced", method="put")
if "diff_info" in cmdnames:
self.api_endpoints["diff_manager"] = EndpointDefinition(
name="diff_info", method="get")
if "commands" in cmdnames:
self.api_endpoints["commands"] = EndpointDefinition(
name="commands", method="get")
if "command" in cmdnames:
self.api_endpoints["command"] = EndpointDefinition(name="command",
method="get")
if "sources" in cmdnames:
self.api_endpoints["sources"] = EndpointDefinition(name="sources",
method="get")
self.api_endpoints["source"] = []
if "source_info" in cmdnames:
self.api_endpoints["source"].append(
EndpointDefinition(name="source_info", method="get"))
if "source_reset" in cmdnames:
self.api_endpoints["source"].append(
EndpointDefinition(name="source_reset",
method="post",
suffix="reset"))
if "dump" in cmdnames:
self.api_endpoints["source"].append(
EndpointDefinition(name="dump", method="put", suffix="dump"))
if "upload" in cmdnames:
self.api_endpoints["source"].append(
EndpointDefinition(name="upload",
method="put",
suffix="upload"))
if "source_save_mapping" in cmdnames:
self.api_endpoints["source"].append(
EndpointDefinition(name="source_save_mapping",
method="put",
suffix="mapping"))
if not self.api_endpoints["source"]:
self.api_endpoints.pop("source")
if "inspect" in cmdnames:
self.api_endpoints["inspect"] = EndpointDefinition(
name="inspect", method="put", force_bodyargs=True)
if "register_url" in cmdnames:
self.api_endpoints["dataplugin/register_url"] = EndpointDefinition(
name="register_url", method="post", force_bodyargs=True)
if "unregister_url" in cmdnames:
self.api_endpoints[
"dataplugin/unregister_url"] = EndpointDefinition(
name="unregister_url",
method="delete",
force_bodyargs=True)
self.api_endpoints["dataplugin"] = []
if "dump_plugin" in cmdnames:
self.api_endpoints["dataplugin"].append(
EndpointDefinition(name="dump_plugin",
method="put",
suffix="dump"))
if "export_plugin" in cmdnames:
self.api_endpoints["dataplugin"].append(
EndpointDefinition(name="export_plugin",
method="put",
suffix="export"))
if not self.api_endpoints["dataplugin"]:
self.api_endpoints.pop("dataplugin")
if "jsondiff" in cmdnames:
self.api_endpoints["jsondiff"] = EndpointDefinition(
name="jsondiff", method="post", force_bodyargs=True)
if "validate_mapping" in cmdnames:
self.api_endpoints["mapping/validate"] = EndpointDefinition(
name="validate_mapping", method="post", force_bodyargs=True)
self.api_endpoints["buildconf"] = []
if "create_build_conf" in cmdnames:
self.api_endpoints["buildconf"].append(
EndpointDefinition(name="create_build_conf",
method="post",
force_bodyargs=True))
self.api_endpoints["buildconf"].append(
EndpointDefinition(name="update_build_conf",
method="put",
force_bodyargs=True))
if "delete_build_conf" in cmdnames:
self.api_endpoints["buildconf"].append(
EndpointDefinition(name="delete_build_conf",
method="delete",
force_bodyargs=True))
if not self.api_endpoints["buildconf"]:
self.api_endpoints.pop("buildconf")
if "index" in cmdnames:
self.api_endpoints["index"] = EndpointDefinition(
name="index", method="put", force_bodyargs=True)
if "snapshot" in cmdnames:
self.api_endpoints["snapshot"] = EndpointDefinition(
name="snapshot", method="put", force_bodyargs=True)
if "sync" in cmdnames:
self.api_endpoints["sync"] = EndpointDefinition(
name="sync", method="post", force_bodyargs=True)
if "whatsnew" in cmdnames:
self.api_endpoints["whatsnew"] = EndpointDefinition(
name="whatsnew", method="get")
if "status" in cmdnames:
self.api_endpoints["status"] = EndpointDefinition(name="status",
method="get")
self.api_endpoints["release_note"] = []
if "create_release_note" in cmdnames:
self.api_endpoints["release_note"].append(
EndpointDefinition(name="create_release_note",
method="put",
suffix="create",
force_bodyargs=True))
if "get_release_note" in cmdnames:
self.api_endpoints["release_note"].append(
EndpointDefinition(name="get_release_note",
method="get",
force_bodyargs=True))
if not self.api_endpoints["release_note"]:
self.api_endpoints.pop("release_note")
self.api_endpoints["api"] = []
if "start_api" in cmdnames:
self.api_endpoints["api"].append(
EndpointDefinition(name="start_api",
method="put",
suffix="start"))
if "stop_api" in cmdnames:
self.api_endpoints["api"].append(
EndpointDefinition(name="stop_api",
method="put",
suffix="stop"))
if "delete_api" in cmdnames:
self.api_endpoints["api"].append(
EndpointDefinition(name="delete_api",
method="delete",
force_bodyargs=True))
if "create_api" in cmdnames:
self.api_endpoints["api"].append(
EndpointDefinition(name="create_api",
method="post",
force_bodyargs=True))
if not self.api_endpoints["api"]:
self.api_endpoints.pop("api")
if "get_apis" in cmdnames:
self.api_endpoints["api/list"] = EndpointDefinition(
name="get_apis", method="get")
if "stop" in cmdnames:
self.api_endpoints["stop"] = EndpointDefinition(name="stop",
method="put")
if "restart" in cmdnames:
self.api_endpoints["restart"] = EndpointDefinition(name="restart",
method="put")
self.api_endpoints["standalone"] = []
if "list" in cmdnames:
self.api_endpoints["standalone"].append(EndpointDefinition(name="list", method="get", suffix="list"))
if "versions" in cmdnames:
self.api_endpoints["standalone"].append(EndpointDefinition(name="versions", method="get", suffix="versions"))
if "check" in cmdnames:
self.api_endpoints["standalone"].append(EndpointDefinition(name="check", method="get", suffix="check"))
if "info" in cmdnames:
self.api_endpoints["standalone"].append(EndpointDefinition(name="info", method="get", suffix="info"))
if "download" in cmdnames:
self.api_endpoints["standalone"].append(EndpointDefinition(name="download", method="post", suffix="download"))
if "apply" in cmdnames:
self.api_endpoints["standalone"].append(EndpointDefinition(name="apply", method="post", suffix="apply"))
if "install" in cmdnames:
self.api_endpoints["standalone"].append(EndpointDefinition(name="install", method="post", suffix="install"))
if "backend" in cmdnames:
self.api_endpoints["standalone"].append(EndpointDefinition(name="backend", method="get", suffix="backend"))
if "reset_backend" in cmdnames:
self.api_endpoints["standalone"].append(EndpointDefinition(name="reset_backend", method="delete", suffix="backend"))
if not self.api_endpoints["standalone"]:
self.api_endpoints.pop("standalone")
if "upgrade" in self.commands:
self.api_endpoints["code/upgrade"] = EndpointDefinition(name="upgrade", method="put")
class HubSSHServer(asyncssh.SSHServer):
PASSWORDS = {}
SHELL = None
def session_requested(self):
return HubSSHServerSession(self.__class__.NAME, self.__class__.SHELL)
def connection_made(self, connection):
self._conn = connection
print('SSH connection received from %s.' %
connection.get_extra_info('peername')[0])
def connection_lost(self, exc):
if exc:
print('SSH connection error: ' + str(exc), file=sys.stderr)
else:
print('SSH connection closed.')
def begin_auth(self, username):
try:
self._conn.set_authorized_keys('bin/authorized_keys/%s.pub' %
username)
except IOError:
pass
return True
def password_auth_supported(self):
return True
def validate_password(self, username, password):
import crypt # not available on windows
if self.password_auth_supported():
pw = self.__class__.PASSWORDS.get(username, '*')
return crypt.crypt(password, pw) == pw
else:
return False
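# Sketch: entries in PASSWORDS map usernames to crypt(3)-style hashes, so a
# hypothetical setup could look like:
#   HubSSHServer.PASSWORDS["guest"] = crypt.crypt("guest", "$6$somesalt$")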
class HubSSHServerSession(asyncssh.SSHServerSession):
def __init__(self, name, shell):
self.name = name
self.shell = shell
self._input = ''
def connection_made(self, chan):
self._chan = chan
def shell_requested(self):
return True
def exec_requested(self, command):
self.eval_lines(["%s" % command, "\n"])
return True
def session_started(self):
welcome = ('\nWelcome to %s, %s!\n' %
(self.name, self._chan.get_extra_info('username')))
self.shell.shellog.output(welcome)
self._chan.write(welcome)
prompt = 'hub> '
self.shell.shellog.output(prompt)
self._chan.write(prompt)
def data_received(self, data, datatype):
self._input += data
return self.eval_lines(self._input.split('\n'))
def eval_lines(self, lines):
for line in lines[:-1]:
try:
outs = [out for out in self.shell.eval(line) if out]
# trailing \n if not already there
if outs:
strout = "\n".join(outs).strip("\n") + "\n"
self._chan.write(strout)
self.shell.shellog.output(strout)
except AlreadyRunningException as e:
self._chan.write("AlreadyRunningException: %s" % e)
except CommandError as e:
self._chan.write("CommandError: %s" % e)
self._chan.write('hub> ')
# keep the trailing (possibly incomplete) line for the next data_received call
self._input = lines[-1]
def eof_received(self):
self._chan.write('Have a good one...\n')
self._chan.exit(0)
def break_received(self, msec):
# simulate CR
self._chan.write('\n')
self.data_received("\n", None)
|
|
import h2o_nodes
from h2o_test import dump_json, verboseprint
import h2o_util
import h2o_print as h2p
from h2o_test import OutputObj
#************************************************************************
def runStoreView(node=None, **kwargs):
print "FIX! disabling runStoreView for now"
return {}
if not node: node = h2o_nodes.nodes[0]
print "\nStoreView:"
# FIX! are there keys other than frames and models
a = node.frames(**kwargs)
# print "storeview frames:", dump_json(a)
frameList = [af['key']['name'] for af in a['frames']]
for f in frameList:
print "frame:", f
print "# of frames:", len(frameList)
b = node.models()
# print "storeview models:", dump_json(b)
modelList = [bm['key'] for bm in b['models']]
for m in modelList:
print "model:", m
print "# of models:", len(modelList)
return {'keys': frameList + modelList}
#************************************************************************
def runExec(node=None, **kwargs):
if not node: node = h2o_nodes.nodes[0]
a = node.rapids(**kwargs)
return a
def runInspect(node=None, key=None, verbose=False, **kwargs):
if not key: raise Exception('No key for Inspect')
if not node: node = h2o_nodes.nodes[0]
a = node.frames(key, **kwargs)
if verbose:
print "inspect of %s:" % key, dump_json(a)
return a
#************************************************************************
def infoFromParse(parse):
if not parse:
raise Exception("parse is empty for infoFromParse")
# assumes just one result from Frames
if 'frames' not in parse:
raise Exception("infoFromParse expects parse= param from parse result: %s" % parse)
if len(parse['frames'])!=1:
raise Exception("infoFromParse expects parse= param from parse result: %s " % parse['frames'])
# is it index[0] or key '0' in a dictionary?
frame = parse['frames'][0]
# need more info about this dataset for debug
numCols = len(frame['columns'])
numRows = frame['rows']
key_name = frame['frame_id']['name']
return numRows, numCols, key_name
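# typical call site (sketch; parseResult comes from a parse command):
#   numRows, numCols, parse_key = infoFromParse(parseResult)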
#************************************************************************
# make this be the basic way to get numRows, numCols
def infoFromInspect(inspect):
if not inspect:
raise Exception("inspect is empty for infoFromInspect")
# assumes just one result from Frames
if 'frames' not in inspect:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s" % inspect)
if len(inspect['frames'])!=1:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s " % inspect['frames'])
# is it index[0] or key '0' in a dictionary?
frame = inspect['frames'][0]
# need more info about this dataset for debug
columns = frame['columns']
key_name = frame['frame_id']['name']
missingList = []
labelList = []
typeList = []
for i, colDict in enumerate(columns): # columns is a list
if 'missing_count' not in colDict:
# debug
print "\ncolDict"
for k in colDict:
print " key: %s" % k
# data
# domain
# string_data
# type
# label
# percentiles
# precision
# mins
# maxs
# mean
# histogram_base
# histogram_bins
# histogram_stride
# zero_count
# missing_count
# positive_infinity_count
# negative_infinity_count
# __meta
mins = colDict['mins']
maxs = colDict['maxs']
missing = colDict['missing_count']
label = colDict['label']
stype = colDict['type']
missingList.append(missing)
labelList.append(label)
typeList.append(stype)
if missing!=0:
print "%s: col: %s %s, missing: %d" % (key_name, i, label, missing)
print "inspect typeList:", typeList
# make missingList empty if all 0's
if sum(missingList)==0:
missingList = []
# no type per col in inspect2
numCols = len(frame['columns'])
numRows = frame['rows']
print "\n%s numRows: %s, numCols: %s" % (key_name, numRows, numCols)
return missingList, labelList, numRows, numCols
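# typical call site (sketch; inspect comes from runInspect/Frames):
#   missingList, labelList, numRows, numCols = infoFromInspect(inspect)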
#************************************************************************
# does all columns unless you specify column index.
# will only return the first (or the specified) column
def runSummary(node=None, key=None, column=None, expected=None, maxDelta=None, noPrint=False, **kwargs):
if not key: raise Exception('No key for Summary')
if not node: node = h2o_nodes.nodes[0]
# return node.summary(key, **kwargs)
i = InspectObj(key=key)
# just so I don't have to change names below
missingList = i.missingList
labelList = i.labelList
numRows = i.numRows
numCols = i.numCols
print "labelList:", labelList
assert labelList is not None
# doesn't take indices? only column labels?
# return first column, unless specified
if not (column is None or isinstance(column, (basestring, int))):
raise Exception("column param should be string or integer index or None %s %s" % (type(column), column))
# either return the first col, or the col identified by label. the column identifier could be a string or an index?
if column is None: # means the summary json when we ask for col 0, will be what we return (do all though)
colNameToDo = labelList
colIndexToDo = range(len(labelList))
elif isinstance(column, int):
colNameToDo = [labelList[column]]
colIndexToDo = [column]
elif isinstance(column, basestring):
colNameToDo = [column]
if column not in labelList:
raise Exception("% not in labellist: %s" % (column, labellist))
colIndexToDo = [labelList.index(column)]
else:
raise Exception("wrong type %s for column %s" % (type(column), column))
# we get the first column as result after walking across all, if no column parameter
desiredResult = None
for (colIndex, colName) in zip(colIndexToDo, colNameToDo):
print "doing summary on %s %s" % (colIndex, colName)
# ugly looking up the colIndex
co = SummaryObj(key=key, colIndex=colIndex, colName=colName)
if not desiredResult:
desiredResult = co
if not noPrint:
for k,v in co:
# only print [0] of mins and maxs because of the e308 values when they don't have dataset values
if k=='mins' or k=='maxs':
print "%s[0]" % k, v[0]
else:
print k, v
if expected is not None:
print "len(co.histogram_bins):", len(co.histogram_bins)
print "co.label:", co.label, "mean (2 places):", h2o_util.twoDecimals(co.mean)
# what is precision. -1?
print "co.label:", co.label, "std dev. (2 places):", h2o_util.twoDecimals(co.sigma)
# print "FIX! hacking the co.percentiles because it's short by two"
# if co.percentiles:
# percentiles = [0] + co.percentiles + [0]
# else:
# percentiles = None
percentiles = co.percentiles
assert len(co.percentiles) == len(co.default_percentiles)
# the thresholds h2o used, should match what we expected
# expected = [0] * 5
# Fix. doesn't check for expected = 0?
# max of one bin
if maxDelta is None:
maxDelta = (co.maxs[0] - co.mins[0])/1000
if expected[0]: h2o_util.assertApproxEqual(co.mins[0], expected[0], tol=maxDelta,
msg='min is not approx. expected')
if expected[1]: h2o_util.assertApproxEqual(percentiles[2], expected[1], tol=maxDelta,
msg='25th percentile is not approx. expected')
if expected[2]: h2o_util.assertApproxEqual(percentiles[4], expected[2], tol=maxDelta,
msg='50th percentile (median) is not approx. expected')
if expected[3]: h2o_util.assertApproxEqual(percentiles[6], expected[3], tol=maxDelta,
msg='75th percentile is not approx. expected')
if expected[4]: h2o_util.assertApproxEqual(co.maxs[0], expected[4], tol=maxDelta,
msg='max is not approx. expected')
# figure out the expected max error
# use this for comparing to sklearn/sort
MAX_QBINS = 1000
if expected[0] and expected[4]:
expectedRange = expected[4] - expected[0]
# because of floor and ceil effects we potentially lose 2 bins (worst case)
# the extra bin for the max value is just an extra bin..ignore it
expectedBin = expectedRange/(MAX_QBINS-2)
maxErr = expectedBin # should we have some fuzz for fp?
else:
print "Test won't calculate max expected error"
maxErr = 0
pt = h2o_util.twoDecimals(percentiles)
# only look at [0] for now...big e308 numbers show up if unpopulated due to not enough unique values in the dataset column
mx = h2o_util.twoDecimals(co.maxs[0])
mn = h2o_util.twoDecimals(co.mins[0])
print "co.label:", co.label, "co.percentiles (2 places):", pt
print "co.default_percentiles:", co.default_percentiles
print "co.label:", co.label, "co.maxs: (2 places):", mx
print "co.label:", co.label, "co.mins: (2 places):", mn
# FIX! why would percentiles be None? enums?
if pt is None:
compareActual = mn, None, None, None, mx
else:
compareActual = mn, pt[2], pt[4], pt[6], mx
h2p.green_print("actual min/25/50/75/max co.label:", co.label, "(2 places):", compareActual)
h2p.green_print("expected min/25/50/75/max co.label:", co.label, "(2 places):", expected)
return desiredResult
# this parses the json object returned for one col from runSummary...returns an OutputObj object
# summaryResult = h2o_cmd.runSummary(key=hex_key, column=0)
# co = h2o_cmd.infoFromSummary(summaryResult)
# print co.label
# legacy
def infoFromSummary(summaryResult, column=None):
return SummaryObj(summaryResult, column=column)
class ParseObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, parseResult, expectedNumRows=None, expectedNumCols=None, noPrint=False, **kwargs):
super(ParseObj, self).__init__(parseResult['frames'][0], "Parse", noPrint=noPrint)
# add my stuff
self.numRows, self.numCols, self.parse_key = infoFromParse(parseResult)
# h2o_import.py does this for test support
if 'python_elapsed' in parseResult:
self.python_elapsed = parseResult['python_elapsed']
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
print "ParseObj created for:", self.parse_key # vars(self)
# Let's experiment with creating new objects that are an api I control for generic operations (Inspect)
class InspectObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, key,
expectedNumRows=None, expectedNumCols=None, expectedMissingList=None, expectedLabelList=None,
noPrint=False, **kwargs):
inspectResult = runInspect(key=key)
super(InspectObj, self).__init__(inspectResult['frames'][0], "Inspect", noPrint=noPrint)
# add my stuff
self.missingList, self.labelList, self.numRows, self.numCols = infoFromInspect(inspectResult)
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
if expectedMissingList is not None:
assert self.missingList == expectedMissingList, "%s %s" % (self.missingList, expectedMissingList)
if expectedLabelList is not None:
assert self.labelList == expectedLabelList, "%s %s" % (self.labelList, expectedLabelList)
print "InspectObj created for:", key #, vars(self)
class SummaryObj(OutputObj):
def check(self,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, **kwargs):
# instance method, so the attributes set in __init__ are visible here
if expectedLabel is not None:
assert self.label == expectedLabel, "%s %s" % (self.label, expectedLabel)
if expectedType is not None:
assert self.type == expectedType, "%s %s" % (self.type, expectedType)
if expectedMissing is not None:
assert self.missing == expectedMissing, "%s %s" % (self.missing, expectedMissing)
if expectedDomain is not None:
assert self.domain == expectedDomain, "%s %s" % (self.domain, expectedDomain)
if expectedBinsSum is not None:
assert self.binsSum == expectedBinsSum, "%s %s" % (self.binsSum, expectedBinsSum)
# column is column name?
def __init__(self, key, colIndex, colName,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, timeoutSecs=30, **kwargs):
# we need both colIndex and colName for doing Summary efficiently
# ugly.
assert colIndex is not None
assert colName is not None
summaryResult = h2o_nodes.nodes[0].summary(key=key, column=colName, timeoutSecs=timeoutSecs, **kwargs)
# this should be the same for all the cols? Or does the checksum change?
frame = summaryResult['frames'][0]
default_percentiles = frame['default_percentiles']
checksum = frame['checksum']
rows = frame['rows']
# assert colIndex < len(frame['columns']), "You're asking for colIndex %s but there are only %s. " % \
# (colIndex, len(frame['columns']))
# coJson = frame['columns'][colIndex]
# is it always 0 now? the one I asked for ?
coJson = frame['columns'][0]
assert checksum !=0 and checksum is not None
assert rows!=0 and rows is not None
# FIX! why is frame['key'] = None here?
# assert frame['key'] == key, "%s %s" % (frame['key'], key)
super(SummaryObj, self).__init__(coJson, "Summary for %s" % colName, noPrint=noPrint)
# how are enums binned. Stride of 1? (what about domain values)
# touch all
# print "vars", vars(self)
coList = [
len(self.data),
self.domain,
self.string_data,
self.type,
self.label,
self.percentiles,
self.precision,
self.mins,
self.maxs,
self.mean,
self.histogram_base,
len(self.histogram_bins),
self.histogram_stride,
self.zero_count,
self.missing_count,
self.positive_infinity_count,
self.negative_infinity_count,
]
assert self.label==colName, "%s You must have told me the wrong colName %s for the given colIndex %s" % \
(self.label, colName, colIndex)
print "you can look at this attributes in the returned object (which is OutputObj if you assigned to 'co')"
for k,v in self:
print "%s" % k,
# hack these into the column object from the full summary
self.default_percentiles = default_percentiles
self.checksum = checksum
self.rows = rows
print "\nSummaryObj for", key, "for colName", colName, "colIndex:", colIndex
print "SummaryObj created for:", key # vars(self)
# now do the assertion checks
self.check(expectedNumRows, expectedNumCols,
expectedLabel, expectedType, expectedMissing, expectedDomain, expectedBinsSum,
noPrint=noPrint, **kwargs)
|
|
"""
Summary
=======
Blogs don't always consist solely of blog entries. Sometimes you want
to add other content to your blog that's not a blog entry. For
example, an "about this blog" page or a page covering a list of your
development projects.
This plugin allows you to have pages served by douglas that aren't
blog entries.
Additionally, this plugin allows you to have a non-blog-entry front
page. This makes it easier to use douglas to run your entire
website.
Install
=======
This plugin comes with douglas. To install, do the following:
1. add ``douglas.plugins.pages`` to the ``load_plugins`` list in
your ``config.py`` file.
2. configure the plugin using the configuration variables below
``pagesdir``
This is the directory that holds the pages files.
For example, if you wanted your pages in
``/home/foo/blog/pages/``, then you would set it to::
py["pagesdir"] = "/home/foo/blog/pages/"
If you have ``blogdir`` defined in your ``config.py`` file which
holds your ``datadir`` and ``themedir`` directories, then you
could set it to::
py["pagesdir"] = os.path.join(blogdir, "pages")
``pages_trigger`` (optional)
Defaults to ``pages``.
This is the url trigger that causes the pages plugin to look for
pages.
py["pages_trigger"] = "pages"
``pages_frontpage`` (optional)
Defaults to False.
If set to True, then pages will show the ``frontpage`` page for
the front page.
This requires you to have a ``frontpage`` file in your pages
directory. The extension for this file works the same way as blog
entries. So if your blog entries end in ``.txt``, then you would
need a ``frontpage.txt`` file.
Example::
py["pages_frontpage"] = True
Usage
=====
Pages looks for urls that start with the trigger ``pages_trigger``
value as set in your ``config.py`` file. For example, if your
``pages_trigger`` was ``pages``, then it would look for urls like
this::
/pages/blah
/pages/blah.html
and pulls up the file ``blah.txt`` [1]_ which is located in the path
specified in the config file as ``pagesdir``.
If the file is not there, it kicks up a 404.
.. [1] The file ending (the ``.txt`` part) can be any file ending
that's valid for entries on your blog. For example, if you have
the textile entryparser installed, then ``.txtl`` is also a valid
file ending.
Template
========
pages formats the page using the ``pages`` template. So you need a
``pages`` template in the themes that you want these pages to be
rendered in. If you want your pages rendered exactly like an entry,
just extend the ``entry`` template.
Python code blocks
==================
pages handles evaluating python code blocks. Enclose python code in
``<%`` and ``%>``. The assumption is that only you can edit your
pages files, so there are no restrictions (security or otherwise).
For example::
<%
print "testing"
%>
<%
x = { "apple": 5, "banana": 6, "pear": 4 }
for mem in x.keys():
print "<li>%s - %s</li>" % (mem, x[mem])
%>
The request object is available in python code blocks. Reference it
by ``request``. Example::
<%
config = request.get_configuration()
print "your datadir is: %s" % config["datadir"]
%>
"""
__description__ = (
"Allows you to include non-blog-entry files in your site and have a "
"non-blog-entry front page.")
__category__ = "content"
__license__ = "MIT"
import StringIO
import os
import os.path
import sys
from douglas import tools
from douglas.entries.fileentry import FileEntry
from douglas.tools import pwrap_error
TRIGGER = "pages"
INIT_KEY = "pages_pages_file_initiated"
def verify_installation(cfg):
retval = True
if not 'pagesdir' in cfg:
pwrap_error("'pagesdir' property is not set in the config file.")
retval = False
elif not os.path.isdir(cfg["pagesdir"]):
pwrap_error(
"'pagesdir' directory does not exist. %s" % cfg["pagesdir"])
retval = False
return retval
def eval_python_blocks(req, body):
localsdict = {"request": req}
globalsdict = {}
old_stdout = sys.stdout
old_stderr = sys.stderr
try:
start = 0
while body.find("<%", start) != -1:
start = body.find("<%")
end = body.find("%>", start)
if start != -1 and end != -1:
codeblock = body[start + 2:end].lstrip()
sys.stdout = StringIO.StringIO()
sys.stderr = StringIO.StringIO()
try:
exec codeblock in localsdict, globalsdict
except Exception as e:
print "ERROR in processing: %s" % e
output = sys.stdout.getvalue() + sys.stderr.getvalue()
body = body[:start] + output + body[end + 2:]
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
return body
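# quick sketch of the substitution (Python 2; stdout/stderr are captured):
#   eval_python_blocks(req, 'a <% print "hi" %> b')  ->  'a hi\n b'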
def is_frontpage(pyhttp, config):
if not config.get("pages_frontpage"):
return False
pathinfo = pyhttp.get("PATH_INFO", "")
if pathinfo == "/":
return True
path, ext = os.path.splitext(pathinfo)
if path == "/index" and not ext in [".rss20", ".atom", ".rss"]:
return True
return False
def is_trigger(pyhttp, config):
trigger = config.get("pages_trigger", TRIGGER)
if not trigger.startswith("/"):
trigger = "/" + trigger
return pyhttp["PATH_INFO"].startswith(trigger)
def cb_filelist(args):
req = args["request"]
pyhttp = req.get_http()
data = req.get_data()
config = req.get_configuration()
page_name = None
if not (is_trigger(pyhttp, config) or is_frontpage(pyhttp, config)):
return
data[INIT_KEY] = 1
datadir = config['datadir']
data['root_datadir'] = config['datadir']
data['bl_type'] = 'page'
pagesdir = config['pagesdir']
if not pagesdir.endswith(os.sep):
pagesdir = pagesdir + os.sep
pathinfo = pyhttp.get('PATH_INFO', '')
path, ext = os.path.splitext(pathinfo)
if pathinfo in ('/', '/index'):
page_name = 'frontpage'
else:
trigger = config.get('pages_trigger', TRIGGER)
page_name = pathinfo[len('/' + trigger) + 1:]
# FIXME - do better job of sanitizing here
page_name = page_name.replace('\\', '').replace('/', '')
if not page_name:
return
# if the page has a theme, we use that. otherwise
# we default to the default theme.
page_name, theme = os.path.splitext(page_name)
if theme:
data['theme'] = theme[1:]
ext = tools.what_ext(config['extensions'].keys(), pagesdir + page_name)
if not ext:
return []
data['root_datadir'] = page_name + '.' + ext
filename = pagesdir + page_name + '.' + ext
if not os.path.isfile(filename):
return []
fe = FileEntry(req, filename, pagesdir)
fe.update({
'body': eval_python_blocks(req, fe['body']),
'absolute_path': TRIGGER,
'fn': page_name,
'file_path': TRIGGER + '/' + page_name,
})
data['bl_type'] = 'page'
# set the datadir back
config['datadir'] = datadir
return [fe]
|
|
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: memory_stats_ops.cc
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
@tf_export('bytes_in_use')
def bytes_in_use(name=None):
r"""TODO: add doc.
Args:
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"BytesInUse", name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"BytesInUse", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "BytesInUse",
name, _ctx._post_execution_callbacks)
return _result
except _core._FallbackException:
return bytes_in_use_eager_fallback(
name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def bytes_in_use_eager_fallback(name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function bytes_in_use
"""
_ctx = ctx if ctx else _context.context()
_inputs_flat = []
_attrs = None
_result = _execute.execute(b"BytesInUse", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"BytesInUse", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_ops.RegisterShape("BytesInUse")(None)
@tf_export('bytes_limit')
def bytes_limit(name=None):
r"""TODO: add doc.
Args:
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"BytesLimit", name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"BytesLimit", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "BytesLimit",
name, _ctx._post_execution_callbacks)
return _result
except _core._FallbackException:
return bytes_limit_eager_fallback(
name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def bytes_limit_eager_fallback(name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function bytes_limit
"""
_ctx = ctx if ctx else _context.context()
_inputs_flat = []
_attrs = None
_result = _execute.execute(b"BytesLimit", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"BytesLimit", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_ops.RegisterShape("BytesLimit")(None)
@tf_export('max_bytes_in_use')
def max_bytes_in_use(name=None):
r"""TODO: add doc.
Args:
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"MaxBytesInUse", name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"MaxBytesInUse", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"MaxBytesInUse", name, _ctx._post_execution_callbacks)
return _result
except _core._FallbackException:
return max_bytes_in_use_eager_fallback(
name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def max_bytes_in_use_eager_fallback(name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function max_bytes_in_use
"""
_ctx = ctx if ctx else _context.context()
_inputs_flat = []
_attrs = None
_result = _execute.execute(b"MaxBytesInUse", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"MaxBytesInUse", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_ops.RegisterShape("MaxBytesInUse")(None)
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "BytesInUse"
# output_arg {
# name: "out"
# type: DT_INT64
# }
# is_stateful: true
# }
# op {
# name: "BytesLimit"
# output_arg {
# name: "out"
# type: DT_INT64
# }
# is_stateful: true
# }
# op {
# name: "MaxBytesInUse"
# output_arg {
# name: "out"
# type: DT_INT64
# }
# is_stateful: true
# }
_op_def_lib = _InitOpDefLibrary(b"\n\030\n\nBytesInUse\032\007\n\003out\030\t\210\001\001\n\030\n\nBytesLimit\032\007\n\003out\030\t\210\001\001\n\033\n\rMaxBytesInUse\032\007\n\003out\030\t\210\001\001")
|
|
import json
import flask
import requests
import markdown
import httplib
from time import gmtime, strftime
from flask import Flask, request, redirect, url_for, g, render_template, flash, session
from flask import abort,make_response, Markup, send_from_directory,send_file
from werkzeug.utils import secure_filename
from random import randrange
import sys,os
from mimetypes import MimeTypes
import urllib
import binascii
from rauth import OAuth2Service
sys.path.append('sys/controller')
sys.path.append('sys/model')
# import all helpers
from AuthorHelper import *
from DatabaseAdapter import *
from PostHelper import *
from RequestHelper import *
from CircleHelper import *
from SettingHelper import *
# import all controllers
from PostController import *
from AuthorController import *
from RequestController import *
from CommentController import *
from ServiceController import *
from ServerController import *
from PostPermissionController import *
from ImageHelper import *
DEBUG = True
# create a new database obj
dbAdapter = DatabaseAdapter()
# connect
dbAdapter.connect()
dbAdapter.setAutoCommit()
ahelper = AuthorHelper(dbAdapter)
aController = AuthorController(dbAdapter)
# use the connected dbAdapter to initialize postHelper obj
postHelper = PostHelper(dbAdapter)
postController = PostController(dbAdapter)
#
reController = RequestController(dbAdapter)
#
circleHelper = CircleHelper(dbAdapter)
circleController = CircleController(dbAdapter)
#
commentController = CommentController(dbAdapter)
settingHelper = SettingHelper(dbAdapter)
#
serviceController = ServiceController(dbAdapter)
#
postPermissionHelper = PostPermissionController(dbAdapter)
#
serverController = ServerController(dbAdapter)
imageHelper = ImageHelper(dbAdapter)
#Allowed file extensions
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__)
app.config.from_object(__name__)
REGISTER_RESTRICTION = None
POST_REMOTE_ACCESS_RESTRICTION = None
IMAGE_REMOTE_ACCESS_RESTRICTION = None
# add upload
UPLOAD_FOLDER='upload/image'
PERMISSION_IMAGE='static/image'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 5 * 1024 * 1024
app.secret_key = os.urandom(24)
admin_id = '000000'
admin_name='admin'
admin_model = False
error = None
'''GitHub OAuth connection'''
GITHUB_CLIENT_ID = '5a4bdc64c247e1f45f61'
GITHUB_CLIENT_SECRET = '0640b9e5a32d2ebe6f4713158f2321f8ac43cee4'
github = OAuth2Service(
client_id=GITHUB_CLIENT_ID,
client_secret=GITHUB_CLIENT_SECRET,
name='github',
authorize_url='https://github.com/login/oauth/authorize',
access_token_url='https://github.com/login/oauth/access_token',
base_url='https://api.github.com/')
def flaskPostToJson():
'''Ah the joys of frameworks! They do so much work for you
that they get in the way of sane operation!'''
if (request.json != None):
return request.json
elif (request.data != None and request.data != ''):
return json.loads(request.data)
else:
return json.loads(request.form.keys()[0])
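# Sketch: handles 'Content-Type: application/json' bodies (request.json),
# raw JSON strings (request.data), and JSON smuggled in as the only form key:
#   payload = flaskPostToJson()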
# default path
@app.route('/', methods=['GET', 'POST'])
def root():
'''direct for login'''
return redirect(url_for('login'))
@app.route('/<aid>', methods=['GET', 'POST'])
def author_view(aid):
"""
View the main page
"""
if 'logged_in' in session and aid ==session['logged_id']:
username = session['logged_in']
msgCount = reController.getRequestCountByAid(aid)
countnumber = json.loads(msgCount)['count']
return render_template('header.html',msgCount = countnumber)
else:
return redirect(url_for('login'))
@app.route('/<aid>/profile',methods=['GET'])
def view_profile(aid):
"""
return the html for the profile page
"""
return render_template('profile.html')
@app.route('/<aid>/profile/image/<imagename>',methods=['GET'])
def view_profile_image(aid,imagename):
"""
load the profile image
"""
import os.path
path = os.path.join(app.config['UPLOAD_FOLDER'],imagename);
if os.path.isfile(path):
return send_from_directory(app.config['UPLOAD_FOLDER'],imagename, as_attachment=False)
else:
return send_from_directory(app.config['UPLOAD_FOLDER'],"default.jpeg", as_attachment=False)
@app.route('/<aid>/profile.json',methods=['GET'])
def get_profile(aid):
"""
get the profile of the author with the given aid
"""
if 'logged_in' in session and aid ==session['logged_id']:
try:
re_aid = request.args.get("aid")
re = aController.getAuthorByAid(re_aid)
if re != None:
return re
return redirect(url_for('/'))
except KeyError:
return redirect(url_for('/'))
return redirect(url_for('/'))
@app.route('/<aid>/profile/change',methods=['POST'])
def change_profile(aid):
"""redirect after update profile change"""
if 'logged_in' in session and aid ==session['logged_id']:
return change_author_profile(aid)
else:
return redirect(url_for('/'))
def change_author_profile(aid):
"""
update the profile of the author with aid; first check the 'type' argument,
which selects either the profile information update or the password update
"""
try:
keyword = request.args.get('type')
print keyword
except KeyError:
return "Wrong URL",404
if keyword == "information":
gender=""
filename=""
email = request.form['email']
#parse optional information
nickName=request.form['nick_name']
birthday =request.form['birthday']
city = request.form['city']
try:
file = request.files['profile_image']
filename = file.filename
except KeyError:
file =None
try:
gender = request.form['gender']
except KeyError:
gender = ""
if file!=None and filename!="":
filename = save_image(aid,file)
if ahelper.updateAuthorInfo(aid,email,gender,city,birthday,filename) and ahelper.updateNickNameByAid(aid,nickName):
re = make_response("OK")
else:
re = make_response("Failed")
return re
elif keyword == "password":
new_pwd = request.form['register_pwd']
if ahelper.updatePasswordByAid(aid,new_pwd):
re = make_response("OK")
else:
re = make_response("Error")
return re
@app.route('/<aid>/admin',methods=['GET','POST'])
def admin_page(aid):
'''
direct to the admin page and render the html. First check the session:
'admin_model' must be present and aid must equal its value. If both
conditions hold, continue; otherwise return 404
'''
if 'admin_model' not in session or aid != session['admin_model']:
abort(404);
try:
keyword = request.args.get('page')
if keyword == 'viewpost':
return render_template("admin_view_post.html")
except KeyError:
pass
return render_template("admin.html")
@app.route('/<aid>/admin/delete/author',methods=['GET'])
def admin_author_delete(aid):
'''
delete an author in admin mode: first check the admin conditions,
then get the 'aid' argument
'''
if 'admin_model' not in session or aid != session['admin_model']:
abort(404);
try:
keyword = request.args.get('aid')
if ahelper.deleteAuthor(keyword) == True:
re = make_response("OK")
else:
re = make_response("Wrong")
return re
except KeyError:
return "Wrong URL",404
@app.route('/<aid>/admin/delete/post',methods=['GET'])
def admin_post_delete(aid):
"""
delete a post in admin mode; first check the admin-mode conditions
"""
if 'admin_model' not in session or aid != session['admin_model']:
abort(404);
try:
keyword = request.args.get('pid')
if postHelper.deletePostByPid(keyword) == True:
re = make_response("OK")
else:
re = make_response("Wrong")
return re
except KeyError:
return "Wrong URL",404
@app.route('/<aid>/admin/author/approve',methods=['GET'])
def admin_author_approve(aid):
'''
approve applications from authors on the waiting list (admin mode)
'''
if 'admin_model' not in session or aid != session['admin_model']:
abort(404);
try:
keyword = request.args.get('aid')
if ahelper.confirmAuthor(keyword) == True:
re = make_response("OK")
else:
re = make_response("Wrong")
return re
except KeyError:
return "Wrong URL",404
@app.route('/<aid>/admin/author/deny',methods=['GET'])
def admin_author_deny(aid):
'''deny application in admin'''
if 'admin_model' not in session or aid != session['admin_model']:
abort(404);
try:
keyword = request.args.get('aid')
if ahelper.deleteAuthor(keyword) == True:
re = make_response("OK")
else:
re = make_response("Wrong")
return re
except KeyError:
return "Wrong URL",404
@app.route('/<aid>/admin/view/post',methods=['GET'])
def admin_get_post(aid):
'''get post in admin mode'''
if 'admin_model' not in session or aid != session['admin_model']:
abort(404);
try:
keyword = request.args.get('aid')
post = postController.getPostByAid(keyword)
return post,200
except KeyError:
return "Wrong URL",404
@app.route('/<aid>/admin/view/circle',methods=['GET'])
def admin_get_circle(aid):
'''get friendship in admin mode'''
if 'admin_model' not in session or aid != session['admin_model']:
abort(404);
try:
keyword = request.args.get('aid')
re = circleController.getFriendList(keyword)
return re
except KeyError:
return "Wrong URL",404
@app.route('/<aid>/admin/view/tmp_author',methods=['GET'])
def admin_get_tmp_author(aid):
'''get the authors that haven't been approved by the admin'''
if 'admin_model' not in session or aid != session['admin_model']:
abort(404);
response = aController.getAllTmpAuthor();
if response == None:
response = make_response("ERROR")
return response
@app.route('/<aid>/admin/manage/<otheraid>',methods=['POST'])
def admin_change_author(aid,otheraid):
'''update the author profile in admin'''
if 'admin_model' not in session or aid != session['admin_model']:
abort(404);
return change_author_profile(otheraid)
@app.route('/<aid>/admin/global_setting/signup_policy',methods=['GET'])
def admin_change_signup_policy(aid):
'''toggle whether the forum is open for registration (admin mode)'''
if 'admin_model' not in session or aid != session['admin_model']:
abort(404)
try:
operation = request.args.get('operation')
if operation == 'turnon':
settingHelper.removeSignUpRestriction()
re = make_response("OK")
elif operation == 'turnoff':
settingHelper.addSignUpRestriction()
re = make_response("OK")
else:
re = make_response("Error",404)
return re
except KeyError:
return "Wrong URL",404
@app.route('/ajax/aid')
def getuid():
'''get the user id'''
if 'logged_in' not in session:
return redirect(url_for('login'))
else:
re = make_response(session['logged_id'])
re.headers['Content-Type']='text/plain'
return re
@app.route('/ajax/author_name')
def getaname():
'''get the username'''
if 'logged_in' not in session:
return redirect(url_for('login'))
else:
re = make_response(session['logged_in'])
re.headers['Content-Type']='text/plain'
return re
# login page
@app.route('/login', methods=['GET', 'POST'])
def login():
'''author login'''
if request.method == 'POST':
authorName =request.form['username']
password =request.form['password']
json_str = ahelper.authorAuthenticate(authorName,password)
if json_str == False:
re = make_response("False")
re.headers['Content-Type']='text/plain'
return re ,200
elif json_str=="NO_CONFIRMED":
re = make_response("NO_CONFIRMED")
return re ,200
else:
session['logged_in'] = authorName
session['logged_id'] = json.loads(json_str)['aid']
if(session['logged_id']==admin_id):
session['admin_model']= admin_id;
return json_str,200
else:
if not session.get('oauth_state'):
session['oauth_state'] = binascii.hexlify(os.urandom(24))
authorize_url = github.get_authorize_url(scope='user,notifications', state=session.get('oauth_state'))
return render_template('header.html',authorize_url=authorize_url)
else:
return render_template('header.html',github=True)
if "logged_in" in session:
aid = session['logged_id']
msgCount = reController.getRequestCountByAid(aid)
countnumber = json.loads(msgCount)['count']
return render_template('header.html',msgCount = countnumber)
else:
return render_template('header.html')
# register page
@app.route('/register', methods=['PUT', 'POST'])
def register():
'''new user register'''
if request.method == 'POST':
#parse require information
gender=""
email = request.form['email']
authorName=request.form['author_name']
password=request.form['register_pwd']
#parse optional information
file = request.files['profile_image']
#print "--"+file.filename
nickName=request.form['nick_name']
birthday =request.form['birthday']
city = request.form['city']
try:
gender = request.form['gender']
except KeyError:
gender = ""
if REGISTER_RESTRICTION:
aid_json = ahelper.addLocalTmpAuthor(authorName,password,nickName)
else:
aid_json = ahelper.addAuthor(authorName,password,nickName)
if aid_json == False:
re = make_response("False")
re.headers['Content-Type']='text/plain'
return re
else:
aid = json.loads(aid_json)['aid']
if not REGISTER_RESTRICTION:
session['logged_in'] = authorName
session['logged_id'] = aid
path =""
if(file!=None and file.filename!=""):
path = save_image(aid,file)
if ahelper.updateAuthorInfo(aid,email,gender,city,birthday,path) ==False:
abort(500)
if not REGISTER_RESTRICTION:
return aid_json
else:
re= make_response("NO_CONFIRMED")
return re
return redirect(url_for('/'))
def save_image(aid,file):
'''upload the new image into service'''
filename = aid+"."+file.filename.rsplit('.', 1)[1]
path = os.path.join(app.config['UPLOAD_FOLDER'],filename)
file.save(path)
return filename
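# e.g. save_image('42', file) with an uploaded 'me.png' stores
# 'upload/image/42.png' and returns '42.png' (sketch; `file` is the
# werkzeug FileStorage object from a Flask upload field)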
@app.route('/<aid>/recommended_authorlist.json', methods=['GET'])
def authorlist(aid):
'''get the recommended author list'''
if ('logged_in' not in session) or (aid !=session['logged_id']):
return redirect(url_for('/'))
re = aController.getRecommendedAuthor(aid)
return re
# search authors with keyword
@app.route('/<aid>/author/search',methods=['GET'])
def search_author(aid):
'''search authors by keyword'''
if ('logged_in' not in session) or (aid !=session['logged_id']):
return redirect(url_for('/'))
try:
keyword = request.args.get('key')
except KeyError:
return redirect(url_for('/'))
if keyword!=None and keyword!="":
re = aController.searchAuthorByString(aid,keyword)
return re
@app.route('/<aid>/authorlist.json',methods=['GET'])
def allauthorlist(aid):
'''get all other authors (excluding aid)'''
if ('logged_in' not in session) or (aid !=session['logged_id']):
return redirect(url_for('/'))
re = aController.getOtherAuthor(aid)
return re
@app.route('/<aid>/circlelist.json',methods=['GET'])
def circleauthorlist(aid):
'''get the friendship list by author id'''
if ('logged_in' not in session) or (aid !=session['logged_id']):
return redirect(url_for('/'))
re = circleController.getFriendList(aid)
return re
@app.route('/<aid>/circle',methods=['GET'])
def render_circle_modal(aid):
'''render the friendship modal by aid'''
if ('logged_in' not in session) or (aid !=session['logged_id']):
return redirect(url_for('/'))
return render_template('view_circles_modal.html')
@app.route('/<aid>/circle/delete',methods=['GET'])
def delete_friends(aid):
'''delete friends by aid'''
if ('logged_in' not in session) or (aid !=session['logged_id']):
return redirect(url_for('/'))
try:
keyword = request.args.get('aid')
if circleController.deleteFriendOfAuthor(aid,keyword):
re =make_response("OK")
else:
re =make_response("Failed")
return re
except KeyError:
return redirect(url_for('/'))
@app.route('/<aid>/messages.json', methods=['GET'])
def messages(aid):
'''get all messages by aid'''
if ('logged_in' not in session) or (aid !=session['logged_id']):
abort(404)
else:
jsonstring = reController.getAllRequestByAid(aid)
return jsonstring
# logout
@app.route('/logout')
def logout():
'''logout'''
session.pop('logged_in', None)
session.pop('oauth_state', None)
return redirect(url_for('login'))
# make request
@app.route('/<aid>/author/request',methods=['GET'])
def addfriend(aid):
'''send add friend request'''
if ('logged_in' not in session) or (session['logged_id'] != aid):
abort(404)
else:
try:
request_aid = request.args.get('recipient')
if reController.sendRequest(aid,request_aid) is True:
re = make_response("OK",200)
return re
else:
re = make_response("Existed",409)
return re
except KeyError:
return redirect(url_for(aid))
#accept request
@app.route('/<recipientAid>/author/request/accept',methods=['GET'])
def acceptRequest(recipientAid):
if ('logged_in' not in session) or (session['logged_id'] != recipientAid):
return redirect(url_for('/'))
else:
try:
senderAid = request.args.get('sender')
if( not aController.isRemoteAuthor(senderAid) ):
if( reController.acceptRequestFromSender(recipientAid,senderAid) ):
re = make_response("OK",200)
else:
re = make_response("Failed")
return re
else:
remoteAuthor = aController.getAuthorInfoByAid(senderAid)
localAuthor = aController.getAuthorInfoByAid(recipientAid)
recipientAid = localAuthor.getAid()
recipientName = localAuthor.getNickname()
remoteSenderAid = remoteAuthor.getAid()
remoteUrl = serverController.getServerUrlBySid(remoteAuthor.getSid())
response = sendAcceptRequestToRemoteServer(recipientAid,recipientName,remoteSenderAid,remoteUrl)
if (response == True) and reController.acceptRequestFromSender(recipientAid,senderAid):
re = make_response("OK",200)
else:
re = make_response("Failed")
return re
except KeyError:
return redirect(url_for('aid'))
#accept request
@app.route('/<aid>/author/request/deny',methods=['GET'])
def denyRequest(aid):
'''deny friend request by author id'''
if ('logged_in' not in session) or (session['logged_id'] != aid):
return redirect(url_for('/'))
else:
try:
senderAid = request.args.get('sender')
if reController.deleteRequest(senderAid,aid):
re = make_response("OK")
return re
else:
re = make_response("Fail")
return re
except KeyError:
return redirect(url_for('aid'))
'''redirect to main html (posts)'''
@app.route('/author/<authorName>')
def renderStruct(authorName):
if ('logged_in' in session) and (session['logged_in'] == authorName):
return render_template('struct.html')
else:
return abort(404)
# get all the new posts that a specific author can view from the server
'''get post by author id'''
@app.route('/<aid>/pull')
def getPostForAuthor(aid):
if ('logged_in' in session) and (session['logged_id'] == aid):
#aid = session['logged_id']
if aid == None:
return json.dumps({'status':None}),200
else:
post = postController.getPost(aid)
print(post)
return post,200
else:
return abort(404)
@app.route('/remote/posts')
def getRemotePublicPostsForAuthor():
posts = getPublicPostsFromRemoteServer()
return posts,200
'''render markdown submitted from the input form'''
@app.route('/markdown',methods=['GET','POST'])
def index():
if request.method == 'POST':
content = request.form['postContent']
content = Markup(markdown.markdown(content))
html_string = render_template('markdown.html', **locals())
return html_string
return render_template('markdown_input.html')
'''get comments by author'''
@app.route('/author/<aid>/posts/comments/',methods=['GET'])
def getCommentsForAuthor(aid):
if ('logged_in' in session) and (session['logged_id'] == aid):
return commentController.getCommentsForAuthor(aid),200
else:
return abort(404)
'''check whether the uploaded file type is allowed'''
def allowed_file(filename):
return '.' in filename and filename.rsplit('.' ,1)[1] in app.config['ALLOWED_EXTENSIONS']
'''upload new image by author id and post id'''
@app.route('/<aid>/<pid>/upload',methods=['POST'])
def upload(aid,pid):
if ('logged_in' not in session) or (session['logged_id'] != aid):
abort(404)
file = request.files['img_file']
if file:
if not allowed_file(file.filename):
re = make_response("Wrong Type");
else:
filename = save_image(pid,file)
iid = imageHelper.insertImage(filename,aid,pid)
if iid !=False:
re = make_response("OK");
else:
re = make_response("DatabaseError")
return re
else:
abort(404)
'''view the image in post by author id'''
@app.route('/<aid>/<pid>/image/view',methods=['GET'])
def viewPostImage(aid,pid):
if ('logged_in' not in session) or (session['logged_id'] != aid):
abort(404)
image = imageHelper.getImageByPid(pid)
if image == False:
re = make_response("DatabaseError")
return re
else:
if len(image) >0:
filename = image[0].getPath();
return send_from_directory(app.config['UPLOAD_FOLDER'],filename)
else:
return send_from_directory(app.config['UPLOAD_FOLDER'],"unfound.gif")
'''upload new post to server by author name'''
@app.route('/<authorName>/post',methods=['PUT','POST'])
def uploadPostToServer(authorName):
if ('logged_in' in session) and (session['logged_in'] == authorName):
aid = session['logged_id']
postName = authorName
postObj = flaskPostToJson()
        try:
            markdown_flag = request.args.get('markdown')
            postMsg = postObj['message']
            if markdown_flag == 'true':
                content = Markup(markdown.markdown(postMsg))
                postMsg = render_template('markdown.html', **locals())
except KeyError:
return "Wrong URL",404
postTitle = postObj['title']
postType = postObj['type']
postPermission = postObj['permission']
postDate = strftime("%Y-%m-%d %H:%M:%S", gmtime())
        if aid is None:
return json.dumps({'status':False}),200
else:
newPost = Post(None,aid,postName,postDate,postTitle,postMsg,postType,postPermission)
result = postHelper.addPost(aid,postTitle,postMsg,postType,postPermission)
return json.dumps({'status':result}),200
else:
return abort(404)
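# Example request body for this endpoint, inferred from the keys read above
# (values illustrative):
#
#   POST /<authorName>/post?markdown=true
#   {
#       "title": "My first post",
#       "message": "**hello** world",
#       "type": "text/html",
#       "permission": "public"
#   }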
'''Retrieve the posting permission information for a specific author by authorName'''
@app.route('/<authorName>/post/getPermissionList/',methods=['GET'])
def getPermissionList(authorName):
if ('logged_in' in session) and (session['logged_in'] == authorName):
if request.method == 'GET':
aid = session['logged_id']
# Get the permission: friend or fof, from parameter
permission = request.args.get('option')
if permission == "specify":
friendlist = circleHelper.getFriendList(aid)
                if friendlist is not None:
return json.dumps(friendlist),200
else:
return abort(404)
'''Get all the comments for a specific post from DB'''
@app.route('/author/<aid>/posts/<pid>/comments/',methods=['GET'])
def getCommentsForPost(aid,pid):
    if ('logged_in' in session) and (session['logged_id'] == aid):
result = commentController.getCommentsForPost(pid)
return result,200
else:
return abort(404)
'''Add a comment for a specific post into DB'''
@app.route('/author/<aid>/posts/<pid>/comments/',methods=['PUT','POST'])
def addCommentForPost(aid,pid):
if ('logged_in' in session) and (session['logged_id'] == aid):
commentObj = flaskPostToJson()
#Follow the json example on github
aid = commentObj['posts'][0]['author']['id']
content = commentObj['posts'][0]['comments'][0]['comment']
pid = commentObj['posts'][0]['guid']
result = commentController.addCommentForPost(aid,pid,content)
if result != None:
return result,200
else:
return None,200
else:
return abort(404)
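# Example request body for this endpoint, inferred from the lookups above
# (values illustrative):
#
#   {
#       "posts": [{
#           "author": {"id": "<aid>"},
#           "guid": "<pid>",
#           "comments": [{"comment": "Nice post!"}]
#       }]
#   }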
'''get the GitHub notifications for an author'''
@app.route('/<authorName>/github/notification')
def getNotification(authorName):
authorToken = authorName + '_authToken'
if ('logged_in' in session) and (session['logged_in'] == authorName) and (authorToken in session):
# get author auth token
authorAuthToken=session[authorToken]
# get auth session
auth_session = github.get_session(token=authorAuthToken)
aid = session['logged_id']
        if aid is None:
return json.dumps({'status':None}),200
else:
            author_notification = aid + '_notification'
notification_number = session[author_notification]
notifications={}
r = auth_session.get('/users/'+authorName+'/received_events')
for i in range(notification_number,len(r.json())):
notification={}
content=""
title = ""
repo=""
ref=""
for key,value in r.json()[i].iteritems():
if key == "payload":
for key1,value1 in value.iteritems():
if key1 == "commits":
for j in range(0,len(value1)):
for key2,value2 in value1[j].iteritems():
if key2 == "message":
content = content + value2 + " "
elif key1 == "ref":
ref = value1
elif key == "created_at":
notification['time']=value
elif key == "actor":
for key1,value1 in value.iteritems():
if key1 == "login":
title = title + value1
elif key == "repo":
for key1,value1 in value.iteritems():
if key1 == "name":
repo = value1
elif key == "type":
if value == "PushEvent":
title = title + ' Push ' + repo + ' ' + ref
elif value == "CommitCommentEvent":
title = title + ' Commit Comment ' + repo + ' ' + ref
elif value == "CreateEvent":
title = title + ' Create ' + repo + ' ' + ref
elif value == "DeleteEvent":
title = title + ' Delete ' + repo + ' ' + ref
elif value == "DeploymentEvent":
                            title = title + ' Deployment ' + repo + ' ' + ref
elif value == "DownloadEvent":
title = title + ' Download ' + repo + ' ' + ref
elif value == "FollowEvent":
title = title + ' Follow ' + repo + ' ' + ref
elif value == "ForkEvent":
title = title + ' Fork ' + repo + ' ' + ref
notification['title']=title
notification['content']=content
notifications[i]=notification
session[author_notification] = len(r.json())
return json.dumps(notifications),200
else:
return abort(404)
'''get authorization from github'''
@app.route('/github/callback')
def callback():
code = request.args['code']
state = request.args['state'].encode('utf-8')
#if state!=session.get('oauth_state'):
#return render_template('header.html')
# get auth session
auth_session = github.get_auth_session(data={'code': code})
# get author name
r = auth_session.get('/user')
authorName = r.json()['login']
# store author token
authorToken = authorName + '_authToken'
session[authorToken] = auth_session.access_token
# try to register account
aid_json = ahelper.addAuthor(authorName,123,authorName)
    if aid_json is not False:
aid = json.loads(aid_json)['aid']
session['logged_in'] = authorName
session['logged_id'] = aid
else:
# try to log in
json_str = ahelper.authorAuthenticate(authorName,123)
        if json_str is not False:
session['logged_in'] = authorName
session['logged_id'] = json.loads(json_str)['aid']
else:
re = make_response("False")
re.headers['Content-Type']='text/plain'
return re
aid = session['logged_id']
    author_notification = aid + '_notification'
session[author_notification] = 0
return redirect(url_for('login'))
'''get all the new posts that a specific author can view from the server '''
@app.route('/<aid>/pull/mypost')
def getMyPostForAuthor(aid):
if ('logged_in' in session) and (session['logged_id'] == aid):
#aid = session['logged_id']
        if aid is None:
return json.dumps({'status':None}),200
else:
post = postController.getMyPost(aid)
return post,200
else:
return abort(404)
'''get all posts by author'''
@app.route('/<authorName>/mypost')
def myPost(authorName):
if ('logged_in' in session) and (session['logged_in'] == authorName):
return render_template('mypost.html')
else:
        abort(404)
'''delete my post by author'''
@app.route('/<authorName>/mypost/delete/<pid>')
def myPostDelete(authorName,pid):
if ('logged_in' in session) and (session['logged_in'] == authorName):
result = postHelper.deletePostByPid(pid)
return render_template('mypost.html')
else:
        abort(404)
'''
Public API: receive the friend request from a remote server
'''
@app.route('/friendrequest',methods=['GET','POST'])
def friendRequestService():
    if request.method == 'POST':
        print(request)
        result = serviceController.receiveFriendRequestFromRemoteServer(json.loads(request.data))
        if result:
return make_response("", 200)
else:
return make_response("", 409)
else:
return make_response("", 409)
'''
Public API: receive the friend request from a remote server
'''
@app.route('/service/friendrequest',methods=['GET','POST'])
def friendRequestService2():
    if request.method == 'POST':
        result = serviceController.receiveFriendRequestFromRemoteServer(json.loads(request.data))
        if result:
return make_response("", 200)
else:
return make_response("", 409)
else:
return make_response("", 409)
'''
Don't access this API from client side
This is for internal server to use only
'''
#@app.route('/response/accept')
def sendAcceptRequestToRemoteServer(recipientAid,recipientName,remoteSenderAid,remoteSid):
payload = serviceController.sendFriendRequestToRemoteServer(recipientAid,recipientName,remoteSenderAid,remoteSid)
    if payload is not None:
        url = payload['friend']['host']
        headers = {'content-type': 'application/json'}
        response = requests.post(url, data=json.dumps(payload), headers=headers)
        if response.status_code == 200:
return True
else:
return False
return False
'''
Public API: all posts marked as public on the server
'''
@app.route('/service/posts',methods=['GET'])
def sendPublicPostsToRemoteServerService():
payload = serviceController.sendPublicPostsToRemoteServer()
    if payload is not None:
return json.dumps(payload),200
else:
return json.dumps([]),200
'''
Public API: all posts marked as public on the server
'''
@app.route('/posts',methods=['GET'])
def sendPublicPostsToRemoteServer():
payload = serviceController.sendPublicPostsToRemoteServer()
    if payload is not None:
return json.dumps(payload),200
else:
return json.dumps([]),200
'''
Public API: send global authors to remote servers
'''
@app.route('/global/authors',methods=['GET'])
def sendGlobalAuthorsToRemoteServer():
payload = serviceController.sendGlobalAuthorsToRemoteServer()
    if payload is not None:
return json.dumps(payload),200
else:
return json.dumps([]),200
'''view images that have permission'''
@app.route('/permission/image/<imagename>',methods=['GET'])
def view_permission_image(imagename):
    imagename = PERMISSION_IMAGE + '/' + imagename + '.png'
return send_file(imagename, mimetype='image/png')
'''upload post's permission by author'''
@app.route('/<authorName>/postpermission/<pid>',methods=['PUT','POST'])
def uploadPostPermissionToServer(authorName,pid):
if ('logged_in' in session) and (session['logged_in'] == authorName):
send = flaskPostToJson()
checked = send['data']
result = postPermissionHelper.addPostPermission(pid,checked)
return json.dumps({'status':result}),200
else:
return abort(404)
def getPublicPostsFromRemoteServer():
url = "http://cs410-06/posts"
response = requests.get(url)
result = serviceController.getPublicPostsFromRemoteServer(response.content)
return result
if __name__ == '__main__':
app.debug = True
REGISTER_RESTRICTION = settingHelper.getSignUpRestrictionValue()
POST_REMOTE_ACCESS_RESTRICTION = settingHelper.getRemotePostAccessRestrictionValue()
IMAGE_REMOTE_ACCESS_RESTRICTION = settingHelper.getRemoteImageAccessRestrictionValue()
app.run(host='0.0.0.0',port=8080)
|
|
"""
Low-level operating system functions from :mod:`os`.
Cooperative I/O
===============
This module provides cooperative versions of :func:`os.read` and
:func:`os.write`. These functions are *not* monkey-patched; you
must explicitly call them or monkey patch them yourself.
POSIX functions
---------------
On POSIX, non-blocking IO is available.
- :func:`nb_read`
- :func:`nb_write`
- :func:`make_nonblocking`
All Platforms
-------------
On non-POSIX platforms (e.g., Windows), non-blocking IO is not
available. On those platforms (and on POSIX), cooperative IO can
be done with the threadpool.
- :func:`tp_read`
- :func:`tp_write`
Child Processes
===============
The functions :func:`fork` and (on POSIX) :func:`forkpty` and :func:`waitpid` can be used
to manage child processes.
.. warning::
Forking a process that uses greenlets does not eliminate all non-running
greenlets. Any that were scheduled in the hub of the forking thread in the parent
remain scheduled in the child; compare this to how normal threads operate. (This behaviour
    may change in a subsequent major release.)
"""
from __future__ import absolute_import
import os
import sys
from gevent.hub import get_hub, reinit, PY3
import errno
EAGAIN = getattr(errno, 'EAGAIN', 11)
try:
import fcntl
except ImportError:
fcntl = None
__implements__ = ['fork']
__extensions__ = ['tp_read', 'tp_write']
_read = os.read
_write = os.write
ignored_errors = [EAGAIN, errno.EINTR]
if fcntl:
__extensions__ += ['make_nonblocking', 'nb_read', 'nb_write']
def make_nonblocking(fd):
"""Put the file descriptor *fd* into non-blocking mode if possible.
:return: A boolean value that evaluates to True if successful."""
flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
if not bool(flags & os.O_NONBLOCK):
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
return True
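    # A minimal usage sketch for the non-blocking helpers above (the pipe and
    # byte values are illustrative, not part of this module):
    #
    #   import os
    #   r, w = os.pipe()
    #   make_nonblocking(r)
    #   make_nonblocking(w)
    #   nb_write(w, b'data')            # yields to the hub instead of blocking
    #   assert nb_read(r, 4) == b'data'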
def nb_read(fd, n):
"""Read up to `n` bytes from file descriptor `fd`. Return a string
containing the bytes read. If end-of-file is reached, an empty string
is returned.
The descriptor must be in non-blocking mode.
"""
hub, event = None, None
while True:
try:
return _read(fd, n)
except OSError as e:
if e.errno not in ignored_errors:
raise
if not PY3:
sys.exc_clear()
if hub is None:
hub = get_hub()
event = hub.loop.io(fd, 1)
hub.wait(event)
def nb_write(fd, buf):
"""Write bytes from buffer `buf` to file descriptor `fd`. Return the
number of bytes written.
The file descriptor must be in non-blocking mode.
"""
hub, event = None, None
while True:
try:
return _write(fd, buf)
except OSError as e:
if e.errno not in ignored_errors:
raise
if not PY3:
sys.exc_clear()
if hub is None:
hub = get_hub()
event = hub.loop.io(fd, 2)
hub.wait(event)
def tp_read(fd, n):
"""Read up to *n* bytes from file descriptor *fd*. Return a string
containing the bytes read. If end-of-file is reached, an empty string
is returned.
Reading is done using the threadpool.
"""
return get_hub().threadpool.apply(_read, (fd, n))
def tp_write(fd, buf):
"""Write bytes from buffer *buf* to file descriptor *fd*. Return the
number of bytes written.
Writing is done using the threadpool.
"""
return get_hub().threadpool.apply(_write, (fd, buf))
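# A threadpool-based sketch that works on any platform, including Windows
# (file descriptors illustrative):
#
#   import os
#   r, w = os.pipe()
#   tp_write(w, b'data')            # os.write runs in the hub's threadpool
#   assert tp_read(r, 4) == b'data'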
if hasattr(os, 'fork'):
_raw_fork = os.fork
def fork_gevent():
"""
Forks the process using :func:`os.fork` and prepares the
child process to continue using gevent before returning.
.. note::
The PID returned by this function may not be waitable with
either the original :func:`os.waitpid` or this module's
:func:`waitpid` and it may not generate SIGCHLD signals if
libev child watchers are or ever have been in use. For
example, the :mod:`gevent.subprocess` module uses libev
child watchers (which parts of gevent use libev child
watchers is subject to change at any time). Most
applications should use :func:`fork_and_watch`, which is
monkey-patched as the default replacement for
:func:`os.fork` and implements the ``fork`` function of
this module by default, unless the environment variable
``GEVENT_NOWAITPID`` is defined before this module is
imported.
.. versionadded:: 1.1b2
"""
result = _raw_fork()
if not result:
reinit()
return result
def fork():
"""
A wrapper for :func:`fork_gevent` for non-POSIX platforms.
"""
return fork_gevent()
if hasattr(os, 'forkpty'):
_raw_forkpty = os.forkpty
def forkpty_gevent():
"""
Forks the process using :func:`os.forkpty` and prepares the
child process to continue using gevent before returning.
Returns a tuple (pid, master_fd). The `master_fd` is *not* put into
non-blocking mode.
Availability: Some Unix systems.
.. seealso:: This function has the same limitations as :func:`fork_gevent`.
.. versionadded:: 1.1b5
"""
pid, master_fd = _raw_forkpty()
if not pid:
reinit()
return pid, master_fd
forkpty = forkpty_gevent
__implements__.append('forkpty')
__extensions__.append("forkpty_gevent")
if hasattr(os, 'WNOWAIT') or hasattr(os, 'WNOHANG'):
# We can only do this on POSIX
import time
_waitpid = os.waitpid
_WNOHANG = os.WNOHANG
# replaced by the signal module.
_on_child_hook = lambda: None
# {pid -> watcher or tuple(pid, rstatus, timestamp)}
_watched_children = {}
def _on_child(watcher, callback):
# XXX: Could handle tracing here by not stopping
# until the pid is terminated
watcher.stop()
_watched_children[watcher.pid] = (watcher.pid, watcher.rstatus, time.time())
if callback:
callback(watcher)
# dispatch an "event"; used by gevent.signal.signal
_on_child_hook()
# now is as good a time as any to reap children
_reap_children()
def _reap_children(timeout=60):
# Remove all the dead children that haven't been waited on
# for the *timeout* seconds.
# Some platforms queue delivery of SIGCHLD for all children that die;
# in that case, a well-behaved application should call waitpid() for each
# signal.
# Some platforms (linux) only guarantee one delivery if multiple children
        # die. On that platform, a well-behaved application calls waitpid() in a loop
# until it gets back -1, indicating no more dead children need to be waited for.
# In either case, waitpid should be called the same number of times as dead children,
# thus removing all the watchers when a SIGCHLD arrives. The (generous) timeout
# is to work with applications that neglect to call waitpid and prevent "unlimited"
# growth.
# Note that we don't watch for the case of pid wraparound. That is, we fork a new
# child with the same pid as an existing watcher, but the child is already dead,
# just not waited on yet.
now = time.time()
oldest_allowed = now - timeout
dead = [pid for pid, val
in _watched_children.items()
if isinstance(val, tuple) and val[2] < oldest_allowed]
for pid in dead:
del _watched_children[pid]
def waitpid(pid, options):
"""
Wait for a child process to finish.
If the child process was spawned using :func:`fork_and_watch`, then this
function behaves cooperatively. If not, it *may* have race conditions; see
:func:`fork_gevent` for more information.
The arguments are as for the underlying :func:`os.waitpid`. Some combinations
of *options* may not be supported (as of 1.1 that includes WUNTRACED).
Availability: POSIX.
.. versionadded:: 1.1b1
"""
# XXX Does not handle tracing children
if pid <= 0:
# magic functions for multiple children.
if pid == -1:
# Any child. If we have one that we're watching and that finished,
# we need to use that one. Otherwise, let the OS take care of it.
for k, v in _watched_children.items():
if isinstance(v, tuple):
pid = k
break
if pid <= 0:
# If we didn't find anything, go to the OS. Otherwise,
# handle waiting
return _waitpid(pid, options)
if pid in _watched_children:
# yes, we're watching it
if options & _WNOHANG or isinstance(_watched_children[pid], tuple):
# We're either asked not to block, or it already finished, in which
# case blocking doesn't matter
result = _watched_children[pid]
if isinstance(result, tuple):
# it finished. libev child watchers
# are one-shot
del _watched_children[pid]
return result[:2]
# it's not finished
return (0, 0)
else:
# we should block. Let the underlying OS call block; it should
# eventually die with OSError, depending on signal delivery
try:
return _waitpid(pid, options)
except OSError:
                    if pid in _watched_children and isinstance(_watched_children[pid], tuple):
result = _watched_children[pid]
del _watched_children[pid]
return result[:2]
raise
# we're not watching it
return _waitpid(pid, options)
def fork_and_watch(callback=None, loop=None, ref=False, fork=fork_gevent):
"""
Fork a child process and start a child watcher for it in the parent process.
This call cooperates with :func:`waitpid` to enable cooperatively waiting
for children to finish. When monkey-patching, these functions are patched in as
:func:`os.fork` and :func:`os.waitpid`, respectively.
In the child process, this function calls :func:`gevent.hub.reinit` before returning.
Availability: POSIX.
:keyword callback: If given, a callable that will be called with the child watcher
when the child finishes.
:keyword loop: The loop to start the watcher in. Defaults to the
loop of the current hub.
:keyword fork: The fork function. Defaults to :func:`the one defined in this
module <gevent.os.fork_gevent>` (which automatically calls :func:`gevent.hub.reinit`).
Pass the builtin :func:`os.fork` function if you do not need to
initialize gevent in the child process.
.. versionadded:: 1.1b1
.. seealso::
:func:`gevent.monkey.get_original` To access the builtin :func:`os.fork`.
"""
pid = fork()
if pid:
# parent
loop = loop or get_hub().loop
watcher = loop.child(pid, ref=ref)
_watched_children[pid] = watcher
watcher.start(_on_child, watcher, callback)
return pid
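    # Cooperative child-management sketch (POSIX only; the child body is
    # illustrative):
    #
    #   pid = fork_and_watch()
    #   if pid == 0:
    #       os._exit(0)                           # child exits immediately
    #   rpid, status = waitpid(pid, os.WNOHANG)   # (0, 0) while still running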
__extensions__.append('fork_and_watch')
__extensions__.append('fork_gevent')
if 'forkpty' in __implements__:
def forkpty_and_watch(callback=None, loop=None, ref=False, forkpty=forkpty_gevent):
"""
Like :func:`fork_and_watch`, except using :func:`forkpty_gevent`.
Availability: Some Unix systems.
.. versionadded:: 1.1b5
"""
result = []
def _fork():
pid_and_fd = forkpty()
result.append(pid_and_fd)
return pid_and_fd[0]
fork_and_watch(callback, loop, ref, _fork)
return result[0]
__extensions__.append('forkpty_and_watch')
# Watch children by default
if not os.getenv('GEVENT_NOWAITPID'):
# Broken out into separate functions instead of simple name aliases
# for documentation purposes.
def fork(*args, **kwargs):
"""
Forks a child process and starts a child watcher for it in the
parent process so that ``waitpid`` and SIGCHLD work as expected.
This implementation of ``fork`` is a wrapper for :func:`fork_and_watch`
when the environment variable ``GEVENT_NOWAITPID`` is *not* defined.
This is the default and should be used by most applications.
.. versionchanged:: 1.1b2
"""
# take any args to match fork_and_watch
return fork_and_watch(*args, **kwargs)
if 'forkpty' in __implements__:
def forkpty(*args, **kwargs):
"""
Like :func:`fork`, but using :func:`forkpty_gevent`.
This implementation of ``forkpty`` is a wrapper for :func:`forkpty_and_watch`
when the environment variable ``GEVENT_NOWAITPID`` is *not* defined.
This is the default and should be used by most applications.
.. versionadded:: 1.1b5
"""
# take any args to match fork_and_watch
return forkpty_and_watch(*args, **kwargs)
__implements__.append("waitpid")
else:
def fork():
"""
Forks a child process, initializes gevent in the child,
but *does not* prepare the parent to wait for the child or receive SIGCHLD.
This implementation of ``fork`` is a wrapper for :func:`fork_gevent`
when the environment variable ``GEVENT_NOWAITPID`` *is* defined.
This is not recommended for most applications.
"""
return fork_gevent()
if 'forkpty' in __implements__:
def forkpty():
"""
Like :func:`fork`, but using :func:`os.forkpty`
This implementation of ``forkpty`` is a wrapper for :func:`forkpty_gevent`
when the environment variable ``GEVENT_NOWAITPID`` *is* defined.
This is not recommended for most applications.
.. versionadded:: 1.1b5
"""
return forkpty_gevent()
__extensions__.append("waitpid")
else:
__implements__.remove('fork')
__all__ = __implements__ + __extensions__
|
|
import json
import requests
from datetime import date, datetime
def _request(symbol):
response = requests.get('http://www.google.com/finance/info', params={
'q': symbol,
})
return response.text
def _try_float(val):
    try:
        return float(val)
    except (TypeError, ValueError):
        return None
def _try_date(val):
try:
d = [int(i) for i in val.split('/')]
return date(
d[2],
d[0],
d[1],
)
    except (AttributeError, IndexError, ValueError):
        return None
def _format_all(value):
return dict(
price=_try_float(value['l_fix']),
change=_try_float(value['c_fix']),
transaction_exchange=value['e'],
change_percent=_try_float(value['cp_fix']),
symbol=value['t'],
previous_close=_try_float(value['pcls_fix']),
)
def get(symbol):
'''
:param symbol: str
use "," to separate multiple symbols.
:returns: list or dict
'''
if isinstance(symbol, list):
symbol = ','.join(symbol) + ','
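    # The (long-retired) Google Finance endpoint prefixed its JSON body with
    # "// ", hence the [3:] slice below.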
values = json.loads(_request(
symbol
)[3:])
if ',' not in symbol:
return _format_all(values[0])
data = {}
for value in values:
transaction = _format_all(value)
data['{}:{}'.format(transaction['transaction_exchange'], transaction.pop('symbol'))] = transaction
return data
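# Usage sketch (tickers illustrative; the underlying Google endpoint has
# since been retired):
#
#   get('NASDAQ:GOOG')
#   # -> {'price': ..., 'change': ..., 'symbol': 'GOOG', ...}
#   get(['NASDAQ:GOOG', 'NASDAQ:AAPL'])
#   # -> {'NASDAQ:GOOG': {...}, 'NASDAQ:AAPL': {...}}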
def _request_historical_prices(symbol, period, interval=86400):
"""
:param symbol: str
exchange:ticker
:param period: str
1d = 1 day
2y = 2 years
:param interval: int
prices interval
Default is 86400 aka 1 day.
    :returns: list of parsed price points (see parse_google_get_prices)
"""
params = {
'p': period,
'i': interval,
'f': 'd,c,v,o,h,l',
}
symbol = symbol.split(':')
if len(symbol) == 2:
params['q'] = symbol[1]
params['x'] = symbol[0]
else:
params['q'] = symbol[0]
data = requests.get(
'http://www.google.com/finance/getprices',
params=params
)
return parse_google_get_prices(data.text)
def fill_portfolio_historical_prices(portfolio, period, interval=86400, min_fill_date=None):
period_before_total_value = 0.0
portfolio['period_close_value'] = 0.0
portfolio['period_close_value_percent'] = 0.0
for symbol in portfolio['transactions']:
s = portfolio['transactions'][symbol]
prices = _request_historical_prices(
symbol=symbol,
period=period,
interval=interval,
)
close = 0.0
close_value = 0.0
prices_dict = {}
if min_fill_date:
            prices = [price for price in prices if price['datetime'].date() >= min_fill_date]
for price in prices:
prices_dict[price['datetime'].date()] = price['close']
close = price['close']
close_value = price.setdefault('close_value', 0)
close_value += sum([float(shares) * price['close'] for shares in s['shares_grouped']])
price['close_value'] = close_value
s['historical_prices'] = prices
if len(prices) == 0:
return
s['period_close_value'] = 0.0
for date_, shares, paid_price in zip(s['trade_dates'], s['shares_grouped'], s['paid_prices']):
close = prices[0]['close'] if date_ not in prices_dict else paid_price
close_value = float(shares) * close
s['period_close_value'] += close_value
s['period_gain'] = s['value'] - s['period_close_value']
s['period_gain_percent'] = (s['period_gain'] / s['period_close_value']) * 100
portfolio['period_close_value'] += s['period_close_value']
portfolio['period_gain'] = portfolio['value'] - portfolio['period_close_value']
portfolio['period_gain_percent'] = (portfolio['period_gain'] / portfolio['period_close_value']) * 100
def fill_portfolio_daily(portfolio):
transactions_info = get([symbol for symbol in portfolio['transactions']])
value = 0
cost = 0
gain_today = 0
change = 0
previous_close = 0
price = 0
for symbol in portfolio['transactions']:
s = portfolio['transactions'][symbol]
transaction_info = transactions_info[symbol]
for transaction in s if isinstance(s, list) else [s]:
transaction['price'] = float(transaction_info['price']) if transaction_info['price'] else float(0)
transaction['value'] = transaction_info['price'] * transaction['shares']
transaction['cost'] = transaction['paid_price'] * transaction['shares']
transaction['gain'] = transaction['value'] - transaction['cost']
transaction['gain_percent'] = (transaction['gain'] / transaction['cost']) * 100
transaction['paid_prices'] = [transaction['paid_price']]
transaction['trade_dates'] = [transaction['trade_date']]
transaction['shares_grouped'] = [transaction['shares']]
transaction['change'] = transaction_info['change'] if transaction_info['change'] else float(0)
transaction['change_percent'] = transaction_info['change_percent'] if transaction_info['change_percent'] else float(0)
transaction['gain_today'] = transaction_info['change'] * transaction['shares'] if transaction_info['change'] else float(0)
transaction['previous_close'] = transaction_info['previous_close']
value += transaction['value']
cost += transaction['cost']
gain_today += transaction['gain_today']
change += transaction['change'] if transaction['change'] else 0.0
previous_close += transaction['previous_close'] if transaction['previous_close'] else 0.0
price += transaction_info['price']
portfolio['value'] = value
portfolio['cost'] = cost
portfolio['gain'] = value - cost
portfolio['gain_percent'] = (portfolio['gain'] / cost) * 100
portfolio['gain_today'] = gain_today
portfolio['change'] = change
portfolio['change_percent'] = ((price - previous_close) / previous_close) * 100 if previous_close else 0.0
portfolio['generated'] = datetime.utcnow()
_fill_lots(portfolio)
def _fill_lots(portfolio):
transactions = portfolio['transactions']
for symbol in transactions:
if isinstance(transactions[symbol], dict) or not transactions[symbol]:
continue
if len(transactions[symbol]) == 1:
transactions[symbol] = transactions[symbol][0]
continue
value = 0
cost = 0
gain = 0
gain_today = 0
shares = []
prices = []
lots = []
transaction = transactions[symbol][0]
change = transaction['change']
change_percent = transaction['change_percent']
previous_close = transaction['previous_close']
price = transaction['price']
trade_date = None
trade_dates = []
for transaction in transactions[symbol]:
            if trade_date is None or trade_date < transaction['trade_date']:
trade_date = transaction['trade_date']
trade_dates.append(transaction['trade_date'])
value += transaction['value']
cost += transaction['cost']
gain += transaction['gain']
gain_today += transaction['gain_today']
shares.append(transaction['shares'])
prices.append(transaction['paid_price'])
lots.append(transaction)
paid_price = sum(prices)/float(len(prices))
transactions[symbol] = {
'paid_price': paid_price,
'paid_prices': prices,
'lots': lots,
'lots_count': len(lots),
'change': change,
'change_percent': change_percent,
'previous_close': previous_close,
'price': price,
'shares': sum(shares),
'shares_grouped': shares,
'gain': gain,
'gain_today': gain_today,
'gain_percent': ((value - cost) / cost) * 100,
'value': value,
'cost': cost,
'trade_date': min(trade_dates),
'trade_dates': trade_dates
}
def parse_google_get_prices(data):
    data = data.splitlines()
    time_ = 0
    timezone_offset = 0
    interval = int(data[3][9:])
    points = []
    for d in data[6:]:
        if d[:16] == 'TIMEZONE_OFFSET=':
            timezone_offset = int(d[16:])
continue
d = d.split(',')
inc = 0
if d[0][:1] == 'a':
time_ = int(d[0][1:]) - (timezone_offset * 60)
else:
inc = int(d[0])
points.append({
'datetime': datetime.fromtimestamp(time_ + (interval * inc)),
'close': float(d[1]),
'high': float(d[2]),
'low': float(d[3]),
'open': float(d[4]),
'volume': float(d[5]),
})
return points
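# The raw getprices payload this parser expects looks roughly like the
# following (header values illustrative):
#
#   EXCHANGE%3DNASDAQ
#   MARKET_OPEN_MINUTE=570
#   MARKET_CLOSE_MINUTE=960
#   INTERVAL=86400
#   COLUMNS=DATE,CLOSE,HIGH,LOW,OPEN,VOLUME
#   DATA=
#   TIMEZONE_OFFSET=-240
#   a1483367400,116.15,116.33,114.76,115.80,28781865
#   1,116.02,116.51,115.61,115.85,21118116
#
# Rows starting with 'a' carry an absolute Unix timestamp; bare leading
# integers are interval multiples relative to the last 'a' row.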
|
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import weakref
import fasteners
from oslo_config import cfg
from oslo_utils import reflection
from oslo_utils import timeutils
from oslo_concurrency._i18n import _
LOG = logging.getLogger(__name__)
_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Enables or disables inter-process locks.',
deprecated_group='DEFAULT'),
cfg.StrOpt('lock_path',
default=os.environ.get("OSLO_LOCK_PATH"),
help='Directory to use for lock files. For security, the '
'specified directory should only be writable by the user '
'running the processes that need locking. '
'Defaults to environment variable OSLO_LOCK_PATH. '
'If external locks are used, a lock path must be set.',
deprecated_group='DEFAULT')
]
def _register_opts(conf):
conf.register_opts(_opts, group='oslo_concurrency')
CONF = cfg.CONF
_register_opts(CONF)
def set_defaults(lock_path):
"""Set value for lock_path.
This can be used by tests to set lock_path to a temporary directory.
"""
cfg.set_defaults(_opts, lock_path=lock_path)
def get_lock_path(conf):
"""Return the path used for external file-based locks.
:param conf: Configuration object
:type conf: oslo_config.cfg.ConfigOpts
.. versionadded:: 1.8
"""
_register_opts(conf)
return conf.oslo_concurrency.lock_path
InterProcessLock = fasteners.InterProcessLock
ReaderWriterLock = fasteners.ReaderWriterLock
"""A reader/writer lock.
.. versionadded:: 0.4
"""
class FairLocks(object):
"""A garbage collected container of fair locks.
With a fair lock, contending lockers will get the lock in the order in
which they tried to acquire it.
This collection internally uses a weak value dictionary so that when a
lock is no longer in use (by any threads) it will automatically be
removed from this container by the garbage collector.
"""
def __init__(self):
self._locks = weakref.WeakValueDictionary()
self._lock = threading.Lock()
def get(self, name):
"""Gets (or creates) a lock with a given name.
:param name: The lock name to get/create (used to associate
previously created names with the same lock).
        Returns a newly constructed lock (or an existing one if it was
already created for the given name).
"""
with self._lock:
try:
return self._locks[name]
except KeyError:
# The fasteners module specifies that
# ReaderWriterLock.write_lock() will give FIFO behaviour,
# so we don't need to do anything special ourselves.
rwlock = ReaderWriterLock()
self._locks[name] = rwlock
return rwlock
_fair_locks = FairLocks()
def internal_fair_lock(name):
return _fair_locks.get(name)
class Semaphores(object):
"""A garbage collected container of semaphores.
This collection internally uses a weak value dictionary so that when a
semaphore is no longer in use (by any threads) it will automatically be
removed from this container by the garbage collector.
.. versionadded:: 0.3
"""
def __init__(self):
self._semaphores = weakref.WeakValueDictionary()
self._lock = threading.Lock()
def get(self, name):
"""Gets (or creates) a semaphore with a given name.
:param name: The semaphore name to get/create (used to associate
previously created names with the same semaphore).
        Returns a newly constructed semaphore (or an existing one if it was
already created for the given name).
"""
with self._lock:
try:
return self._semaphores[name]
except KeyError:
sem = threading.Semaphore()
self._semaphores[name] = sem
return sem
def __len__(self):
"""Returns how many semaphores exist at the current time."""
return len(self._semaphores)
_semaphores = Semaphores()
def _get_lock_path(name, lock_file_prefix, lock_path=None):
# NOTE(mikal): the lock name cannot contain directory
# separators
name = name.replace(os.sep, '_')
if lock_file_prefix:
sep = '' if lock_file_prefix.endswith('-') else '-'
name = '%s%s%s' % (lock_file_prefix, sep, name)
local_lock_path = lock_path or CONF.oslo_concurrency.lock_path
if not local_lock_path:
raise cfg.RequiredOptError('lock_path')
return os.path.join(local_lock_path, name)
def external_lock(name, lock_file_prefix=None, lock_path=None):
lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)
return InterProcessLock(lock_file_path)
def remove_external_lock_file(name, lock_file_prefix=None, lock_path=None,
semaphores=None):
"""Remove an external lock file when it's not used anymore
This will be helpful when we have a lot of lock files
"""
with internal_lock(name, semaphores=semaphores):
lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)
try:
os.remove(lock_file_path)
except OSError as exc:
if exc.errno != errno.ENOENT:
LOG.warning('Failed to remove file %(file)s',
{'file': lock_file_path})
def internal_lock(name, semaphores=None):
if semaphores is None:
semaphores = _semaphores
return semaphores.get(name)
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None,
do_log=True, semaphores=None, delay=0.01, fair=False):
"""Context based lock
This function yields a `threading.Semaphore` instance (if we don't use
eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
True, in which case, it'll yield an InterProcessLock instance.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
lock files on disk with a meaningful prefix.
:param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different
workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
:param lock_path: The path in which to store external lock files. For
external locking to work properly, this must be the same for all
references to the lock.
:param do_log: Whether to log acquire/release messages. This is primarily
intended to reduce log message duplication when `lock` is used from the
`synchronized` decorator.
:param semaphores: Container that provides semaphores to use when locking.
This ensures that threads inside the same application can not collide,
due to the fact that external process locks are unaware of a processes
active threads.
:param delay: Delay between acquisition attempts (in seconds).
:param fair: Whether or not we want a "fair" lock where contending lockers
will get the lock in the order in which they tried to acquire it.
.. versionchanged:: 0.2
Added *do_log* optional parameter.
.. versionchanged:: 0.3
Added *delay* and *semaphores* optional parameters.
"""
if fair:
if semaphores is not None:
raise NotImplementedError(_('Specifying semaphores is not '
'supported when using fair locks.'))
        # The fasteners module specifies that write_lock() provides fairness.
int_lock = internal_fair_lock(name).write_lock()
else:
int_lock = internal_lock(name, semaphores=semaphores)
with int_lock:
if do_log:
LOG.debug('Acquired lock "%(lock)s"', {'lock': name})
try:
if external and not CONF.oslo_concurrency.disable_process_locking:
ext_lock = external_lock(name, lock_file_prefix, lock_path)
ext_lock.acquire(delay=delay)
if do_log:
LOG.debug('Acquired external semaphore "%(lock)s"',
{'lock': name})
try:
yield ext_lock
finally:
ext_lock.release()
else:
yield int_lock
finally:
if do_log:
LOG.debug('Releasing lock "%(lock)s"', {'lock': name})
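# Usage sketch (lock name and path illustrative):
#
#   with lock('my-resource', external=True, lock_path='/var/lock/myapp'):
#       ...  # at most one thread in one process runs this block at a time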
def lock_with_prefix(lock_file_prefix):
"""Partial object generator for the lock context manager.
Redefine lock in each project like so::
(in nova/utils.py)
from oslo_concurrency import lockutils
_prefix = 'nova'
lock = lockutils.lock_with_prefix(_prefix)
lock_cleanup = lockutils.remove_external_lock_file_with_prefix(_prefix)
(in nova/foo.py)
from nova import utils
with utils.lock('mylock'):
...
Eventually clean up with::
lock_cleanup('mylock')
:param lock_file_prefix: A string used to provide lock files on disk with a
meaningful prefix. Will be separated from the lock name with a hyphen,
which may optionally be included in the lock_file_prefix (e.g.
``'nova'`` and ``'nova-'`` are equivalent).
"""
return functools.partial(lock, lock_file_prefix=lock_file_prefix)
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None,
semaphores=None, delay=0.01, fair=False):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
.. versionchanged:: 0.3
Added *delay* and *semaphores* optional parameter.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
t1 = timeutils.now()
t2 = None
try:
with lock(name, lock_file_prefix, external, lock_path,
do_log=False, semaphores=semaphores, delay=delay,
fair=fair):
t2 = timeutils.now()
LOG.debug('Lock "%(name)s" acquired by "%(function)s" :: '
'waited %(wait_secs)0.3fs',
{'name': name,
'function': reflection.get_callable_name(f),
'wait_secs': (t2 - t1)})
return f(*args, **kwargs)
finally:
t3 = timeutils.now()
if t2 is None:
held_secs = "N/A"
else:
held_secs = "%0.3fs" % (t3 - t2)
LOG.debug('Lock "%(name)s" released by "%(function)s" :: held '
'%(held_secs)s',
{'name': name,
'function': reflection.get_callable_name(f),
'held_secs': held_secs})
return inner
return wrap
def synchronized_with_prefix(lock_file_prefix):
"""Partial object generator for the synchronization decorator.
Redefine @synchronized in each project like so::
(in nova/utils.py)
from oslo_concurrency import lockutils
_prefix = 'nova'
synchronized = lockutils.synchronized_with_prefix(_prefix)
lock_cleanup = lockutils.remove_external_lock_file_with_prefix(_prefix)
(in nova/foo.py)
from nova import utils
@utils.synchronized('mylock')
def bar(self, *args):
...
Eventually clean up with::
lock_cleanup('mylock')
:param lock_file_prefix: A string used to provide lock files on disk with a
meaningful prefix. Will be separated from the lock name with a hyphen,
which may optionally be included in the lock_file_prefix (e.g.
``'nova'`` and ``'nova-'`` are equivalent).
"""
return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
def remove_external_lock_file_with_prefix(lock_file_prefix):
"""Partial object generator for the remove lock file function.
Redefine remove_external_lock_file_with_prefix in each project like so::
(in nova/utils.py)
from oslo_concurrency import lockutils
_prefix = 'nova'
synchronized = lockutils.synchronized_with_prefix(_prefix)
lock = lockutils.lock_with_prefix(_prefix)
lock_cleanup = lockutils.remove_external_lock_file_with_prefix(_prefix)
(in nova/foo.py)
from nova import utils
@utils.synchronized('mylock')
def bar(self, *args):
...
def baz(self, *args):
...
with utils.lock('mylock'):
...
...
<eventually call lock_cleanup('mylock') to clean up>
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix.
"""
return functools.partial(remove_external_lock_file,
lock_file_prefix=lock_file_prefix)
def _lock_wrapper(argv):
"""Create a dir for locks and pass it to command from arguments
This is exposed as a console script entry point named
lockutils-wrapper
If you run this:
lockutils-wrapper stestr run <etc>
a temporary directory will be created for all your locks and passed to all
your tests in an environment variable. The temporary dir will be deleted
afterwards and the return value will be preserved.
"""
lock_dir = tempfile.mkdtemp()
os.environ["OSLO_LOCK_PATH"] = lock_dir
try:
ret_val = subprocess.call(argv[1:])
finally:
shutil.rmtree(lock_dir, ignore_errors=True)
return ret_val
def main():
sys.exit(_lock_wrapper(sys.argv))
if __name__ == '__main__':
raise NotImplementedError(_('Calling lockutils directly is no longer '
'supported. Please use the '
'lockutils-wrapper console script instead.'))
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.osconfig_v1.types import patch_jobs
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.type import datetime_pb2 # type: ignore
from google.type import dayofweek_pb2 # type: ignore
from google.type import timeofday_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.osconfig.v1",
manifest={
"PatchDeployment",
"OneTimeSchedule",
"RecurringSchedule",
"WeeklySchedule",
"MonthlySchedule",
"WeekDayOfMonth",
"CreatePatchDeploymentRequest",
"GetPatchDeploymentRequest",
"ListPatchDeploymentsRequest",
"ListPatchDeploymentsResponse",
"DeletePatchDeploymentRequest",
"UpdatePatchDeploymentRequest",
"PausePatchDeploymentRequest",
"ResumePatchDeploymentRequest",
},
)
class PatchDeployment(proto.Message):
r"""Patch deployments are configurations that individual patch jobs use
to complete a patch. These configurations include instance filter,
package repository settings, and a schedule. For more information
about creating and managing patch deployments, see `Scheduling patch
jobs <https://cloud.google.com/compute/docs/os-patch-management/schedule-patch-jobs>`__.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
name (str):
Unique name for the patch deployment resource in a project.
The patch deployment name is in the form:
``projects/{project_id}/patchDeployments/{patch_deployment_id}``.
This field is ignored when you create a new patch
deployment.
description (str):
Optional. Description of the patch
deployment. Length of the description is limited
to 1024 characters.
instance_filter (google.cloud.osconfig_v1.types.PatchInstanceFilter):
Required. VM instances to patch.
patch_config (google.cloud.osconfig_v1.types.PatchConfig):
Optional. Patch configuration that is
applied.
duration (google.protobuf.duration_pb2.Duration):
Optional. Duration of the patch. After the
duration ends, the patch times out.
one_time_schedule (google.cloud.osconfig_v1.types.OneTimeSchedule):
Required. Schedule a one-time execution.
This field is a member of `oneof`_ ``schedule``.
recurring_schedule (google.cloud.osconfig_v1.types.RecurringSchedule):
Required. Schedule recurring executions.
This field is a member of `oneof`_ ``schedule``.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time the patch deployment was created.
Timestamp is in
`RFC3339 <https://www.ietf.org/rfc/rfc3339.txt>`__ text
format.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time the patch deployment was last updated.
Timestamp is in
`RFC3339 <https://www.ietf.org/rfc/rfc3339.txt>`__ text
format.
last_execute_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The last time a patch job was started by this
deployment. Timestamp is in
`RFC3339 <https://www.ietf.org/rfc/rfc3339.txt>`__ text
format.
rollout (google.cloud.osconfig_v1.types.PatchRollout):
Optional. Rollout strategy of the patch job.
state (google.cloud.osconfig_v1.types.PatchDeployment.State):
Output only. Current state of the patch
deployment.
"""
class State(proto.Enum):
r"""Represents state of patch peployment."""
STATE_UNSPECIFIED = 0
ACTIVE = 1
PAUSED = 2
name = proto.Field(proto.STRING, number=1,)
description = proto.Field(proto.STRING, number=2,)
instance_filter = proto.Field(
proto.MESSAGE, number=3, message=patch_jobs.PatchInstanceFilter,
)
patch_config = proto.Field(proto.MESSAGE, number=4, message=patch_jobs.PatchConfig,)
duration = proto.Field(proto.MESSAGE, number=5, message=duration_pb2.Duration,)
one_time_schedule = proto.Field(
proto.MESSAGE, number=6, oneof="schedule", message="OneTimeSchedule",
)
recurring_schedule = proto.Field(
proto.MESSAGE, number=7, oneof="schedule", message="RecurringSchedule",
)
create_time = proto.Field(proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=9, message=timestamp_pb2.Timestamp,)
last_execute_time = proto.Field(
proto.MESSAGE, number=10, message=timestamp_pb2.Timestamp,
)
rollout = proto.Field(proto.MESSAGE, number=11, message=patch_jobs.PatchRollout,)
state = proto.Field(proto.ENUM, number=12, enum=State,)
class OneTimeSchedule(proto.Message):
r"""Sets the time for a one time patch deployment. Timestamp is in
`RFC3339 <https://www.ietf.org/rfc/rfc3339.txt>`__ text format.
Attributes:
execute_time (google.protobuf.timestamp_pb2.Timestamp):
Required. The desired patch job execution
time.
"""
execute_time = proto.Field(
proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,
)
class RecurringSchedule(proto.Message):
r"""Sets the time for recurring patch deployments.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
time_zone (google.type.datetime_pb2.TimeZone):
Required. Defines the time zone that ``time_of_day`` is
relative to. The rules for daylight saving time are
determined by the chosen time zone.
start_time (google.protobuf.timestamp_pb2.Timestamp):
Optional. The time that the recurring schedule becomes
effective. Defaults to ``create_time`` of the patch
deployment.
end_time (google.protobuf.timestamp_pb2.Timestamp):
Optional. The end time at which a recurring
patch deployment schedule is no longer active.
time_of_day (google.type.timeofday_pb2.TimeOfDay):
Required. Time of the day to run a recurring
deployment.
frequency (google.cloud.osconfig_v1.types.RecurringSchedule.Frequency):
Required. The frequency unit of this
recurring schedule.
weekly (google.cloud.osconfig_v1.types.WeeklySchedule):
Required. Schedule with weekly executions.
This field is a member of `oneof`_ ``schedule_config``.
monthly (google.cloud.osconfig_v1.types.MonthlySchedule):
Required. Schedule with monthly executions.
This field is a member of `oneof`_ ``schedule_config``.
last_execute_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The time the last patch job ran
successfully.
next_execute_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The time the next patch job is
scheduled to run.
"""
class Frequency(proto.Enum):
r"""Specifies the frequency of the recurring patch deployments."""
FREQUENCY_UNSPECIFIED = 0
WEEKLY = 1
MONTHLY = 2
DAILY = 3
time_zone = proto.Field(proto.MESSAGE, number=1, message=datetime_pb2.TimeZone,)
start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
time_of_day = proto.Field(proto.MESSAGE, number=4, message=timeofday_pb2.TimeOfDay,)
frequency = proto.Field(proto.ENUM, number=5, enum=Frequency,)
weekly = proto.Field(
proto.MESSAGE, number=6, oneof="schedule_config", message="WeeklySchedule",
)
monthly = proto.Field(
proto.MESSAGE, number=7, oneof="schedule_config", message="MonthlySchedule",
)
last_execute_time = proto.Field(
proto.MESSAGE, number=9, message=timestamp_pb2.Timestamp,
)
next_execute_time = proto.Field(
proto.MESSAGE, number=10, message=timestamp_pb2.Timestamp,
)
class WeeklySchedule(proto.Message):
r"""Represents a weekly schedule.
Attributes:
day_of_week (google.type.dayofweek_pb2.DayOfWeek):
Required. Day of the week.
"""
day_of_week = proto.Field(proto.ENUM, number=1, enum=dayofweek_pb2.DayOfWeek,)
class MonthlySchedule(proto.Message):
r"""Represents a monthly schedule. An example of a valid monthly
schedule is "on the third Tuesday of the month" or "on the 15th
of the month".
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
week_day_of_month (google.cloud.osconfig_v1.types.WeekDayOfMonth):
Required. Week day in a month.
This field is a member of `oneof`_ ``day_of_month``.
month_day (int):
Required. One day of the month. 1-31
indicates the 1st to the 31st day. -1 indicates
the last day of the month. Months without the
target day will be skipped. For example, a
schedule to run "every month on the 31st" will
not run in February, April, June, etc.
This field is a member of `oneof`_ ``day_of_month``.
"""
week_day_of_month = proto.Field(
proto.MESSAGE, number=1, oneof="day_of_month", message="WeekDayOfMonth",
)
month_day = proto.Field(proto.INT32, number=2, oneof="day_of_month",)
class WeekDayOfMonth(proto.Message):
r"""Represents one week day in a month. An example is "the 4th
Sunday".
Attributes:
week_ordinal (int):
Required. Week number in a month. 1-4
indicates the 1st to 4th week of the month. -1
indicates the last week of the month.
day_of_week (google.type.dayofweek_pb2.DayOfWeek):
Required. A day of the week.
day_offset (int):
Optional. Represents the number of days before or after the
given week day of month that the patch deployment is
            scheduled for. For example if ``week_ordinal`` and
            ``day_of_week`` values point to the second Tuesday of the month
            and this ``day_offset`` value is set to ``3``, the patch
            deployment takes place three days after the second Tuesday
            of the month. If this value is negative, for example -5, the
            patches are deployed five days before the second
            Tuesday of the month. Allowed values are in range [-30, 30].
"""
week_ordinal = proto.Field(proto.INT32, number=1,)
day_of_week = proto.Field(proto.ENUM, number=2, enum=dayofweek_pb2.DayOfWeek,)
day_offset = proto.Field(proto.INT32, number=3,)
class CreatePatchDeploymentRequest(proto.Message):
r"""A request message for creating a patch deployment.
Attributes:
parent (str):
Required. The project to apply this patch deployment to in
the form ``projects/*``.
patch_deployment_id (str):
Required. A name for the patch deployment in the project.
When creating a name the following rules apply:
- Must contain only lowercase letters, numbers, and
hyphens.
- Must start with a letter.
- Must be between 1-63 characters.
- Must end with a number or a letter.
- Must be unique within the project.
patch_deployment (google.cloud.osconfig_v1.types.PatchDeployment):
Required. The patch deployment to create.
"""
parent = proto.Field(proto.STRING, number=1,)
patch_deployment_id = proto.Field(proto.STRING, number=2,)
patch_deployment = proto.Field(proto.MESSAGE, number=3, message="PatchDeployment",)
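# A construction sketch using proto-plus keyword arguments (project and
# deployment names illustrative; PatchInstanceFilter(all=True) targets all
# instances):
#
#   request = CreatePatchDeploymentRequest(
#       parent="projects/my-project",
#       patch_deployment_id="weekly-kernel-patch",
#       patch_deployment=PatchDeployment(
#           instance_filter=patch_jobs.PatchInstanceFilter(all=True),
#       ),
#   )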
class GetPatchDeploymentRequest(proto.Message):
r"""A request message for retrieving a patch deployment.
Attributes:
name (str):
Required. The resource name of the patch deployment in the
form ``projects/*/patchDeployments/*``.
"""
name = proto.Field(proto.STRING, number=1,)
class ListPatchDeploymentsRequest(proto.Message):
r"""A request message for listing patch deployments.
Attributes:
parent (str):
Required. The resource name of the parent in the form
``projects/*``.
page_size (int):
Optional. The maximum number of patch
deployments to return. Default is 100.
page_token (str):
Optional. A pagination token returned from a
previous call to ListPatchDeployments that
indicates where this listing should continue
from.
"""
parent = proto.Field(proto.STRING, number=1,)
page_size = proto.Field(proto.INT32, number=2,)
page_token = proto.Field(proto.STRING, number=3,)
class ListPatchDeploymentsResponse(proto.Message):
r"""A response message for listing patch deployments.
Attributes:
patch_deployments (Sequence[google.cloud.osconfig_v1.types.PatchDeployment]):
The list of patch deployments.
next_page_token (str):
A pagination token that can be used to get
the next page of patch deployments.
"""
@property
def raw_page(self):
return self
patch_deployments = proto.RepeatedField(
proto.MESSAGE, number=1, message="PatchDeployment",
)
next_page_token = proto.Field(proto.STRING, number=2,)
class DeletePatchDeploymentRequest(proto.Message):
r"""A request message for deleting a patch deployment.
Attributes:
name (str):
Required. The resource name of the patch deployment in the
form ``projects/*/patchDeployments/*``.
"""
name = proto.Field(proto.STRING, number=1,)
class UpdatePatchDeploymentRequest(proto.Message):
r"""A request message for updating a patch deployment.
Attributes:
patch_deployment (google.cloud.osconfig_v1.types.PatchDeployment):
            Required. The patch deployment to update.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Optional. Field mask that controls which
fields of the patch deployment should be
updated.
"""
patch_deployment = proto.Field(proto.MESSAGE, number=1, message="PatchDeployment",)
update_mask = proto.Field(
proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
)
class PausePatchDeploymentRequest(proto.Message):
r"""A request message for pausing a patch deployment.
Attributes:
name (str):
Required. The resource name of the patch deployment in the
form ``projects/*/patchDeployments/*``.
"""
name = proto.Field(proto.STRING, number=1,)
class ResumePatchDeploymentRequest(proto.Message):
r"""A request message for resuming a patch deployment.
Attributes:
name (str):
Required. The resource name of the patch deployment in the
form ``projects/*/patchDeployments/*``.
"""
name = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
|
import os
import time
from functools import wraps
from flask import (Flask, redirect, url_for, session,
request, render_template)
from flask_oauthlib.client import OAuth, OAuthException
from flask_sslify import SSLify
SPOTIFY_APP_ID = os.environ.get('SPOTIFY_APP_ID')
SPOTIFY_APP_SECRET = os.environ.get('SPOTIFY_APP_SECRET')
SECRET_KEY = os.environ.get('SPOTIFY_KEY')
app = Flask(__name__)
app.secret_key = SECRET_KEY
oauth = OAuth(app)
sslify = SSLify(app)
spotify_scopes = [
'playlist-read-private',
'user-read-private',
'playlist-modify-private',
'playlist-modify-public',
]
spotify_scope_str = ' '.join(spotify_scopes)
spotify = oauth.remote_app(
'spotify',
consumer_key=SPOTIFY_APP_ID,
consumer_secret=SPOTIFY_APP_SECRET,
# https://developer.spotify.com/web-api/using-scopes/
request_token_params={'scope': spotify_scope_str},
base_url='https://api.spotify.com/v1/',
request_token_url=None,
access_token_url='https://accounts.spotify.com/api/token',
authorize_url='https://accounts.spotify.com/authorize',
content_type='application/json',
)
@spotify.tokengetter
def get_spotify_oauth_token():
return session.get('oauth_token')
def requires_login(f):
@wraps(f)
def decorated_function(*args, **kwargs):
oauth_token = session.get('oauth_token')
expire = session.get('expire')
if not oauth_token:
return redirect(url_for('login'))
elif expire < time.time():
return redirect(url_for('login'))
return f(*args, **kwargs)
return decorated_function
"""
Routes
"""
@app.route('/')
def index():
return render_template('home.html')
@app.route('/login')
def login():
callback = url_for(
'spotify_authorized',
next=request.args.get('next') or None,
_external=True
)
return spotify.authorize(callback=callback)
@app.route('/login/authorized')
def spotify_authorized():
resp = spotify.authorized_response()
if resp is None:
        return 'Access denied: reason={0} error={1}'.format(
            request.args.get('error_reason'),
            request.args.get('error_description')
        )
if isinstance(resp, OAuthException):
return 'Access denied: {0}'.format(resp.message)
session['oauth_token'] = (resp['access_token'], '')
me = spotify.get('me')
session['user_id'] = me.data['id']
session['expire'] = time.time() + 3500 # roughly 1 hour
return redirect(url_for('index'))
@app.route('/playlists')
@requires_login
def playlists():
user_id = session.get('user_id')
resp = spotify.get('users/{user_id}/playlists'.format(user_id=user_id))
playlists = resp.data['items']
return render_template('playlists.html', playlists=playlists)
@app.route('/artists')
@requires_login
def artists():
q = request.args.get('q', 'pitbull')
get_data = {
'market': 'from_token',
'type': 'artist',
'q': q,
}
if q:
resp = spotify.get('search/', data=get_data)
artists = resp.data['artists']['items']
else:
artists = []
return render_template('artists.html', artists=artists, q=q)
@app.route('/artists/related/<artist_id>')
@requires_login
def related_artists(artist_id):
"""Returns the list of related artists to the given
artist_id
"""
artists = get_related_artists(artist_id)
return render_template('artists.html', artists=artists)
@app.route('/artists/related-tracks/<artist_id>')
@requires_login
def related_tracks(artist_id):
"""Given an artist_id, gets related artists, then
their top tracks
"""
resp = spotify.get('artists/{0}'.format(artist_id))
artist_name = resp.data['name']
related_artists = get_related_artists(artist_id)
selected_artist_tracks = get_top_tracks(artist_id)
track_collection = selected_artist_tracks[:3]
track_uris = [track['uri'] for track in track_collection]
for artist in related_artists:
tracks = get_top_tracks(artist['id'])[:3]
track_collection.extend(tracks)
track_uris.extend([track['uri'] for track in tracks])
track_uri_str = ','.join(track_uris)
return render_template('tracks.html',
tracks=track_collection,
track_uri_str=track_uri_str,
artist_name=artist_name)
@app.route('/playlists/new', methods=['POST'])
@requires_login
def new_playlist():
"""Given POST data of artist_id
creates a new playlist based off of this artists
related artists
"""
playlist_name = request.form['playlist_name']
public = request.form['public']
track_uris = request.form['track_uris'].split(',')
new_playlist_data = {
'name': playlist_name,
'public': public
}
user_id = session.get('user_id')
resp = spotify.post('users/{0}/playlists'.format(user_id),
data=new_playlist_data,
format='json')
if str(resp.status).startswith('2'):
href = resp.data['href']
href += '/tracks' # to add new tracks
resp = spotify.post(href,
data={'uris': track_uris},
format='json')
print(resp.data)
return redirect(url_for('playlists'))
return redirect(url_for('artists'))
def get_related_artists(artist_id):
"""Given the artist_id, gets related ones
"""
resp = spotify.get('artists/{0}/related-artists'.format(artist_id))
artists = resp.data['artists']
return artists
def get_top_tracks(artist_id):
"""Given artist_id, gets top tracks for artist
"""
get_data = {'country': 'US'}
resp = spotify.get('artists/{0}/top-tracks'.format(artist_id),
data=get_data)
tracks = resp.data['tracks']
return tracks
if __name__ == '__main__':
app.debug = True
app.run()
|
|
import tensorflow as tf
import numpy as np
from data import *
def weight_variable(shape, name=None):
    initial = tf.truncated_normal(shape, stddev=0.1)
    if name:
        return tf.Variable(initial, name=name)
    else:
        return tf.Variable(initial)
def bias_variable(shape, name=None):
    initial = tf.constant(0.1, shape=shape)
    if name:
        return tf.Variable(initial, name=name)
    else:
        return tf.Variable(initial)
class Implynet:
def gen_feed_dict(self, partial_obs, full_obs):
ret = dict()
ret[self.partial_obs] = partial_obs
ret[self.full_obs] = full_obs
return ret
# load the model and give back a session
def load_model(self, sess, saved_loc):
self.saver.restore(sess, saved_loc)
print("Model restored.")
# make the model
def __init__(self, name):
with tf.variable_scope('imply') as scope:
# set up placeholders
self.partial_obs = tf.placeholder(tf.float32, [N_BATCH, L , L , 2], name="partial_obs")
self.full_obs = tf.placeholder(tf.float32, [N_BATCH, L , L , 2], name="full_obs")
# some constants
self.n_hidden = 500
            # make hidden representation
W1 = weight_variable([L * L * 2, self.n_hidden])
b1 = bias_variable([self.n_hidden])
partial_flat = tf.reshape(self.partial_obs, [N_BATCH, L * L * 2])
hidden = tf.nn.relu(tf.matmul(partial_flat, W1) + b1)
W_preds = [weight_variable([self.n_hidden, 2]) for _ in range(L*L)]
b_preds = [bias_variable([2]) for _ in range(L*L)]
e2 = tf.constant(1e-10, shape=[N_BATCH, 2])
self.query_preds = [tf.nn.softmax(tf.matmul(hidden, W_preds[i]) + b_preds[i])+e2 for i in range(L*L)]
print "query_preds shape ", show_dim(self.query_preds)
# doing some reshape of the input tensor
full_obs_trans = tf.transpose(self.full_obs, perm=[1,2,0,3])
print full_obs_trans.get_shape()
full_obs_split = tf.reshape(full_obs_trans, [L*L, N_BATCH, 2])
full_obs_split = tf.unpack(full_obs_split)
print show_dim(full_obs_split)
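            # per-cell cross-entropy between the true one-hot label and the
            # softmax prediction; the 1e-10 epsilon added to each softmax
            # above keeps tf.log away from log(0)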
self.query_pred_costs = []
            for idx in range(L * L):
                cell_cost = -tf.reduce_sum(full_obs_split[idx] * tf.log(self.query_preds[idx]))
                self.query_pred_costs.append(cell_cost)
print "costs shapes ", show_dim(self.query_pred_costs)
self.cost_query_pred = sum(self.query_pred_costs)
# ------------------------------------------------------------------------ training steps
# gvs = optimizer.compute_gradients(cost)
# capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
# train_op = optimizer.apply_gradients(capped_gvs)
# optimizer = tf.train.RMSPropOptimizer(0.0001)
optimizer = tf.train.AdagradOptimizer(0.01)
pred_gvs = optimizer.compute_gradients(self.cost_query_pred)
capped_pred_gvs = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in pred_gvs]
#train_pred = optimizer.minimize(cost_pred, var_list = VAR_pred)
self.train_query_pred = optimizer.apply_gradients(capped_pred_gvs)
# train_query_pred = optimizer.minimize(cost_query_pred, var_list = VAR_pred)
# Before starting, initialize the variables. We will 'run' this first.
self.init = tf.initialize_all_variables()
self.saver = tf.train.Saver()
# save the model
def save(self, sess, model_loc="model_imply.ckpt"):
save_path = self.saver.save(sess, model_loc)
print("Model saved in file: %s" % save_path)
# train on a particular data batch
def train(self, sess, data_batch):
partial_obs, full_obs = data_batch
feed_dic = self.gen_feed_dict(partial_obs, full_obs)
# qry_prd = sess.run(self.query_preds, feed_dict=feed_dic)
# print qry_prd[0][0]
# qry_costs = sess.run(self.query_pred_costs, feed_dict=feed_dic)
# print qry_costs[0]
cost_query_pred_pre = sess.run([self.cost_query_pred], feed_dict=feed_dic)[0]
sess.run([self.train_query_pred], feed_dict=feed_dic)
cost_query_pred_post = sess.run([self.cost_query_pred], feed_dict=feed_dic)[0]
print "train query pred ", cost_query_pred_pre, " ",\
cost_query_pred_post, " ", True if cost_query_pred_post < cost_query_pred_pre else False
# =========== HELPERS =============
# a placeholder to feed in a single observation
    def get_feed_dic_obs(self, obs):
        # need to create all the necessary feeds: paint the observed
        # coordinates into a single L x L x 2 grid and tile it across the batch
        num_obs = len(obs)
        _obs = np.zeros([L, L, 2])
        for ob_idx in range(num_obs):
            cord, lab = obs[ob_idx]
            xx, yy = cord
            _obs[xx][yy] = lab
        obss = np.array([_obs for i in range(N_BATCH)])
feed_dic = dict()
feed_dic[self.partial_obs] = obss
return feed_dic
    def get_all_preds(self, sess, obs):
        feed_dic = self.get_feed_dic_obs(obs)
        preds = sess.run(self.query_preds, feed_dic)
        preds0 = np.array([x[0] for x in preds])
        preds0 = np.reshape(preds0, [L, L, 2])
        return preds0
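    # pick the unobserved cell whose predicted distribution is closest to
    # uniform, i.e. where |p(0) - p(1)| is smallest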
def get_most_confuse(self, sess, obs):
obs_qry = [_[0] for _ in obs]
all_preds = self.get_all_preds(sess, obs)
all_pred_at_key1 = []
for i in range(L):
for j in range(L):
qry = i, j
value = all_preds[i][j]
if qry not in obs_qry:
all_pred_at_key1.append((qry, value))
most_confs = [(abs(x[1][0] - x[1][1]), x[0]) for x in all_pred_at_key1]
most_conf = min(most_confs)
return most_conf[1]
def get_active_trace(self, sess, query, epi=0.0):
obs = []
for i in range(OBS_SIZE):
if np.random.random() < epi:
rand_coord = sample_coord_new(obs)
obs.append((rand_coord, query(rand_coord)))
else:
most_conf = self.get_most_confuse(sess, obs)
obs.append((most_conf, query(most_conf)))
return obs
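    # A minimal usage sketch (assuming an open session `sess`, a `query`
    # callable that returns the label at a coordinate, and an Implynet
    # instance `imply_net` -- the names here are illustrative):
    #
    #     trace = imply_net.get_active_trace(sess, query, epi=0.1)
    #
    # With probability epi a random unobserved coordinate is queried;
    # otherwise the most uncertain cell from get_most_confuse is chosen.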
class Invnet:
def gen_feed_dict(self, true_lab, obs):
ret = dict()
ret[self.true_label] = true_lab
ret[self.observations] = obs
return ret
# load the model and give back a session
def load_model(self, sess, saved_loc):
self.saver.restore(sess, saved_loc)
print("Inversion Model restored.")
# save the model
def save(self, sess, model_loc="model_invert.ckpt"):
save_path = self.saver.save(sess, model_loc)
print("Model saved in file: %s" % save_path)
def __init__(self, name):
with tf.variable_scope('inv') as scope:
self.true_label = tf.placeholder(tf.float32, [N_BATCH, X_L], name="true_label_"+name)
self.observations = tf.placeholder(tf.float32, [N_BATCH, L, L, 2], name="obs_"+name)
self.n_hidden = 1200
W_inv1 = weight_variable([L*L*2, self.n_hidden], name="W_inv1_"+name)
b_inv1 = bias_variable([self.n_hidden], name="b_inv1_"+name)
W_inv2 = weight_variable([self.n_hidden,X_L], name="W_inv2_"+name)
b_inv2 = bias_variable([X_L], name="b_inv2_"+name)
self.VARS = [W_inv1, b_inv1, W_inv2, b_inv2]
reshape_ob = tf.reshape(self.observations, [N_BATCH, L*L*2])
            hidden = tf.nn.relu(tf.matmul(reshape_ob, W_inv1) + b_inv1)
            epsilon1 = tf.constant(1e-10, shape=[N_BATCH, X_L])
            self.pred = tf.nn.softmax(tf.matmul(hidden, W_inv2) + b_inv2) + epsilon1
self.cost = -tf.reduce_sum(self.true_label * tf.log(self.pred))
optimizer = tf.train.RMSPropOptimizer(0.001)
inv_gvs = optimizer.compute_gradients(self.cost)
self.train_inv = optimizer.apply_gradients(inv_gvs)
all_var_var = tf.get_collection(tf.GraphKeys.VARIABLES, scope='inv')
self.init = tf.initialize_variables(all_var_var)
self.saver = tf.train.Saver(self.VARS)
# train on a particular data batch
def train(self, sess, data_batch):
true_lab, obss = data_batch
feed_dic = self.gen_feed_dict(true_lab, obss)
cost_pre = sess.run([self.cost], feed_dict=feed_dic)[0]
sess.run([self.train_inv], feed_dict=feed_dic)
cost_post = sess.run([self.cost], feed_dict=feed_dic)[0]
print "train inv ", cost_pre, " ", cost_post, " ", True if cost_post < cost_pre else False
# get inversion from observations
def invert(self, sess, obs):
obss = [obs for _ in range(N_BATCH)]
fake_lab = [np.zeros(shape=[X_L]) for _ in range(N_BATCH)]
data_in = inv_batch_obs(fake_lab, obss)
feed_dic = self.gen_feed_dict(*data_in)
return sess.run([self.pred], feed_dict=feed_dic)[0][0]
|
|
import argparse
import ConfigParser
import sys
import os
import pkg_resources
import time
from datetime import datetime, timedelta
from termcolor import colored
from fake_section_head import FakeSectionHead
from live_logs import download_live_logs
from s3_logs import download_s3_logs
from search import find_cached_logs
from tail import start_tail
from grep import grep_files
from cat import cat_files
VERSION = pkg_resources.require("singularity-logfetch")[0].version
CONF_READ_ERR_FORMAT = 'Could not load config from {0} due to {1}'
DEFAULT_CONF_DIR = os.path.expanduser('~/.logfetch')
DEFAULT_CONF_FILE = 'default'
DEFAULT_PARALLEL_FETCHES = 10
DEFAULT_CHUNK_SIZE = 8192
DEFAULT_DEST = os.path.expanduser('~/.logfetch_cache')
DEFAULT_TASK_COUNT = 20
DEFAULT_DAYS = 7
DEFAULT_S3_PATTERN = '%requestId/%%Y/%m/%taskId_%index-%s-%filename'
IS_A_TTY = sys.stdout.isatty()
def exit(reason, color='red'):
sys.stderr.write(colored(reason, color) + '\n')
sys.exit(1)
def tail_logs(args):
try:
start_tail(args)
except KeyboardInterrupt:
exit('Stopping logtail...', 'magenta')
def search_logs(args):
try:
all_logs = find_cached_logs(args)
grep_files(args, all_logs)
except KeyboardInterrupt:
exit('Stopping logfetch...', 'magenta')
def fetch_logs(args):
try:
check_dest(args)
all_logs = []
if not args.skip_s3:
all_logs += download_s3_logs(args)
if not args.skip_live:
all_logs += download_live_logs(args)
if not args.download_only:
grep_files(args, all_logs)
except KeyboardInterrupt:
exit('Stopping logfetch...', 'magenta')
def cat_logs(args):
try:
check_dest(args)
all_logs = []
if not args.skip_s3:
all_logs += download_s3_logs(args)
if not args.skip_live:
all_logs += download_live_logs(args)
if not args.download_only:
cat_files(args, all_logs)
except KeyboardInterrupt:
exit('Stopping logcat...', 'magenta')
def check_dest(args):
if not os.path.exists(args.dest):
os.makedirs(args.dest)
def check_args(args):
if args.deployId and not args.requestId:
exit("Must specify request-id (-r) when specifying deploy-id")
elif not args.requestId and not args.deployId and not args.taskId:
exit('Must specify one of\n -t task-id\n -r request-id and -d deploy-id\n -r request-id')
def convert_to_date(args, argument, is_start):
try:
if isinstance(argument, datetime):
return argument
else:
val = datetime.utcnow() - timedelta(days=int(argument))
    except Exception:
try:
if args.zone:
timestring = '{0} {1}'.format(argument, '00:00:00' if is_start else '23:59:59') if len(argument) < 11 else argument
val = datetime.utcfromtimestamp(time.mktime(datetime.strptime(timestring, "%Y-%m-%d %H:%M:%S").timetuple()))
else:
timestring = '{0} {1}'.format(argument, '00:00:00' if is_start else '23:59:59') if len(argument) < 11 else argument
val = datetime.strptime('{0} UTC'.format(timestring), "%Y-%m-%d %H:%M:%S %Z")
        except Exception:
            exit('Start/End days value must be either a number of days or a date in format "%Y-%m-%d %H:%M:%S" or "%Y-%m-%d"')
return val
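# A minimal usage sketch of the two forms convert_to_date accepts (assuming
# the --local-zone flag is not set): an integer argument is a number of days
# back from now, while a date string is parsed as UTC, e.g.
#
#     convert_to_date(args, '7', True)           # utcnow() - timedelta(days=7)
#     convert_to_date(args, '2018-01-01', True)  # 2018-01-01 00:00:00 UTC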
def fetch():
conf_parser = argparse.ArgumentParser(version=VERSION, description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False)
conf_parser.add_argument("-f", "--conf-folder", dest='conf_folder', help="specify a folder for config files to live")
conf_parser.add_argument("-c", "--conf-file", dest='conf_file', help="Specify config file within the conf folder", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args()
conf_dir = args.conf_folder if args.conf_folder else DEFAULT_CONF_DIR
conf_file = os.path.expanduser(conf_dir + '/' + args.conf_file) if args.conf_file else os.path.expanduser(conf_dir + '/' + DEFAULT_CONF_FILE)
config = ConfigParser.SafeConfigParser()
config.optionxform = str
defaults = {
"num_parallel_fetches" : DEFAULT_PARALLEL_FETCHES,
"chunk_size" : DEFAULT_CHUNK_SIZE,
"dest" : DEFAULT_DEST,
"task_count" : DEFAULT_TASK_COUNT,
"start" : datetime.strptime('{0} 00:00:00'.format(datetime.now().strftime("%Y-%m-%d")), "%Y-%m-%d %H:%M:%S") - timedelta(days=DEFAULT_DAYS),
"file_pattern" : DEFAULT_S3_PATTERN,
"end" : datetime.strptime('{0} 23:59:59'.format(datetime.now().strftime("%Y-%m-%d")), "%Y-%m-%d %H:%M:%S")
}
try:
config.readfp(FakeSectionHead(open(os.path.expanduser(conf_file))))
defaults.update(dict(config.items("Defaults")))
except Exception, err:
sys.stderr.write(CONF_READ_ERR_FORMAT.format(conf_file, err) + '\n')
parser = argparse.ArgumentParser(parents=[conf_parser], description="Fetch log files from Singularity. One can specify either a TaskId, RequestId and DeployId, or RequestId", prog="logfetch")
parser.set_defaults(**defaults)
parser.add_argument("-t", "--task-id", dest="taskId", help="TaskId of task to fetch logs for")
parser.add_argument("-r", "--request-id", dest="requestId", help="RequestId of request to fetch logs for (can be a glob)")
parser.add_argument("-T", "--task-count", dest="task_count", help="Number of recent tasks per request to fetch logs from", type=int)
parser.add_argument("-d", "--deploy-id", dest="deployId", help="DeployId of task to fetch logs for (can be a glob)")
parser.add_argument("-o", "--dest", dest="dest", help="Destination directory")
parser.add_argument("-n", "--num-parallel-fetches", dest="num_parallel_fetches", help="Number of fetches to make at once", type=int)
parser.add_argument("-C", "--chunk-size", dest="chunk_size", help="Chunk size for writing from response to filesystem", type=int)
parser.add_argument("-u", "--singularity-uri-base", dest="singularity_uri_base", help="The base for singularity (eg. http://localhost:8080/singularity/v1)")
parser.add_argument("-s", "--start", dest="start", help="Search for logs no older than this, can be an integer number of days or date in format '%%Y-%%m-%%d %%H:%%M:%%S' or '%%Y-%%m-%%d'")
parser.add_argument("-e", "--end", dest="end", help="Search for logs no newer than this, can be an integer number of days or date in format '%%Y-%%m-%%d %%H:%%M:%%S' or '%%Y-%%m-%%d' (defaults to None/now)")
parser.add_argument("-l", "--log-type", dest="logtype", help="Logfile type to downlaod (ie 'access.log'), can be a glob (ie *.log)")
parser.add_argument("-p", "--file-pattern", dest="file_pattern", help="S3 uploader file pattern")
parser.add_argument("-N", "--no-name-fetch-off", dest="no_name_fetch_off", help="If a logtype matcher is specified, but the s3 log pattern does not include file name, don't download any s3 files", action="store_true")
parser.add_argument("-g", "--grep", dest="grep", help="Regex to grep for (normal grep syntax) or a full grep command")
parser.add_argument("-z", "--local-zone", dest="zone", help="If specified, input times in the local time zone and convert to UTC, if not specified inputs are assumed to be UTC", action="store_true")
parser.add_argument("-S", "--skip-s3", dest="skip_s3", help="Don't download/search s3 logs", action='store_true')
parser.add_argument("-L", "--skip-live", dest="skip_live", help="Don't download/search live logs", action='store_true')
parser.add_argument("-U", "--use-cache", dest="use_cache", help="Use cache for live logs, don't re-download them", action='store_true')
parser.add_argument("--search", dest="search", help="run logsearch on the local cache of downloaded files", action='store_true')
parser.add_argument("-V", "--verbose", dest="verbose", help="Print more verbose output", action='store_true')
parser.add_argument("--silent", dest="silent", help="No stderr (progress, file names, etc) output", action='store_true')
parser.add_argument("-D" ,"--download-only", dest="download_only", help="Only download files, don't unzip or grep", action='store_true')
args = parser.parse_args(remaining_argv)
if not IS_A_TTY:
args.silent = True
check_args(args)
args.start = convert_to_date(args, args.start, True)
args.end = convert_to_date(args, args.end, False)
args.dest = os.path.expanduser(args.dest)
try:
setattr(args, 'headers', dict(config.items("Request Headers")))
except:
if not args.silent:
sys.stderr.write('No additional request headers found\n')
setattr(args, 'headers', {})
if args.search:
search_logs(args)
else:
fetch_logs(args)
def search():
conf_parser = argparse.ArgumentParser(version=VERSION, description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False)
conf_parser.add_argument("-f", "--conf-folder", dest='conf_folder', help="specify a folder for config files to live")
conf_parser.add_argument("-c", "--conf-file", dest='conf_file', help="Specify config file within the conf folder", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args()
conf_dir = args.conf_folder if args.conf_folder else DEFAULT_CONF_DIR
conf_file = os.path.expanduser(conf_dir + '/' + args.conf_file) if args.conf_file else os.path.expanduser(conf_dir + '/' + DEFAULT_CONF_FILE)
config = ConfigParser.SafeConfigParser()
config.optionxform = str
defaults = {
"dest" : DEFAULT_DEST,
"start" : datetime.strptime('{0} 00:00:00'.format(datetime.now().strftime("%Y-%m-%d")), "%Y-%m-%d %H:%M:%S") - timedelta(days=DEFAULT_DAYS),
"file_pattern" : DEFAULT_S3_PATTERN,
"end" : datetime.strptime('{0} 23:59:59'.format(datetime.now().strftime("%Y-%m-%d")), "%Y-%m-%d %H:%M:%S")
}
try:
config.readfp(FakeSectionHead(open(os.path.expanduser(conf_file))))
defaults.update(dict(config.items("Defaults")))
except Exception, err:
sys.stderr.write(CONF_READ_ERR_FORMAT.format(conf_file, err) + '\n')
parser = argparse.ArgumentParser(parents=[conf_parser], description="Search log files in the cache directory", prog="logsearch")
parser.set_defaults(**defaults)
parser.add_argument("-t", "--task-id", dest="taskId", help="TaskId of task to fetch logs for")
parser.add_argument("-r", "--request-id", dest="requestId", help="RequestId of request to fetch logs for (can be a glob)")
parser.add_argument("-d", "--deploy-id", dest="deployId", help="DeployId of task to fetch logs for (can be a glob)")
parser.add_argument("-o", "--dest", dest="dest", help="Destination directory")
parser.add_argument("-s", "--start", dest="start", help="Search for logs no older than this, can be an integer number of days or date in format '%%Y-%%m-%%d %%H:%%M:%%S' or '%%Y-%%m-%%d'")
parser.add_argument("-e", "--end", dest="end", help="Search for logs no newer than this, can be an integer number of days or date in format '%%Y-%%m-%%d %%H:%%M:%%S' or '%%Y-%%m-%%d' (defaults to None/now)")
parser.add_argument("-l", "--log-type", dest="logtype", help="Logfile type to downlaod (ie 'access.log'), can be a glob (ie *.log)")
parser.add_argument("-p", "--file-pattern", dest="file_pattern", help="S3 uploader file pattern")
parser.add_argument("-g", "--grep", dest="grep", help="Regex to grep for (normal grep syntax) or a full grep command")
parser.add_argument("-z", "--local-zone", dest="zone", help="If specified, input times in the local time zone and convert to UTC, if not specified inputs are assumed to be UTC", action="store_true")
parser.add_argument("-V", "--verbose", dest="verbose", help="Print more verbose output", action='store_true')
parser.add_argument("--silent", dest="silent", help="No stderr (progress, file names, etc) output", action='store_true')
args, unknown = parser.parse_known_args(remaining_argv)
if not IS_A_TTY:
args.silent = True
if args.verbose and unknown:
if not args.silent:
            sys.stderr.write(colored('Found unknown args {0}'.format(unknown), 'magenta') + '\n')
check_args(args)
args.start = convert_to_date(args, args.start, True)
args.end = convert_to_date(args, args.end, False)
args.dest = os.path.expanduser(args.dest)
search_logs(args)
def cat():
conf_parser = argparse.ArgumentParser(version=VERSION, description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False)
conf_parser.add_argument("-f", "--conf-folder", dest="conf_folder", help="specify a folder for config files to live")
conf_parser.add_argument("-c", "--conf-file", dest="conf_file", help="Specify config file within the conf folder", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args()
conf_dir = args.conf_folder if args.conf_folder else DEFAULT_CONF_DIR
conf_file = os.path.expanduser(conf_dir + '/' + args.conf_file) if args.conf_file else os.path.expanduser(conf_dir + '/' + DEFAULT_CONF_FILE)
config = ConfigParser.SafeConfigParser()
config.optionxform = str
defaults = {
"num_parallel_fetches" : DEFAULT_PARALLEL_FETCHES,
"chunk_size" : DEFAULT_CHUNK_SIZE,
"dest" : DEFAULT_DEST,
"task_count" : DEFAULT_TASK_COUNT,
"start" : datetime.strptime('{0} 00:00:00'.format(datetime.now().strftime("%Y-%m-%d")), "%Y-%m-%d %H:%M:%S") - timedelta(days=DEFAULT_DAYS),
"file_pattern" : DEFAULT_S3_PATTERN,
"end" : datetime.strptime('{0} 23:59:59'.format(datetime.now().strftime("%Y-%m-%d")), "%Y-%m-%d %H:%M:%S")
}
try:
config.readfp(FakeSectionHead(open(os.path.expanduser(conf_file))))
defaults.update(dict(config.items("Defaults")))
except Exception, err:
sys.stderr.write(CONF_READ_ERR_FORMAT.format(conf_file, err) + '\n')
parser = argparse.ArgumentParser(parents=[conf_parser], description="Fetch log files from Singularity and cat to stdout. One can specify either a TaskId, RequestId and DeployId, or RequestId", prog="logcat")
parser.set_defaults(**defaults)
parser.add_argument("-t", "--task-id", dest="taskId", help="TaskId of task to fetch logs for")
parser.add_argument("-r", "--request-id", dest="requestId", help="RequestId of request to fetch logs for (can be a glob)")
parser.add_argument("-T", "--task-count", dest="taskCount", help="Number of recent tasks per request to fetch logs from", type=int)
parser.add_argument("-d", "--deploy-id", dest="deployId", help="DeployId of tasks to fetch logs for (can be a glob)")
parser.add_argument("-o", "--dest", dest="dest", help="Destination directory")
parser.add_argument("-n", "--num-parallel-fetches", dest="num_parallel_fetches", help="Number of fetches to make at once", type=int)
parser.add_argument("-C", "--chunk-size", dest="chunk_size", help="Chunk size for writing from response to filesystem", type=int)
parser.add_argument("-u", "--singularity-uri-base", dest="singularity_uri_base", help="The base for singularity (eg. http://localhost:8080/singularity/v1)")
parser.add_argument("-s", "--start", dest="start", help="Search for logs no older than this, can be an integer number of days or date in format '%%Y-%%m-%%d %%H:%%M:%%S' or '%%Y-%%m-%%d'")
parser.add_argument("-e", "--end", dest="end", help="Search for logs no newer than this, can be an integer number of days or date in format '%%Y-%%m-%%d %%H:%%M:%%S' or '%%Y-%%m-%%d' (defaults to None/now)")
parser.add_argument("-l", "--logtype", dest="logtype", help="Logfile type to downlaod (ie 'access.log'), can be a glob (ie *.log)")
parser.add_argument("-p", "--file-pattern", dest="file_pattern", help="S3 uploader file pattern")
parser.add_argument("-N", "--no-name-fetch-off", dest="no_name_fetch_off", help="If a logtype matcher is specified, but the s3 log pattern does not include file name, don't download any s3 files", action="store_true")
parser.add_argument("-z", "--local-zone", dest="zone", help="If specified, input times in the local time zone and convert to UTC, if not specified inputs are assumed to be UTC", action="store_true")
parser.add_argument("-S", "--skip-s3", dest="skip_s3", help="Don't download/search s3 logs", action='store_true')
parser.add_argument("-L", "--skip-live", dest="skip_live", help="Don't download/search live logs", action='store_true')
parser.add_argument("-U", "--use-cache", dest="use_cache", help="Use cache for live logs, don't re-download them", action='store_true')
parser.add_argument("-V", "--verbose", dest="verbose", help="Print more verbose output", action='store_true')
parser.add_argument("--silent", dest="silent", help="No stderr (progress, file names, etc) output", action='store_true')
parser.add_argument("-D" ,"--download-only", dest="download_only", help="Only download files, don't unzip or grep", action='store_true')
args = parser.parse_args(remaining_argv)
if not IS_A_TTY:
args.silent = True
check_args(args)
args.start = convert_to_date(args, args.start, True)
args.end = convert_to_date(args, args.end, False)
args.dest = os.path.expanduser(args.dest)
try:
setattr(args, 'headers', dict(config.items("Request Headers")))
except:
if not args.silent:
sys.stderr.write('No additional request headers found\n')
setattr(args, 'headers', {})
cat_logs(args)
def tail():
conf_parser = argparse.ArgumentParser(version=VERSION, description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False)
conf_parser.add_argument("-f", "--conf-folder", dest="conf_folder", help="specify a folder for config files to live")
conf_parser.add_argument("-c", "--conf-file", dest="conf_file", help="Specify config file within the conf folder", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args()
conf_dir = args.conf_folder if args.conf_folder else DEFAULT_CONF_DIR
conf_file = os.path.expanduser(conf_dir + '/' + args.conf_file) if args.conf_file else os.path.expanduser(conf_dir + '/' + DEFAULT_CONF_FILE)
config = ConfigParser.SafeConfigParser()
config.optionxform = str
    defaults = {'verbose': False, 'dest': DEFAULT_DEST}
try:
config.readfp(FakeSectionHead(open(os.path.expanduser(conf_file))))
defaults.update(dict(config.items("Defaults")))
except Exception, err:
sys.stderr.write(CONF_READ_ERR_FORMAT.format(conf_file, err) + '\n')
parser = argparse.ArgumentParser(parents=[conf_parser], description="Tail log files from Singularity. One can specify either a TaskId, RequestId and DeployId, or RequestId", prog="logtail")
parser.set_defaults(**defaults)
parser.add_argument("-t", "--task-id", dest="taskId", help="TaskId of task to fetch logs for")
parser.add_argument("-r", "--request-id", dest="requestId", help="RequestId of request to fetch logs for (can be a glob)")
parser.add_argument("-d", "--deploy-id", dest="deployId", help="DeployId of tasks to fetch logs for (can be a glob)")
parser.add_argument("-u", "--singularity-uri-base", dest="singularity_uri_base", help="The base for singularity (eg. http://localhost:8080/singularity/v1)")
parser.add_argument("-l", "--logfile", dest="logfile", help="Logfile path/name to tail (ie 'logs/access.log')")
parser.add_argument("-V", "--verbose", dest="verbose", help="more verbose output", action='store_true')
parser.add_argument("--silent", dest="silent", help="No stderr (progress, file names, etc) output", action='store_true')
args = parser.parse_args(remaining_argv)
if not IS_A_TTY:
args.silent = True
if not args.logfile:
exit("Must specify logfile to tail (-l)")
check_args(args)
args.dest = os.path.expanduser(args.dest)
try:
setattr(args, 'headers', dict(config.items("Request Headers")))
except:
if not args.silent:
sys.stderr.write('No additional request headers found\n')
setattr(args, 'headers', {})
tail_logs(args)
|
|
import mock
import datetime
from modularodm import Q
from nose.tools import * # flake8: noqa (PEP8 asserts)
from framework import auth
from framework.auth import exceptions
from framework.exceptions import PermissionsError
from website import models, project
from tests import base
from tests.base import fake
from tests import factories
from framework.celery_tasks import handlers
class TestUser(base.OsfTestCase):
def setUp(self):
super(TestUser, self).setUp()
self.user = factories.AuthUserFactory()
def tearDown(self):
models.Node.remove()
models.User.remove()
models.Session.remove()
super(TestUser, self).tearDown()
# Regression test for https://github.com/CenterForOpenScience/osf.io/issues/2454
def test_add_unconfirmed_email_when_email_verifications_is_None(self):
self.user.email_verifications = None
self.user.save()
email = fake.email()
self.user.add_unconfirmed_email(email)
self.user.save()
assert_in(email, self.user.unconfirmed_emails)
def test_unconfirmed_emails(self):
assert_equal(
self.user.unconfirmed_emails,
[]
)
self.user.add_unconfirmed_email('foo@bar.com')
assert_equal(
self.user.unconfirmed_emails,
['foo@bar.com']
)
# email_verifications field may be None
self.user.email_verifications = None
self.user.save()
assert_equal(self.user.unconfirmed_emails, [])
def test_unconfirmed_emails_unregistered_user(self):
assert_equal(
factories.UnregUserFactory().unconfirmed_emails,
[]
)
def test_unconfirmed_emails_unconfirmed_user(self):
user = factories.UnconfirmedUserFactory()
assert_equal(
user.unconfirmed_emails,
[user.username]
)
def test_remove_unconfirmed_email(self):
self.user.add_unconfirmed_email('foo@bar.com')
self.user.save()
assert_in('foo@bar.com', self.user.unconfirmed_emails) # sanity check
self.user.remove_unconfirmed_email('foo@bar.com')
self.user.save()
assert_not_in('foo@bar.com', self.user.unconfirmed_emails)
def test_confirm_email(self):
token = self.user.add_unconfirmed_email('foo@bar.com')
self.user.confirm_email(token)
assert_not_in('foo@bar.com', self.user.unconfirmed_emails)
assert_in('foo@bar.com', self.user.emails)
def test_confirm_email_comparison_is_case_insensitive(self):
u = factories.UnconfirmedUserFactory.build(
username='letsgettacos@lgt.com'
)
u.add_unconfirmed_email('LetsGetTacos@LGT.com')
u.save()
assert_false(u.is_confirmed) # sanity check
token = u.get_confirmation_token('LetsGetTacos@LGT.com')
confirmed = u.confirm_email(token)
assert_true(confirmed)
assert_true(u.is_confirmed)
def test_cannot_remove_primary_email_from_email_list(self):
with assert_raises(PermissionsError) as e:
self.user.remove_email(self.user.username)
assert_equal(e.exception.message, "Can't remove primary email")
def test_add_same_unconfirmed_email_twice(self):
email = "test@example.com"
token1 = self.user.add_unconfirmed_email(email)
self.user.save()
self.user.reload()
assert_equal(token1, self.user.get_confirmation_token(email))
assert_equal(email, self.user._get_unconfirmed_email_for_token(token1))
token2 = self.user.add_unconfirmed_email(email)
self.user.save()
self.user.reload()
assert_not_equal(token1, self.user.get_confirmation_token(email))
assert_equal(token2, self.user.get_confirmation_token(email))
assert_equal(email, self.user._get_unconfirmed_email_for_token(token2))
with assert_raises(exceptions.InvalidTokenError):
self.user._get_unconfirmed_email_for_token(token1)
def test_contributed_property(self):
projects_contributed_to = project.model.Node.find(Q('contributors', 'eq', self.user._id))
assert_equal(list(self.user.contributed), list(projects_contributed_to))
def test_contributor_to_property(self):
normal_node = factories.ProjectFactory(creator=self.user)
normal_contributed_node = factories.ProjectFactory()
normal_contributed_node.add_contributor(self.user)
normal_contributed_node.save()
deleted_node = factories.ProjectFactory(creator=self.user, is_deleted=True)
bookmark_collection_node = factories.BookmarkCollectionFactory(creator=self.user)
collection_node = factories.CollectionFactory(creator=self.user)
project_to_be_invisible_on = factories.ProjectFactory()
project_to_be_invisible_on.add_contributor(self.user, visible=False)
project_to_be_invisible_on.save()
contributor_to_nodes = [node._id for node in self.user.contributor_to]
assert_in(normal_node._id, contributor_to_nodes)
assert_in(normal_contributed_node._id, contributor_to_nodes)
assert_in(project_to_be_invisible_on._id, contributor_to_nodes)
assert_not_in(deleted_node._id, contributor_to_nodes)
assert_not_in(bookmark_collection_node._id, contributor_to_nodes)
assert_not_in(collection_node._id, contributor_to_nodes)
def test_visible_contributor_to_property(self):
invisible_contributor = factories.UserFactory()
normal_node = factories.ProjectFactory(creator=invisible_contributor)
deleted_node = factories.ProjectFactory(creator=invisible_contributor, is_deleted=True)
bookmark_collection_node = factories.BookmarkCollectionFactory(creator=invisible_contributor)
collection_node = factories.CollectionFactory(creator=invisible_contributor)
project_to_be_invisible_on = factories.ProjectFactory()
project_to_be_invisible_on.add_contributor(invisible_contributor, visible=False)
project_to_be_invisible_on.save()
visible_contributor_to_nodes = [node._id for node in invisible_contributor.visible_contributor_to]
assert_in(normal_node._id, visible_contributor_to_nodes)
assert_not_in(deleted_node._id, visible_contributor_to_nodes)
assert_not_in(bookmark_collection_node._id, visible_contributor_to_nodes)
assert_not_in(collection_node._id, visible_contributor_to_nodes)
assert_not_in(project_to_be_invisible_on._id, visible_contributor_to_nodes)
def test_created_property(self):
# make sure there's at least one project
factories.ProjectFactory(creator=self.user)
projects_created_by_user = project.model.Node.find(Q('creator', 'eq', self.user._id))
assert_equal(list(self.user.created), list(projects_created_by_user))
class TestUserMerging(base.OsfTestCase):
ADDONS_UNDER_TEST = {
'deletable': {
'user_settings': factories.MockAddonUserSettings,
'node_settings': factories.MockAddonNodeSettings,
},
'unmergeable': {
'user_settings': factories.MockAddonUserSettings,
'node_settings': factories.MockAddonNodeSettings,
},
'mergeable': {
'user_settings': factories.MockAddonUserSettingsMergeable,
'node_settings': factories.MockAddonNodeSettings,
}
}
def setUp(self):
super(TestUserMerging, self).setUp()
self.user = factories.UserFactory()
with self.context:
handlers.celery_before_request()
def _add_unconfirmed_user(self):
self.unconfirmed = factories.UnconfirmedUserFactory()
self.user.system_tags = ['shared', 'user']
self.unconfirmed.system_tags = ['shared', 'unconfirmed']
def _add_unregistered_user(self):
self.unregistered = factories.UnregUserFactory()
self.project_with_unreg_contrib = factories.ProjectFactory()
self.project_with_unreg_contrib.add_unregistered_contributor(
fullname='Unreg',
email=self.unregistered.username,
auth=auth.Auth(self.project_with_unreg_contrib.creator)
)
self.project_with_unreg_contrib.save()
def test_can_be_merged_no_addons(self):
# No addons present
assert_true(self.user.can_be_merged)
def test_can_be_merged_unmergable_addon(self):
self.user.add_addon('unmergeable')
assert_false(self.user.can_be_merged)
def test_can_be_merged_mergable_addon(self):
self.user.add_addon('mergeable')
assert_true(self.user.can_be_merged)
def test_can_be_merged_both_addons(self):
self.user.add_addon('mergeable')
self.user.add_addon('unmergeable')
assert_false(self.user.can_be_merged)
def test_can_be_merged_delete_unmergable_addon(self):
self.user.add_addon('mergeable')
self.user.add_addon('deletable')
self.user.delete_addon('deletable')
assert_true(self.user.can_be_merged)
def test_merge_unconfirmed_into_unmergeable(self):
self.user.add_addon('unmergeable')
self.user.save()
# sanity check
assert_false(self.user.can_be_merged)
unconf = factories.UnconfirmedUserFactory()
# make sure this doesn't raise an exception
self.user.merge_user(unconf)
unreg = factories.UnregUserFactory()
# make sure this doesn't raise an exception
self.user.merge_user(unreg)
def test_merge_unmergeable_into_mergeable(self):
# These states should never happen in the current codebase...
# but that's why we have tests.
unconfirmed = factories.UnconfirmedUserFactory()
unconfirmed.add_addon('unmergeable')
with assert_raises(exceptions.MergeConflictError):
self.user.merge_user(unconfirmed)
unregistered = factories.UnregUserFactory()
unregistered.add_addon('unmergeable')
with assert_raises(exceptions.MergeConflictError):
self.user.merge_user(unregistered)
def test_merge_unmergeabled_into_unmergeable(self):
self.user.add_addon('unmergeable')
# These states should never happen in the current codebase...
# but that's why we have tests.
unconfirmed = factories.UnconfirmedUserFactory()
unconfirmed.add_addon('unmergeable')
with assert_raises(exceptions.MergeConflictError):
self.user.merge_user(unconfirmed)
unregistered = factories.UnregUserFactory()
unregistered.add_addon('unmergeable')
with assert_raises(exceptions.MergeConflictError):
self.user.merge_user(unregistered)
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_merge(self, mock_get_mailchimp_api):
other_user = factories.UserFactory()
other_user.save()
# define values for users' fields
today = datetime.datetime.now()
yesterday = today - datetime.timedelta(days=1)
self.user.comments_viewed_timestamp['shared_gt'] = today
other_user.comments_viewed_timestamp['shared_gt'] = yesterday
self.user.comments_viewed_timestamp['shared_lt'] = yesterday
other_user.comments_viewed_timestamp['shared_lt'] = today
self.user.comments_viewed_timestamp['user'] = yesterday
other_user.comments_viewed_timestamp['other'] = yesterday
self.user.email_verifications = {'user': {'email': 'a'}}
other_user.email_verifications = {'other': {'email': 'b'}}
self.user.external_accounts = [factories.ExternalAccountFactory()]
other_user.external_accounts = [factories.ExternalAccountFactory()]
self.user.mailchimp_mailing_lists = {
'user': True,
'shared_gt': True,
'shared_lt': False,
}
other_user.mailchimp_mailing_lists = {
'other': True,
'shared_gt': False,
'shared_lt': True,
}
self.user.piwik_token = 'abc'
other_user.piwik_token = 'def'
self.user.security_messages = {
'user': today,
'shared': today,
}
other_user.security_messages = {
'other': today,
'shared': today,
}
self.user.system_tags = ['user', 'shared']
other_user.system_tags = ['other', 'shared']
self.user.watched = [factories.WatchConfigFactory()]
other_user.watched = [factories.WatchConfigFactory()]
self.user.save()
other_user.save()
# define expected behavior for ALL FIELDS of the User object
default_to_master_user_fields = [
'_id',
'date_confirmed',
'date_disabled',
'date_last_login',
'date_registered',
'email_last_sent',
'family_name',
'fullname',
'given_name',
'is_claimed',
'is_invited',
'is_registered',
'jobs',
'locale',
'merged_by',
'middle_names',
'password',
'piwik_token',
'recently_added',
'schools',
'social',
'suffix',
'timezone',
'username',
'mailing_lists',
'verification_key',
'_affiliated_institutions',
'contributor_added_email_records'
]
calculated_fields = {
'comments_viewed_timestamp': {
'user': yesterday,
'other': yesterday,
'shared_gt': today,
'shared_lt': today,
},
'email_verifications': {
'user': {'email': 'a'},
'other': {'email': 'b'},
},
'emails': [
self.user.username,
other_user.username,
],
'external_accounts': [
self.user.external_accounts[0]._id,
other_user.external_accounts[0]._id,
],
'mailchimp_mailing_lists': {
'user': True,
'other': True,
'shared_gt': True,
'shared_lt': True,
},
'osf_mailing_lists': {
'Open Science Framework Help': True
},
'security_messages': {
'user': today,
'other': today,
'shared': today,
},
'system_tags': ['user', 'shared', 'other'],
'unclaimed_records': {},
'watched': [
self.user.watched[0]._id,
other_user.watched[0]._id,
],
}
# from the explicit rules above, compile expected field/value pairs
expected = {}
expected.update(calculated_fields)
for key in default_to_master_user_fields:
expected[key] = getattr(self.user, key)
# ensure all fields of the user object have an explicit expectation
assert_equal(
set(expected.keys()),
set(self.user._fields),
)
# mock mailchimp
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': x, 'list_name': list_name} for x, list_name in enumerate(self.user.mailchimp_mailing_lists)]}
# perform the merge
self.user.merge_user(other_user)
self.user.save()
handlers.celery_teardown_request()
# check each field/value pair
        for k, v in expected.iteritems():
            assert_equal(
                getattr(self.user, k),
                v,
                "{} doesn't match expectation".format(k)
            )
# check fields set on merged user
assert_equal(other_user.merged_by, self.user)
assert_equal(
0,
models.Session.find(
Q('data.auth_user_id', 'eq', other_user._id)
).count()
)
def test_merge_unconfirmed(self):
self._add_unconfirmed_user()
unconfirmed_username = self.unconfirmed.username
self.user.merge_user(self.unconfirmed)
assert_true(self.unconfirmed.is_merged)
assert_equal(self.unconfirmed.merged_by, self.user)
assert_true(self.user.is_claimed)
assert_false(self.user.is_invited)
# TODO: test profile fields - jobs, schools, social
# TODO: test security_messages
# TODO: test mailing_lists
assert_equal(self.user.system_tags, ['shared', 'user', 'unconfirmed'])
# TODO: test emails
# TODO: test watched
# TODO: test external_accounts
assert_equal(self.unconfirmed.email_verifications, {})
assert_is_none(self.unconfirmed.username)
assert_is_none(self.unconfirmed.password)
assert_is_none(self.unconfirmed.verification_key)
# The mergee's email no longer needs to be confirmed by merger
unconfirmed_emails = [record['email'] for record in self.user.email_verifications.values()]
assert_not_in(unconfirmed_username, unconfirmed_emails)
def test_merge_unregistered(self):
# test only those behaviors that are not tested with unconfirmed users
self._add_unregistered_user()
self.user.merge_user(self.unregistered)
self.project_with_unreg_contrib.reload()
assert_true(self.user.is_invited)
assert_in(self.user, self.project_with_unreg_contrib.contributors)
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_merge_doesnt_send_signal(self, mock_notify):
        # Explicitly reconnect signal as it is disconnected by default for tests
project.signals.contributor_added.connect(project.views.contributor.notify_added_contributor)
other_user = factories.UserFactory()
self.user.merge_user(other_user)
assert_equal(other_user.merged_by._id, self.user._id)
assert_false(mock_notify.called)
|
|
# -*- coding: utf-8 -*-
# Copyright 2014-2018 Rumma & Ko Ltd
# License: BSD, see LICENSE for more details.
"""
Defines the :rst:dir:`sigal_image` directive.
.. rst:directive:: sigal_image
.. _picsel: https://github.com/lsaffre/picsel
.. _Shotwell: https://en.wikipedia.org/wiki/Shotwell_%28software%29
.. _digiKam: https://www.digikam.org/
.. _Sigal: http://sigal.saimon.org/en/latest/
This creates a bridge between a photo collection managed with
Shotwell_ or digiKam_ and a blog generated with Sphinx. All photos
remain in the single central file tree managed by Shotwell_ or
digiKam_. From within Shotwell_ or digiKam_ you use a tag "blog" to
mark all photos that are to be available for your Sphinx blog. Then
you use picsel_ to extract those images to a separate directory. This
tree serves as input for Sigal_ which will generate a static html
gallery. An example of a Sigal gallery is `here
<http://sigal.saffre-rumma.net/>`__. The :rst:dir:`sigal_image`
directive was the last missing part of this publishing bridge: it
allows you to integrate your pictures into blog entries.
Usage::
.. sigal_image:: partial/path/to/photo.jpg[|title_or_options]
For example, if `sigal_base_url` in your :xfile:`conf.py` is set to
``"http://sigal.saffre-rumma.net"``, the following directive in your
rst source file::
.. sigal_image:: 2014/04/10/img_6617.jpg
will insert the following rst code::
.. raw:: html
<a href="http://sigal.saffre-rumma.net/2014/04/10/img_6617.jpg">
<img
src="http://sigal.saffre-rumma.net/2014/04/10/thumbnails/img_6617.jpg"/>
</a>
The file name can contain **formatting instructions** inspired by
`Wikipedia pictures
<https://en.wikipedia.org/wiki/Wikipedia:Picture_tutorial>`_ which
uses a variable number of pipe characters. For example:
>>> print(line2html("foo.jpg"))
<a href="http://example.com//foo.jpg" data-lightbox="image-1" data-title="foo.jpg"/><img src="http://example.com//thumbnails/foo.jpg" style="padding:4px; width:280px;" title="foo.jpg"/></a>
>>> print(line2html("foo.jpg|This is a nice picture"))
<a href="http://example.com//foo.jpg" data-lightbox="image-1" data-title="This is a nice picture"/><img src="http://example.com//thumbnails/foo.jpg" style="padding:4px; width:280px;" title="This is a nice picture"/></a>
>>> print(line2html("foo.jpg|thumb|This is a nice picture"))
<a href="http://example.com//foo.jpg" data-lightbox="image-1" data-title="This is a nice picture"/><img src="http://example.com//thumbnails/foo.jpg" style="padding:4px; float:right; width:280px;" title="This is a nice picture"/></a>
>>> print(line2html("foo.jpg|thumb|left|This is a nice picture"))
<a href="http://example.com//foo.jpg" data-lightbox="image-1" data-title="This is a nice picture"/><img src="http://example.com//thumbnails/foo.jpg" style="padding:4px; float:left;; width:280px;" title="This is a nice picture"/></a>
The generated HTML also includes attributes for `lightbox
<http://lokeshdhakar.com/projects/lightbox2/>`_. In order to activate
this feature you must add the content of the lightbox :file:`dist`
directory somewhere to your web server and then change your
`layout.html` template to something like this::
{%- block extrahead %}
{{ super() }}
<script src="/data/lightbox/js/lightbox-plus-jquery.min.js"></script>
<link href="/data/lightbox/css/lightbox.css" rel="stylesheet" />
{% endblock %}
"""
import os
from atelier.sphinxconf.insert_input import InsertInputDirective
TEMPLATE1 = """
.. raw:: html
<a href="%(target)s"><img src="%(src)s" style="padding:4px"/></a>
"""
#TEMPLATE = """<a href="%(target)s" style="%(style)s" %(class)s data-lightbox="image-1" data-title="%(caption)s"/><img src="%(src)s" style="padding:4px" title="%(caption)s"/></a>"""
TEMPLATE = """<a href="%(target)s" %(class)s data-lightbox="image-1" data-title="%(caption)s"/><img src="%(src)s" style="%(style)s" title="%(caption)s"/></a>"""
class Format(object):
    @classmethod
    def update_context(cls, caption, tplkw):
        tplkw.update(caption=caption)
        tplkw.update(style="padding:4px; width:280px;")
class Thumb(Format):
    @classmethod
    def update_context(cls, caption, tplkw):
        chunks = caption.split('|')
        if len(chunks) == 1:
            tplkw['style'] = "padding:4px; float:right; width:280px;"
        elif len(chunks) == 2:
            align, caption = chunks
            if align not in ("right", "left", "center"):
                raise Exception("Invalid alignment '{0}'".format(align))
            tplkw['style'] = "padding:4px; float:{0}; width:280px;".format(align)
        else:
            raise Exception("Impossible")
        tplkw.update(caption=caption)
class Wide(Format):
    @classmethod
    def update_context(cls, caption, tplkw):
        chunks = caption.split('|')
        if len(chunks) == 1:
            tplkw['style'] = "padding:4px; width:100%;"
        else:
            raise Exception("Impossible")
        tplkw.update(caption=caption)
FORMATS = dict()
FORMATS[None] = Format()
FORMATS['thumb'] = Thumb()
FORMATS['wide'] = Wide()
def buildurl(*parts):
return 'http://example.com/' + '/'.join(parts)
def line2html(name, buildurl=buildurl):
name = name.strip()
if not name:
return ''
kw = dict() # style="padding:4px")
kw['class'] = ''
kw['style'] = "padding:4px; width:280px;"
if True: # new format using only | as separator
caption = name
fmt = FORMATS[None]
chunks = name.split('|', 1)
if len(chunks) == 2:
name, caption = chunks
chunks = caption.split('|', 1)
if len(chunks) == 2:
fmtname, caption = chunks
fmt = FORMATS[fmtname]
fmt.update_context(caption, kw)
if ' ' in name:
raise Exception("Invalid filename. Spaces not allowed.")
else:
chunks = name.split(None, 1)
if len(chunks) == 1:
kw.update(caption='')
elif len(chunks) == 2:
name, caption = chunks
chunks = caption.split('|', 1)
if len(chunks) == 1:
fmt = FORMATS[None]
elif len(chunks) == 2:
fmtname, caption = chunks
fmt = FORMATS[fmtname]
else:
raise Exception("Impossible")
fmt.update_context(caption, kw)
else:
raise Exception("FILENAME <whitespace> DESC %s" % chunks)
head, tail = os.path.split(name)
kw.update(target=buildurl(head, tail))
kw.update(src=buildurl(head, 'thumbnails', tail))
return TEMPLATE % kw
class SigalImage(InsertInputDirective):
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
# option_spec = {
# 'style': directives.unchanged,
# 'class': directives.unchanged,
# }
def get_rst(self):
env = self.state.document.settings.env
base_url = env.config.sigal_base_url
def buildurl(*parts):
return base_url + '/' + '/'.join(parts)
s = ''
for name in self.content:
s += line2html(name, buildurl)
if s:
s = "\n\n.. raw:: html\n\n {0}\n\n".format(s)
return s
def get_headers(self):
return ['title', 'author', 'date']
def format_entry(self, e):
cells = []
# text = ''.join([unicode(c) for c in e.title.children])
# cells.append(":doc:`%s <%s>`" % (text, e.docname))
cells.append(":doc:`%s`" % e.docname)
cells.append(str(e.meta.get('author', '')))
cells.append(str(e.meta.get('date', '')))
return cells
def setup(app):
app.add_config_value(
'sigal_base_url', 'http://sigal.saffre-rumma.net', True)
app.add_directive('sigal_image', SigalImage)
# app.add_role(str('rref'), ReferingRefRole(
# lowercase=True,
# innernodeclass=nodes.emphasis,
# warn_dangling=True))
|
|
"""TestAuthZ implementations of learning.Objective"""
import datetime
import pytest
from tests.utilities.general import is_never_authz, is_no_authz, uses_cataloging
from dlkit.abstract_osid.authorization import objects as ABCObjects
from dlkit.abstract_osid.authorization import queries as ABCQueries
from dlkit.abstract_osid.authorization.objects import Authorization
from dlkit.abstract_osid.authorization.objects import AuthorizationList
from dlkit.abstract_osid.authorization.objects import Vault as ABCVault
from dlkit.abstract_osid.osid import errors
from dlkit.abstract_osid.osid.objects import OsidCatalogForm, OsidCatalog
from dlkit.abstract_osid.osid.objects import OsidForm
from dlkit.primordium.calendaring.primitives import DateTime
from dlkit.primordium.id.primitives import Id
from dlkit.primordium.type.primitives import Type
from dlkit.runtime import PROXY_SESSION, proxy_example
from dlkit.runtime.managers import Runtime
REQUEST = proxy_example.SimpleRequest()
CONDITION = PROXY_SESSION.get_proxy_condition()
CONDITION.set_http_request(REQUEST)
PROXY = PROXY_SESSION.get_proxy(CONDITION)
JANE_REQUEST = proxy_example.SimpleRequest(username='jane_doe')
JANE_CONDITION = PROXY_SESSION.get_proxy_condition()
JANE_CONDITION.set_http_request(JANE_REQUEST)
JANE_PROXY = PROXY_SESSION.get_proxy(JANE_CONDITION)
LOOKUP_OBJECTIVE_FUNCTION_ID = Id(**{'identifier': 'lookup', 'namespace': 'learning.Objective', 'authority': 'ODL.MIT.EDU'})
SEARCH_OBJECTIVE_FUNCTION_ID = Id(**{'identifier': 'search', 'namespace': 'learning.Objective', 'authority': 'ODL.MIT.EDU'})
CREATE_OBJECTIVE_FUNCTION_ID = Id(**{'identifier': 'create', 'namespace': 'learning.Objective', 'authority': 'ODL.MIT.EDU'})
DELETE_OBJECTIVE_FUNCTION_ID = Id(**{'identifier': 'delete', 'namespace': 'learning.Objective', 'authority': 'ODL.MIT.EDU'})
ASSIGN_OBJECTIVE_FUNCTION_ID = Id(**{'identifier': 'assign', 'namespace': 'learning.ObjectiveObjectiveBank', 'authority': 'ODL.MIT.EDU'})
CREATE_OBJECTIVEBANK_FUNCTION_ID = Id(**{'identifier': 'create', 'namespace': 'learning.ObjectiveBank', 'authority': 'ODL.MIT.EDU'})
DELETE_OBJECTIVEBANK_FUNCTION_ID = Id(**{'identifier': 'delete', 'namespace': 'learning.ObjectiveBank', 'authority': 'ODL.MIT.EDU'})
LOOKUP_OBJECTIVEBANK_FUNCTION_ID = Id(**{'identifier': 'lookup', 'namespace': 'learning.ObjectiveBank', 'authority': 'ODL.MIT.EDU'})
ACCESS_OBJECTIVEBANK_HIERARCHY_FUNCTION_ID = Id(**{'identifier': 'access', 'namespace': 'learning.ObjectiveBank', 'authority': 'ODL.MIT.EDU'})
MODIFY_OBJECTIVEBANK_HIERARCHY_FUNCTION_ID = Id(**{'identifier': 'modify', 'namespace': 'learning.ObjectiveBank', 'authority': 'ODL.MIT.EDU'})
ROOT_QUALIFIER_ID = Id('learning.ObjectiveBank%3AROOT%40ODL.MIT.EDU')
BOOTSTRAP_VAULT_TYPE = Type(authority='ODL.MIT.EDU', namespace='authorization.Vault', identifier='bootstrap_vault')
OVERRIDE_VAULT_TYPE = Type(authority='ODL.MIT.EDU', namespace='authorization.Vault', identifier='override_vault')
DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'})
DEFAULT_GENUS_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'GenusType', 'authority': 'DLKIT.MIT.EDU'})
ALIAS_ID = Id(**{'identifier': 'ALIAS', 'namespace': 'ALIAS', 'authority': 'ALIAS'})
AGENT_ID = Id(**{'identifier': 'jane_doe', 'namespace': 'osid.agent.Agent', 'authority': 'MIT-ODL'})
NEW_TYPE = Type(**{'identifier': 'NEW', 'namespace': 'MINE', 'authority': 'YOURS'})
NEW_TYPE_2 = Type(**{'identifier': 'NEW 2', 'namespace': 'MINE', 'authority': 'YOURS'})
BLUE_TYPE = Type(authority='BLUE', namespace='BLUE', identifier='BLUE')
@pytest.fixture(scope="class",
params=['TEST_SERVICE'])
def authz_adapter_class_fixture(request):
    request.cls.service_config = request.param
    request.cls.authz_mgr = Runtime().get_manager(
        'AUTHORIZATION',
        implementation=request.cls.service_config)
    if not is_never_authz(request.cls.service_config):
        request.cls.vault_admin_session = request.cls.authz_mgr.get_vault_admin_session()
        request.cls.vault_lookup_session = request.cls.authz_mgr.get_vault_lookup_session()

        create_form = request.cls.vault_admin_session.get_vault_form_for_create([])
        create_form.display_name = 'Test Vault'
        create_form.description = 'Test Vault for AuthorizationSession tests'
        create_form.genus_type = BOOTSTRAP_VAULT_TYPE
        request.cls.vault = request.cls.vault_admin_session.create_vault(create_form)

        create_form = request.cls.vault_admin_session.get_vault_form_for_create([])
        create_form.display_name = 'Test Override Vault'
        create_form.description = 'Test Override Vault for AuthorizationSession tests'
        create_form.genus_type = OVERRIDE_VAULT_TYPE
        request.cls.override_vault = request.cls.vault_admin_session.create_vault(create_form)

        request.cls.authz_admin_session = request.cls.authz_mgr.get_authorization_admin_session_for_vault(request.cls.vault.ident)
        request.cls.override_authz_admin_session = request.cls.authz_mgr.get_authorization_admin_session_for_vault(request.cls.override_vault.ident)
        request.cls.authz_lookup_session = request.cls.authz_mgr.get_authorization_lookup_session_for_vault(request.cls.vault.ident)

        request.cls.objective_bank_list = list()
        request.cls.objective_bank_id_list = list()
        request.cls.authz_list = list()
        request.cls.authz_id_list = list()
        request.cls.learning_mgr = Runtime().get_service_manager(
            'LEARNING',
            proxy=PROXY,
            implementation='TEST_SERVICE')
        for num in [0, 1, 2, 3, 4, 5, 6, 7]:
            create_form = request.cls.learning_mgr.get_objective_bank_form_for_create([])
            create_form.display_name = 'Test ObjectiveBank ' + str(num)
            create_form.description = 'Test ObjectiveBank for Testing Authorization Number: ' + str(num)
            objective_bank = request.cls.learning_mgr.create_objective_bank(create_form)
            request.cls.objective_bank_list.append(objective_bank)
            request.cls.objective_bank_id_list.append(objective_bank.ident)

        request.cls.learning_mgr.add_root_objective_bank(request.cls.objective_bank_id_list[0])
        request.cls.learning_mgr.add_child_objective_bank(request.cls.objective_bank_id_list[0], request.cls.objective_bank_id_list[1])
        request.cls.learning_mgr.add_child_objective_bank(request.cls.objective_bank_id_list[0], request.cls.objective_bank_id_list[2])
        request.cls.learning_mgr.add_child_objective_bank(request.cls.objective_bank_id_list[1], request.cls.objective_bank_id_list[3])
        request.cls.learning_mgr.add_child_objective_bank(request.cls.objective_bank_id_list[1], request.cls.objective_bank_id_list[4])
        request.cls.learning_mgr.add_child_objective_bank(request.cls.objective_bank_id_list[2], request.cls.objective_bank_id_list[5])
        # The hierarchy should look like this. (t) indicates where lookup is
        # explicitly authorized:
        #
        #            _____ 0 _____
        #           |             |
        #        _ 1(t) _         2        not in hierarchy
        #       |        |        |        ----------------
        #       3        4       5(t)        6      7(t)
        #
        # (the 'blue' objective in objective_bank 2 is also assigned to
        # objective_bank 7)
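        #
        # Given the grants set up below (ObjectiveBank lookup on the ROOT
        # qualifier, Objective lookup on banks 1 and 5, and an override-vault
        # Objective lookup on bank 7), a federated comparative view from
        # bank 0 sees banks 1, 3, 4 and 5 (4 banks x 3 objectives = 12) plus
        # the Blue objective from bank 2 through its assignment to bank 7:
        # the 13 objectives (5 of them Blue) asserted in the tests below.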
        request.cls.svc_mgr = Runtime().get_service_manager(
            'AUTHORIZATION',
            proxy=PROXY,
            implementation=request.cls.service_config)
        request.cls.catalog = request.cls.svc_mgr.get_vault(request.cls.vault.ident)

        # Set up ObjectiveBank lookup authorization for Jane
        create_form = request.cls.authz_admin_session.get_authorization_form_for_create_for_agent(
            AGENT_ID,
            LOOKUP_OBJECTIVEBANK_FUNCTION_ID,
            ROOT_QUALIFIER_ID,
            [])
        create_form.display_name = 'Jane Lookup Authorization'
        create_form.description = 'Test Authorization for AuthorizationSession tests'
        jane_lookup_authz = request.cls.authz_admin_session.create_authorization(create_form)
        request.cls.authz_list.append(jane_lookup_authz)
        request.cls.authz_id_list.append(jane_lookup_authz.ident)

        # Set up Objective lookup authorizations for Jane
        for num in [1, 5]:
            create_form = request.cls.authz_admin_session.get_authorization_form_for_create_for_agent(
                AGENT_ID,
                LOOKUP_OBJECTIVE_FUNCTION_ID,
                request.cls.objective_bank_id_list[num],
                [])
            create_form.display_name = 'Test Authorization ' + str(num)
            create_form.description = 'Test Authorization for AuthorizationSession tests'
            authz = request.cls.authz_admin_session.create_authorization(create_form)
            request.cls.authz_list.append(authz)
            request.cls.authz_id_list.append(authz.ident)

        # Set up Objective lookup override authorizations for Jane
        for num in [7]:
            create_form = request.cls.override_authz_admin_session.get_authorization_form_for_create_for_agent(
                AGENT_ID,
                LOOKUP_OBJECTIVE_FUNCTION_ID,
                request.cls.objective_bank_id_list[num],
                [])
            create_form.display_name = 'Test Authorization ' + str(num) + ' (override)'
            create_form.description = 'Test Authorization for AuthorizationSession tests'
            authz = request.cls.override_authz_admin_session.create_authorization(create_form)
            request.cls.authz_list.append(authz)
            request.cls.authz_id_list.append(authz.ident)

        # Set up Objective search override authorizations for Jane
        for num in [7]:
            create_form = request.cls.override_authz_admin_session.get_authorization_form_for_create_for_agent(
                AGENT_ID,
                SEARCH_OBJECTIVE_FUNCTION_ID,
                request.cls.objective_bank_id_list[num],
                [])
            create_form.display_name = 'Test Authorization ' + str(num) + ' (override)'
            create_form.description = 'Test Authorization for AuthorizationSession tests'
            authz = request.cls.override_authz_admin_session.create_authorization(create_form)
            request.cls.authz_list.append(authz)
            request.cls.authz_id_list.append(authz.ident)

        # Set up Objective search authorizations for Jane
        for num in [1, 5]:
            create_form = request.cls.authz_admin_session.get_authorization_form_for_create_for_agent(
                AGENT_ID,
                SEARCH_OBJECTIVE_FUNCTION_ID,
                request.cls.objective_bank_id_list[num],
                [])
            create_form.display_name = 'Test Authorization ' + str(num)
            create_form.description = 'Test Authorization for AuthorizationSession tests'
            authz = request.cls.authz_admin_session.create_authorization(create_form)
            request.cls.authz_list.append(authz)
            request.cls.authz_id_list.append(authz.ident)
    else:
        # The never-authz path skips the setup above, so the service manager
        # has to be created here as well (the original referenced svc_mgr
        # without ever assigning it on this branch).
        request.cls.svc_mgr = Runtime().get_service_manager(
            'AUTHORIZATION',
            proxy=PROXY,
            implementation=request.cls.service_config)
        request.cls.catalog = request.cls.svc_mgr.get_authorization_session(proxy=PROXY)

    def class_tear_down():
        if not is_never_authz(request.cls.service_config):
            for catalog in request.cls.learning_mgr.get_objective_banks():
                for obj in catalog.get_objectives():
                    catalog.delete_objective(obj.ident)
                request.cls.learning_mgr.delete_objective_bank(catalog.ident)
            for vault in request.cls.vault_lookup_session.get_vaults():
                lookup_session = request.cls.authz_mgr.get_authorization_lookup_session_for_vault(vault.ident)
                admin_session = request.cls.authz_mgr.get_authorization_admin_session_for_vault(vault.ident)
                for authz in lookup_session.get_authorizations():
                    admin_session.delete_authorization(authz.ident)
                request.cls.vault_admin_session.delete_vault(vault.ident)

    request.addfinalizer(class_tear_down)


@pytest.fixture(scope="function")
def authz_adapter_test_fixture(request):
    request.cls.objective_id_lists = []
    count = 0
    if not is_never_authz(request.cls.service_config):
        for objective_bank_ in request.cls.objective_bank_list:
            request.cls.objective_id_lists.append([])
            for color in ['Red', 'Blue', 'Red']:
                create_form = objective_bank_.get_objective_form_for_create([])
                create_form.display_name = color + ' ' + str(count) + ' Objective'
                create_form.description = color + ' objective for authz adapter tests from ObjectiveBank number ' + str(count)
                if color == 'Blue':
                    create_form.genus_type = BLUE_TYPE
                objective = objective_bank_.create_objective(create_form)
                if count == 2 and color == 'Blue':
                    request.cls.learning_mgr.assign_objective_to_objective_bank(
                        objective.ident,
                        request.cls.objective_bank_id_list[7])
                request.cls.objective_id_lists[count].append(objective.ident)
            count += 1

    def test_tear_down():
        if not is_never_authz(request.cls.service_config):
            for index, objective_bank_ in enumerate(request.cls.objective_bank_list):
                for objective_id in request.cls.objective_id_lists[index]:
                    objective_bank_.delete_objective(objective_id)

    request.addfinalizer(test_tear_down)


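# Note on the fixture data: objective_id_lists[n] holds the Ids of the three
# objectives created in objective_bank n, in creation order ('Red', 'Blue',
# 'Red'), so index 1 is always the Blue objective. In particular,
# objective_id_lists[2][1] is the Blue objective that is also assigned to
# objective_bank 7.

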
@pytest.mark.usefixtures("authz_adapter_class_fixture", "authz_adapter_test_fixture")
class TestObjectiveAuthzAdapter(object):
    def test_lookup_objective_bank_0_plenary_isolated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[0])
            objective_bank.use_isolated_objective_bank_view()
            objective_bank.use_plenary_objective_view()
            # with pytest.raises(errors.NotFound):
            #     objectives = objective_bank.get_objectives()
            # with pytest.raises(errors.NotFound):
            #     objectives = objective_bank.get_objectives_by_genus_type(BLUE_TYPE)
            # for objective_id in self.objective_id_lists[0]:
            #     with pytest.raises(errors.NotFound):
            #         objective = objective_bank.get_objective(objective_id)
            # with pytest.raises(errors.NotFound):
            #     objectives = objective_bank.get_objectives_by_ids(self.objective_id_lists[0])

    def test_lookup_objective_bank_0_plenary_federated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[0])
            objective_bank.use_federated_objective_bank_view()
            objective_bank.use_plenary_objective_view()
            assert objective_bank.can_lookup_objectives()
            assert objective_bank.get_objectives().available() == 1
            assert objective_bank.get_objectives_by_genus_type(BLUE_TYPE).available() == 1
            assert objective_bank.get_objectives_by_genus_type(BLUE_TYPE).next().ident == self.objective_id_lists[2][1]
            objective_bank.get_objective(self.objective_id_lists[2][1])
            for objective_num in [0, 2]:
                with pytest.raises(errors.NotFound):  # Is this right? Perhaps PermissionDenied
                    objective = objective_bank.get_objective(self.objective_id_lists[2][objective_num])

    def test_lookup_objective_bank_0_comparative_federated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[0])
            objective_bank.use_federated_objective_bank_view()
            objective_bank.use_comparative_objective_view()
            assert objective_bank.get_objectives().available() == 13
            assert objective_bank.get_objectives_by_genus_type(BLUE_TYPE).available() == 5
            for objective in objective_bank.get_objectives():
                objective_bank.get_objective(objective.ident)
            objective_ids = [objective.ident for objective in objective_bank.get_objectives()]
            objective_bank.get_objectives_by_ids(objective_ids)
            for objective_id in self.objective_id_lists[0]:
                with pytest.raises(errors.NotFound):
                    objective = objective_bank.get_objective(objective_id)
            objective = objective_bank.get_objective(self.objective_id_lists[2][1])
            for objective_num in [0, 2]:
                with pytest.raises(errors.NotFound):
                    objective = objective_bank.get_objective(self.objective_id_lists[2][objective_num])
            for objective_id in self.objective_id_lists[1]:
                objective = objective_bank.get_objective(objective_id)
            for objective_id in self.objective_id_lists[3]:
                objective = objective_bank.get_objective(objective_id)
            for objective_id in self.objective_id_lists[4]:
                objective = objective_bank.get_objective(objective_id)
            for objective_id in self.objective_id_lists[5]:
                objective = objective_bank.get_objective(objective_id)

    def test_lookup_objective_bank_0_comparative_isolated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[0])
            objective_bank.use_isolated_objective_bank_view()
            objective_bank.use_comparative_objective_view()
            assert objective_bank.get_objectives().available() == 0
            assert objective_bank.get_objectives_by_genus_type(BLUE_TYPE).available() == 0

    def test_lookup_objective_bank_1_plenary_isolated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[1])
            objective_bank.use_isolated_objective_bank_view()
            objective_bank.use_plenary_objective_view()
            assert objective_bank.get_objectives().available() == 3
            assert objective_bank.get_objectives_by_genus_type(BLUE_TYPE).available() == 1

    def test_lookup_objective_bank_1_plenary_federated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[1])
            objective_bank.use_federated_objective_bank_view()
            objective_bank.use_plenary_objective_view()
            assert objective_bank.get_objectives().available() == 9
            assert objective_bank.get_objectives_by_genus_type(BLUE_TYPE).available() == 3

    def test_lookup_objective_bank_1_comparative_federated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[1])
            objective_bank.use_federated_objective_bank_view()
            objective_bank.use_comparative_objective_view()
            assert objective_bank.get_objectives().available() == 9
            assert objective_bank.get_objectives_by_genus_type(BLUE_TYPE).available() == 3

    def test_lookup_objective_bank_1_comparative_isolated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[1])
            objective_bank.use_isolated_objective_bank_view()
            objective_bank.use_comparative_objective_view()
            assert objective_bank.get_objectives().available() == 3
            assert objective_bank.get_objectives_by_genus_type(BLUE_TYPE).available() == 1

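    # ObjectiveBank 2 is not itself authorized for objective lookup, so only
    # its Blue objective is visible: it is also assigned to objective_bank 7,
    # where lookup is authorized through the override vault. The federated
    # view additionally reaches child bank 5 (3 objectives, 1 Blue), which is
    # why the comparative federated counts below are 4 and 2.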
    def test_lookup_objective_bank_2_plenary_isolated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[2])
            objective_bank.use_isolated_objective_bank_view()
            objective_bank.use_plenary_objective_view()
            assert objective_bank.get_objectives().available() == 1
            assert objective_bank.get_objectives_by_genus_type(BLUE_TYPE).available() == 1

    def test_lookup_objective_bank_2_plenary_federated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[2])
            objective_bank.use_federated_objective_bank_view()
            objective_bank.use_plenary_objective_view()
            assert objective_bank.get_objectives().available() == 1
            assert objective_bank.get_objectives_by_genus_type(BLUE_TYPE).available() == 1

    def test_lookup_objective_bank_2_comparative_federated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[2])
            objective_bank.use_federated_objective_bank_view()
            objective_bank.use_comparative_objective_view()
            assert objective_bank.get_objectives().available() == 4
            assert objective_bank.get_objectives_by_genus_type(BLUE_TYPE).available() == 2

    def test_lookup_objective_bank_2_comparative_isolated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[2])
            objective_bank.use_isolated_objective_bank_view()
            objective_bank.use_comparative_objective_view()
            assert objective_bank.get_objectives().available() == 1
            assert objective_bank.get_objectives_by_genus_type(BLUE_TYPE).available() == 1

    def test_lookup_objective_bank_3_plenary_isolated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[3])
            objective_bank.use_isolated_objective_bank_view()
            objective_bank.use_plenary_objective_view()
            assert objective_bank.get_objectives().available() == 3
            assert objective_bank.get_objectives_by_genus_type(BLUE_TYPE).available() == 1

    def test_lookup_objective_bank_3_plenary_federated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[3])
            objective_bank.use_federated_objective_bank_view()
            objective_bank.use_plenary_objective_view()
            assert objective_bank.get_objectives().available() == 3
            assert objective_bank.get_objectives_by_genus_type(BLUE_TYPE).available() == 1

    def test_lookup_objective_bank_3_comparative_federated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[3])
            objective_bank.use_federated_objective_bank_view()
            objective_bank.use_comparative_objective_view()
            assert objective_bank.get_objectives().available() == 3
            assert objective_bank.get_objectives_by_genus_type(BLUE_TYPE).available() == 1

    def test_lookup_objective_bank_3_comparative_isolated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[3])
            objective_bank.use_isolated_objective_bank_view()
            objective_bank.use_comparative_objective_view()
            assert objective_bank.get_objectives().available() == 3
            assert objective_bank.get_objectives_by_genus_type(BLUE_TYPE).available() == 1

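    # The query tests below exercise Jane's search authorizations (banks 1
    # and 5, plus the override on bank 7). She holds no search function at
    # bank 0, so an isolated objective query there raises PermissionDenied.
    # Federated from bank 0, search reaches banks 1, 3, 4 and 5 (2 Red
    # objectives each, hence 8 'red' matches), and the bank 7 override adds
    # bank 2's Blue objective (4 + 1 = 5 'blue' matches).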
    def test_query_objective_bank_0_isolated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[0])
            objective_bank.use_isolated_objective_bank_view()
            with pytest.raises(errors.PermissionDenied):
                query = objective_bank.get_objective_query()

    def test_query_objective_bank_0_federated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[0])
            objective_bank.use_federated_objective_bank_view()
            query = objective_bank.get_objective_query()
            query.match_display_name('red')
            assert objective_bank.get_objectives_by_query(query).available() == 8
            query.clear_display_name_terms()
            query.match_display_name('blue')
            assert objective_bank.get_objectives_by_query(query).available() == 5

    def test_query_objective_bank_1_isolated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[1])
            objective_bank.use_isolated_objective_bank_view()
            query = objective_bank.get_objective_query()
            query.match_display_name('red')
            assert objective_bank.get_objectives_by_query(query).available() == 2

    def test_query_objective_bank_1_federated(self):
        if not is_never_authz(self.service_config):
            janes_learning_mgr = Runtime().get_service_manager(
                'LEARNING',
                proxy=JANE_PROXY,
                implementation='TEST_SERVICE_JSON_AUTHZ')
            objective_bank = janes_learning_mgr.get_objective_bank(self.objective_bank_id_list[1])
            objective_bank.use_federated_objective_bank_view()
            query = objective_bank.get_objective_query()
            query.match_display_name('red')
            assert objective_bank.get_objectives_by_query(query).available() == 6